Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accessibility/braille/braille_console.c | 13
-rw-r--r--  drivers/acpi/glue.c | 5
-rw-r--r--  drivers/acpi/sleep/proc.c | 10
-rw-r--r--  drivers/ata/Kconfig | 2
-rw-r--r--  drivers/ata/ahci.c | 25
-rw-r--r--  drivers/ata/ata_piix.c | 184
-rw-r--r--  drivers/ata/libata-core.c | 252
-rw-r--r--  drivers/ata/libata-eh.c | 375
-rw-r--r--  drivers/ata/libata-scsi.c | 112
-rw-r--r--  drivers/ata/libata.h | 5
-rw-r--r--  drivers/ata/pata_bf54x.c | 34
-rw-r--r--  drivers/ata/pata_sil680.c | 2
-rw-r--r--  drivers/ata/sata_fsl.c | 26
-rw-r--r--  drivers/ata/sata_inic162x.c | 8
-rw-r--r--  drivers/ata/sata_mv.c | 28
-rw-r--r--  drivers/ata/sata_nv.c | 62
-rw-r--r--  drivers/ata/sata_promise.c | 16
-rw-r--r--  drivers/ata/sata_qstor.c | 12
-rw-r--r--  drivers/ata/sata_sil.c | 16
-rw-r--r--  drivers/ata/sata_sil24.c | 12
-rw-r--r--  drivers/ata/sata_sis.c | 28
-rw-r--r--  drivers/ata/sata_svw.c | 10
-rw-r--r--  drivers/ata/sata_uli.c | 24
-rw-r--r--  drivers/ata/sata_via.c | 24
-rw-r--r--  drivers/ata/sata_vsc.c | 10
-rw-r--r--  drivers/base/base.h | 2
-rw-r--r--  drivers/base/class.c | 136
-rw-r--r--  drivers/base/core.c | 6
-rw-r--r--  drivers/block/aoe/aoeblk.c | 6
-rw-r--r--  drivers/block/aoe/aoecmd.c | 19
-rw-r--r--  drivers/block/aoe/aoedev.c | 2
-rw-r--r--  drivers/block/cciss.c | 8
-rw-r--r--  drivers/block/cciss_scsi.c | 151
-rw-r--r--  drivers/block/cciss_scsi.h | 4
-rw-r--r--  drivers/block/cpqarray.c | 2
-rw-r--r--  drivers/block/floppy.c | 31
-rw-r--r--  drivers/block/nbd.c | 4
-rw-r--r--  drivers/block/pktcdvd.c | 4
-rw-r--r--  drivers/block/ps3disk.c | 11
-rw-r--r--  drivers/block/virtio_blk.c | 14
-rw-r--r--  drivers/block/xen-blkfront.c | 76
-rw-r--r--  drivers/bluetooth/bpa10x.c | 2
-rw-r--r--  drivers/bluetooth/btusb.c | 36
-rw-r--r--  drivers/cdrom/cdrom.c | 2
-rw-r--r--  drivers/cdrom/gdrom.c | 4
-rw-r--r--  drivers/char/random.c | 6
-rw-r--r--  drivers/char/tty_io.c | 14
-rw-r--r--  drivers/dma/dw_dmac.c | 2
-rw-r--r--  drivers/hwmon/abituguru3.c | 3
-rw-r--r--  drivers/hwmon/ad7414.c | 2
-rw-r--r--  drivers/hwmon/atxp1.c | 18
-rw-r--r--  drivers/hwmon/it87.c | 74
-rw-r--r--  drivers/i2c/busses/i2c-powermac.c | 4
-rw-r--r--  drivers/i2c/i2c-dev.c | 4
-rw-r--r--  drivers/ide/Kconfig | 18
-rw-r--r--  drivers/ide/ide-cd.c | 8
-rw-r--r--  drivers/ide/ide-disk.c | 15
-rw-r--r--  drivers/ide/ide-dma.c | 2
-rw-r--r--  drivers/ide/ide-probe.c | 4
-rw-r--r--  drivers/ide/ide-tape.c | 10
-rw-r--r--  drivers/ide/mips/Makefile | 1
-rw-r--r--  drivers/ide/mips/swarm.c | 196
-rw-r--r--  drivers/input/mouse/bcm5974.c | 13
-rw-r--r--  drivers/input/touchscreen/jornada720_ts.c | 4
-rw-r--r--  drivers/leds/leds-fsg.c | 28
-rw-r--r--  drivers/leds/leds-pca955x.c | 70
-rw-r--r--  drivers/md/dm-crypt.c | 109
-rw-r--r--  drivers/md/dm-exception-store.c | 29
-rw-r--r--  drivers/md/dm-ioctl.c | 10
-rw-r--r--  drivers/md/dm-mpath.c | 66
-rw-r--r--  drivers/md/dm-mpath.h | 2
-rw-r--r--  drivers/md/dm-raid1.c | 4
-rw-r--r--  drivers/md/dm-stripe.c | 4
-rw-r--r--  drivers/md/dm-table.c | 97
-rw-r--r--  drivers/md/dm.c | 52
-rw-r--r--  drivers/md/dm.h | 10
-rw-r--r--  drivers/md/linear.c | 10
-rw-r--r--  drivers/md/md.c | 15
-rw-r--r--  drivers/md/multipath.c | 8
-rw-r--r--  drivers/md/raid0.c | 10
-rw-r--r--  drivers/md/raid1.c | 13
-rw-r--r--  drivers/md/raid10.c | 14
-rw-r--r--  drivers/md/raid5.c | 75
-rw-r--r--  drivers/media/common/tuners/tuner-xc2028.h | 1
-rw-r--r--  drivers/media/dvb/b2c2/flexcop-fe-tuner.c | 1
-rw-r--r--  drivers/media/dvb/dvb-core/dmxdev.c | 16
-rw-r--r--  drivers/media/dvb/dvb-core/dvb_demux.c | 16
-rw-r--r--  drivers/media/dvb/frontends/s5h1420.c | 11
-rw-r--r--  drivers/media/dvb/frontends/s5h1420.h | 8
-rw-r--r--  drivers/media/dvb/siano/sms-cards.c | 4
-rw-r--r--  drivers/media/video/bt8xx/bttv-driver.c | 2
-rw-r--r--  drivers/media/video/cafe_ccic.c | 1
-rw-r--r--  drivers/media/video/cpia2/cpia2_usb.c | 5
-rw-r--r--  drivers/media/video/cx18/cx18-cards.c | 2
-rw-r--r--  drivers/media/video/em28xx/em28xx-audio.c | 12
-rw-r--r--  drivers/media/video/em28xx/em28xx-cards.c | 55
-rw-r--r--  drivers/media/video/em28xx/em28xx-dvb.c | 9
-rw-r--r--  drivers/media/video/gspca/gspca.c | 3
-rw-r--r--  drivers/media/video/gspca/pac7311.c | 1
-rw-r--r--  drivers/media/video/gspca/sonixb.c | 4
-rw-r--r--  drivers/media/video/gspca/sonixj.c | 19
-rw-r--r--  drivers/media/video/gspca/spca561.c | 2
-rw-r--r--  drivers/media/video/gspca/zc3xx.c | 4
-rw-r--r--  drivers/media/video/ov511.c | 6
-rw-r--r--  drivers/media/video/pvrusb2/pvrusb2-devattr.c | 2
-rw-r--r--  drivers/media/video/s2255drv.c | 3
-rw-r--r--  drivers/media/video/uvc/uvc_ctrl.c | 2
-rw-r--r--  drivers/media/video/w9968cf.c | 2
-rw-r--r--  drivers/media/video/wm8739.c | 4
-rw-r--r--  drivers/media/video/zoran_card.c | 2
-rw-r--r--  drivers/media/video/zoran_driver.c | 15
-rw-r--r--  drivers/memstick/core/mspro_block.c | 4
-rw-r--r--  drivers/mfd/Kconfig | 4
-rw-r--r--  drivers/mfd/asic3.c | 2
-rw-r--r--  drivers/misc/eeepc-laptop.c | 16
-rw-r--r--  drivers/mmc/card/block.c | 21
-rw-r--r--  drivers/mmc/card/mmc_test.c | 4
-rw-r--r--  drivers/mmc/host/atmel-mci.c | 18
-rw-r--r--  drivers/mmc/host/tmio_mmc.h | 4
-rw-r--r--  drivers/mtd/ftl.c | 24
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 16
-rw-r--r--  drivers/net/e1000e/e1000.h | 5
-rw-r--r--  drivers/net/e1000e/ethtool.c | 9
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 78
-rw-r--r--  drivers/net/e1000e/netdev.c | 72
-rw-r--r--  drivers/net/e1000e/param.c | 30
-rw-r--r--  drivers/net/wireless/ath9k/core.c | 11
-rw-r--r--  drivers/net/wireless/ath9k/core.h | 1
-rw-r--r--  drivers/net/wireless/ath9k/main.c | 12
-rw-r--r--  drivers/net/wireless/ath9k/xmit.c | 6
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 1
-rw-r--r--  drivers/pci/pci-sysfs.c | 19
-rw-r--r--  drivers/pci/pcie/aspm.c | 2
-rw-r--r--  drivers/pci/search.c | 6
-rw-r--r--  drivers/pcmcia/ds.c | 23
-rw-r--r--  drivers/pnp/Makefile | 5
-rw-r--r--  drivers/pnp/pnpacpi/core.c | 2
-rw-r--r--  drivers/pnp/pnpbios/core.c | 2
-rw-r--r--  drivers/rtc/rtc-dev.c | 15
-rw-r--r--  drivers/s390/block/dasd_proc.c | 3
-rw-r--r--  drivers/s390/block/dcssblk.c | 4
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 55
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 150
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 45
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 75
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 1
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 179
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 229
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 27
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 227
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 584
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 75
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 67
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 28
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 60
-rw-r--r--  drivers/scsi/Kconfig | 8
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 5
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 4
-rw-r--r--  drivers/scsi/gdth.c | 60
-rw-r--r--  drivers/scsi/gdth.h | 2
-rw-r--r--  drivers/scsi/gdth_proc.c | 66
-rw-r--r--  drivers/scsi/gdth_proc.h | 3
-rw-r--r--  drivers/scsi/hosts.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 2
-rw-r--r--  drivers/scsi/ide-scsi.c | 2
-rw-r--r--  drivers/scsi/ipr.c | 3
-rw-r--r--  drivers/scsi/ips.c | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 19
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 10
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 2
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 30
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 6
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 4
-rw-r--r--  drivers/scsi/qla1280.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 71
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 30
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 18
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 338
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 4
-rw-r--r--  drivers/scsi/qlogicpti.c | 1
-rw-r--r--  drivers/scsi/scsi.c | 105
-rw-r--r--  drivers/scsi/scsi_error.c | 90
-rw-r--r--  drivers/scsi/scsi_lib.c | 59
-rw-r--r--  drivers/scsi/scsi_netlink.c | 523
-rw-r--r--  drivers/scsi/scsi_priv.h | 7
-rw-r--r--  drivers/scsi/scsi_proc.c | 8
-rw-r--r--  drivers/scsi/scsi_scan.c | 20
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 8
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c | 8
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 60
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 4
-rw-r--r--  drivers/scsi/sd.c | 122
-rw-r--r--  drivers/scsi/sg.c | 667
-rw-r--r--  drivers/scsi/sr.c | 7
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 4
-rw-r--r--  drivers/scsi/tmscsim.c | 4
-rw-r--r--  drivers/serial/atmel_serial.c | 32
-rw-r--r--  drivers/spi/orion_spi.c | 2
-rw-r--r--  drivers/spi/pxa2xx_spi.c | 4
-rw-r--r--  drivers/ssb/main.c | 1
-rw-r--r--  drivers/usb/core/hcd.c | 3
-rw-r--r--  drivers/usb/core/hub.c | 39
-rw-r--r--  drivers/usb/gadget/fsl_usb2_udc.c | 2
-rw-r--r--  drivers/usb/gadget/omap_udc.c | 4
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 26
-rw-r--r--  drivers/usb/host/ehci-sched.c | 32
-rw-r--r--  drivers/usb/musb/Kconfig | 1
-rw-r--r--  drivers/usb/musb/musb_core.c | 4
-rw-r--r--  drivers/usb/musb/omap2430.c | 4
-rw-r--r--  drivers/usb/musb/omap2430.h | 4
-rw-r--r--  drivers/usb/serial/cp2101.c | 3
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 3
-rw-r--r--  drivers/usb/serial/ftdi_sio.h | 5
-rw-r--r--  drivers/usb/serial/option.c | 2
-rw-r--r--  drivers/usb/serial/sierra.c | 12
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 2
-rw-r--r--  drivers/usb/serial/usb-serial.c | 4
-rw-r--r--  drivers/usb/storage/Kconfig | 12
-rw-r--r--  drivers/usb/storage/Makefile | 3
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 30
-rw-r--r--  drivers/usb/storage/usb.c | 2
-rw-r--r--  drivers/video/console/fbcon.c | 6
-rw-r--r--  drivers/video/console/fbcon.h | 2
-rw-r--r--  drivers/watchdog/geodewdt.c | 6
-rw-r--r--  drivers/watchdog/ibmasr.c | 1
-rw-r--r--  drivers/watchdog/pnx4008_wdt.c | 4
-rw-r--r--  drivers/watchdog/rc32434_wdt.c | 6
-rw-r--r--  drivers/watchdog/rdc321x_wdt.c | 6
-rw-r--r--  drivers/watchdog/wdt285.c | 15
239 files changed, 4704 insertions, 3307 deletions
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index 0a5f6b2114c5..d672cfe7ca59 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -376,6 +376,8 @@ int braille_register_console(struct console *console, int index,
 	console->flags |= CON_ENABLED;
 	console->index = index;
 	braille_co = console;
+	register_keyboard_notifier(&keyboard_notifier_block);
+	register_vt_notifier(&vt_notifier_block);
 	return 0;
 }
 
@@ -383,15 +385,8 @@ int braille_unregister_console(struct console *console)
 {
 	if (braille_co != console)
 		return -EINVAL;
+	unregister_keyboard_notifier(&keyboard_notifier_block);
+	unregister_vt_notifier(&vt_notifier_block);
 	braille_co = NULL;
 	return 0;
 }
-
-static int __init braille_init(void)
-{
-	register_keyboard_notifier(&keyboard_notifier_block);
-	register_vt_notifier(&vt_notifier_block);
-	return 0;
-}
-
-console_initcall(braille_init);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 084109507c9f..8dd3336efd7e 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -165,8 +165,11 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle)
 						   "firmware_node");
 		ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
 					   "physical_node");
-		if (acpi_dev->wakeup.flags.valid)
+		if (acpi_dev->wakeup.flags.valid) {
 			device_set_wakeup_capable(dev, true);
+			device_set_wakeup_enable(dev,
+						 acpi_dev->wakeup.state.enabled);
+		}
 	}
 
 	return 0;
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 4ebbba2b6b19..bf5b04de02d1 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -377,6 +377,14 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 	return 0;
 }
 
+static void physical_device_enable_wakeup(struct acpi_device *adev)
+{
+	struct device *dev = acpi_get_physical_device(adev->handle);
+
+	if (dev && device_can_wakeup(dev))
+		device_set_wakeup_enable(dev, adev->wakeup.state.enabled);
+}
+
 static ssize_t
 acpi_system_write_wakeup_device(struct file *file,
 				const char __user * buffer,
@@ -411,6 +419,7 @@ acpi_system_write_wakeup_device(struct file *file,
 		}
 	}
 	if (found_dev) {
+		physical_device_enable_wakeup(found_dev);
 		list_for_each_safe(node, next, &acpi_wakeup_device_list) {
 			struct acpi_device *dev = container_of(node,
 							       struct
@@ -428,6 +437,7 @@ acpi_system_write_wakeup_device(struct file *file,
 				    dev->pnp.bus_id, found_dev->pnp.bus_id);
 				dev->wakeup.state.enabled =
 				    found_dev->wakeup.state.enabled;
+				physical_device_enable_wakeup(dev);
 			}
 		}
 	}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 11c8c19f0fb7..f17cd4b572f8 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -663,7 +663,7 @@ config HAVE_PATA_PLATFORM
 
 config PATA_PLATFORM
 	tristate "Generic platform device PATA support"
-	depends on EMBEDDED || ARCH_RPC || PPC || HAVE_PATA_PLATFORM
+	depends on EMBEDDED || PPC || HAVE_PATA_PLATFORM
 	help
 	  This option enables support for generic directly connected ATA
 	  devices commonly found on embedded systems.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 2e1a7cb2ed5f..aeadd00411a1 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -267,8 +267,8 @@ struct ahci_port_priv {
 					 * per PM slot */
 };
 
-static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
-static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
@@ -316,6 +316,7 @@ static struct device_attribute *ahci_shost_attrs[] = {
 
 static struct device_attribute *ahci_sdev_attrs[] = {
 	&dev_attr_sw_activity,
+	&dev_attr_unload_heads,
 	NULL
 };
 
@@ -820,10 +821,10 @@ static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
 	return 0;
 }
 
-static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
+static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
 {
-	void __iomem *port_mmio = ahci_port_base(ap);
-	int offset = ahci_scr_offset(ap, sc_reg);
+	void __iomem *port_mmio = ahci_port_base(link->ap);
+	int offset = ahci_scr_offset(link->ap, sc_reg);
 
 	if (offset) {
 		*val = readl(port_mmio + offset);
@@ -832,10 +833,10 @@ static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
 	return -EINVAL;
 }
 
-static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
 {
-	void __iomem *port_mmio = ahci_port_base(ap);
-	int offset = ahci_scr_offset(ap, sc_reg);
+	void __iomem *port_mmio = ahci_port_base(link->ap);
+	int offset = ahci_scr_offset(link->ap, sc_reg);
 
 	if (offset) {
 		writel(val, port_mmio + offset);
@@ -973,7 +974,7 @@ static void ahci_disable_alpm(struct ata_port *ap)
 	writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
 
 	/* go ahead and clean out PhyRdy Change from Serror too */
-	ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
+	ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
 
 	/*
 	 * Clear flag to indicate that we should ignore all PhyRdy
@@ -1937,8 +1938,8 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
 
 	/* AHCI needs SError cleared; otherwise, it might lock up */
-	ahci_scr_read(ap, SCR_ERROR, &serror);
-	ahci_scr_write(ap, SCR_ERROR, serror);
+	ahci_scr_read(&ap->link, SCR_ERROR, &serror);
+	ahci_scr_write(&ap->link, SCR_ERROR, serror);
 	host_ehi->serror |= serror;
 
 	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
@@ -2027,7 +2028,7 @@ static void ahci_port_intr(struct ata_port *ap)
 	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
 	    (status & PORT_IRQ_PHYRDY)) {
 		status &= ~PORT_IRQ_PHYRDY;
-		ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
+		ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
 	}
 
 	if (unlikely(status & PORT_IRQ_ERROR)) {
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index e6b4606e36b6..e9e32ed6b1a3 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -165,8 +165,10 @@ static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
 static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev);
 static int ich_pata_cable_detect(struct ata_port *ap);
 static u8 piix_vmw_bmdma_status(struct ata_port *ap);
-static int piix_sidpr_scr_read(struct ata_port *ap, unsigned int reg, u32 *val);
-static int piix_sidpr_scr_write(struct ata_port *ap, unsigned int reg, u32 val);
+static int piix_sidpr_scr_read(struct ata_link *link,
+			       unsigned int reg, u32 *val);
+static int piix_sidpr_scr_write(struct ata_link *link,
+				unsigned int reg, u32 val);
 #ifdef CONFIG_PM
 static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
 static int piix_pci_device_resume(struct pci_dev *pdev);
@@ -278,12 +280,15 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	/* SATA Controller IDE (PCH) */
 	{ 0x8086, 0x3b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller IDE (PCH) */
+	{ 0x8086, 0x3b21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (PCH) */
 	{ 0x8086, 0x3b26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	/* SATA Controller IDE (PCH) */
+	{ 0x8086, 0x3b28, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+	/* SATA Controller IDE (PCH) */
 	{ 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	/* SATA Controller IDE (PCH) */
 	{ 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
-
 	{ }	/* terminate list */
 };
 
@@ -582,6 +587,7 @@ static const struct ich_laptop ich_laptop[] = {
 	{ 0x27DF, 0x1025, 0x0110 },	/* ICH7 on Acer 3682WLMi */
 	{ 0x27DF, 0x1043, 0x1267 },	/* ICH7 on Asus W5F */
 	{ 0x27DF, 0x103C, 0x30A1 },	/* ICH7 on HP Compaq nc2400 */
+	{ 0x27DF, 0x1071, 0xD221 },	/* ICH7 on Hercules EC-900 */
 	{ 0x24CA, 0x1025, 0x0061 },	/* ICH4 on ACER Aspire 2023WLMi */
 	{ 0x24CA, 0x1025, 0x003d },	/* ICH4 on ACER TM290 */
 	{ 0x266F, 0x1025, 0x0066 },	/* ICH6 on ACER Aspire 1694WLMi */
@@ -885,23 +891,9 @@ static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev)
  * Serial ATA Index/Data Pair Superset Registers access
  *
  * Beginning from ICH8, there's a sane way to access SCRs using index
- * and data register pair located at BAR5.  This creates an
- * interesting problem of mapping two SCRs to one port.
- *
- * Although they have separate SCRs, the master and slave aren't
- * independent enough to be treated as separate links - e.g. softreset
- * resets both.  Also, there's no protocol defined for hard resetting
- * singled device sharing the virtual port (no defined way to acquire
- * device signature).  This is worked around by merging the SCR values
- * into one sensible value and requesting follow-up SRST after
- * hardreset.
- *
- * SCR merging is perfomed in nibbles which is the unit contents in
- * SCRs are organized.  If two values are equal, the value is used.
- * When they differ, merge table which lists precedence of possible
- * values is consulted and the first match or the last entry when
- * nothing matches is used.  When there's no merge table for the
- * specific nibble, value from the first port is used.
+ * and data register pair located at BAR5 which means that we have
+ * separate SCRs for master and slave.  This is handled using libata
+ * slave_link facility.
  */
 static const int piix_sidx_map[] = {
 	[SCR_STATUS]	= 0,
@@ -909,120 +901,38 @@ static const int piix_sidx_map[] = {
 	[SCR_CONTROL]	= 1,
 };
 
-static void piix_sidpr_sel(struct ata_device *dev, unsigned int reg)
+static void piix_sidpr_sel(struct ata_link *link, unsigned int reg)
 {
-	struct ata_port *ap = dev->link->ap;
+	struct ata_port *ap = link->ap;
 	struct piix_host_priv *hpriv = ap->host->private_data;
 
-	iowrite32(((ap->port_no * 2 + dev->devno) << 8) | piix_sidx_map[reg],
+	iowrite32(((ap->port_no * 2 + link->pmp) << 8) | piix_sidx_map[reg],
 		  hpriv->sidpr + PIIX_SIDPR_IDX);
 }
 
-static int piix_sidpr_read(struct ata_device *dev, unsigned int reg)
-{
-	struct piix_host_priv *hpriv = dev->link->ap->host->private_data;
-
-	piix_sidpr_sel(dev, reg);
-	return ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
-}
-
-static void piix_sidpr_write(struct ata_device *dev, unsigned int reg, u32 val)
-{
-	struct piix_host_priv *hpriv = dev->link->ap->host->private_data;
-
-	piix_sidpr_sel(dev, reg);
-	iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
-}
-
-static u32 piix_merge_scr(u32 val0, u32 val1, const int * const *merge_tbl)
-{
-	u32 val = 0;
-	int i, mi;
-
-	for (i = 0, mi = 0; i < 32 / 4; i++) {
-		u8 c0 = (val0 >> (i * 4)) & 0xf;
-		u8 c1 = (val1 >> (i * 4)) & 0xf;
-		u8 merged = c0;
-		const int *cur;
-
-		/* if no merge preference, assume the first value */
-		cur = merge_tbl[mi];
-		if (!cur)
-			goto done;
-		mi++;
-
-		/* if two values equal, use it */
-		if (c0 == c1)
-			goto done;
-
-		/* choose the first match or the last from the merge table */
-		while (*cur != -1) {
-			if (c0 == *cur || c1 == *cur)
-				break;
-			cur++;
-		}
-		if (*cur == -1)
-			cur--;
-		merged = *cur;
-	done:
-		val |= merged << (i * 4);
-	}
-
-	return val;
-}
-
-static int piix_sidpr_scr_read(struct ata_port *ap, unsigned int reg, u32 *val)
+static int piix_sidpr_scr_read(struct ata_link *link,
+			       unsigned int reg, u32 *val)
 {
-	const int * const sstatus_merge_tbl[] = {
-		/* DET */ (const int []){ 1, 3, 0, 4, 3, -1 },
-		/* SPD */ (const int []){ 2, 1, 0, -1 },
-		/* IPM */ (const int []){ 6, 2, 1, 0, -1 },
-		NULL,
-	};
-	const int * const scontrol_merge_tbl[] = {
-		/* DET */ (const int []){ 1, 0, 4, 0, -1 },
-		/* SPD */ (const int []){ 0, 2, 1, 0, -1 },
-		/* IPM */ (const int []){ 0, 1, 2, 3, 0, -1 },
-		NULL,
-	};
-	u32 v0, v1;
+	struct piix_host_priv *hpriv = link->ap->host->private_data;
 
 	if (reg >= ARRAY_SIZE(piix_sidx_map))
 		return -EINVAL;
 
-	if (!(ap->flags & ATA_FLAG_SLAVE_POSS)) {
-		*val = piix_sidpr_read(&ap->link.device[0], reg);
-		return 0;
-	}
-
-	v0 = piix_sidpr_read(&ap->link.device[0], reg);
-	v1 = piix_sidpr_read(&ap->link.device[1], reg);
-
-	switch (reg) {
-	case SCR_STATUS:
-		*val = piix_merge_scr(v0, v1, sstatus_merge_tbl);
-		break;
-	case SCR_ERROR:
-		*val = v0 | v1;
-		break;
-	case SCR_CONTROL:
-		*val = piix_merge_scr(v0, v1, scontrol_merge_tbl);
-		break;
-	}
-
+	piix_sidpr_sel(link, reg);
+	*val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
 	return 0;
 }
 
-static int piix_sidpr_scr_write(struct ata_port *ap, unsigned int reg, u32 val)
+static int piix_sidpr_scr_write(struct ata_link *link,
+				unsigned int reg, u32 val)
 {
+	struct piix_host_priv *hpriv = link->ap->host->private_data;
+
 	if (reg >= ARRAY_SIZE(piix_sidx_map))
 		return -EINVAL;
 
-	piix_sidpr_write(&ap->link.device[0], reg, val);
-
-	if (ap->flags & ATA_FLAG_SLAVE_POSS)
-		piix_sidpr_write(&ap->link.device[1], reg, val);
-
+	piix_sidpr_sel(link, reg);
+	iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
 	return 0;
 }
 
@@ -1363,28 +1273,28 @@ static const int *__devinit piix_init_sata_map(struct pci_dev *pdev,
 	return map;
 }
 
-static void __devinit piix_init_sidpr(struct ata_host *host)
+static int __devinit piix_init_sidpr(struct ata_host *host)
 {
 	struct pci_dev *pdev = to_pci_dev(host->dev);
 	struct piix_host_priv *hpriv = host->private_data;
-	struct ata_device *dev0 = &host->ports[0]->link.device[0];
+	struct ata_link *link0 = &host->ports[0]->link;
 	u32 scontrol;
-	int i;
+	int i, rc;
 
 	/* check for availability */
 	for (i = 0; i < 4; i++)
 		if (hpriv->map[i] == IDE)
-			return;
+			return 0;
 
 	if (!(host->ports[0]->flags & PIIX_FLAG_SIDPR))
-		return;
+		return 0;
 
 	if (pci_resource_start(pdev, PIIX_SIDPR_BAR) == 0 ||
 	    pci_resource_len(pdev, PIIX_SIDPR_BAR) != PIIX_SIDPR_LEN)
-		return;
+		return 0;
 
 	if (pcim_iomap_regions(pdev, 1 << PIIX_SIDPR_BAR, DRV_NAME))
-		return;
+		return 0;
 
 	hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR];
 
@@ -1392,7 +1302,7 @@ static void __devinit piix_init_sidpr(struct ata_host *host)
 	 * Give it a test drive by inhibiting power save modes which
 	 * we'll do anyway.
 	 */
-	scontrol = piix_sidpr_read(dev0, SCR_CONTROL);
+	piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);
 
 	/* if IPM is already 3, SCR access is probably working.  Don't
 	 * un-inhibit power save modes as BIOS might have inhibited
@@ -1400,18 +1310,30 @@ static void __devinit piix_init_sidpr(struct ata_host *host)
 	 */
 	if ((scontrol & 0xf00) != 0x300) {
 		scontrol |= 0x300;
-		piix_sidpr_write(dev0, SCR_CONTROL, scontrol);
-		scontrol = piix_sidpr_read(dev0, SCR_CONTROL);
+		piix_sidpr_scr_write(link0, SCR_CONTROL, scontrol);
+		piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);
 
 		if ((scontrol & 0xf00) != 0x300) {
 			dev_printk(KERN_INFO, host->dev, "SCR access via "
 				   "SIDPR is available but doesn't work\n");
-			return;
+			return 0;
 		}
 	}
 
-	host->ports[0]->ops = &piix_sidpr_sata_ops;
-	host->ports[1]->ops = &piix_sidpr_sata_ops;
+	/* okay, SCRs available, set ops and ask libata for slave_link */
+	for (i = 0; i < 2; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		ap->ops = &piix_sidpr_sata_ops;
+
+		if (ap->flags & ATA_FLAG_SLAVE_POSS) {
+			rc = ata_slave_link_init(ap);
+			if (rc)
+				return rc;
+		}
+	}
+
+	return 0;
 }
 
 static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
@@ -1521,7 +1443,9 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
 	/* initialize controller */
 	if (port_flags & ATA_FLAG_SATA) {
 		piix_init_pcs(host, piix_map_db_table[ent->driver_data]);
-		piix_init_sidpr(host);
+		rc = piix_init_sidpr(host);
+		if (rc)
+			return rc;
 	}
 
 	/* apply IOCFG bit18 quirk */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 79e3a8e7a84a..1ee9499bd343 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -163,6 +163,67 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
 
+/*
+ * Iterator helpers.  Don't use directly.
+ *
+ * LOCKING:
+ * Host lock or EH context.
+ */
+struct ata_link *__ata_port_next_link(struct ata_port *ap,
+				      struct ata_link *link, bool dev_only)
+{
+	/* NULL link indicates start of iteration */
+	if (!link) {
+		if (dev_only && sata_pmp_attached(ap))
+			return ap->pmp_link;
+		return &ap->link;
+	}
+
+	/* we just iterated over the host master link, what's next? */
+	if (link == &ap->link) {
+		if (!sata_pmp_attached(ap)) {
+			if (unlikely(ap->slave_link) && !dev_only)
+				return ap->slave_link;
+			return NULL;
+		}
+		return ap->pmp_link;
+	}
+
+	/* slave_link excludes PMP */
+	if (unlikely(link == ap->slave_link))
+		return NULL;
+
+	/* iterate to the next PMP link */
+	if (++link < ap->pmp_link + ap->nr_pmp_links)
+		return link;
+	return NULL;
+}
+
+/**
+ *	ata_dev_phys_link - find physical link for a device
+ *	@dev: ATA device to look up physical link for
+ *
+ *	Look up physical link which @dev is attached to.  Note that
+ *	this is different from @dev->link only when @dev is on slave
+ *	link.  For all other cases, it's the same as @dev->link.
+ *
+ *	LOCKING:
+ *	Don't care.
+ *
+ *	RETURNS:
+ *	Pointer to the found physical link.
+ */
+struct ata_link *ata_dev_phys_link(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+
+	if (!ap->slave_link)
+		return dev->link;
+	if (!dev->devno)
+		return &ap->link;
+	return ap->slave_link;
+}
+
 /**
  *	ata_force_cbl - force cable type according to libata.force
  *	@ap: ATA port of interest
@@ -206,7 +267,8 @@ void ata_force_cbl(struct ata_port *ap)
  * the host link and all fan-out ports connected via PMP.  If the
  * device part is specified as 0 (e.g. 1.00:), it specifies the
  * first fan-out link not the host link.  Device number 15 always
- * points to the host link whether PMP is attached or not.
+ * points to the host link whether PMP is attached or not.  If the
+ * controller has slave link, device number 16 points to it.
  *
  * LOCKING:
  * EH context.
@@ -214,12 +276,11 @@ void ata_force_cbl(struct ata_port *ap)
 static void ata_force_link_limits(struct ata_link *link)
 {
 	bool did_spd = false;
-	int linkno, i;
+	int linkno = link->pmp;
+	int i;
 
 	if (ata_is_host_link(link))
-		linkno = 15;
-	else
-		linkno = link->pmp;
+		linkno += 15;
 
 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
 		const struct ata_force_ent *fe = &ata_force_tbl[i];
@@ -266,9 +327,9 @@ static void ata_force_xfermask(struct ata_device *dev)
 	int alt_devno = devno;
 	int i;
 
-	/* allow n.15 for the first device attached to host port */
-	if (ata_is_host_link(dev->link) && devno == 0)
-		alt_devno = 15;
+	/* allow n.15/16 for devices attached to host port */
+	if (ata_is_host_link(dev->link))
+		alt_devno += 15;
 
 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
 		const struct ata_force_ent *fe = &ata_force_tbl[i];
@@ -320,9 +381,9 @@ static void ata_force_horkage(struct ata_device *dev)
 	int alt_devno = devno;
 	int i;
 
-	/* allow n.15 for the first device attached to host port */
-	if (ata_is_host_link(dev->link) && devno == 0)
-		alt_devno = 15;
+	/* allow n.15/16 for devices attached to host port */
+	if (ata_is_host_link(dev->link))
+		alt_devno += 15;
 
 	for (i = 0; i < ata_force_tbl_size; i++) {
 		const struct ata_force_ent *fe = &ata_force_tbl[i];
@@ -2681,7 +2742,7 @@ static void sata_print_link_status(struct ata_link *link)
 		return;
 	sata_scr_read(link, SCR_CONTROL, &scontrol);
 
-	if (ata_link_online(link)) {
+	if (ata_phys_link_online(link)) {
 		tmp = (sstatus >> 4) & 0xf;
 		ata_link_printk(link, KERN_INFO,
 				"SATA link up %s (SStatus %X SControl %X)\n",
@@ -3372,6 +3433,12 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
 	unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
 	int warned = 0;
 
+	/* Slave readiness can't be tested separately from master.  On
+	 * M/S emulation configuration, this function should be called
+	 * only on the master and it will handle both master and slave.
+	 */
+	WARN_ON(link == link->ap->slave_link);
+
 	if (time_after(nodev_deadline, deadline))
 		nodev_deadline = deadline;
 
@@ -3593,7 +3660,7 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline)
 	}
 
 	/* no point in trying softreset on offline link */
-	if (ata_link_offline(link))
+	if (ata_phys_link_offline(link))
 		ehc->i.action &= ~ATA_EH_SOFTRESET;
 
 	return 0;
@@ -3671,7 +3738,7 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
 	if (rc)
 		goto out;
 	/* if link is offline nothing more to do */
-	if (ata_link_offline(link))
+	if (ata_phys_link_offline(link))
 		goto out;
 
 	/* Link is online.  From this point, -ENODEV too is an error. */
@@ -4868,10 +4935,8 @@ int sata_scr_valid(struct ata_link *link)
 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
 {
 	if (ata_is_host_link(link)) {
-		struct ata_port *ap = link->ap;
-
 		if (sata_scr_valid(link))
-			return ap->ops->scr_read(ap, reg, val);
+			return link->ap->ops->scr_read(link, reg, val);
 		return -EOPNOTSUPP;
 	}
 
@@ -4897,10 +4962,8 @@ int sata_scr_read(struct ata_link *link, int reg, u32 *val)
 int sata_scr_write(struct ata_link *link, int reg, u32 val)
 {
 	if (ata_is_host_link(link)) {
-		struct ata_port *ap = link->ap;
-
 		if (sata_scr_valid(link))
-			return ap->ops->scr_write(ap, reg, val);
+			return link->ap->ops->scr_write(link, reg, val);
 		return -EOPNOTSUPP;
 	}
 
@@ -4925,13 +4988,12 @@ int sata_scr_write(struct ata_link *link, int reg, u32 val)
 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
 {
 	if (ata_is_host_link(link)) {
-		struct ata_port *ap = link->ap;
 		int rc;
 
 		if (sata_scr_valid(link)) {
-			rc = ap->ops->scr_write(ap, reg, val);
+			rc = link->ap->ops->scr_write(link, reg, val);
 			if (rc == 0)
-				rc = ap->ops->scr_read(ap, reg, &val);
+				rc = link->ap->ops->scr_read(link, reg, &val);
 			return rc;
 		}
 		return -EOPNOTSUPP;
@@ -4941,7 +5003,7 @@ int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
 }
 
 /**
- *	ata_link_online - test whether the given link is online
+ *	ata_phys_link_online - test whether the given link is online
  *	@link: ATA link to test
  *
  *	Test whether @link is online.  Note that this function returns
@@ -4952,20 +5014,20 @@ int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
  *	None.
  *
  *	RETURNS:
- *	1 if the port online status is available and online.
+ *	True if the port online status is available and online.
  */
-int ata_link_online(struct ata_link *link)
+bool ata_phys_link_online(struct ata_link *link)
 {
 	u32 sstatus;
 
 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
 	    (sstatus & 0xf) == 0x3)
-		return 1;
-	return 0;
+		return true;
+	return false;
 }
 
 /**
- *	ata_link_offline - test whether the given link is offline
+ *	ata_phys_link_offline - test whether the given link is offline
  *	@link: ATA link to test
  *
  *	Test whether @link is offline.  Note that this function
@@ -4976,16 +5038,68 @@ int ata_link_online(struct ata_link *link)
  *	None.
  *
  *	RETURNS:
- *	1 if the port offline status is available and offline.
+ *	True if the port offline status is available and offline.
  */
-int ata_link_offline(struct ata_link *link)
+bool ata_phys_link_offline(struct ata_link *link)
 {
 	u32 sstatus;
 
 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
 	    (sstatus & 0xf) != 0x3)
-		return 1;
-	return 0;
+		return true;
+	return false;
+}
+
+/**
+ *	ata_link_online - test whether the given link is online
+ *	@link: ATA link to test
+ *
+ *	Test whether @link is online.  This is identical to
+ *	ata_phys_link_online() when there's no slave link.  When
+ *	there's a slave link, this function should only be called on
+ *	the master link and will return true if any of M/S links is
+ *	online.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	True if the port online status is available and online.
+ */
+bool ata_link_online(struct ata_link *link)
+{
+	struct ata_link *slave = link->ap->slave_link;
+
+	WARN_ON(link == slave);	/* shouldn't be called on slave link */
+
+	return ata_phys_link_online(link) ||
+		(slave && ata_phys_link_online(slave));
+}
+
+/**
+ *	ata_link_offline - test whether the given link is offline
+ *	@link: ATA link to test
+ *
+ *	Test whether @link is offline.  This is identical to
+ *	ata_phys_link_offline() when there's no slave link.  When
+ *	there's a slave link, this function should only be called on
+ *	the master link and will return true if both M/S links are
+ *	offline.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	True if the port offline status is available and offline.
+ */
+bool ata_link_offline(struct ata_link *link)
+{
+	struct ata_link *slave = link->ap->slave_link;
+
+	WARN_ON(link == slave);	/* shouldn't be called on slave link */
+
+	return ata_phys_link_offline(link) &&
+		(!slave || ata_phys_link_offline(slave));
 }
 
 #ifdef CONFIG_PM
@@ -5127,11 +5241,11 @@ int ata_port_start(struct ata_port *ap)
  */
 void ata_dev_init(struct ata_device *dev)
 {
-	struct ata_link *link = dev->link;
+	struct ata_link *link = ata_dev_phys_link(dev);
 	struct ata_port *ap = link->ap;
 	unsigned long flags;
 
-	/* SATA spd limit is bound to the first device */
+	/* SATA spd limit is bound to the attached device, reset together */
 	link->sata_spd_limit = link->hw_sata_spd_limit;
 	link->sata_spd = 0;
 
@@ -5264,6 +5378,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
 	INIT_LIST_HEAD(&ap->eh_done_q);
 	init_waitqueue_head(&ap->eh_wait_q);
+	init_completion(&ap->park_req_pending);
 	init_timer_deferrable(&ap->fastdrain_timer);
 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
 	ap->fastdrain_timer.data = (unsigned long)ap;
@@ -5294,6 +5409,7 @@ static void ata_host_release(struct device *gendev, void *res)
 		scsi_host_put(ap->scsi_host);
 
 		kfree(ap->pmp_link);
+		kfree(ap->slave_link);
 		kfree(ap);
 		host->ports[i] = NULL;
 	}
@@ -5414,6 +5530,68 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
 	return host;
 }
 
+/**
+ *	ata_slave_link_init - initialize slave link
+ *	@ap: port to initialize slave link for
+ *
+ *	Create and initialize slave link for @ap.  This enables slave
+ *	link handling on the port.
+ *
+ *	In libata, a port contains links and a link contains devices.
+ *	There is single host link but if a PMP is attached to it,
+ *	there can be multiple fan-out links.  On SATA, there's usually
+ *	a single device connected to a link but PATA and SATA
+ *	controllers emulating TF based interface can have two - master
+ *	and slave.
+ *
+ *	However, there are a few controllers which don't fit into this
+ *	abstraction too well - SATA controllers which emulate TF
+ *	interface with both master and slave devices but also have
+ *	separate SCR register sets for each device.  These controllers
+ *	need separate links for physical link handling
+ *	(e.g. onlineness, link speed) but should be treated like a
+ *	traditional M/S controller for everything else (e.g. command
+ *	issue, softreset).
+ *
+ *	slave_link is libata's way of handling this class of
+ *	controllers without impacting core layer too much.  For
+ *	anything other than physical link handling, the default host
+ *	link is used for both master and slave.  For physical link
+ *	handling, separate @ap->slave_link is used.  All dirty details
+ *	are implemented inside libata core layer.  From LLD's POV, the
+ *	only difference is that prereset, hardreset and postreset are
+ *	called once more for the slave link, so the reset sequence
+ *	looks like the following.
+ *
+ *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
+ *	softreset(M) -> postreset(M) -> postreset(S)
+ *
+ *	Note that softreset is called only for the master.  Softreset
+ *	resets both M/S by definition, so SRST on master should handle
+ *	both (the standard method will work just fine).
+ *
+ *	LOCKING:
+ *	Should be called before host is registered.
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+int ata_slave_link_init(struct ata_port *ap)
+{
+	struct ata_link *link;
+
+	WARN_ON(ap->slave_link);
+	WARN_ON(ap->flags & ATA_FLAG_PMP);
+
+	link = kzalloc(sizeof(*link), GFP_KERNEL);
+	if (!link)
+		return -ENOMEM;
+
+	ata_link_init(ap, link, 1);
+	ap->slave_link = link;
+	return 0;
+}
+
 static void ata_host_stop(struct device *gendev, void *res)
 {
 	struct ata_host *host = dev_get_drvdata(gendev);
@@ -5640,6 +5818,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 
 		/* init sata_spd_limit to the current value */
 		sata_link_init_spd(&ap->link);
+		if (ap->slave_link)
+			sata_link_init_spd(ap->slave_link);
 
 		/* print per-port info to dmesg */
 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
@@ -6260,10 +6440,12 @@ EXPORT_SYMBOL_GPL(ata_base_port_ops);
 EXPORT_SYMBOL_GPL(sata_port_ops);
 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
+EXPORT_SYMBOL_GPL(__ata_port_next_link);
 EXPORT_SYMBOL_GPL(ata_std_bios_param);
 EXPORT_SYMBOL_GPL(ata_host_init);
 EXPORT_SYMBOL_GPL(ata_host_alloc);
 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
+EXPORT_SYMBOL_GPL(ata_slave_link_init);
 EXPORT_SYMBOL_GPL(ata_host_start);
 EXPORT_SYMBOL_GPL(ata_host_register);
 EXPORT_SYMBOL_GPL(ata_host_activate);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c1db2f234d2e..a93247cc395a 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -33,6 +33,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/blkdev.h>
 #include <linux/pci.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -79,6 +80,8 @@ enum {
 	 */
 	ATA_EH_PRERESET_TIMEOUT		= 10000,
 	ATA_EH_FASTDRAIN_INTERVAL	= 3000,
+
+	ATA_EH_UA_TRIES			= 5,
 };
 
 /* The following table determines how we sequence resets.  Each entry
@@ -457,29 +460,29 @@ static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
  *	RETURNS:
  *	EH_HANDLED or EH_NOT_HANDLED
  */
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
+enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct ata_port *ap = ata_shost_to_port(host);
 	unsigned long flags;
 	struct ata_queued_cmd *qc;
-	enum scsi_eh_timer_return ret;
+	enum blk_eh_timer_return ret;
 
 	DPRINTK("ENTER\n");
 
 	if (ap->ops->error_handler) {
-		ret = EH_NOT_HANDLED;
+		ret = BLK_EH_NOT_HANDLED;
 		goto out;
 	}
 
-	ret = EH_HANDLED;
+	ret = BLK_EH_HANDLED;
 	spin_lock_irqsave(ap->lock, flags);
 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
 	if (qc) {
 		WARN_ON(qc->scsicmd != cmd);
 		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
 		qc->err_mask |= AC_ERR_TIMEOUT;
-		ret = EH_NOT_HANDLED;
+		ret = BLK_EH_NOT_HANDLED;
 	}
 	spin_unlock_irqrestore(ap->lock, flags);
 
@@ -831,7 +834,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
 	 * this function completes.
 	 */
-	scsi_req_abort_cmd(qc->scsicmd);
+	blk_abort_request(qc->scsicmd->request);
 }
 
 /**
@@ -1357,6 +1360,37 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
 }
 
 /**
+ *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
+ *	@dev: target ATAPI device
+ *	@r_sense_key: out parameter for sense_key
+ *
+ *	Perform ATAPI TEST_UNIT_READY.
+ *
+ *	LOCKING:
+ *	EH context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, AC_ERR_* mask on failure.
+ */
+static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
+{
+	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	ata_tf_init(dev, &tf);
+
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf.command = ATA_CMD_PACKET;
+	tf.protocol = ATAPI_PROT_NODATA;
+
+	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
+	if (err_mask == AC_ERR_DEV)
+		*r_sense_key = tf.feature >> 4;
+	return err_mask;
+}
+
+/**
  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
  *	@dev: device to perform REQUEST_SENSE to
  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
@@ -1756,7 +1790,7 @@ static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
 static unsigned int ata_eh_speed_down(struct ata_device *dev,
 				unsigned int eflags, unsigned int err_mask)
 {
-	struct ata_link *link = dev->link;
+	struct ata_link *link = ata_dev_phys_link(dev);
 	int xfer_ok = 0;
 	unsigned int verdict;
 	unsigned int action = 0;
@@ -1880,7 +1914,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
 
-		if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link)
+		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+		    ata_dev_phys_link(qc->dev) != link)
 			continue;
 
 		/* inherit upper level err_mask */
@@ -1967,6 +2002,23 @@ void ata_eh_autopsy(struct ata_port *ap)
 	ata_port_for_each_link(link, ap)
 		ata_eh_link_autopsy(link);
 
+	/* Handle the frigging slave link.  Autopsy is done similarly
+	 * but actions and flags are transferred over to the master
+	 * link and handled from there.
+	 */
+	if (ap->slave_link) {
+		struct ata_eh_context *mehc = &ap->link.eh_context;
+		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
+
+		ata_eh_link_autopsy(ap->slave_link);
+
+		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
+		mehc->i.action |= sehc->i.action;
+		mehc->i.dev_action[1] |= sehc->i.dev_action[1];
+		mehc->i.flags |= sehc->i.flags;
+		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
+	}
+
 	/* Autopsy of fanout ports can affect host link autopsy.
 	 * Perform host link autopsy last.
 	 */
@@ -2001,7 +2053,8 @@ static void ata_eh_link_report(struct ata_link *link)
2001 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2053 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2002 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2054 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2003 2055
2004 if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link || 2056 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2057 ata_dev_phys_link(qc->dev) != link ||
2005 ((qc->flags & ATA_QCFLAG_QUIET) && 2058 ((qc->flags & ATA_QCFLAG_QUIET) &&
2006 qc->err_mask == AC_ERR_DEV)) 2059 qc->err_mask == AC_ERR_DEV))
2007 continue; 2060 continue;
@@ -2068,7 +2121,7 @@ static void ata_eh_link_report(struct ata_link *link)
2068 char cdb_buf[70] = ""; 2121 char cdb_buf[70] = "";
2069 2122
2070 if (!(qc->flags & ATA_QCFLAG_FAILED) || 2123 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2071 qc->dev->link != link || !qc->err_mask) 2124 ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2072 continue; 2125 continue;
2073 2126
2074 if (qc->dma_dir != DMA_NONE) { 2127 if (qc->dma_dir != DMA_NONE) {
@@ -2160,12 +2213,14 @@ void ata_eh_report(struct ata_port *ap)
2160} 2213}
2161 2214
2162static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2215static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2163 unsigned int *classes, unsigned long deadline) 2216 unsigned int *classes, unsigned long deadline,
2217 bool clear_classes)
2164{ 2218{
2165 struct ata_device *dev; 2219 struct ata_device *dev;
2166 2220
2167 ata_link_for_each_dev(dev, link) 2221 if (clear_classes)
2168 classes[dev->devno] = ATA_DEV_UNKNOWN; 2222 ata_link_for_each_dev(dev, link)
2223 classes[dev->devno] = ATA_DEV_UNKNOWN;
2169 2224
2170 return reset(link, classes, deadline); 2225 return reset(link, classes, deadline);
2171} 2226}
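The new clear_classes flag exists because the slave-link hardreset added further down in ata_eh_reset() reuses the classes[] array already filled in by the master-link reset; re-initializing it there would throw the master's classification away. A small stand-alone sketch of the same pattern, using hypothetical types rather than the libata ones:

    #include <stdbool.h>
    #include <stdio.h>

    enum fake_class { CLS_UNKNOWN, CLS_NONE, CLS_ATA };

    /* Run one reset step, optionally re-initializing the shared
     * classification array first (mirrors the clear_classes parameter). */
    static int do_reset_step(int (*reset)(enum fake_class *),
                             enum fake_class *classes, int ndev,
                             bool clear_classes)
    {
            if (clear_classes)
                    for (int i = 0; i < ndev; i++)
                            classes[i] = CLS_UNKNOWN;
            return reset(classes);
    }

    static int dummy_reset(enum fake_class *classes)
    {
            classes[0] = CLS_ATA;           /* pretend device 0 got classified */
            return 0;
    }

    int main(void)
    {
            enum fake_class classes[2] = { CLS_UNKNOWN, CLS_UNKNOWN };

            do_reset_step(dummy_reset, classes, 2, true);   /* master link */
            do_reset_step(dummy_reset, classes, 2, false);  /* slave keeps earlier results */
            printf("class[0]=%d class[1]=%d\n", classes[0], classes[1]);
            return 0;
    }
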
@@ -2187,17 +2242,20 @@ int ata_eh_reset(struct ata_link *link, int classify,
2187 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2242 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2188{ 2243{
2189 struct ata_port *ap = link->ap; 2244 struct ata_port *ap = link->ap;
2245 struct ata_link *slave = ap->slave_link;
2190 struct ata_eh_context *ehc = &link->eh_context; 2246 struct ata_eh_context *ehc = &link->eh_context;
2247 struct ata_eh_context *sehc = &slave->eh_context;
2191 unsigned int *classes = ehc->classes; 2248 unsigned int *classes = ehc->classes;
2192 unsigned int lflags = link->flags; 2249 unsigned int lflags = link->flags;
2193 int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2250 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2194 int max_tries = 0, try = 0; 2251 int max_tries = 0, try = 0;
2252 struct ata_link *failed_link;
2195 struct ata_device *dev; 2253 struct ata_device *dev;
2196 unsigned long deadline, now; 2254 unsigned long deadline, now;
2197 ata_reset_fn_t reset; 2255 ata_reset_fn_t reset;
2198 unsigned long flags; 2256 unsigned long flags;
2199 u32 sstatus; 2257 u32 sstatus;
2200 int nr_known, rc; 2258 int nr_unknown, rc;
2201 2259
2202 /* 2260 /*
2203 * Prepare to reset 2261 * Prepare to reset
@@ -2252,8 +2310,30 @@ int ata_eh_reset(struct ata_link *link, int classify,
2252 } 2310 }
2253 2311
2254 if (prereset) { 2312 if (prereset) {
2255 rc = prereset(link, 2313 unsigned long deadline = ata_deadline(jiffies,
2256 ata_deadline(jiffies, ATA_EH_PRERESET_TIMEOUT)); 2314 ATA_EH_PRERESET_TIMEOUT);
2315
2316 if (slave) {
2317 sehc->i.action &= ~ATA_EH_RESET;
2318 sehc->i.action |= ehc->i.action;
2319 }
2320
2321 rc = prereset(link, deadline);
2322
2323 /* If present, do prereset on slave link too. Reset
2324 * is skipped iff both master and slave links report
2325 * -ENOENT or clear ATA_EH_RESET.
2326 */
2327 if (slave && (rc == 0 || rc == -ENOENT)) {
2328 int tmp;
2329
2330 tmp = prereset(slave, deadline);
2331 if (tmp != -ENOENT)
2332 rc = tmp;
2333
2334 ehc->i.action |= sehc->i.action;
2335 }
2336
2257 if (rc) { 2337 if (rc) {
2258 if (rc == -ENOENT) { 2338 if (rc == -ENOENT) {
2259 ata_link_printk(link, KERN_DEBUG, 2339 ata_link_printk(link, KERN_DEBUG,
@@ -2302,25 +2382,51 @@ int ata_eh_reset(struct ata_link *link, int classify,
2302 else 2382 else
2303 ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2383 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2304 2384
2305 rc = ata_do_reset(link, reset, classes, deadline); 2385 rc = ata_do_reset(link, reset, classes, deadline, true);
2306 if (rc && rc != -EAGAIN) 2386 if (rc && rc != -EAGAIN) {
2387 failed_link = link;
2307 goto fail; 2388 goto fail;
2389 }
2390
2391 /* hardreset slave link if existent */
2392 if (slave && reset == hardreset) {
2393 int tmp;
2394
2395 if (verbose)
2396 ata_link_printk(slave, KERN_INFO,
2397 "hard resetting link\n");
2308 2398
2399 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2400 tmp = ata_do_reset(slave, reset, classes, deadline,
2401 false);
2402 switch (tmp) {
2403 case -EAGAIN:
2404 rc = -EAGAIN;
2405 case 0:
2406 break;
2407 default:
2408 failed_link = slave;
2409 rc = tmp;
2410 goto fail;
2411 }
2412 }
2413
2414 /* perform follow-up SRST if necessary */
2309 if (reset == hardreset && 2415 if (reset == hardreset &&
2310 ata_eh_followup_srst_needed(link, rc, classes)) { 2416 ata_eh_followup_srst_needed(link, rc, classes)) {
2311 /* okay, let's do follow-up softreset */
2312 reset = softreset; 2417 reset = softreset;
2313 2418
2314 if (!reset) { 2419 if (!reset) {
2315 ata_link_printk(link, KERN_ERR, 2420 ata_link_printk(link, KERN_ERR,
2316 "follow-up softreset required " 2421 "follow-up softreset required "
2317 "but no softreset available\n"); 2422 "but no softreset available\n");
2423 failed_link = link;
2318 rc = -EINVAL; 2424 rc = -EINVAL;
2319 goto fail; 2425 goto fail;
2320 } 2426 }
2321 2427
2322 ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2428 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2323 rc = ata_do_reset(link, reset, classes, deadline); 2429 rc = ata_do_reset(link, reset, classes, deadline, true);
2324 } 2430 }
2325 } else { 2431 } else {
2326 if (verbose) 2432 if (verbose)
@@ -2341,7 +2447,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2341 dev->pio_mode = XFER_PIO_0; 2447 dev->pio_mode = XFER_PIO_0;
2342 dev->flags &= ~ATA_DFLAG_SLEEPING; 2448 dev->flags &= ~ATA_DFLAG_SLEEPING;
2343 2449
2344 if (ata_link_offline(link)) 2450 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2345 continue; 2451 continue;
2346 2452
2347 /* apply class override */ 2453 /* apply class override */
@@ -2354,6 +2460,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
2354 /* record current link speed */ 2460 /* record current link speed */
2355 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2461 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2356 link->sata_spd = (sstatus >> 4) & 0xf; 2462 link->sata_spd = (sstatus >> 4) & 0xf;
2463 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2464 slave->sata_spd = (sstatus >> 4) & 0xf;
2357 2465
2358 /* thaw the port */ 2466 /* thaw the port */
2359 if (ata_is_host_link(link)) 2467 if (ata_is_host_link(link))
@@ -2366,12 +2474,17 @@ int ata_eh_reset(struct ata_link *link, int classify,
2366 * reset and here. This race is mediated by cross checking 2474 * reset and here. This race is mediated by cross checking
2367 * link onlineness and classification result later. 2475 * link onlineness and classification result later.
2368 */ 2476 */
2369 if (postreset) 2477 if (postreset) {
2370 postreset(link, classes); 2478 postreset(link, classes);
2479 if (slave)
2480 postreset(slave, classes);
2481 }
2371 2482
2372 /* clear cached SError */ 2483 /* clear cached SError */
2373 spin_lock_irqsave(link->ap->lock, flags); 2484 spin_lock_irqsave(link->ap->lock, flags);
2374 link->eh_info.serror = 0; 2485 link->eh_info.serror = 0;
2486 if (slave)
2487 slave->eh_info.serror = 0;
2375 spin_unlock_irqrestore(link->ap->lock, flags); 2488 spin_unlock_irqrestore(link->ap->lock, flags);
2376 2489
2377 /* Make sure onlineness and classification result correspond. 2490 /* Make sure onlineness and classification result correspond.
@@ -2381,19 +2494,21 @@ int ata_eh_reset(struct ata_link *link, int classify,
2381 * link onlineness and classification result, those conditions 2494 * link onlineness and classification result, those conditions
2382 * can be reliably detected and retried. 2495 * can be reliably detected and retried.
2383 */ 2496 */
2384 nr_known = 0; 2497 nr_unknown = 0;
2385 ata_link_for_each_dev(dev, link) { 2498 ata_link_for_each_dev(dev, link) {
2386 /* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */ 2499 /* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */
2387 if (classes[dev->devno] == ATA_DEV_UNKNOWN) 2500 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2388 classes[dev->devno] = ATA_DEV_NONE; 2501 classes[dev->devno] = ATA_DEV_NONE;
2389 else 2502 if (ata_phys_link_online(ata_dev_phys_link(dev)))
2390 nr_known++; 2503 nr_unknown++;
2504 }
2391 } 2505 }
2392 2506
2393 if (classify && !nr_known && ata_link_online(link)) { 2507 if (classify && nr_unknown) {
2394 if (try < max_tries) { 2508 if (try < max_tries) {
2395 ata_link_printk(link, KERN_WARNING, "link online but " 2509 ata_link_printk(link, KERN_WARNING, "link online but "
2396 "device misclassified, retrying\n"); 2510 "device misclassified, retrying\n");
2511 failed_link = link;
2397 rc = -EAGAIN; 2512 rc = -EAGAIN;
2398 goto fail; 2513 goto fail;
2399 } 2514 }
@@ -2404,6 +2519,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
2404 2519
2405 /* reset successful, schedule revalidation */ 2520 /* reset successful, schedule revalidation */
2406 ata_eh_done(link, NULL, ATA_EH_RESET); 2521 ata_eh_done(link, NULL, ATA_EH_RESET);
2522 if (slave)
2523 ata_eh_done(slave, NULL, ATA_EH_RESET);
2407 ehc->last_reset = jiffies; 2524 ehc->last_reset = jiffies;
2408 ehc->i.action |= ATA_EH_REVALIDATE; 2525 ehc->i.action |= ATA_EH_REVALIDATE;
2409 2526
@@ -2411,6 +2528,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
2411 out: 2528 out:
2412 /* clear hotplug flag */ 2529 /* clear hotplug flag */
2413 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2530 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2531 if (slave)
2532 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2414 2533
2415 spin_lock_irqsave(ap->lock, flags); 2534 spin_lock_irqsave(ap->lock, flags);
2416 ap->pflags &= ~ATA_PFLAG_RESETTING; 2535 ap->pflags &= ~ATA_PFLAG_RESETTING;
@@ -2431,7 +2550,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2431 if (time_before(now, deadline)) { 2550 if (time_before(now, deadline)) {
2432 unsigned long delta = deadline - now; 2551 unsigned long delta = deadline - now;
2433 2552
2434 ata_link_printk(link, KERN_WARNING, 2553 ata_link_printk(failed_link, KERN_WARNING,
2435 "reset failed (errno=%d), retrying in %u secs\n", 2554 "reset failed (errno=%d), retrying in %u secs\n",
2436 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2555 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2437 2556
@@ -2439,13 +2558,92 @@ int ata_eh_reset(struct ata_link *link, int classify,
2439 delta = schedule_timeout_uninterruptible(delta); 2558 delta = schedule_timeout_uninterruptible(delta);
2440 } 2559 }
2441 2560
2442 if (rc == -EPIPE || try == max_tries - 1) 2561 if (try == max_tries - 1) {
2443 sata_down_spd_limit(link); 2562 sata_down_spd_limit(link);
2563 if (slave)
2564 sata_down_spd_limit(slave);
2565 } else if (rc == -EPIPE)
2566 sata_down_spd_limit(failed_link);
2567
2444 if (hardreset) 2568 if (hardreset)
2445 reset = hardreset; 2569 reset = hardreset;
2446 goto retry; 2570 goto retry;
2447} 2571}
2448 2572
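The failure path above also changes how link speed gets throttled: previously a single sata_down_spd_limit(link) covered both the -EPIPE case and the final attempt, whereas now the last attempt slows the master and, if present, the slave link, while -EPIPE only slows the link that actually failed the reset. A simplified user-space model of that decision (hypothetical types, not libata code):

    #include <errno.h>
    #include <stdio.h>

    struct fake_link { const char *name; };

    static void speed_down(struct fake_link *l)
    {
            printf("lowering SATA speed limit on %s link\n", l->name);
    }

    /* Last attempt: slow both links; -EPIPE: slow only the failed link. */
    static void on_reset_failure(int rc, int try, int max_tries,
                                 struct fake_link *link,
                                 struct fake_link *slave,
                                 struct fake_link *failed_link)
    {
            if (try == max_tries - 1) {
                    speed_down(link);
                    if (slave)
                            speed_down(slave);
            } else if (rc == -EPIPE) {
                    speed_down(failed_link);
            }
    }

    int main(void)
    {
            struct fake_link master = { "master" }, slave = { "slave" };

            on_reset_failure(-EPIPE, 0, 3, &master, &slave, &slave);
            return 0;
    }
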
2573static inline void ata_eh_pull_park_action(struct ata_port *ap)
2574{
2575 struct ata_link *link;
2576 struct ata_device *dev;
2577 unsigned long flags;
2578
2579 /*
2580 * This function can be thought of as an extended version of
2581 * ata_eh_about_to_do() specially crafted to accommodate the
2582 * requirements of ATA_EH_PARK handling. Since the EH thread
2583 * does not leave the do {} while () loop in ata_eh_recover as
2584 * long as the timeout for a park request to *one* device on
2585 * the port has not expired, and since we still want to pick
2586 * up park requests to other devices on the same port or
2587 * timeout updates for the same device, we have to pull
2588 * ATA_EH_PARK actions from eh_info into eh_context.i
2589 * ourselves at the beginning of each pass over the loop.
2590 *
2591 * Additionally, all write accesses to &ap->park_req_pending
2592 * through INIT_COMPLETION() (see below) or complete_all()
2593 * (see ata_scsi_park_store()) are protected by the host lock.
2594 * As a result, park_req_pending.done is zero on
2595 * exit from this function, i.e. when ATA_EH_PARK actions for
2596 * *all* devices on port ap have been pulled into the
2597 * respective eh_context structs. If, and only if,
2598 * park_req_pending.done is non-zero by the time we reach
2599 * wait_for_completion_timeout(), another ATA_EH_PARK action
2600 * has been scheduled for at least one of the devices on port
2601 * ap and we have to cycle over the do {} while () loop in
2602 * ata_eh_recover() again.
2603 */
2604
2605 spin_lock_irqsave(ap->lock, flags);
2606 INIT_COMPLETION(ap->park_req_pending);
2607 ata_port_for_each_link(link, ap) {
2608 ata_link_for_each_dev(dev, link) {
2609 struct ata_eh_info *ehi = &link->eh_info;
2610
2611 link->eh_context.i.dev_action[dev->devno] |=
2612 ehi->dev_action[dev->devno] & ATA_EH_PARK;
2613 ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
2614 }
2615 }
2616 spin_unlock_irqrestore(ap->lock, flags);
2617}
2618
2619static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
2620{
2621 struct ata_eh_context *ehc = &dev->link->eh_context;
2622 struct ata_taskfile tf;
2623 unsigned int err_mask;
2624
2625 ata_tf_init(dev, &tf);
2626 if (park) {
2627 ehc->unloaded_mask |= 1 << dev->devno;
2628 tf.command = ATA_CMD_IDLEIMMEDIATE;
2629 tf.feature = 0x44;
2630 tf.lbal = 0x4c;
2631 tf.lbam = 0x4e;
2632 tf.lbah = 0x55;
2633 } else {
2634 ehc->unloaded_mask &= ~(1 << dev->devno);
2635 tf.command = ATA_CMD_CHK_POWER;
2636 }
2637
2638 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2639 tf.protocol |= ATA_PROT_NODATA;
2640 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2641 if (park && (err_mask || tf.lbal != 0xc4)) {
2642 ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
2643 ehc->unloaded_mask &= ~(1 << dev->devno);
2644 }
2645}
2646
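The park request above is IDLE IMMEDIATE (0xE1) with the UNLOAD feature: FEATURE is set to 0x44 and the LBA registers to 0x55/0x4E/0x4C, which spell "UNL" in ASCII, and a device that actually unloaded its heads answers with 0xC4 in the LBA low register, hence the tf.lbal != 0xc4 check. A small user-space sketch of building and verifying that register pattern (hypothetical struct, not the kernel taskfile):

    #include <stdio.h>

    struct fake_tf { unsigned char command, feature, lbal, lbam, lbah; };

    static void fill_unload_tf(struct fake_tf *tf)
    {
            tf->command = 0xE1;     /* IDLE IMMEDIATE */
            tf->feature = 0x44;     /* UNLOAD feature */
            tf->lbal = 0x4C;        /* 'L' */
            tf->lbam = 0x4E;        /* 'N' */
            tf->lbah = 0x55;        /* 'U' */
    }

    static int unload_confirmed(const struct fake_tf *result)
    {
            return result->lbal == 0xC4;    /* device acknowledged the unload */
    }

    int main(void)
    {
            struct fake_tf tf, res = { .lbal = 0xC4 };

            fill_unload_tf(&tf);
            printf("unload %s\n", unload_confirmed(&res) ? "confirmed" : "failed");
            return 0;
    }
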
2449static int ata_eh_revalidate_and_attach(struct ata_link *link, 2647static int ata_eh_revalidate_and_attach(struct ata_link *link,
2450 struct ata_device **r_failed_dev) 2648 struct ata_device **r_failed_dev)
2451{ 2649{
@@ -2472,7 +2670,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
2472 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 2670 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
2473 WARN_ON(dev->class == ATA_DEV_PMP); 2671 WARN_ON(dev->class == ATA_DEV_PMP);
2474 2672
2475 if (ata_link_offline(link)) { 2673 if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2476 rc = -EIO; 2674 rc = -EIO;
2477 goto err; 2675 goto err;
2478 } 2676 }
@@ -2610,6 +2808,53 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2610 return rc; 2808 return rc;
2611} 2809}
2612 2810
2811/**
2812 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
2813 * @dev: ATAPI device to clear UA for
2814 *
2815 * Resets and other operations can make an ATAPI device raise
2816 * UNIT ATTENTION which causes the next operation to fail. This
2817 * function clears UA.
2818 *
2819 * LOCKING:
2820 * EH context (may sleep).
2821 *
2822 * RETURNS:
2823 * 0 on success, -errno on failure.
2824 */
2825static int atapi_eh_clear_ua(struct ata_device *dev)
2826{
2827 int i;
2828
2829 for (i = 0; i < ATA_EH_UA_TRIES; i++) {
2830 u8 sense_buffer[SCSI_SENSE_BUFFERSIZE];
2831 u8 sense_key = 0;
2832 unsigned int err_mask;
2833
2834 err_mask = atapi_eh_tur(dev, &sense_key);
2835 if (err_mask != 0 && err_mask != AC_ERR_DEV) {
2836 ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
2837 "failed (err_mask=0x%x)\n", err_mask);
2838 return -EIO;
2839 }
2840
2841 if (!err_mask || sense_key != UNIT_ATTENTION)
2842 return 0;
2843
2844 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
2845 if (err_mask) {
2846 ata_dev_printk(dev, KERN_WARNING, "failed to clear "
2847 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
2848 return -EIO;
2849 }
2850 }
2851
2852 ata_dev_printk(dev, KERN_WARNING,
2853 "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
2854
2855 return 0;
2856}
2857
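One subtlety worth spelling out: a TEST_UNIT_READY that fails with a sense key other than UNIT ATTENTION still makes this helper return 0, because by then the UA has been cleared and whatever else is wrong is for the eventual user of the device to handle; only a transport-level error aborts the loop. A compact user-space model of that per-iteration decision (the constants are local stand-ins, not the libata definitions):

    #include <stdio.h>

    #define FAKE_AC_ERR_DEV         (1 << 0)        /* "device reported error" bit (assumed) */
    #define FAKE_UNIT_ATTENTION     0x6             /* SCSI sense key UNIT ATTENTION */

    enum ua_verdict { UA_DONE, UA_RETRY, UA_HARD_FAIL };

    static enum ua_verdict classify(unsigned int err_mask, unsigned char sense_key)
    {
            if (err_mask && err_mask != FAKE_AC_ERR_DEV)
                    return UA_HARD_FAIL;    /* transport-level problem */
            if (!err_mask || sense_key != FAKE_UNIT_ATTENTION)
                    return UA_DONE;         /* ready, or failed for a non-UA reason */
            return UA_RETRY;                /* UA pending: request sense, try again */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   classify(0, 0),                                  /* device ready */
                   classify(FAKE_AC_ERR_DEV, FAKE_UNIT_ATTENTION),  /* clear UA, retry */
                   classify(0x10, 0));                              /* hard failure */
            return 0;
    }
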
2613static int ata_link_nr_enabled(struct ata_link *link) 2858static int ata_link_nr_enabled(struct ata_link *link)
2614{ 2859{
2615 struct ata_device *dev; 2860 struct ata_device *dev;
@@ -2697,7 +2942,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
2697 /* This is the last chance, better to slow 2942 /* This is the last chance, better to slow
2698 * down than lose it. 2943 * down than lose it.
2699 */ 2944 */
2700 sata_down_spd_limit(dev->link); 2945 sata_down_spd_limit(ata_dev_phys_link(dev));
2701 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2946 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2702 } 2947 }
2703 } 2948 }
@@ -2707,7 +2952,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
2707 ata_dev_disable(dev); 2952 ata_dev_disable(dev);
2708 2953
2709 /* detach if offline */ 2954 /* detach if offline */
2710 if (ata_link_offline(dev->link)) 2955 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2711 ata_eh_detach_dev(dev); 2956 ata_eh_detach_dev(dev);
2712 2957
2713 /* schedule probe if necessary */ 2958 /* schedule probe if necessary */
@@ -2755,7 +3000,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
2755 struct ata_device *dev; 3000 struct ata_device *dev;
2756 int nr_failed_devs; 3001 int nr_failed_devs;
2757 int rc; 3002 int rc;
2758 unsigned long flags; 3003 unsigned long flags, deadline;
2759 3004
2760 DPRINTK("ENTER\n"); 3005 DPRINTK("ENTER\n");
2761 3006
@@ -2829,6 +3074,56 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
2829 } 3074 }
2830 } 3075 }
2831 3076
3077 do {
3078 unsigned long now;
3079
3080 /*
3081 * clears ATA_EH_PARK in eh_info and resets
3082 * ap->park_req_pending
3083 */
3084 ata_eh_pull_park_action(ap);
3085
3086 deadline = jiffies;
3087 ata_port_for_each_link(link, ap) {
3088 ata_link_for_each_dev(dev, link) {
3089 struct ata_eh_context *ehc = &link->eh_context;
3090 unsigned long tmp;
3091
3092 if (dev->class != ATA_DEV_ATA)
3093 continue;
3094 if (!(ehc->i.dev_action[dev->devno] &
3095 ATA_EH_PARK))
3096 continue;
3097 tmp = dev->unpark_deadline;
3098 if (time_before(deadline, tmp))
3099 deadline = tmp;
3100 else if (time_before_eq(tmp, jiffies))
3101 continue;
3102 if (ehc->unloaded_mask & (1 << dev->devno))
3103 continue;
3104
3105 ata_eh_park_issue_cmd(dev, 1);
3106 }
3107 }
3108
3109 now = jiffies;
3110 if (time_before_eq(deadline, now))
3111 break;
3112
3113 deadline = wait_for_completion_timeout(&ap->park_req_pending,
3114 deadline - now);
3115 } while (deadline);
3116 ata_port_for_each_link(link, ap) {
3117 ata_link_for_each_dev(dev, link) {
3118 if (!(link->eh_context.unloaded_mask &
3119 (1 << dev->devno)))
3120 continue;
3121
3122 ata_eh_park_issue_cmd(dev, 0);
3123 ata_eh_done(link, dev, ATA_EH_PARK);
3124 }
3125 }
3126
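The do {} while loop above keeps EH cycling until every pending park request has run its course: each pass pulls fresh ATA_EH_PARK requests, picks as its wait deadline the latest unpark_deadline among devices with a pending request, skips requests that have already expired, and only (re)issues the park command to devices whose heads are not yet unloaded; the follow-up loop then restores the heads and completes the ATA_EH_PARK action. A simplified user-space model of one scan pass (hypothetical types, plain integer time instead of jiffies, wraparound ignored):

    #include <stdio.h>

    struct fake_dev {
            unsigned long unpark_deadline;  /* absolute time */
            int park_pending;               /* ATA_EH_PARK requested */
            int unloaded;                   /* heads already parked */
    };

    static unsigned long scan_park_requests(struct fake_dev *devs, int n,
                                            unsigned long now)
    {
            unsigned long deadline = now;

            for (int i = 0; i < n; i++) {
                    struct fake_dev *d = &devs[i];

                    if (!d->park_pending)
                            continue;
                    if (d->unpark_deadline > deadline)
                            deadline = d->unpark_deadline;
                    else if (d->unpark_deadline <= now)
                            continue;       /* request already expired */
                    if (d->unloaded)
                            continue;       /* nothing to issue, just keep waiting */
                    printf("issue park command to device %d\n", i);
                    d->unloaded = 1;
            }
            return deadline;                /* == now means nothing left to wait for */
    }

    int main(void)
    {
            struct fake_dev devs[] = {
                    { .unpark_deadline = 150, .park_pending = 1 },
                    { .unpark_deadline = 90,  .park_pending = 1 },  /* expired */
            };

            printf("wait until %lu\n", scan_park_requests(devs, 2, 100));
            return 0;
    }
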
2832 /* the rest */ 3127 /* the rest */
2833 ata_port_for_each_link(link, ap) { 3128 ata_port_for_each_link(link, ap) {
2834 struct ata_eh_context *ehc = &link->eh_context; 3129 struct ata_eh_context *ehc = &link->eh_context;
@@ -2852,6 +3147,20 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
2852 ehc->i.flags &= ~ATA_EHI_SETMODE; 3147 ehc->i.flags &= ~ATA_EHI_SETMODE;
2853 } 3148 }
2854 3149
3150 /* If reset has been issued, clear UA to avoid
3151 * disrupting the current users of the device.
3152 */
3153 if (ehc->i.flags & ATA_EHI_DID_RESET) {
3154 ata_link_for_each_dev(dev, link) {
3155 if (dev->class != ATA_DEV_ATAPI)
3156 continue;
3157 rc = atapi_eh_clear_ua(dev);
3158 if (rc)
3159 goto dev_fail;
3160 }
3161 }
3162
3163 /* configure link power saving */
2855 if (ehc->i.action & ATA_EH_LPM) 3164 if (ehc->i.action & ATA_EH_LPM)
2856 ata_link_for_each_dev(dev, link) 3165 ata_link_for_each_dev(dev, link)
2857 ata_dev_enable_pm(dev, ap->pm_policy); 3166 ata_dev_enable_pm(dev, ap->pm_policy);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index b9d3ba423cb2..59fe051957ef 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -183,6 +183,105 @@ DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
183 ata_scsi_lpm_show, ata_scsi_lpm_put); 183 ata_scsi_lpm_show, ata_scsi_lpm_put);
184EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy); 184EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
185 185
186static ssize_t ata_scsi_park_show(struct device *device,
187 struct device_attribute *attr, char *buf)
188{
189 struct scsi_device *sdev = to_scsi_device(device);
190 struct ata_port *ap;
191 struct ata_link *link;
192 struct ata_device *dev;
193 unsigned long flags;
194 unsigned int uninitialized_var(msecs);
195 int rc = 0;
196
197 ap = ata_shost_to_port(sdev->host);
198
199 spin_lock_irqsave(ap->lock, flags);
200 dev = ata_scsi_find_dev(ap, sdev);
201 if (!dev) {
202 rc = -ENODEV;
203 goto unlock;
204 }
205 if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
206 rc = -EOPNOTSUPP;
207 goto unlock;
208 }
209
210 link = dev->link;
211 if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
212 link->eh_context.unloaded_mask & (1 << dev->devno) &&
213 time_after(dev->unpark_deadline, jiffies))
214 msecs = jiffies_to_msecs(dev->unpark_deadline - jiffies);
215 else
216 msecs = 0;
217
218unlock:
219 spin_unlock_irqrestore(ap->lock, flags);
220
221 return rc ? rc : snprintf(buf, 20, "%u\n", msecs);
222}
223
224static ssize_t ata_scsi_park_store(struct device *device,
225 struct device_attribute *attr,
226 const char *buf, size_t len)
227{
228 struct scsi_device *sdev = to_scsi_device(device);
229 struct ata_port *ap;
230 struct ata_device *dev;
231 long int input;
232 unsigned long flags;
233 int rc;
234
235 rc = strict_strtol(buf, 10, &input);
236 if (rc || input < -2)
237 return -EINVAL;
238 if (input > ATA_TMOUT_MAX_PARK) {
239 rc = -EOVERFLOW;
240 input = ATA_TMOUT_MAX_PARK;
241 }
242
243 ap = ata_shost_to_port(sdev->host);
244
245 spin_lock_irqsave(ap->lock, flags);
246 dev = ata_scsi_find_dev(ap, sdev);
247 if (unlikely(!dev)) {
248 rc = -ENODEV;
249 goto unlock;
250 }
251 if (dev->class != ATA_DEV_ATA) {
252 rc = -EOPNOTSUPP;
253 goto unlock;
254 }
255
256 if (input >= 0) {
257 if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
258 rc = -EOPNOTSUPP;
259 goto unlock;
260 }
261
262 dev->unpark_deadline = ata_deadline(jiffies, input);
263 dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
264 ata_port_schedule_eh(ap);
265 complete(&ap->park_req_pending);
266 } else {
267 switch (input) {
268 case -1:
269 dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
270 break;
271 case -2:
272 dev->flags |= ATA_DFLAG_NO_UNLOAD;
273 break;
274 }
275 }
276unlock:
277 spin_unlock_irqrestore(ap->lock, flags);
278
279 return rc ? rc : len;
280}
281DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
282 ata_scsi_park_show, ata_scsi_park_store);
283EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
284
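With the attribute wired up through ata_common_sdev_attrs below, the whole interface is driven from user space: writing a timeout in milliseconds (up to ATA_TMOUT_MAX_PARK) requests an immediate head unload for that long, writing -1 or -2 clears or sets the "never unload" flag, and reading reports how many milliseconds of the current park period remain. A minimal user-space sketch; the sysfs path is an assumption for a typical disk (commonly /sys/block/sdX/device/unload_heads) and the program needs sufficient privileges:

    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
            const char *path = "/sys/block/sda/device/unload_heads";
            long msecs = argc > 1 ? strtol(argv[1], NULL, 10) : 2000;
            char buf[32];
            FILE *f;

            f = fopen(path, "w");           /* request a park for msecs milliseconds */
            if (!f || fprintf(f, "%ld\n", msecs) < 0) {
                    perror(path);
                    return 1;
            }
            fclose(f);

            f = fopen(path, "r");           /* read the remaining park time back */
            if (f && fgets(buf, sizeof(buf), f))
                    printf("remaining park time: %s", buf);
            if (f)
                    fclose(f);
            return 0;
    }
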
186static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) 285static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
187{ 286{
188 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 287 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
@@ -269,6 +368,12 @@ DEVICE_ATTR(sw_activity, S_IWUGO | S_IRUGO, ata_scsi_activity_show,
269 ata_scsi_activity_store); 368 ata_scsi_activity_store);
270EXPORT_SYMBOL_GPL(dev_attr_sw_activity); 369EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
271 370
371struct device_attribute *ata_common_sdev_attrs[] = {
372 &dev_attr_unload_heads,
373 NULL
374};
375EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
376
272static void ata_scsi_invalid_field(struct scsi_cmnd *cmd, 377static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
273 void (*done)(struct scsi_cmnd *)) 378 void (*done)(struct scsi_cmnd *))
274{ 379{
@@ -954,6 +1059,9 @@ static int atapi_drain_needed(struct request *rq)
954static int ata_scsi_dev_config(struct scsi_device *sdev, 1059static int ata_scsi_dev_config(struct scsi_device *sdev,
955 struct ata_device *dev) 1060 struct ata_device *dev)
956{ 1061{
1062 if (!ata_id_has_unload(dev->id))
1063 dev->flags |= ATA_DFLAG_NO_UNLOAD;
1064
957 /* configure max sectors */ 1065 /* configure max sectors */
958 blk_queue_max_sectors(sdev->request_queue, dev->max_sectors); 1066 blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
959 1067
@@ -977,6 +1085,10 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
977 1085
978 blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN); 1086 blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
979 } else { 1087 } else {
1088 if (ata_id_is_ssd(dev->id))
1089 queue_flag_set_unlocked(QUEUE_FLAG_NONROT,
1090 sdev->request_queue);
1091
980 /* ATA devices must be sector aligned */ 1092 /* ATA devices must be sector aligned */
981 blk_queue_update_dma_alignment(sdev->request_queue, 1093 blk_queue_update_dma_alignment(sdev->request_queue,
982 ATA_SECT_SIZE - 1); 1094 ATA_SECT_SIZE - 1);
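Flagging solid state devices' queues QUEUE_FLAG_NONROT in the hunk above tells the block layer not to apply rotational-media heuristics to them. The decision comes straight from IDENTIFY data: ata_id_is_ssd() keys off the nominal media rotation rate field, which to the best of my knowledge is word 217 with the value 1 meaning a non-rotating medium. A tiny user-space sketch over a raw 256-word IDENTIFY buffer under that assumption:

    #include <stdio.h>

    /* Assumption: IDENTIFY DEVICE word 217 ("nominal media rotation rate")
     * equal to 0x0001 marks a non-rotating, i.e. solid state, device. */
    static int id_is_ssd(const unsigned short *id)
    {
            return id[217] == 0x0001;
    }

    int main(void)
    {
            unsigned short id[256] = { 0 };

            id[217] = 0x0001;       /* pretend the drive reports "non-rotating" */
            printf("non-rotational: %d\n", id_is_ssd(id));
            return 0;
    }
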
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index ade5c75b6144..fe2839e58774 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -70,6 +70,7 @@ extern int atapi_passthru16;
70extern int libata_fua; 70extern int libata_fua;
71extern int libata_noacpi; 71extern int libata_noacpi;
72extern int libata_allow_tpm; 72extern int libata_allow_tpm;
73extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
73extern void ata_force_cbl(struct ata_port *ap); 74extern void ata_force_cbl(struct ata_port *ap);
74extern u64 ata_tf_to_lba(const struct ata_taskfile *tf); 75extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
75extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf); 76extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
@@ -107,6 +108,8 @@ extern void ata_qc_issue(struct ata_queued_cmd *qc);
107extern void __ata_qc_complete(struct ata_queued_cmd *qc); 108extern void __ata_qc_complete(struct ata_queued_cmd *qc);
108extern int atapi_check_dma(struct ata_queued_cmd *qc); 109extern int atapi_check_dma(struct ata_queued_cmd *qc);
109extern void swap_buf_le16(u16 *buf, unsigned int buf_words); 110extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
111extern bool ata_phys_link_online(struct ata_link *link);
112extern bool ata_phys_link_offline(struct ata_link *link);
110extern void ata_dev_init(struct ata_device *dev); 113extern void ata_dev_init(struct ata_device *dev);
111extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp); 114extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp);
112extern int sata_link_init_spd(struct ata_link *link); 115extern int sata_link_init_spd(struct ata_link *link);
@@ -152,7 +155,7 @@ extern int ata_bus_probe(struct ata_port *ap);
152/* libata-eh.c */ 155/* libata-eh.c */
153extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd); 156extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
154extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd); 157extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
155extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); 158extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
156extern void ata_scsi_error(struct Scsi_Host *host); 159extern void ata_scsi_error(struct Scsi_Host *host);
157extern void ata_port_wait_eh(struct ata_port *ap); 160extern void ata_port_wait_eh(struct ata_port *ap);
158extern void ata_eh_fastdrain_timerfn(unsigned long arg); 161extern void ata_eh_fastdrain_timerfn(unsigned long arg);
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index d3932901a3b3..1266924c11f9 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1632,6 +1632,8 @@ static int __devinit bfin_atapi_probe(struct platform_device *pdev)
1632 return -ENODEV; 1632 return -ENODEV;
1633 } 1633 }
1634 1634
1635 dev_set_drvdata(&pdev->dev, host);
1636
1635 return 0; 1637 return 0;
1636} 1638}
1637 1639
@@ -1648,6 +1650,7 @@ static int __devexit bfin_atapi_remove(struct platform_device *pdev)
1648 struct ata_host *host = dev_get_drvdata(dev); 1650 struct ata_host *host = dev_get_drvdata(dev);
1649 1651
1650 ata_host_detach(host); 1652 ata_host_detach(host);
1653 dev_set_drvdata(&pdev->dev, NULL);
1651 1654
1652 peripheral_free_list(atapi_io_port); 1655 peripheral_free_list(atapi_io_port);
1653 1656
@@ -1655,27 +1658,44 @@ static int __devexit bfin_atapi_remove(struct platform_device *pdev)
1655} 1658}
1656 1659
1657#ifdef CONFIG_PM 1660#ifdef CONFIG_PM
1658int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state) 1661static int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state)
1659{ 1662{
1660 return 0; 1663 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1664 if (host)
1665 return ata_host_suspend(host, state);
1666 else
1667 return 0;
1661} 1668}
1662 1669
1663int bfin_atapi_resume(struct platform_device *pdev) 1670static int bfin_atapi_resume(struct platform_device *pdev)
1664{ 1671{
1672 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1673 int ret;
1674
1675 if (host) {
1676 ret = bfin_reset_controller(host);
1677 if (ret) {
1678 printk(KERN_ERR DRV_NAME ": Error during HW init\n");
1679 return ret;
1680 }
1681 ata_host_resume(host);
1682 }
1683
1665 return 0; 1684 return 0;
1666} 1685}
1686#else
1687#define bfin_atapi_suspend NULL
1688#define bfin_atapi_resume NULL
1667#endif 1689#endif
1668 1690
1669static struct platform_driver bfin_atapi_driver = { 1691static struct platform_driver bfin_atapi_driver = {
1670 .probe = bfin_atapi_probe, 1692 .probe = bfin_atapi_probe,
1671 .remove = __devexit_p(bfin_atapi_remove), 1693 .remove = __devexit_p(bfin_atapi_remove),
1694 .suspend = bfin_atapi_suspend,
1695 .resume = bfin_atapi_resume,
1672 .driver = { 1696 .driver = {
1673 .name = DRV_NAME, 1697 .name = DRV_NAME,
1674 .owner = THIS_MODULE, 1698 .owner = THIS_MODULE,
1675#ifdef CONFIG_PM
1676 .suspend = bfin_atapi_suspend,
1677 .resume = bfin_atapi_resume,
1678#endif
1679 }, 1699 },
1680}; 1700};
1681 1701
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index e970b227fbce..a598bb36aafc 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -230,7 +230,7 @@ static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
230 tmpbyte & 1, tmpbyte & 0x30); 230 tmpbyte & 1, tmpbyte & 0x30);
231 231
232 *try_mmio = 0; 232 *try_mmio = 0;
233#ifdef CONFIG_PPC_MERGE 233#ifdef CONFIG_PPC
234 if (machine_is(cell)) 234 if (machine_is(cell))
235 *try_mmio = (tmpbyte & 1) || pci_resource_start(pdev, 5); 235 *try_mmio = (tmpbyte & 1) || pci_resource_start(pdev, 5);
236#endif 236#endif
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 3924e7209a44..1a56db92ff7a 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -469,10 +469,10 @@ static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc)
469 return true; 469 return true;
470} 470}
471 471
472static int sata_fsl_scr_write(struct ata_port *ap, unsigned int sc_reg_in, 472static int sata_fsl_scr_write(struct ata_link *link,
473 u32 val) 473 unsigned int sc_reg_in, u32 val)
474{ 474{
475 struct sata_fsl_host_priv *host_priv = ap->host->private_data; 475 struct sata_fsl_host_priv *host_priv = link->ap->host->private_data;
476 void __iomem *ssr_base = host_priv->ssr_base; 476 void __iomem *ssr_base = host_priv->ssr_base;
477 unsigned int sc_reg; 477 unsigned int sc_reg;
478 478
@@ -493,10 +493,10 @@ static int sata_fsl_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
493 return 0; 493 return 0;
494} 494}
495 495
496static int sata_fsl_scr_read(struct ata_port *ap, unsigned int sc_reg_in, 496static int sata_fsl_scr_read(struct ata_link *link,
497 u32 *val) 497 unsigned int sc_reg_in, u32 *val)
498{ 498{
499 struct sata_fsl_host_priv *host_priv = ap->host->private_data; 499 struct sata_fsl_host_priv *host_priv = link->ap->host->private_data;
500 void __iomem *ssr_base = host_priv->ssr_base; 500 void __iomem *ssr_base = host_priv->ssr_base;
501 unsigned int sc_reg; 501 unsigned int sc_reg;
502 502
@@ -645,12 +645,12 @@ static int sata_fsl_port_start(struct ata_port *ap)
645 * Workaround for 8315DS board 3gbps link-up issue, 645 * Workaround for 8315DS board 3gbps link-up issue,
646 * currently limit SATA port to GEN1 speed 646 * currently limit SATA port to GEN1 speed
647 */ 647 */
648 sata_fsl_scr_read(ap, SCR_CONTROL, &temp); 648 sata_fsl_scr_read(&ap->link, SCR_CONTROL, &temp);
649 temp &= ~(0xF << 4); 649 temp &= ~(0xF << 4);
650 temp |= (0x1 << 4); 650 temp |= (0x1 << 4);
651 sata_fsl_scr_write(ap, SCR_CONTROL, temp); 651 sata_fsl_scr_write(&ap->link, SCR_CONTROL, temp);
652 652
653 sata_fsl_scr_read(ap, SCR_CONTROL, &temp); 653 sata_fsl_scr_read(&ap->link, SCR_CONTROL, &temp);
654 dev_printk(KERN_WARNING, dev, "scr_control, speed limited to %x\n", 654 dev_printk(KERN_WARNING, dev, "scr_control, speed limited to %x\n",
655 temp); 655 temp);
656#endif 656#endif
@@ -868,7 +868,7 @@ issue_srst:
868 ioread32(CQ + hcr_base), 868 ioread32(CQ + hcr_base),
869 ioread32(CA + hcr_base), ioread32(CC + hcr_base)); 869 ioread32(CA + hcr_base), ioread32(CC + hcr_base));
870 870
871 sata_fsl_scr_read(ap, SCR_ERROR, &Serror); 871 sata_fsl_scr_read(&ap->link, SCR_ERROR, &Serror);
872 872
873 DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS)); 873 DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
874 DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL)); 874 DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
@@ -972,9 +972,9 @@ static void sata_fsl_error_intr(struct ata_port *ap)
972 * Handle & Clear SError 972 * Handle & Clear SError
973 */ 973 */
974 974
975 sata_fsl_scr_read(ap, SCR_ERROR, &SError); 975 sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError);
976 if (unlikely(SError & 0xFFFF0000)) { 976 if (unlikely(SError & 0xFFFF0000)) {
977 sata_fsl_scr_write(ap, SCR_ERROR, SError); 977 sata_fsl_scr_write(&ap->link, SCR_ERROR, SError);
978 } 978 }
979 979
980 DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n", 980 DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n",
@@ -1091,7 +1091,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
1091 1091
1092 hstatus = ioread32(hcr_base + HSTATUS); 1092 hstatus = ioread32(hcr_base + HSTATUS);
1093 1093
1094 sata_fsl_scr_read(ap, SCR_ERROR, &SError); 1094 sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError);
1095 1095
1096 if (unlikely(SError & 0xFFFF0000)) { 1096 if (unlikely(SError & 0xFFFF0000)) {
1097 DPRINTK("serror @host_intr : 0x%x\n", SError); 1097 DPRINTK("serror @host_intr : 0x%x\n", SError);
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 5032c32fa505..fbbd87c96f10 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -269,9 +269,9 @@ static void inic_reset_port(void __iomem *port_base)
269 writeb(0xff, port_base + PORT_IRQ_STAT); 269 writeb(0xff, port_base + PORT_IRQ_STAT);
270} 270}
271 271
272static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val) 272static int inic_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
273{ 273{
274 void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR; 274 void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
275 void __iomem *addr; 275 void __iomem *addr;
276 276
277 if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) 277 if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
@@ -286,9 +286,9 @@ static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
286 return 0; 286 return 0;
287} 287}
288 288
289static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) 289static int inic_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
290{ 290{
291 void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR; 291 void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
292 292
293 if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) 293 if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
294 return -EINVAL; 294 return -EINVAL;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index c815f8ecf6e6..2b24ae58b52e 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -493,10 +493,10 @@ struct mv_hw_ops {
493 void (*reset_bus)(struct ata_host *host, void __iomem *mmio); 493 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
494}; 494};
495 495
496static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); 496static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
497static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 497static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
498static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); 498static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
499static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 499static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
500static int mv_port_start(struct ata_port *ap); 500static int mv_port_start(struct ata_port *ap);
501static void mv_port_stop(struct ata_port *ap); 501static void mv_port_stop(struct ata_port *ap);
502static int mv_qc_defer(struct ata_queued_cmd *qc); 502static int mv_qc_defer(struct ata_queued_cmd *qc);
@@ -1070,23 +1070,23 @@ static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1070 return ofs; 1070 return ofs;
1071} 1071}
1072 1072
1073static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val) 1073static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
1074{ 1074{
1075 unsigned int ofs = mv_scr_offset(sc_reg_in); 1075 unsigned int ofs = mv_scr_offset(sc_reg_in);
1076 1076
1077 if (ofs != 0xffffffffU) { 1077 if (ofs != 0xffffffffU) {
1078 *val = readl(mv_ap_base(ap) + ofs); 1078 *val = readl(mv_ap_base(link->ap) + ofs);
1079 return 0; 1079 return 0;
1080 } else 1080 } else
1081 return -EINVAL; 1081 return -EINVAL;
1082} 1082}
1083 1083
1084static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) 1084static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
1085{ 1085{
1086 unsigned int ofs = mv_scr_offset(sc_reg_in); 1086 unsigned int ofs = mv_scr_offset(sc_reg_in);
1087 1087
1088 if (ofs != 0xffffffffU) { 1088 if (ofs != 0xffffffffU) {
1089 writelfl(val, mv_ap_base(ap) + ofs); 1089 writelfl(val, mv_ap_base(link->ap) + ofs);
1090 return 0; 1090 return 0;
1091 } else 1091 } else
1092 return -EINVAL; 1092 return -EINVAL;
@@ -2251,11 +2251,11 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
2251 return ofs; 2251 return ofs;
2252} 2252}
2253 2253
2254static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val) 2254static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
2255{ 2255{
2256 struct mv_host_priv *hpriv = ap->host->private_data; 2256 struct mv_host_priv *hpriv = link->ap->host->private_data;
2257 void __iomem *mmio = hpriv->base; 2257 void __iomem *mmio = hpriv->base;
2258 void __iomem *addr = mv5_phy_base(mmio, ap->port_no); 2258 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
2259 unsigned int ofs = mv5_scr_offset(sc_reg_in); 2259 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2260 2260
2261 if (ofs != 0xffffffffU) { 2261 if (ofs != 0xffffffffU) {
@@ -2265,11 +2265,11 @@ static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
2265 return -EINVAL; 2265 return -EINVAL;
2266} 2266}
2267 2267
2268static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) 2268static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
2269{ 2269{
2270 struct mv_host_priv *hpriv = ap->host->private_data; 2270 struct mv_host_priv *hpriv = link->ap->host->private_data;
2271 void __iomem *mmio = hpriv->base; 2271 void __iomem *mmio = hpriv->base;
2272 void __iomem *addr = mv5_phy_base(mmio, ap->port_no); 2272 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
2273 unsigned int ofs = mv5_scr_offset(sc_reg_in); 2273 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2274 2274
2275 if (ofs != 0xffffffffU) { 2275 if (ofs != 0xffffffffU) {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 1e1f3f3757ae..fae3841de0d8 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -302,13 +302,15 @@ static void nv_ck804_host_stop(struct ata_host *host);
302static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance); 302static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance); 303static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance); 304static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
305static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 305static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
306static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 306static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
307 307
308static void nv_nf2_freeze(struct ata_port *ap); 308static void nv_nf2_freeze(struct ata_port *ap);
309static void nv_nf2_thaw(struct ata_port *ap); 309static void nv_nf2_thaw(struct ata_port *ap);
310static void nv_ck804_freeze(struct ata_port *ap); 310static void nv_ck804_freeze(struct ata_port *ap);
311static void nv_ck804_thaw(struct ata_port *ap); 311static void nv_ck804_thaw(struct ata_port *ap);
312static int nv_hardreset(struct ata_link *link, unsigned int *class,
313 unsigned long deadline);
312static int nv_adma_slave_config(struct scsi_device *sdev); 314static int nv_adma_slave_config(struct scsi_device *sdev);
313static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); 315static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
314static void nv_adma_qc_prep(struct ata_queued_cmd *qc); 316static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
@@ -403,28 +405,45 @@ static struct scsi_host_template nv_swncq_sht = {
403 .slave_configure = nv_swncq_slave_config, 405 .slave_configure = nv_swncq_slave_config,
404}; 406};
405 407
406static struct ata_port_operations nv_generic_ops = { 408/* OSDL bz3352 reports that some nv controllers can't determine device
409 * signature reliably and nv_hardreset is implemented to work around
410 * the problem. This was reported on nf3 and it's unclear whether any
411 * other controllers are affected. However, the workaround has been
412 * applied to all variants and there isn't much to gain by trying to
413 * find out exactly which ones are affected at this point especially
414 * because NV has moved over to ahci for newer controllers.
415 */
416static struct ata_port_operations nv_common_ops = {
407 .inherits = &ata_bmdma_port_ops, 417 .inherits = &ata_bmdma_port_ops,
408 .hardreset = ATA_OP_NULL, 418 .hardreset = nv_hardreset,
409 .scr_read = nv_scr_read, 419 .scr_read = nv_scr_read,
410 .scr_write = nv_scr_write, 420 .scr_write = nv_scr_write,
411}; 421};
412 422
423/* OSDL bz11195 reports that link doesn't come online after hardreset
424 * on generic nv's and there have been several other similar reports
425 * on linux-ide. Disable hardreset for generic nv's.
426 */
427static struct ata_port_operations nv_generic_ops = {
428 .inherits = &nv_common_ops,
429 .hardreset = ATA_OP_NULL,
430};
431
413static struct ata_port_operations nv_nf2_ops = { 432static struct ata_port_operations nv_nf2_ops = {
414 .inherits = &nv_generic_ops, 433 .inherits = &nv_common_ops,
415 .freeze = nv_nf2_freeze, 434 .freeze = nv_nf2_freeze,
416 .thaw = nv_nf2_thaw, 435 .thaw = nv_nf2_thaw,
417}; 436};
418 437
419static struct ata_port_operations nv_ck804_ops = { 438static struct ata_port_operations nv_ck804_ops = {
420 .inherits = &nv_generic_ops, 439 .inherits = &nv_common_ops,
421 .freeze = nv_ck804_freeze, 440 .freeze = nv_ck804_freeze,
422 .thaw = nv_ck804_thaw, 441 .thaw = nv_ck804_thaw,
423 .host_stop = nv_ck804_host_stop, 442 .host_stop = nv_ck804_host_stop,
424}; 443};
425 444
426static struct ata_port_operations nv_adma_ops = { 445static struct ata_port_operations nv_adma_ops = {
427 .inherits = &nv_generic_ops, 446 .inherits = &nv_common_ops,
428 447
429 .check_atapi_dma = nv_adma_check_atapi_dma, 448 .check_atapi_dma = nv_adma_check_atapi_dma,
430 .sff_tf_read = nv_adma_tf_read, 449 .sff_tf_read = nv_adma_tf_read,
@@ -448,7 +467,7 @@ static struct ata_port_operations nv_adma_ops = {
448}; 467};
449 468
450static struct ata_port_operations nv_swncq_ops = { 469static struct ata_port_operations nv_swncq_ops = {
451 .inherits = &nv_generic_ops, 470 .inherits = &nv_common_ops,
452 471
453 .qc_defer = ata_std_qc_defer, 472 .qc_defer = ata_std_qc_defer,
454 .qc_prep = nv_swncq_qc_prep, 473 .qc_prep = nv_swncq_qc_prep,
@@ -1492,21 +1511,21 @@ static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1492 return ret; 1511 return ret;
1493} 1512}
1494 1513
1495static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 1514static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1496{ 1515{
1497 if (sc_reg > SCR_CONTROL) 1516 if (sc_reg > SCR_CONTROL)
1498 return -EINVAL; 1517 return -EINVAL;
1499 1518
1500 *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4)); 1519 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1501 return 0; 1520 return 0;
1502} 1521}
1503 1522
1504static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 1523static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1505{ 1524{
1506 if (sc_reg > SCR_CONTROL) 1525 if (sc_reg > SCR_CONTROL)
1507 return -EINVAL; 1526 return -EINVAL;
1508 1527
1509 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)); 1528 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1510 return 0; 1529 return 0;
1511} 1530}
1512 1531
@@ -1586,6 +1605,21 @@ static void nv_mcp55_thaw(struct ata_port *ap)
1586 ata_sff_thaw(ap); 1605 ata_sff_thaw(ap);
1587} 1606}
1588 1607
1608static int nv_hardreset(struct ata_link *link, unsigned int *class,
1609 unsigned long deadline)
1610{
1611 int rc;
1612
1613 /* SATA hardreset fails to retrieve proper device signature on
1614 * some controllers. Request follow up SRST. For more info,
1615 * see http://bugzilla.kernel.org/show_bug.cgi?id=3352
1616 */
1617 rc = sata_sff_hardreset(link, class, deadline);
1618 if (rc)
1619 return rc;
1620 return -EAGAIN;
1621}
1622
1589static void nv_adma_error_handler(struct ata_port *ap) 1623static void nv_adma_error_handler(struct ata_port *ap)
1590{ 1624{
1591 struct nv_adma_port_priv *pp = ap->private_data; 1625 struct nv_adma_port_priv *pp = ap->private_data;
@@ -2184,9 +2218,9 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2184 if (!pp->qc_active) 2218 if (!pp->qc_active)
2185 return; 2219 return;
2186 2220
2187 if (ap->ops->scr_read(ap, SCR_ERROR, &serror)) 2221 if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2188 return; 2222 return;
2189 ap->ops->scr_write(ap, SCR_ERROR, serror); 2223 ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2190 2224
2191 if (ata_stat & ATA_ERR) { 2225 if (ata_stat & ATA_ERR) {
2192 ata_ehi_clear_desc(ehi); 2226 ata_ehi_clear_desc(ehi);
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 030665ba76b7..750d8cdc00cd 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -137,8 +137,8 @@ struct pdc_port_priv {
137 dma_addr_t pkt_dma; 137 dma_addr_t pkt_dma;
138}; 138};
139 139
140static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 140static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
141static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 141static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
142static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 142static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
143static int pdc_common_port_start(struct ata_port *ap); 143static int pdc_common_port_start(struct ata_port *ap);
144static int pdc_sata_port_start(struct ata_port *ap); 144static int pdc_sata_port_start(struct ata_port *ap);
@@ -386,19 +386,21 @@ static int pdc_sata_cable_detect(struct ata_port *ap)
386 return ATA_CBL_SATA; 386 return ATA_CBL_SATA;
387} 387}
388 388
389static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 389static int pdc_sata_scr_read(struct ata_link *link,
390 unsigned int sc_reg, u32 *val)
390{ 391{
391 if (sc_reg > SCR_CONTROL) 392 if (sc_reg > SCR_CONTROL)
392 return -EINVAL; 393 return -EINVAL;
393 *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4)); 394 *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
394 return 0; 395 return 0;
395} 396}
396 397
397static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 398static int pdc_sata_scr_write(struct ata_link *link,
399 unsigned int sc_reg, u32 val)
398{ 400{
399 if (sc_reg > SCR_CONTROL) 401 if (sc_reg > SCR_CONTROL)
400 return -EINVAL; 402 return -EINVAL;
401 writel(val, ap->ioaddr.scr_addr + (sc_reg * 4)); 403 writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
402 return 0; 404 return 0;
403} 405}
404 406
@@ -731,7 +733,7 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
731 if (sata_scr_valid(&ap->link)) { 733 if (sata_scr_valid(&ap->link)) {
732 u32 serror; 734 u32 serror;
733 735
734 pdc_sata_scr_read(ap, SCR_ERROR, &serror); 736 pdc_sata_scr_read(&ap->link, SCR_ERROR, &serror);
735 ehi->serror |= serror; 737 ehi->serror |= serror;
736 } 738 }
737 739
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 1600107047cf..a000c86ac859 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -111,8 +111,8 @@ struct qs_port_priv {
111 qs_state_t state; 111 qs_state_t state;
112}; 112};
113 113
114static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 114static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
115static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 115static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
116static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 116static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
117static int qs_port_start(struct ata_port *ap); 117static int qs_port_start(struct ata_port *ap);
118static void qs_host_stop(struct ata_host *host); 118static void qs_host_stop(struct ata_host *host);
@@ -242,11 +242,11 @@ static int qs_prereset(struct ata_link *link, unsigned long deadline)
242 return ata_sff_prereset(link, deadline); 242 return ata_sff_prereset(link, deadline);
243} 243}
244 244
245static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 245static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
246{ 246{
247 if (sc_reg > SCR_CONTROL) 247 if (sc_reg > SCR_CONTROL)
248 return -EINVAL; 248 return -EINVAL;
249 *val = readl(ap->ioaddr.scr_addr + (sc_reg * 8)); 249 *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 8));
250 return 0; 250 return 0;
251} 251}
252 252
@@ -256,11 +256,11 @@ static void qs_error_handler(struct ata_port *ap)
256 ata_std_error_handler(ap); 256 ata_std_error_handler(ap);
257} 257}
258 258
259static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 259static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
260{ 260{
261 if (sc_reg > SCR_CONTROL) 261 if (sc_reg > SCR_CONTROL)
262 return -EINVAL; 262 return -EINVAL;
263 writel(val, ap->ioaddr.scr_addr + (sc_reg * 8)); 263 writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 8));
264 return 0; 264 return 0;
265} 265}
266 266
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 88bf4212590f..031d7b7dee34 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -115,8 +115,8 @@ static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
115static int sil_pci_device_resume(struct pci_dev *pdev); 115static int sil_pci_device_resume(struct pci_dev *pdev);
116#endif 116#endif
117static void sil_dev_config(struct ata_device *dev); 117static void sil_dev_config(struct ata_device *dev);
118static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 118static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
119static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 119static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
120static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed); 120static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
121static void sil_freeze(struct ata_port *ap); 121static void sil_freeze(struct ata_port *ap);
122static void sil_thaw(struct ata_port *ap); 122static void sil_thaw(struct ata_port *ap);
@@ -317,9 +317,9 @@ static inline void __iomem *sil_scr_addr(struct ata_port *ap,
317 return NULL; 317 return NULL;
318} 318}
319 319
320static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 320static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
321{ 321{
322 void __iomem *mmio = sil_scr_addr(ap, sc_reg); 322 void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
323 323
324 if (mmio) { 324 if (mmio) {
325 *val = readl(mmio); 325 *val = readl(mmio);
@@ -328,9 +328,9 @@ static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
328 return -EINVAL; 328 return -EINVAL;
329} 329}
330 330
331static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 331static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
332{ 332{
333 void __iomem *mmio = sil_scr_addr(ap, sc_reg); 333 void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
334 334
335 if (mmio) { 335 if (mmio) {
336 writel(val, mmio); 336 writel(val, mmio);
@@ -352,8 +352,8 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
352 * controllers continue to assert IRQ as long as 352 * controllers continue to assert IRQ as long as
353 * SError bits are pending. Clear SError immediately. 353 * SError bits are pending. Clear SError immediately.
354 */ 354 */
355 sil_scr_read(ap, SCR_ERROR, &serror); 355 sil_scr_read(&ap->link, SCR_ERROR, &serror);
356 sil_scr_write(ap, SCR_ERROR, serror); 356 sil_scr_write(&ap->link, SCR_ERROR, serror);
357 357
358 /* Sometimes spurious interrupts occur, double check 358 /* Sometimes spurious interrupts occur, double check
359 * it's PHYRDY CHG. 359 * it's PHYRDY CHG.
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 84ffcc26a74b..4621807a1a6a 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -340,8 +340,8 @@ struct sil24_port_priv {
340}; 340};
341 341
342static void sil24_dev_config(struct ata_device *dev); 342static void sil24_dev_config(struct ata_device *dev);
343static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val); 343static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
344static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); 344static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
345static int sil24_qc_defer(struct ata_queued_cmd *qc); 345static int sil24_qc_defer(struct ata_queued_cmd *qc);
346static void sil24_qc_prep(struct ata_queued_cmd *qc); 346static void sil24_qc_prep(struct ata_queued_cmd *qc);
347static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); 347static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
@@ -504,9 +504,9 @@ static int sil24_scr_map[] = {
504 [SCR_ACTIVE] = 3, 504 [SCR_ACTIVE] = 3,
505}; 505};
506 506
507static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val) 507static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
508{ 508{
509 void __iomem *scr_addr = sil24_port_base(ap) + PORT_SCONTROL; 509 void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
510 510
511 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { 511 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
512 void __iomem *addr; 512 void __iomem *addr;
@@ -517,9 +517,9 @@ static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
517 return -EINVAL; 517 return -EINVAL;
518} 518}
519 519
520static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) 520static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
521{ 521{
522 void __iomem *scr_addr = sil24_port_base(ap) + PORT_SCONTROL; 522 void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
523 523
524 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { 524 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
525 void __iomem *addr; 525 void __iomem *addr;
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 1010b3069bd5..9c43b4e7c4a6 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -64,8 +64,8 @@ enum {
64}; 64};
65 65
66static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 66static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
67static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 67static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
68static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 68static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
69 69
70static const struct pci_device_id sis_pci_tbl[] = { 70static const struct pci_device_id sis_pci_tbl[] = {
71 { PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */ 71 { PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */
@@ -134,10 +134,11 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
134 return addr; 134 return addr;
135} 135}
136 136
137static u32 sis_scr_cfg_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 137static u32 sis_scr_cfg_read(struct ata_link *link,
138 unsigned int sc_reg, u32 *val)
138{ 139{
139 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 140 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
140 unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg); 141 unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
141 u32 val2 = 0; 142 u32 val2 = 0;
142 u8 pmr; 143 u8 pmr;
143 144
@@ -158,10 +159,11 @@ static u32 sis_scr_cfg_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
158 return 0; 159 return 0;
159} 160}
160 161
161static int sis_scr_cfg_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 162static int sis_scr_cfg_write(struct ata_link *link,
163 unsigned int sc_reg, u32 val)
162{ 164{
163 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 165 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
164 unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg); 166 unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
165 u8 pmr; 167 u8 pmr;
166 168
167 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */ 169 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
@@ -178,8 +180,9 @@ static int sis_scr_cfg_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
178 return 0; 180 return 0;
179} 181}
180 182
181static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 183static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
182{ 184{
185 struct ata_port *ap = link->ap;
183 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 186 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
184 u8 pmr; 187 u8 pmr;
185 188
@@ -187,7 +190,7 @@ static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
187 return -EINVAL; 190 return -EINVAL;
188 191
189 if (ap->flags & SIS_FLAG_CFGSCR) 192 if (ap->flags & SIS_FLAG_CFGSCR)
190 return sis_scr_cfg_read(ap, sc_reg, val); 193 return sis_scr_cfg_read(link, sc_reg, val);
191 194
192 pci_read_config_byte(pdev, SIS_PMR, &pmr); 195 pci_read_config_byte(pdev, SIS_PMR, &pmr);
193 196
@@ -202,8 +205,9 @@ static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
202 return 0; 205 return 0;
203} 206}
204 207
205static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 208static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
206{ 209{
210 struct ata_port *ap = link->ap;
207 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 211 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
208 u8 pmr; 212 u8 pmr;
209 213
@@ -213,7 +217,7 @@ static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
213 pci_read_config_byte(pdev, SIS_PMR, &pmr); 217 pci_read_config_byte(pdev, SIS_PMR, &pmr);
214 218
215 if (ap->flags & SIS_FLAG_CFGSCR) 219 if (ap->flags & SIS_FLAG_CFGSCR)
216 return sis_scr_cfg_write(ap, sc_reg, val); 220 return sis_scr_cfg_write(link, sc_reg, val);
217 else { 221 else {
218 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)); 222 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
219 if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || 223 if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index fb13b82aacba..609d147813ae 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -123,20 +123,22 @@ static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc)
123 } 123 }
124} 124}
125 125
126static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 126static int k2_sata_scr_read(struct ata_link *link,
127 unsigned int sc_reg, u32 *val)
127{ 128{
128 if (sc_reg > SCR_CONTROL) 129 if (sc_reg > SCR_CONTROL)
129 return -EINVAL; 130 return -EINVAL;
130 *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4)); 131 *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
131 return 0; 132 return 0;
132} 133}
133 134
134 135
135static int k2_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 136static int k2_sata_scr_write(struct ata_link *link,
137 unsigned int sc_reg, u32 val)
136{ 138{
137 if (sc_reg > SCR_CONTROL) 139 if (sc_reg > SCR_CONTROL)
138 return -EINVAL; 140 return -EINVAL;
139 writel(val, ap->ioaddr.scr_addr + (sc_reg * 4)); 141 writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
140 return 0; 142 return 0;
141} 143}
142 144
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index db529b849948..019575bb3e08 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -57,8 +57,8 @@ struct uli_priv {
57}; 57};
58 58
59static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 59static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
60static int uli_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 60static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
61static int uli_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 61static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
62 62
63static const struct pci_device_id uli_pci_tbl[] = { 63static const struct pci_device_id uli_pci_tbl[] = {
64 { PCI_VDEVICE(AL, 0x5289), uli_5289 }, 64 { PCI_VDEVICE(AL, 0x5289), uli_5289 },
@@ -107,39 +107,39 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
107 return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg); 107 return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
108} 108}
109 109
110static u32 uli_scr_cfg_read(struct ata_port *ap, unsigned int sc_reg) 110static u32 uli_scr_cfg_read(struct ata_link *link, unsigned int sc_reg)
111{ 111{
112 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 112 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
113 unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg); 113 unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
114 u32 val; 114 u32 val;
115 115
116 pci_read_config_dword(pdev, cfg_addr, &val); 116 pci_read_config_dword(pdev, cfg_addr, &val);
117 return val; 117 return val;
118} 118}
119 119
120static void uli_scr_cfg_write(struct ata_port *ap, unsigned int scr, u32 val) 120static void uli_scr_cfg_write(struct ata_link *link, unsigned int scr, u32 val)
121{ 121{
122 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 122 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
123 unsigned int cfg_addr = get_scr_cfg_addr(ap, scr); 123 unsigned int cfg_addr = get_scr_cfg_addr(link->ap, scr);
124 124
125 pci_write_config_dword(pdev, cfg_addr, val); 125 pci_write_config_dword(pdev, cfg_addr, val);
126} 126}
127 127
128static int uli_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 128static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
129{ 129{
130 if (sc_reg > SCR_CONTROL) 130 if (sc_reg > SCR_CONTROL)
131 return -EINVAL; 131 return -EINVAL;
132 132
133 *val = uli_scr_cfg_read(ap, sc_reg); 133 *val = uli_scr_cfg_read(link, sc_reg);
134 return 0; 134 return 0;
135} 135}
136 136
137static int uli_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 137static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
138{ 138{
139 if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 139 if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0
140 return -EINVAL; 140 return -EINVAL;
141 141
142 uli_scr_cfg_write(ap, sc_reg, val); 142 uli_scr_cfg_write(link, sc_reg, val);
143 return 0; 143 return 0;
144} 144}
145 145
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 96deeb354e16..1cfa74535d91 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -68,8 +68,8 @@ enum {
68}; 68};
69 69
70static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 70static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
71static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 71static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
72static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 72static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
73static void svia_noop_freeze(struct ata_port *ap); 73static void svia_noop_freeze(struct ata_port *ap);
74static int vt6420_prereset(struct ata_link *link, unsigned long deadline); 74static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
75static int vt6421_pata_cable_detect(struct ata_port *ap); 75static int vt6421_pata_cable_detect(struct ata_port *ap);
@@ -152,19 +152,19 @@ MODULE_LICENSE("GPL");
152MODULE_DEVICE_TABLE(pci, svia_pci_tbl); 152MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
153MODULE_VERSION(DRV_VERSION); 153MODULE_VERSION(DRV_VERSION);
154 154
155static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 155static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
156{ 156{
157 if (sc_reg > SCR_CONTROL) 157 if (sc_reg > SCR_CONTROL)
158 return -EINVAL; 158 return -EINVAL;
159 *val = ioread32(ap->ioaddr.scr_addr + (4 * sc_reg)); 159 *val = ioread32(link->ap->ioaddr.scr_addr + (4 * sc_reg));
160 return 0; 160 return 0;
161} 161}
162 162
163static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 163static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
164{ 164{
165 if (sc_reg > SCR_CONTROL) 165 if (sc_reg > SCR_CONTROL)
166 return -EINVAL; 166 return -EINVAL;
167 iowrite32(val, ap->ioaddr.scr_addr + (4 * sc_reg)); 167 iowrite32(val, link->ap->ioaddr.scr_addr + (4 * sc_reg));
168 return 0; 168 return 0;
169} 169}
170 170
@@ -210,20 +210,20 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
210 goto skip_scr; 210 goto skip_scr;
211 211
212 /* Resume phy. This is the old SATA resume sequence */ 212 /* Resume phy. This is the old SATA resume sequence */
213 svia_scr_write(ap, SCR_CONTROL, 0x300); 213 svia_scr_write(link, SCR_CONTROL, 0x300);
214 svia_scr_read(ap, SCR_CONTROL, &scontrol); /* flush */ 214 svia_scr_read(link, SCR_CONTROL, &scontrol); /* flush */
215 215
216 /* wait for phy to become ready, if necessary */ 216 /* wait for phy to become ready, if necessary */
217 do { 217 do {
218 msleep(200); 218 msleep(200);
219 svia_scr_read(ap, SCR_STATUS, &sstatus); 219 svia_scr_read(link, SCR_STATUS, &sstatus);
220 if ((sstatus & 0xf) != 1) 220 if ((sstatus & 0xf) != 1)
221 break; 221 break;
222 } while (time_before(jiffies, timeout)); 222 } while (time_before(jiffies, timeout));
223 223
224 /* open code sata_print_link_status() */ 224 /* open code sata_print_link_status() */
225 svia_scr_read(ap, SCR_STATUS, &sstatus); 225 svia_scr_read(link, SCR_STATUS, &sstatus);
226 svia_scr_read(ap, SCR_CONTROL, &scontrol); 226 svia_scr_read(link, SCR_CONTROL, &scontrol);
227 227
228 online = (sstatus & 0xf) == 0x3; 228 online = (sstatus & 0xf) == 0x3;
229 229
@@ -232,7 +232,7 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
232 online ? "up" : "down", sstatus, scontrol); 232 online ? "up" : "down", sstatus, scontrol);
233 233
234 /* SStatus is read one more time */ 234 /* SStatus is read one more time */
235 svia_scr_read(ap, SCR_STATUS, &sstatus); 235 svia_scr_read(link, SCR_STATUS, &sstatus);
236 236
237 if (!online) { 237 if (!online) {
238 /* tell EH to bail */ 238 /* tell EH to bail */
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index f3d635c0a2e9..c57cdff9e6bd 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -98,20 +98,22 @@ enum {
98 VSC_SATA_INT_PHY_CHANGE), 98 VSC_SATA_INT_PHY_CHANGE),
99}; 99};
100 100
101static int vsc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 101static int vsc_sata_scr_read(struct ata_link *link,
102 unsigned int sc_reg, u32 *val)
102{ 103{
103 if (sc_reg > SCR_CONTROL) 104 if (sc_reg > SCR_CONTROL)
104 return -EINVAL; 105 return -EINVAL;
105 *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4)); 106 *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
106 return 0; 107 return 0;
107} 108}
108 109
109 110
110static int vsc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 111static int vsc_sata_scr_write(struct ata_link *link,
112 unsigned int sc_reg, u32 val)
111{ 113{
112 if (sc_reg > SCR_CONTROL) 114 if (sc_reg > SCR_CONTROL)
113 return -EINVAL; 115 return -EINVAL;
114 writel(val, ap->ioaddr.scr_addr + (sc_reg * 4)); 116 writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
115 return 0; 117 return 0;
116} 118}
117 119
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 31dc0cd84afa..0a5f055dffba 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -54,7 +54,7 @@ struct driver_private {
54 */ 54 */
55struct class_private { 55struct class_private {
56 struct kset class_subsys; 56 struct kset class_subsys;
57 struct list_head class_devices; 57 struct klist class_devices;
58 struct list_head class_interfaces; 58 struct list_head class_interfaces;
59 struct kset class_dirs; 59 struct kset class_dirs;
60 struct mutex class_mutex; 60 struct mutex class_mutex;
diff --git a/drivers/base/class.c b/drivers/base/class.c
index cc5e28c8885c..eb85e4312301 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -135,6 +135,20 @@ static void remove_class_attrs(struct class *cls)
135 } 135 }
136} 136}
137 137
138static void klist_class_dev_get(struct klist_node *n)
139{
140 struct device *dev = container_of(n, struct device, knode_class);
141
142 get_device(dev);
143}
144
145static void klist_class_dev_put(struct klist_node *n)
146{
147 struct device *dev = container_of(n, struct device, knode_class);
148
149 put_device(dev);
150}
151
138int __class_register(struct class *cls, struct lock_class_key *key) 152int __class_register(struct class *cls, struct lock_class_key *key)
139{ 153{
140 struct class_private *cp; 154 struct class_private *cp;
@@ -145,7 +159,7 @@ int __class_register(struct class *cls, struct lock_class_key *key)
145 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 159 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
146 if (!cp) 160 if (!cp)
147 return -ENOMEM; 161 return -ENOMEM;
148 INIT_LIST_HEAD(&cp->class_devices); 162 klist_init(&cp->class_devices, klist_class_dev_get, klist_class_dev_put);
149 INIT_LIST_HEAD(&cp->class_interfaces); 163 INIT_LIST_HEAD(&cp->class_interfaces);
150 kset_init(&cp->class_dirs); 164 kset_init(&cp->class_dirs);
151 __mutex_init(&cp->class_mutex, "struct class mutex", key); 165 __mutex_init(&cp->class_mutex, "struct class mutex", key);
@@ -269,6 +283,71 @@ char *make_class_name(const char *name, struct kobject *kobj)
269#endif 283#endif
270 284
271/** 285/**
286 * class_dev_iter_init - initialize class device iterator
287 * @iter: class iterator to initialize
 288 * @class: the class we want to iterate over
289 * @start: the device to start iterating from, if any
290 * @type: device_type of the devices to iterate over, NULL for all
291 *
292 * Initialize class iterator @iter such that it iterates over devices
293 * of @class. If @start is set, the list iteration will start there,
294 * otherwise if it is NULL, the iteration starts at the beginning of
295 * the list.
296 */
297void class_dev_iter_init(struct class_dev_iter *iter, struct class *class,
298 struct device *start, const struct device_type *type)
299{
300 struct klist_node *start_knode = NULL;
301
302 if (start)
303 start_knode = &start->knode_class;
304 klist_iter_init_node(&class->p->class_devices, &iter->ki, start_knode);
305 iter->type = type;
306}
307EXPORT_SYMBOL_GPL(class_dev_iter_init);
308
309/**
310 * class_dev_iter_next - iterate to the next device
311 * @iter: class iterator to proceed
312 *
313 * Proceed @iter to the next device and return it. Returns NULL if
314 * iteration is complete.
315 *
 316 * The returned device is referenced and won't be released until the
 317 * iterator proceeds to the next device or is exited. The caller is
318 * free to do whatever it wants to do with the device including
319 * calling back into class code.
320 */
321struct device *class_dev_iter_next(struct class_dev_iter *iter)
322{
323 struct klist_node *knode;
324 struct device *dev;
325
326 while (1) {
327 knode = klist_next(&iter->ki);
328 if (!knode)
329 return NULL;
330 dev = container_of(knode, struct device, knode_class);
331 if (!iter->type || iter->type == dev->type)
332 return dev;
333 }
334}
335EXPORT_SYMBOL_GPL(class_dev_iter_next);
336
337/**
338 * class_dev_iter_exit - finish iteration
339 * @iter: class iterator to finish
340 *
341 * Finish an iteration. Always call this function after iteration is
342 * complete whether the iteration ran till the end or not.
343 */
344void class_dev_iter_exit(struct class_dev_iter *iter)
345{
346 klist_iter_exit(&iter->ki);
347}
348EXPORT_SYMBOL_GPL(class_dev_iter_exit);
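
A sketch of how a caller would use the iterator API added above, following the init/next/exit sequence described in the kerneldoc and assuming the struct class_dev_iter declaration that accompanies this patch in <linux/device.h>; the class pointer and the loop body are hypothetical:

#include <linux/device.h>

static void demo_walk_class(struct class *my_class)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, my_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		/* dev stays referenced until the next iteration step */
		dev_info(dev, "visited\n");
	}
	class_dev_iter_exit(&iter);
}
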
349
350/**
272 * class_for_each_device - device iterator 351 * class_for_each_device - device iterator
273 * @class: the class we're iterating 352 * @class: the class we're iterating
274 * @start: the device to start with in the list, if any. 353 * @start: the device to start with in the list, if any.
@@ -283,13 +362,13 @@ char *make_class_name(const char *name, struct kobject *kobj)
283 * We check the return of @fn each time. If it returns anything 362 * We check the return of @fn each time. If it returns anything
284 * other than 0, we break out and return that value. 363 * other than 0, we break out and return that value.
285 * 364 *
286 * Note, we hold class->class_mutex in this function, so it can not be 365 * @fn is allowed to do anything including calling back into class
287 * re-acquired in @fn, otherwise it will self-deadlocking. For 366 * code. There's no locking restriction.
288 * example, calls to add or remove class members would be verboten.
289 */ 367 */
290int class_for_each_device(struct class *class, struct device *start, 368int class_for_each_device(struct class *class, struct device *start,
291 void *data, int (*fn)(struct device *, void *)) 369 void *data, int (*fn)(struct device *, void *))
292{ 370{
371 struct class_dev_iter iter;
293 struct device *dev; 372 struct device *dev;
294 int error = 0; 373 int error = 0;
295 374
@@ -301,20 +380,13 @@ int class_for_each_device(struct class *class, struct device *start,
301 return -EINVAL; 380 return -EINVAL;
302 } 381 }
303 382
304 mutex_lock(&class->p->class_mutex); 383 class_dev_iter_init(&iter, class, start, NULL);
305 list_for_each_entry(dev, &class->p->class_devices, node) { 384 while ((dev = class_dev_iter_next(&iter))) {
306 if (start) {
307 if (start == dev)
308 start = NULL;
309 continue;
310 }
311 dev = get_device(dev);
312 error = fn(dev, data); 385 error = fn(dev, data);
313 put_device(dev);
314 if (error) 386 if (error)
315 break; 387 break;
316 } 388 }
317 mutex_unlock(&class->p->class_mutex); 389 class_dev_iter_exit(&iter);
318 390
319 return error; 391 return error;
320} 392}
@@ -337,16 +409,15 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
337 * 409 *
338 * Note, you will need to drop the reference with put_device() after use. 410 * Note, you will need to drop the reference with put_device() after use.
339 * 411 *
340 * We hold class->class_mutex in this function, so it can not be 412 * @fn is allowed to do anything including calling back into class
341 * re-acquired in @match, otherwise it will self-deadlocking. For 413 * code. There's no locking restriction.
342 * example, calls to add or remove class members would be verboten.
343 */ 414 */
344struct device *class_find_device(struct class *class, struct device *start, 415struct device *class_find_device(struct class *class, struct device *start,
345 void *data, 416 void *data,
346 int (*match)(struct device *, void *)) 417 int (*match)(struct device *, void *))
347{ 418{
419 struct class_dev_iter iter;
348 struct device *dev; 420 struct device *dev;
349 int found = 0;
350 421
351 if (!class) 422 if (!class)
352 return NULL; 423 return NULL;
@@ -356,29 +427,23 @@ struct device *class_find_device(struct class *class, struct device *start,
356 return NULL; 427 return NULL;
357 } 428 }
358 429
359 mutex_lock(&class->p->class_mutex); 430 class_dev_iter_init(&iter, class, start, NULL);
360 list_for_each_entry(dev, &class->p->class_devices, node) { 431 while ((dev = class_dev_iter_next(&iter))) {
361 if (start) {
362 if (start == dev)
363 start = NULL;
364 continue;
365 }
366 dev = get_device(dev);
367 if (match(dev, data)) { 432 if (match(dev, data)) {
368 found = 1; 433 get_device(dev);
369 break; 434 break;
370 } else 435 }
371 put_device(dev);
372 } 436 }
373 mutex_unlock(&class->p->class_mutex); 437 class_dev_iter_exit(&iter);
374 438
375 return found ? dev : NULL; 439 return dev;
376} 440}
377EXPORT_SYMBOL_GPL(class_find_device); 441EXPORT_SYMBOL_GPL(class_find_device);
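
For comparison, a hypothetical lookup built on class_find_device() with the match-callback prototype used at this point in the tree (later kernels changed the signature). The name-based match is made up for illustration; the reference returned on a match must be dropped with put_device():

#include <linux/device.h>
#include <linux/string.h>

static int demo_match_name(struct device *dev, void *data)
{
	return strcmp(dev_name(dev), (const char *)data) == 0;
}

static struct device *demo_lookup(struct class *cls, char *name)
{
	/* returns a referenced device or NULL; caller must put_device() it */
	return class_find_device(cls, NULL, name, demo_match_name);
}
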
378 442
379int class_interface_register(struct class_interface *class_intf) 443int class_interface_register(struct class_interface *class_intf)
380{ 444{
381 struct class *parent; 445 struct class *parent;
446 struct class_dev_iter iter;
382 struct device *dev; 447 struct device *dev;
383 448
384 if (!class_intf || !class_intf->class) 449 if (!class_intf || !class_intf->class)
@@ -391,8 +456,10 @@ int class_interface_register(struct class_interface *class_intf)
391 mutex_lock(&parent->p->class_mutex); 456 mutex_lock(&parent->p->class_mutex);
392 list_add_tail(&class_intf->node, &parent->p->class_interfaces); 457 list_add_tail(&class_intf->node, &parent->p->class_interfaces);
393 if (class_intf->add_dev) { 458 if (class_intf->add_dev) {
394 list_for_each_entry(dev, &parent->p->class_devices, node) 459 class_dev_iter_init(&iter, parent, NULL, NULL);
460 while ((dev = class_dev_iter_next(&iter)))
395 class_intf->add_dev(dev, class_intf); 461 class_intf->add_dev(dev, class_intf);
462 class_dev_iter_exit(&iter);
396 } 463 }
397 mutex_unlock(&parent->p->class_mutex); 464 mutex_unlock(&parent->p->class_mutex);
398 465
@@ -402,6 +469,7 @@ int class_interface_register(struct class_interface *class_intf)
402void class_interface_unregister(struct class_interface *class_intf) 469void class_interface_unregister(struct class_interface *class_intf)
403{ 470{
404 struct class *parent = class_intf->class; 471 struct class *parent = class_intf->class;
472 struct class_dev_iter iter;
405 struct device *dev; 473 struct device *dev;
406 474
407 if (!parent) 475 if (!parent)
@@ -410,8 +478,10 @@ void class_interface_unregister(struct class_interface *class_intf)
410 mutex_lock(&parent->p->class_mutex); 478 mutex_lock(&parent->p->class_mutex);
411 list_del_init(&class_intf->node); 479 list_del_init(&class_intf->node);
412 if (class_intf->remove_dev) { 480 if (class_intf->remove_dev) {
413 list_for_each_entry(dev, &parent->p->class_devices, node) 481 class_dev_iter_init(&iter, parent, NULL, NULL);
482 while ((dev = class_dev_iter_next(&iter)))
414 class_intf->remove_dev(dev, class_intf); 483 class_intf->remove_dev(dev, class_intf);
484 class_dev_iter_exit(&iter);
415 } 485 }
416 mutex_unlock(&parent->p->class_mutex); 486 mutex_unlock(&parent->p->class_mutex);
417 487
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d021c98605b3..b98cb1416a2d 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -536,7 +536,6 @@ void device_initialize(struct device *dev)
536 klist_init(&dev->klist_children, klist_children_get, 536 klist_init(&dev->klist_children, klist_children_get,
537 klist_children_put); 537 klist_children_put);
538 INIT_LIST_HEAD(&dev->dma_pools); 538 INIT_LIST_HEAD(&dev->dma_pools);
539 INIT_LIST_HEAD(&dev->node);
540 init_MUTEX(&dev->sem); 539 init_MUTEX(&dev->sem);
541 spin_lock_init(&dev->devres_lock); 540 spin_lock_init(&dev->devres_lock);
542 INIT_LIST_HEAD(&dev->devres_head); 541 INIT_LIST_HEAD(&dev->devres_head);
@@ -916,7 +915,8 @@ int device_add(struct device *dev)
916 if (dev->class) { 915 if (dev->class) {
917 mutex_lock(&dev->class->p->class_mutex); 916 mutex_lock(&dev->class->p->class_mutex);
918 /* tie the class to the device */ 917 /* tie the class to the device */
919 list_add_tail(&dev->node, &dev->class->p->class_devices); 918 klist_add_tail(&dev->knode_class,
919 &dev->class->p->class_devices);
920 920
921 /* notify any interfaces that the device is here */ 921 /* notify any interfaces that the device is here */
922 list_for_each_entry(class_intf, 922 list_for_each_entry(class_intf,
@@ -1032,7 +1032,7 @@ void device_del(struct device *dev)
1032 if (class_intf->remove_dev) 1032 if (class_intf->remove_dev)
1033 class_intf->remove_dev(dev, class_intf); 1033 class_intf->remove_dev(dev, class_intf);
1034 /* remove the device from the class list */ 1034 /* remove the device from the class list */
1035 list_del_init(&dev->node); 1035 klist_del(&dev->knode_class);
1036 mutex_unlock(&dev->class->p->class_mutex); 1036 mutex_unlock(&dev->class->p->class_mutex);
1037 } 1037 }
1038 device_remove_file(dev, &uevent_attr); 1038 device_remove_file(dev, &uevent_attr);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 0c39782b2660..aa69556c3485 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -109,12 +109,12 @@ static const struct attribute_group attr_group = {
109static int 109static int
110aoedisk_add_sysfs(struct aoedev *d) 110aoedisk_add_sysfs(struct aoedev *d)
111{ 111{
112 return sysfs_create_group(&d->gd->dev.kobj, &attr_group); 112 return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group);
113} 113}
114void 114void
115aoedisk_rm_sysfs(struct aoedev *d) 115aoedisk_rm_sysfs(struct aoedev *d)
116{ 116{
117 sysfs_remove_group(&d->gd->dev.kobj, &attr_group); 117 sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group);
118} 118}
119 119
120static int 120static int
@@ -276,7 +276,7 @@ aoeblk_gdalloc(void *vp)
276 gd->first_minor = d->sysminor * AOE_PARTITIONS; 276 gd->first_minor = d->sysminor * AOE_PARTITIONS;
277 gd->fops = &aoe_bdops; 277 gd->fops = &aoe_bdops;
278 gd->private_data = d; 278 gd->private_data = d;
279 gd->capacity = d->ssize; 279 set_capacity(gd, d->ssize);
280 snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d", 280 snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
281 d->aoemajor, d->aoeminor); 281 d->aoemajor, d->aoeminor);
282 282
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 2f1746295d06..961d29a53cab 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -645,7 +645,7 @@ aoecmd_sleepwork(struct work_struct *work)
645 unsigned long flags; 645 unsigned long flags;
646 u64 ssize; 646 u64 ssize;
647 647
648 ssize = d->gd->capacity; 648 ssize = get_capacity(d->gd);
649 bd = bdget_disk(d->gd, 0); 649 bd = bdget_disk(d->gd, 0);
650 650
651 if (bd) { 651 if (bd) {
@@ -707,7 +707,7 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
707 if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE)) 707 if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
708 return; 708 return;
709 if (d->gd != NULL) { 709 if (d->gd != NULL) {
710 d->gd->capacity = ssize; 710 set_capacity(d->gd, ssize);
711 d->flags |= DEVFL_NEWSIZE; 711 d->flags |= DEVFL_NEWSIZE;
712 } else 712 } else
713 d->flags |= DEVFL_GDALLOC; 713 d->flags |= DEVFL_GDALLOC;
@@ -756,12 +756,17 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
756 unsigned long n_sect = bio->bi_size >> 9; 756 unsigned long n_sect = bio->bi_size >> 9;
757 const int rw = bio_data_dir(bio); 757 const int rw = bio_data_dir(bio);
758 struct hd_struct *part; 758 struct hd_struct *part;
759 int cpu;
759 760
760 part = get_part(disk, sector); 761 cpu = part_stat_lock();
761 all_stat_inc(disk, part, ios[rw], sector); 762 part = disk_map_sector_rcu(disk, sector);
762 all_stat_add(disk, part, ticks[rw], duration, sector); 763
763 all_stat_add(disk, part, sectors[rw], n_sect, sector); 764 part_stat_inc(cpu, part, ios[rw]);
764 all_stat_add(disk, part, io_ticks, duration, sector); 765 part_stat_add(cpu, part, ticks[rw], duration);
766 part_stat_add(cpu, part, sectors[rw], n_sect);
767 part_stat_add(cpu, part, io_ticks, duration);
768
769 part_stat_unlock();
765} 770}
766 771
767void 772void
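
The diskstats() hunk above switches from the old all_stat_* helpers to the per-CPU partition statistics API. Restated as a stand-alone sketch (only the block-layer helpers are real; the wrapper name is hypothetical): part_stat_lock() pins a CPU and enters an RCU read-side section, disk_map_sector_rcu() resolves the partition for the sector, and part_stat_unlock() undoes both.

#include <linux/genhd.h>
#include <linux/bio.h>

static void demo_account_io(struct gendisk *disk, struct bio *bio,
			    unsigned long duration, sector_t sector)
{
	const int rw = bio_data_dir(bio);
	struct hd_struct *part;
	int cpu;

	cpu = part_stat_lock();			/* RCU read lock + get_cpu() */
	part = disk_map_sector_rcu(disk, sector);	/* sector -> partition */
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, ticks[rw], duration);
	part_stat_unlock();
}
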
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index a1d813ab0d6b..6a8038d115b5 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -91,7 +91,7 @@ aoedev_downdev(struct aoedev *d)
91 } 91 }
92 92
93 if (d->gd) 93 if (d->gd)
94 d->gd->capacity = 0; 94 set_capacity(d->gd, 0);
95 95
96 d->flags &= ~DEVFL_UP; 96 d->flags &= ~DEVFL_UP;
97} 97}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index b73116ef9236..1e1f9153000c 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3460,8 +3460,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3460 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not"); 3460 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3461 3461
3462 hba[i]->cmd_pool_bits = 3462 hba[i]->cmd_pool_bits =
3463 kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG - 3463 kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
3464 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL); 3464 * sizeof(unsigned long), GFP_KERNEL);
3465 hba[i]->cmd_pool = (CommandList_struct *) 3465 hba[i]->cmd_pool = (CommandList_struct *)
3466 pci_alloc_consistent(hba[i]->pdev, 3466 pci_alloc_consistent(hba[i]->pdev,
3467 hba[i]->nr_cmds * sizeof(CommandList_struct), 3467 hba[i]->nr_cmds * sizeof(CommandList_struct),
@@ -3493,8 +3493,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3493 /* command and error info recs zeroed out before 3493 /* command and error info recs zeroed out before
3494 they are used */ 3494 they are used */
3495 memset(hba[i]->cmd_pool_bits, 0, 3495 memset(hba[i]->cmd_pool_bits, 0,
3496 ((hba[i]->nr_cmds + BITS_PER_LONG - 3496 DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
3497 1) / BITS_PER_LONG) * sizeof(unsigned long)); 3497 * sizeof(unsigned long));
3498 3498
3499 hba[i]->num_luns = 0; 3499 hba[i]->num_luns = 0;
3500 hba[i]->highest_lun = -1; 3500 hba[i]->highest_lun = -1;
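
The two cciss hunks above only replace an open-coded ceiling division with DIV_ROUND_UP when sizing the command-pool bitmap. A small userspace check (not kernel code; DIV_ROUND_UP is redefined locally and BITS_PER_LONG is fixed at 64 purely for the demonstration) that the two forms agree:

#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG 64
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	for (unsigned int nr_cmds = 0; nr_cmds <= 4096; nr_cmds++) {
		unsigned int old_words = (nr_cmds + BITS_PER_LONG - 1) / BITS_PER_LONG;
		unsigned int new_words = DIV_ROUND_UP(nr_cmds, BITS_PER_LONG);
		assert(old_words == new_words);
	}
	printf("bitmap word counts match for 0..4096 commands\n");
	return 0;
}
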
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index e1233aabda77..a3fd87b41444 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -365,7 +365,7 @@ struct scsi2map {
365 365
366static int 366static int
367cciss_scsi_add_entry(int ctlr, int hostno, 367cciss_scsi_add_entry(int ctlr, int hostno,
368 unsigned char *scsi3addr, int devtype, 368 struct cciss_scsi_dev_t *device,
369 struct scsi2map *added, int *nadded) 369 struct scsi2map *added, int *nadded)
370{ 370{
371 /* assumes hba[ctlr]->scsi_ctlr->lock is held */ 371 /* assumes hba[ctlr]->scsi_ctlr->lock is held */
@@ -384,12 +384,12 @@ cciss_scsi_add_entry(int ctlr, int hostno,
384 lun = 0; 384 lun = 0;
385 /* Is this device a non-zero lun of a multi-lun device */ 385 /* Is this device a non-zero lun of a multi-lun device */
386 /* byte 4 of the 8-byte LUN addr will contain the logical unit no. */ 386 /* byte 4 of the 8-byte LUN addr will contain the logical unit no. */
387 if (scsi3addr[4] != 0) { 387 if (device->scsi3addr[4] != 0) {
388 /* Search through our list and find the device which */ 388 /* Search through our list and find the device which */
389 /* has the same 8 byte LUN address, excepting byte 4. */ 389 /* has the same 8 byte LUN address, excepting byte 4. */
390 /* Assign the same bus and target for this new LUN. */ 390 /* Assign the same bus and target for this new LUN. */
391 /* Use the logical unit number from the firmware. */ 391 /* Use the logical unit number from the firmware. */
392 memcpy(addr1, scsi3addr, 8); 392 memcpy(addr1, device->scsi3addr, 8);
393 addr1[4] = 0; 393 addr1[4] = 0;
394 for (i = 0; i < n; i++) { 394 for (i = 0; i < n; i++) {
395 sd = &ccissscsi[ctlr].dev[i]; 395 sd = &ccissscsi[ctlr].dev[i];
@@ -399,7 +399,7 @@ cciss_scsi_add_entry(int ctlr, int hostno,
399 if (memcmp(addr1, addr2, 8) == 0) { 399 if (memcmp(addr1, addr2, 8) == 0) {
400 bus = sd->bus; 400 bus = sd->bus;
401 target = sd->target; 401 target = sd->target;
402 lun = scsi3addr[4]; 402 lun = device->scsi3addr[4];
403 break; 403 break;
404 } 404 }
405 } 405 }
@@ -420,8 +420,12 @@ cciss_scsi_add_entry(int ctlr, int hostno,
420 added[*nadded].lun = sd->lun; 420 added[*nadded].lun = sd->lun;
421 (*nadded)++; 421 (*nadded)++;
422 422
423 memcpy(&sd->scsi3addr[0], scsi3addr, 8); 423 memcpy(sd->scsi3addr, device->scsi3addr, 8);
424 sd->devtype = devtype; 424 memcpy(sd->vendor, device->vendor, sizeof(sd->vendor));
425 memcpy(sd->revision, device->revision, sizeof(sd->revision));
426 memcpy(sd->device_id, device->device_id, sizeof(sd->device_id));
427 sd->devtype = device->devtype;
428
425 ccissscsi[ctlr].ndevices++; 429 ccissscsi[ctlr].ndevices++;
426 430
427 /* initially, (before registering with scsi layer) we don't 431 /* initially, (before registering with scsi layer) we don't
@@ -487,6 +491,22 @@ static void fixup_botched_add(int ctlr, char *scsi3addr)
487 CPQ_TAPE_UNLOCK(ctlr, flags); 491 CPQ_TAPE_UNLOCK(ctlr, flags);
488} 492}
489 493
494static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
495 struct cciss_scsi_dev_t *dev2)
496{
497 return dev1->devtype == dev2->devtype &&
498 memcmp(dev1->scsi3addr, dev2->scsi3addr,
499 sizeof(dev1->scsi3addr)) == 0 &&
500 memcmp(dev1->device_id, dev2->device_id,
501 sizeof(dev1->device_id)) == 0 &&
502 memcmp(dev1->vendor, dev2->vendor,
503 sizeof(dev1->vendor)) == 0 &&
504 memcmp(dev1->model, dev2->model,
505 sizeof(dev1->model)) == 0 &&
506 memcmp(dev1->revision, dev2->revision,
507 sizeof(dev1->revision)) == 0;
508}
509
490static int 510static int
491adjust_cciss_scsi_table(int ctlr, int hostno, 511adjust_cciss_scsi_table(int ctlr, int hostno,
492 struct cciss_scsi_dev_t sd[], int nsds) 512 struct cciss_scsi_dev_t sd[], int nsds)
@@ -532,7 +552,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
532 for (j=0;j<nsds;j++) { 552 for (j=0;j<nsds;j++) {
533 if (SCSI3ADDR_EQ(sd[j].scsi3addr, 553 if (SCSI3ADDR_EQ(sd[j].scsi3addr,
534 csd->scsi3addr)) { 554 csd->scsi3addr)) {
535 if (sd[j].devtype == csd->devtype) 555 if (device_is_the_same(&sd[j], csd))
536 found=2; 556 found=2;
537 else 557 else
538 found=1; 558 found=1;
@@ -548,22 +568,26 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
548 cciss_scsi_remove_entry(ctlr, hostno, i, 568 cciss_scsi_remove_entry(ctlr, hostno, i,
549 removed, &nremoved); 569 removed, &nremoved);
550 /* remove ^^^, hence i not incremented */ 570 /* remove ^^^, hence i not incremented */
551 } 571 } else if (found == 1) { /* device is different in some way */
552 else if (found == 1) { /* device is different kind */
553 changes++; 572 changes++;
554 printk("cciss%d: device c%db%dt%dl%d type changed " 573 printk("cciss%d: device c%db%dt%dl%d has changed.\n",
555 "(device type now %s).\n", 574 ctlr, hostno, csd->bus, csd->target, csd->lun);
556 ctlr, hostno, csd->bus, csd->target, csd->lun,
557 scsi_device_type(csd->devtype));
558 cciss_scsi_remove_entry(ctlr, hostno, i, 575 cciss_scsi_remove_entry(ctlr, hostno, i,
559 removed, &nremoved); 576 removed, &nremoved);
560 /* remove ^^^, hence i not incremented */ 577 /* remove ^^^, hence i not incremented */
561 if (cciss_scsi_add_entry(ctlr, hostno, 578 if (cciss_scsi_add_entry(ctlr, hostno, &sd[j],
562 &sd[j].scsi3addr[0], sd[j].devtype,
563 added, &nadded) != 0) 579 added, &nadded) != 0)
564 /* we just removed one, so add can't fail. */ 580 /* we just removed one, so add can't fail. */
565 BUG(); 581 BUG();
566 csd->devtype = sd[j].devtype; 582 csd->devtype = sd[j].devtype;
583 memcpy(csd->device_id, sd[j].device_id,
584 sizeof(csd->device_id));
585 memcpy(csd->vendor, sd[j].vendor,
586 sizeof(csd->vendor));
587 memcpy(csd->model, sd[j].model,
588 sizeof(csd->model));
589 memcpy(csd->revision, sd[j].revision,
590 sizeof(csd->revision));
567 } else /* device is same as it ever was, */ 591 } else /* device is same as it ever was, */
568 i++; /* so just move along. */ 592 i++; /* so just move along. */
569 } 593 }
@@ -577,7 +601,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
577 csd = &ccissscsi[ctlr].dev[j]; 601 csd = &ccissscsi[ctlr].dev[j];
578 if (SCSI3ADDR_EQ(sd[i].scsi3addr, 602 if (SCSI3ADDR_EQ(sd[i].scsi3addr,
579 csd->scsi3addr)) { 603 csd->scsi3addr)) {
580 if (sd[i].devtype == csd->devtype) 604 if (device_is_the_same(&sd[i], csd))
581 found=2; /* found device */ 605 found=2; /* found device */
582 else 606 else
583 found=1; /* found a bug. */ 607 found=1; /* found a bug. */
@@ -586,16 +610,14 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
586 } 610 }
587 if (!found) { 611 if (!found) {
588 changes++; 612 changes++;
589 if (cciss_scsi_add_entry(ctlr, hostno, 613 if (cciss_scsi_add_entry(ctlr, hostno, &sd[i],
590
591 &sd[i].scsi3addr[0], sd[i].devtype,
592 added, &nadded) != 0) 614 added, &nadded) != 0)
593 break; 615 break;
594 } else if (found == 1) { 616 } else if (found == 1) {
595 /* should never happen... */ 617 /* should never happen... */
596 changes++; 618 changes++;
597 printk("cciss%d: device unexpectedly changed type\n", 619 printk(KERN_WARNING "cciss%d: device "
598 ctlr); 620 "unexpectedly changed\n", ctlr);
599 /* but if it does happen, we just ignore that device */ 621 /* but if it does happen, we just ignore that device */
600 } 622 }
601 } 623 }
@@ -1012,7 +1034,8 @@ cciss_scsi_interpret_error(CommandList_struct *cp)
1012 1034
1013static int 1035static int
1014cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr, 1036cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
1015 unsigned char *buf, unsigned char bufsize) 1037 unsigned char page, unsigned char *buf,
1038 unsigned char bufsize)
1016{ 1039{
1017 int rc; 1040 int rc;
1018 CommandList_struct *cp; 1041 CommandList_struct *cp;
@@ -1032,8 +1055,8 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
1032 ei = cp->err_info; 1055 ei = cp->err_info;
1033 1056
1034 cdb[0] = CISS_INQUIRY; 1057 cdb[0] = CISS_INQUIRY;
1035 cdb[1] = 0; 1058 cdb[1] = (page != 0);
1036 cdb[2] = 0; 1059 cdb[2] = page;
1037 cdb[3] = 0; 1060 cdb[3] = 0;
1038 cdb[4] = bufsize; 1061 cdb[4] = bufsize;
1039 cdb[5] = 0; 1062 cdb[5] = 0;
@@ -1053,6 +1076,25 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
1053 return rc; 1076 return rc;
1054} 1077}
1055 1078
1079/* Get the device id from inquiry page 0x83 */
1080static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
1081 unsigned char *device_id, int buflen)
1082{
1083 int rc;
1084 unsigned char *buf;
1085
1086 if (buflen > 16)
1087 buflen = 16;
1088 buf = kzalloc(64, GFP_KERNEL);
1089 if (!buf)
1090 return -1;
1091 rc = cciss_scsi_do_inquiry(c, scsi3addr, 0x83, buf, 64);
1092 if (rc == 0)
1093 memcpy(device_id, &buf[8], buflen);
1094 kfree(buf);
1095 return rc != 0;
1096}
1097
1056static int 1098static int
1057cciss_scsi_do_report_phys_luns(ctlr_info_t *c, 1099cciss_scsi_do_report_phys_luns(ctlr_info_t *c,
1058 ReportLunData_struct *buf, int bufsize) 1100 ReportLunData_struct *buf, int bufsize)
@@ -1142,25 +1184,21 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
1142 ctlr_info_t *c; 1184 ctlr_info_t *c;
1143 __u32 num_luns=0; 1185 __u32 num_luns=0;
1144 unsigned char *ch; 1186 unsigned char *ch;
1145 /* unsigned char found[CCISS_MAX_SCSI_DEVS_PER_HBA]; */ 1187 struct cciss_scsi_dev_t *currentsd, *this_device;
1146 struct cciss_scsi_dev_t currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
1147 int ncurrent=0; 1188 int ncurrent=0;
1148 int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8; 1189 int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
1149 int i; 1190 int i;
1150 1191
1151 c = (ctlr_info_t *) hba[cntl_num]; 1192 c = (ctlr_info_t *) hba[cntl_num];
1152 ld_buff = kzalloc(reportlunsize, GFP_KERNEL); 1193 ld_buff = kzalloc(reportlunsize, GFP_KERNEL);
1153 if (ld_buff == NULL) {
1154 printk(KERN_ERR "cciss: out of memory\n");
1155 return;
1156 }
1157 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 1194 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1158 if (inq_buff == NULL) { 1195 currentsd = kzalloc(sizeof(*currentsd) *
1159 printk(KERN_ERR "cciss: out of memory\n"); 1196 (CCISS_MAX_SCSI_DEVS_PER_HBA+1), GFP_KERNEL);
1160 kfree(ld_buff); 1197 if (ld_buff == NULL || inq_buff == NULL || currentsd == NULL) {
1161 return; 1198 printk(KERN_ERR "cciss: out of memory\n");
1199 goto out;
1162 } 1200 }
1163 1201 this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
1164 if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) { 1202 if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) {
1165 ch = &ld_buff->LUNListLength[0]; 1203 ch = &ld_buff->LUNListLength[0];
1166 num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8; 1204 num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
@@ -1179,23 +1217,34 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
1179 1217
1180 1218
1181 /* adjust our table of devices */ 1219 /* adjust our table of devices */
1182 for(i=0; i<num_luns; i++) 1220 for (i = 0; i < num_luns; i++) {
1183 {
1184 int devtype;
1185
1186 /* for each physical lun, do an inquiry */ 1221 /* for each physical lun, do an inquiry */
1187 if (ld_buff->LUN[i][3] & 0xC0) continue; 1222 if (ld_buff->LUN[i][3] & 0xC0) continue;
1188 memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE); 1223 memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
1189 memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8); 1224 memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
1190 1225
1191 if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, inq_buff, 1226 if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, 0, inq_buff,
1192 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { 1227 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0)
1193 /* Inquiry failed (msg printed already) */ 1228 /* Inquiry failed (msg printed already) */
1194 devtype = 0; /* so we will skip this device. */ 1229 continue; /* so we will skip this device. */
1195 } else /* what kind of device is this? */ 1230
1196 devtype = (inq_buff[0] & 0x1f); 1231 this_device->devtype = (inq_buff[0] & 0x1f);
1197 1232 this_device->bus = -1;
1198 switch (devtype) 1233 this_device->target = -1;
1234 this_device->lun = -1;
1235 memcpy(this_device->scsi3addr, scsi3addr, 8);
1236 memcpy(this_device->vendor, &inq_buff[8],
1237 sizeof(this_device->vendor));
1238 memcpy(this_device->model, &inq_buff[16],
1239 sizeof(this_device->model));
1240 memcpy(this_device->revision, &inq_buff[32],
1241 sizeof(this_device->revision));
1242 memset(this_device->device_id, 0,
1243 sizeof(this_device->device_id));
1244 cciss_scsi_get_device_id(hba[cntl_num], scsi3addr,
1245 this_device->device_id, sizeof(this_device->device_id));
1246
1247 switch (this_device->devtype)
1199 { 1248 {
1200 case 0x05: /* CD-ROM */ { 1249 case 0x05: /* CD-ROM */ {
1201 1250
@@ -1220,15 +1269,10 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
1220 if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) { 1269 if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
1221 printk(KERN_INFO "cciss%d: %s ignored, " 1270 printk(KERN_INFO "cciss%d: %s ignored, "
1222 "too many devices.\n", cntl_num, 1271 "too many devices.\n", cntl_num,
1223 scsi_device_type(devtype)); 1272 scsi_device_type(this_device->devtype));
1224 break; 1273 break;
1225 } 1274 }
1226 memcpy(&currentsd[ncurrent].scsi3addr[0], 1275 currentsd[ncurrent] = *this_device;
1227 &scsi3addr[0], 8);
1228 currentsd[ncurrent].devtype = devtype;
1229 currentsd[ncurrent].bus = -1;
1230 currentsd[ncurrent].target = -1;
1231 currentsd[ncurrent].lun = -1;
1232 ncurrent++; 1276 ncurrent++;
1233 break; 1277 break;
1234 default: 1278 default:
@@ -1240,6 +1284,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
1240out: 1284out:
1241 kfree(inq_buff); 1285 kfree(inq_buff);
1242 kfree(ld_buff); 1286 kfree(ld_buff);
1287 kfree(currentsd);
1243 return; 1288 return;
1244} 1289}
1245 1290
diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h
index d9c2c586502f..7b750245ae76 100644
--- a/drivers/block/cciss_scsi.h
+++ b/drivers/block/cciss_scsi.h
@@ -66,6 +66,10 @@ struct cciss_scsi_dev_t {
66 int devtype; 66 int devtype;
67 int bus, target, lun; /* as presented to the OS */ 67 int bus, target, lun; /* as presented to the OS */
68 unsigned char scsi3addr[8]; /* as presented to the HW */ 68 unsigned char scsi3addr[8]; /* as presented to the HW */
69 unsigned char device_id[16]; /* from inquiry pg. 0x83 */
70 unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
71 unsigned char model[16]; /* bytes 16-31 of inquiry data */
72 unsigned char revision[4]; /* bytes 32-35 of inquiry data */
69}; 73};
70 74
71struct cciss_scsi_hba_t { 75struct cciss_scsi_hba_t {
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 09c14341e6e3..3d967525e9a9 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -424,7 +424,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
424 hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t), 424 hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
425 &(hba[i]->cmd_pool_dhandle)); 425 &(hba[i]->cmd_pool_dhandle));
426 hba[i]->cmd_pool_bits = kcalloc( 426 hba[i]->cmd_pool_bits = kcalloc(
427 (NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG, sizeof(unsigned long), 427 DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG), sizeof(unsigned long),
428 GFP_KERNEL); 428 GFP_KERNEL);
429 429
430 if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool) 430 if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 395f8ea7981c..cf64ddf5d839 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -423,8 +423,15 @@ static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
423 * 1581's logical side 0 is on physical side 1, whereas the Sharp's logical 423 * 1581's logical side 0 is on physical side 1, whereas the Sharp's logical
424 * side 0 is on physical side 0 (but with the misnamed sector IDs). 424 * side 0 is on physical side 0 (but with the misnamed sector IDs).
425 * 'stretch' should probably be renamed to something more general, like 425 * 'stretch' should probably be renamed to something more general, like
426 * 'options'. Other parameters should be self-explanatory (see also 426 * 'options'.
427 * setfdprm(8)). 427 *
428 * Bits 2 through 9 of 'stretch' tell the number of the first sector.
429 * The LSB (bit 2) is flipped. For most disks, the first sector
430 * is 1 (represented by 0x00<<2). For some CP/M and music sampler
431 * disks (such as Ensoniq EPS 16plus) it is 0 (represented as 0x01<<2).
432 * For Amstrad CPC disks it is 0xC1 (represented as 0xC0<<2).
433 *
434 * Other parameters should be self-explanatory (see also setfdprm(8)).
428 */ 435 */
429/* 436/*
430 Size 437 Size
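
A userspace illustration of the first-sector encoding described in the new 'stretch' comment above. The two macros are a local reconstruction of what the matching <linux/fd.h> change is assumed to provide (that header is not part of this hunk), and FD_SECTBASE is adapted here to take the raw stretch value rather than a floppy_struct:

#include <stdio.h>

#define FD_SECTBASEMASK		0x3FC			/* bits 2..9 of 'stretch' */
#define FD_MKSECTBASE(s)	(((s) ^ 1) << 2)	/* encode first sector, LSB flipped */
#define FD_SECTBASE(stretch)	((((stretch) & FD_SECTBASEMASK) >> 2) ^ 1)

int main(void)
{
	int first_sectors[] = { 1, 0, 0xC1 };	/* PC, Ensoniq EPS 16plus, Amstrad CPC */

	for (int i = 0; i < 3; i++) {
		int stretch = FD_MKSECTBASE(first_sectors[i]);
		printf("first sector 0x%02X -> stretch bits 0x%03X -> decoded 0x%02X\n",
		       first_sectors[i], stretch, FD_SECTBASE(stretch));
	}
	return 0;
}
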
@@ -1355,20 +1362,20 @@ static void fdc_specify(void)
1355 } 1362 }
1356 1363
1357 /* Convert step rate from microseconds to milliseconds and 4 bits */ 1364 /* Convert step rate from microseconds to milliseconds and 4 bits */
1358 srt = 16 - (DP->srt * scale_dtr / 1000 + NOMINAL_DTR - 1) / NOMINAL_DTR; 1365 srt = 16 - DIV_ROUND_UP(DP->srt * scale_dtr / 1000, NOMINAL_DTR);
1359 if (slow_floppy) { 1366 if (slow_floppy) {
1360 srt = srt / 4; 1367 srt = srt / 4;
1361 } 1368 }
1362 SUPBOUND(srt, 0xf); 1369 SUPBOUND(srt, 0xf);
1363 INFBOUND(srt, 0); 1370 INFBOUND(srt, 0);
1364 1371
1365 hlt = (DP->hlt * scale_dtr / 2 + NOMINAL_DTR - 1) / NOMINAL_DTR; 1372 hlt = DIV_ROUND_UP(DP->hlt * scale_dtr / 2, NOMINAL_DTR);
1366 if (hlt < 0x01) 1373 if (hlt < 0x01)
1367 hlt = 0x01; 1374 hlt = 0x01;
1368 else if (hlt > 0x7f) 1375 else if (hlt > 0x7f)
1369 hlt = hlt_max_code; 1376 hlt = hlt_max_code;
1370 1377
1371 hut = (DP->hut * scale_dtr / 16 + NOMINAL_DTR - 1) / NOMINAL_DTR; 1378 hut = DIV_ROUND_UP(DP->hut * scale_dtr / 16, NOMINAL_DTR);
1372 if (hut < 0x1) 1379 if (hut < 0x1)
1373 hut = 0x1; 1380 hut = 0x1;
1374 else if (hut > 0xf) 1381 else if (hut > 0xf)
@@ -2236,9 +2243,9 @@ static void setup_format_params(int track)
2236 } 2243 }
2237 } 2244 }
2238 } 2245 }
2239 if (_floppy->stretch & FD_ZEROBASED) { 2246 if (_floppy->stretch & FD_SECTBASEMASK) {
2240 for (count = 0; count < F_SECT_PER_TRACK; count++) 2247 for (count = 0; count < F_SECT_PER_TRACK; count++)
2241 here[count].sect--; 2248 here[count].sect += FD_SECTBASE(_floppy) - 1;
2242 } 2249 }
2243} 2250}
2244 2251
@@ -2385,7 +2392,7 @@ static void rw_interrupt(void)
2385 2392
2386#ifdef FLOPPY_SANITY_CHECK 2393#ifdef FLOPPY_SANITY_CHECK
2387 if (nr_sectors / ssize > 2394 if (nr_sectors / ssize >
2388 (in_sector_offset + current_count_sectors + ssize - 1) / ssize) { 2395 DIV_ROUND_UP(in_sector_offset + current_count_sectors, ssize)) {
2389 DPRINT("long rw: %x instead of %lx\n", 2396 DPRINT("long rw: %x instead of %lx\n",
2390 nr_sectors, current_count_sectors); 2397 nr_sectors, current_count_sectors);
2391 printk("rs=%d s=%d\n", R_SECTOR, SECTOR); 2398 printk("rs=%d s=%d\n", R_SECTOR, SECTOR);
@@ -2649,7 +2656,7 @@ static int make_raw_rw_request(void)
2649 } 2656 }
2650 HEAD = fsector_t / _floppy->sect; 2657 HEAD = fsector_t / _floppy->sect;
2651 2658
2652 if (((_floppy->stretch & (FD_SWAPSIDES | FD_ZEROBASED)) || 2659 if (((_floppy->stretch & (FD_SWAPSIDES | FD_SECTBASEMASK)) ||
2653 TESTF(FD_NEED_TWADDLE)) && fsector_t < _floppy->sect) 2660 TESTF(FD_NEED_TWADDLE)) && fsector_t < _floppy->sect)
2654 max_sector = _floppy->sect; 2661 max_sector = _floppy->sect;
2655 2662
@@ -2679,7 +2686,7 @@ static int make_raw_rw_request(void)
2679 CODE2SIZE; 2686 CODE2SIZE;
2680 SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE; 2687 SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE;
2681 SECTOR = ((fsector_t % _floppy->sect) << 2 >> SIZECODE) + 2688 SECTOR = ((fsector_t % _floppy->sect) << 2 >> SIZECODE) +
2682 ((_floppy->stretch & FD_ZEROBASED) ? 0 : 1); 2689 FD_SECTBASE(_floppy);
2683 2690
2684 /* tracksize describes the size which can be filled up with sectors 2691 /* tracksize describes the size which can be filled up with sectors
2685 * of size ssize. 2692 * of size ssize.
@@ -3311,7 +3318,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
3311 g->head <= 0 || 3318 g->head <= 0 ||
3312 g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) || 3319 g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) ||
3313 /* check if reserved bits are set */ 3320 /* check if reserved bits are set */
3314 (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_ZEROBASED)) != 0) 3321 (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0)
3315 return -EINVAL; 3322 return -EINVAL;
3316 if (type) { 3323 if (type) {
3317 if (!capable(CAP_SYS_ADMIN)) 3324 if (!capable(CAP_SYS_ADMIN))
@@ -3356,7 +3363,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
3356 if (DRS->maxblock > user_params[drive].sect || 3363 if (DRS->maxblock > user_params[drive].sect ||
3357 DRS->maxtrack || 3364 DRS->maxtrack ||
3358 ((user_params[drive].sect ^ oldStretch) & 3365 ((user_params[drive].sect ^ oldStretch) &
3359 (FD_SWAPSIDES | FD_ZEROBASED))) 3366 (FD_SWAPSIDES | FD_SECTBASEMASK)))
3360 invalidate_drive(bdev); 3367 invalidate_drive(bdev);
3361 else 3368 else
3362 process_fd_request(); 3369 process_fd_request();
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 1778e4a2c672..7b3351260d56 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -403,7 +403,7 @@ static int nbd_do_it(struct nbd_device *lo)
403 BUG_ON(lo->magic != LO_MAGIC); 403 BUG_ON(lo->magic != LO_MAGIC);
404 404
405 lo->pid = current->pid; 405 lo->pid = current->pid;
406 ret = sysfs_create_file(&lo->disk->dev.kobj, &pid_attr.attr); 406 ret = sysfs_create_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
407 if (ret) { 407 if (ret) {
408 printk(KERN_ERR "nbd: sysfs_create_file failed!"); 408 printk(KERN_ERR "nbd: sysfs_create_file failed!");
409 return ret; 409 return ret;
@@ -412,7 +412,7 @@ static int nbd_do_it(struct nbd_device *lo)
412 while ((req = nbd_read_stat(lo)) != NULL) 412 while ((req = nbd_read_stat(lo)) != NULL)
413 nbd_end_request(req); 413 nbd_end_request(req);
414 414
415 sysfs_remove_file(&lo->disk->dev.kobj, &pid_attr.attr); 415 sysfs_remove_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
416 return 0; 416 return 0;
417} 417}
418 418
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 29b7a648cc6e..0e077150568b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2544,7 +2544,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
2544 if (last_zone != zone) { 2544 if (last_zone != zone) {
2545 BUG_ON(last_zone != zone + pd->settings.size); 2545 BUG_ON(last_zone != zone + pd->settings.size);
2546 first_sectors = last_zone - bio->bi_sector; 2546 first_sectors = last_zone - bio->bi_sector;
2547 bp = bio_split(bio, bio_split_pool, first_sectors); 2547 bp = bio_split(bio, first_sectors);
2548 BUG_ON(!bp); 2548 BUG_ON(!bp);
2549 pkt_make_request(q, &bp->bio1); 2549 pkt_make_request(q, &bp->bio1);
2550 pkt_make_request(q, &bp->bio2); 2550 pkt_make_request(q, &bp->bio2);
@@ -2911,7 +2911,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
2911 if (!disk->queue) 2911 if (!disk->queue)
2912 goto out_mem2; 2912 goto out_mem2;
2913 2913
2914 pd->pkt_dev = MKDEV(disk->major, disk->first_minor); 2914 pd->pkt_dev = MKDEV(pktdev_major, idx);
2915 ret = pkt_new_dev(pd, dev); 2915 ret = pkt_new_dev(pd, dev);
2916 if (ret) 2916 if (ret)
2917 goto out_new_dev; 2917 goto out_new_dev;
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index d797e209951d..936466f62afd 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -199,7 +199,8 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
199 if (blk_fs_request(req)) { 199 if (blk_fs_request(req)) {
200 if (ps3disk_submit_request_sg(dev, req)) 200 if (ps3disk_submit_request_sg(dev, req))
201 break; 201 break;
202 } else if (req->cmd_type == REQ_TYPE_FLUSH) { 202 } else if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
203 req->cmd[0] == REQ_LB_OP_FLUSH) {
203 if (ps3disk_submit_flush_request(dev, req)) 204 if (ps3disk_submit_flush_request(dev, req))
204 break; 205 break;
205 } else { 206 } else {
@@ -257,7 +258,8 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
257 return IRQ_HANDLED; 258 return IRQ_HANDLED;
258 } 259 }
259 260
260 if (req->cmd_type == REQ_TYPE_FLUSH) { 261 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
262 req->cmd[0] == REQ_LB_OP_FLUSH) {
261 read = 0; 263 read = 0;
262 num_sectors = req->hard_cur_sectors; 264 num_sectors = req->hard_cur_sectors;
263 op = "flush"; 265 op = "flush";
@@ -405,7 +407,8 @@ static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
405 407
406 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); 408 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
407 409
408 req->cmd_type = REQ_TYPE_FLUSH; 410 req->cmd_type = REQ_TYPE_LINUX_BLOCK;
411 req->cmd[0] = REQ_LB_OP_FLUSH;
409} 412}
410 413
411static unsigned long ps3disk_mask; 414static unsigned long ps3disk_mask;
@@ -538,7 +541,7 @@ static int ps3disk_remove(struct ps3_system_bus_device *_dev)
538 struct ps3disk_private *priv = dev->sbd.core.driver_data; 541 struct ps3disk_private *priv = dev->sbd.core.driver_data;
539 542
540 mutex_lock(&ps3disk_mask_mutex); 543 mutex_lock(&ps3disk_mask_mutex);
541 __clear_bit(priv->gendisk->first_minor / PS3DISK_MINORS, 544 __clear_bit(MINOR(disk_devt(priv->gendisk)) / PS3DISK_MINORS,
542 &ps3disk_mask); 545 &ps3disk_mask);
543 mutex_unlock(&ps3disk_mask_mutex); 546 mutex_unlock(&ps3disk_mask_mutex);
544 del_gendisk(priv->gendisk); 547 del_gendisk(priv->gendisk);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 42251095134f..6ec5fc052786 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -47,20 +47,20 @@ static void blk_done(struct virtqueue *vq)
47 47
48 spin_lock_irqsave(&vblk->lock, flags); 48 spin_lock_irqsave(&vblk->lock, flags);
49 while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) { 49 while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
50 int uptodate; 50 int error;
51 switch (vbr->status) { 51 switch (vbr->status) {
52 case VIRTIO_BLK_S_OK: 52 case VIRTIO_BLK_S_OK:
53 uptodate = 1; 53 error = 0;
54 break; 54 break;
55 case VIRTIO_BLK_S_UNSUPP: 55 case VIRTIO_BLK_S_UNSUPP:
56 uptodate = -ENOTTY; 56 error = -ENOTTY;
57 break; 57 break;
58 default: 58 default:
59 uptodate = 0; 59 error = -EIO;
60 break; 60 break;
61 } 61 }
62 62
63 end_dequeued_request(vbr->req, uptodate); 63 __blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req));
64 list_del(&vbr->list); 64 list_del(&vbr->list);
65 mempool_free(vbr, vblk->pool); 65 mempool_free(vbr, vblk->pool);
66 } 66 }
@@ -84,11 +84,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
84 if (blk_fs_request(vbr->req)) { 84 if (blk_fs_request(vbr->req)) {
85 vbr->out_hdr.type = 0; 85 vbr->out_hdr.type = 0;
86 vbr->out_hdr.sector = vbr->req->sector; 86 vbr->out_hdr.sector = vbr->req->sector;
87 vbr->out_hdr.ioprio = vbr->req->ioprio; 87 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
88 } else if (blk_pc_request(vbr->req)) { 88 } else if (blk_pc_request(vbr->req)) {
89 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; 89 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
90 vbr->out_hdr.sector = 0; 90 vbr->out_hdr.sector = 0;
91 vbr->out_hdr.ioprio = vbr->req->ioprio; 91 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
92 } else { 92 } else {
93 /* We don't put anything else in the queue. */ 93 /* We don't put anything else in the queue. */
94 BUG(); 94 BUG();
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3ca643cafccd..bff602ccccf3 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -105,15 +105,17 @@ static DEFINE_SPINLOCK(blkif_io_lock);
105#define GRANT_INVALID_REF 0 105#define GRANT_INVALID_REF 0
106 106
107#define PARTS_PER_DISK 16 107#define PARTS_PER_DISK 16
108#define PARTS_PER_EXT_DISK 256
108 109
109#define BLKIF_MAJOR(dev) ((dev)>>8) 110#define BLKIF_MAJOR(dev) ((dev)>>8)
110#define BLKIF_MINOR(dev) ((dev) & 0xff) 111#define BLKIF_MINOR(dev) ((dev) & 0xff)
111 112
112#define DEV_NAME "xvd" /* name in /dev */ 113#define EXT_SHIFT 28
114#define EXTENDED (1<<EXT_SHIFT)
115#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
116#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
113 117
114/* Information about our VBDs. */ 118#define DEV_NAME "xvd" /* name in /dev */
115#define MAX_VBDS 64
116static LIST_HEAD(vbds_list);
117 119
118static int get_id_from_freelist(struct blkfront_info *info) 120static int get_id_from_freelist(struct blkfront_info *info)
119{ 121{
@@ -386,31 +388,60 @@ static int xlvbd_barrier(struct blkfront_info *info)
386} 388}
387 389
388 390
389static int xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity, 391static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
390 int vdevice, u16 vdisk_info, u16 sector_size, 392 struct blkfront_info *info,
391 struct blkfront_info *info) 393 u16 vdisk_info, u16 sector_size)
392{ 394{
393 struct gendisk *gd; 395 struct gendisk *gd;
394 int nr_minors = 1; 396 int nr_minors = 1;
395 int err = -ENODEV; 397 int err = -ENODEV;
398 unsigned int offset;
399 int minor;
400 int nr_parts;
396 401
397 BUG_ON(info->gd != NULL); 402 BUG_ON(info->gd != NULL);
398 BUG_ON(info->rq != NULL); 403 BUG_ON(info->rq != NULL);
399 404
400 if ((minor % PARTS_PER_DISK) == 0) 405 if ((info->vdevice>>EXT_SHIFT) > 1) {
401 nr_minors = PARTS_PER_DISK; 406 /* this is above the extended range; something is wrong */
407 printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
408 return -ENODEV;
409 }
410
411 if (!VDEV_IS_EXTENDED(info->vdevice)) {
412 minor = BLKIF_MINOR(info->vdevice);
413 nr_parts = PARTS_PER_DISK;
414 } else {
415 minor = BLKIF_MINOR_EXT(info->vdevice);
416 nr_parts = PARTS_PER_EXT_DISK;
417 }
418
419 if ((minor % nr_parts) == 0)
420 nr_minors = nr_parts;
402 421
403 gd = alloc_disk(nr_minors); 422 gd = alloc_disk(nr_minors);
404 if (gd == NULL) 423 if (gd == NULL)
405 goto out; 424 goto out;
406 425
407 if (nr_minors > 1) 426 offset = minor / nr_parts;
408 sprintf(gd->disk_name, "%s%c", DEV_NAME, 427
409 'a' + minor / PARTS_PER_DISK); 428 if (nr_minors > 1) {
410 else 429 if (offset < 26)
411 sprintf(gd->disk_name, "%s%c%d", DEV_NAME, 430 sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
412 'a' + minor / PARTS_PER_DISK, 431 else
413 minor % PARTS_PER_DISK); 432 sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
433 'a' + ((offset / 26)-1), 'a' + (offset % 26));
434 } else {
435 if (offset < 26)
436 sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
437 'a' + offset,
438 minor & (nr_parts - 1));
439 else
440 sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
441 'a' + ((offset / 26) - 1),
442 'a' + (offset % 26),
443 minor & (nr_parts - 1));
444 }
414 445
415 gd->major = XENVBD_MAJOR; 446 gd->major = XENVBD_MAJOR;
416 gd->first_minor = minor; 447 gd->first_minor = minor;
@@ -699,8 +730,13 @@ static int blkfront_probe(struct xenbus_device *dev,
699 err = xenbus_scanf(XBT_NIL, dev->nodename, 730 err = xenbus_scanf(XBT_NIL, dev->nodename,
700 "virtual-device", "%i", &vdevice); 731 "virtual-device", "%i", &vdevice);
701 if (err != 1) { 732 if (err != 1) {
702 xenbus_dev_fatal(dev, err, "reading virtual-device"); 733 /* go looking in the extended area instead */
703 return err; 734 err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
735 "%i", &vdevice);
736 if (err != 1) {
737 xenbus_dev_fatal(dev, err, "reading virtual-device");
738 return err;
739 }
704 } 740 }
705 741
706 info = kzalloc(sizeof(*info), GFP_KERNEL); 742 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -861,9 +897,7 @@ static void blkfront_connect(struct blkfront_info *info)
861 if (err) 897 if (err)
862 info->feature_barrier = 0; 898 info->feature_barrier = 0;
863 899
864 err = xlvbd_alloc_gendisk(BLKIF_MINOR(info->vdevice), 900 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
865 sectors, info->vdevice,
866 binfo, sector_size, info);
867 if (err) { 901 if (err) {
868 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", 902 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
869 info->xbdev->otherend); 903 info->xbdev->otherend);
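As an aside on the xen-blkfront hunk above: the new xlvbd_alloc_gendisk() derives the xvd disk name from either the classic 16-partition minor space or the extended 28-bit space, spilling into two-letter suffixes ("xvdaa", "xvdab", ...) once the disk offset passes 25. Below is a minimal standalone userspace sketch of that naming logic; it reuses the constants from the patch, but the helper and the sample vdevice values are illustrative only, not taken from the driver.

/* Sketch of the xvd naming scheme; constants match the patch above. */
#include <stdio.h>

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256
#define EXT_SHIFT		28
#define EXTENDED		(1 << EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev)	((dev) & EXTENDED)
#define BLKIF_MINOR(dev)	((dev) & 0xff)
#define BLKIF_MINOR_EXT(dev)	((dev) & ~EXTENDED)

static void xvd_name(unsigned int vdevice, char *buf)
{
	unsigned int minor, nr_parts, offset;

	if (!VDEV_IS_EXTENDED(vdevice)) {
		minor = BLKIF_MINOR(vdevice);
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
	}

	offset = minor / nr_parts;

	if ((minor % nr_parts) == 0) {
		/* Whole disk: no partition digit. */
		if (offset < 26)
			sprintf(buf, "xvd%c", 'a' + offset);
		else
			sprintf(buf, "xvd%c%c",
				'a' + (offset / 26) - 1, 'a' + (offset % 26));
	} else {
		if (offset < 26)
			sprintf(buf, "xvd%c%d",
				'a' + offset, minor & (nr_parts - 1));
		else
			sprintf(buf, "xvd%c%c%d",
				'a' + (offset / 26) - 1, 'a' + (offset % 26),
				minor & (nr_parts - 1));
	}
}

int main(void)
{
	/* 0xca00.. are the classic xvda/xvda1/xvdb device numbers
	 * (major 202); the last two exercise the extended range. */
	unsigned int samples[] = {
		0xca00, 0xca01, 0xca10,
		EXTENDED | (26 * 256),
		EXTENDED | (27 * 256 + 5),
	};
	char buf[16];

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		xvd_name(samples[i], buf);
		printf("vdevice 0x%08x -> %s\n", samples[i], buf);
	}
	return 0;
}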
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 1e55a658e6ce..32f3a8ed8d3d 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -256,7 +256,6 @@ static inline int bpa10x_submit_intr_urb(struct hci_dev *hdev)
256 BT_ERR("%s urb %p submission failed (%d)", 256 BT_ERR("%s urb %p submission failed (%d)",
257 hdev->name, urb, -err); 257 hdev->name, urb, -err);
258 usb_unanchor_urb(urb); 258 usb_unanchor_urb(urb);
259 kfree(buf);
260 } 259 }
261 260
262 usb_free_urb(urb); 261 usb_free_urb(urb);
@@ -298,7 +297,6 @@ static inline int bpa10x_submit_bulk_urb(struct hci_dev *hdev)
298 BT_ERR("%s urb %p submission failed (%d)", 297 BT_ERR("%s urb %p submission failed (%d)",
299 hdev->name, urb, -err); 298 hdev->name, urb, -err);
300 usb_unanchor_urb(urb); 299 usb_unanchor_urb(urb);
301 kfree(buf);
302 } 300 }
303 301
304 usb_free_urb(urb); 302 usb_free_urb(urb);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 6a010681ecf3..af472e052732 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,14 +102,19 @@ static struct usb_device_id blacklist_table[] = {
102 { USB_DEVICE(0x0a5c, 0x2101), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, 102 { USB_DEVICE(0x0a5c, 0x2101), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
103 103
104 /* Broadcom BCM2046 */ 104 /* Broadcom BCM2046 */
105 { USB_DEVICE(0x0a5c, 0x2146), .driver_info = BTUSB_RESET },
105 { USB_DEVICE(0x0a5c, 0x2151), .driver_info = BTUSB_RESET }, 106 { USB_DEVICE(0x0a5c, 0x2151), .driver_info = BTUSB_RESET },
106 107
108 /* Apple MacBook Pro with Broadcom chip */
109 { USB_DEVICE(0x05ac, 0x820f), .driver_info = BTUSB_RESET },
110
107 /* IBM/Lenovo ThinkPad with Broadcom chip */ 111 /* IBM/Lenovo ThinkPad with Broadcom chip */
108 { USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, 112 { USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
109 { USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, 113 { USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
110 114
111 /* Targus ACB10US */ 115 /* Targus ACB10US */
112 { USB_DEVICE(0x0a5c, 0x2100), .driver_info = BTUSB_RESET }, 116 { USB_DEVICE(0x0a5c, 0x2100), .driver_info = BTUSB_RESET },
117 { USB_DEVICE(0x0a5c, 0x2154), .driver_info = BTUSB_RESET },
113 118
114 /* ANYCOM Bluetooth USB-200 and USB-250 */ 119 /* ANYCOM Bluetooth USB-200 and USB-250 */
115 { USB_DEVICE(0x0a5c, 0x2111), .driver_info = BTUSB_RESET }, 120 { USB_DEVICE(0x0a5c, 0x2111), .driver_info = BTUSB_RESET },
@@ -147,6 +152,9 @@ static struct usb_device_id blacklist_table[] = {
147 { USB_DEVICE(0x050d, 0x0012), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, 152 { USB_DEVICE(0x050d, 0x0012), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
148 { USB_DEVICE(0x050d, 0x0013), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, 153 { USB_DEVICE(0x050d, 0x0013), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
149 154
155 /* Belkin F8T016 device */
156 { USB_DEVICE(0x050d, 0x016a), .driver_info = BTUSB_RESET },
157
150 /* Digianswer devices */ 158 /* Digianswer devices */
151 { USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER }, 159 { USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER },
152 { USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE }, 160 { USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE },
@@ -169,6 +177,7 @@ static struct usb_device_id blacklist_table[] = {
169struct btusb_data { 177struct btusb_data {
170 struct hci_dev *hdev; 178 struct hci_dev *hdev;
171 struct usb_device *udev; 179 struct usb_device *udev;
180 struct usb_interface *intf;
172 struct usb_interface *isoc; 181 struct usb_interface *isoc;
173 182
174 spinlock_t lock; 183 spinlock_t lock;
@@ -267,7 +276,6 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev)
267 BT_ERR("%s urb %p submission failed (%d)", 276 BT_ERR("%s urb %p submission failed (%d)",
268 hdev->name, urb, -err); 277 hdev->name, urb, -err);
269 usb_unanchor_urb(urb); 278 usb_unanchor_urb(urb);
270 kfree(buf);
271 } 279 }
272 280
273 usb_free_urb(urb); 281 usb_free_urb(urb);
@@ -350,7 +358,6 @@ static int btusb_submit_bulk_urb(struct hci_dev *hdev)
350 BT_ERR("%s urb %p submission failed (%d)", 358 BT_ERR("%s urb %p submission failed (%d)",
351 hdev->name, urb, -err); 359 hdev->name, urb, -err);
352 usb_unanchor_urb(urb); 360 usb_unanchor_urb(urb);
353 kfree(buf);
354 } 361 }
355 362
356 usb_free_urb(urb); 363 usb_free_urb(urb);
@@ -471,7 +478,6 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev)
471 BT_ERR("%s urb %p submission failed (%d)", 478 BT_ERR("%s urb %p submission failed (%d)",
472 hdev->name, urb, -err); 479 hdev->name, urb, -err);
473 usb_unanchor_urb(urb); 480 usb_unanchor_urb(urb);
474 kfree(buf);
475 } 481 }
476 482
477 usb_free_urb(urb); 483 usb_free_urb(urb);
@@ -516,7 +522,7 @@ static int btusb_open(struct hci_dev *hdev)
516 522
517 err = btusb_submit_intr_urb(hdev); 523 err = btusb_submit_intr_urb(hdev);
518 if (err < 0) { 524 if (err < 0) {
519 clear_bit(BTUSB_INTR_RUNNING, &hdev->flags); 525 clear_bit(BTUSB_INTR_RUNNING, &data->flags);
520 clear_bit(HCI_RUNNING, &hdev->flags); 526 clear_bit(HCI_RUNNING, &hdev->flags);
521 } 527 }
522 528
@@ -532,8 +538,10 @@ static int btusb_close(struct hci_dev *hdev)
532 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 538 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
533 return 0; 539 return 0;
534 540
541 cancel_work_sync(&data->work);
542
535 clear_bit(BTUSB_ISOC_RUNNING, &data->flags); 543 clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
536 usb_kill_anchored_urbs(&data->intr_anchor); 544 usb_kill_anchored_urbs(&data->isoc_anchor);
537 545
538 clear_bit(BTUSB_BULK_RUNNING, &data->flags); 546 clear_bit(BTUSB_BULK_RUNNING, &data->flags);
539 usb_kill_anchored_urbs(&data->bulk_anchor); 547 usb_kill_anchored_urbs(&data->bulk_anchor);
@@ -821,6 +829,7 @@ static int btusb_probe(struct usb_interface *intf,
821 } 829 }
822 830
823 data->udev = interface_to_usbdev(intf); 831 data->udev = interface_to_usbdev(intf);
832 data->intf = intf;
824 833
825 spin_lock_init(&data->lock); 834 spin_lock_init(&data->lock);
826 835
@@ -889,7 +898,7 @@ static int btusb_probe(struct usb_interface *intf,
889 898
890 if (data->isoc) { 899 if (data->isoc) {
891 err = usb_driver_claim_interface(&btusb_driver, 900 err = usb_driver_claim_interface(&btusb_driver,
892 data->isoc, NULL); 901 data->isoc, data);
893 if (err < 0) { 902 if (err < 0) {
894 hci_free_dev(hdev); 903 hci_free_dev(hdev);
895 kfree(data); 904 kfree(data);
@@ -921,13 +930,22 @@ static void btusb_disconnect(struct usb_interface *intf)
921 930
922 hdev = data->hdev; 931 hdev = data->hdev;
923 932
924 if (data->isoc) 933 __hci_dev_hold(hdev);
925 usb_driver_release_interface(&btusb_driver, data->isoc);
926 934
927 usb_set_intfdata(intf, NULL); 935 usb_set_intfdata(data->intf, NULL);
936
937 if (data->isoc)
938 usb_set_intfdata(data->isoc, NULL);
928 939
929 hci_unregister_dev(hdev); 940 hci_unregister_dev(hdev);
930 941
942 if (intf == data->isoc)
943 usb_driver_release_interface(&btusb_driver, data->intf);
944 else if (data->isoc)
945 usb_driver_release_interface(&btusb_driver, data->isoc);
946
947 __hci_dev_put(hdev);
948
931 hci_free_dev(hdev); 949 hci_free_dev(hdev);
932} 950}
933 951
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 74031de517e6..d47f2f80accd 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2097,7 +2097,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2097 2097
2098 len = nr * CD_FRAMESIZE_RAW; 2098 len = nr * CD_FRAMESIZE_RAW;
2099 2099
2100 ret = blk_rq_map_user(q, rq, ubuf, len); 2100 ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
2101 if (ret) 2101 if (ret)
2102 break; 2102 break;
2103 2103
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 1231d95aa695..d6ba77a2dd7b 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -624,14 +624,14 @@ static void gdrom_readdisk_dma(struct work_struct *work)
624 ctrl_outb(1, GDROM_DMA_STATUS_REG); 624 ctrl_outb(1, GDROM_DMA_STATUS_REG);
625 wait_event_interruptible_timeout(request_queue, 625 wait_event_interruptible_timeout(request_queue,
626 gd.transfer == 0, GDROM_DEFAULT_TIMEOUT); 626 gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
627 err = gd.transfer; 627 err = gd.transfer ? -EIO : 0;
628 gd.transfer = 0; 628 gd.transfer = 0;
629 gd.pending = 0; 629 gd.pending = 0;
630 /* now seek to take the request spinlock 630 /* now seek to take the request spinlock
631 * before handling ending the request */ 631 * before handling ending the request */
632 spin_lock(&gdrom_lock); 632 spin_lock(&gdrom_lock);
633 list_del_init(&req->queuelist); 633 list_del_init(&req->queuelist);
634 end_dequeued_request(req, 1 - err); 634 __blk_end_request(req, err, blk_rq_bytes(req));
635 } 635 }
636 spin_unlock(&gdrom_lock); 636 spin_unlock(&gdrom_lock);
637 kfree(read_command); 637 kfree(read_command);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 7ce1ac4baa6d..6af435b89867 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -661,10 +661,10 @@ void add_disk_randomness(struct gendisk *disk)
661 if (!disk || !disk->random) 661 if (!disk || !disk->random)
662 return; 662 return;
663 /* first major is 1, so we get >= 0x200 here */ 663 /* first major is 1, so we get >= 0x200 here */
664 DEBUG_ENT("disk event %d:%d\n", disk->major, disk->first_minor); 664 DEBUG_ENT("disk event %d:%d\n",
665 MAJOR(disk_devt(disk)), MINOR(disk_devt(disk)));
665 666
666 add_timer_randomness(disk->random, 667 add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
667 0x100 + MKDEV(disk->major, disk->first_minor));
668} 668}
669#endif 669#endif
670 670
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index daeb8f766971..e4dce8709541 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -695,13 +695,23 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
695{ 695{
696 struct tty_driver *p, *res = NULL; 696 struct tty_driver *p, *res = NULL;
697 int tty_line = 0; 697 int tty_line = 0;
698 int len;
698 char *str; 699 char *str;
699 700
701 for (str = name; *str; str++)
702 if ((*str >= '0' && *str <= '9') || *str == ',')
703 break;
704 if (!*str)
705 return NULL;
706
707 len = str - name;
708 tty_line = simple_strtoul(str, &str, 10);
709
700 mutex_lock(&tty_mutex); 710 mutex_lock(&tty_mutex);
701 /* Search through the tty devices to look for a match */ 711 /* Search through the tty devices to look for a match */
702 list_for_each_entry(p, &tty_drivers, tty_drivers) { 712 list_for_each_entry(p, &tty_drivers, tty_drivers) {
703 str = name + strlen(p->name); 713 if (strncmp(name, p->name, len) != 0)
704 tty_line = simple_strtoul(str, &str, 10); 714 continue;
705 if (*str == ',') 715 if (*str == ',')
706 str++; 716 str++;
707 if (*str == '\0') 717 if (*str == '\0')
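The tty_io.c change above moves the name parsing in front of the driver-list walk: scan for the first digit or comma, treat everything before it as the driver name, and read the trailing number as the line, so the loop only needs a bounded strncmp() per driver. A standalone userspace sketch of that split follows, with strtoul() standing in for the kernel's simple_strtoul() and the ',' handling folded into the split for brevity (the kernel defers it to the per-driver match).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int split_tty_name(const char *name, char *prefix, size_t prefix_sz,
			  int *line)
{
	const char *str;
	size_t len;

	/* Find the first digit or ',': that is where the driver name ends. */
	for (str = name; *str; str++)
		if ((*str >= '0' && *str <= '9') || *str == ',')
			break;
	if (!*str)
		return -1;	/* no line number at all */

	len = str - name;
	if (len + 1 > prefix_sz)
		return -1;
	memcpy(prefix, name, len);
	prefix[len] = '\0';

	if (*str == ',')
		str++;
	*line = (int)strtoul(str, NULL, 10);
	return 0;
}

int main(void)
{
	const char *samples[] = { "ttyS0", "ttyS,1", "console" };
	char prefix[16];
	int line;

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		if (split_tty_name(samples[i], prefix, sizeof(prefix), &line))
			printf("%-8s -> no line number\n", samples[i]);
		else
			printf("%-8s -> driver \"%s\", line %d\n",
			       samples[i], prefix, line);
	}
	return 0;
}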
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 94df91771243..0778d99aea7c 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -364,7 +364,7 @@ static void dw_dma_tasklet(unsigned long data)
364 int i; 364 int i;
365 365
366 status_block = dma_readl(dw, RAW.BLOCK); 366 status_block = dma_readl(dw, RAW.BLOCK);
367 status_xfer = dma_readl(dw, RAW.BLOCK); 367 status_xfer = dma_readl(dw, RAW.XFER);
368 status_err = dma_readl(dw, RAW.ERROR); 368 status_err = dma_readl(dw, RAW.ERROR);
369 369
370 dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", 370 dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index d568c65c1370..d9e7a49d6cbf 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -279,7 +279,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
279 { "OTES1 Fan", 36, 2, 60, 1, 0 }, 279 { "OTES1 Fan", 36, 2, 60, 1, 0 },
280 { NULL, 0, 0, 0, 0, 0 } } 280 { NULL, 0, 0, 0, 0, 0 } }
281 }, 281 },
282 { 0x0011, NULL /* Abit AT8 32X, need DMI string */, { 282 { 0x0011, "AT8 32X(ATI RD580-ULI M1575)", {
283 { "CPU Core", 0, 0, 10, 1, 0 }, 283 { "CPU Core", 0, 0, 10, 1, 0 },
284 { "DDR", 1, 0, 20, 1, 0 }, 284 { "DDR", 1, 0, 20, 1, 0 },
285 { "DDR VTT", 2, 0, 10, 1, 0 }, 285 { "DDR VTT", 2, 0, 10, 1, 0 },
@@ -303,6 +303,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
303 { "SYS Fan", 34, 2, 60, 1, 0 }, 303 { "SYS Fan", 34, 2, 60, 1, 0 },
304 { "AUX1 Fan", 35, 2, 60, 1, 0 }, 304 { "AUX1 Fan", 35, 2, 60, 1, 0 },
305 { "AUX2 Fan", 36, 2, 60, 1, 0 }, 305 { "AUX2 Fan", 36, 2, 60, 1, 0 },
306 { "AUX3 Fan", 37, 2, 60, 1, 0 },
306 { NULL, 0, 0, 0, 0, 0 } } 307 { NULL, 0, 0, 0, 0, 0 } }
307 }, 308 },
308 { 0x0012, NULL /* Abit AN8 32X, need DMI string */, { 309 { 0x0012, NULL /* Abit AN8 32X, need DMI string */, {
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index ce8d94fbfd7e..bfda8c80ef24 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -69,7 +69,7 @@ static inline int ad7414_write(struct i2c_client *client, u8 reg, u8 value)
69 return i2c_smbus_write_byte_data(client, reg, value); 69 return i2c_smbus_write_byte_data(client, reg, value);
70} 70}
71 71
72struct ad7414_data *ad7414_update_device(struct device *dev) 72static struct ad7414_data *ad7414_update_device(struct device *dev)
73{ 73{
74 struct i2c_client *client = to_i2c_client(dev); 74 struct i2c_client *client = to_i2c_client(dev);
75 struct ad7414_data *data = i2c_get_clientdata(client); 75 struct ad7414_data *data = i2c_get_clientdata(client);
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index d191118ba0cb..d6b490d3e36f 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -31,7 +31,7 @@
31 31
32MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
33MODULE_DESCRIPTION("System voltages control via Attansic ATXP1"); 33MODULE_DESCRIPTION("System voltages control via Attansic ATXP1");
34MODULE_VERSION("0.6.2"); 34MODULE_VERSION("0.6.3");
35MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>"); 35MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>");
36 36
37#define ATXP1_VID 0x00 37#define ATXP1_VID 0x00
@@ -289,16 +289,16 @@ static int atxp1_detect(struct i2c_client *new_client, int kind,
289 if (!((i2c_smbus_read_byte_data(new_client, 0x3e) == 0) && 289 if (!((i2c_smbus_read_byte_data(new_client, 0x3e) == 0) &&
290 (i2c_smbus_read_byte_data(new_client, 0x3f) == 0) && 290 (i2c_smbus_read_byte_data(new_client, 0x3f) == 0) &&
291 (i2c_smbus_read_byte_data(new_client, 0xfe) == 0) && 291 (i2c_smbus_read_byte_data(new_client, 0xfe) == 0) &&
292 (i2c_smbus_read_byte_data(new_client, 0xff) == 0) )) { 292 (i2c_smbus_read_byte_data(new_client, 0xff) == 0)))
293 return -ENODEV;
293 294
294 /* No vendor ID, now checking if registers 0x10,0x11 (non-existent) 295 /* No vendor ID, now checking if registers 0x10,0x11 (non-existent)
295 * showing the same as register 0x00 */ 296 * showing the same as register 0x00 */
296 temp = i2c_smbus_read_byte_data(new_client, 0x00); 297 temp = i2c_smbus_read_byte_data(new_client, 0x00);
297 298
298 if (!((i2c_smbus_read_byte_data(new_client, 0x10) == temp) && 299 if (!((i2c_smbus_read_byte_data(new_client, 0x10) == temp) &&
299 (i2c_smbus_read_byte_data(new_client, 0x11) == temp) )) 300 (i2c_smbus_read_byte_data(new_client, 0x11) == temp)))
300 return -ENODEV; 301 return -ENODEV;
301 }
302 302
303 /* Get VRM */ 303 /* Get VRM */
304 temp = vid_which_vrm(); 304 temp = vid_which_vrm();
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 30cdb0956779..d793cc011990 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -46,6 +46,8 @@
46#include <linux/err.h> 46#include <linux/err.h>
47#include <linux/mutex.h> 47#include <linux/mutex.h>
48#include <linux/sysfs.h> 48#include <linux/sysfs.h>
49#include <linux/string.h>
50#include <linux/dmi.h>
49#include <asm/io.h> 51#include <asm/io.h>
50 52
51#define DRVNAME "it87" 53#define DRVNAME "it87"
@@ -236,6 +238,8 @@ struct it87_sio_data {
236 /* Values read from Super-I/O config space */ 238 /* Values read from Super-I/O config space */
237 u8 revision; 239 u8 revision;
238 u8 vid_value; 240 u8 vid_value;
241 /* Values set based on DMI strings */
242 u8 skip_pwm;
239}; 243};
240 244
241/* For each registered chip, we need to keep some data in memory. 245/* For each registered chip, we need to keep some data in memory.
@@ -273,10 +277,10 @@ struct it87_data {
273static inline int has_16bit_fans(const struct it87_data *data) 277static inline int has_16bit_fans(const struct it87_data *data)
274{ 278{
275 /* IT8705F Datasheet 0.4.1, 3h == Version G. 279 /* IT8705F Datasheet 0.4.1, 3h == Version G.
276 IT8712F Datasheet 0.9.1, section 8.3.5 indicates 7h == Version I. 280 IT8712F Datasheet 0.9.1, section 8.3.5 indicates 8h == Version J.
277 These are the first revisions with 16bit tachometer support. */ 281 These are the first revisions with 16bit tachometer support. */
278 return (data->type == it87 && data->revision >= 0x03) 282 return (data->type == it87 && data->revision >= 0x03)
279 || (data->type == it8712 && data->revision >= 0x07) 283 || (data->type == it8712 && data->revision >= 0x08)
280 || data->type == it8716 284 || data->type == it8716
281 || data->type == it8718; 285 || data->type == it8718;
282} 286}
@@ -964,6 +968,7 @@ static int __init it87_find(unsigned short *address,
964{ 968{
965 int err = -ENODEV; 969 int err = -ENODEV;
966 u16 chip_type; 970 u16 chip_type;
971 const char *board_vendor, *board_name;
967 972
968 superio_enter(); 973 superio_enter();
969 chip_type = force_id ? force_id : superio_inw(DEVID); 974 chip_type = force_id ? force_id : superio_inw(DEVID);
@@ -1022,6 +1027,24 @@ static int __init it87_find(unsigned short *address,
1022 pr_info("it87: in7 is VCCH (+5V Stand-By)\n"); 1027 pr_info("it87: in7 is VCCH (+5V Stand-By)\n");
1023 } 1028 }
1024 1029
1030 /* Disable specific features based on DMI strings */
1031 board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
1032 board_name = dmi_get_system_info(DMI_BOARD_NAME);
1033 if (board_vendor && board_name) {
1034 if (strcmp(board_vendor, "nVIDIA") == 0
1035 && strcmp(board_name, "FN68PT") == 0) {
1036 /* On the Shuttle SN68PT, FAN_CTL2 is apparently not
1037 connected to a fan, but to something else. One user
1038 has reported instant system power-off when changing
1039 the PWM2 duty cycle, so we disable it.
1040 I use the board name string as the trigger in case
1041 the same board is ever used in other systems. */
1042 pr_info("it87: Disabling pwm2 due to "
1043 "hardware constraints\n");
1044 sio_data->skip_pwm = (1 << 1);
1045 }
1046 }
1047
1025exit: 1048exit:
1026 superio_exit(); 1049 superio_exit();
1027 return err; 1050 return err;
@@ -1168,25 +1191,33 @@ static int __devinit it87_probe(struct platform_device *pdev)
1168 } 1191 }
1169 1192
1170 if (enable_pwm_interface) { 1193 if (enable_pwm_interface) {
1171 if ((err = device_create_file(dev, 1194 if (!(sio_data->skip_pwm & (1 << 0))) {
1172 &sensor_dev_attr_pwm1_enable.dev_attr)) 1195 if ((err = device_create_file(dev,
1173 || (err = device_create_file(dev, 1196 &sensor_dev_attr_pwm1_enable.dev_attr))
1174 &sensor_dev_attr_pwm2_enable.dev_attr)) 1197 || (err = device_create_file(dev,
1175 || (err = device_create_file(dev, 1198 &sensor_dev_attr_pwm1.dev_attr))
1176 &sensor_dev_attr_pwm3_enable.dev_attr)) 1199 || (err = device_create_file(dev,
1177 || (err = device_create_file(dev, 1200 &dev_attr_pwm1_freq)))
1178 &sensor_dev_attr_pwm1.dev_attr)) 1201 goto ERROR4;
1179 || (err = device_create_file(dev, 1202 }
1180 &sensor_dev_attr_pwm2.dev_attr)) 1203 if (!(sio_data->skip_pwm & (1 << 1))) {
1181 || (err = device_create_file(dev, 1204 if ((err = device_create_file(dev,
1182 &sensor_dev_attr_pwm3.dev_attr)) 1205 &sensor_dev_attr_pwm2_enable.dev_attr))
1183 || (err = device_create_file(dev, 1206 || (err = device_create_file(dev,
1184 &dev_attr_pwm1_freq)) 1207 &sensor_dev_attr_pwm2.dev_attr))
1185 || (err = device_create_file(dev, 1208 || (err = device_create_file(dev,
1186 &dev_attr_pwm2_freq)) 1209 &dev_attr_pwm2_freq)))
1187 || (err = device_create_file(dev, 1210 goto ERROR4;
1188 &dev_attr_pwm3_freq))) 1211 }
1189 goto ERROR4; 1212 if (!(sio_data->skip_pwm & (1 << 2))) {
1213 if ((err = device_create_file(dev,
1214 &sensor_dev_attr_pwm3_enable.dev_attr))
1215 || (err = device_create_file(dev,
1216 &sensor_dev_attr_pwm3.dev_attr))
1217 || (err = device_create_file(dev,
1218 &dev_attr_pwm3_freq)))
1219 goto ERROR4;
1220 }
1190 } 1221 }
1191 1222
1192 if (data->type == it8712 || data->type == it8716 1223 if (data->type == it8712 || data->type == it8716
@@ -1546,6 +1577,7 @@ static int __init sm_it87_init(void)
1546 unsigned short isa_address=0; 1577 unsigned short isa_address=0;
1547 struct it87_sio_data sio_data; 1578 struct it87_sio_data sio_data;
1548 1579
1580 memset(&sio_data, 0, sizeof(struct it87_sio_data));
1549 err = it87_find(&isa_address, &sio_data); 1581 err = it87_find(&isa_address, &sio_data);
1550 if (err) 1582 if (err)
1551 return err; 1583 return err;
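The it87 change above threads a skip_pwm bitmask from the Super-I/O probe (where the DMI match sets bit 1 for the Shuttle board) into it87_probe(), which then creates each pwmN attribute group only when its bit is clear. A tiny standalone sketch of that gating follows; the printout stands in for device_create_file() and is not the driver's API.

#include <stdio.h>

struct sio_data {
	unsigned char skip_pwm;		/* bit n set == skip pwm(n+1) */
};

static void create_pwm_attrs(const struct sio_data *sio)
{
	for (int i = 0; i < 3; i++) {
		if (sio->skip_pwm & (1 << i)) {
			printf("pwm%d: skipped (hardware constraint)\n", i + 1);
			continue;
		}
		printf("pwm%d: creating pwm%d, pwm%d_enable, pwm%d_freq\n",
		       i + 1, i + 1, i + 1, i + 1);
	}
}

int main(void)
{
	struct sio_data sio = { 0 };

	/* What the DMI match in it87_find() does on the affected board. */
	sio.skip_pwm = 1 << 1;		/* disable pwm2 only */
	create_pwm_attrs(&sio);
	return 0;
}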
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 22f6d5c00d80..0e7b1c6724aa 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -180,7 +180,7 @@ static const struct i2c_algorithm i2c_powermac_algorithm = {
180}; 180};
181 181
182 182
183static int i2c_powermac_remove(struct platform_device *dev) 183static int __devexit i2c_powermac_remove(struct platform_device *dev)
184{ 184{
185 struct i2c_adapter *adapter = platform_get_drvdata(dev); 185 struct i2c_adapter *adapter = platform_get_drvdata(dev);
186 struct pmac_i2c_bus *bus = i2c_get_adapdata(adapter); 186 struct pmac_i2c_bus *bus = i2c_get_adapdata(adapter);
@@ -200,7 +200,7 @@ static int i2c_powermac_remove(struct platform_device *dev)
200} 200}
201 201
202 202
203static int __devexit i2c_powermac_probe(struct platform_device *dev) 203static int __devinit i2c_powermac_probe(struct platform_device *dev)
204{ 204{
205 struct pmac_i2c_bus *bus = dev->dev.platform_data; 205 struct pmac_i2c_bus *bus = dev->dev.platform_data;
206 struct device_node *parent = NULL; 206 struct device_node *parent = NULL;
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index af4491fa7e34..307d976c9b69 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -583,8 +583,10 @@ static int __init i2c_dev_init(void)
583 goto out; 583 goto out;
584 584
585 i2c_dev_class = class_create(THIS_MODULE, "i2c-dev"); 585 i2c_dev_class = class_create(THIS_MODULE, "i2c-dev");
586 if (IS_ERR(i2c_dev_class)) 586 if (IS_ERR(i2c_dev_class)) {
587 res = PTR_ERR(i2c_dev_class);
587 goto out_unreg_chrdev; 588 goto out_unreg_chrdev;
589 }
588 590
589 res = i2c_add_driver(&i2cdev_driver); 591 res = i2c_add_driver(&i2cdev_driver);
590 if (res) 592 if (res)
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index fc735ab08ff4..052879a6f853 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -292,6 +292,20 @@ config IDE_GENERIC
292 tristate "generic/default IDE chipset support" 292 tristate "generic/default IDE chipset support"
293 depends on ALPHA || X86 || IA64 || M32R || MIPS 293 depends on ALPHA || X86 || IA64 || M32R || MIPS
294 help 294 help
295 This is the generic IDE driver. This driver attaches to the
296 fixed legacy ports (e.g. on PCs 0x1f0/0x170, 0x1e8/0x168 and
297 so on). Please note that if this driver is built into the
298 kernel or loaded before other ATA (IDE or libata) drivers
299 and the controller is located at legacy ports, this driver
300 may grab those ports and thus can prevent the controller
301 specific driver from attaching.
302
303 Also, currently, IDE generic doesn't allow IRQ sharing
304 meaning that the IRQs it grabs won't be available to other
305 controllers sharing those IRQs which usually makes drivers
306 for those controllers fail. Generally, it's not a good idea
307 to load IDE generic driver on modern systems.
308
295 If unsure, say N. 309 If unsure, say N.
296 310
297config BLK_DEV_PLATFORM 311config BLK_DEV_PLATFORM
@@ -766,10 +780,6 @@ config BLK_DEV_IDEDMA_PMAC
766 to transfer data to and from memory. Saying Y is safe and improves 780 to transfer data to and from memory. Saying Y is safe and improves
767 performance. 781 performance.
768 782
769config BLK_DEV_IDE_SWARM
770 tristate "IDE for Sibyte evaluation boards"
771 depends on SIBYTE_SB1xxx_SOC
772
773config BLK_DEV_IDE_AU1XXX 783config BLK_DEV_IDE_AU1XXX
774 bool "IDE for AMD Alchemy Au1200" 784 bool "IDE for AMD Alchemy Au1200"
775 depends on SOC_AU1200 785 depends on SOC_AU1200
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 49a8c589e346..03c2cb6a58bc 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1113,7 +1113,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
1113 1113
1114 if (write) { 1114 if (write) {
1115 /* disk has become write protected */ 1115 /* disk has become write protected */
1116 if (cd->disk->policy) { 1116 if (get_disk_ro(cd->disk)) {
1117 cdrom_end_request(drive, 0); 1117 cdrom_end_request(drive, 0);
1118 return ide_stopped; 1118 return ide_stopped;
1119 } 1119 }
@@ -1661,7 +1661,9 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1661 cdi->mask &= ~CDC_PLAY_AUDIO; 1661 cdi->mask &= ~CDC_PLAY_AUDIO;
1662 1662
1663 mechtype = buf[8 + 6] >> 5; 1663 mechtype = buf[8 + 6] >> 5;
1664 if (mechtype == mechtype_caddy || mechtype == mechtype_popup) 1664 if (mechtype == mechtype_caddy ||
1665 mechtype == mechtype_popup ||
1666 (drive->atapi_flags & IDE_AFLAG_NO_AUTOCLOSE))
1665 cdi->mask |= CDC_CLOSE_TRAY; 1667 cdi->mask |= CDC_CLOSE_TRAY;
1666 1668
1667 if (cdi->sanyo_slot > 0) { 1669 if (cdi->sanyo_slot > 0) {
@@ -1859,6 +1861,8 @@ static const struct cd_list_entry ide_cd_quirks_list[] = {
1859 { "MATSHITADVD-ROM SR-8176", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, 1861 { "MATSHITADVD-ROM SR-8176", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1860 { "MATSHITADVD-ROM SR-8174", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, 1862 { "MATSHITADVD-ROM SR-8174", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1861 { "Optiarc DVD RW AD-5200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, 1863 { "Optiarc DVD RW AD-5200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1864 { "Optiarc DVD RW AD-7200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1865 { "Optiarc DVD RW AD-7543A", NULL, IDE_AFLAG_NO_AUTOCLOSE },
1862 { NULL, NULL, 0 } 1866 { NULL, NULL, 0 }
1863}; 1867};
1864 1868
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 07ef88bd109b..33ea8c048717 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -41,6 +41,12 @@
41#include <asm/io.h> 41#include <asm/io.h>
42#include <asm/div64.h> 42#include <asm/div64.h>
43 43
44#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
45#define IDE_DISK_MINORS (1 << PARTN_BITS)
46#else
47#define IDE_DISK_MINORS 0
48#endif
49
44struct ide_disk_obj { 50struct ide_disk_obj {
45 ide_drive_t *drive; 51 ide_drive_t *drive;
46 ide_driver_t *driver; 52 ide_driver_t *driver;
@@ -1151,8 +1157,7 @@ static int ide_disk_probe(ide_drive_t *drive)
1151 if (!idkp) 1157 if (!idkp)
1152 goto failed; 1158 goto failed;
1153 1159
1154 g = alloc_disk_node(1 << PARTN_BITS, 1160 g = alloc_disk_node(IDE_DISK_MINORS, hwif_to_node(drive->hwif));
1155 hwif_to_node(drive->hwif));
1156 if (!g) 1161 if (!g)
1157 goto out_free_idkp; 1162 goto out_free_idkp;
1158 1163
@@ -1178,9 +1183,11 @@ static int ide_disk_probe(ide_drive_t *drive)
1178 } else 1183 } else
1179 drive->attach = 1; 1184 drive->attach = 1;
1180 1185
1181 g->minors = 1 << PARTN_BITS; 1186 g->minors = IDE_DISK_MINORS;
1182 g->driverfs_dev = &drive->gendev; 1187 g->driverfs_dev = &drive->gendev;
1183 g->flags = drive->removable ? GENHD_FL_REMOVABLE : 0; 1188 g->flags |= GENHD_FL_EXT_DEVT;
1189 if (drive->removable)
1190 g->flags |= GENHD_FL_REMOVABLE;
1184 set_capacity(g, idedisk_capacity(drive)); 1191 set_capacity(g, idedisk_capacity(drive));
1185 g->fops = &idedisk_ops; 1192 g->fops = &idedisk_ops;
1186 add_disk(g); 1193 add_disk(g);
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index adc682755857..3fa07c0aeaa4 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -211,7 +211,7 @@ int ide_build_dmatable (ide_drive_t *drive, struct request *rq)
211 xcount = bcount & 0xffff; 211 xcount = bcount & 0xffff;
212 if (is_trm290) 212 if (is_trm290)
213 xcount = ((xcount >> 2) - 1) << 16; 213 xcount = ((xcount >> 2) - 1) << 16;
214 if (xcount == 0x0000) { 214 else if (xcount == 0x0000) {
215 /* 215 /*
216 * Most chipsets correctly interpret a length of 0x0000 as 64KB, 216 * Most chipsets correctly interpret a length of 0x0000 as 64KB,
217 * but at least one (e.g. CS5530) misinterprets it as zero (!). 217 * but at least one (e.g. CS5530) misinterprets it as zero (!).
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 994e41099b42..70aa86c8807e 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1188,7 +1188,7 @@ static struct kobject *exact_match(dev_t dev, int *part, void *data)
1188{ 1188{
1189 struct gendisk *p = data; 1189 struct gendisk *p = data;
1190 *part &= (1 << PARTN_BITS) - 1; 1190 *part &= (1 << PARTN_BITS) - 1;
1191 return &p->dev.kobj; 1191 return &disk_to_dev(p)->kobj;
1192} 1192}
1193 1193
1194static int exact_lock(dev_t dev, void *data) 1194static int exact_lock(dev_t dev, void *data)
@@ -1492,7 +1492,7 @@ static struct device_attribute *ide_port_attrs[] = {
1492 1492
1493static int ide_sysfs_register_port(ide_hwif_t *hwif) 1493static int ide_sysfs_register_port(ide_hwif_t *hwif)
1494{ 1494{
1495 int i, rc; 1495 int i, uninitialized_var(rc);
1496 1496
1497 for (i = 0; ide_port_attrs[i]; i++) { 1497 for (i = 0; ide_port_attrs[i]; i++) {
1498 rc = device_create_file(hwif->portdev, ide_port_attrs[i]); 1498 rc = device_create_file(hwif->portdev, ide_port_attrs[i]);
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 1bce84b56630..3833189144ed 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -2338,7 +2338,7 @@ static void idetape_get_inquiry_results(ide_drive_t *drive)
2338{ 2338{
2339 idetape_tape_t *tape = drive->driver_data; 2339 idetape_tape_t *tape = drive->driver_data;
2340 struct ide_atapi_pc pc; 2340 struct ide_atapi_pc pc;
2341 char fw_rev[6], vendor_id[10], product_id[18]; 2341 char fw_rev[4], vendor_id[8], product_id[16];
2342 2342
2343 idetape_create_inquiry_cmd(&pc); 2343 idetape_create_inquiry_cmd(&pc);
2344 if (idetape_queue_pc_tail(drive, &pc)) { 2344 if (idetape_queue_pc_tail(drive, &pc)) {
@@ -2350,11 +2350,11 @@ static void idetape_get_inquiry_results(ide_drive_t *drive)
2350 memcpy(product_id, &pc.buf[16], 16); 2350 memcpy(product_id, &pc.buf[16], 16);
2351 memcpy(fw_rev, &pc.buf[32], 4); 2351 memcpy(fw_rev, &pc.buf[32], 4);
2352 2352
2353 ide_fixstring(vendor_id, 10, 0); 2353 ide_fixstring(vendor_id, 8, 0);
2354 ide_fixstring(product_id, 18, 0); 2354 ide_fixstring(product_id, 16, 0);
2355 ide_fixstring(fw_rev, 6, 0); 2355 ide_fixstring(fw_rev, 4, 0);
2356 2356
2357 printk(KERN_INFO "ide-tape: %s <-> %s: %s %s rev %s\n", 2357 printk(KERN_INFO "ide-tape: %s <-> %s: %.8s %.16s rev %.4s\n",
2358 drive->name, tape->name, vendor_id, product_id, fw_rev); 2358 drive->name, tape->name, vendor_id, product_id, fw_rev);
2359} 2359}
2360 2360
diff --git a/drivers/ide/mips/Makefile b/drivers/ide/mips/Makefile
index 677c7b2bac92..5873fa0b8769 100644
--- a/drivers/ide/mips/Makefile
+++ b/drivers/ide/mips/Makefile
@@ -1,4 +1,3 @@
1obj-$(CONFIG_BLK_DEV_IDE_SWARM) += swarm.o
2obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o 1obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o
3 2
4EXTRA_CFLAGS := -Idrivers/ide 3EXTRA_CFLAGS := -Idrivers/ide
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
deleted file mode 100644
index badf79fc9e3a..000000000000
--- a/drivers/ide/mips/swarm.c
+++ /dev/null
@@ -1,196 +0,0 @@
1/*
2 * Copyright (C) 2001, 2002, 2003 Broadcom Corporation
3 * Copyright (C) 2004 MontaVista Software Inc.
4 * Author: Manish Lachwani, mlachwani@mvista.com
5 * Copyright (C) 2004 MIPS Technologies, Inc. All rights reserved.
6 * Author: Maciej W. Rozycki <macro@mips.com>
7 * Copyright (c) 2006, 2008 Maciej W. Rozycki
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24/*
25 * Derived loosely from ide-pmac.c, so:
26 * Copyright (C) 1998 Paul Mackerras.
27 * Copyright (C) 1995-1998 Mark Lord
28 */
29
30/*
31 * Boards with SiByte processors so far have supported IDE devices via
32 * the Generic Bus, PCI bus, and built-in PCMCIA interface. In all
33 * cases, byte-swapping must be avoided for these devices (whereas
34 * other PCI devices, for example, will require swapping). Any
35 * SiByte-targetted kernel including IDE support will include this
36 * file. Probing of a Generic Bus for an IDE device is controlled by
37 * the definition of "SIBYTE_HAVE_IDE", which is provided by
38 * <asm/sibyte/board.h> for Broadcom boards.
39 */
40
41#include <linux/ide.h>
42#include <linux/ioport.h>
43#include <linux/kernel.h>
44#include <linux/types.h>
45#include <linux/platform_device.h>
46
47#include <asm/io.h>
48
49#include <asm/sibyte/board.h>
50#include <asm/sibyte/sb1250_genbus.h>
51#include <asm/sibyte/sb1250_regs.h>
52
53#define DRV_NAME "ide-swarm"
54
55static char swarm_ide_string[] = DRV_NAME;
56
57static struct resource swarm_ide_resource = {
58 .name = "SWARM GenBus IDE",
59 .flags = IORESOURCE_MEM,
60};
61
62static struct platform_device *swarm_ide_dev;
63
64static const struct ide_port_info swarm_port_info = {
65 .name = DRV_NAME,
66 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
67};
68
69/*
70 * swarm_ide_probe - if the board header indicates the existence of
71 * Generic Bus IDE, allocate a HWIF for it.
72 */
73static int __devinit swarm_ide_probe(struct device *dev)
74{
75 u8 __iomem *base;
76 struct ide_host *host;
77 phys_t offset, size;
78 int i, rc;
79 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
80
81 if (!SIBYTE_HAVE_IDE)
82 return -ENODEV;
83
84 base = ioremap(A_IO_EXT_BASE, 0x800);
85 offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS));
86 size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS));
87 iounmap(base);
88
89 offset = G_IO_START_ADDR(offset) << S_IO_ADDRBASE;
90 size = (G_IO_MULT_SIZE(size) + 1) << S_IO_REGSIZE;
91 if (offset < A_PHYS_GENBUS || offset >= A_PHYS_GENBUS_END) {
92 printk(KERN_INFO DRV_NAME
93 ": IDE interface at GenBus disabled\n");
94 return -EBUSY;
95 }
96
97 printk(KERN_INFO DRV_NAME ": IDE interface at GenBus slot %i\n",
98 IDE_CS);
99
100 swarm_ide_resource.start = offset;
101 swarm_ide_resource.end = offset + size - 1;
102 if (request_resource(&iomem_resource, &swarm_ide_resource)) {
103 printk(KERN_ERR DRV_NAME
104 ": can't request I/O memory resource\n");
105 return -EBUSY;
106 }
107
108 base = ioremap(offset, size);
109
110 for (i = 0; i <= 7; i++)
111 hw.io_ports_array[i] =
112 (unsigned long)(base + ((0x1f0 + i) << 5));
113 hw.io_ports.ctl_addr =
114 (unsigned long)(base + (0x3f6 << 5));
115 hw.irq = K_INT_GB_IDE;
116 hw.chipset = ide_generic;
117
118 rc = ide_host_add(&swarm_port_info, hws, &host);
119 if (rc)
120 goto err;
121
122 dev_set_drvdata(dev, host);
123
124 return 0;
125err:
126 release_resource(&swarm_ide_resource);
127 iounmap(base);
128 return rc;
129}
130
131static struct device_driver swarm_ide_driver = {
132 .name = swarm_ide_string,
133 .bus = &platform_bus_type,
134 .probe = swarm_ide_probe,
135};
136
137static void swarm_ide_platform_release(struct device *device)
138{
139 struct platform_device *pldev;
140
141 /* free device */
142 pldev = to_platform_device(device);
143 kfree(pldev);
144}
145
146static int __devinit swarm_ide_init_module(void)
147{
148 struct platform_device *pldev;
149 int err;
150
151 printk(KERN_INFO "SWARM IDE driver\n");
152
153 if (driver_register(&swarm_ide_driver)) {
154 printk(KERN_ERR "Driver registration failed\n");
155 err = -ENODEV;
156 goto out;
157 }
158
159 if (!(pldev = kzalloc(sizeof (*pldev), GFP_KERNEL))) {
160 err = -ENOMEM;
161 goto out_unregister_driver;
162 }
163
164 pldev->name = swarm_ide_string;
165 pldev->id = 0;
166 pldev->dev.release = swarm_ide_platform_release;
167
168 if (platform_device_register(pldev)) {
169 err = -ENODEV;
170 goto out_free_pldev;
171 }
172
173 if (!pldev->dev.driver) {
174 /*
175 * The driver was not bound to this device, there was
176 * no hardware at this address. Unregister it, as the
177 * release fuction will take care of freeing the
178 * allocated structure
179 */
180 platform_device_unregister (pldev);
181 }
182
183 swarm_ide_dev = pldev;
184
185 return 0;
186
187out_free_pldev:
188 kfree(pldev);
189
190out_unregister_driver:
191 driver_unregister(&swarm_ide_driver);
192out:
193 return err;
194}
195
196module_init(swarm_ide_init_module);
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 18f4d7f6ce6d..2998a6ac9ae4 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -351,8 +351,9 @@ static int report_tp_state(struct bcm5974 *dev, int size)
351#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300 351#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300
352#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0 352#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0
353#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01 353#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01
354#define BCM5974_WELLSPRING_MODE_NORMAL_VALUE 0x08
354 355
355static int bcm5974_wellspring_mode(struct bcm5974 *dev) 356static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
356{ 357{
357 char *data = kmalloc(8, GFP_KERNEL); 358 char *data = kmalloc(8, GFP_KERNEL);
358 int retval = 0, size; 359 int retval = 0, size;
@@ -377,7 +378,9 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev)
377 } 378 }
378 379
379 /* apply the mode switch */ 380 /* apply the mode switch */
380 data[0] = BCM5974_WELLSPRING_MODE_VENDOR_VALUE; 381 data[0] = on ?
382 BCM5974_WELLSPRING_MODE_VENDOR_VALUE :
383 BCM5974_WELLSPRING_MODE_NORMAL_VALUE;
381 384
382 /* write configuration */ 385 /* write configuration */
383 size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 386 size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
@@ -392,7 +395,8 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev)
392 goto out; 395 goto out;
393 } 396 }
394 397
395 dprintk(2, "bcm5974: switched to wellspring mode.\n"); 398 dprintk(2, "bcm5974: switched to %s mode.\n",
399 on ? "wellspring" : "normal");
396 400
397 out: 401 out:
398 kfree(data); 402 kfree(data);
@@ -481,7 +485,7 @@ exit:
481 */ 485 */
482static int bcm5974_start_traffic(struct bcm5974 *dev) 486static int bcm5974_start_traffic(struct bcm5974 *dev)
483{ 487{
484 if (bcm5974_wellspring_mode(dev)) { 488 if (bcm5974_wellspring_mode(dev, true)) {
485 dprintk(1, "bcm5974: mode switch failed\n"); 489 dprintk(1, "bcm5974: mode switch failed\n");
486 goto error; 490 goto error;
487 } 491 }
@@ -504,6 +508,7 @@ static void bcm5974_pause_traffic(struct bcm5974 *dev)
504{ 508{
505 usb_kill_urb(dev->tp_urb); 509 usb_kill_urb(dev->tp_urb);
506 usb_kill_urb(dev->bt_urb); 510 usb_kill_urb(dev->bt_urb);
511 bcm5974_wellspring_mode(dev, false);
507} 512}
508 513
509/* 514/*
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index bf44f9d68342..c8b7e8a45c4d 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -119,8 +119,8 @@ static int __devinit jornada720_ts_probe(struct platform_device *pdev)
119 input_dev->id.bustype = BUS_HOST; 119 input_dev->id.bustype = BUS_HOST;
120 input_dev->dev.parent = &pdev->dev; 120 input_dev->dev.parent = &pdev->dev;
121 121
122 input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS); 122 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
123 input_dev->keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH); 123 input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
124 input_set_abs_params(input_dev, ABS_X, 270, 3900, 0, 0); 124 input_set_abs_params(input_dev, ABS_X, 270, 3900, 0, 0);
125 input_set_abs_params(input_dev, ABS_Y, 180, 3700, 0, 0); 125 input_set_abs_params(input_dev, ABS_Y, 180, 3700, 0, 0);
126 126
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index be0e12144b8b..34935155c1c0 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -161,6 +161,16 @@ static int fsg_led_probe(struct platform_device *pdev)
161{ 161{
162 int ret; 162 int ret;
163 163
164 /* Map the LED chip select address space */
165 latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512);
166 if (!latch_address) {
167 ret = -ENOMEM;
168 goto failremap;
169 }
170
171 latch_value = 0xffff;
172 *latch_address = latch_value;
173
164 ret = led_classdev_register(&pdev->dev, &fsg_wlan_led); 174 ret = led_classdev_register(&pdev->dev, &fsg_wlan_led);
165 if (ret < 0) 175 if (ret < 0)
166 goto failwlan; 176 goto failwlan;
@@ -185,20 +195,8 @@ static int fsg_led_probe(struct platform_device *pdev)
185 if (ret < 0) 195 if (ret < 0)
186 goto failring; 196 goto failring;
187 197
188 /* Map the LED chip select address space */
189 latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512);
190 if (!latch_address) {
191 ret = -ENOMEM;
192 goto failremap;
193 }
194
195 latch_value = 0xffff;
196 *latch_address = latch_value;
197
198 return ret; 198 return ret;
199 199
200 failremap:
201 led_classdev_unregister(&fsg_ring_led);
202 failring: 200 failring:
203 led_classdev_unregister(&fsg_sync_led); 201 led_classdev_unregister(&fsg_sync_led);
204 failsync: 202 failsync:
@@ -210,14 +208,14 @@ static int fsg_led_probe(struct platform_device *pdev)
210 failwan: 208 failwan:
211 led_classdev_unregister(&fsg_wlan_led); 209 led_classdev_unregister(&fsg_wlan_led);
212 failwlan: 210 failwlan:
211 iounmap(latch_address);
212 failremap:
213 213
214 return ret; 214 return ret;
215} 215}
216 216
217static int fsg_led_remove(struct platform_device *pdev) 217static int fsg_led_remove(struct platform_device *pdev)
218{ 218{
219 iounmap(latch_address);
220
221 led_classdev_unregister(&fsg_wlan_led); 219 led_classdev_unregister(&fsg_wlan_led);
222 led_classdev_unregister(&fsg_wan_led); 220 led_classdev_unregister(&fsg_wan_led);
223 led_classdev_unregister(&fsg_sata_led); 221 led_classdev_unregister(&fsg_sata_led);
@@ -225,6 +223,8 @@ static int fsg_led_remove(struct platform_device *pdev)
225 led_classdev_unregister(&fsg_sync_led); 223 led_classdev_unregister(&fsg_sync_led);
226 led_classdev_unregister(&fsg_ring_led); 224 led_classdev_unregister(&fsg_ring_led);
227 225
226 iounmap(latch_address);
227
228 return 0; 228 return 0;
229} 229}
230 230
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 146c06972863..f508729123b5 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -248,11 +248,10 @@ static int __devinit pca955x_probe(struct i2c_client *client,
248 const struct i2c_device_id *id) 248 const struct i2c_device_id *id)
249{ 249{
250 struct pca955x_led *pca955x; 250 struct pca955x_led *pca955x;
251 int i;
252 int err = -ENODEV;
253 struct pca955x_chipdef *chip; 251 struct pca955x_chipdef *chip;
254 struct i2c_adapter *adapter; 252 struct i2c_adapter *adapter;
255 struct led_platform_data *pdata; 253 struct led_platform_data *pdata;
254 int i, err;
256 255
257 chip = &pca955x_chipdefs[id->driver_data]; 256 chip = &pca955x_chipdefs[id->driver_data];
258 adapter = to_i2c_adapter(client->dev.parent); 257 adapter = to_i2c_adapter(client->dev.parent);
@@ -282,43 +281,41 @@ static int __devinit pca955x_probe(struct i2c_client *client,
282 } 281 }
283 } 282 }
284 283
284 pca955x = kzalloc(sizeof(*pca955x) * chip->bits, GFP_KERNEL);
285 if (!pca955x)
286 return -ENOMEM;
287
288 i2c_set_clientdata(client, pca955x);
289
285 for (i = 0; i < chip->bits; i++) { 290 for (i = 0; i < chip->bits; i++) {
286 pca955x = kzalloc(sizeof(struct pca955x_led), GFP_KERNEL); 291 pca955x[i].chipdef = chip;
287 if (!pca955x) { 292 pca955x[i].client = client;
288 err = -ENOMEM; 293 pca955x[i].led_num = i;
289 goto exit;
290 }
291 294
292 pca955x->chipdef = chip;
293 pca955x->client = client;
294 pca955x->led_num = i;
295 /* Platform data can specify LED names and default triggers */ 295 /* Platform data can specify LED names and default triggers */
296 if (pdata) { 296 if (pdata) {
297 if (pdata->leds[i].name) 297 if (pdata->leds[i].name)
298 snprintf(pca955x->name, 32, "pca955x:%s", 298 snprintf(pca955x[i].name,
299 pdata->leds[i].name); 299 sizeof(pca955x[i].name), "pca955x:%s",
300 pdata->leds[i].name);
300 if (pdata->leds[i].default_trigger) 301 if (pdata->leds[i].default_trigger)
301 pca955x->led_cdev.default_trigger = 302 pca955x[i].led_cdev.default_trigger =
302 pdata->leds[i].default_trigger; 303 pdata->leds[i].default_trigger;
303 } else { 304 } else {
304 snprintf(pca955x->name, 32, "pca955x:%d", i); 305 snprintf(pca955x[i].name, sizeof(pca955x[i].name),
306 "pca955x:%d", i);
305 } 307 }
306 spin_lock_init(&pca955x->lock);
307 308
308 pca955x->led_cdev.name = pca955x->name; 309 spin_lock_init(&pca955x[i].lock);
309 pca955x->led_cdev.brightness_set =
310 pca955x_led_set;
311 310
312 /* 311 pca955x[i].led_cdev.name = pca955x[i].name;
313 * Client data is a pointer to the _first_ pca955x_led 312 pca955x[i].led_cdev.brightness_set = pca955x_led_set;
314 * struct
315 */
316 if (i == 0)
317 i2c_set_clientdata(client, pca955x);
318 313
319 INIT_WORK(&(pca955x->work), pca955x_led_work); 314 INIT_WORK(&pca955x[i].work, pca955x_led_work);
320 315
321 led_classdev_register(&client->dev, &(pca955x->led_cdev)); 316 err = led_classdev_register(&client->dev, &pca955x[i].led_cdev);
317 if (err < 0)
318 goto exit;
322 } 319 }
323 320
324 /* Turn off LEDs */ 321 /* Turn off LEDs */
@@ -336,23 +333,32 @@ static int __devinit pca955x_probe(struct i2c_client *client,
336 pca955x_write_psc(client, 1, 0); 333 pca955x_write_psc(client, 1, 0);
337 334
338 return 0; 335 return 0;
336
339exit: 337exit:
338 while (i--) {
339 led_classdev_unregister(&pca955x[i].led_cdev);
340 cancel_work_sync(&pca955x[i].work);
341 }
342
343 kfree(pca955x);
344 i2c_set_clientdata(client, NULL);
345
340 return err; 346 return err;
341} 347}
342 348
343static int __devexit pca955x_remove(struct i2c_client *client) 349static int __devexit pca955x_remove(struct i2c_client *client)
344{ 350{
345 struct pca955x_led *pca955x = i2c_get_clientdata(client); 351 struct pca955x_led *pca955x = i2c_get_clientdata(client);
346 int leds = pca955x->chipdef->bits;
347 int i; 352 int i;
348 353
349 for (i = 0; i < leds; i++) { 354 for (i = 0; i < pca955x->chipdef->bits; i++) {
350 led_classdev_unregister(&(pca955x->led_cdev)); 355 led_classdev_unregister(&pca955x[i].led_cdev);
351 cancel_work_sync(&(pca955x->work)); 356 cancel_work_sync(&pca955x[i].work);
352 kfree(pca955x);
353 pca955x = pca955x + 1;
354 } 357 }
355 358
359 kfree(pca955x);
360 i2c_set_clientdata(client, NULL);
361
356 return 0; 362 return 0;
357} 363}
358 364
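The pca955x rework above replaces the per-LED kzalloc() with one array sized chip->bits, checks the result of led_classdev_register(), and on failure unwinds only the LEDs already registered via a "while (i--)" loop before freeing the array. A minimal standalone sketch of that register-then-unwind idiom, with stand-in register/unregister helpers (not the LED class API):

#include <stdio.h>
#include <stdlib.h>

struct led { int num; };

static int register_led(struct led *led)
{
	/* Pretend the third LED fails, to exercise the unwind path. */
	if (led->num == 2)
		return -1;
	printf("registered led %d\n", led->num);
	return 0;
}

static void unregister_led(struct led *led)
{
	printf("unregistered led %d\n", led->num);
}

int main(void)
{
	int nr_leds = 4, i, err;
	struct led *leds = calloc(nr_leds, sizeof(*leds));

	if (!leds)
		return 1;

	for (i = 0; i < nr_leds; i++) {
		leds[i].num = i;
		err = register_led(&leds[i]);
		if (err)
			goto exit;
	}
	free(leds);
	return 0;

exit:
	/* Unwind exactly the LEDs registered before the failure. */
	while (i--)
		unregister_led(&leds[i]);
	free(leds);
	return 1;
}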
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 13956437bc81..682ef9e6acd3 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -333,7 +333,6 @@ static void crypt_convert_init(struct crypt_config *cc,
333 ctx->idx_out = bio_out ? bio_out->bi_idx : 0; 333 ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
334 ctx->sector = sector + cc->iv_offset; 334 ctx->sector = sector + cc->iv_offset;
335 init_completion(&ctx->restart); 335 init_completion(&ctx->restart);
336 atomic_set(&ctx->pending, 1);
337} 336}
338 337
339static int crypt_convert_block(struct crypt_config *cc, 338static int crypt_convert_block(struct crypt_config *cc,
@@ -408,6 +407,8 @@ static int crypt_convert(struct crypt_config *cc,
408{ 407{
409 int r; 408 int r;
410 409
410 atomic_set(&ctx->pending, 1);
411
411 while(ctx->idx_in < ctx->bio_in->bi_vcnt && 412 while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
412 ctx->idx_out < ctx->bio_out->bi_vcnt) { 413 ctx->idx_out < ctx->bio_out->bi_vcnt) {
413 414
@@ -456,9 +457,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
456/* 457/*
457 * Generate a new unfragmented bio with the given size 458 * Generate a new unfragmented bio with the given size
458 * This should never violate the device limitations 459 * This should never violate the device limitations
459 * May return a smaller bio when running out of pages 460 * May return a smaller bio when running out of pages, indicated by
461 * *out_of_pages set to 1.
460 */ 462 */
461static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) 463static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
464 unsigned *out_of_pages)
462{ 465{
463 struct crypt_config *cc = io->target->private; 466 struct crypt_config *cc = io->target->private;
464 struct bio *clone; 467 struct bio *clone;
@@ -472,11 +475,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
472 return NULL; 475 return NULL;
473 476
474 clone_init(io, clone); 477 clone_init(io, clone);
478 *out_of_pages = 0;
475 479
476 for (i = 0; i < nr_iovecs; i++) { 480 for (i = 0; i < nr_iovecs; i++) {
477 page = mempool_alloc(cc->page_pool, gfp_mask); 481 page = mempool_alloc(cc->page_pool, gfp_mask);
478 if (!page) 482 if (!page) {
483 *out_of_pages = 1;
479 break; 484 break;
485 }
480 486
481 /* 487 /*
482 * if additional pages cannot be allocated without waiting, 488 * if additional pages cannot be allocated without waiting,
@@ -517,6 +523,27 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
517 } 523 }
518} 524}
519 525
526static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
527 struct bio *bio, sector_t sector)
528{
529 struct crypt_config *cc = ti->private;
530 struct dm_crypt_io *io;
531
532 io = mempool_alloc(cc->io_pool, GFP_NOIO);
533 io->target = ti;
534 io->base_bio = bio;
535 io->sector = sector;
536 io->error = 0;
537 atomic_set(&io->pending, 0);
538
539 return io;
540}
541
542static void crypt_inc_pending(struct dm_crypt_io *io)
543{
544 atomic_inc(&io->pending);
545}
546
520/* 547/*
521 * One of the bios was finished. Check for completion of 548 * One of the bios was finished. Check for completion of
522 * the whole request and correctly clean up the buffer. 549 * the whole request and correctly clean up the buffer.
@@ -591,7 +618,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
591 struct bio *base_bio = io->base_bio; 618 struct bio *base_bio = io->base_bio;
592 struct bio *clone; 619 struct bio *clone;
593 620
594 atomic_inc(&io->pending); 621 crypt_inc_pending(io);
595 622
596 /* 623 /*
597 * The block layer might modify the bvec array, so always 624 * The block layer might modify the bvec array, so always
@@ -653,6 +680,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
653 crypt_free_buffer_pages(cc, clone); 680 crypt_free_buffer_pages(cc, clone);
654 bio_put(clone); 681 bio_put(clone);
655 io->error = -EIO; 682 io->error = -EIO;
683 crypt_dec_pending(io);
656 return; 684 return;
657 } 685 }
658 686
@@ -664,28 +692,34 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
664 692
665 if (async) 693 if (async)
666 kcryptd_queue_io(io); 694 kcryptd_queue_io(io);
667 else { 695 else
668 atomic_inc(&io->pending);
669 generic_make_request(clone); 696 generic_make_request(clone);
670 }
671} 697}
672 698
673static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io) 699static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
674{ 700{
675 struct crypt_config *cc = io->target->private; 701 struct crypt_config *cc = io->target->private;
676 struct bio *clone; 702 struct bio *clone;
703 int crypt_finished;
704 unsigned out_of_pages = 0;
677 unsigned remaining = io->base_bio->bi_size; 705 unsigned remaining = io->base_bio->bi_size;
678 int r; 706 int r;
679 707
680 /* 708 /*
709 * Prevent io from disappearing until this function completes.
710 */
711 crypt_inc_pending(io);
712 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
713
714 /*
681 * The allocated buffers can be smaller than the whole bio, 715 * The allocated buffers can be smaller than the whole bio,
682 * so repeat the whole process until all the data can be handled. 716 * so repeat the whole process until all the data can be handled.
683 */ 717 */
684 while (remaining) { 718 while (remaining) {
685 clone = crypt_alloc_buffer(io, remaining); 719 clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
686 if (unlikely(!clone)) { 720 if (unlikely(!clone)) {
687 io->error = -ENOMEM; 721 io->error = -ENOMEM;
688 return; 722 break;
689 } 723 }
690 724
691 io->ctx.bio_out = clone; 725 io->ctx.bio_out = clone;
@@ -693,37 +727,32 @@ static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
693 727
694 remaining -= clone->bi_size; 728 remaining -= clone->bi_size;
695 729
730 crypt_inc_pending(io);
696 r = crypt_convert(cc, &io->ctx); 731 r = crypt_convert(cc, &io->ctx);
732 crypt_finished = atomic_dec_and_test(&io->ctx.pending);
697 733
698 if (atomic_dec_and_test(&io->ctx.pending)) { 734 /* Encryption was already finished, submit io now */
699 /* processed, no running async crypto */ 735 if (crypt_finished) {
700 kcryptd_crypt_write_io_submit(io, r, 0); 736 kcryptd_crypt_write_io_submit(io, r, 0);
701 if (unlikely(r < 0))
702 return;
703 } else
704 atomic_inc(&io->pending);
705 737
706 /* out of memory -> run queues */ 738 /*
707 if (unlikely(remaining)) { 739 * If there was an error, do not try next fragments.
708 /* wait for async crypto then reinitialize pending */ 740 * For async, error is processed in async handler.
709 wait_event(cc->writeq, !atomic_read(&io->ctx.pending)); 741 */
710 atomic_set(&io->ctx.pending, 1); 742 if (unlikely(r < 0))
711 congestion_wait(WRITE, HZ/100); 743 break;
712 } 744 }
713 }
714}
715 745
716static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) 746 /*
717{ 747 * Out of memory -> run queues
718 struct crypt_config *cc = io->target->private; 748 * But don't wait if split was due to the io size restriction
719 749 */
720 /* 750 if (unlikely(out_of_pages))
721 * Prevent io from disappearing until this function completes. 751 congestion_wait(WRITE, HZ/100);
722 */
723 atomic_inc(&io->pending);
724 752
725 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector); 753 if (unlikely(remaining))
726 kcryptd_crypt_write_convert_loop(io); 754 wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
755 }
727 756
728 crypt_dec_pending(io); 757 crypt_dec_pending(io);
729} 758}
@@ -741,7 +770,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
741 struct crypt_config *cc = io->target->private; 770 struct crypt_config *cc = io->target->private;
742 int r = 0; 771 int r = 0;
743 772
744 atomic_inc(&io->pending); 773 crypt_inc_pending(io);
745 774
746 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, 775 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
747 io->sector); 776 io->sector);
@@ -1108,15 +1137,9 @@ static void crypt_dtr(struct dm_target *ti)
1108static int crypt_map(struct dm_target *ti, struct bio *bio, 1137static int crypt_map(struct dm_target *ti, struct bio *bio,
1109 union map_info *map_context) 1138 union map_info *map_context)
1110{ 1139{
1111 struct crypt_config *cc = ti->private;
1112 struct dm_crypt_io *io; 1140 struct dm_crypt_io *io;
1113 1141
1114 io = mempool_alloc(cc->io_pool, GFP_NOIO); 1142 io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
1115 io->target = ti;
1116 io->base_bio = bio;
1117 io->sector = bio->bi_sector - ti->begin;
1118 io->error = 0;
1119 atomic_set(&io->pending, 0);
1120 1143
1121 if (bio_data_dir(io->base_bio) == READ) 1144 if (bio_data_dir(io->base_bio) == READ)
1122 kcryptd_queue_io(io); 1145 kcryptd_queue_io(io);
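
The dm-crypt hunks above move io allocation into crypt_io_alloc() and funnel every lifetime reference through crypt_inc_pending()/crypt_dec_pending(), so the write path can keep submitting buffer fragments while asynchronous crypto completions are still outstanding. Below is a minimal standalone sketch of that reference-counting pattern (C11 atomics, hypothetical names, not the driver code itself): the submitter's initial reference keeps the object alive until the last fragment drops its own.

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct io {                        /* stand-in for struct dm_crypt_io   */
		atomic_int pending;        /* references held by in-flight work */
		int error;
	};

	static struct io *io_alloc(void)
	{
		struct io *io = calloc(1, sizeof(*io));

		atomic_store(&io->pending, 0);
		return io;
	}

	static void io_get(struct io *io)      /* crypt_inc_pending() analogue */
	{
		atomic_fetch_add(&io->pending, 1);
	}

	static void io_put(struct io *io)      /* crypt_dec_pending() analogue */
	{
		if (atomic_fetch_sub(&io->pending, 1) == 1) {
			printf("all fragments done, error=%d\n", io->error);
			free(io);
		}
	}

	int main(void)
	{
		struct io *io = io_alloc();
		int frag;

		io_get(io);             /* submitter's reference                 */
		for (frag = 0; frag < 3; frag++) {
			io_get(io);     /* one reference per submitted fragment  */
			io_put(io);     /* completion of that fragment           */
		}
		io_put(io);             /* drop the submitter's reference last   */
		return 0;
	}
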
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 41f408068a7c..769ab677f8e0 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -108,12 +108,12 @@ struct pstore {
108 * Used to keep track of which metadata area the data in 108 * Used to keep track of which metadata area the data in
109 * 'chunk' refers to. 109 * 'chunk' refers to.
110 */ 110 */
111 uint32_t current_area; 111 chunk_t current_area;
112 112
113 /* 113 /*
114 * The next free chunk for an exception. 114 * The next free chunk for an exception.
115 */ 115 */
116 uint32_t next_free; 116 chunk_t next_free;
117 117
118 /* 118 /*
119 * The index of next free exception in the current 119 * The index of next free exception in the current
@@ -175,7 +175,7 @@ static void do_metadata(struct work_struct *work)
175/* 175/*
176 * Read or write a chunk aligned and sized block of data from a device. 176 * Read or write a chunk aligned and sized block of data from a device.
177 */ 177 */
178static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata) 178static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
179{ 179{
180 struct dm_io_region where = { 180 struct dm_io_region where = {
181 .bdev = ps->snap->cow->bdev, 181 .bdev = ps->snap->cow->bdev,
@@ -209,16 +209,23 @@ static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
209} 209}
210 210
211/* 211/*
212 * Convert a metadata area index to a chunk index.
213 */
214static chunk_t area_location(struct pstore *ps, chunk_t area)
215{
216 return 1 + ((ps->exceptions_per_area + 1) * area);
217}
218
219/*
212 * Read or write a metadata area. Remembering to skip the first 220 * Read or write a metadata area. Remembering to skip the first
213 * chunk which holds the header. 221 * chunk which holds the header.
214 */ 222 */
215static int area_io(struct pstore *ps, uint32_t area, int rw) 223static int area_io(struct pstore *ps, chunk_t area, int rw)
216{ 224{
217 int r; 225 int r;
218 uint32_t chunk; 226 chunk_t chunk;
219 227
220 /* convert a metadata area index to a chunk index */ 228 chunk = area_location(ps, area);
221 chunk = 1 + ((ps->exceptions_per_area + 1) * area);
222 229
223 r = chunk_io(ps, chunk, rw, 0); 230 r = chunk_io(ps, chunk, rw, 0);
224 if (r) 231 if (r)
@@ -228,7 +235,7 @@ static int area_io(struct pstore *ps, uint32_t area, int rw)
228 return 0; 235 return 0;
229} 236}
230 237
231static int zero_area(struct pstore *ps, uint32_t area) 238static int zero_area(struct pstore *ps, chunk_t area)
232{ 239{
233 memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT); 240 memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
234 return area_io(ps, area, WRITE); 241 return area_io(ps, area, WRITE);
@@ -404,7 +411,7 @@ static int insert_exceptions(struct pstore *ps, int *full)
404 411
405static int read_exceptions(struct pstore *ps) 412static int read_exceptions(struct pstore *ps)
406{ 413{
407 uint32_t area; 414 chunk_t area;
408 int r, full = 1; 415 int r, full = 1;
409 416
410 /* 417 /*
@@ -517,6 +524,7 @@ static int persistent_prepare(struct exception_store *store,
517{ 524{
518 struct pstore *ps = get_info(store); 525 struct pstore *ps = get_info(store);
519 uint32_t stride; 526 uint32_t stride;
527 chunk_t next_free;
520 sector_t size = get_dev_size(store->snap->cow->bdev); 528 sector_t size = get_dev_size(store->snap->cow->bdev);
521 529
522 /* Is there enough room ? */ 530 /* Is there enough room ? */
@@ -530,7 +538,8 @@ static int persistent_prepare(struct exception_store *store,
530 * into account the location of the metadata chunks. 538 * into account the location of the metadata chunks.
531 */ 539 */
532 stride = (ps->exceptions_per_area + 1); 540 stride = (ps->exceptions_per_area + 1);
533 if ((++ps->next_free % stride) == 1) 541 next_free = ++ps->next_free;
542 if (sector_div(next_free, stride) == 1)
534 ps->next_free++; 543 ps->next_free++;
535 544
536 atomic_inc(&ps->pending_count); 545 atomic_inc(&ps->pending_count);
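
Widening the persistent exception-store indices from uint32_t to chunk_t means the modulo in persistent_prepare() has to go through sector_div(), which divides a 64-bit value in place and returns the remainder; a plain % on a 64-bit type pulls in compiler helpers the 32-bit kernel does not provide. The standalone model below walks the same arithmetic, the new area_location() helper plus the skip over each metadata chunk, with hypothetical numbers:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Model of area_location(): metadata area 'area' begins one chunk
	 * past the header, and each area spans exceptions_per_area + 1 chunks.
	 */
	static uint64_t area_location(uint64_t exceptions_per_area, uint64_t area)
	{
		return 1 + (exceptions_per_area + 1) * area;
	}

	int main(void)
	{
		uint64_t exceptions_per_area = 255;	/* hypothetical layout */
		uint64_t stride = exceptions_per_area + 1;
		uint64_t next_free = 0;
		int i;

		for (i = 0; i < 3; i++) {
			next_free++;
			/*
			 * The kernel uses sector_div(next_free, stride), which
			 * divides in place and returns the remainder; plain %
			 * is fine on a 64-bit type in userspace.
			 */
			if (next_free % stride == 1)	/* hit a metadata chunk */
				next_free++;		/* skip over it         */
			printf("area %d at chunk %llu, next_free now %llu\n", i,
			       (unsigned long long)area_location(exceptions_per_area, i),
			       (unsigned long long)next_free);
		}
		return 0;
	}
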
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index b262c0042de3..dca401dc70a0 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -426,7 +426,7 @@ static int list_devices(struct dm_ioctl *param, size_t param_size)
426 old_nl->next = (uint32_t) ((void *) nl - 426 old_nl->next = (uint32_t) ((void *) nl -
427 (void *) old_nl); 427 (void *) old_nl);
428 disk = dm_disk(hc->md); 428 disk = dm_disk(hc->md);
429 nl->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor)); 429 nl->dev = huge_encode_dev(disk_devt(disk));
430 nl->next = 0; 430 nl->next = 0;
431 strcpy(nl->name, hc->name); 431 strcpy(nl->name, hc->name);
432 432
@@ -539,7 +539,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
539 if (dm_suspended(md)) 539 if (dm_suspended(md))
540 param->flags |= DM_SUSPEND_FLAG; 540 param->flags |= DM_SUSPEND_FLAG;
541 541
542 param->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor)); 542 param->dev = huge_encode_dev(disk_devt(disk));
543 543
544 /* 544 /*
545 * Yes, this will be out of date by the time it gets back 545 * Yes, this will be out of date by the time it gets back
@@ -548,7 +548,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
548 */ 548 */
549 param->open_count = dm_open_count(md); 549 param->open_count = dm_open_count(md);
550 550
551 if (disk->policy) 551 if (get_disk_ro(disk))
552 param->flags |= DM_READONLY_FLAG; 552 param->flags |= DM_READONLY_FLAG;
553 553
554 param->event_nr = dm_get_event_nr(md); 554 param->event_nr = dm_get_event_nr(md);
@@ -1131,7 +1131,7 @@ static void retrieve_deps(struct dm_table *table,
1131 unsigned int count = 0; 1131 unsigned int count = 0;
1132 struct list_head *tmp; 1132 struct list_head *tmp;
1133 size_t len, needed; 1133 size_t len, needed;
1134 struct dm_dev *dd; 1134 struct dm_dev_internal *dd;
1135 struct dm_target_deps *deps; 1135 struct dm_target_deps *deps;
1136 1136
1137 deps = get_result_buffer(param, param_size, &len); 1137 deps = get_result_buffer(param, param_size, &len);
@@ -1157,7 +1157,7 @@ static void retrieve_deps(struct dm_table *table,
1157 deps->count = count; 1157 deps->count = count;
1158 count = 0; 1158 count = 0;
1159 list_for_each_entry (dd, dm_table_get_devices(table), list) 1159 list_for_each_entry (dd, dm_table_get_devices(table), list)
1160 deps->dev[count++] = huge_encode_dev(dd->bdev->bd_dev); 1160 deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev);
1161 1161
1162 param->data_size = param->data_start + needed; 1162 param->data_size = param->data_start + needed;
1163} 1163}
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 71dd65aa31b6..103304c1e3b0 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -30,9 +30,11 @@ struct pgpath {
30 struct list_head list; 30 struct list_head list;
31 31
32 struct priority_group *pg; /* Owning PG */ 32 struct priority_group *pg; /* Owning PG */
33 unsigned is_active; /* Path status */
33 unsigned fail_count; /* Cumulative failure count */ 34 unsigned fail_count; /* Cumulative failure count */
34 35
35 struct dm_path path; 36 struct dm_path path;
37 struct work_struct deactivate_path;
36}; 38};
37 39
38#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path) 40#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -63,6 +65,7 @@ struct multipath {
63 65
64 const char *hw_handler_name; 66 const char *hw_handler_name;
65 struct work_struct activate_path; 67 struct work_struct activate_path;
68 struct pgpath *pgpath_to_activate;
66 unsigned nr_priority_groups; 69 unsigned nr_priority_groups;
67 struct list_head priority_groups; 70 struct list_head priority_groups;
68 unsigned pg_init_required; /* pg_init needs calling? */ 71 unsigned pg_init_required; /* pg_init needs calling? */
@@ -111,6 +114,7 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
111static void process_queued_ios(struct work_struct *work); 114static void process_queued_ios(struct work_struct *work);
112static void trigger_event(struct work_struct *work); 115static void trigger_event(struct work_struct *work);
113static void activate_path(struct work_struct *work); 116static void activate_path(struct work_struct *work);
117static void deactivate_path(struct work_struct *work);
114 118
115 119
116/*----------------------------------------------- 120/*-----------------------------------------------
@@ -121,8 +125,10 @@ static struct pgpath *alloc_pgpath(void)
121{ 125{
122 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL); 126 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
123 127
124 if (pgpath) 128 if (pgpath) {
125 pgpath->path.is_active = 1; 129 pgpath->is_active = 1;
130 INIT_WORK(&pgpath->deactivate_path, deactivate_path);
131 }
126 132
127 return pgpath; 133 return pgpath;
128} 134}
@@ -132,6 +138,14 @@ static void free_pgpath(struct pgpath *pgpath)
132 kfree(pgpath); 138 kfree(pgpath);
133} 139}
134 140
141static void deactivate_path(struct work_struct *work)
142{
143 struct pgpath *pgpath =
144 container_of(work, struct pgpath, deactivate_path);
145
146 blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
147}
148
135static struct priority_group *alloc_priority_group(void) 149static struct priority_group *alloc_priority_group(void)
136{ 150{
137 struct priority_group *pg; 151 struct priority_group *pg;
@@ -146,6 +160,7 @@ static struct priority_group *alloc_priority_group(void)
146 160
147static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) 161static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
148{ 162{
163 unsigned long flags;
149 struct pgpath *pgpath, *tmp; 164 struct pgpath *pgpath, *tmp;
150 struct multipath *m = ti->private; 165 struct multipath *m = ti->private;
151 166
@@ -154,6 +169,10 @@ static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
154 if (m->hw_handler_name) 169 if (m->hw_handler_name)
155 scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev)); 170 scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
156 dm_put_device(ti, pgpath->path.dev); 171 dm_put_device(ti, pgpath->path.dev);
172 spin_lock_irqsave(&m->lock, flags);
173 if (m->pgpath_to_activate == pgpath)
174 m->pgpath_to_activate = NULL;
175 spin_unlock_irqrestore(&m->lock, flags);
157 free_pgpath(pgpath); 176 free_pgpath(pgpath);
158 } 177 }
159} 178}
@@ -421,6 +440,7 @@ static void process_queued_ios(struct work_struct *work)
421 __choose_pgpath(m); 440 __choose_pgpath(m);
422 441
423 pgpath = m->current_pgpath; 442 pgpath = m->current_pgpath;
443 m->pgpath_to_activate = m->current_pgpath;
424 444
425 if ((pgpath && !m->queue_io) || 445 if ((pgpath && !m->queue_io) ||
426 (!pgpath && !m->queue_if_no_path)) 446 (!pgpath && !m->queue_if_no_path))
@@ -556,12 +576,12 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
556 /* we need at least a path arg */ 576 /* we need at least a path arg */
557 if (as->argc < 1) { 577 if (as->argc < 1) {
558 ti->error = "no device given"; 578 ti->error = "no device given";
559 return NULL; 579 return ERR_PTR(-EINVAL);
560 } 580 }
561 581
562 p = alloc_pgpath(); 582 p = alloc_pgpath();
563 if (!p) 583 if (!p)
564 return NULL; 584 return ERR_PTR(-ENOMEM);
565 585
566 r = dm_get_device(ti, shift(as), ti->begin, ti->len, 586 r = dm_get_device(ti, shift(as), ti->begin, ti->len,
567 dm_table_get_mode(ti->table), &p->path.dev); 587 dm_table_get_mode(ti->table), &p->path.dev);
@@ -589,7 +609,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
589 609
590 bad: 610 bad:
591 free_pgpath(p); 611 free_pgpath(p);
592 return NULL; 612 return ERR_PTR(r);
593} 613}
594 614
595static struct priority_group *parse_priority_group(struct arg_set *as, 615static struct priority_group *parse_priority_group(struct arg_set *as,
@@ -607,14 +627,14 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
607 627
608 if (as->argc < 2) { 628 if (as->argc < 2) {
609 as->argc = 0; 629 as->argc = 0;
610 ti->error = "not enough priority group aruments"; 630 ti->error = "not enough priority group arguments";
611 return NULL; 631 return ERR_PTR(-EINVAL);
612 } 632 }
613 633
614 pg = alloc_priority_group(); 634 pg = alloc_priority_group();
615 if (!pg) { 635 if (!pg) {
616 ti->error = "couldn't allocate priority group"; 636 ti->error = "couldn't allocate priority group";
617 return NULL; 637 return ERR_PTR(-ENOMEM);
618 } 638 }
619 pg->m = m; 639 pg->m = m;
620 640
@@ -647,8 +667,10 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
647 path_args.argv = as->argv; 667 path_args.argv = as->argv;
648 668
649 pgpath = parse_path(&path_args, &pg->ps, ti); 669 pgpath = parse_path(&path_args, &pg->ps, ti);
650 if (!pgpath) 670 if (IS_ERR(pgpath)) {
671 r = PTR_ERR(pgpath);
651 goto bad; 672 goto bad;
673 }
652 674
653 pgpath->pg = pg; 675 pgpath->pg = pg;
654 list_add_tail(&pgpath->list, &pg->pgpaths); 676 list_add_tail(&pgpath->list, &pg->pgpaths);
@@ -659,7 +681,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
659 681
660 bad: 682 bad:
661 free_priority_group(pg, ti); 683 free_priority_group(pg, ti);
662 return NULL; 684 return ERR_PTR(r);
663} 685}
664 686
665static int parse_hw_handler(struct arg_set *as, struct multipath *m) 687static int parse_hw_handler(struct arg_set *as, struct multipath *m)
@@ -778,8 +800,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
778 struct priority_group *pg; 800 struct priority_group *pg;
779 801
780 pg = parse_priority_group(&as, m); 802 pg = parse_priority_group(&as, m);
781 if (!pg) { 803 if (IS_ERR(pg)) {
782 r = -EINVAL; 804 r = PTR_ERR(pg);
783 goto bad; 805 goto bad;
784 } 806 }
785 807
@@ -845,13 +867,13 @@ static int fail_path(struct pgpath *pgpath)
845 867
846 spin_lock_irqsave(&m->lock, flags); 868 spin_lock_irqsave(&m->lock, flags);
847 869
848 if (!pgpath->path.is_active) 870 if (!pgpath->is_active)
849 goto out; 871 goto out;
850 872
851 DMWARN("Failing path %s.", pgpath->path.dev->name); 873 DMWARN("Failing path %s.", pgpath->path.dev->name);
852 874
853 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); 875 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
854 pgpath->path.is_active = 0; 876 pgpath->is_active = 0;
855 pgpath->fail_count++; 877 pgpath->fail_count++;
856 878
857 m->nr_valid_paths--; 879 m->nr_valid_paths--;
@@ -863,6 +885,7 @@ static int fail_path(struct pgpath *pgpath)
863 pgpath->path.dev->name, m->nr_valid_paths); 885 pgpath->path.dev->name, m->nr_valid_paths);
864 886
865 queue_work(kmultipathd, &m->trigger_event); 887 queue_work(kmultipathd, &m->trigger_event);
888 queue_work(kmultipathd, &pgpath->deactivate_path);
866 889
867out: 890out:
868 spin_unlock_irqrestore(&m->lock, flags); 891 spin_unlock_irqrestore(&m->lock, flags);
@@ -881,7 +904,7 @@ static int reinstate_path(struct pgpath *pgpath)
881 904
882 spin_lock_irqsave(&m->lock, flags); 905 spin_lock_irqsave(&m->lock, flags);
883 906
884 if (pgpath->path.is_active) 907 if (pgpath->is_active)
885 goto out; 908 goto out;
886 909
887 if (!pgpath->pg->ps.type->reinstate_path) { 910 if (!pgpath->pg->ps.type->reinstate_path) {
@@ -895,7 +918,7 @@ static int reinstate_path(struct pgpath *pgpath)
895 if (r) 918 if (r)
896 goto out; 919 goto out;
897 920
898 pgpath->path.is_active = 1; 921 pgpath->is_active = 1;
899 922
900 m->current_pgpath = NULL; 923 m->current_pgpath = NULL;
901 if (!m->nr_valid_paths++ && m->queue_size) 924 if (!m->nr_valid_paths++ && m->queue_size)
@@ -1093,8 +1116,15 @@ static void activate_path(struct work_struct *work)
1093 int ret; 1116 int ret;
1094 struct multipath *m = 1117 struct multipath *m =
1095 container_of(work, struct multipath, activate_path); 1118 container_of(work, struct multipath, activate_path);
1096 struct dm_path *path = &m->current_pgpath->path; 1119 struct dm_path *path;
1120 unsigned long flags;
1097 1121
1122 spin_lock_irqsave(&m->lock, flags);
1123 path = &m->pgpath_to_activate->path;
1124 m->pgpath_to_activate = NULL;
1125 spin_unlock_irqrestore(&m->lock, flags);
1126 if (!path)
1127 return;
1098 ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev)); 1128 ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
1099 pg_init_done(path, ret); 1129 pg_init_done(path, ret);
1100} 1130}
@@ -1276,7 +1306,7 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
1276 1306
1277 list_for_each_entry(p, &pg->pgpaths, list) { 1307 list_for_each_entry(p, &pg->pgpaths, list) {
1278 DMEMIT("%s %s %u ", p->path.dev->name, 1308 DMEMIT("%s %s %u ", p->path.dev->name,
1279 p->path.is_active ? "A" : "F", 1309 p->is_active ? "A" : "F",
1280 p->fail_count); 1310 p->fail_count);
1281 if (pg->ps.type->status) 1311 if (pg->ps.type->status)
1282 sz += pg->ps.type->status(&pg->ps, 1312 sz += pg->ps.type->status(&pg->ps,
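
parse_path() and parse_priority_group() now return ERR_PTR() values instead of a bare NULL, so multipath_ctr() can propagate the real errno (-EINVAL for bad arguments, -ENOMEM for allocation failures) rather than collapsing every failure to -EINVAL. The standalone sketch below models the convention with userspace stand-ins for the <linux/err.h> helpers; the names are illustrative, not the driver's:

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Userspace stand-ins for the kernel's <linux/err.h> helpers. */
	static void *ERR_PTR(long error)      { return (void *)error; }
	static long PTR_ERR(const void *ptr)  { return (long)ptr; }
	static int IS_ERR(const void *ptr)
	{
		return (uintptr_t)ptr >= (uintptr_t)-4095;
	}

	struct path_stub { int dummy; };   /* illustrative, not struct pgpath */

	static struct path_stub *parse_one(const char *arg)
	{
		struct path_stub *p;

		if (!arg)
			return ERR_PTR(-EINVAL);   /* bad arguments      */
		p = malloc(sizeof(*p));
		if (!p)
			return ERR_PTR(-ENOMEM);   /* allocation failure */
		return p;
	}

	int main(void)
	{
		struct path_stub *p = parse_one(NULL);

		if (IS_ERR(p))                 /* caller propagates the errno */
			printf("constructor would fail with %ld\n", PTR_ERR(p));
		else
			free(p);
		return 0;
	}
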
diff --git a/drivers/md/dm-mpath.h b/drivers/md/dm-mpath.h
index c198b856a452..e230f7196259 100644
--- a/drivers/md/dm-mpath.h
+++ b/drivers/md/dm-mpath.h
@@ -13,8 +13,6 @@ struct dm_dev;
13 13
14struct dm_path { 14struct dm_path {
15 struct dm_dev *dev; /* Read-only */ 15 struct dm_dev *dev; /* Read-only */
16 unsigned is_active; /* Read-only */
17
18 void *pscontext; /* For path-selector use */ 16 void *pscontext; /* For path-selector use */
19}; 17};
20 18
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ff05fe893083..29913e42c4ab 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -842,7 +842,9 @@ static int recover(struct mirror_set *ms, struct region *reg)
842 } 842 }
843 843
844 /* hand to kcopyd */ 844 /* hand to kcopyd */
845 set_bit(DM_KCOPYD_IGNORE_ERROR, &flags); 845 if (!errors_handled(ms))
846 set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
847
846 r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, 848 r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
847 flags, recovery_complete, reg); 849 flags, recovery_complete, reg);
848 850
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 4de90ab3968b..b745d8ac625b 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -284,8 +284,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
284 284
285 memset(major_minor, 0, sizeof(major_minor)); 285 memset(major_minor, 0, sizeof(major_minor));
286 sprintf(major_minor, "%d:%d", 286 sprintf(major_minor, "%d:%d",
287 bio->bi_bdev->bd_disk->major, 287 MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
288 bio->bi_bdev->bd_disk->first_minor); 288 MINOR(disk_devt(bio->bi_bdev->bd_disk)));
289 289
290 /* 290 /*
291 * Test to see which stripe drive triggered the event 291 * Test to see which stripe drive triggered the event
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 61f441409234..a740a6950f59 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -250,7 +250,8 @@ static void free_devices(struct list_head *devices)
250 struct list_head *tmp, *next; 250 struct list_head *tmp, *next;
251 251
252 list_for_each_safe(tmp, next, devices) { 252 list_for_each_safe(tmp, next, devices) {
253 struct dm_dev *dd = list_entry(tmp, struct dm_dev, list); 253 struct dm_dev_internal *dd =
254 list_entry(tmp, struct dm_dev_internal, list);
254 kfree(dd); 255 kfree(dd);
255 } 256 }
256} 257}
@@ -327,12 +328,12 @@ static int lookup_device(const char *path, dev_t *dev)
327/* 328/*
328 * See if we've already got a device in the list. 329 * See if we've already got a device in the list.
329 */ 330 */
330static struct dm_dev *find_device(struct list_head *l, dev_t dev) 331static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
331{ 332{
332 struct dm_dev *dd; 333 struct dm_dev_internal *dd;
333 334
334 list_for_each_entry (dd, l, list) 335 list_for_each_entry (dd, l, list)
335 if (dd->bdev->bd_dev == dev) 336 if (dd->dm_dev.bdev->bd_dev == dev)
336 return dd; 337 return dd;
337 338
338 return NULL; 339 return NULL;
@@ -341,45 +342,47 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev)
341/* 342/*
342 * Open a device so we can use it as a map destination. 343 * Open a device so we can use it as a map destination.
343 */ 344 */
344static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md) 345static int open_dev(struct dm_dev_internal *d, dev_t dev,
346 struct mapped_device *md)
345{ 347{
346 static char *_claim_ptr = "I belong to device-mapper"; 348 static char *_claim_ptr = "I belong to device-mapper";
347 struct block_device *bdev; 349 struct block_device *bdev;
348 350
349 int r; 351 int r;
350 352
351 BUG_ON(d->bdev); 353 BUG_ON(d->dm_dev.bdev);
352 354
353 bdev = open_by_devnum(dev, d->mode); 355 bdev = open_by_devnum(dev, d->dm_dev.mode);
354 if (IS_ERR(bdev)) 356 if (IS_ERR(bdev))
355 return PTR_ERR(bdev); 357 return PTR_ERR(bdev);
356 r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md)); 358 r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
357 if (r) 359 if (r)
358 blkdev_put(bdev); 360 blkdev_put(bdev);
359 else 361 else
360 d->bdev = bdev; 362 d->dm_dev.bdev = bdev;
361 return r; 363 return r;
362} 364}
363 365
364/* 366/*
365 * Close a device that we've been using. 367 * Close a device that we've been using.
366 */ 368 */
367static void close_dev(struct dm_dev *d, struct mapped_device *md) 369static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
368{ 370{
369 if (!d->bdev) 371 if (!d->dm_dev.bdev)
370 return; 372 return;
371 373
372 bd_release_from_disk(d->bdev, dm_disk(md)); 374 bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
373 blkdev_put(d->bdev); 375 blkdev_put(d->dm_dev.bdev);
374 d->bdev = NULL; 376 d->dm_dev.bdev = NULL;
375} 377}
376 378
377/* 379/*
378 * If possible, this checks an area of a destination device is valid. 380 * If possible, this checks an area of a destination device is valid.
379 */ 381 */
380static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len) 382static int check_device_area(struct dm_dev_internal *dd, sector_t start,
383 sector_t len)
381{ 384{
382 sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT; 385 sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;
383 386
384 if (!dev_size) 387 if (!dev_size)
385 return 1; 388 return 1;
@@ -392,16 +395,17 @@ static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
392 * careful to leave things as they were if we fail to reopen the 395 * careful to leave things as they were if we fail to reopen the
393 * device. 396 * device.
394 */ 397 */
395static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md) 398static int upgrade_mode(struct dm_dev_internal *dd, int new_mode,
399 struct mapped_device *md)
396{ 400{
397 int r; 401 int r;
398 struct dm_dev dd_copy; 402 struct dm_dev_internal dd_copy;
399 dev_t dev = dd->bdev->bd_dev; 403 dev_t dev = dd->dm_dev.bdev->bd_dev;
400 404
401 dd_copy = *dd; 405 dd_copy = *dd;
402 406
403 dd->mode |= new_mode; 407 dd->dm_dev.mode |= new_mode;
404 dd->bdev = NULL; 408 dd->dm_dev.bdev = NULL;
405 r = open_dev(dd, dev, md); 409 r = open_dev(dd, dev, md);
406 if (!r) 410 if (!r)
407 close_dev(&dd_copy, md); 411 close_dev(&dd_copy, md);
@@ -421,7 +425,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
421{ 425{
422 int r; 426 int r;
423 dev_t uninitialized_var(dev); 427 dev_t uninitialized_var(dev);
424 struct dm_dev *dd; 428 struct dm_dev_internal *dd;
425 unsigned int major, minor; 429 unsigned int major, minor;
426 430
427 BUG_ON(!t); 431 BUG_ON(!t);
@@ -443,20 +447,20 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
443 if (!dd) 447 if (!dd)
444 return -ENOMEM; 448 return -ENOMEM;
445 449
446 dd->mode = mode; 450 dd->dm_dev.mode = mode;
447 dd->bdev = NULL; 451 dd->dm_dev.bdev = NULL;
448 452
449 if ((r = open_dev(dd, dev, t->md))) { 453 if ((r = open_dev(dd, dev, t->md))) {
450 kfree(dd); 454 kfree(dd);
451 return r; 455 return r;
452 } 456 }
453 457
454 format_dev_t(dd->name, dev); 458 format_dev_t(dd->dm_dev.name, dev);
455 459
456 atomic_set(&dd->count, 0); 460 atomic_set(&dd->count, 0);
457 list_add(&dd->list, &t->devices); 461 list_add(&dd->list, &t->devices);
458 462
459 } else if (dd->mode != (mode | dd->mode)) { 463 } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
460 r = upgrade_mode(dd, mode, t->md); 464 r = upgrade_mode(dd, mode, t->md);
461 if (r) 465 if (r)
462 return r; 466 return r;
@@ -465,11 +469,11 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
465 469
466 if (!check_device_area(dd, start, len)) { 470 if (!check_device_area(dd, start, len)) {
467 DMWARN("device %s too small for target", path); 471 DMWARN("device %s too small for target", path);
468 dm_put_device(ti, dd); 472 dm_put_device(ti, &dd->dm_dev);
469 return -EINVAL; 473 return -EINVAL;
470 } 474 }
471 475
472 *result = dd; 476 *result = &dd->dm_dev;
473 477
474 return 0; 478 return 0;
475} 479}
@@ -478,6 +482,13 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
478{ 482{
479 struct request_queue *q = bdev_get_queue(bdev); 483 struct request_queue *q = bdev_get_queue(bdev);
480 struct io_restrictions *rs = &ti->limits; 484 struct io_restrictions *rs = &ti->limits;
485 char b[BDEVNAME_SIZE];
486
487 if (unlikely(!q)) {
488 DMWARN("%s: Cannot set limits for nonexistent device %s",
489 dm_device_name(ti->table->md), bdevname(bdev, b));
490 return;
491 }
481 492
482 /* 493 /*
483 * Combine the device limits low. 494 * Combine the device limits low.
@@ -540,8 +551,11 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
540/* 551/*
541 * Decrement a devices use count and remove it if necessary. 552 * Decrement a devices use count and remove it if necessary.
542 */ 553 */
543void dm_put_device(struct dm_target *ti, struct dm_dev *dd) 554void dm_put_device(struct dm_target *ti, struct dm_dev *d)
544{ 555{
556 struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
557 dm_dev);
558
545 if (atomic_dec_and_test(&dd->count)) { 559 if (atomic_dec_and_test(&dd->count)) {
546 close_dev(dd, ti->table->md); 560 close_dev(dd, ti->table->md);
547 list_del(&dd->list); 561 list_del(&dd->list);
@@ -937,13 +951,20 @@ int dm_table_resume_targets(struct dm_table *t)
937 951
938int dm_table_any_congested(struct dm_table *t, int bdi_bits) 952int dm_table_any_congested(struct dm_table *t, int bdi_bits)
939{ 953{
940 struct dm_dev *dd; 954 struct dm_dev_internal *dd;
941 struct list_head *devices = dm_table_get_devices(t); 955 struct list_head *devices = dm_table_get_devices(t);
942 int r = 0; 956 int r = 0;
943 957
944 list_for_each_entry(dd, devices, list) { 958 list_for_each_entry(dd, devices, list) {
945 struct request_queue *q = bdev_get_queue(dd->bdev); 959 struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
946 r |= bdi_congested(&q->backing_dev_info, bdi_bits); 960 char b[BDEVNAME_SIZE];
961
962 if (likely(q))
963 r |= bdi_congested(&q->backing_dev_info, bdi_bits);
964 else
965 DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
966 dm_device_name(t->md),
967 bdevname(dd->dm_dev.bdev, b));
947 } 968 }
948 969
949 return r; 970 return r;
@@ -951,13 +972,19 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
951 972
952void dm_table_unplug_all(struct dm_table *t) 973void dm_table_unplug_all(struct dm_table *t)
953{ 974{
954 struct dm_dev *dd; 975 struct dm_dev_internal *dd;
955 struct list_head *devices = dm_table_get_devices(t); 976 struct list_head *devices = dm_table_get_devices(t);
956 977
957 list_for_each_entry(dd, devices, list) { 978 list_for_each_entry(dd, devices, list) {
958 struct request_queue *q = bdev_get_queue(dd->bdev); 979 struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
959 980 char b[BDEVNAME_SIZE];
960 blk_unplug(q); 981
982 if (likely(q))
983 blk_unplug(q);
984 else
985 DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
986 dm_device_name(t->md),
987 bdevname(dd->dm_dev.bdev, b));
961 } 988 }
962} 989}
963 990
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index bca448e11878..327de03a5bdf 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -377,13 +377,14 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
377static void start_io_acct(struct dm_io *io) 377static void start_io_acct(struct dm_io *io)
378{ 378{
379 struct mapped_device *md = io->md; 379 struct mapped_device *md = io->md;
380 int cpu;
380 381
381 io->start_time = jiffies; 382 io->start_time = jiffies;
382 383
383 preempt_disable(); 384 cpu = part_stat_lock();
384 disk_round_stats(dm_disk(md)); 385 part_round_stats(cpu, &dm_disk(md)->part0);
385 preempt_enable(); 386 part_stat_unlock();
386 dm_disk(md)->in_flight = atomic_inc_return(&md->pending); 387 dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
387} 388}
388 389
389static int end_io_acct(struct dm_io *io) 390static int end_io_acct(struct dm_io *io)
@@ -391,15 +392,16 @@ static int end_io_acct(struct dm_io *io)
391 struct mapped_device *md = io->md; 392 struct mapped_device *md = io->md;
392 struct bio *bio = io->bio; 393 struct bio *bio = io->bio;
393 unsigned long duration = jiffies - io->start_time; 394 unsigned long duration = jiffies - io->start_time;
394 int pending; 395 int pending, cpu;
395 int rw = bio_data_dir(bio); 396 int rw = bio_data_dir(bio);
396 397
397 preempt_disable(); 398 cpu = part_stat_lock();
398 disk_round_stats(dm_disk(md)); 399 part_round_stats(cpu, &dm_disk(md)->part0);
399 preempt_enable(); 400 part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
400 dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending); 401 part_stat_unlock();
401 402
402 disk_stat_add(dm_disk(md), ticks[rw], duration); 403 dm_disk(md)->part0.in_flight = pending =
404 atomic_dec_return(&md->pending);
403 405
404 return !pending; 406 return !pending;
405} 407}
@@ -837,12 +839,14 @@ static int dm_merge_bvec(struct request_queue *q,
837 struct dm_table *map = dm_get_table(md); 839 struct dm_table *map = dm_get_table(md);
838 struct dm_target *ti; 840 struct dm_target *ti;
839 sector_t max_sectors; 841 sector_t max_sectors;
840 int max_size; 842 int max_size = 0;
841 843
842 if (unlikely(!map)) 844 if (unlikely(!map))
843 return 0; 845 goto out;
844 846
845 ti = dm_table_find_target(map, bvm->bi_sector); 847 ti = dm_table_find_target(map, bvm->bi_sector);
848 if (!dm_target_is_valid(ti))
849 goto out_table;
846 850
847 /* 851 /*
848 * Find maximum amount of I/O that won't need splitting 852 * Find maximum amount of I/O that won't need splitting
@@ -861,14 +865,16 @@ static int dm_merge_bvec(struct request_queue *q,
861 if (max_size && ti->type->merge) 865 if (max_size && ti->type->merge)
862 max_size = ti->type->merge(ti, bvm, biovec, max_size); 866 max_size = ti->type->merge(ti, bvm, biovec, max_size);
863 867
868out_table:
869 dm_table_put(map);
870
871out:
864 /* 872 /*
865 * Always allow an entire first page 873 * Always allow an entire first page
866 */ 874 */
867 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) 875 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
868 max_size = biovec->bv_len; 876 max_size = biovec->bv_len;
869 877
870 dm_table_put(map);
871
872 return max_size; 878 return max_size;
873} 879}
874 880
@@ -881,6 +887,7 @@ static int dm_request(struct request_queue *q, struct bio *bio)
881 int r = -EIO; 887 int r = -EIO;
882 int rw = bio_data_dir(bio); 888 int rw = bio_data_dir(bio);
883 struct mapped_device *md = q->queuedata; 889 struct mapped_device *md = q->queuedata;
890 int cpu;
884 891
885 /* 892 /*
886 * There is no use in forwarding any barrier request since we can't 893 * There is no use in forwarding any barrier request since we can't
@@ -893,8 +900,10 @@ static int dm_request(struct request_queue *q, struct bio *bio)
893 900
894 down_read(&md->io_lock); 901 down_read(&md->io_lock);
895 902
896 disk_stat_inc(dm_disk(md), ios[rw]); 903 cpu = part_stat_lock();
897 disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio)); 904 part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
905 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
906 part_stat_unlock();
898 907
899 /* 908 /*
900 * If we're suspended we have to queue 909 * If we're suspended we have to queue
@@ -1142,7 +1151,7 @@ static void unlock_fs(struct mapped_device *md);
1142 1151
1143static void free_dev(struct mapped_device *md) 1152static void free_dev(struct mapped_device *md)
1144{ 1153{
1145 int minor = md->disk->first_minor; 1154 int minor = MINOR(disk_devt(md->disk));
1146 1155
1147 if (md->suspended_bdev) { 1156 if (md->suspended_bdev) {
1148 unlock_fs(md); 1157 unlock_fs(md);
@@ -1178,7 +1187,7 @@ static void event_callback(void *context)
1178 list_splice_init(&md->uevent_list, &uevents); 1187 list_splice_init(&md->uevent_list, &uevents);
1179 spin_unlock_irqrestore(&md->uevent_lock, flags); 1188 spin_unlock_irqrestore(&md->uevent_lock, flags);
1180 1189
1181 dm_send_uevents(&uevents, &md->disk->dev.kobj); 1190 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
1182 1191
1183 atomic_inc(&md->event_nr); 1192 atomic_inc(&md->event_nr);
1184 wake_up(&md->eventq); 1193 wake_up(&md->eventq);
@@ -1263,7 +1272,7 @@ static struct mapped_device *dm_find_md(dev_t dev)
1263 1272
1264 md = idr_find(&_minor_idr, minor); 1273 md = idr_find(&_minor_idr, minor);
1265 if (md && (md == MINOR_ALLOCED || 1274 if (md && (md == MINOR_ALLOCED ||
1266 (dm_disk(md)->first_minor != minor) || 1275 (MINOR(disk_devt(dm_disk(md))) != minor) ||
1267 test_bit(DMF_FREEING, &md->flags))) { 1276 test_bit(DMF_FREEING, &md->flags))) {
1268 md = NULL; 1277 md = NULL;
1269 goto out; 1278 goto out;
@@ -1314,7 +1323,8 @@ void dm_put(struct mapped_device *md)
1314 1323
1315 if (atomic_dec_and_lock(&md->holders, &_minor_lock)) { 1324 if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
1316 map = dm_get_table(md); 1325 map = dm_get_table(md);
1317 idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor); 1326 idr_replace(&_minor_idr, MINOR_ALLOCED,
1327 MINOR(disk_devt(dm_disk(md))));
1318 set_bit(DMF_FREEING, &md->flags); 1328 set_bit(DMF_FREEING, &md->flags);
1319 spin_unlock(&_minor_lock); 1329 spin_unlock(&_minor_lock);
1320 if (!dm_suspended(md)) { 1330 if (!dm_suspended(md)) {
@@ -1634,7 +1644,7 @@ out:
1634 *---------------------------------------------------------------*/ 1644 *---------------------------------------------------------------*/
1635void dm_kobject_uevent(struct mapped_device *md) 1645void dm_kobject_uevent(struct mapped_device *md)
1636{ 1646{
1637 kobject_uevent(&md->disk->dev.kobj, KOBJ_CHANGE); 1647 kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
1638} 1648}
1639 1649
1640uint32_t dm_next_uevent_seq(struct mapped_device *md) 1650uint32_t dm_next_uevent_seq(struct mapped_device *md)
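
dm.c switches from the old disk_stat_* helpers to the per-partition, per-cpu part_stat_* API: statistics now live in the gendisk's part0, and each update is bracketed by part_stat_lock()/part_stat_unlock(), which pins the current CPU for the duration. The same sequence recurs below in linear.c, multipath.c, raid0.c, raid1.c and raid10.c; a kernel-style sketch of the pattern, using only the helpers visible in this patch:

	/* Per-cpu I/O accounting against the whole-disk partition (part0). */
	static void account_bio(struct gendisk *disk, struct bio *bio)
	{
		int rw = bio_data_dir(bio);
		int cpu;

		cpu = part_stat_lock();           /* pins this CPU */
		part_stat_inc(cpu, &disk->part0, ios[rw]);
		part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
		part_stat_unlock();
	}
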
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 1e59a0b0a78a..cd189da2b2fa 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -25,13 +25,10 @@
25/* 25/*
26 * List of devices that a metadevice uses and should open/close. 26 * List of devices that a metadevice uses and should open/close.
27 */ 27 */
28struct dm_dev { 28struct dm_dev_internal {
29 struct list_head list; 29 struct list_head list;
30
31 atomic_t count; 30 atomic_t count;
32 int mode; 31 struct dm_dev dm_dev;
33 struct block_device *bdev;
34 char name[16];
35}; 32};
36 33
37struct dm_table; 34struct dm_table;
@@ -49,7 +46,6 @@ void dm_table_presuspend_targets(struct dm_table *t);
49void dm_table_postsuspend_targets(struct dm_table *t); 46void dm_table_postsuspend_targets(struct dm_table *t);
50int dm_table_resume_targets(struct dm_table *t); 47int dm_table_resume_targets(struct dm_table *t);
51int dm_table_any_congested(struct dm_table *t, int bdi_bits); 48int dm_table_any_congested(struct dm_table *t, int bdi_bits);
52void dm_table_unplug_all(struct dm_table *t);
53 49
54/* 50/*
55 * To check the return value from dm_table_find_target(). 51 * To check the return value from dm_table_find_target().
@@ -93,8 +89,6 @@ void dm_linear_exit(void);
93int dm_stripe_init(void); 89int dm_stripe_init(void);
94void dm_stripe_exit(void); 90void dm_stripe_exit(void);
95 91
96void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
97union map_info *dm_get_mapinfo(struct bio *bio);
98int dm_open_count(struct mapped_device *md); 92int dm_open_count(struct mapped_device *md);
99int dm_lock_for_deletion(struct mapped_device *md); 93int dm_lock_for_deletion(struct mapped_device *md);
100 94
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index b1eebf88c209..b9cbee688fae 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -318,14 +318,18 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
318 mddev_t *mddev = q->queuedata; 318 mddev_t *mddev = q->queuedata;
319 dev_info_t *tmp_dev; 319 dev_info_t *tmp_dev;
320 sector_t block; 320 sector_t block;
321 int cpu;
321 322
322 if (unlikely(bio_barrier(bio))) { 323 if (unlikely(bio_barrier(bio))) {
323 bio_endio(bio, -EOPNOTSUPP); 324 bio_endio(bio, -EOPNOTSUPP);
324 return 0; 325 return 0;
325 } 326 }
326 327
327 disk_stat_inc(mddev->gendisk, ios[rw]); 328 cpu = part_stat_lock();
328 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 329 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
330 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
331 bio_sectors(bio));
332 part_stat_unlock();
329 333
330 tmp_dev = which_dev(mddev, bio->bi_sector); 334 tmp_dev = which_dev(mddev, bio->bi_sector);
331 block = bio->bi_sector >> 1; 335 block = bio->bi_sector >> 1;
@@ -349,7 +353,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
349 * split it. 353 * split it.
350 */ 354 */
351 struct bio_pair *bp; 355 struct bio_pair *bp;
352 bp = bio_split(bio, bio_split_pool, 356 bp = bio_split(bio,
353 ((tmp_dev->offset + tmp_dev->size)<<1) - bio->bi_sector); 357 ((tmp_dev->offset + tmp_dev->size)<<1) - bio->bi_sector);
354 if (linear_make_request(q, &bp->bio1)) 358 if (linear_make_request(q, &bp->bio1))
355 generic_make_request(&bp->bio1); 359 generic_make_request(&bp->bio1);
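
bio_split() loses its mempool argument in this series: callers such as linear.c, raid0.c and raid10.c now pass only the bio and the sector count at which to split, and the block layer supplies the pool itself. A kernel-style sketch of splitting at a chunk boundary, on the assumption that the pair is dropped with bio_pair_release() once both halves are submitted, as the md callers do:

	/* Split a bio that straddles a chunk boundary and submit both halves. */
	static void split_at_chunk(struct bio *bio, unsigned chunk_sects)
	{
		struct bio_pair *bp;

		/* sectors left in the chunk the bio starts in */
		bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
		generic_make_request(&bp->bio1);
		generic_make_request(&bp->bio2);
		bio_pair_release(bp);
	}
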
diff --git a/drivers/md/md.c b/drivers/md/md.c
index deeac4b44173..0a3a4bdcd4af 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1464,10 +1464,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1464 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) 1464 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
1465 goto fail; 1465 goto fail;
1466 1466
1467 if (rdev->bdev->bd_part) 1467 ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
1468 ko = &rdev->bdev->bd_part->dev.kobj;
1469 else
1470 ko = &rdev->bdev->bd_disk->dev.kobj;
1471 if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) { 1468 if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
1472 kobject_del(&rdev->kobj); 1469 kobject_del(&rdev->kobj);
1473 goto fail; 1470 goto fail;
@@ -3470,8 +3467,8 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
3470 disk->queue = mddev->queue; 3467 disk->queue = mddev->queue;
3471 add_disk(disk); 3468 add_disk(disk);
3472 mddev->gendisk = disk; 3469 mddev->gendisk = disk;
3473 error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj, 3470 error = kobject_init_and_add(&mddev->kobj, &md_ktype,
3474 "%s", "md"); 3471 &disk_to_dev(disk)->kobj, "%s", "md");
3475 mutex_unlock(&disks_mutex); 3472 mutex_unlock(&disks_mutex);
3476 if (error) 3473 if (error)
3477 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 3474 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
@@ -3761,7 +3758,7 @@ static int do_md_run(mddev_t * mddev)
3761 sysfs_notify(&mddev->kobj, NULL, "array_state"); 3758 sysfs_notify(&mddev->kobj, NULL, "array_state");
3762 sysfs_notify(&mddev->kobj, NULL, "sync_action"); 3759 sysfs_notify(&mddev->kobj, NULL, "sync_action");
3763 sysfs_notify(&mddev->kobj, NULL, "degraded"); 3760 sysfs_notify(&mddev->kobj, NULL, "degraded");
3764 kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE); 3761 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
3765 return 0; 3762 return 0;
3766} 3763}
3767 3764
@@ -5549,8 +5546,8 @@ static int is_mddev_idle(mddev_t *mddev)
5549 rcu_read_lock(); 5546 rcu_read_lock();
5550 rdev_for_each_rcu(rdev, mddev) { 5547 rdev_for_each_rcu(rdev, mddev) {
5551 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 5548 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
5552 curr_events = disk_stat_read(disk, sectors[0]) + 5549 curr_events = part_stat_read(&disk->part0, sectors[0]) +
5553 disk_stat_read(disk, sectors[1]) - 5550 part_stat_read(&disk->part0, sectors[1]) -
5554 atomic_read(&disk->sync_io); 5551 atomic_read(&disk->sync_io);
5555 /* sync IO will cause sync_io to increase before the disk_stats 5552 /* sync IO will cause sync_io to increase before the disk_stats
5556 * as sync_io is counted when a request starts, and 5553 * as sync_io is counted when a request starts, and
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index c4779ccba1c3..8bb8794129b3 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -147,6 +147,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
147 struct multipath_bh * mp_bh; 147 struct multipath_bh * mp_bh;
148 struct multipath_info *multipath; 148 struct multipath_info *multipath;
149 const int rw = bio_data_dir(bio); 149 const int rw = bio_data_dir(bio);
150 int cpu;
150 151
151 if (unlikely(bio_barrier(bio))) { 152 if (unlikely(bio_barrier(bio))) {
152 bio_endio(bio, -EOPNOTSUPP); 153 bio_endio(bio, -EOPNOTSUPP);
@@ -158,8 +159,11 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
158 mp_bh->master_bio = bio; 159 mp_bh->master_bio = bio;
159 mp_bh->mddev = mddev; 160 mp_bh->mddev = mddev;
160 161
161 disk_stat_inc(mddev->gendisk, ios[rw]); 162 cpu = part_stat_lock();
162 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 163 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
164 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
165 bio_sectors(bio));
166 part_stat_unlock();
163 167
164 mp_bh->path = multipath_map(conf); 168 mp_bh->path = multipath_map(conf);
165 if (mp_bh->path < 0) { 169 if (mp_bh->path < 0) {
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 183610635661..53508a8a981d 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -399,14 +399,18 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
399 sector_t chunk; 399 sector_t chunk;
400 sector_t block, rsect; 400 sector_t block, rsect;
401 const int rw = bio_data_dir(bio); 401 const int rw = bio_data_dir(bio);
402 int cpu;
402 403
403 if (unlikely(bio_barrier(bio))) { 404 if (unlikely(bio_barrier(bio))) {
404 bio_endio(bio, -EOPNOTSUPP); 405 bio_endio(bio, -EOPNOTSUPP);
405 return 0; 406 return 0;
406 } 407 }
407 408
408 disk_stat_inc(mddev->gendisk, ios[rw]); 409 cpu = part_stat_lock();
409 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 410 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
411 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
412 bio_sectors(bio));
413 part_stat_unlock();
410 414
411 chunk_size = mddev->chunk_size >> 10; 415 chunk_size = mddev->chunk_size >> 10;
412 chunk_sects = mddev->chunk_size >> 9; 416 chunk_sects = mddev->chunk_size >> 9;
@@ -423,7 +427,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
423 /* This is a one page bio that upper layers 427 /* This is a one page bio that upper layers
424 * refuse to split for us, so we need to split it. 428 * refuse to split for us, so we need to split it.
425 */ 429 */
426 bp = bio_split(bio, bio_split_pool, chunk_sects - (bio->bi_sector & (chunk_sects - 1)) ); 430 bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
427 if (raid0_make_request(q, &bp->bio1)) 431 if (raid0_make_request(q, &bp->bio1))
428 generic_make_request(&bp->bio1); 432 generic_make_request(&bp->bio1);
429 if (raid0_make_request(q, &bp->bio2)) 433 if (raid0_make_request(q, &bp->bio2))
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 03a5ab705c20..b9764429d856 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -779,7 +779,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
779 struct page **behind_pages = NULL; 779 struct page **behind_pages = NULL;
780 const int rw = bio_data_dir(bio); 780 const int rw = bio_data_dir(bio);
781 const int do_sync = bio_sync(bio); 781 const int do_sync = bio_sync(bio);
782 int do_barriers; 782 int cpu, do_barriers;
783 mdk_rdev_t *blocked_rdev; 783 mdk_rdev_t *blocked_rdev;
784 784
785 /* 785 /*
@@ -804,8 +804,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
804 804
805 bitmap = mddev->bitmap; 805 bitmap = mddev->bitmap;
806 806
807 disk_stat_inc(mddev->gendisk, ios[rw]); 807 cpu = part_stat_lock();
808 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 808 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
809 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
810 bio_sectors(bio));
811 part_stat_unlock();
809 812
810 /* 813 /*
811 * make_request() can abort the operation when READA is being 814 * make_request() can abort the operation when READA is being
@@ -1302,9 +1305,6 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
1302 sbio->bi_size = r1_bio->sectors << 9; 1305 sbio->bi_size = r1_bio->sectors << 9;
1303 sbio->bi_idx = 0; 1306 sbio->bi_idx = 0;
1304 sbio->bi_phys_segments = 0; 1307 sbio->bi_phys_segments = 0;
1305 sbio->bi_hw_segments = 0;
1306 sbio->bi_hw_front_size = 0;
1307 sbio->bi_hw_back_size = 0;
1308 sbio->bi_flags &= ~(BIO_POOL_MASK - 1); 1308 sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1309 sbio->bi_flags |= 1 << BIO_UPTODATE; 1309 sbio->bi_flags |= 1 << BIO_UPTODATE;
1310 sbio->bi_next = NULL; 1310 sbio->bi_next = NULL;
@@ -1790,7 +1790,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1790 bio->bi_vcnt = 0; 1790 bio->bi_vcnt = 0;
1791 bio->bi_idx = 0; 1791 bio->bi_idx = 0;
1792 bio->bi_phys_segments = 0; 1792 bio->bi_phys_segments = 0;
1793 bio->bi_hw_segments = 0;
1794 bio->bi_size = 0; 1793 bio->bi_size = 0;
1795 bio->bi_end_io = NULL; 1794 bio->bi_end_io = NULL;
1796 bio->bi_private = NULL; 1795 bio->bi_private = NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e34cd0e62473..8bdc9bfc2887 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -789,6 +789,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
789 mirror_info_t *mirror; 789 mirror_info_t *mirror;
790 r10bio_t *r10_bio; 790 r10bio_t *r10_bio;
791 struct bio *read_bio; 791 struct bio *read_bio;
792 int cpu;
792 int i; 793 int i;
793 int chunk_sects = conf->chunk_mask + 1; 794 int chunk_sects = conf->chunk_mask + 1;
794 const int rw = bio_data_dir(bio); 795 const int rw = bio_data_dir(bio);
@@ -816,7 +817,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
816 /* This is a one page bio that upper layers 817 /* This is a one page bio that upper layers
817 * refuse to split for us, so we need to split it. 818 * refuse to split for us, so we need to split it.
818 */ 819 */
819 bp = bio_split(bio, bio_split_pool, 820 bp = bio_split(bio,
820 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) ); 821 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
821 if (make_request(q, &bp->bio1)) 822 if (make_request(q, &bp->bio1))
822 generic_make_request(&bp->bio1); 823 generic_make_request(&bp->bio1);
@@ -843,8 +844,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
843 */ 844 */
844 wait_barrier(conf); 845 wait_barrier(conf);
845 846
846 disk_stat_inc(mddev->gendisk, ios[rw]); 847 cpu = part_stat_lock();
847 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 848 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
849 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
850 bio_sectors(bio));
851 part_stat_unlock();
848 852
849 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); 853 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
850 854
@@ -1345,9 +1349,6 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1345 tbio->bi_size = r10_bio->sectors << 9; 1349 tbio->bi_size = r10_bio->sectors << 9;
1346 tbio->bi_idx = 0; 1350 tbio->bi_idx = 0;
1347 tbio->bi_phys_segments = 0; 1351 tbio->bi_phys_segments = 0;
1348 tbio->bi_hw_segments = 0;
1349 tbio->bi_hw_front_size = 0;
1350 tbio->bi_hw_back_size = 0;
1351 tbio->bi_flags &= ~(BIO_POOL_MASK - 1); 1352 tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1352 tbio->bi_flags |= 1 << BIO_UPTODATE; 1353 tbio->bi_flags |= 1 << BIO_UPTODATE;
1353 tbio->bi_next = NULL; 1354 tbio->bi_next = NULL;
@@ -1947,7 +1948,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1947 bio->bi_vcnt = 0; 1948 bio->bi_vcnt = 0;
1948 bio->bi_idx = 0; 1949 bio->bi_idx = 0;
1949 bio->bi_phys_segments = 0; 1950 bio->bi_phys_segments = 0;
1950 bio->bi_hw_segments = 0;
1951 bio->bi_size = 0; 1951 bio->bi_size = 0;
1952 } 1952 }
1953 1953
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 224de022e7c5..ae16794bef20 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -101,6 +101,40 @@
101const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256))); 101const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
102#endif 102#endif
103 103
104/*
105 * We maintain a biased count of active stripes in the bottom 16 bits of
106 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
107 */
108static inline int raid5_bi_phys_segments(struct bio *bio)
109{
110 return bio->bi_phys_segments & 0xffff;
111}
112
113static inline int raid5_bi_hw_segments(struct bio *bio)
114{
115 return (bio->bi_phys_segments >> 16) & 0xffff;
116}
117
118static inline int raid5_dec_bi_phys_segments(struct bio *bio)
119{
120 --bio->bi_phys_segments;
121 return raid5_bi_phys_segments(bio);
122}
123
124static inline int raid5_dec_bi_hw_segments(struct bio *bio)
125{
126 unsigned short val = raid5_bi_hw_segments(bio);
127
128 --val;
129 bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
130 return val;
131}
132
133static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
134{
 135 bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
136}
137
104static inline int raid6_next_disk(int disk, int raid_disks) 138static inline int raid6_next_disk(int disk, int raid_disks)
105{ 139{
106 disk++; 140 disk++;
@@ -507,7 +541,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
507 while (rbi && rbi->bi_sector < 541 while (rbi && rbi->bi_sector <
508 dev->sector + STRIPE_SECTORS) { 542 dev->sector + STRIPE_SECTORS) {
509 rbi2 = r5_next_bio(rbi, dev->sector); 543 rbi2 = r5_next_bio(rbi, dev->sector);
510 if (--rbi->bi_phys_segments == 0) { 544 if (!raid5_dec_bi_phys_segments(rbi)) {
511 rbi->bi_next = return_bi; 545 rbi->bi_next = return_bi;
512 return_bi = rbi; 546 return_bi = rbi;
513 } 547 }
@@ -1725,7 +1759,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
1725 if (*bip) 1759 if (*bip)
1726 bi->bi_next = *bip; 1760 bi->bi_next = *bip;
1727 *bip = bi; 1761 *bip = bi;
1728 bi->bi_phys_segments ++; 1762 bi->bi_phys_segments++;
1729 spin_unlock_irq(&conf->device_lock); 1763 spin_unlock_irq(&conf->device_lock);
1730 spin_unlock(&sh->lock); 1764 spin_unlock(&sh->lock);
1731 1765
@@ -1819,7 +1853,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
1819 sh->dev[i].sector + STRIPE_SECTORS) { 1853 sh->dev[i].sector + STRIPE_SECTORS) {
1820 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 1854 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1821 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1855 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1822 if (--bi->bi_phys_segments == 0) { 1856 if (!raid5_dec_bi_phys_segments(bi)) {
1823 md_write_end(conf->mddev); 1857 md_write_end(conf->mddev);
1824 bi->bi_next = *return_bi; 1858 bi->bi_next = *return_bi;
1825 *return_bi = bi; 1859 *return_bi = bi;
@@ -1834,7 +1868,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
1834 sh->dev[i].sector + STRIPE_SECTORS) { 1868 sh->dev[i].sector + STRIPE_SECTORS) {
1835 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 1869 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
1836 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1870 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1837 if (--bi->bi_phys_segments == 0) { 1871 if (!raid5_dec_bi_phys_segments(bi)) {
1838 md_write_end(conf->mddev); 1872 md_write_end(conf->mddev);
1839 bi->bi_next = *return_bi; 1873 bi->bi_next = *return_bi;
1840 *return_bi = bi; 1874 *return_bi = bi;
@@ -1858,7 +1892,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
1858 struct bio *nextbi = 1892 struct bio *nextbi =
1859 r5_next_bio(bi, sh->dev[i].sector); 1893 r5_next_bio(bi, sh->dev[i].sector);
1860 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1894 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1861 if (--bi->bi_phys_segments == 0) { 1895 if (!raid5_dec_bi_phys_segments(bi)) {
1862 bi->bi_next = *return_bi; 1896 bi->bi_next = *return_bi;
1863 *return_bi = bi; 1897 *return_bi = bi;
1864 } 1898 }
@@ -2033,7 +2067,7 @@ static void handle_stripe_clean_event(raid5_conf_t *conf,
2033 while (wbi && wbi->bi_sector < 2067 while (wbi && wbi->bi_sector <
2034 dev->sector + STRIPE_SECTORS) { 2068 dev->sector + STRIPE_SECTORS) {
2035 wbi2 = r5_next_bio(wbi, dev->sector); 2069 wbi2 = r5_next_bio(wbi, dev->sector);
2036 if (--wbi->bi_phys_segments == 0) { 2070 if (!raid5_dec_bi_phys_segments(wbi)) {
2037 md_write_end(conf->mddev); 2071 md_write_end(conf->mddev);
2038 wbi->bi_next = *return_bi; 2072 wbi->bi_next = *return_bi;
2039 *return_bi = wbi; 2073 *return_bi = wbi;
@@ -2814,7 +2848,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2814 copy_data(0, rbi, dev->page, dev->sector); 2848 copy_data(0, rbi, dev->page, dev->sector);
2815 rbi2 = r5_next_bio(rbi, dev->sector); 2849 rbi2 = r5_next_bio(rbi, dev->sector);
2816 spin_lock_irq(&conf->device_lock); 2850 spin_lock_irq(&conf->device_lock);
2817 if (--rbi->bi_phys_segments == 0) { 2851 if (!raid5_dec_bi_phys_segments(rbi)) {
2818 rbi->bi_next = return_bi; 2852 rbi->bi_next = return_bi;
2819 return_bi = rbi; 2853 return_bi = rbi;
2820 } 2854 }
@@ -3155,8 +3189,11 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
3155 if(bi) { 3189 if(bi) {
3156 conf->retry_read_aligned_list = bi->bi_next; 3190 conf->retry_read_aligned_list = bi->bi_next;
3157 bi->bi_next = NULL; 3191 bi->bi_next = NULL;
3192 /*
 3193 * this sets the active stripe count to 1 and the processed
 3194 * stripe count to zero (upper 16 bits)
3195 */
3158 bi->bi_phys_segments = 1; /* biased count of active stripes */ 3196 bi->bi_phys_segments = 1; /* biased count of active stripes */
3159 bi->bi_hw_segments = 0; /* count of processed stripes */
3160 } 3197 }
3161 3198
3162 return bi; 3199 return bi;
@@ -3206,8 +3243,7 @@ static int bio_fits_rdev(struct bio *bi)
3206 if ((bi->bi_size>>9) > q->max_sectors) 3243 if ((bi->bi_size>>9) > q->max_sectors)
3207 return 0; 3244 return 0;
3208 blk_recount_segments(q, bi); 3245 blk_recount_segments(q, bi);
3209 if (bi->bi_phys_segments > q->max_phys_segments || 3246 if (bi->bi_phys_segments > q->max_phys_segments)
3210 bi->bi_hw_segments > q->max_hw_segments)
3211 return 0; 3247 return 0;
3212 3248
3213 if (q->merge_bvec_fn) 3249 if (q->merge_bvec_fn)
@@ -3351,7 +3387,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
3351 sector_t logical_sector, last_sector; 3387 sector_t logical_sector, last_sector;
3352 struct stripe_head *sh; 3388 struct stripe_head *sh;
3353 const int rw = bio_data_dir(bi); 3389 const int rw = bio_data_dir(bi);
3354 int remaining; 3390 int cpu, remaining;
3355 3391
3356 if (unlikely(bio_barrier(bi))) { 3392 if (unlikely(bio_barrier(bi))) {
3357 bio_endio(bi, -EOPNOTSUPP); 3393 bio_endio(bi, -EOPNOTSUPP);
@@ -3360,8 +3396,11 @@ static int make_request(struct request_queue *q, struct bio * bi)
3360 3396
3361 md_write_start(mddev, bi); 3397 md_write_start(mddev, bi);
3362 3398
3363 disk_stat_inc(mddev->gendisk, ios[rw]); 3399 cpu = part_stat_lock();
3364 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi)); 3400 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
3401 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
3402 bio_sectors(bi));
3403 part_stat_unlock();
3365 3404
3366 if (rw == READ && 3405 if (rw == READ &&
3367 mddev->reshape_position == MaxSector && 3406 mddev->reshape_position == MaxSector &&
@@ -3468,7 +3507,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
3468 3507
3469 } 3508 }
3470 spin_lock_irq(&conf->device_lock); 3509 spin_lock_irq(&conf->device_lock);
3471 remaining = --bi->bi_phys_segments; 3510 remaining = raid5_dec_bi_phys_segments(bi);
3472 spin_unlock_irq(&conf->device_lock); 3511 spin_unlock_irq(&conf->device_lock);
3473 if (remaining == 0) { 3512 if (remaining == 0) {
3474 3513
@@ -3752,7 +3791,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3752 sector += STRIPE_SECTORS, 3791 sector += STRIPE_SECTORS,
3753 scnt++) { 3792 scnt++) {
3754 3793
3755 if (scnt < raid_bio->bi_hw_segments) 3794 if (scnt < raid5_bi_hw_segments(raid_bio))
3756 /* already done this stripe */ 3795 /* already done this stripe */
3757 continue; 3796 continue;
3758 3797
@@ -3760,7 +3799,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3760 3799
3761 if (!sh) { 3800 if (!sh) {
3762 /* failed to get a stripe - must wait */ 3801 /* failed to get a stripe - must wait */
3763 raid_bio->bi_hw_segments = scnt; 3802 raid5_set_bi_hw_segments(raid_bio, scnt);
3764 conf->retry_read_aligned = raid_bio; 3803 conf->retry_read_aligned = raid_bio;
3765 return handled; 3804 return handled;
3766 } 3805 }
@@ -3768,7 +3807,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3768 set_bit(R5_ReadError, &sh->dev[dd_idx].flags); 3807 set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
3769 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { 3808 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
3770 release_stripe(sh); 3809 release_stripe(sh);
3771 raid_bio->bi_hw_segments = scnt; 3810 raid5_set_bi_hw_segments(raid_bio, scnt);
3772 conf->retry_read_aligned = raid_bio; 3811 conf->retry_read_aligned = raid_bio;
3773 return handled; 3812 return handled;
3774 } 3813 }
@@ -3778,7 +3817,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3778 handled++; 3817 handled++;
3779 } 3818 }
3780 spin_lock_irq(&conf->device_lock); 3819 spin_lock_irq(&conf->device_lock);
3781 remaining = --raid_bio->bi_phys_segments; 3820 remaining = raid5_dec_bi_phys_segments(raid_bio);
3782 spin_unlock_irq(&conf->device_lock); 3821 spin_unlock_irq(&conf->device_lock);
3783 if (remaining == 0) 3822 if (remaining == 0)
3784 bio_endio(raid_bio, 0); 3823 bio_endio(raid_bio, 0);
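
With bi_hw_segments gone, raid5 packs two counters into the single 32-bit bi_phys_segments field: the biased active-stripe count in the low 16 bits and the processed-stripe count in the high 16 bits. A small standalone sketch of the packing arithmetic (plain C, no kernel types; note that the set helper must use a bitwise OR, as corrected in the hunk above):

    #include <assert.h>

    static inline unsigned int lo16(unsigned int v) { return v & 0xffff; }
    static inline unsigned int hi16(unsigned int v) { return (v >> 16) & 0xffff; }

    /* store cnt in the high half without disturbing the low half */
    static inline unsigned int set_hi16(unsigned int v, unsigned int cnt)
    {
            return lo16(v) | (cnt << 16);
    }

    int main(void)
    {
            unsigned int seg = 1;           /* biased active-stripe count */
            seg = set_hi16(seg, 3);         /* three stripes already processed */
            assert(lo16(seg) == 1 && hi16(seg) == 3);
            seg--;                          /* what raid5_dec_bi_phys_segments() does */
            assert(lo16(seg) == 0 && hi16(seg) == 3);
            return 0;
    }

raid5_dec_bi_hw_segments() in the hunk decrements only the high half and then rewrites the field with the same bitwise-OR packing.
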
diff --git a/drivers/media/common/tuners/tuner-xc2028.h b/drivers/media/common/tuners/tuner-xc2028.h
index 216025cf5d4b..2c5b6282b569 100644
--- a/drivers/media/common/tuners/tuner-xc2028.h
+++ b/drivers/media/common/tuners/tuner-xc2028.h
@@ -10,6 +10,7 @@
10#include "dvb_frontend.h" 10#include "dvb_frontend.h"
11 11
12#define XC2028_DEFAULT_FIRMWARE "xc3028-v27.fw" 12#define XC2028_DEFAULT_FIRMWARE "xc3028-v27.fw"
13#define XC3028L_DEFAULT_FIRMWARE "xc3028L-v36.fw"
13 14
 14/* Demodulator IF (kHz) */ 15/* Demodulator IF (kHz) */
15#define XC3028_FE_DEFAULT 0 /* Don't load SCODE */ 16#define XC3028_FE_DEFAULT 0 /* Don't load SCODE */
diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
index 4eed783f4bce..a127a4175c40 100644
--- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
@@ -491,6 +491,7 @@ static struct s5h1420_config skystar2_rev2_7_s5h1420_config = {
491 .demod_address = 0x53, 491 .demod_address = 0x53,
492 .invert = 1, 492 .invert = 1,
493 .repeated_start_workaround = 1, 493 .repeated_start_workaround = 1,
494 .serial_mpeg = 1,
494}; 495};
495 496
496static struct itd1000_config skystar2_rev2_7_itd1000_config = { 497static struct itd1000_config skystar2_rev2_7_itd1000_config = {
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 069d847ba887..0c733c66a441 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -364,15 +364,16 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
364 enum dmx_success success) 364 enum dmx_success success)
365{ 365{
366 struct dmxdev_filter *dmxdevfilter = filter->priv; 366 struct dmxdev_filter *dmxdevfilter = filter->priv;
367 unsigned long flags;
367 int ret; 368 int ret;
368 369
369 if (dmxdevfilter->buffer.error) { 370 if (dmxdevfilter->buffer.error) {
370 wake_up(&dmxdevfilter->buffer.queue); 371 wake_up(&dmxdevfilter->buffer.queue);
371 return 0; 372 return 0;
372 } 373 }
373 spin_lock(&dmxdevfilter->dev->lock); 374 spin_lock_irqsave(&dmxdevfilter->dev->lock, flags);
374 if (dmxdevfilter->state != DMXDEV_STATE_GO) { 375 if (dmxdevfilter->state != DMXDEV_STATE_GO) {
375 spin_unlock(&dmxdevfilter->dev->lock); 376 spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
376 return 0; 377 return 0;
377 } 378 }
378 del_timer(&dmxdevfilter->timer); 379 del_timer(&dmxdevfilter->timer);
@@ -391,7 +392,7 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
391 } 392 }
392 if (dmxdevfilter->params.sec.flags & DMX_ONESHOT) 393 if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
393 dmxdevfilter->state = DMXDEV_STATE_DONE; 394 dmxdevfilter->state = DMXDEV_STATE_DONE;
394 spin_unlock(&dmxdevfilter->dev->lock); 395 spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
395 wake_up(&dmxdevfilter->buffer.queue); 396 wake_up(&dmxdevfilter->buffer.queue);
396 return 0; 397 return 0;
397} 398}
@@ -403,11 +404,12 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
403{ 404{
404 struct dmxdev_filter *dmxdevfilter = feed->priv; 405 struct dmxdev_filter *dmxdevfilter = feed->priv;
405 struct dvb_ringbuffer *buffer; 406 struct dvb_ringbuffer *buffer;
407 unsigned long flags;
406 int ret; 408 int ret;
407 409
408 spin_lock(&dmxdevfilter->dev->lock); 410 spin_lock_irqsave(&dmxdevfilter->dev->lock, flags);
409 if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) { 411 if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
410 spin_unlock(&dmxdevfilter->dev->lock); 412 spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
411 return 0; 413 return 0;
412 } 414 }
413 415
@@ -417,7 +419,7 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
417 else 419 else
418 buffer = &dmxdevfilter->dev->dvr_buffer; 420 buffer = &dmxdevfilter->dev->dvr_buffer;
419 if (buffer->error) { 421 if (buffer->error) {
420 spin_unlock(&dmxdevfilter->dev->lock); 422 spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
421 wake_up(&buffer->queue); 423 wake_up(&buffer->queue);
422 return 0; 424 return 0;
423 } 425 }
@@ -428,7 +430,7 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
428 dvb_ringbuffer_flush(buffer); 430 dvb_ringbuffer_flush(buffer);
429 buffer->error = ret; 431 buffer->error = ret;
430 } 432 }
431 spin_unlock(&dmxdevfilter->dev->lock); 433 spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
432 wake_up(&buffer->queue); 434 wake_up(&buffer->queue);
433 return 0; 435 return 0;
434} 436}
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index e2eca0b1fe7c..a2c1fd5d2f67 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -399,7 +399,9 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
399void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf, 399void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
400 size_t count) 400 size_t count)
401{ 401{
402 spin_lock(&demux->lock); 402 unsigned long flags;
403
404 spin_lock_irqsave(&demux->lock, flags);
403 405
404 while (count--) { 406 while (count--) {
405 if (buf[0] == 0x47) 407 if (buf[0] == 0x47)
@@ -407,16 +409,17 @@ void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
407 buf += 188; 409 buf += 188;
408 } 410 }
409 411
410 spin_unlock(&demux->lock); 412 spin_unlock_irqrestore(&demux->lock, flags);
411} 413}
412 414
413EXPORT_SYMBOL(dvb_dmx_swfilter_packets); 415EXPORT_SYMBOL(dvb_dmx_swfilter_packets);
414 416
415void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count) 417void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count)
416{ 418{
419 unsigned long flags;
417 int p = 0, i, j; 420 int p = 0, i, j;
418 421
419 spin_lock(&demux->lock); 422 spin_lock_irqsave(&demux->lock, flags);
420 423
421 if (demux->tsbufp) { 424 if (demux->tsbufp) {
422 i = demux->tsbufp; 425 i = demux->tsbufp;
@@ -449,17 +452,18 @@ void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count)
449 } 452 }
450 453
451bailout: 454bailout:
452 spin_unlock(&demux->lock); 455 spin_unlock_irqrestore(&demux->lock, flags);
453} 456}
454 457
455EXPORT_SYMBOL(dvb_dmx_swfilter); 458EXPORT_SYMBOL(dvb_dmx_swfilter);
456 459
457void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count) 460void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count)
458{ 461{
462 unsigned long flags;
459 int p = 0, i, j; 463 int p = 0, i, j;
460 u8 tmppack[188]; 464 u8 tmppack[188];
461 465
462 spin_lock(&demux->lock); 466 spin_lock_irqsave(&demux->lock, flags);
463 467
464 if (demux->tsbufp) { 468 if (demux->tsbufp) {
465 i = demux->tsbufp; 469 i = demux->tsbufp;
@@ -500,7 +504,7 @@ void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count)
500 } 504 }
501 505
502bailout: 506bailout:
503 spin_unlock(&demux->lock); 507 spin_unlock_irqrestore(&demux->lock, flags);
504} 508}
505 509
506EXPORT_SYMBOL(dvb_dmx_swfilter_204); 510EXPORT_SYMBOL(dvb_dmx_swfilter_204);
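
The dmxdev and dvb_demux hunks convert plain spin_lock()/spin_unlock() pairs to the irqsave variants; the usual motivation is that these demux paths can be entered from interrupt context as well as process context, where the plain form can deadlock. A sketch of the pattern, with struct demux and filter_packets standing in for the driver-specific names:

    #include <linux/types.h>
    #include <linux/spinlock.h>

    struct demux { spinlock_t lock; /* ... */ };

    static void filter_packets(struct demux *dmx, const u8 *buf, size_t count)
    {
            unsigned long flags;

            /* saves and disables local interrupts; safe from IRQ and task context */
            spin_lock_irqsave(&dmx->lock, flags);
            /* ... walk the 188-byte TS packets under the lock ... */
            spin_unlock_irqrestore(&dmx->lock, flags);
    }
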
diff --git a/drivers/media/dvb/frontends/s5h1420.c b/drivers/media/dvb/frontends/s5h1420.c
index 747d3fa2e5e5..2e9fd2893ede 100644
--- a/drivers/media/dvb/frontends/s5h1420.c
+++ b/drivers/media/dvb/frontends/s5h1420.c
@@ -59,7 +59,7 @@ struct s5h1420_state {
59 * it does not support repeated-start, workaround: write addr-1 59 * it does not support repeated-start, workaround: write addr-1
60 * and then read 60 * and then read
61 */ 61 */
62 u8 shadow[255]; 62 u8 shadow[256];
63}; 63};
64 64
65static u32 s5h1420_getsymbolrate(struct s5h1420_state* state); 65static u32 s5h1420_getsymbolrate(struct s5h1420_state* state);
@@ -94,8 +94,11 @@ static u8 s5h1420_readreg(struct s5h1420_state *state, u8 reg)
94 if (ret != 3) 94 if (ret != 3)
95 return ret; 95 return ret;
96 } else { 96 } else {
97 ret = i2c_transfer(state->i2c, &msg[1], 2); 97 ret = i2c_transfer(state->i2c, &msg[1], 1);
98 if (ret != 2) 98 if (ret != 1)
99 return ret;
100 ret = i2c_transfer(state->i2c, &msg[2], 1);
101 if (ret != 1)
99 return ret; 102 return ret;
100 } 103 }
101 104
@@ -823,7 +826,7 @@ static int s5h1420_init (struct dvb_frontend* fe)
823 struct s5h1420_state* state = fe->demodulator_priv; 826 struct s5h1420_state* state = fe->demodulator_priv;
824 827
825 /* disable power down and do reset */ 828 /* disable power down and do reset */
826 state->CON_1_val = 0x10; 829 state->CON_1_val = state->config->serial_mpeg << 4;
827 s5h1420_writereg(state, 0x02, state->CON_1_val); 830 s5h1420_writereg(state, 0x02, state->CON_1_val);
828 msleep(10); 831 msleep(10);
829 s5h1420_reset(state); 832 s5h1420_reset(state);
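
The s5h1420_readreg() change above splits what used to be a single two-message i2c_transfer() into two single-message calls, in line with the repeated-start workaround mentioned in the struct comment. A generic sketch of the difference, under the assumption of a one-byte register index followed by a one-byte read; read_reg and no_repeated_start are placeholders (the driver keys off config->repeated_start_workaround):

    #include <linux/i2c.h>
    #include <linux/errno.h>

    static int read_reg(struct i2c_adapter *adap, u8 addr, u8 reg, u8 *val,
                        bool no_repeated_start)
    {
            struct i2c_msg msg[2] = {
                    { .addr = addr, .flags = 0,        .len = 1, .buf = &reg },
                    { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val  },
            };

            if (!no_repeated_start)
                    /* one transaction, repeated start between write and read */
                    return i2c_transfer(adap, msg, 2) == 2 ? 0 : -EIO;

            /* workaround: two transactions with a STOP condition in between */
            if (i2c_transfer(adap, &msg[0], 1) != 1)
                    return -EIO;
            if (i2c_transfer(adap, &msg[1], 1) != 1)
                    return -EIO;
            return 0;
    }
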
diff --git a/drivers/media/dvb/frontends/s5h1420.h b/drivers/media/dvb/frontends/s5h1420.h
index 4c913f142bc4..ff308136d865 100644
--- a/drivers/media/dvb/frontends/s5h1420.h
+++ b/drivers/media/dvb/frontends/s5h1420.h
@@ -32,10 +32,12 @@ struct s5h1420_config
32 u8 demod_address; 32 u8 demod_address;
33 33
34 /* does the inversion require inversion? */ 34 /* does the inversion require inversion? */
35 u8 invert : 1; 35 u8 invert:1;
36 36
37 u8 repeated_start_workaround : 1; 37 u8 repeated_start_workaround:1;
38 u8 cdclk_polarity : 1; /* 1 == falling edge, 0 == raising edge */ 38 u8 cdclk_polarity:1; /* 1 == falling edge, 0 == raising edge */
39
40 u8 serial_mpeg:1;
39}; 41};
40 42
41#if defined(CONFIG_DVB_S5H1420) || (defined(CONFIG_DVB_S5H1420_MODULE) && defined(MODULE)) 43#if defined(CONFIG_DVB_S5H1420) || (defined(CONFIG_DVB_S5H1420_MODULE) && defined(MODULE))
diff --git a/drivers/media/dvb/siano/sms-cards.c b/drivers/media/dvb/siano/sms-cards.c
index cc5efb643f33..9da260fe3fd1 100644
--- a/drivers/media/dvb/siano/sms-cards.c
+++ b/drivers/media/dvb/siano/sms-cards.c
@@ -40,6 +40,8 @@ struct usb_device_id smsusb_id_table[] = {
40 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_OKEMO_B }, 40 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_OKEMO_B },
41 { USB_DEVICE(0x2040, 0x5500), 41 { USB_DEVICE(0x2040, 0x5500),
42 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, 42 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
43 { USB_DEVICE(0x2040, 0x5510),
44 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
43 { USB_DEVICE(0x2040, 0x5580), 45 { USB_DEVICE(0x2040, 0x5580),
44 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, 46 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
45 { USB_DEVICE(0x2040, 0x5590), 47 { USB_DEVICE(0x2040, 0x5590),
@@ -87,7 +89,7 @@ static struct sms_board sms_boards[] = {
87 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-nova-b-dvbt-01.fw", 89 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-nova-b-dvbt-01.fw",
88 }, 90 },
89 [SMS1XXX_BOARD_HAUPPAUGE_WINDHAM] = { 91 [SMS1XXX_BOARD_HAUPPAUGE_WINDHAM] = {
90 .name = "Hauppauge WinTV-Nova-T-MiniStick", 92 .name = "Hauppauge WinTV MiniStick",
91 .type = SMS_NOVA_B0, 93 .type = SMS_NOVA_B0,
92 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-01.fw", 94 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-01.fw",
93 }, 95 },
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 6ae4cc860efe..933eaef41ead 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -3431,7 +3431,7 @@ static int radio_open(struct inode *inode, struct file *file)
3431 dprintk("bttv: open minor=%d\n",minor); 3431 dprintk("bttv: open minor=%d\n",minor);
3432 3432
3433 for (i = 0; i < bttv_num; i++) { 3433 for (i = 0; i < bttv_num; i++) {
3434 if (bttvs[i].radio_dev->minor == minor) { 3434 if (bttvs[i].radio_dev && bttvs[i].radio_dev->minor == minor) {
3435 btv = &bttvs[i]; 3435 btv = &bttvs[i];
3436 break; 3436 break;
3437 } 3437 }
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index c149b7d712e5..5405c30dbb04 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/fs.h> 21#include <linux/fs.h>
22#include <linux/mm.h>
22#include <linux/pci.h> 23#include <linux/pci.h>
23#include <linux/i2c.h> 24#include <linux/i2c.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
diff --git a/drivers/media/video/cpia2/cpia2_usb.c b/drivers/media/video/cpia2/cpia2_usb.c
index a4574740350d..a8a199047cbd 100644
--- a/drivers/media/video/cpia2/cpia2_usb.c
+++ b/drivers/media/video/cpia2/cpia2_usb.c
@@ -632,7 +632,7 @@ int cpia2_usb_transfer_cmd(struct camera_data *cam,
632static int submit_urbs(struct camera_data *cam) 632static int submit_urbs(struct camera_data *cam)
633{ 633{
634 struct urb *urb; 634 struct urb *urb;
635 int fx, err, i; 635 int fx, err, i, j;
636 636
637 for(i=0; i<NUM_SBUF; ++i) { 637 for(i=0; i<NUM_SBUF; ++i) {
638 if (cam->sbuf[i].data) 638 if (cam->sbuf[i].data)
@@ -657,6 +657,9 @@ static int submit_urbs(struct camera_data *cam)
657 } 657 }
658 urb = usb_alloc_urb(FRAMES_PER_DESC, GFP_KERNEL); 658 urb = usb_alloc_urb(FRAMES_PER_DESC, GFP_KERNEL);
659 if (!urb) { 659 if (!urb) {
660 ERR("%s: usb_alloc_urb error!\n", __func__);
661 for (j = 0; j < i; j++)
662 usb_free_urb(cam->sbuf[j].urb);
660 return -ENOMEM; 663 return -ENOMEM;
661 } 664 }
662 665
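
Several hunks in this series (cpia2 above, em28xx-audio, ov511, gspca and w9968cf below) add the same fix: when usb_alloc_urb() fails partway through a loop, release the URBs already allocated before returning -ENOMEM instead of leaking them. A sketch of the pattern, with sbuf, NBUF and FRAMES_PER_DESC standing in for the driver-specific names:

    #include <linux/usb.h>
    #include <linux/errno.h>

    #define NBUF            4     /* placeholder for the driver's buffer count */
    #define FRAMES_PER_DESC 10    /* placeholder */

    struct sbuf { struct urb *urb; /* ... */ };

    static int alloc_isoc_urbs(struct sbuf *sbuf)
    {
            int i, j;

            for (i = 0; i < NBUF; i++) {
                    struct urb *urb = usb_alloc_urb(FRAMES_PER_DESC, GFP_KERNEL);

                    if (!urb) {
                            /* unwind: free every URB allocated so far */
                            for (j = 0; j < i; j++)
                                    usb_free_urb(sbuf[j].urb);
                            return -ENOMEM;
                    }
                    sbuf[i].urb = urb;
                    /* ... fill in urb->dev, urb->pipe, urb->complete, ... */
            }
            return 0;
    }
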
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index 8fe5f38c4d7c..3cb9734ec07b 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -163,7 +163,7 @@ static const struct cx18_card cx18_card_h900 = {
163 }, 163 },
164 .audio_inputs = { 164 .audio_inputs = {
165 { CX18_CARD_INPUT_AUD_TUNER, 165 { CX18_CARD_INPUT_AUD_TUNER,
166 CX18_AV_AUDIO8, 0 }, 166 CX18_AV_AUDIO5, 0 },
167 { CX18_CARD_INPUT_LINE_IN1, 167 { CX18_CARD_INPUT_LINE_IN1,
168 CX18_AV_AUDIO_SERIAL1, 0 }, 168 CX18_AV_AUDIO_SERIAL1, 0 },
169 }, 169 },
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index 3c006103c1eb..ac3292d7646c 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -117,10 +117,10 @@ static void em28xx_audio_isocirq(struct urb *urb)
117 117
118 if (oldptr + length >= runtime->buffer_size) { 118 if (oldptr + length >= runtime->buffer_size) {
119 unsigned int cnt = 119 unsigned int cnt =
120 runtime->buffer_size - oldptr - 1; 120 runtime->buffer_size - oldptr;
121 memcpy(runtime->dma_area + oldptr * stride, cp, 121 memcpy(runtime->dma_area + oldptr * stride, cp,
122 cnt * stride); 122 cnt * stride);
123 memcpy(runtime->dma_area, cp + cnt, 123 memcpy(runtime->dma_area, cp + cnt * stride,
124 length * stride - cnt * stride); 124 length * stride - cnt * stride);
125 } else { 125 } else {
126 memcpy(runtime->dma_area + oldptr * stride, cp, 126 memcpy(runtime->dma_area + oldptr * stride, cp,
@@ -161,8 +161,14 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
161 161
162 memset(dev->adev->transfer_buffer[i], 0x80, sb_size); 162 memset(dev->adev->transfer_buffer[i], 0x80, sb_size);
163 urb = usb_alloc_urb(EM28XX_NUM_AUDIO_PACKETS, GFP_ATOMIC); 163 urb = usb_alloc_urb(EM28XX_NUM_AUDIO_PACKETS, GFP_ATOMIC);
164 if (!urb) 164 if (!urb) {
165 em28xx_errdev("usb_alloc_urb failed!\n");
166 for (j = 0; j < i; j++) {
167 usb_free_urb(dev->adev->urb[j]);
168 kfree(dev->adev->transfer_buffer[j]);
169 }
165 return -ENOMEM; 170 return -ENOMEM;
171 }
166 172
167 urb->dev = dev->udev; 173 urb->dev = dev->udev;
168 urb->context = dev; 174 urb->context = dev;
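
The em28xx-audio fix above corrects two problems in the wrap-around copy into the ALSA ring buffer: the first chunk was one frame short (buffer_size - oldptr - 1) and the second memcpy advanced the source by a byte count rather than a frame count. A standalone sketch of the intended split copy (plain C; the names mirror the hunk):

    #include <string.h>

    /* copy 'length' frames of 'stride' bytes from cp into a circular buffer of
     * 'buffer_size' frames, starting at frame index 'oldptr' */
    static void ring_copy(unsigned char *dma_area, size_t buffer_size, size_t stride,
                          size_t oldptr, const unsigned char *cp, size_t length)
    {
            if (oldptr + length >= buffer_size) {
                    size_t cnt = buffer_size - oldptr;      /* frames until the end */

                    memcpy(dma_area + oldptr * stride, cp, cnt * stride);
                    memcpy(dma_area, cp + cnt * stride,     /* advance by frames, not bytes */
                           (length - cnt) * stride);
            } else {
                    memcpy(dma_area + oldptr * stride, cp, length * stride);
            }
    }
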
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 452da70e719f..de943cf6c169 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -93,28 +93,6 @@ struct em28xx_board em28xx_boards[] = {
93 .amux = 0, 93 .amux = 0,
94 } }, 94 } },
95 }, 95 },
96 [EM2800_BOARD_KWORLD_USB2800] = {
97 .name = "Kworld USB2800",
98 .valid = EM28XX_BOARD_NOT_VALIDATED,
99 .is_em2800 = 1,
100 .vchannels = 3,
101 .tuner_type = TUNER_PHILIPS_FCV1236D,
102 .tda9887_conf = TDA9887_PRESENT,
103 .decoder = EM28XX_SAA7113,
104 .input = { {
105 .type = EM28XX_VMUX_TELEVISION,
106 .vmux = SAA7115_COMPOSITE2,
107 .amux = 0,
108 }, {
109 .type = EM28XX_VMUX_COMPOSITE1,
110 .vmux = SAA7115_COMPOSITE0,
111 .amux = 1,
112 }, {
113 .type = EM28XX_VMUX_SVIDEO,
114 .vmux = SAA7115_SVIDEO3,
115 .amux = 1,
116 } },
117 },
118 [EM2820_BOARD_KWORLD_PVRTV2800RF] = { 96 [EM2820_BOARD_KWORLD_PVRTV2800RF] = {
119 .name = "Kworld PVR TV 2800 RF", 97 .name = "Kworld PVR TV 2800 RF",
120 .is_em2800 = 0, 98 .is_em2800 = 0,
@@ -599,7 +577,7 @@ struct em28xx_board em28xx_boards[] = {
599 }, { 577 }, {
600 .type = EM28XX_VMUX_COMPOSITE1, 578 .type = EM28XX_VMUX_COMPOSITE1,
601 .vmux = TVP5150_COMPOSITE1, 579 .vmux = TVP5150_COMPOSITE1,
602 .amux = 1, 580 .amux = 3,
603 }, { 581 }, {
604 .type = EM28XX_VMUX_SVIDEO, 582 .type = EM28XX_VMUX_SVIDEO,
605 .vmux = TVP5150_SVIDEO, 583 .vmux = TVP5150_SVIDEO,
@@ -952,22 +930,23 @@ struct em28xx_board em28xx_boards[] = {
952 }, 930 },
953 [EM2880_BOARD_KWORLD_DVB_310U] = { 931 [EM2880_BOARD_KWORLD_DVB_310U] = {
954 .name = "KWorld DVB-T 310U", 932 .name = "KWorld DVB-T 310U",
955 .valid = EM28XX_BOARD_NOT_VALIDATED,
956 .vchannels = 3, 933 .vchannels = 3,
957 .tuner_type = TUNER_XC2028, 934 .tuner_type = TUNER_XC2028,
935 .has_dvb = 1,
936 .mts_firmware = 1,
958 .decoder = EM28XX_TVP5150, 937 .decoder = EM28XX_TVP5150,
959 .input = { { 938 .input = { {
960 .type = EM28XX_VMUX_TELEVISION, 939 .type = EM28XX_VMUX_TELEVISION,
961 .vmux = TVP5150_COMPOSITE0, 940 .vmux = TVP5150_COMPOSITE0,
962 .amux = 0, 941 .amux = EM28XX_AMUX_VIDEO,
963 }, { 942 }, {
964 .type = EM28XX_VMUX_COMPOSITE1, 943 .type = EM28XX_VMUX_COMPOSITE1,
965 .vmux = TVP5150_COMPOSITE1, 944 .vmux = TVP5150_COMPOSITE1,
966 .amux = 1, 945 .amux = EM28XX_AMUX_AC97_LINE_IN,
967 }, { 946 }, { /* S-video has not been tested yet */
968 .type = EM28XX_VMUX_SVIDEO, 947 .type = EM28XX_VMUX_SVIDEO,
969 .vmux = TVP5150_SVIDEO, 948 .vmux = TVP5150_SVIDEO,
970 .amux = 1, 949 .amux = EM28XX_AMUX_AC97_LINE_IN,
971 } }, 950 } },
972 }, 951 },
973 [EM2881_BOARD_DNT_DA2_HYBRID] = { 952 [EM2881_BOARD_DNT_DA2_HYBRID] = {
@@ -1282,6 +1261,7 @@ static struct em28xx_reg_seq em2882_terratec_hybrid_xs_digital[] = {
1282static struct em28xx_hash_table em28xx_eeprom_hash [] = { 1261static struct em28xx_hash_table em28xx_eeprom_hash [] = {
1283 /* P/N: SA 60002070465 Tuner: TVF7533-MF */ 1262 /* P/N: SA 60002070465 Tuner: TVF7533-MF */
1284 {0x6ce05a8f, EM2820_BOARD_PROLINK_PLAYTV_USB2, TUNER_YMEC_TVF_5533MF}, 1263 {0x6ce05a8f, EM2820_BOARD_PROLINK_PLAYTV_USB2, TUNER_YMEC_TVF_5533MF},
1264 {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028},
1285}; 1265};
1286 1266
1287/* I2C devicelist hash table for devices with generic USB IDs */ 1267/* I2C devicelist hash table for devices with generic USB IDs */
@@ -1552,9 +1532,12 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
1552 /* djh - Not sure which demod we need here */ 1532 /* djh - Not sure which demod we need here */
1553 ctl->demod = XC3028_FE_DEFAULT; 1533 ctl->demod = XC3028_FE_DEFAULT;
1554 break; 1534 break;
1535 case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600:
1536 ctl->demod = XC3028_FE_DEFAULT;
1537 ctl->fname = XC3028L_DEFAULT_FIRMWARE;
1538 break;
1555 case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950: 1539 case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
1556 case EM2880_BOARD_PINNACLE_PCTV_HD_PRO: 1540 case EM2880_BOARD_PINNACLE_PCTV_HD_PRO:
1557 case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600:
1558 /* FIXME: Better to specify the needed IF */ 1541 /* FIXME: Better to specify the needed IF */
1559 ctl->demod = XC3028_FE_DEFAULT; 1542 ctl->demod = XC3028_FE_DEFAULT;
1560 break; 1543 break;
@@ -1764,6 +1747,20 @@ void em28xx_card_setup(struct em28xx *dev)
1764 break; 1747 break;
1765 case EM2820_BOARD_UNKNOWN: 1748 case EM2820_BOARD_UNKNOWN:
1766 case EM2800_BOARD_UNKNOWN: 1749 case EM2800_BOARD_UNKNOWN:
1750 /*
1751 * The K-WORLD DVB-T 310U is detected as an MSI Digivox AD.
1752 *
1753 * This occurs because they share identical USB vendor and
1754 * product IDs.
1755 *
1756 * What we do here is look up the EEPROM hash of the K-WORLD
1757 * and if it is found then we decide that we do not have
1758 * a DIGIVOX and reset the device to the K-WORLD instead.
1759 *
 1760 * This solution is only valid if they do not share EEPROM
 1761 * hash identities, which has not yet been determined.
1762 */
1763 case EM2880_BOARD_MSI_DIGIVOX_AD:
1767 if (!em28xx_hint_board(dev)) 1764 if (!em28xx_hint_board(dev))
1768 em28xx_set_model(dev); 1765 em28xx_set_model(dev);
1769 break; 1766 break;
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index 4b992bc0083c..d2b1a1a52689 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -452,6 +452,15 @@ static int dvb_init(struct em28xx *dev)
452 goto out_free; 452 goto out_free;
453 } 453 }
454 break; 454 break;
455 case EM2880_BOARD_KWORLD_DVB_310U:
456 dvb->frontend = dvb_attach(zl10353_attach,
457 &em28xx_zl10353_with_xc3028,
458 &dev->i2c_adap);
459 if (attach_xc3028(0x61, dev) < 0) {
460 result = -EINVAL;
461 goto out_free;
462 }
463 break;
455 default: 464 default:
456 printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card" 465 printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card"
457 " isn't supported yet\n", 466 " isn't supported yet\n",
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 7be69284da03..ac95c55887df 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -459,6 +459,7 @@ static int create_urbs(struct gspca_dev *gspca_dev,
459 urb = usb_alloc_urb(npkt, GFP_KERNEL); 459 urb = usb_alloc_urb(npkt, GFP_KERNEL);
460 if (!urb) { 460 if (!urb) {
461 err("usb_alloc_urb failed"); 461 err("usb_alloc_urb failed");
462 destroy_urbs(gspca_dev);
462 return -ENOMEM; 463 return -ENOMEM;
463 } 464 }
464 urb->transfer_buffer = usb_buffer_alloc(gspca_dev->dev, 465 urb->transfer_buffer = usb_buffer_alloc(gspca_dev->dev,
@@ -468,8 +469,8 @@ static int create_urbs(struct gspca_dev *gspca_dev,
468 469
469 if (urb->transfer_buffer == NULL) { 470 if (urb->transfer_buffer == NULL) {
470 usb_free_urb(urb); 471 usb_free_urb(urb);
471 destroy_urbs(gspca_dev);
472 err("usb_buffer_urb failed"); 472 err("usb_buffer_urb failed");
473 destroy_urbs(gspca_dev);
473 return -ENOMEM; 474 return -ENOMEM;
474 } 475 }
475 gspca_dev->urb[n] = urb; 476 gspca_dev->urb[n] = urb;
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index d4be51843286..ba865b7f1ed8 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -1063,6 +1063,7 @@ static __devinitdata struct usb_device_id device_table[] = {
1063 {USB_DEVICE(0x093a, 0x2621), .driver_info = SENSOR_PAC7302}, 1063 {USB_DEVICE(0x093a, 0x2621), .driver_info = SENSOR_PAC7302},
1064 {USB_DEVICE(0x093a, 0x2624), .driver_info = SENSOR_PAC7302}, 1064 {USB_DEVICE(0x093a, 0x2624), .driver_info = SENSOR_PAC7302},
1065 {USB_DEVICE(0x093a, 0x2626), .driver_info = SENSOR_PAC7302}, 1065 {USB_DEVICE(0x093a, 0x2626), .driver_info = SENSOR_PAC7302},
1066 {USB_DEVICE(0x093a, 0x262a), .driver_info = SENSOR_PAC7302},
1066 {} 1067 {}
1067}; 1068};
1068MODULE_DEVICE_TABLE(usb, device_table); 1069MODULE_DEVICE_TABLE(usb, device_table);
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 5dd78c6766ea..12b81ae526b7 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -232,7 +232,7 @@ static struct ctrl sd_ctrls[] = {
232static struct v4l2_pix_format vga_mode[] = { 232static struct v4l2_pix_format vga_mode[] = {
233 {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, 233 {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
234 .bytesperline = 160, 234 .bytesperline = 160,
235 .sizeimage = 160 * 120 * 5 / 4, 235 .sizeimage = 160 * 120,
236 .colorspace = V4L2_COLORSPACE_SRGB, 236 .colorspace = V4L2_COLORSPACE_SRGB,
237 .priv = 2 | MODE_RAW}, 237 .priv = 2 | MODE_RAW},
238 {160, 120, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE, 238 {160, 120, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
@@ -264,7 +264,7 @@ static struct v4l2_pix_format sif_mode[] = {
264 .priv = 1 | MODE_REDUCED_SIF}, 264 .priv = 1 | MODE_REDUCED_SIF},
265 {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, 265 {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
266 .bytesperline = 176, 266 .bytesperline = 176,
267 .sizeimage = 176 * 144 * 5 / 4, 267 .sizeimage = 176 * 144,
268 .colorspace = V4L2_COLORSPACE_SRGB, 268 .colorspace = V4L2_COLORSPACE_SRGB,
269 .priv = 1 | MODE_RAW}, 269 .priv = 1 | MODE_RAW},
270 {176, 144, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE, 270 {176, 144, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index d75b1d20b318..572b0f363b64 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -707,6 +707,7 @@ static void i2c_w8(struct gspca_dev *gspca_dev,
707 0x08, 0, /* value, index */ 707 0x08, 0, /* value, index */
708 gspca_dev->usb_buf, 8, 708 gspca_dev->usb_buf, 8,
709 500); 709 500);
710 msleep(2);
710} 711}
711 712
712/* read 5 bytes in gspca_dev->usb_buf */ 713/* read 5 bytes in gspca_dev->usb_buf */
@@ -976,13 +977,13 @@ static int sd_init(struct gspca_dev *gspca_dev)
976 case BRIDGE_SN9C105: 977 case BRIDGE_SN9C105:
977 if (regF1 != 0x11) 978 if (regF1 != 0x11)
978 return -ENODEV; 979 return -ENODEV;
979 reg_w(gspca_dev, 0x02, regGpio, 2); 980 reg_w(gspca_dev, 0x01, regGpio, 2);
980 break; 981 break;
981 case BRIDGE_SN9C120: 982 case BRIDGE_SN9C120:
982 if (regF1 != 0x12) 983 if (regF1 != 0x12)
983 return -ENODEV; 984 return -ENODEV;
984 regGpio[1] = 0x70; 985 regGpio[1] = 0x70;
985 reg_w(gspca_dev, 0x02, regGpio, 2); 986 reg_w(gspca_dev, 0x01, regGpio, 2);
986 break; 987 break;
987 default: 988 default:
988/* case BRIDGE_SN9C110: */ 989/* case BRIDGE_SN9C110: */
@@ -1183,7 +1184,7 @@ static void sd_start(struct gspca_dev *gspca_dev)
1183 static const __u8 CA[] = { 0x28, 0xd8, 0x14, 0xec }; 1184 static const __u8 CA[] = { 0x28, 0xd8, 0x14, 0xec };
1184 static const __u8 CE[] = { 0x32, 0xdd, 0x2d, 0xdd }; /* MI0360 */ 1185 static const __u8 CE[] = { 0x32, 0xdd, 0x2d, 0xdd }; /* MI0360 */
1185 static const __u8 CE_ov76xx[] = 1186 static const __u8 CE_ov76xx[] =
1186 { 0x32, 0xdd, 0x32, 0xdd }; /* OV7630/48 */ 1187 { 0x32, 0xdd, 0x32, 0xdd };
1187 1188
1188 sn9c1xx = sn_tb[(int) sd->sensor]; 1189 sn9c1xx = sn_tb[(int) sd->sensor];
1189 configure_gpio(gspca_dev, sn9c1xx); 1190 configure_gpio(gspca_dev, sn9c1xx);
@@ -1223,8 +1224,15 @@ static void sd_start(struct gspca_dev *gspca_dev)
1223 reg_w(gspca_dev, 0x20, gamma_def, sizeof gamma_def); 1224 reg_w(gspca_dev, 0x20, gamma_def, sizeof gamma_def);
1224 for (i = 0; i < 8; i++) 1225 for (i = 0; i < 8; i++)
1225 reg_w(gspca_dev, 0x84, reg84, sizeof reg84); 1226 reg_w(gspca_dev, 0x84, reg84, sizeof reg84);
1227 switch (sd->sensor) {
1228 case SENSOR_OV7660:
1229 reg_w1(gspca_dev, 0x9a, 0x05);
1230 break;
1231 default:
1226 reg_w1(gspca_dev, 0x9a, 0x08); 1232 reg_w1(gspca_dev, 0x9a, 0x08);
1227 reg_w1(gspca_dev, 0x99, 0x59); 1233 reg_w1(gspca_dev, 0x99, 0x59);
1234 break;
1235 }
1228 1236
1229 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; 1237 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
1230 if (mode) 1238 if (mode)
@@ -1275,8 +1283,8 @@ static void sd_start(struct gspca_dev *gspca_dev)
1275/* reg1 = 0x44; */ 1283/* reg1 = 0x44; */
1276/* reg1 = 0x46; (done) */ 1284/* reg1 = 0x46; (done) */
1277 } else { 1285 } else {
1278 reg17 = 0x22; /* 640 MCKSIZE */ 1286 reg17 = 0xa2; /* 640 */
1279 reg1 = 0x06; 1287 reg1 = 0x44;
1280 } 1288 }
1281 break; 1289 break;
1282 } 1290 }
@@ -1285,6 +1293,7 @@ static void sd_start(struct gspca_dev *gspca_dev)
1285 switch (sd->sensor) { 1293 switch (sd->sensor) {
1286 case SENSOR_OV7630: 1294 case SENSOR_OV7630:
1287 case SENSOR_OV7648: 1295 case SENSOR_OV7648:
1296 case SENSOR_OV7660:
1288 reg_w(gspca_dev, 0xce, CE_ov76xx, 4); 1297 reg_w(gspca_dev, 0xce, CE_ov76xx, 4);
1289 break; 1298 break;
1290 default: 1299 default:
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index cfbc9ebc5c5d..95fcfcb9e31b 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -225,7 +225,7 @@ static int i2c_read(struct gspca_dev *gspca_dev, __u16 reg, __u8 mode)
225 reg_w_val(gspca_dev->dev, 0x8802, (mode | 0x01)); 225 reg_w_val(gspca_dev->dev, 0x8802, (mode | 0x01));
226 do { 226 do {
227 reg_r(gspca_dev, 0x8803, 1); 227 reg_r(gspca_dev, 0x8803, 1);
228 if (!gspca_dev->usb_buf) 228 if (!gspca_dev->usb_buf[0])
229 break; 229 break;
230 } while (--retry); 230 } while (--retry);
231 if (retry == 0) 231 if (retry == 0)
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 8d7c27e6ac77..d61ef727e0c2 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -6576,8 +6576,8 @@ static int setlightfreq(struct gspca_dev *gspca_dev)
6576 cs2102_60HZ, cs2102_60HZScale}, 6576 cs2102_60HZ, cs2102_60HZScale},
6577/* SENSOR_CS2102K 1 */ 6577/* SENSOR_CS2102K 1 */
6578 {cs2102_NoFliker, cs2102_NoFlikerScale, 6578 {cs2102_NoFliker, cs2102_NoFlikerScale,
6579 cs2102_50HZ, cs2102_50HZScale, 6579 NULL, NULL, /* currently disabled */
6580 cs2102_60HZ, cs2102_60HZScale}, 6580 NULL, NULL},
6581/* SENSOR_GC0305 2 */ 6581/* SENSOR_GC0305 2 */
6582 {gc0305_NoFliker, gc0305_NoFliker, 6582 {gc0305_NoFliker, gc0305_NoFliker,
6583 gc0305_50HZ, gc0305_50HZ, 6583 gc0305_50HZ, gc0305_50HZ,
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index 3d3c48db45d9..c6852402c5e9 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -3591,7 +3591,7 @@ static int
3591ov51x_init_isoc(struct usb_ov511 *ov) 3591ov51x_init_isoc(struct usb_ov511 *ov)
3592{ 3592{
3593 struct urb *urb; 3593 struct urb *urb;
3594 int fx, err, n, size; 3594 int fx, err, n, i, size;
3595 3595
3596 PDEBUG(3, "*** Initializing capture ***"); 3596 PDEBUG(3, "*** Initializing capture ***");
3597 3597
@@ -3662,6 +3662,8 @@ ov51x_init_isoc(struct usb_ov511 *ov)
3662 urb = usb_alloc_urb(FRAMES_PER_DESC, GFP_KERNEL); 3662 urb = usb_alloc_urb(FRAMES_PER_DESC, GFP_KERNEL);
3663 if (!urb) { 3663 if (!urb) {
3664 err("init isoc: usb_alloc_urb ret. NULL"); 3664 err("init isoc: usb_alloc_urb ret. NULL");
3665 for (i = 0; i < n; i++)
3666 usb_free_urb(ov->sbuf[i].urb);
3665 return -ENOMEM; 3667 return -ENOMEM;
3666 } 3668 }
3667 ov->sbuf[n].urb = urb; 3669 ov->sbuf[n].urb = urb;
@@ -5651,7 +5653,7 @@ static ssize_t show_exposure(struct device *cd,
5651 if (!ov->dev) 5653 if (!ov->dev)
5652 return -ENODEV; 5654 return -ENODEV;
5653 sensor_get_exposure(ov, &exp); 5655 sensor_get_exposure(ov, &exp);
5654 return sprintf(buf, "%d\n", exp >> 8); 5656 return sprintf(buf, "%d\n", exp);
5655} 5657}
5656static DEVICE_ATTR(exposure, S_IRUGO, show_exposure, NULL); 5658static DEVICE_ATTR(exposure, S_IRUGO, show_exposure, NULL);
5657 5659
diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.c b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
index 88e175168438..cbe2a3417851 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-devattr.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
@@ -489,6 +489,8 @@ static const struct pvr2_device_desc pvr2_device_751xx = {
489struct usb_device_id pvr2_device_table[] = { 489struct usb_device_id pvr2_device_table[] = {
490 { USB_DEVICE(0x2040, 0x2900), 490 { USB_DEVICE(0x2040, 0x2900),
491 .driver_info = (kernel_ulong_t)&pvr2_device_29xxx}, 491 .driver_info = (kernel_ulong_t)&pvr2_device_29xxx},
492 { USB_DEVICE(0x2040, 0x2950), /* Logically identical to 2900 */
493 .driver_info = (kernel_ulong_t)&pvr2_device_29xxx},
492 { USB_DEVICE(0x2040, 0x2400), 494 { USB_DEVICE(0x2040, 0x2400),
493 .driver_info = (kernel_ulong_t)&pvr2_device_24xxx}, 495 .driver_info = (kernel_ulong_t)&pvr2_device_24xxx},
494 { USB_DEVICE(0x1164, 0x0622), 496 { USB_DEVICE(0x1164, 0x0622),
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index b1d09d8e2b85..92b83feae366 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -669,7 +669,7 @@ static void s2255_fillbuff(struct s2255_dev *dev, struct s2255_buffer *buf,
669 (unsigned long)vbuf, pos); 669 (unsigned long)vbuf, pos);
670 /* tell v4l buffer was filled */ 670 /* tell v4l buffer was filled */
671 671
672 buf->vb.field_count++; 672 buf->vb.field_count = dev->frame_count[chn] * 2;
673 do_gettimeofday(&ts); 673 do_gettimeofday(&ts);
674 buf->vb.ts = ts; 674 buf->vb.ts = ts;
675 buf->vb.state = VIDEOBUF_DONE; 675 buf->vb.state = VIDEOBUF_DONE;
@@ -1268,6 +1268,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
1268 dev->last_frame[chn] = -1; 1268 dev->last_frame[chn] = -1;
1269 dev->bad_payload[chn] = 0; 1269 dev->bad_payload[chn] = 0;
1270 dev->cur_frame[chn] = 0; 1270 dev->cur_frame[chn] = 0;
1271 dev->frame_count[chn] = 0;
1271 for (j = 0; j < SYS_FRAMES; j++) { 1272 for (j = 0; j < SYS_FRAMES; j++) {
1272 dev->buffer[chn].frame[j].ulState = 0; 1273 dev->buffer[chn].frame[j].ulState = 0;
1273 dev->buffer[chn].frame[j].cur_size = 0; 1274 dev->buffer[chn].frame[j].cur_size = 0;
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 6ef3e5297de8..feab12aa2c7b 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -592,7 +592,7 @@ int uvc_query_v4l2_ctrl(struct uvc_video_device *video,
592 if (ctrl == NULL) 592 if (ctrl == NULL)
593 return -EINVAL; 593 return -EINVAL;
594 594
595 data = kmalloc(8, GFP_KERNEL); 595 data = kmalloc(ctrl->info->size, GFP_KERNEL);
596 if (data == NULL) 596 if (data == NULL)
597 return -ENOMEM; 597 return -ENOMEM;
598 598
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c
index 168baabe4659..11edf79f57be 100644
--- a/drivers/media/video/w9968cf.c
+++ b/drivers/media/video/w9968cf.c
@@ -911,7 +911,6 @@ static int w9968cf_start_transfer(struct w9968cf_device* cam)
911 911
912 for (i = 0; i < W9968CF_URBS; i++) { 912 for (i = 0; i < W9968CF_URBS; i++) {
913 urb = usb_alloc_urb(W9968CF_ISO_PACKETS, GFP_KERNEL); 913 urb = usb_alloc_urb(W9968CF_ISO_PACKETS, GFP_KERNEL);
914 cam->urb[i] = urb;
915 if (!urb) { 914 if (!urb) {
916 for (j = 0; j < i; j++) 915 for (j = 0; j < i; j++)
917 usb_free_urb(cam->urb[j]); 916 usb_free_urb(cam->urb[j]);
@@ -919,6 +918,7 @@ static int w9968cf_start_transfer(struct w9968cf_device* cam)
919 return -ENOMEM; 918 return -ENOMEM;
920 } 919 }
921 920
921 cam->urb[i] = urb;
922 urb->dev = udev; 922 urb->dev = udev;
923 urb->context = (void*)cam; 923 urb->context = (void*)cam;
924 urb->pipe = usb_rcvisocpipe(udev, 1); 924 urb->pipe = usb_rcvisocpipe(udev, 1);
diff --git a/drivers/media/video/wm8739.c b/drivers/media/video/wm8739.c
index 95c79ad80487..54ac3fe26ec2 100644
--- a/drivers/media/video/wm8739.c
+++ b/drivers/media/video/wm8739.c
@@ -274,10 +274,8 @@ static int wm8739_probe(struct i2c_client *client,
274 client->addr << 1, client->adapter->name); 274 client->addr << 1, client->adapter->name);
275 275
276 state = kmalloc(sizeof(struct wm8739_state), GFP_KERNEL); 276 state = kmalloc(sizeof(struct wm8739_state), GFP_KERNEL);
277 if (state == NULL) { 277 if (state == NULL)
278 kfree(client);
279 return -ENOMEM; 278 return -ENOMEM;
280 }
281 state->vol_l = 0x17; /* 0dB */ 279 state->vol_l = 0x17; /* 0dB */
282 state->vol_r = 0x17; /* 0dB */ 280 state->vol_r = 0x17; /* 0dB */
283 state->muted = 0; 281 state->muted = 0;
diff --git a/drivers/media/video/zoran_card.c b/drivers/media/video/zoran_card.c
index d842a7cb99d2..3282be730298 100644
--- a/drivers/media/video/zoran_card.c
+++ b/drivers/media/video/zoran_card.c
@@ -988,7 +988,7 @@ zoran_open_init_params (struct zoran *zr)
988 zr->v4l_grab_seq = 0; 988 zr->v4l_grab_seq = 0;
989 zr->v4l_settings.width = 192; 989 zr->v4l_settings.width = 192;
990 zr->v4l_settings.height = 144; 990 zr->v4l_settings.height = 144;
991 zr->v4l_settings.format = &zoran_formats[4]; /* YUY2 - YUV-4:2:2 packed */ 991 zr->v4l_settings.format = &zoran_formats[7]; /* YUY2 - YUV-4:2:2 packed */
992 zr->v4l_settings.bytesperline = 992 zr->v4l_settings.bytesperline =
993 zr->v4l_settings.width * 993 zr->v4l_settings.width *
994 ((zr->v4l_settings.format->depth + 7) / 8); 994 ((zr->v4l_settings.format->depth + 7) / 8);
diff --git a/drivers/media/video/zoran_driver.c b/drivers/media/video/zoran_driver.c
index ec6f59674b10..2dab9eea4def 100644
--- a/drivers/media/video/zoran_driver.c
+++ b/drivers/media/video/zoran_driver.c
@@ -134,7 +134,7 @@ const struct zoran_format zoran_formats[] = {
134 }, { 134 }, {
135 .name = "16-bit RGB BE", 135 .name = "16-bit RGB BE",
136 ZFMT(-1, 136 ZFMT(-1,
137 V4L2_PIX_FMT_RGB565, V4L2_COLORSPACE_SRGB), 137 V4L2_PIX_FMT_RGB565X, V4L2_COLORSPACE_SRGB),
138 .depth = 16, 138 .depth = 16,
139 .flags = ZORAN_FORMAT_CAPTURE | 139 .flags = ZORAN_FORMAT_CAPTURE |
140 ZORAN_FORMAT_OVERLAY, 140 ZORAN_FORMAT_OVERLAY,
@@ -2737,7 +2737,8 @@ zoran_do_ioctl (struct inode *inode,
2737 fh->v4l_settings.format->fourcc; 2737 fh->v4l_settings.format->fourcc;
2738 fmt->fmt.pix.colorspace = 2738 fmt->fmt.pix.colorspace =
2739 fh->v4l_settings.format->colorspace; 2739 fh->v4l_settings.format->colorspace;
2740 fmt->fmt.pix.bytesperline = 0; 2740 fmt->fmt.pix.bytesperline =
2741 fh->v4l_settings.bytesperline;
2741 if (BUZ_MAX_HEIGHT < 2742 if (BUZ_MAX_HEIGHT <
2742 (fh->v4l_settings.height * 2)) 2743 (fh->v4l_settings.height * 2))
2743 fmt->fmt.pix.field = 2744 fmt->fmt.pix.field =
@@ -2833,13 +2834,6 @@ zoran_do_ioctl (struct inode *inode,
2833 fmt->fmt.pix.pixelformat, 2834 fmt->fmt.pix.pixelformat,
2834 (char *) &printformat); 2835 (char *) &printformat);
2835 2836
2836 if (fmt->fmt.pix.bytesperline > 0) {
2837 dprintk(5,
2838 KERN_ERR "%s: bpl not supported\n",
2839 ZR_DEVNAME(zr));
2840 return -EINVAL;
2841 }
2842
2843 /* we can be requested to do JPEG/raw playback/capture */ 2837 /* we can be requested to do JPEG/raw playback/capture */
2844 if (! 2838 if (!
2845 (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE || 2839 (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
@@ -2923,6 +2917,7 @@ zoran_do_ioctl (struct inode *inode,
2923 fh->jpg_buffers.buffer_size = 2917 fh->jpg_buffers.buffer_size =
2924 zoran_v4l2_calc_bufsize(&fh-> 2918 zoran_v4l2_calc_bufsize(&fh->
2925 jpg_settings); 2919 jpg_settings);
2920 fmt->fmt.pix.bytesperline = 0;
2926 fmt->fmt.pix.sizeimage = 2921 fmt->fmt.pix.sizeimage =
2927 fh->jpg_buffers.buffer_size; 2922 fh->jpg_buffers.buffer_size;
2928 2923
@@ -2979,6 +2974,8 @@ zoran_do_ioctl (struct inode *inode,
2979 2974
2980 /* tell the user the 2975 /* tell the user the
2981 * results/missing stuff */ 2976 * results/missing stuff */
2977 fmt->fmt.pix.bytesperline =
2978 fh->v4l_settings.bytesperline;
2982 fmt->fmt.pix.sizeimage = 2979 fmt->fmt.pix.sizeimage =
2983 fh->v4l_settings.height * 2980 fh->v4l_settings.height *
2984 fh->v4l_settings.bytesperline; 2981 fh->v4l_settings.bytesperline;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index d2d2318dafa4..6e291bf8237a 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -197,7 +197,7 @@ static int mspro_block_bd_open(struct inode *inode, struct file *filp)
197static int mspro_block_disk_release(struct gendisk *disk) 197static int mspro_block_disk_release(struct gendisk *disk)
198{ 198{
199 struct mspro_block_data *msb = disk->private_data; 199 struct mspro_block_data *msb = disk->private_data;
200 int disk_id = disk->first_minor >> MSPRO_BLOCK_PART_SHIFT; 200 int disk_id = MINOR(disk_devt(disk)) >> MSPRO_BLOCK_PART_SHIFT;
201 201
202 mutex_lock(&mspro_block_disk_lock); 202 mutex_lock(&mspro_block_disk_lock);
203 203
@@ -828,7 +828,7 @@ static void mspro_block_submit_req(struct request_queue *q)
828 828
829 if (msb->eject) { 829 if (msb->eject) {
830 while ((req = elv_next_request(q)) != NULL) 830 while ((req = elv_next_request(q)) != NULL)
831 end_queued_request(req, -ENODEV); 831 __blk_end_request(req, -ENODEV, blk_rq_bytes(req));
832 832
833 return; 833 return;
834 } 834 }
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 10c44d3fe01a..68dc8d9eb24e 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -21,7 +21,7 @@ config MFD_SM501
21 21
22config MFD_SM501_GPIO 22config MFD_SM501_GPIO
23 bool "Export GPIO via GPIO layer" 23 bool "Export GPIO via GPIO layer"
24 depends on MFD_SM501 && HAVE_GPIO_LIB 24 depends on MFD_SM501 && GPIOLIB
25 ---help--- 25 ---help---
26 This option uses the gpio library layer to export the 64 GPIO 26 This option uses the gpio library layer to export the 64 GPIO
27 lines on the SM501. The platform data is used to supply the 27 lines on the SM501. The platform data is used to supply the
@@ -29,7 +29,7 @@ config MFD_SM501_GPIO
29 29
30config MFD_ASIC3 30config MFD_ASIC3
31 bool "Support for Compaq ASIC3" 31 bool "Support for Compaq ASIC3"
32 depends on GENERIC_HARDIRQS && HAVE_GPIO_LIB && ARM 32 depends on GENERIC_HARDIRQS && GPIOLIB && ARM
33 ---help--- 33 ---help---
34 This driver supports the ASIC3 multifunction chip found on many 34 This driver supports the ASIC3 multifunction chip found on many
35 PDAs (mainly iPAQ and HTC based ones) 35 PDAs (mainly iPAQ and HTC based ones)
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index bc2a807f210d..ba5aa2008273 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -312,7 +312,6 @@ static int __init asic3_irq_probe(struct platform_device *pdev)
312 struct asic3 *asic = platform_get_drvdata(pdev); 312 struct asic3 *asic = platform_get_drvdata(pdev);
313 unsigned long clksel = 0; 313 unsigned long clksel = 0;
314 unsigned int irq, irq_base; 314 unsigned int irq, irq_base;
315 int map_size;
316 int ret; 315 int ret;
317 316
318 ret = platform_get_irq(pdev, 0); 317 ret = platform_get_irq(pdev, 0);
@@ -534,6 +533,7 @@ static int __init asic3_probe(struct platform_device *pdev)
534 struct asic3 *asic; 533 struct asic3 *asic;
535 struct resource *mem; 534 struct resource *mem;
536 unsigned long clksel; 535 unsigned long clksel;
536 int map_size;
537 int ret = 0; 537 int ret = 0;
538 538
539 asic = kzalloc(sizeof(struct asic3), GFP_KERNEL); 539 asic = kzalloc(sizeof(struct asic3), GFP_KERNEL);
diff --git a/drivers/misc/eeepc-laptop.c b/drivers/misc/eeepc-laptop.c
index facdb9893c84..1ee8501e90f1 100644
--- a/drivers/misc/eeepc-laptop.c
+++ b/drivers/misc/eeepc-laptop.c
@@ -450,12 +450,14 @@ static int eeepc_get_fan_pwm(void)
450 int value = 0; 450 int value = 0;
451 451
452 read_acpi_int(NULL, EEEPC_EC_FAN_PWM, &value); 452 read_acpi_int(NULL, EEEPC_EC_FAN_PWM, &value);
453 value = value * 255 / 100;
453 return (value); 454 return (value);
454} 455}
455 456
456static void eeepc_set_fan_pwm(int value) 457static void eeepc_set_fan_pwm(int value)
457{ 458{
458 value = SENSORS_LIMIT(value, 0, 100); 459 value = SENSORS_LIMIT(value, 0, 255);
460 value = value * 100 / 255;
459 ec_write(EEEPC_EC_SC02, value); 461 ec_write(EEEPC_EC_SC02, value);
460} 462}
461 463
@@ -520,15 +522,23 @@ static ssize_t show_sys_hwmon(int (*get)(void), char *buf)
520 static SENSOR_DEVICE_ATTR(_name, _mode, show_##_name, store_##_name, 0); 522 static SENSOR_DEVICE_ATTR(_name, _mode, show_##_name, store_##_name, 0);
521 523
522EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL); 524EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL);
523EEEPC_CREATE_SENSOR_ATTR(fan1_pwm, S_IRUGO | S_IWUSR, 525EEEPC_CREATE_SENSOR_ATTR(pwm1, S_IRUGO | S_IWUSR,
524 eeepc_get_fan_pwm, eeepc_set_fan_pwm); 526 eeepc_get_fan_pwm, eeepc_set_fan_pwm);
525EEEPC_CREATE_SENSOR_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, 527EEEPC_CREATE_SENSOR_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
526 eeepc_get_fan_ctrl, eeepc_set_fan_ctrl); 528 eeepc_get_fan_ctrl, eeepc_set_fan_ctrl);
527 529
530static ssize_t
531show_name(struct device *dev, struct device_attribute *attr, char *buf)
532{
533 return sprintf(buf, "eeepc\n");
534}
535static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
536
528static struct attribute *hwmon_attributes[] = { 537static struct attribute *hwmon_attributes[] = {
529 &sensor_dev_attr_fan1_pwm.dev_attr.attr, 538 &sensor_dev_attr_pwm1.dev_attr.attr,
530 &sensor_dev_attr_fan1_input.dev_attr.attr, 539 &sensor_dev_attr_fan1_input.dev_attr.attr,
531 &sensor_dev_attr_pwm1_enable.dev_attr.attr, 540 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
541 &sensor_dev_attr_name.dev_attr.attr,
532 NULL 542 NULL
533}; 543};
534 544
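
The eeepc-laptop change renames fan1_pwm to the standard hwmon name pwm1 and rescales between the 0-255 range hwmon expects and the 0-100 range the EC register uses. The two conversions as a standalone sketch (integer arithmetic, so a round trip loses a little precision):

    /* EC value (0-100) -> hwmon pwm (0-255) */
    static int ec_to_pwm(int ec)
    {
            return ec * 255 / 100;
    }

    /* hwmon pwm (0-255) -> EC value (0-100), after clamping to the valid range */
    static int pwm_to_ec(int pwm)
    {
            if (pwm < 0)
                    pwm = 0;
            if (pwm > 255)
                    pwm = 255;
            return pwm * 100 / 255;
    }
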
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 6986f3926244..efacee0404a0 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -29,6 +29,7 @@
29#include <linux/blkdev.h> 29#include <linux/blkdev.h>
30#include <linux/mutex.h> 30#include <linux/mutex.h>
31#include <linux/scatterlist.h> 31#include <linux/scatterlist.h>
32#include <linux/string_helpers.h>
32 33
33#include <linux/mmc/card.h> 34#include <linux/mmc/card.h>
34#include <linux/mmc/host.h> 35#include <linux/mmc/host.h>
@@ -83,7 +84,7 @@ static void mmc_blk_put(struct mmc_blk_data *md)
83 mutex_lock(&open_lock); 84 mutex_lock(&open_lock);
84 md->usage--; 85 md->usage--;
85 if (md->usage == 0) { 86 if (md->usage == 0) {
86 int devidx = md->disk->first_minor >> MMC_SHIFT; 87 int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
87 __clear_bit(devidx, dev_use); 88 __clear_bit(devidx, dev_use);
88 89
89 put_disk(md->disk); 90 put_disk(md->disk);
@@ -532,6 +533,8 @@ static int mmc_blk_probe(struct mmc_card *card)
532 struct mmc_blk_data *md; 533 struct mmc_blk_data *md;
533 int err; 534 int err;
534 535
536 char cap_str[10];
537
535 /* 538 /*
536 * Check that the card supports the command class(es) we need. 539 * Check that the card supports the command class(es) we need.
537 */ 540 */
@@ -546,10 +549,11 @@ static int mmc_blk_probe(struct mmc_card *card)
546 if (err) 549 if (err)
547 goto out; 550 goto out;
548 551
549 printk(KERN_INFO "%s: %s %s %lluKiB %s\n", 552 string_get_size(get_capacity(md->disk) << 9, STRING_UNITS_2,
553 cap_str, sizeof(cap_str));
554 printk(KERN_INFO "%s: %s %s %s %s\n",
550 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), 555 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
551 (unsigned long long)(get_capacity(md->disk) >> 1), 556 cap_str, md->read_only ? "(ro)" : "");
552 md->read_only ? "(ro)" : "");
553 557
554 mmc_set_drvdata(card, md); 558 mmc_set_drvdata(card, md);
555 add_disk(md->disk); 559 add_disk(md->disk);
@@ -615,14 +619,19 @@ static struct mmc_driver mmc_driver = {
615 619
616static int __init mmc_blk_init(void) 620static int __init mmc_blk_init(void)
617{ 621{
618 int res = -ENOMEM; 622 int res;
619 623
620 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); 624 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
621 if (res) 625 if (res)
622 goto out; 626 goto out;
623 627
624 return mmc_register_driver(&mmc_driver); 628 res = mmc_register_driver(&mmc_driver);
629 if (res)
630 goto out2;
625 631
632 return 0;
633 out2:
634 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
626 out: 635 out:
627 return res; 636 return res;
628} 637}
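
mmc_blk_init() now unwinds the block-major registration when the driver registration fails, instead of returning with the major number leaked. The goto-based unwind pattern in a generic form (a sketch; EXAMPLE_MAJOR and example_register_driver are placeholders):

    #include <linux/init.h>
    #include <linux/fs.h>

    static int __init example_blk_init(void)
    {
            int res;

            res = register_blkdev(EXAMPLE_MAJOR, "example");   /* placeholder major */
            if (res)
                    goto out;

            res = example_register_driver();                   /* placeholder second step */
            if (res)
                    goto out_blkdev;

            return 0;

    out_blkdev:
            unregister_blkdev(EXAMPLE_MAJOR, "example");
    out:
            return res;
    }

Each later registration step gets its own label, so a failure releases exactly the resources acquired before it, in reverse order.
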
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index f26b01d811ae..b92b172074ee 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -1040,7 +1040,7 @@ static const struct mmc_test_case mmc_test_cases[] = {
1040 1040
1041}; 1041};
1042 1042
1043static struct mutex mmc_test_lock; 1043static DEFINE_MUTEX(mmc_test_lock);
1044 1044
1045static void mmc_test_run(struct mmc_test_card *test, int testcase) 1045static void mmc_test_run(struct mmc_test_card *test, int testcase)
1046{ 1046{
@@ -1171,8 +1171,6 @@ static int mmc_test_probe(struct mmc_card *card)
1171 if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD)) 1171 if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD))
1172 return -ENODEV; 1172 return -ENODEV;
1173 1173
1174 mutex_init(&mmc_test_lock);
1175
1176 ret = device_create_file(&card->dev, &dev_attr_test); 1174 ret = device_create_file(&card->dev, &dev_attr_test);
1177 if (ret) 1175 if (ret)
1178 return ret; 1176 return ret;
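mmc_test previously ran mutex_init() from every probe, silently re-initializing a lock that a test run on another card might already hold; DEFINE_MUTEX() initializes it once at compile time instead. A minimal illustration of the idiom, with example_lock and example_run as made-up names:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* valid before any probe runs */

static void example_run(void)
{
	/* Safe even if two devices probe back to back: the mutex is never
	 * re-initialized while someone may already be holding it. */
	mutex_lock(&example_lock);
	/* ... exclusive section: run the test cases ... */
	mutex_unlock(&example_lock);
}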
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 0bd06f5bd62f..00008967ef7a 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -195,7 +195,9 @@ static int atmci_regs_show(struct seq_file *s, void *v)
195 195
196 /* Grab a more or less consistent snapshot */ 196 /* Grab a more or less consistent snapshot */
197 spin_lock_irq(&host->mmc->lock); 197 spin_lock_irq(&host->mmc->lock);
198 clk_enable(host->mck);
198 memcpy_fromio(buf, host->regs, MCI_REGS_SIZE); 199 memcpy_fromio(buf, host->regs, MCI_REGS_SIZE);
200 clk_disable(host->mck);
199 spin_unlock_irq(&host->mmc->lock); 201 spin_unlock_irq(&host->mmc->lock);
200 202
201 seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n", 203 seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n",
@@ -216,6 +218,8 @@ static int atmci_regs_show(struct seq_file *s, void *v)
216 atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]); 218 atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]);
217 atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]); 219 atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]);
218 220
221 kfree(buf);
222
219 return 0; 223 return 0;
220} 224}
221 225
@@ -237,7 +241,6 @@ static void atmci_init_debugfs(struct atmel_mci *host)
237 struct mmc_host *mmc; 241 struct mmc_host *mmc;
238 struct dentry *root; 242 struct dentry *root;
239 struct dentry *node; 243 struct dentry *node;
240 struct resource *res;
241 244
242 mmc = host->mmc; 245 mmc = host->mmc;
243 root = mmc->debugfs_root; 246 root = mmc->debugfs_root;
@@ -251,9 +254,6 @@ static void atmci_init_debugfs(struct atmel_mci *host)
251 if (!node) 254 if (!node)
252 goto err; 255 goto err;
253 256
254 res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);
255 node->d_inode->i_size = res->end - res->start + 1;
256
257 node = debugfs_create_file("req", S_IRUSR, root, host, &atmci_req_fops); 257 node = debugfs_create_file("req", S_IRUSR, root, host, &atmci_req_fops);
258 if (!node) 258 if (!node)
259 goto err; 259 goto err;
@@ -426,8 +426,6 @@ static u32 atmci_submit_data(struct mmc_host *mmc, struct mmc_data *data)
426 host->sg = NULL; 426 host->sg = NULL;
427 host->data = data; 427 host->data = data;
428 428
429 mci_writel(host, BLKR, MCI_BCNT(data->blocks)
430 | MCI_BLKLEN(data->blksz));
431 dev_vdbg(&mmc->class_dev, "BLKR=0x%08x\n", 429 dev_vdbg(&mmc->class_dev, "BLKR=0x%08x\n",
432 MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); 430 MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
433 431
@@ -483,6 +481,10 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
483 if (data->blocks > 1 && data->blksz & 3) 481 if (data->blocks > 1 && data->blksz & 3)
484 goto fail; 482 goto fail;
485 atmci_set_timeout(host, data); 483 atmci_set_timeout(host, data);
484
485 /* Must set block count/size before sending command */
486 mci_writel(host, BLKR, MCI_BCNT(data->blocks)
487 | MCI_BLKLEN(data->blksz));
486 } 488 }
487 489
488 iflags = MCI_CMDRDY; 490 iflags = MCI_CMDRDY;
@@ -1059,6 +1061,10 @@ static int __init atmci_probe(struct platform_device *pdev)
1059 host->present = !gpio_get_value(host->detect_pin); 1061 host->present = !gpio_get_value(host->detect_pin);
1060 } 1062 }
1061 } 1063 }
1064
1065 if (!gpio_is_valid(host->detect_pin))
1066 mmc->caps |= MMC_CAP_NEEDS_POLL;
1067
1062 if (gpio_is_valid(host->wp_pin)) { 1068 if (gpio_is_valid(host->wp_pin)) {
1063 if (gpio_request(host->wp_pin, "mmc_wp")) { 1069 if (gpio_request(host->wp_pin, "mmc_wp")) {
1064 dev_dbg(&mmc->class_dev, "no WP pin available\n"); 1070 dev_dbg(&mmc->class_dev, "no WP pin available\n");
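Two fixes in atmci_regs_show() are worth spelling out: the controller clock must be running while the register file is copied, and the snapshot buffer has to be freed instead of leaking on every debugfs read. A condensed sketch of the corrected read path, assuming the host->mck, host->regs and MCI_REGS_SIZE names from the hunk:

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

static int atmci_regs_show_sketch(struct seq_file *s, void *v)
{
	struct atmel_mci *host = s->private;
	u32 *buf;

	buf = kmalloc(MCI_REGS_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock_irq(&host->mmc->lock);
	clk_enable(host->mck);		/* registers are undefined with the clock gated */
	memcpy_fromio(buf, host->regs, MCI_REGS_SIZE);
	clk_disable(host->mck);
	spin_unlock_irq(&host->mmc->lock);

	/* ... seq_printf() the decoded registers from buf ... */

	kfree(buf);			/* previously leaked on every read */
	return 0;
}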
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 9e647a06054f..ba2b4240a86a 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -159,10 +159,10 @@ static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
159#define STATUS_TO_TEXT(a) \ 159#define STATUS_TO_TEXT(a) \
160 do { \ 160 do { \
161 if (status & TMIO_STAT_##a) \ 161 if (status & TMIO_STAT_##a) \
162 printf(#a); \ 162 printk(#a); \
163 } while (0) 163 } while (0)
164 164
165void debug_status(u32 status) 165void pr_debug_status(u32 status)
166{ 166{
167 printk(KERN_DEBUG "status: %08x = ", status); 167 printk(KERN_DEBUG "status: %08x = ", status);
168 STATUS_TO_TEXT(CARD_REMOVE); 168 STATUS_TO_TEXT(CARD_REMOVE);
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index f34f20c78911..9bf581c4f740 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -1005,6 +1005,29 @@ static int ftl_writesect(struct mtd_blktrans_dev *dev,
1005 return ftl_write((void *)dev, buf, block, 1); 1005 return ftl_write((void *)dev, buf, block, 1);
1006} 1006}
1007 1007
1008static int ftl_discardsect(struct mtd_blktrans_dev *dev,
1009 unsigned long sector, unsigned nr_sects)
1010{
1011 partition_t *part = (void *)dev;
1012 uint32_t bsize = 1 << part->header.EraseUnitSize;
1013
1014 DEBUG(1, "FTL erase sector %ld for %d sectors\n",
1015 sector, nr_sects);
1016
1017 while (nr_sects) {
1018 uint32_t old_addr = part->VirtualBlockMap[sector];
1019 if (old_addr != 0xffffffff) {
1020 part->VirtualBlockMap[sector] = 0xffffffff;
1021 part->EUNInfo[old_addr/bsize].Deleted++;
1022 if (set_bam_entry(part, old_addr, 0))
1023 return -EIO;
1024 }
1025 nr_sects--;
1026 sector++;
1027 }
1028
1029 return 0;
1030}
1008/*====================================================================*/ 1031/*====================================================================*/
1009 1032
1010static void ftl_freepart(partition_t *part) 1033static void ftl_freepart(partition_t *part)
@@ -1069,6 +1092,7 @@ static struct mtd_blktrans_ops ftl_tr = {
1069 .blksize = SECTOR_SIZE, 1092 .blksize = SECTOR_SIZE,
1070 .readsect = ftl_readsect, 1093 .readsect = ftl_readsect,
1071 .writesect = ftl_writesect, 1094 .writesect = ftl_writesect,
1095 .discard = ftl_discardsect,
1072 .getgeo = ftl_getgeo, 1096 .getgeo = ftl_getgeo,
1073 .add_mtd = ftl_add_mtd, 1097 .add_mtd = ftl_add_mtd,
1074 .remove_dev = ftl_remove_dev, 1098 .remove_dev = ftl_remove_dev,
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 9ff007c4962c..681d5aca2af4 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -32,6 +32,14 @@ struct mtd_blkcore_priv {
32 spinlock_t queue_lock; 32 spinlock_t queue_lock;
33}; 33};
34 34
35static int blktrans_discard_request(struct request_queue *q,
36 struct request *req)
37{
38 req->cmd_type = REQ_TYPE_LINUX_BLOCK;
39 req->cmd[0] = REQ_LB_OP_DISCARD;
40 return 0;
41}
42
35static int do_blktrans_request(struct mtd_blktrans_ops *tr, 43static int do_blktrans_request(struct mtd_blktrans_ops *tr,
36 struct mtd_blktrans_dev *dev, 44 struct mtd_blktrans_dev *dev,
37 struct request *req) 45 struct request *req)
@@ -44,6 +52,10 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
44 52
45 buf = req->buffer; 53 buf = req->buffer;
46 54
55 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
56 req->cmd[0] == REQ_LB_OP_DISCARD)
57 return !tr->discard(dev, block, nsect);
58
47 if (!blk_fs_request(req)) 59 if (!blk_fs_request(req))
48 return 0; 60 return 0;
49 61
@@ -367,6 +379,10 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
367 379
368 tr->blkcore_priv->rq->queuedata = tr; 380 tr->blkcore_priv->rq->queuedata = tr;
369 blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize); 381 blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
382 if (tr->discard)
383 blk_queue_set_discard(tr->blkcore_priv->rq,
384 blktrans_discard_request);
385
370 tr->blkshift = ffs(tr->blksize) - 1; 386 tr->blkshift = ffs(tr->blksize) - 1;
371 387
372 tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr, 388 tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
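Taken together, the ftl.c and mtd_blkdevs.c hunks wire up a discard path: a queue prep callback tags discard requests, register_mtd_blktrans() only advertises discard when the translation layer provides a hook, and do_blktrans_request() routes tagged requests to ->discard() before the normal read/write handling. A sketch of the registration side, assuming the 2.6.27-era request types used in the hunk:

#include <linux/blkdev.h>
#include <linux/mtd/blktrans.h>

/* Prep callback handed to blk_queue_set_discard(): mark the request so
 * the MTD transfer thread can tell it apart from filesystem I/O. */
static int blktrans_discard_request(struct request_queue *q,
				    struct request *req)
{
	req->cmd_type = REQ_TYPE_LINUX_BLOCK;
	req->cmd[0] = REQ_LB_OP_DISCARD;
	return 0;
}

static void blktrans_setup_discard(struct mtd_blktrans_ops *tr)
{
	/* Only advertise discard when the translation layer (e.g. FTL)
	 * actually implements the hook. */
	if (tr->discard)
		blk_queue_set_discard(tr->blkcore_priv->rq,
				      blktrans_discard_request);
}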
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index ac4e506b4f88..5ea6b60fa377 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -257,7 +257,6 @@ struct e1000_adapter {
257 struct net_device *netdev; 257 struct net_device *netdev;
258 struct pci_dev *pdev; 258 struct pci_dev *pdev;
259 struct net_device_stats net_stats; 259 struct net_device_stats net_stats;
260 spinlock_t stats_lock; /* prevent concurrent stats updates */
261 260
262 /* structs defined in e1000_hw.h */ 261 /* structs defined in e1000_hw.h */
263 struct e1000_hw hw; 262 struct e1000_hw hw;
@@ -284,6 +283,8 @@ struct e1000_adapter {
284 unsigned long led_status; 283 unsigned long led_status;
285 284
286 unsigned int flags; 285 unsigned int flags;
286 struct work_struct downshift_task;
287 struct work_struct update_phy_task;
287}; 288};
288 289
289struct e1000_info { 290struct e1000_info {
@@ -305,6 +306,7 @@ struct e1000_info {
305#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) 306#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
306#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) 307#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
307#define FLAG_HAS_JUMBO_FRAMES (1 << 7) 308#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
309#define FLAG_READ_ONLY_NVM (1 << 8)
308#define FLAG_IS_ICH (1 << 9) 310#define FLAG_IS_ICH (1 << 9)
309#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) 311#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
310#define FLAG_IS_QUAD_PORT_A (1 << 12) 312#define FLAG_IS_QUAD_PORT_A (1 << 12)
@@ -385,6 +387,7 @@ extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
385extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw); 387extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
386extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); 388extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
387 389
390extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
388extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, 391extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
389 bool state); 392 bool state);
390extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); 393extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index e21c9e0f3738..33a3ff17b5d0 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -432,6 +432,10 @@ static void e1000_get_regs(struct net_device *netdev,
432 regs_buff[11] = er32(TIDV); 432 regs_buff[11] = er32(TIDV);
433 433
434 regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ 434 regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */
435
436 /* ethtool doesn't use anything past this point, so all this
437 * code is likely legacy junk for apps that may or may not
438 * exist */
435 if (hw->phy.type == e1000_phy_m88) { 439 if (hw->phy.type == e1000_phy_m88) {
436 e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); 440 e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
437 regs_buff[13] = (u32)phy_data; /* cable length */ 441 regs_buff[13] = (u32)phy_data; /* cable length */
@@ -447,7 +451,7 @@ static void e1000_get_regs(struct net_device *netdev,
447 regs_buff[22] = adapter->phy_stats.receive_errors; 451 regs_buff[22] = adapter->phy_stats.receive_errors;
448 regs_buff[23] = regs_buff[13]; /* mdix mode */ 452 regs_buff[23] = regs_buff[13]; /* mdix mode */
449 } 453 }
450 regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ 454 regs_buff[21] = 0; /* was idle_errors */
451 e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); 455 e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
452 regs_buff[24] = (u32)phy_data; /* phy local receiver status */ 456 regs_buff[24] = (u32)phy_data; /* phy local receiver status */
453 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ 457 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
@@ -529,6 +533,9 @@ static int e1000_set_eeprom(struct net_device *netdev,
529 if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16))) 533 if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16)))
530 return -EFAULT; 534 return -EFAULT;
531 535
536 if (adapter->flags & FLAG_READ_ONLY_NVM)
537 return -EINVAL;
538
532 max_len = hw->nvm.word_size * 2; 539 max_len = hw->nvm.word_size * 2;
533 540
534 first_word = eeprom->offset >> 1; 541 first_word = eeprom->offset >> 1;
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 9e38452a738c..bcd2bc477af2 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -58,6 +58,7 @@
58#define ICH_FLASH_HSFCTL 0x0006 58#define ICH_FLASH_HSFCTL 0x0006
59#define ICH_FLASH_FADDR 0x0008 59#define ICH_FLASH_FADDR 0x0008
60#define ICH_FLASH_FDATA0 0x0010 60#define ICH_FLASH_FDATA0 0x0010
61#define ICH_FLASH_PR0 0x0074
61 62
62#define ICH_FLASH_READ_COMMAND_TIMEOUT 500 63#define ICH_FLASH_READ_COMMAND_TIMEOUT 500
63#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500 64#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500
@@ -150,6 +151,19 @@ union ich8_hws_flash_regacc {
150 u16 regval; 151 u16 regval;
151}; 152};
152 153
154/* ICH Flash Protected Region */
155union ich8_flash_protected_range {
156 struct ich8_pr {
157 u32 base:13; /* 0:12 Protected Range Base */
158 u32 reserved1:2; /* 13:14 Reserved */
159 u32 rpe:1; /* 15 Read Protection Enable */
160 u32 limit:13; /* 16:28 Protected Range Limit */
161 u32 reserved2:2; /* 29:30 Reserved */
162 u32 wpe:1; /* 31 Write Protection Enable */
163 } range;
164 u32 regval;
165};
166
153static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); 167static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
154static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); 168static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
155static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); 169static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
@@ -366,6 +380,9 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
366 return 0; 380 return 0;
367} 381}
368 382
383static DEFINE_MUTEX(nvm_mutex);
384static pid_t nvm_owner = -1;
385
369/** 386/**
370 * e1000_acquire_swflag_ich8lan - Acquire software control flag 387 * e1000_acquire_swflag_ich8lan - Acquire software control flag
371 * @hw: pointer to the HW structure 388 * @hw: pointer to the HW structure
@@ -379,6 +396,15 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
379 u32 extcnf_ctrl; 396 u32 extcnf_ctrl;
380 u32 timeout = PHY_CFG_TIMEOUT; 397 u32 timeout = PHY_CFG_TIMEOUT;
381 398
399 might_sleep();
400
401 if (!mutex_trylock(&nvm_mutex)) {
402 WARN(1, KERN_ERR "e1000e mutex contention. Owned by pid %d\n",
403 nvm_owner);
404 mutex_lock(&nvm_mutex);
405 }
406 nvm_owner = current->pid;
407
382 while (timeout) { 408 while (timeout) {
383 extcnf_ctrl = er32(EXTCNF_CTRL); 409 extcnf_ctrl = er32(EXTCNF_CTRL);
384 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; 410 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
@@ -393,6 +419,8 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
393 419
394 if (!timeout) { 420 if (!timeout) {
395 hw_dbg(hw, "FW or HW has locked the resource for too long.\n"); 421 hw_dbg(hw, "FW or HW has locked the resource for too long.\n");
422 nvm_owner = -1;
423 mutex_unlock(&nvm_mutex);
396 return -E1000_ERR_CONFIG; 424 return -E1000_ERR_CONFIG;
397 } 425 }
398 426
@@ -414,6 +442,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
414 extcnf_ctrl = er32(EXTCNF_CTRL); 442 extcnf_ctrl = er32(EXTCNF_CTRL);
415 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; 443 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
416 ew32(EXTCNF_CTRL, extcnf_ctrl); 444 ew32(EXTCNF_CTRL, extcnf_ctrl);
445
446 nvm_owner = -1;
447 mutex_unlock(&nvm_mutex);
417} 448}
418 449
419/** 450/**
@@ -1284,6 +1315,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1284 * programming failed. 1315 * programming failed.
1285 */ 1316 */
1286 if (ret_val) { 1317 if (ret_val) {
1318 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
1287 hw_dbg(hw, "Flash commit failed.\n"); 1319 hw_dbg(hw, "Flash commit failed.\n");
1288 e1000_release_swflag_ich8lan(hw); 1320 e1000_release_swflag_ich8lan(hw);
1289 return ret_val; 1321 return ret_val;
@@ -1374,6 +1406,49 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
1374} 1406}
1375 1407
1376/** 1408/**
1409 * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
1410 * @hw: pointer to the HW structure
1411 *
1412 * To prevent malicious write/erase of the NVM, set it to be read-only
1413 * so that the hardware ignores all write/erase cycles of the NVM via
1414 * the flash control registers. The shadow-ram copy of the NVM will
1415 * still be updated, however any updates to this copy will not stick
1416 * across driver reloads.
1417 **/
1418void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
1419{
1420 union ich8_flash_protected_range pr0;
1421 union ich8_hws_flash_status hsfsts;
1422 u32 gfpreg;
1423 s32 ret_val;
1424
1425 ret_val = e1000_acquire_swflag_ich8lan(hw);
1426 if (ret_val)
1427 return;
1428
1429 gfpreg = er32flash(ICH_FLASH_GFPREG);
1430
1431 /* Write-protect GbE Sector of NVM */
1432 pr0.regval = er32flash(ICH_FLASH_PR0);
1433 pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
1434 pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
1435 pr0.range.wpe = true;
1436 ew32flash(ICH_FLASH_PR0, pr0.regval);
1437
1438 /*
1439 * Lock down a subset of GbE Flash Control Registers, e.g.
1440 * PR0 to prevent the write-protection from being lifted.
1441 * Once FLOCKDN is set, the registers protected by it cannot
1442 * be written until FLOCKDN is cleared by a hardware reset.
1443 */
1444 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
1445 hsfsts.hsf_status.flockdn = true;
1446 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
1447
1448 e1000_release_swflag_ich8lan(hw);
1449}
1450
1451/**
1377 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM 1452 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
1378 * @hw: pointer to the HW structure 1453 * @hw: pointer to the HW structure
1379 * @offset: The offset (in bytes) of the byte/word to read. 1454 * @offset: The offset (in bytes) of the byte/word to read.
@@ -1720,6 +1795,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
1720 ew32(CTRL, (ctrl | E1000_CTRL_RST)); 1795 ew32(CTRL, (ctrl | E1000_CTRL_RST));
1721 msleep(20); 1796 msleep(20);
1722 1797
1798 /* release the swflag because it is not reset by hardware reset */
1799 e1000_release_swflag_ich8lan(hw);
1800
1723 ret_val = e1000e_get_auto_rd_done(hw); 1801 ret_val = e1000e_get_auto_rd_done(hw);
1724 if (ret_val) { 1802 if (ret_val) {
1725 /* 1803 /*
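The ich8lan hunks serialize every software-flag acquisition behind a single mutex and record the owning pid, so contention shows up as a loud WARN instead of a silent NVM corruption; the flag is also released explicitly after a hardware reset, which does not clear it. A minimal sketch of the trylock-then-warn idiom, with the nvm_mutex/nvm_owner names taken from the hunk:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>

static DEFINE_MUTEX(nvm_mutex);
static pid_t nvm_owner = -1;

static void nvm_lock(void)
{
	might_sleep();

	if (!mutex_trylock(&nvm_mutex)) {
		/* Somebody else is mid NVM cycle; make the contention
		 * visible, then wait for the lock normally. */
		WARN(1, "e1000e mutex contention. Owned by pid %d\n",
		     nvm_owner);
		mutex_lock(&nvm_mutex);
	}
	nvm_owner = current->pid;
}

static void nvm_unlock(void)
{
	nvm_owner = -1;
	mutex_unlock(&nvm_mutex);
}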
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index d266510c8a94..b81c4237b5d3 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -47,7 +47,7 @@
47 47
48#include "e1000.h" 48#include "e1000.h"
49 49
50#define DRV_VERSION "0.3.3.3-k2" 50#define DRV_VERSION "0.3.3.3-k6"
51char e1000e_driver_name[] = "e1000e"; 51char e1000e_driver_name[] = "e1000e";
52const char e1000e_driver_version[] = DRV_VERSION; 52const char e1000e_driver_version[] = DRV_VERSION;
53 53
@@ -1115,6 +1115,14 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1115 writel(0, adapter->hw.hw_addr + rx_ring->tail); 1115 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1116} 1116}
1117 1117
1118static void e1000e_downshift_workaround(struct work_struct *work)
1119{
1120 struct e1000_adapter *adapter = container_of(work,
1121 struct e1000_adapter, downshift_task);
1122
1123 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1124}
1125
1118/** 1126/**
1119 * e1000_intr_msi - Interrupt Handler 1127 * e1000_intr_msi - Interrupt Handler
1120 * @irq: interrupt number 1128 * @irq: interrupt number
@@ -1139,7 +1147,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
1139 */ 1147 */
1140 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && 1148 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1141 (!(er32(STATUS) & E1000_STATUS_LU))) 1149 (!(er32(STATUS) & E1000_STATUS_LU)))
1142 e1000e_gig_downshift_workaround_ich8lan(hw); 1150 schedule_work(&adapter->downshift_task);
1143 1151
1144 /* 1152 /*
1145 * 80003ES2LAN workaround-- For packet buffer work-around on 1153 * 80003ES2LAN workaround-- For packet buffer work-around on
@@ -1205,7 +1213,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
1205 */ 1213 */
1206 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && 1214 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1207 (!(er32(STATUS) & E1000_STATUS_LU))) 1215 (!(er32(STATUS) & E1000_STATUS_LU)))
1208 e1000e_gig_downshift_workaround_ich8lan(hw); 1216 schedule_work(&adapter->downshift_task);
1209 1217
1210 /* 1218 /*
1211 * 80003ES2LAN workaround-- 1219 * 80003ES2LAN workaround--
@@ -2592,8 +2600,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2592 /* Explicitly disable IRQ since the NIC can be in any state. */ 2600 /* Explicitly disable IRQ since the NIC can be in any state. */
2593 e1000_irq_disable(adapter); 2601 e1000_irq_disable(adapter);
2594 2602
2595 spin_lock_init(&adapter->stats_lock);
2596
2597 set_bit(__E1000_DOWN, &adapter->state); 2603 set_bit(__E1000_DOWN, &adapter->state);
2598 return 0; 2604 return 0;
2599 2605
@@ -2912,6 +2918,21 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
2912 return 0; 2918 return 0;
2913} 2919}
2914 2920
2921/**
2922 * e1000e_update_phy_task - work thread to update phy
2923 * @work: pointer to our work struct
2924 *
2925 * this worker thread exists because we must acquire a
2926 * semaphore to read the phy, which we could msleep while
2927 * waiting for it, and we can't msleep in a timer.
2928 **/
2929static void e1000e_update_phy_task(struct work_struct *work)
2930{
2931 struct e1000_adapter *adapter = container_of(work,
2932 struct e1000_adapter, update_phy_task);
2933 e1000_get_phy_info(&adapter->hw);
2934}
2935
2915/* 2936/*
2916 * Need to wait a few seconds after link up to get diagnostic information from 2937 * Need to wait a few seconds after link up to get diagnostic information from
2917 * the phy 2938 * the phy
@@ -2919,7 +2940,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
2919static void e1000_update_phy_info(unsigned long data) 2940static void e1000_update_phy_info(unsigned long data)
2920{ 2941{
2921 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 2942 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2922 e1000_get_phy_info(&adapter->hw); 2943 schedule_work(&adapter->update_phy_task);
2923} 2944}
2924 2945
2925/** 2946/**
@@ -2930,10 +2951,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
2930{ 2951{
2931 struct e1000_hw *hw = &adapter->hw; 2952 struct e1000_hw *hw = &adapter->hw;
2932 struct pci_dev *pdev = adapter->pdev; 2953 struct pci_dev *pdev = adapter->pdev;
2933 unsigned long irq_flags;
2934 u16 phy_tmp;
2935
2936#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
2937 2954
2938 /* 2955 /*
2939 * Prevent stats update while adapter is being reset, or if the pci 2956 * Prevent stats update while adapter is being reset, or if the pci
@@ -2944,14 +2961,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
2944 if (pci_channel_offline(pdev)) 2961 if (pci_channel_offline(pdev))
2945 return; 2962 return;
2946 2963
2947 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
2948
2949 /*
2950 * these counters are modified from e1000_adjust_tbi_stats,
2951 * called from the interrupt context, so they must only
2952 * be written while holding adapter->stats_lock
2953 */
2954
2955 adapter->stats.crcerrs += er32(CRCERRS); 2964 adapter->stats.crcerrs += er32(CRCERRS);
2956 adapter->stats.gprc += er32(GPRC); 2965 adapter->stats.gprc += er32(GPRC);
2957 adapter->stats.gorc += er32(GORCL); 2966 adapter->stats.gorc += er32(GORCL);
@@ -3022,21 +3031,10 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
3022 3031
3023 /* Tx Dropped needs to be maintained elsewhere */ 3032 /* Tx Dropped needs to be maintained elsewhere */
3024 3033
3025 /* Phy Stats */
3026 if (hw->phy.media_type == e1000_media_type_copper) {
3027 if ((adapter->link_speed == SPEED_1000) &&
3028 (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) {
3029 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3030 adapter->phy_stats.idle_errors += phy_tmp;
3031 }
3032 }
3033
3034 /* Management Stats */ 3034 /* Management Stats */
3035 adapter->stats.mgptc += er32(MGTPTC); 3035 adapter->stats.mgptc += er32(MGTPTC);
3036 adapter->stats.mgprc += er32(MGTPRC); 3036 adapter->stats.mgprc += er32(MGTPRC);
3037 adapter->stats.mgpdc += er32(MGTPDC); 3037 adapter->stats.mgpdc += er32(MGTPDC);
3038
3039 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
3040} 3038}
3041 3039
3042/** 3040/**
@@ -3048,10 +3046,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
3048 struct e1000_hw *hw = &adapter->hw; 3046 struct e1000_hw *hw = &adapter->hw;
3049 struct e1000_phy_regs *phy = &adapter->phy_regs; 3047 struct e1000_phy_regs *phy = &adapter->phy_regs;
3050 int ret_val; 3048 int ret_val;
3051 unsigned long irq_flags;
3052
3053
3054 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
3055 3049
3056 if ((er32(STATUS) & E1000_STATUS_LU) && 3050 if ((er32(STATUS) & E1000_STATUS_LU) &&
3057 (adapter->hw.phy.media_type == e1000_media_type_copper)) { 3051 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
@@ -3082,8 +3076,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
3082 phy->stat1000 = 0; 3076 phy->stat1000 = 0;
3083 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); 3077 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
3084 } 3078 }
3085
3086 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
3087} 3079}
3088 3080
3089static void e1000_print_link_info(struct e1000_adapter *adapter) 3081static void e1000_print_link_info(struct e1000_adapter *adapter)
@@ -4467,6 +4459,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4467 4459
4468 adapter->bd_number = cards_found++; 4460 adapter->bd_number = cards_found++;
4469 4461
4462 e1000e_check_options(adapter);
4463
4470 /* setup adapter struct */ 4464 /* setup adapter struct */
4471 err = e1000_sw_init(adapter); 4465 err = e1000_sw_init(adapter);
4472 if (err) 4466 if (err)
@@ -4482,6 +4476,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4482 if (err) 4476 if (err)
4483 goto err_hw_init; 4477 goto err_hw_init;
4484 4478
4479 if ((adapter->flags & FLAG_IS_ICH) &&
4480 (adapter->flags & FLAG_READ_ONLY_NVM))
4481 e1000e_write_protect_nvm_ich8lan(&adapter->hw);
4482
4485 hw->mac.ops.get_bus_info(&adapter->hw); 4483 hw->mac.ops.get_bus_info(&adapter->hw);
4486 4484
4487 adapter->hw.phy.autoneg_wait_to_complete = 0; 4485 adapter->hw.phy.autoneg_wait_to_complete = 0;
@@ -4572,8 +4570,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4572 4570
4573 INIT_WORK(&adapter->reset_task, e1000_reset_task); 4571 INIT_WORK(&adapter->reset_task, e1000_reset_task);
4574 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); 4572 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
4575 4573 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
4576 e1000e_check_options(adapter); 4574 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
4577 4575
4578 /* Initialize link parameters. User can change them with ethtool */ 4576 /* Initialize link parameters. User can change them with ethtool */
4579 adapter->hw.mac.autoneg = 1; 4577 adapter->hw.mac.autoneg = 1;
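Both the gig downshift workaround and the PHY-info refresh used to be called from interrupt or timer context even though they may sleep on a semaphore; the netdev.c hunks defer them to process context instead. A minimal sketch of the pattern, using the downshift_task field added in e1000.h (the _sketch names are illustrative):

#include <linux/interrupt.h>
#include <linux/workqueue.h>

static void e1000e_downshift_workaround_sketch(struct work_struct *work)
{
	struct e1000_adapter *adapter =
		container_of(work, struct e1000_adapter, downshift_task);

	/* Runs in process context, so sleeping here is fine. */
	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}

/* probe: INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround_sketch); */

static irqreturn_t e1000_intr_sketch(int irq, void *data)
{
	struct e1000_adapter *adapter = data;

	/* Interrupt context must not sleep: just queue the work item. */
	schedule_work(&adapter->downshift_task);
	return IRQ_HANDLED;
}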
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index ed912e023a72..d91dbf7ba434 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -133,6 +133,15 @@ E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
133 */ 133 */
134E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); 134E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
135 135
136/*
137 * Write Protect NVM
138 *
139 * Valid Range: 0, 1
140 *
141 * Default Value: 1 (enabled)
142 */
143E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
144
136struct e1000_option { 145struct e1000_option {
137 enum { enable_option, range_option, list_option } type; 146 enum { enable_option, range_option, list_option } type;
138 const char *name; 147 const char *name;
@@ -388,4 +397,25 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
388 opt.def); 397 opt.def);
389 } 398 }
390 } 399 }
400 { /* Write-protect NVM */
401 const struct e1000_option opt = {
402 .type = enable_option,
403 .name = "Write-protect NVM",
404 .err = "defaulting to Enabled",
405 .def = OPTION_ENABLED
406 };
407
408 if (adapter->flags & FLAG_IS_ICH) {
409 if (num_WriteProtectNVM > bd) {
410 unsigned int write_protect_nvm = WriteProtectNVM[bd];
411 e1000_validate_option(&write_protect_nvm, &opt,
412 adapter);
413 if (write_protect_nvm)
414 adapter->flags |= FLAG_READ_ONLY_NVM;
415 } else {
416 if (opt.def)
417 adapter->flags |= FLAG_READ_ONLY_NVM;
418 }
419 }
420 }
391} 421}
diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c
index f6c45288d0e7..87e37bc39145 100644
--- a/drivers/net/wireless/ath9k/core.c
+++ b/drivers/net/wireless/ath9k/core.c
@@ -294,8 +294,6 @@ static int ath_stop(struct ath_softc *sc)
294 * hardware is gone (invalid). 294 * hardware is gone (invalid).
295 */ 295 */
296 296
297 if (!sc->sc_invalid)
298 ath9k_hw_set_interrupts(ah, 0);
299 ath_draintxq(sc, false); 297 ath_draintxq(sc, false);
300 if (!sc->sc_invalid) { 298 if (!sc->sc_invalid) {
301 ath_stoprecv(sc); 299 ath_stoprecv(sc);
@@ -797,6 +795,12 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
797 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) 795 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
798 sc->sc_imask |= ATH9K_INT_CST; 796 sc->sc_imask |= ATH9K_INT_CST;
799 797
798 /* Note: We disable MIB interrupts for now as we don't yet
799 * handle processing ANI, otherwise you will get an interrupt
800 * storm after about 7 hours of usage making the system unusable
801 * with huge latency. Once we do have ANI processing included
802 * we can re-enable this interrupt. */
803#if 0
800 /* 804 /*
801 * Enable MIB interrupts when there are hardware phy counters. 805 * Enable MIB interrupts when there are hardware phy counters.
802 * Note we only do this (at the moment) for station mode. 806 * Note we only do this (at the moment) for station mode.
@@ -804,6 +808,7 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
804 if (ath9k_hw_phycounters(ah) && 808 if (ath9k_hw_phycounters(ah) &&
805 ((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS))) 809 ((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS)))
806 sc->sc_imask |= ATH9K_INT_MIB; 810 sc->sc_imask |= ATH9K_INT_MIB;
811#endif
807 /* 812 /*
808 * Some hardware processes the TIM IE and fires an 813 * Some hardware processes the TIM IE and fires an
809 * interrupt when the TIM bit is set. For hardware 814 * interrupt when the TIM bit is set. For hardware
@@ -1336,6 +1341,8 @@ void ath_deinit(struct ath_softc *sc)
1336 1341
1337 DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__); 1342 DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);
1338 1343
1344 tasklet_kill(&sc->intr_tq);
1345 tasklet_kill(&sc->bcon_tasklet);
1339 ath_stop(sc); 1346 ath_stop(sc);
1340 if (!sc->sc_invalid) 1347 if (!sc->sc_invalid)
1341 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 1348 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
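ath_deinit() now kills both tasklets before the rest of the teardown, so a late interrupt cannot schedule work against state that is about to disappear. A minimal sketch of the ordering, assuming the sc->intr_tq and sc->bcon_tasklet fields from the hunk:

#include <linux/interrupt.h>

static void ath_deinit_sketch(struct ath_softc *sc)
{
	/* No tasklet may run past these calls ... */
	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	/* ... so it is now safe to stop the hardware and free state. */
	ath_stop(sc);
}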
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
index 4ee695b76b88..2f84093331ee 100644
--- a/drivers/net/wireless/ath9k/core.h
+++ b/drivers/net/wireless/ath9k/core.h
@@ -974,7 +974,6 @@ struct ath_softc {
974 u32 sc_keymax; /* size of key cache */ 974 u32 sc_keymax; /* size of key cache */
975 DECLARE_BITMAP(sc_keymap, ATH_KEYMAX); /* key use bit map */ 975 DECLARE_BITMAP(sc_keymap, ATH_KEYMAX); /* key use bit map */
976 u8 sc_splitmic; /* split TKIP MIC keys */ 976 u8 sc_splitmic; /* split TKIP MIC keys */
977 int sc_keytype;
978 977
979 /* RX */ 978 /* RX */
980 struct list_head sc_rxbuf; 979 struct list_head sc_rxbuf;
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index 99badf1404c3..acebdf1d20a8 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -206,8 +206,6 @@ static int ath_key_config(struct ath_softc *sc,
206 if (!ret) 206 if (!ret)
207 return -EIO; 207 return -EIO;
208 208
209 if (mac)
210 sc->sc_keytype = hk.kv_type;
211 return 0; 209 return 0;
212} 210}
213 211
@@ -778,7 +776,6 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
778 case DISABLE_KEY: 776 case DISABLE_KEY:
779 ath_key_delete(sc, key); 777 ath_key_delete(sc, key);
780 clear_bit(key->keyidx, sc->sc_keymap); 778 clear_bit(key->keyidx, sc->sc_keymap);
781 sc->sc_keytype = ATH9K_CIPHER_CLR;
782 break; 779 break;
783 default: 780 default:
784 ret = -EINVAL; 781 ret = -EINVAL;
@@ -1414,10 +1411,17 @@ static void ath_pci_remove(struct pci_dev *pdev)
1414{ 1411{
1415 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 1412 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1416 struct ath_softc *sc = hw->priv; 1413 struct ath_softc *sc = hw->priv;
1414 enum ath9k_int status;
1417 1415
1418 if (pdev->irq) 1416 if (pdev->irq) {
1417 ath9k_hw_set_interrupts(sc->sc_ah, 0);
1418 /* clear the ISR */
1419 ath9k_hw_getisr(sc->sc_ah, &status);
1420 sc->sc_invalid = 1;
1419 free_irq(pdev->irq, sc); 1421 free_irq(pdev->irq, sc);
1422 }
1420 ath_detach(sc); 1423 ath_detach(sc);
1424
1421 pci_iounmap(pdev, sc->mem); 1425 pci_iounmap(pdev, sc->mem);
1422 pci_release_region(pdev, 0); 1426 pci_release_region(pdev, 0);
1423 pci_disable_device(pdev); 1427 pci_disable_device(pdev);
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index 550129f717e2..8b332e11a656 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -315,11 +315,11 @@ static int ath_tx_prepare(struct ath_softc *sc,
315 txctl->keyix = tx_info->control.hw_key->hw_key_idx; 315 txctl->keyix = tx_info->control.hw_key->hw_key_idx;
316 txctl->frmlen += tx_info->control.icv_len; 316 txctl->frmlen += tx_info->control.icv_len;
317 317
318 if (sc->sc_keytype == ATH9K_CIPHER_WEP) 318 if (tx_info->control.hw_key->alg == ALG_WEP)
319 txctl->keytype = ATH9K_KEY_TYPE_WEP; 319 txctl->keytype = ATH9K_KEY_TYPE_WEP;
320 else if (sc->sc_keytype == ATH9K_CIPHER_TKIP) 320 else if (tx_info->control.hw_key->alg == ALG_TKIP)
321 txctl->keytype = ATH9K_KEY_TYPE_TKIP; 321 txctl->keytype = ATH9K_KEY_TYPE_TKIP;
322 else if (sc->sc_keytype == ATH9K_CIPHER_AES_CCM) 322 else if (tx_info->control.hw_key->alg == ALG_CCMP)
323 txctl->keytype = ATH9K_KEY_TYPE_AES; 323 txctl->keytype = ATH9K_KEY_TYPE_AES;
324 } 324 }
325 325
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index da8b7433e3a6..a60ae86bd5c9 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -58,6 +58,7 @@ static struct usb_device_id usb_ids[] = {
58 { USB_DEVICE(0x0586, 0x3407), .driver_info = DEVICE_ZD1211 }, 58 { USB_DEVICE(0x0586, 0x3407), .driver_info = DEVICE_ZD1211 },
59 { USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 }, 59 { USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 },
60 { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, 60 { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 },
61 { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 },
61 /* ZD1211B */ 62 /* ZD1211B */
62 { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, 63 { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
63 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, 64 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 9c718583a237..77baff022f71 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -16,6 +16,7 @@
16 16
17 17
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/sched.h>
19#include <linux/pci.h> 20#include <linux/pci.h>
20#include <linux/stat.h> 21#include <linux/stat.h>
21#include <linux/topology.h> 22#include <linux/topology.h>
@@ -484,6 +485,21 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
484#endif /* HAVE_PCI_LEGACY */ 485#endif /* HAVE_PCI_LEGACY */
485 486
486#ifdef HAVE_PCI_MMAP 487#ifdef HAVE_PCI_MMAP
488
489static int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma)
490{
491 unsigned long nr, start, size;
492
493 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
494 start = vma->vm_pgoff;
495 size = pci_resource_len(pdev, resno) >> PAGE_SHIFT;
496 if (start < size && size - start >= nr)
497 return 1;
498 WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n",
499 current->comm, start, start+nr, pci_name(pdev), resno, size);
500 return 0;
501}
502
487/** 503/**
488 * pci_mmap_resource - map a PCI resource into user memory space 504 * pci_mmap_resource - map a PCI resource into user memory space
489 * @kobj: kobject for mapping 505 * @kobj: kobject for mapping
@@ -510,6 +526,9 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
510 if (i >= PCI_ROM_RESOURCE) 526 if (i >= PCI_ROM_RESOURCE)
511 return -ENODEV; 527 return -ENODEV;
512 528
529 if (!pci_mmap_fits(pdev, i, vma))
530 return -EINVAL;
531
513 /* pci_mmap_page_range() expects the same kind of entry as coming 532 /* pci_mmap_page_range() expects the same kind of entry as coming
514 * from /proc/bus/pci/ which is a "user visible" value. If this is 533 * from /proc/bus/pci/ which is a "user visible" value. If this is
515 * different from the resource itself, arch will do necessary fixup. 534 * different from the resource itself, arch will do necessary fixup.
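pci_mmap_fits() works in units of pages: nr is the vma length, start is the file offset into the BAR, and size is the BAR length; the map is allowed only when start lies inside the resource and the remaining pages cover nr. A compact sketch of the check with a worked example (mmap_fits_sketch is a made-up name):

static int mmap_fits_sketch(unsigned long nr, unsigned long start,
			    unsigned long size)
{
	/* All three values are page counts. */
	return start < size && size - start >= nr;
}

/* 16 KiB BAR on 4 KiB pages => size = 4:
 *   start = 2, nr = 2  -> fits (pages 2..3)
 *   start = 3, nr = 2  -> rejected, pci_mmap_resource() returns -EINVAL
 */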
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 9a7c9e1408a4..851f5b83cdbc 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -527,7 +527,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
527 */ 527 */
528 pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP, 528 pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP,
529 &reg32); 529 &reg32);
530 if (!(reg32 & PCI_EXP_DEVCAP_RBER && !aspm_force)) { 530 if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
531 printk("Pre-1.1 PCIe device detected, " 531 printk("Pre-1.1 PCIe device detected, "
532 "disable ASPM for %s. It can be enabled forcedly" 532 "disable ASPM for %s. It can be enabled forcedly"
533 " with 'pcie_aspm=force'\n", pci_name(pdev)); 533 " with 'pcie_aspm=force'\n", pci_name(pdev));
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 3b3b5f178797..4edfc4731bd4 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -162,7 +162,7 @@ EXPORT_SYMBOL(pci_find_slot);
162 * time. 162 * time.
163 */ 163 */
164struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, 164struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
165 const struct pci_dev *from) 165 struct pci_dev *from)
166{ 166{
167 struct pci_dev *pdev; 167 struct pci_dev *pdev;
168 168
@@ -263,7 +263,7 @@ static int match_pci_dev_by_id(struct device *dev, void *data)
263 * this file. 263 * this file.
264 */ 264 */
265static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id, 265static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
266 const struct pci_dev *from) 266 struct pci_dev *from)
267{ 267{
268 struct device *dev; 268 struct device *dev;
269 struct device *dev_start = NULL; 269 struct device *dev_start = NULL;
@@ -303,7 +303,7 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
303 */ 303 */
304struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, 304struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
305 unsigned int ss_vendor, unsigned int ss_device, 305 unsigned int ss_vendor, unsigned int ss_device,
306 const struct pci_dev *from) 306 struct pci_dev *from)
307{ 307{
308 struct pci_dev *pdev; 308 struct pci_dev *pdev;
309 struct pci_device_id *id; 309 struct pci_device_id *id;
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 4174d9656e35..34c83d3ca0fa 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -427,6 +427,18 @@ static int pcmcia_device_probe(struct device * dev)
427 p_drv = to_pcmcia_drv(dev->driver); 427 p_drv = to_pcmcia_drv(dev->driver);
428 s = p_dev->socket; 428 s = p_dev->socket;
429 429
430 /* The PCMCIA code passes the match data in via dev->driver_data
431 * which is an ugly hack. Once the driver probe is called it may
432 * and often will overwrite the match data so we must save it first
433 *
434 * handle pseudo multifunction devices:
435 * there are at most two pseudo multifunction devices.
436 * if we're matching against the first, schedule a
437 * call which will then check whether there are two
438 * pseudo devices, and if not, add the second one.
439 */
440 did = p_dev->dev.driver_data;
441
430 ds_dbg(1, "trying to bind %s to %s\n", p_dev->dev.bus_id, 442 ds_dbg(1, "trying to bind %s to %s\n", p_dev->dev.bus_id,
431 p_drv->drv.name); 443 p_drv->drv.name);
432 444
@@ -455,21 +467,14 @@ static int pcmcia_device_probe(struct device * dev)
455 goto put_module; 467 goto put_module;
456 } 468 }
457 469
458 /* handle pseudo multifunction devices:
459 * there are at most two pseudo multifunction devices.
460 * if we're matching against the first, schedule a
461 * call which will then check whether there are two
462 * pseudo devices, and if not, add the second one.
463 */
464 did = p_dev->dev.driver_data;
465 if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) && 470 if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) &&
466 (p_dev->socket->device_count == 1) && (p_dev->device_no == 0)) 471 (p_dev->socket->device_count == 1) && (p_dev->device_no == 0))
467 pcmcia_add_device_later(p_dev->socket, 0); 472 pcmcia_add_device_later(p_dev->socket, 0);
468 473
469 put_module: 474put_module:
470 if (ret) 475 if (ret)
471 module_put(p_drv->owner); 476 module_put(p_drv->owner);
472 put_dev: 477put_dev:
473 if (ret) 478 if (ret)
474 put_device(dev); 479 put_device(dev);
475 return (ret); 480 return (ret);
diff --git a/drivers/pnp/Makefile b/drivers/pnp/Makefile
index 26f5abc9c3f7..e83f34f1b5ba 100644
--- a/drivers/pnp/Makefile
+++ b/drivers/pnp/Makefile
@@ -2,12 +2,15 @@
2# Makefile for the Linux Plug-and-Play Support. 2# Makefile for the Linux Plug-and-Play Support.
3# 3#
4 4
5obj-y := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o system.o 5obj-y := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o
6 6
7obj-$(CONFIG_PNPACPI) += pnpacpi/ 7obj-$(CONFIG_PNPACPI) += pnpacpi/
8obj-$(CONFIG_PNPBIOS) += pnpbios/ 8obj-$(CONFIG_PNPBIOS) += pnpbios/
9obj-$(CONFIG_ISAPNP) += isapnp/ 9obj-$(CONFIG_ISAPNP) += isapnp/
10 10
11# pnp_system_init goes after pnpacpi/pnpbios init
12obj-y += system.o
13
11ifeq ($(CONFIG_PNP_DEBUG),y) 14ifeq ($(CONFIG_PNP_DEBUG),y)
12EXTRA_CFLAGS += -DDEBUG 15EXTRA_CFLAGS += -DDEBUG
13endif 16endif
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index c1b9ea34977b..53561d72b4ee 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -268,7 +268,7 @@ static int __init pnpacpi_init(void)
268 return 0; 268 return 0;
269} 269}
270 270
271subsys_initcall(pnpacpi_init); 271fs_initcall(pnpacpi_init);
272 272
273static int __init pnpacpi_setup(char *str) 273static int __init pnpacpi_setup(char *str)
274{ 274{
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 19a4be1a9a31..662dfcddedc6 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -571,7 +571,7 @@ static int __init pnpbios_init(void)
571 return 0; 571 return 0;
572} 572}
573 573
574subsys_initcall(pnpbios_init); 574fs_initcall(pnpbios_init);
575 575
576static int __init pnpbios_thread_init(void) 576static int __init pnpbios_thread_init(void)
577{ 577{
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index f118252f3a9f..52e2743b04ec 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -422,6 +422,12 @@ done:
422 return err; 422 return err;
423} 423}
424 424
425static int rtc_dev_fasync(int fd, struct file *file, int on)
426{
427 struct rtc_device *rtc = file->private_data;
428 return fasync_helper(fd, file, on, &rtc->async_queue);
429}
430
425static int rtc_dev_release(struct inode *inode, struct file *file) 431static int rtc_dev_release(struct inode *inode, struct file *file)
426{ 432{
427 struct rtc_device *rtc = file->private_data; 433 struct rtc_device *rtc = file->private_data;
@@ -434,16 +440,13 @@ static int rtc_dev_release(struct inode *inode, struct file *file)
434 if (rtc->ops->release) 440 if (rtc->ops->release)
435 rtc->ops->release(rtc->dev.parent); 441 rtc->ops->release(rtc->dev.parent);
436 442
443 if (file->f_flags & FASYNC)
444 rtc_dev_fasync(-1, file, 0);
445
437 clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); 446 clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
438 return 0; 447 return 0;
439} 448}
440 449
441static int rtc_dev_fasync(int fd, struct file *file, int on)
442{
443 struct rtc_device *rtc = file->private_data;
444 return fasync_helper(fd, file, on, &rtc->async_queue);
445}
446
447static const struct file_operations rtc_dev_fops = { 450static const struct file_operations rtc_dev_fops = {
448 .owner = THIS_MODULE, 451 .owner = THIS_MODULE,
449 .llseek = no_llseek, 452 .llseek = no_llseek,
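Moving rtc_dev_fasync() above the release handler lets release detach the file from the async notification list itself, via fasync_helper(-1, file, 0), whenever FASYNC was set. A minimal sketch of the pattern as it applies to any character device with an async queue (example_* names are illustrative):

#include <linux/fs.h>
#include <linux/rtc.h>

static int example_fasync(int fd, struct file *file, int on)
{
	struct rtc_device *rtc = file->private_data;

	return fasync_helper(fd, file, on, &rtc->async_queue);
}

static int example_release(struct inode *inode, struct file *file)
{
	/* If the process asked for SIGIO delivery, detach it here so the
	 * async queue never points at a closed file. */
	if (file->f_flags & FASYNC)
		example_fasync(-1, file, 0);

	return 0;
}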
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 03c0e40a92ff..e3b5c4d3036e 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -76,7 +76,8 @@ dasd_devices_show(struct seq_file *m, void *v)
76 /* Print kdev. */ 76 /* Print kdev. */
77 if (block->gdp) 77 if (block->gdp)
78 seq_printf(m, " at (%3d:%6d)", 78 seq_printf(m, " at (%3d:%6d)",
79 block->gdp->major, block->gdp->first_minor); 79 MAJOR(disk_devt(block->gdp)),
80 MINOR(disk_devt(block->gdp)));
80 else 81 else
81 seq_printf(m, " at (???:??????)"); 82 seq_printf(m, " at (???:??????)");
82 /* Print device name. */ 83 /* Print device name. */
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 711b3004b3e6..9481e4a3f76e 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -114,7 +114,7 @@ dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
114 found = 0; 114 found = 0;
115 // test if minor available 115 // test if minor available
116 list_for_each_entry(entry, &dcssblk_devices, lh) 116 list_for_each_entry(entry, &dcssblk_devices, lh)
117 if (minor == entry->gd->first_minor) 117 if (minor == MINOR(disk_devt(entry->gd)))
118 found++; 118 found++;
119 if (!found) break; // got unused minor 119 if (!found) break; // got unused minor
120 } 120 }
@@ -397,7 +397,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
397 goto unload_seg; 397 goto unload_seg;
398 } 398 }
399 sprintf(dev_info->gd->disk_name, "dcssblk%d", 399 sprintf(dev_info->gd->disk_name, "dcssblk%d",
400 dev_info->gd->first_minor); 400 MINOR(disk_devt(dev_info->gd)));
401 list_add_tail(&dev_info->lh, &dcssblk_devices); 401 list_add_tail(&dev_info->lh, &dcssblk_devices);
402 402
403 if (!try_module_get(THIS_MODULE)) { 403 if (!try_module_get(THIS_MODULE)) {
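Both s390 block drivers stop reading gendisk->major and ->first_minor directly and go through disk_devt(), which returns the dev_t of the whole disk; MAJOR()/MINOR() then split it. A small sketch of the accessor, with print_disk_ids as a made-up helper name:

#include <linux/genhd.h>
#include <linux/kdev_t.h>

static void print_disk_ids(struct gendisk *gd)
{
	dev_t devt = disk_devt(gd);	/* preferred over ->major/->first_minor */

	printk(KERN_INFO "%s is (%u:%u)\n",
	       gd->disk_name, MAJOR(devt), MINOR(devt));
}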
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 1679e2f91c94..a0b6b46e7466 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -447,51 +447,36 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
447{ 447{
448 char s[80]; 448 char s[80];
449 449
450 sprintf(s, "%s sc:%x ", cdev->dev.bus_id, irq_ptr->schid.sch_no); 450 sprintf(s, "qdio: %s ", dev_name(&cdev->dev));
451
452 switch (irq_ptr->qib.qfmt) { 451 switch (irq_ptr->qib.qfmt) {
453 case QDIO_QETH_QFMT: 452 case QDIO_QETH_QFMT:
454 sprintf(s + strlen(s), "OSADE "); 453 sprintf(s + strlen(s), "OSA ");
455 break; 454 break;
456 case QDIO_ZFCP_QFMT: 455 case QDIO_ZFCP_QFMT:
457 sprintf(s + strlen(s), "ZFCP "); 456 sprintf(s + strlen(s), "ZFCP ");
458 break; 457 break;
459 case QDIO_IQDIO_QFMT: 458 case QDIO_IQDIO_QFMT:
460 sprintf(s + strlen(s), "HiperSockets "); 459 sprintf(s + strlen(s), "HS ");
461 break; 460 break;
462 } 461 }
463 sprintf(s + strlen(s), "using: "); 462 sprintf(s + strlen(s), "on SC %x using ", irq_ptr->schid.sch_no);
464 463 sprintf(s + strlen(s), "AI:%d ", is_thinint_irq(irq_ptr));
465 if (!is_thinint_irq(irq_ptr)) 464 sprintf(s + strlen(s), "QEBSM:%d ", (irq_ptr->sch_token) ? 1 : 0);
466 sprintf(s + strlen(s), "no"); 465 sprintf(s + strlen(s), "PCI:%d ",
467 sprintf(s + strlen(s), "AdapterInterrupts "); 466 (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0);
468 if (!(irq_ptr->sch_token != 0)) 467 sprintf(s + strlen(s), "TDD:%d ", css_general_characteristics.aif_tdd);
469 sprintf(s + strlen(s), "no"); 468 sprintf(s + strlen(s), "SIGA:");
470 sprintf(s + strlen(s), "QEBSM "); 469 sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.input) ? "R" : " ");
471 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) 470 sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.output) ? "W" : " ");
472 sprintf(s + strlen(s), "no"); 471 sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.sync) ? "S" : " ");
473 sprintf(s + strlen(s), "OutboundPCI "); 472 sprintf(s + strlen(s), "%s",
474 if (!css_general_characteristics.aif_tdd) 473 (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ");
475 sprintf(s + strlen(s), "no"); 474 sprintf(s + strlen(s), "%s",
476 sprintf(s + strlen(s), "TDD\n"); 475 (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ");
477 printk(KERN_INFO "qdio: %s", s); 476 sprintf(s + strlen(s), "%s",
478 477 (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " ");
479 memset(s, 0, sizeof(s));
480 sprintf(s, "%s SIGA required: ", cdev->dev.bus_id);
481 if (irq_ptr->siga_flag.input)
482 sprintf(s + strlen(s), "Read ");
483 if (irq_ptr->siga_flag.output)
484 sprintf(s + strlen(s), "Write ");
485 if (irq_ptr->siga_flag.sync)
486 sprintf(s + strlen(s), "Sync ");
487 if (!irq_ptr->siga_flag.no_sync_ti)
488 sprintf(s + strlen(s), "SyncAI ");
489 if (!irq_ptr->siga_flag.no_sync_out_ti)
490 sprintf(s + strlen(s), "SyncOutAI ");
491 if (!irq_ptr->siga_flag.no_sync_out_pci)
492 sprintf(s + strlen(s), "SyncOutPCI");
493 sprintf(s + strlen(s), "\n"); 478 sprintf(s + strlen(s), "\n");
494 printk(KERN_INFO "qdio: %s", s); 479 printk(KERN_INFO "%s", s);
495} 480}
496 481
497int __init qdio_setup_init(void) 482int __init qdio_setup_init(void)
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 90abfd06ed55..24255e42dc30 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -88,11 +88,13 @@ static int __init zfcp_device_setup(char *devstr)
88 strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE); 88 strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE);
89 89
90 token = strsep(&str, ","); 90 token = strsep(&str, ",");
91 if (!token || strict_strtoull(token, 0, &zfcp_data.init_wwpn)) 91 if (!token || strict_strtoull(token, 0,
92 (unsigned long long *) &zfcp_data.init_wwpn))
92 goto err_out; 93 goto err_out;
93 94
94 token = strsep(&str, ","); 95 token = strsep(&str, ",");
95 if (!token || strict_strtoull(token, 0, &zfcp_data.init_fcp_lun)) 96 if (!token || strict_strtoull(token, 0,
97 (unsigned long long *) &zfcp_data.init_fcp_lun))
96 goto err_out; 98 goto err_out;
97 99
98 kfree(str); 100 kfree(str);
@@ -100,24 +102,10 @@ static int __init zfcp_device_setup(char *devstr)
100 102
101 err_out: 103 err_out:
102 kfree(str); 104 kfree(str);
103 pr_err("zfcp: Parse error for device parameter string %s, " 105 pr_err("zfcp: %s is not a valid SCSI device\n", devstr);
104 "device not attached.\n", devstr);
105 return 0; 106 return 0;
106} 107}
107 108
108static struct zfcp_adapter *zfcp_get_adapter_by_busid(char *bus_id)
109{
110 struct zfcp_adapter *adapter;
111
112 list_for_each_entry(adapter, &zfcp_data.adapter_list_head, list)
113 if ((strncmp(bus_id, adapter->ccw_device->dev.bus_id,
114 BUS_ID_SIZE) == 0) &&
115 !(atomic_read(&adapter->status) &
116 ZFCP_STATUS_COMMON_REMOVE))
117 return adapter;
118 return NULL;
119}
120
121static void __init zfcp_init_device_configure(void) 109static void __init zfcp_init_device_configure(void)
122{ 110{
123 struct zfcp_adapter *adapter; 111 struct zfcp_adapter *adapter;
@@ -141,7 +129,12 @@ static void __init zfcp_init_device_configure(void)
141 goto out_unit; 129 goto out_unit;
142 up(&zfcp_data.config_sema); 130 up(&zfcp_data.config_sema);
143 ccw_device_set_online(adapter->ccw_device); 131 ccw_device_set_online(adapter->ccw_device);
132
144 zfcp_erp_wait(adapter); 133 zfcp_erp_wait(adapter);
134 wait_event(adapter->erp_done_wqh,
135 !(atomic_read(&unit->status) &
136 ZFCP_STATUS_UNIT_SCSI_WORK_PENDING));
137
145 down(&zfcp_data.config_sema); 138 down(&zfcp_data.config_sema);
146 zfcp_unit_put(unit); 139 zfcp_unit_put(unit);
147out_unit: 140out_unit:
@@ -180,9 +173,9 @@ static int __init zfcp_module_init(void)
180 if (!zfcp_data.gid_pn_cache) 173 if (!zfcp_data.gid_pn_cache)
181 goto out_gid_cache; 174 goto out_gid_cache;
182 175
183 INIT_LIST_HEAD(&zfcp_data.adapter_list_head); 176 zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");
184 INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh);
185 177
178 INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
186 sema_init(&zfcp_data.config_sema, 1); 179 sema_init(&zfcp_data.config_sema, 1);
187 rwlock_init(&zfcp_data.config_lock); 180 rwlock_init(&zfcp_data.config_lock);
188 181
@@ -193,13 +186,14 @@ static int __init zfcp_module_init(void)
193 186
194 retval = misc_register(&zfcp_cfdc_misc); 187 retval = misc_register(&zfcp_cfdc_misc);
195 if (retval) { 188 if (retval) {
196 pr_err("zfcp: registration of misc device zfcp_cfdc failed\n"); 189 pr_err("zfcp: Registering the misc device zfcp_cfdc failed\n");
197 goto out_misc; 190 goto out_misc;
198 } 191 }
199 192
200 retval = zfcp_ccw_register(); 193 retval = zfcp_ccw_register();
201 if (retval) { 194 if (retval) {
202 pr_err("zfcp: Registration with common I/O layer failed.\n"); 195 pr_err("zfcp: The zfcp device driver could not register with "
196 "the common I/O layer\n");
203 goto out_ccw_register; 197 goto out_ccw_register;
204 } 198 }
205 199
@@ -231,8 +225,7 @@ module_init(zfcp_module_init);
231 * 225 *
232 * Returns: pointer to zfcp_unit or NULL 226 * Returns: pointer to zfcp_unit or NULL
233 */ 227 */
234struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, 228struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
235 fcp_lun_t fcp_lun)
236{ 229{
237 struct zfcp_unit *unit; 230 struct zfcp_unit *unit;
238 231
@@ -251,7 +244,7 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port,
251 * Returns: pointer to zfcp_port or NULL 244 * Returns: pointer to zfcp_port or NULL
252 */ 245 */
253struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, 246struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
254 wwn_t wwpn) 247 u64 wwpn)
255{ 248{
256 struct zfcp_port *port; 249 struct zfcp_port *port;
257 250
@@ -276,7 +269,7 @@ static void zfcp_sysfs_unit_release(struct device *dev)
276 * 269 *
277 * Sets up some unit internal structures and creates sysfs entry. 270 * Sets up some unit internal structures and creates sysfs entry.
278 */ 271 */
279struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun) 272struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
280{ 273{
281 struct zfcp_unit *unit; 274 struct zfcp_unit *unit;
282 275
@@ -290,7 +283,8 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
290 unit->port = port; 283 unit->port = port;
291 unit->fcp_lun = fcp_lun; 284 unit->fcp_lun = fcp_lun;
292 285
293 snprintf(unit->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx", fcp_lun); 286 snprintf(unit->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx",
287 (unsigned long long) fcp_lun);
294 unit->sysfs_device.parent = &port->sysfs_device; 288 unit->sysfs_device.parent = &port->sysfs_device;
295 unit->sysfs_device.release = zfcp_sysfs_unit_release; 289 unit->sysfs_device.release = zfcp_sysfs_unit_release;
296 dev_set_drvdata(&unit->sysfs_device, unit); 290 dev_set_drvdata(&unit->sysfs_device, unit);
@@ -323,7 +317,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
323 } 317 }
324 318
325 zfcp_unit_get(unit); 319 zfcp_unit_get(unit);
326 unit->scsi_lun = scsilun_to_int((struct scsi_lun *)&unit->fcp_lun);
327 320
328 write_lock_irq(&zfcp_data.config_lock); 321 write_lock_irq(&zfcp_data.config_lock);
329 list_add_tail(&unit->list, &port->unit_list_head); 322 list_add_tail(&unit->list, &port->unit_list_head);
@@ -332,7 +325,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
332 325
333 write_unlock_irq(&zfcp_data.config_lock); 326 write_unlock_irq(&zfcp_data.config_lock);
334 327
335 port->units++;
336 zfcp_port_get(port); 328 zfcp_port_get(port);
337 329
338 return unit; 330 return unit;
@@ -351,11 +343,10 @@ err_out_free:
  */
 void zfcp_unit_dequeue(struct zfcp_unit *unit)
 {
-	zfcp_unit_wait(unit);
+	wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
 	write_lock_irq(&zfcp_data.config_lock);
 	list_del(&unit->list);
 	write_unlock_irq(&zfcp_data.config_lock);
-	unit->port->units--;
 	zfcp_port_put(unit->port);
 	sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs);
 	device_unregister(&unit->sysfs_device);
@@ -416,11 +407,6 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
416 mempool_destroy(adapter->pool.data_gid_pn); 407 mempool_destroy(adapter->pool.data_gid_pn);
417} 408}
418 409
419static void zfcp_dummy_release(struct device *dev)
420{
421 return;
422}
423
424/** 410/**
425 * zfcp_status_read_refill - refill the long running status_read_requests 411 * zfcp_status_read_refill - refill the long running status_read_requests
426 * @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled 412 * @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
@@ -450,19 +436,6 @@ static void _zfcp_status_read_scheduler(struct work_struct *work)
450 stat_work)); 436 stat_work));
451} 437}
452 438
453static int zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
454{
455 struct zfcp_port *port;
456
457 port = zfcp_port_enqueue(adapter, 0, ZFCP_STATUS_PORT_WKA,
458 ZFCP_DID_DIRECTORY_SERVICE);
459 if (IS_ERR(port))
460 return PTR_ERR(port);
461 zfcp_port_put(port);
462
463 return 0;
464}
465
466/** 439/**
467 * zfcp_adapter_enqueue - enqueue a new adapter to the list 440 * zfcp_adapter_enqueue - enqueue a new adapter to the list
468 * @ccw_device: pointer to the struct cc_device 441 * @ccw_device: pointer to the struct cc_device
@@ -508,7 +481,6 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
508 init_waitqueue_head(&adapter->erp_done_wqh); 481 init_waitqueue_head(&adapter->erp_done_wqh);
509 482
510 INIT_LIST_HEAD(&adapter->port_list_head); 483 INIT_LIST_HEAD(&adapter->port_list_head);
511 INIT_LIST_HEAD(&adapter->port_remove_lh);
512 INIT_LIST_HEAD(&adapter->erp_ready_head); 484 INIT_LIST_HEAD(&adapter->erp_ready_head);
513 INIT_LIST_HEAD(&adapter->erp_running_head); 485 INIT_LIST_HEAD(&adapter->erp_running_head);
514 486
@@ -518,7 +490,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
518 spin_lock_init(&adapter->san_dbf_lock); 490 spin_lock_init(&adapter->san_dbf_lock);
519 spin_lock_init(&adapter->scsi_dbf_lock); 491 spin_lock_init(&adapter->scsi_dbf_lock);
520 spin_lock_init(&adapter->rec_dbf_lock); 492 spin_lock_init(&adapter->rec_dbf_lock);
521 spin_lock_init(&adapter->req_q.lock); 493 spin_lock_init(&adapter->req_q_lock);
522 494
523 rwlock_init(&adapter->erp_lock); 495 rwlock_init(&adapter->erp_lock);
524 rwlock_init(&adapter->abort_lock); 496 rwlock_init(&adapter->abort_lock);
@@ -537,28 +509,15 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
537 &zfcp_sysfs_adapter_attrs)) 509 &zfcp_sysfs_adapter_attrs))
538 goto sysfs_failed; 510 goto sysfs_failed;
539 511
540 adapter->generic_services.parent = &adapter->ccw_device->dev;
541 adapter->generic_services.release = zfcp_dummy_release;
542 snprintf(adapter->generic_services.bus_id, BUS_ID_SIZE,
543 "generic_services");
544
545 if (device_register(&adapter->generic_services))
546 goto generic_services_failed;
547
548 write_lock_irq(&zfcp_data.config_lock); 512 write_lock_irq(&zfcp_data.config_lock);
549 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); 513 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
550 list_add_tail(&adapter->list, &zfcp_data.adapter_list_head); 514 list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
551 write_unlock_irq(&zfcp_data.config_lock); 515 write_unlock_irq(&zfcp_data.config_lock);
552 516
553 zfcp_data.adapters++; 517 zfcp_fc_nameserver_init(adapter);
554
555 zfcp_nameserver_enqueue(adapter);
556 518
557 return 0; 519 return 0;
558 520
559generic_services_failed:
560 sysfs_remove_group(&ccw_device->dev.kobj,
561 &zfcp_sysfs_adapter_attrs);
562sysfs_failed: 521sysfs_failed:
563 zfcp_adapter_debug_unregister(adapter); 522 zfcp_adapter_debug_unregister(adapter);
564debug_register_failed: 523debug_register_failed:
@@ -585,7 +544,6 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
585 cancel_work_sync(&adapter->scan_work); 544 cancel_work_sync(&adapter->scan_work);
586 cancel_work_sync(&adapter->stat_work); 545 cancel_work_sync(&adapter->stat_work);
587 zfcp_adapter_scsi_unregister(adapter); 546 zfcp_adapter_scsi_unregister(adapter);
588 device_unregister(&adapter->generic_services);
589 sysfs_remove_group(&adapter->ccw_device->dev.kobj, 547 sysfs_remove_group(&adapter->ccw_device->dev.kobj,
590 &zfcp_sysfs_adapter_attrs); 548 &zfcp_sysfs_adapter_attrs);
591 dev_set_drvdata(&adapter->ccw_device->dev, NULL); 549 dev_set_drvdata(&adapter->ccw_device->dev, NULL);
@@ -603,9 +561,6 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
603 list_del(&adapter->list); 561 list_del(&adapter->list);
604 write_unlock_irq(&zfcp_data.config_lock); 562 write_unlock_irq(&zfcp_data.config_lock);
605 563
606 /* decrease number of adapters in list */
607 zfcp_data.adapters--;
608
609 zfcp_qdio_free(adapter); 564 zfcp_qdio_free(adapter);
610 565
611 zfcp_free_low_mem_buffers(adapter); 566 zfcp_free_low_mem_buffers(adapter);
@@ -633,21 +588,19 @@ static void zfcp_sysfs_port_release(struct device *dev)
  * d_id is used to enqueue ports with a well known address like the Directory
  * Service for nameserver lookup.
  */
-struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
+struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 				    u32 status, u32 d_id)
 {
 	struct zfcp_port *port;
 	int retval;
-	char *bus_id;
 
 	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
 	if (!port)
 		return ERR_PTR(-ENOMEM);
 
 	init_waitqueue_head(&port->remove_wq);
-
 	INIT_LIST_HEAD(&port->unit_list_head);
-	INIT_LIST_HEAD(&port->unit_remove_lh);
+	INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup);
 
 	port->adapter = adapter;
 	port->d_id = d_id;
@@ -657,34 +610,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
 	atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
 	atomic_set(&port->refcount, 0);
 
-	if (status & ZFCP_STATUS_PORT_WKA) {
-		switch (d_id) {
-		case ZFCP_DID_DIRECTORY_SERVICE:
-			bus_id = "directory";
-			break;
-		case ZFCP_DID_MANAGEMENT_SERVICE:
-			bus_id = "management";
-			break;
-		case ZFCP_DID_KEY_DISTRIBUTION_SERVICE:
-			bus_id = "key_distribution";
-			break;
-		case ZFCP_DID_ALIAS_SERVICE:
-			bus_id = "alias";
-			break;
-		case ZFCP_DID_TIME_SERVICE:
-			bus_id = "time";
-			break;
-		default:
-			kfree(port);
-			return ERR_PTR(-EINVAL);
-		}
-		snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, "%s", bus_id);
-		port->sysfs_device.parent = &adapter->generic_services;
-	} else {
-		snprintf(port->sysfs_device.bus_id,
-			 BUS_ID_SIZE, "0x%016llx", wwpn);
-		port->sysfs_device.parent = &adapter->ccw_device->dev;
-	}
+	snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx",
+		 (unsigned long long) wwpn);
+	port->sysfs_device.parent = &adapter->ccw_device->dev;
 
 	port->sysfs_device.release = zfcp_sysfs_port_release;
 	dev_set_drvdata(&port->sysfs_device, port);
@@ -700,12 +628,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
 	if (device_register(&port->sysfs_device))
 		goto err_out_free;
 
-	if (status & ZFCP_STATUS_PORT_WKA)
-		retval = sysfs_create_group(&port->sysfs_device.kobj,
-					    &zfcp_sysfs_ns_port_attrs);
-	else
-		retval = sysfs_create_group(&port->sysfs_device.kobj,
-					    &zfcp_sysfs_port_attrs);
+	retval = sysfs_create_group(&port->sysfs_device.kobj,
+				    &zfcp_sysfs_port_attrs);
 
 	if (retval) {
 		device_unregister(&port->sysfs_device);
@@ -718,10 +642,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
718 list_add_tail(&port->list, &adapter->port_list_head); 642 list_add_tail(&port->list, &adapter->port_list_head);
719 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); 643 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
720 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status); 644 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
721 if (d_id == ZFCP_DID_DIRECTORY_SERVICE)
722 if (!adapter->nameserver_port)
723 adapter->nameserver_port = port;
724 adapter->ports++;
725 645
726 write_unlock_irq(&zfcp_data.config_lock); 646 write_unlock_irq(&zfcp_data.config_lock);
727 647
@@ -740,21 +660,15 @@ err_out:
  */
 void zfcp_port_dequeue(struct zfcp_port *port)
 {
-	zfcp_port_wait(port);
+	wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
 	write_lock_irq(&zfcp_data.config_lock);
 	list_del(&port->list);
-	port->adapter->ports--;
 	write_unlock_irq(&zfcp_data.config_lock);
 	if (port->rport)
 		fc_remote_port_delete(port->rport);
 	port->rport = NULL;
 	zfcp_adapter_put(port->adapter);
-	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)
-		sysfs_remove_group(&port->sysfs_device.kobj,
-				   &zfcp_sysfs_ns_port_attrs);
-	else
-		sysfs_remove_group(&port->sysfs_device.kobj,
-				   &zfcp_sysfs_port_attrs);
+	sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs);
 	device_unregister(&port->sysfs_device);
 }
 
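
The dequeue paths above drop the zfcp_unit_wait()/zfcp_port_wait()/zfcp_adapter_wait() wrappers and open-code the underlying refcount-plus-waitqueue idiom. A minimal sketch of that idiom follows; the demo_* names are hypothetical and not zfcp symbols, and only the usual kernel atomic and wait-queue APIs are assumed.

#include <linux/atomic.h>
#include <linux/wait.h>

struct demo_obj {
	atomic_t refcount;
	wait_queue_head_t remove_wq;
};

static void demo_obj_init(struct demo_obj *obj)
{
	atomic_set(&obj->refcount, 0);
	init_waitqueue_head(&obj->remove_wq);
}

static void demo_obj_get(struct demo_obj *obj)
{
	atomic_inc(&obj->refcount);
}

static void demo_obj_put(struct demo_obj *obj)
{
	/* wake a remover once the last reference is gone */
	if (atomic_dec_return(&obj->refcount) == 0)
		wake_up(&obj->remove_wq);
}

static void demo_obj_remove(struct demo_obj *obj)
{
	/* open-coded equivalent of the removed zfcp_*_wait() helpers */
	wait_event(obj->remove_wq, atomic_read(&obj->refcount) == 0);
	/* obj is now unreferenced and safe to unregister/free */
}

Putting the wake_up() in the put path lets the remover sleep until the last reference drops instead of polling the counter.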
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 51b6a05f4d12..b04038c74786 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -25,7 +25,8 @@ static int zfcp_ccw_probe(struct ccw_device *ccw_device)
 	down(&zfcp_data.config_sema);
 	if (zfcp_adapter_enqueue(ccw_device)) {
 		dev_err(&ccw_device->dev,
-			"Setup of data structures failed.\n");
+			"Setting up data structures for the "
+			"FCP adapter failed\n");
 		retval = -EINVAL;
 	}
 	up(&zfcp_data.config_sema);
@@ -46,6 +47,8 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
 	struct zfcp_adapter *adapter;
 	struct zfcp_port *port, *p;
 	struct zfcp_unit *unit, *u;
+	LIST_HEAD(unit_remove_lh);
+	LIST_HEAD(port_remove_lh);
 
 	ccw_device_set_offline(ccw_device);
 	down(&zfcp_data.config_sema);
@@ -54,26 +57,26 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
 	write_lock_irq(&zfcp_data.config_lock);
 	list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
 		list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
-			list_move(&unit->list, &port->unit_remove_lh);
+			list_move(&unit->list, &unit_remove_lh);
 			atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
 					&unit->status);
 		}
-		list_move(&port->list, &adapter->port_remove_lh);
+		list_move(&port->list, &port_remove_lh);
 		atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
 	}
 	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
 	write_unlock_irq(&zfcp_data.config_lock);
 
-	list_for_each_entry_safe(port, p, &adapter->port_remove_lh, list) {
-		list_for_each_entry_safe(unit, u, &port->unit_remove_lh, list) {
-			if (atomic_test_mask(ZFCP_STATUS_UNIT_REGISTERED,
-					     &unit->status))
+	list_for_each_entry_safe(port, p, &port_remove_lh, list) {
+		list_for_each_entry_safe(unit, u, &unit_remove_lh, list) {
+			if (atomic_read(&unit->status) &
+			    ZFCP_STATUS_UNIT_REGISTERED)
 				scsi_remove_device(unit->device);
 			zfcp_unit_dequeue(unit);
 		}
 		zfcp_port_dequeue(port);
 	}
-	zfcp_adapter_wait(adapter);
+	wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
 	zfcp_adapter_dequeue(adapter);
 
 	up(&zfcp_data.config_sema);
@@ -156,15 +159,18 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
 
 	switch (event) {
 	case CIO_GONE:
-		dev_warn(&adapter->ccw_device->dev, "device gone\n");
+		dev_warn(&adapter->ccw_device->dev,
+			 "The FCP device has been detached\n");
 		zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL);
 		break;
 	case CIO_NO_PATH:
-		dev_warn(&adapter->ccw_device->dev, "no path\n");
+		dev_warn(&adapter->ccw_device->dev,
+			 "The CHPID for the FCP device is offline\n");
 		zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL);
 		break;
 	case CIO_OPER:
-		dev_info(&adapter->ccw_device->dev, "operational again\n");
+		dev_info(&adapter->ccw_device->dev,
+			 "The FCP device is operational again\n");
 		zfcp_erp_modify_adapter_status(adapter, 11, NULL,
 					       ZFCP_STATUS_COMMON_RUNNING,
 					       ZFCP_SET);
@@ -220,3 +226,20 @@ int __init zfcp_ccw_register(void)
220{ 226{
221 return ccw_driver_register(&zfcp_ccw_driver); 227 return ccw_driver_register(&zfcp_ccw_driver);
222} 228}
229
230/**
231 * zfcp_get_adapter_by_busid - find zfcp_adapter struct
232 * @busid: bus id string of zfcp adapter to find
233 */
234struct zfcp_adapter *zfcp_get_adapter_by_busid(char *busid)
235{
236 struct ccw_device *ccw_device;
237 struct zfcp_adapter *adapter = NULL;
238
239 ccw_device = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
240 if (ccw_device) {
241 adapter = dev_get_drvdata(&ccw_device->dev);
242 put_device(&ccw_device->dev);
243 }
244 return adapter;
245}
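
zfcp_ccw_remove() above now collects units and ports on on-stack LIST_HEADs while the config lock is held and only afterwards tears them down. A minimal sketch of that pattern, with made-up demo_* names and assuming a spinlock-protected live list whose entries were kmalloc'ed:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_item {
	struct list_head list;
};

static void demo_remove_all(struct list_head *live, spinlock_t *lock)
{
	struct demo_item *item, *tmp;
	LIST_HEAD(doomed);		/* on-stack list, like unit_remove_lh */

	spin_lock_irq(lock);
	list_for_each_entry_safe(item, tmp, live, list)
		list_move(&item->list, &doomed);
	spin_unlock_irq(lock);

	/* the moved entries are now private: free them without the lock */
	list_for_each_entry_safe(item, tmp, &doomed, list) {
		list_del(&item->list);
		kfree(item);
	}
}

Moving the entries off the shared list first keeps the lock hold time short and lets the possibly sleeping teardown (SCSI device removal, sysfs unregistration) run without the lock, which is why the per-port and per-adapter *_remove_lh members could be deleted.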
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index fca48b88fc53..060f5f2352ec 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -318,6 +318,26 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter,
318 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 318 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
319} 319}
320 320
321/**
322 * zfcp_hba_dbf_event_berr - trace event for bit error threshold
323 * @adapter: adapter affected by this QDIO related event
324 * @req: fsf request
325 */
326void zfcp_hba_dbf_event_berr(struct zfcp_adapter *adapter,
327 struct zfcp_fsf_req *req)
328{
329 struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
330 struct fsf_status_read_buffer *sr_buf = req->data;
331 struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
332 unsigned long flags;
333
334 spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
335 memset(r, 0, sizeof(*r));
336 strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE);
337 memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload));
338 debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
339 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
340}
321static void zfcp_hba_dbf_view_response(char **p, 341static void zfcp_hba_dbf_view_response(char **p,
322 struct zfcp_hba_dbf_record_response *r) 342 struct zfcp_hba_dbf_record_response *r)
323{ 343{
@@ -399,6 +419,30 @@ static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r)
399 zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count); 419 zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
400} 420}
401 421
422static void zfcp_hba_dbf_view_berr(char **p, struct fsf_bit_error_payload *r)
423{
424 zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count);
425 zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count);
426 zfcp_dbf_out(p, "loss_of_sig_err", "%d", r->loss_of_signal_error_count);
427 zfcp_dbf_out(p, "prim_seq_err", "%d",
428 r->primitive_sequence_error_count);
429 zfcp_dbf_out(p, "inval_trans_word_err", "%d",
430 r->invalid_transmission_word_error_count);
431 zfcp_dbf_out(p, "CRC_errors", "%d", r->crc_error_count);
432 zfcp_dbf_out(p, "prim_seq_event_to", "%d",
433 r->primitive_sequence_event_timeout_count);
434 zfcp_dbf_out(p, "elast_buf_overrun_err", "%d",
435 r->elastic_buffer_overrun_error_count);
436 zfcp_dbf_out(p, "adv_rec_buf2buf_cred", "%d",
437 r->advertised_receive_b2b_credit);
438 zfcp_dbf_out(p, "curr_rec_buf2buf_cred", "%d",
439 r->current_receive_b2b_credit);
440 zfcp_dbf_out(p, "adv_trans_buf2buf_cred", "%d",
441 r->advertised_transmit_b2b_credit);
442 zfcp_dbf_out(p, "curr_trans_buf2buf_cred", "%d",
443 r->current_transmit_b2b_credit);
444}
445
402static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view, 446static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view,
403 char *out_buf, const char *in_buf) 447 char *out_buf, const char *in_buf)
404{ 448{
@@ -418,6 +462,8 @@ static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view,
418 zfcp_hba_dbf_view_status(&p, &r->u.status); 462 zfcp_hba_dbf_view_status(&p, &r->u.status);
419 else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0) 463 else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
420 zfcp_hba_dbf_view_qdio(&p, &r->u.qdio); 464 zfcp_hba_dbf_view_qdio(&p, &r->u.qdio);
465 else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0)
466 zfcp_hba_dbf_view_berr(&p, &r->u.berr);
421 467
422 p += sprintf(p, "\n"); 468 p += sprintf(p, "\n");
423 return p - out_buf; 469 return p - out_buf;
@@ -519,14 +565,14 @@ static const char *zfcp_rec_dbf_ids[] = {
 	[75] = "physical port recovery escalation after failed port "
 	       "recovery",
 	[76] = "port recovery escalation after failed unit recovery",
-	[77] = "recovery opening nameserver port",
+	[77] = "",
 	[78] = "duplicate request id",
 	[79] = "link down",
 	[80] = "exclusive read-only unit access unsupported",
 	[81] = "shared read-write unit access unsupported",
 	[82] = "incoming rscn",
 	[83] = "incoming wwpn",
-	[84] = "",
+	[84] = "wka port handle not valid close port",
 	[85] = "online",
 	[86] = "offline",
 	[87] = "ccw device gone",
@@ -570,7 +616,7 @@ static const char *zfcp_rec_dbf_ids[] = {
 	[125] = "need newer zfcp",
 	[126] = "need newer microcode",
 	[127] = "arbitrated loop not supported",
-	[128] = "unknown topology",
+	[128] = "",
 	[129] = "qtcb size mismatch",
 	[130] = "unknown fsf status ecd",
 	[131] = "fcp request too big",
@@ -829,9 +875,9 @@ void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action)
 void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
 {
 	struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
-	struct zfcp_port *port = ct->port;
-	struct zfcp_adapter *adapter = port->adapter;
-	struct ct_hdr *hdr = zfcp_sg_to_address(ct->req);
+	struct zfcp_wka_port *wka_port = ct->wka_port;
+	struct zfcp_adapter *adapter = wka_port->adapter;
+	struct ct_hdr *hdr = sg_virt(ct->req);
 	struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
 	struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req;
 	unsigned long flags;
@@ -842,7 +888,7 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
 	r->fsf_reqid = (unsigned long)fsf_req;
 	r->fsf_seqno = fsf_req->seq_no;
 	r->s_id = fc_host_port_id(adapter->scsi_host);
-	r->d_id = port->d_id;
+	r->d_id = wka_port->d_id;
 	oct->cmd_req_code = hdr->cmd_rsp_code;
 	oct->revision = hdr->revision;
 	oct->gs_type = hdr->gs_type;
@@ -863,9 +909,9 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
 void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
 {
 	struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
-	struct zfcp_port *port = ct->port;
-	struct zfcp_adapter *adapter = port->adapter;
-	struct ct_hdr *hdr = zfcp_sg_to_address(ct->resp);
+	struct zfcp_wka_port *wka_port = ct->wka_port;
+	struct zfcp_adapter *adapter = wka_port->adapter;
+	struct ct_hdr *hdr = sg_virt(ct->resp);
 	struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
 	struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp;
 	unsigned long flags;
@@ -875,7 +921,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
 	strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
 	r->fsf_reqid = (unsigned long)fsf_req;
 	r->fsf_seqno = fsf_req->seq_no;
-	r->s_id = port->d_id;
+	r->s_id = wka_port->d_id;
 	r->d_id = fc_host_port_id(adapter->scsi_host);
 	rct->cmd_rsp_code = hdr->cmd_rsp_code;
 	rct->revision = hdr->revision;
@@ -922,8 +968,8 @@ void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
 
 	zfcp_san_dbf_event_els("oels", 2, fsf_req,
 			       fc_host_port_id(els->adapter->scsi_host),
-			       els->d_id, *(u8 *) zfcp_sg_to_address(els->req),
-			       zfcp_sg_to_address(els->req), els->req->length);
+			       els->d_id, *(u8 *) sg_virt(els->req),
+			       sg_virt(els->req), els->req->length);
 }
 
 /**
@@ -936,8 +982,7 @@ void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
 
 	zfcp_san_dbf_event_els("rels", 2, fsf_req, els->d_id,
 			       fc_host_port_id(els->adapter->scsi_host),
-			       *(u8 *)zfcp_sg_to_address(els->req),
-			       zfcp_sg_to_address(els->resp),
+			       *(u8 *)sg_virt(els->req), sg_virt(els->resp),
 			       els->resp->length);
 }
 
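
The new zfcp_hba_dbf_event_berr() above follows the same record-buffer discipline as the existing HBA trace events: take the per-adapter trace lock, clear the shared record, set a four-character tag, copy the payload, and hand the record to the trace facility. A generic sketch of that sequence, using hypothetical demo_* names instead of the zfcp structures and the s390 debug API:

#include <linux/spinlock.h>
#include <linux/string.h>

#define DEMO_TAG_SIZE 4

struct demo_payload {
	unsigned int counter;
};

struct demo_record {
	char tag[DEMO_TAG_SIZE];	/* not NUL-terminated, like the dbf tags */
	struct demo_payload payload;
};

static DEFINE_SPINLOCK(demo_trace_lock);
static struct demo_record demo_trace_buf;

static void demo_trace_event(const struct demo_payload *p,
			     void (*commit)(const struct demo_record *))
{
	unsigned long flags;

	spin_lock_irqsave(&demo_trace_lock, flags);
	memset(&demo_trace_buf, 0, sizeof(demo_trace_buf));
	strncpy(demo_trace_buf.tag, "berr", DEMO_TAG_SIZE);
	memcpy(&demo_trace_buf.payload, p, sizeof(*p));
	commit(&demo_trace_buf);	/* stands in for debug_event() */
	spin_unlock_irqrestore(&demo_trace_lock, flags);
}

Because the record buffer is shared per adapter, the lock must be held across the memset/copy/commit sequence; the tag is what the view routine later matches with strncmp() to pick the right formatter.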
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 0ddb18449d11..e8f450801fea 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -151,6 +151,7 @@ struct zfcp_hba_dbf_record {
151 struct zfcp_hba_dbf_record_response response; 151 struct zfcp_hba_dbf_record_response response;
152 struct zfcp_hba_dbf_record_status status; 152 struct zfcp_hba_dbf_record_status status;
153 struct zfcp_hba_dbf_record_qdio qdio; 153 struct zfcp_hba_dbf_record_qdio qdio;
154 struct fsf_bit_error_payload berr;
154 } u; 155 } u;
155} __attribute__ ((packed)); 156} __attribute__ ((packed));
156 157
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 67f45fc62f53..73eb41580f25 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -39,29 +39,6 @@
39 39
40/********************* GENERAL DEFINES *********************************/ 40/********************* GENERAL DEFINES *********************************/
41 41
42/**
43 * zfcp_sg_to_address - determine kernel address from struct scatterlist
44 * @list: struct scatterlist
45 * Return: kernel address
46 */
47static inline void *
48zfcp_sg_to_address(struct scatterlist *list)
49{
50 return sg_virt(list);
51}
52
53/**
54 * zfcp_address_to_sg - set up struct scatterlist from kernel address
55 * @address: kernel address
56 * @list: struct scatterlist
57 * @size: buffer size
58 */
59static inline void
60zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
61{
62 sg_set_buf(list, address, size);
63}
64
65#define REQUEST_LIST_SIZE 128 42#define REQUEST_LIST_SIZE 128
66 43
67/********************* SCSI SPECIFIC DEFINES *********************************/ 44/********************* SCSI SPECIFIC DEFINES *********************************/
@@ -101,11 +78,6 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
101 78
102/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/ 79/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
103 80
104typedef unsigned long long wwn_t;
105typedef unsigned long long fcp_lun_t;
106/* data length field may be at variable position in FCP-2 FCP_CMND IU */
107typedef unsigned int fcp_dl_t;
108
109/* timeout for name-server lookup (in seconds) */ 81/* timeout for name-server lookup (in seconds) */
110#define ZFCP_NS_GID_PN_TIMEOUT 10 82#define ZFCP_NS_GID_PN_TIMEOUT 10
111 83
@@ -129,7 +101,7 @@ typedef unsigned int fcp_dl_t;
129 101
130/* FCP(-2) FCP_CMND IU */ 102/* FCP(-2) FCP_CMND IU */
131struct fcp_cmnd_iu { 103struct fcp_cmnd_iu {
132 fcp_lun_t fcp_lun; /* FCP logical unit number */ 104 u64 fcp_lun; /* FCP logical unit number */
133 u8 crn; /* command reference number */ 105 u8 crn; /* command reference number */
134 u8 reserved0:5; /* reserved */ 106 u8 reserved0:5; /* reserved */
135 u8 task_attribute:3; /* task attribute */ 107 u8 task_attribute:3; /* task attribute */
@@ -204,7 +176,7 @@ struct fcp_rscn_element {
204struct fcp_logo { 176struct fcp_logo {
205 u32 command; 177 u32 command;
206 u32 nport_did; 178 u32 nport_did;
207 wwn_t nport_wwpn; 179 u64 nport_wwpn;
208} __attribute__((packed)); 180} __attribute__((packed));
209 181
210/* 182/*
@@ -218,13 +190,6 @@ struct fcp_logo {
218#define ZFCP_LS_RSCN 0x61 190#define ZFCP_LS_RSCN 0x61
219#define ZFCP_LS_RNID 0x78 191#define ZFCP_LS_RNID 0x78
220 192
221struct zfcp_ls_rjt_par {
222 u8 action;
223 u8 reason_code;
224 u8 reason_expl;
225 u8 vendor_unique;
226} __attribute__ ((packed));
227
228struct zfcp_ls_adisc { 193struct zfcp_ls_adisc {
229 u8 code; 194 u8 code;
230 u8 field[3]; 195 u8 field[3];
@@ -234,20 +199,6 @@ struct zfcp_ls_adisc {
234 u32 nport_id; 199 u32 nport_id;
235} __attribute__ ((packed)); 200} __attribute__ ((packed));
236 201
237struct zfcp_ls_adisc_acc {
238 u8 code;
239 u8 field[3];
240 u32 hard_nport_id;
241 u64 wwpn;
242 u64 wwnn;
243 u32 nport_id;
244} __attribute__ ((packed));
245
246struct zfcp_rc_entry {
247 u8 code;
248 const char *description;
249};
250
251/* 202/*
252 * FC-GS-2 stuff 203 * FC-GS-2 stuff
253 */ 204 */
@@ -281,9 +232,7 @@ struct zfcp_rc_entry {
281#define ZFCP_STATUS_COMMON_RUNNING 0x40000000 232#define ZFCP_STATUS_COMMON_RUNNING 0x40000000
282#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000 233#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000
283#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000 234#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000
284#define ZFCP_STATUS_COMMON_OPENING 0x08000000
285#define ZFCP_STATUS_COMMON_OPEN 0x04000000 235#define ZFCP_STATUS_COMMON_OPEN 0x04000000
286#define ZFCP_STATUS_COMMON_CLOSING 0x02000000
287#define ZFCP_STATUS_COMMON_ERP_INUSE 0x01000000 236#define ZFCP_STATUS_COMMON_ERP_INUSE 0x01000000
288#define ZFCP_STATUS_COMMON_ACCESS_DENIED 0x00800000 237#define ZFCP_STATUS_COMMON_ACCESS_DENIED 0x00800000
289#define ZFCP_STATUS_COMMON_ACCESS_BOXED 0x00400000 238#define ZFCP_STATUS_COMMON_ACCESS_BOXED 0x00400000
@@ -291,16 +240,15 @@ struct zfcp_rc_entry {
291 240
292/* adapter status */ 241/* adapter status */
293#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 242#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
294#define ZFCP_STATUS_ADAPTER_REGISTERED 0x00000004
295#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 243#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
296#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010 244#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
297#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020 245#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020
298#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080 246#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080
299#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 247#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
300#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 248#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
301#define ZFCP_STATUS_ADAPTER_XPORT_OK 0x00000800
302 249
303/* FC-PH/FC-GS well-known address identifiers for generic services */ 250/* FC-PH/FC-GS well-known address identifiers for generic services */
251#define ZFCP_DID_WKA 0xFFFFF0
304#define ZFCP_DID_MANAGEMENT_SERVICE 0xFFFFFA 252#define ZFCP_DID_MANAGEMENT_SERVICE 0xFFFFFA
305#define ZFCP_DID_TIME_SERVICE 0xFFFFFB 253#define ZFCP_DID_TIME_SERVICE 0xFFFFFB
306#define ZFCP_DID_DIRECTORY_SERVICE 0xFFFFFC 254#define ZFCP_DID_DIRECTORY_SERVICE 0xFFFFFC
@@ -312,29 +260,27 @@ struct zfcp_rc_entry {
312#define ZFCP_STATUS_PORT_DID_DID 0x00000002 260#define ZFCP_STATUS_PORT_DID_DID 0x00000002
313#define ZFCP_STATUS_PORT_PHYS_CLOSING 0x00000004 261#define ZFCP_STATUS_PORT_PHYS_CLOSING 0x00000004
314#define ZFCP_STATUS_PORT_NO_WWPN 0x00000008 262#define ZFCP_STATUS_PORT_NO_WWPN 0x00000008
315#define ZFCP_STATUS_PORT_NO_SCSI_ID 0x00000010
316#define ZFCP_STATUS_PORT_INVALID_WWPN 0x00000020 263#define ZFCP_STATUS_PORT_INVALID_WWPN 0x00000020
317 264
318/* for ports with well known addresses */ 265/* well known address (WKA) port status*/
319#define ZFCP_STATUS_PORT_WKA \ 266enum zfcp_wka_status {
320 (ZFCP_STATUS_PORT_NO_WWPN | \ 267 ZFCP_WKA_PORT_OFFLINE,
321 ZFCP_STATUS_PORT_NO_SCSI_ID) 268 ZFCP_WKA_PORT_CLOSING,
269 ZFCP_WKA_PORT_OPENING,
270 ZFCP_WKA_PORT_ONLINE,
271};
322 272
323/* logical unit status */ 273/* logical unit status */
324#define ZFCP_STATUS_UNIT_TEMPORARY 0x00000002
325#define ZFCP_STATUS_UNIT_SHARED 0x00000004 274#define ZFCP_STATUS_UNIT_SHARED 0x00000004
326#define ZFCP_STATUS_UNIT_READONLY 0x00000008 275#define ZFCP_STATUS_UNIT_READONLY 0x00000008
327#define ZFCP_STATUS_UNIT_REGISTERED 0x00000010 276#define ZFCP_STATUS_UNIT_REGISTERED 0x00000010
328#define ZFCP_STATUS_UNIT_SCSI_WORK_PENDING 0x00000020 277#define ZFCP_STATUS_UNIT_SCSI_WORK_PENDING 0x00000020
329 278
330/* FSF request status (this does not have a common part) */ 279/* FSF request status (this does not have a common part) */
331#define ZFCP_STATUS_FSFREQ_NOT_INIT 0x00000000
332#define ZFCP_STATUS_FSFREQ_POOL 0x00000001
333#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002 280#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
334#define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004 281#define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004
335#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008 282#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
336#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010 283#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
337#define ZFCP_STATUS_FSFREQ_ABORTING 0x00000020
338#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040 284#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
339#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080 285#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
340#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100 286#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100
@@ -379,7 +325,7 @@ struct ct_hdr {
379 * a port name is required */ 325 * a port name is required */
380struct ct_iu_gid_pn_req { 326struct ct_iu_gid_pn_req {
381 struct ct_hdr header; 327 struct ct_hdr header;
382 wwn_t wwpn; 328 u64 wwpn;
383} __attribute__ ((packed)); 329} __attribute__ ((packed));
384 330
385/* FS_ACC IU and data unit for GID_PN nameserver request */ 331/* FS_ACC IU and data unit for GID_PN nameserver request */
@@ -388,11 +334,9 @@ struct ct_iu_gid_pn_resp {
388 u32 d_id; 334 u32 d_id;
389} __attribute__ ((packed)); 335} __attribute__ ((packed));
390 336
391typedef void (*zfcp_send_ct_handler_t)(unsigned long);
392
393/** 337/**
394 * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct 338 * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
395 * @port: port where the request is sent to 339 * @wka_port: port where the request is sent to
396 * @req: scatter-gather list for request 340 * @req: scatter-gather list for request
397 * @resp: scatter-gather list for response 341 * @resp: scatter-gather list for response
398 * @req_count: number of elements in request scatter-gather list 342 * @req_count: number of elements in request scatter-gather list
@@ -404,12 +348,12 @@ typedef void (*zfcp_send_ct_handler_t)(unsigned long);
404 * @status: used to pass error status to calling function 348 * @status: used to pass error status to calling function
405 */ 349 */
406struct zfcp_send_ct { 350struct zfcp_send_ct {
407 struct zfcp_port *port; 351 struct zfcp_wka_port *wka_port;
408 struct scatterlist *req; 352 struct scatterlist *req;
409 struct scatterlist *resp; 353 struct scatterlist *resp;
410 unsigned int req_count; 354 unsigned int req_count;
411 unsigned int resp_count; 355 unsigned int resp_count;
412 zfcp_send_ct_handler_t handler; 356 void (*handler)(unsigned long);
413 unsigned long handler_data; 357 unsigned long handler_data;
414 int timeout; 358 int timeout;
415 struct completion *completion; 359 struct completion *completion;
@@ -426,8 +370,6 @@ struct zfcp_gid_pn_data {
426 struct zfcp_port *port; 370 struct zfcp_port *port;
427}; 371};
428 372
429typedef void (*zfcp_send_els_handler_t)(unsigned long);
430
431/** 373/**
432 * struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els 374 * struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els
433 * @adapter: adapter where request is sent from 375 * @adapter: adapter where request is sent from
@@ -451,22 +393,28 @@ struct zfcp_send_els {
 	struct scatterlist *resp;
 	unsigned int req_count;
 	unsigned int resp_count;
-	zfcp_send_els_handler_t handler;
+	void (*handler)(unsigned long);
 	unsigned long handler_data;
 	struct completion *completion;
 	int ls_code;
 	int status;
 };
 
+struct zfcp_wka_port {
+	struct zfcp_adapter *adapter;
+	wait_queue_head_t completion_wq;
+	enum zfcp_wka_status status;
+	atomic_t refcount;
+	u32 d_id;
+	u32 handle;
+	struct mutex mutex;
+	struct delayed_work work;
+};
+
 struct zfcp_qdio_queue {
-	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
-	u8 first;		/* index of next free bfr
-				   in queue (free_count>0) */
-	atomic_t count;		/* number of free buffers
-				   in queue */
-	spinlock_t lock;	/* lock for operations on queue */
-	int pci_batch;		/* SBALs since PCI indication
-				   was last set */
+	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
+	u8 first;		/* index of next free bfr in queue */
+	atomic_t count;		/* number of free buffers in queue */
 };
 
 struct zfcp_erp_action {
@@ -475,7 +423,7 @@ struct zfcp_erp_action {
475 struct zfcp_adapter *adapter; /* device which should be recovered */ 423 struct zfcp_adapter *adapter; /* device which should be recovered */
476 struct zfcp_port *port; 424 struct zfcp_port *port;
477 struct zfcp_unit *unit; 425 struct zfcp_unit *unit;
478 volatile u32 status; /* recovery status */ 426 u32 status; /* recovery status */
479 u32 step; /* active step of this erp action */ 427 u32 step; /* active step of this erp action */
480 struct zfcp_fsf_req *fsf_req; /* fsf request currently pending 428 struct zfcp_fsf_req *fsf_req; /* fsf request currently pending
481 for this action */ 429 for this action */
@@ -506,8 +454,8 @@ struct zfcp_adapter {
506 atomic_t refcount; /* reference count */ 454 atomic_t refcount; /* reference count */
507 wait_queue_head_t remove_wq; /* can be used to wait for 455 wait_queue_head_t remove_wq; /* can be used to wait for
508 refcount drop to zero */ 456 refcount drop to zero */
509 wwn_t peer_wwnn; /* P2P peer WWNN */ 457 u64 peer_wwnn; /* P2P peer WWNN */
510 wwn_t peer_wwpn; /* P2P peer WWPN */ 458 u64 peer_wwpn; /* P2P peer WWPN */
511 u32 peer_d_id; /* P2P peer D_ID */ 459 u32 peer_d_id; /* P2P peer D_ID */
512 struct ccw_device *ccw_device; /* S/390 ccw device */ 460 struct ccw_device *ccw_device; /* S/390 ccw device */
513 u32 hydra_version; /* Hydra version */ 461 u32 hydra_version; /* Hydra version */
@@ -518,13 +466,13 @@ struct zfcp_adapter {
518 u16 timer_ticks; /* time int for a tick */ 466 u16 timer_ticks; /* time int for a tick */
519 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */ 467 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
520 struct list_head port_list_head; /* remote port list */ 468 struct list_head port_list_head; /* remote port list */
521 struct list_head port_remove_lh; /* head of ports to be
522 removed */
523 u32 ports; /* number of remote ports */
524 unsigned long req_no; /* unique FSF req number */ 469 unsigned long req_no; /* unique FSF req number */
525 struct list_head *req_list; /* list of pending reqs */ 470 struct list_head *req_list; /* list of pending reqs */
526 spinlock_t req_list_lock; /* request list lock */ 471 spinlock_t req_list_lock; /* request list lock */
527 struct zfcp_qdio_queue req_q; /* request queue */ 472 struct zfcp_qdio_queue req_q; /* request queue */
473 spinlock_t req_q_lock; /* for operations on queue */
474 int req_q_pci_batch; /* SBALs since PCI indication
475 was last set */
528 u32 fsf_req_seq_no; /* FSF cmnd seq number */ 476 u32 fsf_req_seq_no; /* FSF cmnd seq number */
529 wait_queue_head_t request_wq; /* can be used to wait for 477 wait_queue_head_t request_wq; /* can be used to wait for
530 more avaliable SBALs */ 478 more avaliable SBALs */
@@ -548,7 +496,7 @@ struct zfcp_adapter {
548 actions */ 496 actions */
549 u32 erp_low_mem_count; /* nr of erp actions waiting 497 u32 erp_low_mem_count; /* nr of erp actions waiting
550 for memory */ 498 for memory */
551 struct zfcp_port *nameserver_port; /* adapter's nameserver */ 499 struct zfcp_wka_port nsp; /* adapter's nameserver */
552 debug_info_t *rec_dbf; 500 debug_info_t *rec_dbf;
553 debug_info_t *hba_dbf; 501 debug_info_t *hba_dbf;
554 debug_info_t *san_dbf; /* debug feature areas */ 502 debug_info_t *san_dbf; /* debug feature areas */
@@ -563,11 +511,11 @@ struct zfcp_adapter {
563 struct zfcp_scsi_dbf_record scsi_dbf_buf; 511 struct zfcp_scsi_dbf_record scsi_dbf_buf;
564 struct zfcp_adapter_mempool pool; /* Adapter memory pools */ 512 struct zfcp_adapter_mempool pool; /* Adapter memory pools */
565 struct qdio_initialize qdio_init_data; /* for qdio_establish */ 513 struct qdio_initialize qdio_init_data; /* for qdio_establish */
566 struct device generic_services; /* directory for WKA ports */
567 struct fc_host_statistics *fc_stats; 514 struct fc_host_statistics *fc_stats;
568 struct fsf_qtcb_bottom_port *stats_reset_data; 515 struct fsf_qtcb_bottom_port *stats_reset_data;
569 unsigned long stats_reset; 516 unsigned long stats_reset;
570 struct work_struct scan_work; 517 struct work_struct scan_work;
518 atomic_t qdio_outb_full; /* queue full incidents */
571}; 519};
572 520
573struct zfcp_port { 521struct zfcp_port {
@@ -579,18 +527,16 @@ struct zfcp_port {
579 refcount drop to zero */ 527 refcount drop to zero */
580 struct zfcp_adapter *adapter; /* adapter used to access port */ 528 struct zfcp_adapter *adapter; /* adapter used to access port */
581 struct list_head unit_list_head; /* head of logical unit list */ 529 struct list_head unit_list_head; /* head of logical unit list */
582 struct list_head unit_remove_lh; /* head of luns to be removed
583 list */
584 u32 units; /* # of logical units in list */
585 atomic_t status; /* status of this remote port */ 530 atomic_t status; /* status of this remote port */
586 wwn_t wwnn; /* WWNN if known */ 531 u64 wwnn; /* WWNN if known */
587 wwn_t wwpn; /* WWPN */ 532 u64 wwpn; /* WWPN */
588 u32 d_id; /* D_ID */ 533 u32 d_id; /* D_ID */
589 u32 handle; /* handle assigned by FSF */ 534 u32 handle; /* handle assigned by FSF */
590 struct zfcp_erp_action erp_action; /* pending error recovery */ 535 struct zfcp_erp_action erp_action; /* pending error recovery */
591 atomic_t erp_counter; 536 atomic_t erp_counter;
592 u32 maxframe_size; 537 u32 maxframe_size;
593 u32 supported_classes; 538 u32 supported_classes;
539 struct work_struct gid_pn_work;
594}; 540};
595 541
596struct zfcp_unit { 542struct zfcp_unit {
@@ -601,8 +547,7 @@ struct zfcp_unit {
601 refcount drop to zero */ 547 refcount drop to zero */
602 struct zfcp_port *port; /* remote port of unit */ 548 struct zfcp_port *port; /* remote port of unit */
603 atomic_t status; /* status of this logical unit */ 549 atomic_t status; /* status of this logical unit */
604 unsigned int scsi_lun; /* own SCSI LUN */ 550 u64 fcp_lun; /* own FCP_LUN */
605 fcp_lun_t fcp_lun; /* own FCP_LUN */
606 u32 handle; /* handle assigned by FSF */ 551 u32 handle; /* handle assigned by FSF */
607 struct scsi_device *device; /* scsi device struct pointer */ 552 struct scsi_device *device; /* scsi device struct pointer */
608 struct zfcp_erp_action erp_action; /* pending error recovery */ 553 struct zfcp_erp_action erp_action; /* pending error recovery */
@@ -625,7 +570,7 @@ struct zfcp_fsf_req {
625 u8 sbal_response; /* SBAL used in interrupt */ 570 u8 sbal_response; /* SBAL used in interrupt */
626 wait_queue_head_t completion_wq; /* can be used by a routine 571 wait_queue_head_t completion_wq; /* can be used by a routine
627 to wait for completion */ 572 to wait for completion */
628 volatile u32 status; /* status of this request */ 573 u32 status; /* status of this request */
629 u32 fsf_command; /* FSF Command copy */ 574 u32 fsf_command; /* FSF Command copy */
630 struct fsf_qtcb *qtcb; /* address of associated QTCB */ 575 struct fsf_qtcb *qtcb; /* address of associated QTCB */
631 u32 seq_no; /* Sequence number of request */ 576 u32 seq_no; /* Sequence number of request */
@@ -644,11 +589,7 @@ struct zfcp_fsf_req {
644struct zfcp_data { 589struct zfcp_data {
645 struct scsi_host_template scsi_host_template; 590 struct scsi_host_template scsi_host_template;
646 struct scsi_transport_template *scsi_transport_template; 591 struct scsi_transport_template *scsi_transport_template;
647 atomic_t status; /* Module status flags */
648 struct list_head adapter_list_head; /* head of adapter list */ 592 struct list_head adapter_list_head; /* head of adapter list */
649 struct list_head adapter_remove_lh; /* head of adapters to be
650 removed */
651 u32 adapters; /* # of adapters in list */
652 rwlock_t config_lock; /* serialises changes 593 rwlock_t config_lock; /* serialises changes
653 to adapter/port/unit 594 to adapter/port/unit
654 lists */ 595 lists */
@@ -656,11 +597,12 @@ struct zfcp_data {
656 changes */ 597 changes */
657 atomic_t loglevel; /* current loglevel */ 598 atomic_t loglevel; /* current loglevel */
658 char init_busid[BUS_ID_SIZE]; 599 char init_busid[BUS_ID_SIZE];
659 wwn_t init_wwpn; 600 u64 init_wwpn;
660 fcp_lun_t init_fcp_lun; 601 u64 init_fcp_lun;
661 struct kmem_cache *fsf_req_qtcb_cache; 602 struct kmem_cache *fsf_req_qtcb_cache;
662 struct kmem_cache *sr_buffer_cache; 603 struct kmem_cache *sr_buffer_cache;
663 struct kmem_cache *gid_pn_cache; 604 struct kmem_cache *gid_pn_cache;
605 struct workqueue_struct *work_queue;
664}; 606};
665 607
666/* struct used by memory pools for fsf_requests */ 608/* struct used by memory pools for fsf_requests */
@@ -677,14 +619,7 @@ struct zfcp_fsf_req_qtcb {
677#define ZFCP_SET 0x00000100 619#define ZFCP_SET 0x00000100
678#define ZFCP_CLEAR 0x00000200 620#define ZFCP_CLEAR 0x00000200
679 621
680#ifndef atomic_test_mask
681#define atomic_test_mask(mask, target) \
682 ((atomic_read(target) & mask) == mask)
683#endif
684
685#define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id) 622#define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id)
686#define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter))
687#define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port(unit->port))
688 623
689/* 624/*
690 * Helper functions for request ID management. 625 * Helper functions for request ID management.
@@ -745,12 +680,6 @@ zfcp_unit_put(struct zfcp_unit *unit)
745} 680}
746 681
747static inline void 682static inline void
748zfcp_unit_wait(struct zfcp_unit *unit)
749{
750 wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
751}
752
753static inline void
754zfcp_port_get(struct zfcp_port *port) 683zfcp_port_get(struct zfcp_port *port)
755{ 684{
756 atomic_inc(&port->refcount); 685 atomic_inc(&port->refcount);
@@ -764,12 +693,6 @@ zfcp_port_put(struct zfcp_port *port)
764} 693}
765 694
766static inline void 695static inline void
767zfcp_port_wait(struct zfcp_port *port)
768{
769 wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
770}
771
772static inline void
773zfcp_adapter_get(struct zfcp_adapter *adapter) 696zfcp_adapter_get(struct zfcp_adapter *adapter)
774{ 697{
775 atomic_inc(&adapter->refcount); 698 atomic_inc(&adapter->refcount);
@@ -782,10 +705,4 @@ zfcp_adapter_put(struct zfcp_adapter *adapter)
782 wake_up(&adapter->remove_wq); 705 wake_up(&adapter->remove_wq);
783} 706}
784 707
785static inline void
786zfcp_adapter_wait(struct zfcp_adapter *adapter)
787{
788 wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
789}
790
791#endif /* ZFCP_DEF_H */ 708#endif /* ZFCP_DEF_H */
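
struct zfcp_wka_port and enum zfcp_wka_status, added above, replace the old nameserver zfcp_port. The sketch below shows how a caller might take and release a reference on such a port; demo_wka_port_get/put and the delayed-close timeout are illustrative only, and the actual open/close FSF requests (handled elsewhere in the driver, outside this excerpt) are elided. It assumes zfcp_def.h plus <linux/mutex.h>, <linux/workqueue.h> and <linux/errno.h>.

static int demo_wka_port_get(struct zfcp_wka_port *wka)
{
	int ret;

	mutex_lock(&wka->mutex);
	atomic_inc(&wka->refcount);
	if (wka->status == ZFCP_WKA_PORT_OFFLINE) {
		wka->status = ZFCP_WKA_PORT_OPENING;
		/* ... issue the open request and wait on completion_wq ... */
	}
	ret = (wka->status == ZFCP_WKA_PORT_ONLINE) ? 0 : -EIO;
	mutex_unlock(&wka->mutex);
	return ret;
}

static void demo_wka_port_put(struct zfcp_wka_port *wka)
{
	/* close lazily so back-to-back lookups can reuse the open port */
	if (atomic_dec_return(&wka->refcount) == 0)
		schedule_delayed_work(&wka->work, HZ);
}

Keeping a refcount plus a delayed close means consecutive GID_PN nameserver lookups reuse an already open well-known-address port instead of reopening it for every request.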
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 643ac4bba5b5..782313131870 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -23,7 +23,6 @@ enum zfcp_erp_steps {
23 ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001, 23 ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001,
24 ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, 24 ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
25 ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, 25 ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
26 ZFCP_ERP_STEP_NAMESERVER_OPEN = 0x0200,
27 ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400, 26 ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400,
28 ZFCP_ERP_STEP_PORT_OPENING = 0x0800, 27 ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
29 ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000, 28 ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000,
@@ -532,8 +531,7 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
532 struct zfcp_port *port; 531 struct zfcp_port *port;
533 532
534 list_for_each_entry(port, &adapter->port_list_head, list) 533 list_for_each_entry(port, &adapter->port_list_head, list)
535 if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)) 534 _zfcp_erp_port_reopen(port, clear, id, ref);
536 _zfcp_erp_port_reopen(port, clear, id, ref);
537} 535}
538 536
539static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, u8 id, 537static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, u8 id,
@@ -669,8 +667,6 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
669 int ret; 667 int ret;
670 struct zfcp_adapter *adapter = act->adapter; 668 struct zfcp_adapter *adapter = act->adapter;
671 669
672 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
673
674 write_lock_irq(&adapter->erp_lock); 670 write_lock_irq(&adapter->erp_lock);
675 zfcp_erp_action_to_running(act); 671 zfcp_erp_action_to_running(act);
676 write_unlock_irq(&adapter->erp_lock); 672 write_unlock_irq(&adapter->erp_lock);
@@ -741,8 +737,7 @@ static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act,
741 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); 737 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
742 failed_qdio: 738 failed_qdio:
743 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 739 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
744 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 740 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
745 ZFCP_STATUS_ADAPTER_XPORT_OK,
746 &act->adapter->status); 741 &act->adapter->status);
747 return retval; 742 return retval;
748} 743}
@@ -751,15 +746,11 @@ static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
751{ 746{
752 int retval; 747 int retval;
753 748
754 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status);
755 zfcp_erp_adapter_strategy_generic(act, 1); /* close */ 749 zfcp_erp_adapter_strategy_generic(act, 1); /* close */
756 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status);
757 if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY) 750 if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
758 return ZFCP_ERP_EXIT; 751 return ZFCP_ERP_EXIT;
759 752
760 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status);
761 retval = zfcp_erp_adapter_strategy_generic(act, 0); /* open */ 753 retval = zfcp_erp_adapter_strategy_generic(act, 0); /* open */
762 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status);
763 754
764 if (retval == ZFCP_ERP_FAILED) 755 if (retval == ZFCP_ERP_FAILED)
765 ssleep(8); 756 ssleep(8);
@@ -783,10 +774,7 @@ static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
783 774
784static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port) 775static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
785{ 776{
786 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING | 777 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
787 ZFCP_STATUS_COMMON_CLOSING |
788 ZFCP_STATUS_COMMON_ACCESS_DENIED |
789 ZFCP_STATUS_PORT_DID_DID |
790 ZFCP_STATUS_PORT_PHYS_CLOSING | 778 ZFCP_STATUS_PORT_PHYS_CLOSING |
791 ZFCP_STATUS_PORT_INVALID_WWPN, 779 ZFCP_STATUS_PORT_INVALID_WWPN,
792 &port->status); 780 &port->status);
@@ -839,73 +827,12 @@ static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
839 return ZFCP_ERP_CONTINUES; 827 return ZFCP_ERP_CONTINUES;
840} 828}
841 829
842static void zfcp_erp_port_strategy_open_ns_wake(struct zfcp_erp_action *ns_act)
843{
844 unsigned long flags;
845 struct zfcp_adapter *adapter = ns_act->adapter;
846 struct zfcp_erp_action *act, *tmp;
847 int status;
848
849 read_lock_irqsave(&adapter->erp_lock, flags);
850 list_for_each_entry_safe(act, tmp, &adapter->erp_running_head, list) {
851 if (act->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) {
852 status = atomic_read(&adapter->nameserver_port->status);
853 if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
854 zfcp_erp_port_failed(act->port, 27, NULL);
855 zfcp_erp_action_ready(act);
856 }
857 }
858 read_unlock_irqrestore(&adapter->erp_lock, flags);
859}
860
861static int zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *act)
862{
863 int retval;
864
865 switch (act->step) {
866 case ZFCP_ERP_STEP_UNINITIALIZED:
867 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
868 case ZFCP_ERP_STEP_PORT_CLOSING:
869 return zfcp_erp_port_strategy_open_port(act);
870
871 case ZFCP_ERP_STEP_PORT_OPENING:
872 if (atomic_read(&act->port->status) & ZFCP_STATUS_COMMON_OPEN)
873 retval = ZFCP_ERP_SUCCEEDED;
874 else
875 retval = ZFCP_ERP_FAILED;
876 /* this is needed anyway */
877 zfcp_erp_port_strategy_open_ns_wake(act);
878 return retval;
879
880 default:
881 return ZFCP_ERP_FAILED;
882 }
883}
884
885static int zfcp_erp_port_strategy_open_lookup(struct zfcp_erp_action *act)
886{
887 int retval;
888
889 retval = zfcp_fc_ns_gid_pn_request(act);
890 if (retval == -ENOMEM)
891 return ZFCP_ERP_NOMEM;
892 act->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
893 if (retval)
894 return ZFCP_ERP_FAILED;
895 return ZFCP_ERP_CONTINUES;
896}
897
898static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act) 830static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
899{ 831{
900 struct zfcp_adapter *adapter = act->adapter; 832 struct zfcp_adapter *adapter = act->adapter;
901 struct zfcp_port *port = act->port; 833 struct zfcp_port *port = act->port;
902 834
903 if (port->wwpn != adapter->peer_wwpn) { 835 if (port->wwpn != adapter->peer_wwpn) {
904 dev_err(&adapter->ccw_device->dev,
905 "Failed to open port 0x%016Lx, "
906 "Peer WWPN 0x%016Lx does not "
907 "match.\n", port->wwpn,
908 adapter->peer_wwpn);
909 zfcp_erp_port_failed(port, 25, NULL); 836 zfcp_erp_port_failed(port, 25, NULL);
910 return ZFCP_ERP_FAILED; 837 return ZFCP_ERP_FAILED;
911 } 838 }
@@ -914,11 +841,25 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
914 return zfcp_erp_port_strategy_open_port(act); 841 return zfcp_erp_port_strategy_open_port(act);
915} 842}
916 843
844void zfcp_erp_port_strategy_open_lookup(struct work_struct *work)
845{
846 int retval;
847 struct zfcp_port *port = container_of(work, struct zfcp_port,
848 gid_pn_work);
849
850 retval = zfcp_fc_ns_gid_pn(&port->erp_action);
851 if (retval == -ENOMEM)
852 zfcp_erp_notify(&port->erp_action, ZFCP_ERP_NOMEM);
853 port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
854 if (retval)
855 zfcp_erp_notify(&port->erp_action, ZFCP_ERP_FAILED);
856
857}
858
917static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) 859static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
918{ 860{
919 struct zfcp_adapter *adapter = act->adapter; 861 struct zfcp_adapter *adapter = act->adapter;
920 struct zfcp_port *port = act->port; 862 struct zfcp_port *port = act->port;
921 struct zfcp_port *ns_port = adapter->nameserver_port;
922 int p_status = atomic_read(&port->status); 863 int p_status = atomic_read(&port->status);
923 864
924 switch (act->step) { 865 switch (act->step) {
@@ -927,28 +868,10 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
927 case ZFCP_ERP_STEP_PORT_CLOSING: 868 case ZFCP_ERP_STEP_PORT_CLOSING:
928 if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP) 869 if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
929 return zfcp_erp_open_ptp_port(act); 870 return zfcp_erp_open_ptp_port(act);
930 if (!ns_port) { 871 if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
931 dev_err(&adapter->ccw_device->dev, 872 queue_work(zfcp_data.work_queue, &port->gid_pn_work);
932 "Nameserver port unavailable.\n"); 873 return ZFCP_ERP_CONTINUES;
933 return ZFCP_ERP_FAILED;
934 }
935 if (!(atomic_read(&ns_port->status) &
936 ZFCP_STATUS_COMMON_UNBLOCKED)) {
937 /* nameserver port may live again */
938 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING,
939 &ns_port->status);
940 if (zfcp_erp_port_reopen(ns_port, 0, 77, act) >= 0) {
941 act->step = ZFCP_ERP_STEP_NAMESERVER_OPEN;
942 return ZFCP_ERP_CONTINUES;
943 }
944 return ZFCP_ERP_FAILED;
945 } 874 }
946 /* else nameserver port is already open, fall through */
947 case ZFCP_ERP_STEP_NAMESERVER_OPEN:
948 if (!(atomic_read(&ns_port->status) & ZFCP_STATUS_COMMON_OPEN))
949 return ZFCP_ERP_FAILED;
950 return zfcp_erp_port_strategy_open_lookup(act);
951
952 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP: 875 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
953 if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) { 876 if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
954 if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) { 877 if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) {
@@ -961,25 +884,26 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
961 884
962 case ZFCP_ERP_STEP_PORT_OPENING: 885 case ZFCP_ERP_STEP_PORT_OPENING:
963 /* D_ID might have changed during open */ 886 /* D_ID might have changed during open */
964 if ((p_status & ZFCP_STATUS_COMMON_OPEN) && 887 if (p_status & ZFCP_STATUS_COMMON_OPEN) {
965 (p_status & ZFCP_STATUS_PORT_DID_DID)) 888 if (p_status & ZFCP_STATUS_PORT_DID_DID)
966 return ZFCP_ERP_SUCCEEDED; 889 return ZFCP_ERP_SUCCEEDED;
890 else {
891 act->step = ZFCP_ERP_STEP_PORT_CLOSING;
892 return ZFCP_ERP_CONTINUES;
893 }
967 /* fall through otherwise */ 894 /* fall through otherwise */
895 }
968 } 896 }
969 return ZFCP_ERP_FAILED; 897 return ZFCP_ERP_FAILED;
970} 898}
971 899
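
zfcp_erp_port_strategy_open_common() above is one pass of a step machine: each call handles the current act->step and reports whether recovery continues asynchronously, succeeded, or failed. A compressed sketch of the same shape, with hypothetical demo_* names and only three steps:

enum demo_step { DEMO_STEP_UNINIT, DEMO_STEP_LOOKUP, DEMO_STEP_OPENING };
enum demo_result { DEMO_CONTINUES, DEMO_SUCCEEDED, DEMO_FAILED };

struct demo_act {
        enum demo_step step;
        int port_open;          /* stands in for ZFCP_STATUS_COMMON_OPEN */
        int have_d_id;          /* stands in for ZFCP_STATUS_PORT_DID_DID */
};

static enum demo_result demo_strategy_open(struct demo_act *act)
{
        switch (act->step) {
        case DEMO_STEP_UNINIT:
                act->step = DEMO_STEP_LOOKUP;   /* kick off async lookup */
                return DEMO_CONTINUES;
        case DEMO_STEP_LOOKUP:
                if (!act->have_d_id)
                        return DEMO_FAILED;
                act->step = DEMO_STEP_OPENING;  /* kick off async open */
                return DEMO_CONTINUES;
        case DEMO_STEP_OPENING:
                if (act->port_open && act->have_d_id)
                        return DEMO_SUCCEEDED;
                return DEMO_FAILED;             /* or re-close, as above */
        }
        return DEMO_FAILED;
}
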
972static int zfcp_erp_port_strategy_open(struct zfcp_erp_action *act)
973{
974 if (atomic_read(&act->port->status) & (ZFCP_STATUS_PORT_WKA))
975 return zfcp_erp_port_strategy_open_nameserver(act);
976 return zfcp_erp_port_strategy_open_common(act);
977}
978
979static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action) 900static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
980{ 901{
981 struct zfcp_port *port = erp_action->port; 902 struct zfcp_port *port = erp_action->port;
982 903
904 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC)
905 goto close_init_done;
906
983 switch (erp_action->step) { 907 switch (erp_action->step) {
984 case ZFCP_ERP_STEP_UNINITIALIZED: 908 case ZFCP_ERP_STEP_UNINITIALIZED:
985 zfcp_erp_port_strategy_clearstati(port); 909 zfcp_erp_port_strategy_clearstati(port);
@@ -992,19 +916,17 @@ static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
992 return ZFCP_ERP_FAILED; 916 return ZFCP_ERP_FAILED;
993 break; 917 break;
994 } 918 }
919
920close_init_done:
995 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY) 921 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
996 return ZFCP_ERP_EXIT; 922 return ZFCP_ERP_EXIT;
997 else
998 return zfcp_erp_port_strategy_open(erp_action);
999 923
1000 return ZFCP_ERP_FAILED; 924 return zfcp_erp_port_strategy_open_common(erp_action);
1001} 925}
1002 926
1003static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit) 927static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit)
1004{ 928{
1005 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING | 929 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1006 ZFCP_STATUS_COMMON_CLOSING |
1007 ZFCP_STATUS_COMMON_ACCESS_DENIED |
1008 ZFCP_STATUS_UNIT_SHARED | 930 ZFCP_STATUS_UNIT_SHARED |
1009 ZFCP_STATUS_UNIT_READONLY, 931 ZFCP_STATUS_UNIT_READONLY,
1010 &unit->status); 932 &unit->status);
@@ -1065,8 +987,14 @@ static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
1065 break; 987 break;
1066 case ZFCP_ERP_FAILED : 988 case ZFCP_ERP_FAILED :
1067 atomic_inc(&unit->erp_counter); 989 atomic_inc(&unit->erp_counter);
1068 if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) 990 if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) {
991 dev_err(&unit->port->adapter->ccw_device->dev,
992 "ERP failed for unit 0x%016Lx on "
993 "port 0x%016Lx\n",
994 (unsigned long long)unit->fcp_lun,
995 (unsigned long long)unit->port->wwpn);
1069 zfcp_erp_unit_failed(unit, 21, NULL); 996 zfcp_erp_unit_failed(unit, 21, NULL);
997 }
1070 break; 998 break;
1071 } 999 }
1072 1000
@@ -1091,8 +1019,12 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
1091 result = ZFCP_ERP_EXIT; 1019 result = ZFCP_ERP_EXIT;
1092 } 1020 }
1093 atomic_inc(&port->erp_counter); 1021 atomic_inc(&port->erp_counter);
1094 if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS) 1022 if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS) {
1023 dev_err(&port->adapter->ccw_device->dev,
1024 "ERP failed for remote port 0x%016Lx\n",
1025 (unsigned long long)port->wwpn);
1095 zfcp_erp_port_failed(port, 22, NULL); 1026 zfcp_erp_port_failed(port, 22, NULL);
1027 }
1096 break; 1028 break;
1097 } 1029 }
1098 1030
@@ -1114,8 +1046,12 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
1114 1046
1115 case ZFCP_ERP_FAILED : 1047 case ZFCP_ERP_FAILED :
1116 atomic_inc(&adapter->erp_counter); 1048 atomic_inc(&adapter->erp_counter);
1117 if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS) 1049 if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS) {
1050 dev_err(&adapter->ccw_device->dev,
1051 "ERP cannot recover an error "
1052 "on the FCP device\n");
1118 zfcp_erp_adapter_failed(adapter, 23, NULL); 1053 zfcp_erp_adapter_failed(adapter, 23, NULL);
1054 }
1119 break; 1055 break;
1120 } 1056 }
1121 1057
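
The three hunks above print a device error message only once, at the moment the per-object retry counter crosses ZFCP_MAX_ERPS. A sketch of that threshold check folded into one helper, with a hypothetical DEMO_MAX_RETRIES bound (the driver itself uses atomic_inc() followed by atomic_read()):

#include <linux/atomic.h>
#include <linux/types.h>

#define DEMO_MAX_RETRIES 3      /* stands in for ZFCP_MAX_ERPS */

/* true once the retry budget is used up; at that point the caller prints
 * its one-time "recovery failed" message and marks the object as failed */
static bool demo_retries_exhausted(atomic_t *counter)
{
        return atomic_inc_return(counter) > DEMO_MAX_RETRIES;
}
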
@@ -1250,9 +1186,10 @@ static void zfcp_erp_scsi_scan(struct work_struct *work)
1250 struct zfcp_unit *unit = p->unit; 1186 struct zfcp_unit *unit = p->unit;
1251 struct fc_rport *rport = unit->port->rport; 1187 struct fc_rport *rport = unit->port->rport;
1252 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, 1188 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
1253 unit->scsi_lun, 0); 1189 scsilun_to_int((struct scsi_lun *)&unit->fcp_lun), 0);
1254 atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status); 1190 atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1255 zfcp_unit_put(unit); 1191 zfcp_unit_put(unit);
1192 wake_up(&unit->port->adapter->erp_done_wqh);
1256 kfree(p); 1193 kfree(p);
1257} 1194}
1258 1195
@@ -1263,9 +1200,9 @@ static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
1263 p = kzalloc(sizeof(*p), GFP_KERNEL); 1200 p = kzalloc(sizeof(*p), GFP_KERNEL);
1264 if (!p) { 1201 if (!p) {
1265 dev_err(&unit->port->adapter->ccw_device->dev, 1202 dev_err(&unit->port->adapter->ccw_device->dev,
1266 "Out of resources. Could not register unit " 1203 "Registering unit 0x%016Lx on port 0x%016Lx failed\n",
1267 "0x%016Lx on port 0x%016Lx with SCSI stack.\n", 1204 (unsigned long long)unit->fcp_lun,
1268 unit->fcp_lun, unit->port->wwpn); 1205 (unsigned long long)unit->port->wwpn);
1269 return; 1206 return;
1270 } 1207 }
1271 1208
@@ -1273,7 +1210,7 @@ static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
1273 atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status); 1210 atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1274 INIT_WORK(&p->work, zfcp_erp_scsi_scan); 1211 INIT_WORK(&p->work, zfcp_erp_scsi_scan);
1275 p->unit = unit; 1212 p->unit = unit;
1276 schedule_work(&p->work); 1213 queue_work(zfcp_data.work_queue, &p->work);
1277} 1214}
1278 1215
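
schedule_work() above is replaced by queue_work() on zfcp's own zfcp_data.work_queue, which keeps the SCSI registration work off the shared system workqueue. A sketch of setting up such a driver-private queue (hypothetical demo_* names; create_singlethread_workqueue() is the classic API of that era):

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *demo_wq;

static int demo_wq_init(void)
{
        demo_wq = create_singlethread_workqueue("demo_wq");
        return demo_wq ? 0 : -ENOMEM;
}

static void demo_wq_exit(void)
{
        destroy_workqueue(demo_wq);
}

/* callers then do queue_work(demo_wq, &work) instead of schedule_work(&work) */
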
1279static void zfcp_erp_rport_register(struct zfcp_port *port) 1216static void zfcp_erp_rport_register(struct zfcp_port *port)
@@ -1286,8 +1223,8 @@ static void zfcp_erp_rport_register(struct zfcp_port *port)
1286 port->rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids); 1223 port->rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
1287 if (!port->rport) { 1224 if (!port->rport) {
1288 dev_err(&port->adapter->ccw_device->dev, 1225 dev_err(&port->adapter->ccw_device->dev,
1289 "Failed registration of rport " 1226 "Registering port 0x%016Lx failed\n",
1290 "0x%016Lx.\n", port->wwpn); 1227 (unsigned long long)port->wwpn);
1291 return; 1228 return;
1292 } 1229 }
1293 1230
@@ -1299,12 +1236,12 @@ static void zfcp_erp_rport_register(struct zfcp_port *port)
1299static void zfcp_erp_rports_del(struct zfcp_adapter *adapter) 1236static void zfcp_erp_rports_del(struct zfcp_adapter *adapter)
1300{ 1237{
1301 struct zfcp_port *port; 1238 struct zfcp_port *port;
1302 list_for_each_entry(port, &adapter->port_list_head, list) 1239 list_for_each_entry(port, &adapter->port_list_head, list) {
1303 if (port->rport && !(atomic_read(&port->status) & 1240 if (!port->rport)
1304 ZFCP_STATUS_PORT_WKA)) { 1241 continue;
1305 fc_remote_port_delete(port->rport); 1242 fc_remote_port_delete(port->rport);
1306 port->rport = NULL; 1243 port->rport = NULL;
1307 } 1244 }
1308} 1245}
1309 1246
1310static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) 1247static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
@@ -1459,9 +1396,9 @@ static int zfcp_erp_thread(void *data)
1459 zfcp_erp_wakeup(adapter); 1396 zfcp_erp_wakeup(adapter);
1460 } 1397 }
1461 1398
1462 zfcp_rec_dbf_event_thread(4, adapter); 1399 zfcp_rec_dbf_event_thread_lock(4, adapter);
1463 down_interruptible(&adapter->erp_ready_sem); 1400 down_interruptible(&adapter->erp_ready_sem);
1464 zfcp_rec_dbf_event_thread(5, adapter); 1401 zfcp_rec_dbf_event_thread_lock(5, adapter);
1465 } 1402 }
1466 1403
1467 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); 1404 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
@@ -1484,7 +1421,7 @@ int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
1484 retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD); 1421 retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
1485 if (retval < 0) { 1422 if (retval < 0) {
1486 dev_err(&adapter->ccw_device->dev, 1423 dev_err(&adapter->ccw_device->dev,
1487 "Creation of ERP thread failed.\n"); 1424 "Creating an ERP thread for the FCP device failed.\n");
1488 return retval; 1425 return retval;
1489 } 1426 }
1490 wait_event(adapter->erp_thread_wqh, 1427 wait_event(adapter->erp_thread_wqh,
@@ -1506,7 +1443,7 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
1506{ 1443{
1507 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status); 1444 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
1508 up(&adapter->erp_ready_sem); 1445 up(&adapter->erp_ready_sem);
1509 zfcp_rec_dbf_event_thread_lock(2, adapter); 1446 zfcp_rec_dbf_event_thread_lock(3, adapter);
1510 1447
1511 wait_event(adapter->erp_thread_wqh, 1448 wait_event(adapter->erp_thread_wqh,
1512 !(atomic_read(&adapter->status) & 1449 !(atomic_read(&adapter->status) &
@@ -1526,7 +1463,6 @@ void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref)
1526{ 1463{
1527 zfcp_erp_modify_adapter_status(adapter, id, ref, 1464 zfcp_erp_modify_adapter_status(adapter, id, ref,
1528 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); 1465 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1529 dev_err(&adapter->ccw_device->dev, "Adapter ERP failed.\n");
1530} 1466}
1531 1467
1532/** 1468/**
@@ -1539,15 +1475,6 @@ void zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref)
1539{ 1475{
1540 zfcp_erp_modify_port_status(port, id, ref, 1476 zfcp_erp_modify_port_status(port, id, ref,
1541 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); 1477 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1542
1543 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)
1544 dev_err(&port->adapter->ccw_device->dev,
1545 "Port ERP failed for WKA port d_id=0x%06x.\n",
1546 port->d_id);
1547 else
1548 dev_err(&port->adapter->ccw_device->dev,
1549 "Port ERP failed for port wwpn=0x%016Lx.\n",
1550 port->wwpn);
1551} 1478}
1552 1479
1553/** 1480/**
@@ -1560,10 +1487,6 @@ void zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref)
1560{ 1487{
1561 zfcp_erp_modify_unit_status(unit, id, ref, 1488 zfcp_erp_modify_unit_status(unit, id, ref,
1562 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); 1489 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1563
1564 dev_err(&unit->port->adapter->ccw_device->dev,
1565 "Unit ERP failed for unit 0x%016Lx on port 0x%016Lx.\n",
1566 unit->fcp_lun, unit->port->wwpn);
1567} 1490}
1568 1491
1569/** 1492/**
@@ -1754,9 +1677,8 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id,
1754 1677
1755 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | 1678 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
1756 ZFCP_STATUS_COMMON_ACCESS_BOXED))) { 1679 ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
1757 if (!(status & ZFCP_STATUS_PORT_WKA)) 1680 list_for_each_entry(unit, &port->unit_list_head, list)
1758 list_for_each_entry(unit, &port->unit_list_head, list) 1681 zfcp_erp_unit_access_changed(unit, id, ref);
1759 zfcp_erp_unit_access_changed(unit, id, ref);
1760 return; 1682 return;
1761 } 1683 }
1762 1684
@@ -1779,10 +1701,7 @@ void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id,
1779 return; 1701 return;
1780 1702
1781 read_lock_irqsave(&zfcp_data.config_lock, flags); 1703 read_lock_irqsave(&zfcp_data.config_lock, flags);
1782 if (adapter->nameserver_port)
1783 zfcp_erp_port_access_changed(adapter->nameserver_port, id, ref);
1784 list_for_each_entry(port, &adapter->port_list_head, list) 1704 list_for_each_entry(port, &adapter->port_list_head, list)
1785 if (port != adapter->nameserver_port) 1705 zfcp_erp_port_access_changed(port, id, ref);
1786 zfcp_erp_port_access_changed(port, id, ref);
1787 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 1706 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1788} 1707}
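
With the nameserver no longer kept in the regular port list, the final zfcp_erp.c hunk can walk adapter->port_list_head without special cases. The locking around such a walk follows the usual read_lock_irqsave() pattern; a self-contained sketch with hypothetical demo_* types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_rport {
        struct list_head list;
};

struct demo_adapter {
        rwlock_t config_lock;
        struct list_head port_list_head;
};

static void demo_for_each_port(struct demo_adapter *adapter,
                               void (*fn)(struct demo_rport *))
{
        struct demo_rport *port;
        unsigned long flags;

        read_lock_irqsave(&adapter->config_lock, flags);
        list_for_each_entry(port, &adapter->port_list_head, list)
                fn(port);
        read_unlock_irqrestore(&adapter->config_lock, flags);
}
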
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index edfdb21591f3..b5adeda93e1d 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -12,16 +12,14 @@
12#include "zfcp_def.h" 12#include "zfcp_def.h"
13 13
14/* zfcp_aux.c */ 14/* zfcp_aux.c */
15extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, 15extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64);
16 fcp_lun_t); 16extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
17extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *,
18 wwn_t);
19extern int zfcp_adapter_enqueue(struct ccw_device *); 17extern int zfcp_adapter_enqueue(struct ccw_device *);
20extern void zfcp_adapter_dequeue(struct zfcp_adapter *); 18extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
21extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t, u32, 19extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
22 u32); 20 u32);
23extern void zfcp_port_dequeue(struct zfcp_port *); 21extern void zfcp_port_dequeue(struct zfcp_port *);
24extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, fcp_lun_t); 22extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
25extern void zfcp_unit_dequeue(struct zfcp_unit *); 23extern void zfcp_unit_dequeue(struct zfcp_unit *);
26extern int zfcp_reqlist_isempty(struct zfcp_adapter *); 24extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
27extern void zfcp_sg_free_table(struct scatterlist *, int); 25extern void zfcp_sg_free_table(struct scatterlist *, int);
@@ -29,6 +27,7 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int);
29 27
30/* zfcp_ccw.c */ 28/* zfcp_ccw.c */
31extern int zfcp_ccw_register(void); 29extern int zfcp_ccw_register(void);
30extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
32 31
33/* zfcp_cfdc.c */ 32/* zfcp_cfdc.c */
34extern struct miscdevice zfcp_cfdc_misc; 33extern struct miscdevice zfcp_cfdc_misc;
@@ -50,6 +49,8 @@ extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
50 struct fsf_status_read_buffer *); 49 struct fsf_status_read_buffer *);
51extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int, 50extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int,
52 int); 51 int);
52extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *,
53 struct zfcp_fsf_req *);
53extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *); 54extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
54extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *); 55extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
55extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *); 56extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
@@ -91,17 +92,21 @@ extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8, void *);
91extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8, void *); 92extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8, void *);
92extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *); 93extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *);
93extern void zfcp_erp_timeout_handler(unsigned long); 94extern void zfcp_erp_timeout_handler(unsigned long);
95extern void zfcp_erp_port_strategy_open_lookup(struct work_struct *);
94 96
95/* zfcp_fc.c */ 97/* zfcp_fc.c */
96extern int zfcp_scan_ports(struct zfcp_adapter *); 98extern int zfcp_scan_ports(struct zfcp_adapter *);
97extern void _zfcp_scan_ports_later(struct work_struct *); 99extern void _zfcp_scan_ports_later(struct work_struct *);
98extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); 100extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
99extern int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *); 101extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *);
100extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); 102extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
101extern void zfcp_test_link(struct zfcp_port *); 103extern void zfcp_test_link(struct zfcp_port *);
104extern void zfcp_fc_nameserver_init(struct zfcp_adapter *);
102 105
103/* zfcp_fsf.c */ 106/* zfcp_fsf.c */
104extern int zfcp_fsf_open_port(struct zfcp_erp_action *); 107extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
108extern int zfcp_fsf_open_wka_port(struct zfcp_wka_port *);
109extern int zfcp_fsf_close_wka_port(struct zfcp_wka_port *);
105extern int zfcp_fsf_close_port(struct zfcp_erp_action *); 110extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
106extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); 111extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
107extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); 112extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
@@ -135,10 +140,8 @@ extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
135extern int zfcp_qdio_allocate(struct zfcp_adapter *); 140extern int zfcp_qdio_allocate(struct zfcp_adapter *);
136extern void zfcp_qdio_free(struct zfcp_adapter *); 141extern void zfcp_qdio_free(struct zfcp_adapter *);
137extern int zfcp_qdio_send(struct zfcp_fsf_req *); 142extern int zfcp_qdio_send(struct zfcp_fsf_req *);
138extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req( 143extern struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *);
139 struct zfcp_fsf_req *); 144extern struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *);
140extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr(
141 struct zfcp_fsf_req *);
142extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long, 145extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long,
143 struct scatterlist *, int); 146 struct scatterlist *, int);
144extern int zfcp_qdio_open(struct zfcp_adapter *); 147extern int zfcp_qdio_open(struct zfcp_adapter *);
@@ -148,14 +151,12 @@ extern void zfcp_qdio_close(struct zfcp_adapter *);
148extern struct zfcp_data zfcp_data; 151extern struct zfcp_data zfcp_data;
149extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); 152extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
150extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); 153extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
151extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
152extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); 154extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
153extern struct fc_function_template zfcp_transport_functions; 155extern struct fc_function_template zfcp_transport_functions;
154 156
155/* zfcp_sysfs.c */ 157/* zfcp_sysfs.c */
156extern struct attribute_group zfcp_sysfs_unit_attrs; 158extern struct attribute_group zfcp_sysfs_unit_attrs;
157extern struct attribute_group zfcp_sysfs_adapter_attrs; 159extern struct attribute_group zfcp_sysfs_adapter_attrs;
158extern struct attribute_group zfcp_sysfs_ns_port_attrs;
159extern struct attribute_group zfcp_sysfs_port_attrs; 160extern struct attribute_group zfcp_sysfs_port_attrs;
160extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; 161extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
161extern struct device_attribute *zfcp_sysfs_shost_attrs[]; 162extern struct device_attribute *zfcp_sysfs_shost_attrs[];
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 56196c98c07b..1a7c80a77ff5 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -39,6 +39,84 @@ struct zfcp_gpn_ft {
39 struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS]; 39 struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS];
40}; 40};
41 41
42struct zfcp_fc_ns_handler_data {
43 struct completion done;
44 void (*handler)(unsigned long);
45 unsigned long handler_data;
46};
47
48static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
49{
50 if (mutex_lock_interruptible(&wka_port->mutex))
51 return -ERESTARTSYS;
52
53 if (wka_port->status != ZFCP_WKA_PORT_ONLINE) {
54 wka_port->status = ZFCP_WKA_PORT_OPENING;
55 if (zfcp_fsf_open_wka_port(wka_port))
56 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
57 }
58
59 mutex_unlock(&wka_port->mutex);
60
61 wait_event_timeout(
62 wka_port->completion_wq,
63 wka_port->status == ZFCP_WKA_PORT_ONLINE ||
64 wka_port->status == ZFCP_WKA_PORT_OFFLINE,
65 HZ >> 1);
66
67 if (wka_port->status == ZFCP_WKA_PORT_ONLINE) {
68 atomic_inc(&wka_port->refcount);
69 return 0;
70 }
71 return -EIO;
72}
73
74static void zfcp_wka_port_offline(struct work_struct *work)
75{
76 struct delayed_work *dw = container_of(work, struct delayed_work, work);
77 struct zfcp_wka_port *wka_port =
78 container_of(dw, struct zfcp_wka_port, work);
79
80 wait_event(wka_port->completion_wq,
81 atomic_read(&wka_port->refcount) == 0);
82
83 mutex_lock(&wka_port->mutex);
84 if ((atomic_read(&wka_port->refcount) != 0) ||
85 (wka_port->status != ZFCP_WKA_PORT_ONLINE))
86 goto out;
87
88 wka_port->status = ZFCP_WKA_PORT_CLOSING;
89 if (zfcp_fsf_close_wka_port(wka_port)) {
90 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
91 wake_up(&wka_port->completion_wq);
92 }
93out:
94 mutex_unlock(&wka_port->mutex);
95}
96
97static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port)
98{
99 if (atomic_dec_return(&wka_port->refcount) != 0)
100 return;
101 /* wait 10 milliseconds, other reqs might pop in */
102 schedule_delayed_work(&wka_port->work, HZ / 100);
103}
104
105void zfcp_fc_nameserver_init(struct zfcp_adapter *adapter)
106{
107 struct zfcp_wka_port *wka_port = &adapter->nsp;
108
109 init_waitqueue_head(&wka_port->completion_wq);
110
111 wka_port->adapter = adapter;
112 wka_port->d_id = ZFCP_DID_DIRECTORY_SERVICE;
113
114 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
115 atomic_set(&wka_port->refcount, 0);
116 mutex_init(&wka_port->mutex);
117 INIT_DELAYED_WORK(&wka_port->work, zfcp_wka_port_offline);
118}
119
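
zfcp_fc_nameserver_init() above prepares a well-known-address port that is opened on first use and closed only after a short idle period. A sketch of just the refcount/deferred-close half of that scheme (hypothetical demo_* names; the real zfcp_wka_port_get() additionally opens the port and waits for it to come online):

#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_wka_port {
        atomic_t refcount;
        struct delayed_work work;       /* deferred close */
};

static void demo_wka_get(struct demo_wka_port *wka)
{
        /* a deferred close that is already queued is harmless: its handler
         * rechecks the refcount and the port state before really closing */
        atomic_inc(&wka->refcount);
}

static void demo_wka_put(struct demo_wka_port *wka)
{
        if (atomic_dec_return(&wka->refcount) != 0)
                return;
        /* keep the port open for ~10 ms in case another request arrives */
        schedule_delayed_work(&wka->work, HZ / 100);
}
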
42static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, 120static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
43 struct fcp_rscn_element *elem) 121 struct fcp_rscn_element *elem)
44{ 122{
@@ -47,10 +125,8 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
47 125
48 read_lock_irqsave(&zfcp_data.config_lock, flags); 126 read_lock_irqsave(&zfcp_data.config_lock, flags);
49 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { 127 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
50 if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
51 continue;
52 /* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */ 128 /* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */
53 if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status)) 129 if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_DID_DID))
54 /* Try to connect to unused ports anyway. */ 130 /* Try to connect to unused ports anyway. */
55 zfcp_erp_port_reopen(port, 131 zfcp_erp_port_reopen(port,
56 ZFCP_STATUS_COMMON_ERP_FAILED, 132 ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -102,7 +178,7 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
102 schedule_work(&fsf_req->adapter->scan_work); 178 schedule_work(&fsf_req->adapter->scan_work);
103} 179}
104 180
105static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, wwn_t wwpn) 181static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
106{ 182{
107 struct zfcp_adapter *adapter = req->adapter; 183 struct zfcp_adapter *adapter = req->adapter;
108 struct zfcp_port *port; 184 struct zfcp_port *port;
@@ -157,7 +233,18 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
157 zfcp_fc_incoming_rscn(fsf_req); 233 zfcp_fc_incoming_rscn(fsf_req);
158} 234}
159 235
160static void zfcp_ns_gid_pn_handler(unsigned long data) 236static void zfcp_fc_ns_handler(unsigned long data)
237{
238 struct zfcp_fc_ns_handler_data *compl_rec =
239 (struct zfcp_fc_ns_handler_data *) data;
240
241 if (compl_rec->handler)
242 compl_rec->handler(compl_rec->handler_data);
243
244 complete(&compl_rec->done);
245}
246
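
zfcp_fc_ns_handler() above is a small adapter that turns the asynchronous CT callback into a synchronous call: the submitter parks a completion plus an optional evaluation callback in a record, passes the record's address as handler_data, and blocks in wait_for_completion(). A sketch of that record and handler (demo_* names are hypothetical):

#include <linux/completion.h>

struct demo_ns_handler_data {
        struct completion done;
        void (*handler)(unsigned long);
        unsigned long handler_data;
};

static void demo_ns_handler(unsigned long data)
{
        struct demo_ns_handler_data *rec =
                (struct demo_ns_handler_data *) data;

        if (rec->handler)               /* optional per-request evaluation */
                rec->handler(rec->handler_data);
        complete(&rec->done);           /* wake the waiting submitter */
}

/* submitter side, error handling omitted:
 *      struct demo_ns_handler_data rec;
 *      init_completion(&rec.done);
 *      rec.handler = eval_fn;
 *      rec.handler_data = (unsigned long) eval_arg;
 *      submit_request(..., demo_ns_handler, (unsigned long) &rec);
 *      wait_for_completion(&rec.done);
 */
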
247static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
161{ 248{
162 struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data; 249 struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data;
163 struct zfcp_send_ct *ct = &gid_pn->ct; 250 struct zfcp_send_ct *ct = &gid_pn->ct;
@@ -166,43 +253,31 @@ static void zfcp_ns_gid_pn_handler(unsigned long data)
166 struct zfcp_port *port = gid_pn->port; 253 struct zfcp_port *port = gid_pn->port;
167 254
168 if (ct->status) 255 if (ct->status)
169 goto out; 256 return;
170 if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) { 257 if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) {
171 atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status); 258 atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
172 goto out; 259 return;
173 } 260 }
174 /* paranoia */ 261 /* paranoia */
175 if (ct_iu_req->wwpn != port->wwpn) 262 if (ct_iu_req->wwpn != port->wwpn)
176 goto out; 263 return;
177 /* looks like a valid d_id */ 264 /* looks like a valid d_id */
178 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK; 265 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
179 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status); 266 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
180out:
181 mempool_free(gid_pn, port->adapter->pool.data_gid_pn);
182} 267}
183 268
184/** 269 static int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
185 * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request 270 struct zfcp_gid_pn_data *gid_pn)
186 * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
187 * return: -ENOMEM on error, 0 otherwise
188 */
189int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
190{ 271{
191 int ret;
192 struct zfcp_gid_pn_data *gid_pn;
193 struct zfcp_adapter *adapter = erp_action->adapter; 272 struct zfcp_adapter *adapter = erp_action->adapter;
194 273 struct zfcp_fc_ns_handler_data compl_rec;
195 gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC); 274 int ret;
196 if (!gid_pn)
197 return -ENOMEM;
198
199 memset(gid_pn, 0, sizeof(*gid_pn));
200 275
201 /* setup parameters for send generic command */ 276 /* setup parameters for send generic command */
202 gid_pn->port = erp_action->port; 277 gid_pn->port = erp_action->port;
203 gid_pn->ct.port = adapter->nameserver_port; 278 gid_pn->ct.wka_port = &adapter->nsp;
204 gid_pn->ct.handler = zfcp_ns_gid_pn_handler; 279 gid_pn->ct.handler = zfcp_fc_ns_handler;
205 gid_pn->ct.handler_data = (unsigned long) gid_pn; 280 gid_pn->ct.handler_data = (unsigned long) &compl_rec;
206 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT; 281 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
207 gid_pn->ct.req = &gid_pn->req; 282 gid_pn->ct.req = &gid_pn->req;
208 gid_pn->ct.resp = &gid_pn->resp; 283 gid_pn->ct.resp = &gid_pn->resp;
@@ -222,10 +297,42 @@ int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
222 gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE; 297 gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE;
223 gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn; 298 gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn;
224 299
300 init_completion(&compl_rec.done);
301 compl_rec.handler = zfcp_fc_ns_gid_pn_eval;
302 compl_rec.handler_data = (unsigned long) gid_pn;
225 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp, 303 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
226 erp_action); 304 erp_action);
305 if (!ret)
306 wait_for_completion(&compl_rec.done);
307 return ret;
308}
309
310/**
311 * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
312 * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
313 * return: -ENOMEM on error, 0 otherwise
314 */
315int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action)
316{
317 int ret;
318 struct zfcp_gid_pn_data *gid_pn;
319 struct zfcp_adapter *adapter = erp_action->adapter;
320
321 gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC);
322 if (!gid_pn)
323 return -ENOMEM;
324
325 memset(gid_pn, 0, sizeof(*gid_pn));
326
327 ret = zfcp_wka_port_get(&adapter->nsp);
227 if (ret) 328 if (ret)
228 mempool_free(gid_pn, adapter->pool.data_gid_pn); 329 goto out;
330
331 ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn);
332
333 zfcp_wka_port_put(&adapter->nsp);
334out:
335 mempool_free(gid_pn, adapter->pool.data_gid_pn);
229 return ret; 336 return ret;
230} 337}
231 338
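
zfcp_fc_ns_gid_pn() above brackets the synchronous GID_PN request between mempool_alloc()/mempool_free() and zfcp_wka_port_get()/zfcp_wka_port_put(); the single out: label guarantees the buffer is released on every exit path. A self-contained sketch of that bracket with stubbed-out demo_* helpers:

#include <linux/mempool.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/errno.h>

struct demo_req {
        char payload[64];               /* hypothetical request record */
};

static int demo_port_get(void)  { return 0; }   /* WKA port stand-ins */
static void demo_port_put(void) { }
static int demo_issue(struct demo_req *req) { return 0; }

static int demo_gid_pn(mempool_t *pool)
{
        struct demo_req *req;
        int ret;

        req = mempool_alloc(pool, GFP_ATOMIC);
        if (!req)
                return -ENOMEM;
        memset(req, 0, sizeof(*req));

        ret = demo_port_get();          /* take the nameserver port reference */
        if (ret)
                goto out;

        ret = demo_issue(req);          /* synchronous request */
        demo_port_put();                /* drop the reference right away */
out:
        mempool_free(req, pool);        /* freed on every exit path */
        return ret;
}
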
@@ -255,14 +362,14 @@ struct zfcp_els_adisc {
255 struct scatterlist req; 362 struct scatterlist req;
256 struct scatterlist resp; 363 struct scatterlist resp;
257 struct zfcp_ls_adisc ls_adisc; 364 struct zfcp_ls_adisc ls_adisc;
258 struct zfcp_ls_adisc_acc ls_adisc_acc; 365 struct zfcp_ls_adisc ls_adisc_acc;
259}; 366};
260 367
261static void zfcp_fc_adisc_handler(unsigned long data) 368static void zfcp_fc_adisc_handler(unsigned long data)
262{ 369{
263 struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data; 370 struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data;
264 struct zfcp_port *port = adisc->els.port; 371 struct zfcp_port *port = adisc->els.port;
265 struct zfcp_ls_adisc_acc *ls_adisc = &adisc->ls_adisc_acc; 372 struct zfcp_ls_adisc *ls_adisc = &adisc->ls_adisc_acc;
266 373
267 if (adisc->els.status) { 374 if (adisc->els.status) {
268 /* request rejected or timed out */ 375 /* request rejected or timed out */
@@ -295,7 +402,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
295 sg_init_one(adisc->els.req, &adisc->ls_adisc, 402 sg_init_one(adisc->els.req, &adisc->ls_adisc,
296 sizeof(struct zfcp_ls_adisc)); 403 sizeof(struct zfcp_ls_adisc));
297 sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc, 404 sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc,
298 sizeof(struct zfcp_ls_adisc_acc)); 405 sizeof(struct zfcp_ls_adisc));
299 406
300 adisc->els.req_count = 1; 407 adisc->els.req_count = 1;
301 adisc->els.resp_count = 1; 408 adisc->els.resp_count = 1;
@@ -338,30 +445,6 @@ void zfcp_test_link(struct zfcp_port *port)
338 zfcp_erp_port_forced_reopen(port, 0, 65, NULL); 445 zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
339} 446}
340 447
341static int zfcp_scan_get_nameserver(struct zfcp_adapter *adapter)
342{
343 int ret;
344
345 if (!adapter->nameserver_port)
346 return -EINTR;
347
348 if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
349 &adapter->nameserver_port->status)) {
350 ret = zfcp_erp_port_reopen(adapter->nameserver_port, 0, 148,
351 NULL);
352 if (ret)
353 return ret;
354 zfcp_erp_wait(adapter);
355 }
356 return !atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
357 &adapter->nameserver_port->status);
358}
359
360static void zfcp_gpn_ft_handler(unsigned long _done)
361{
362 complete((struct completion *)_done);
363}
364
365static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft) 448static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft)
366{ 449{
367 struct scatterlist *sg = &gpn_ft->sg_req; 450 struct scatterlist *sg = &gpn_ft->sg_req;
@@ -403,7 +486,7 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
403{ 486{
404 struct zfcp_send_ct *ct = &gpn_ft->ct; 487 struct zfcp_send_ct *ct = &gpn_ft->ct;
405 struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); 488 struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
406 struct completion done; 489 struct zfcp_fc_ns_handler_data compl_rec;
407 int ret; 490 int ret;
408 491
409 /* prepare CT IU for GPN_FT */ 492 /* prepare CT IU for GPN_FT */
@@ -420,19 +503,20 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
420 req->fc4_type = ZFCP_CT_SCSI_FCP; 503 req->fc4_type = ZFCP_CT_SCSI_FCP;
421 504
422 /* prepare zfcp_send_ct */ 505 /* prepare zfcp_send_ct */
423 ct->port = adapter->nameserver_port; 506 ct->wka_port = &adapter->nsp;
424 ct->handler = zfcp_gpn_ft_handler; 507 ct->handler = zfcp_fc_ns_handler;
425 ct->handler_data = (unsigned long)&done; 508 ct->handler_data = (unsigned long)&compl_rec;
426 ct->timeout = 10; 509 ct->timeout = 10;
427 ct->req = &gpn_ft->sg_req; 510 ct->req = &gpn_ft->sg_req;
428 ct->resp = gpn_ft->sg_resp; 511 ct->resp = gpn_ft->sg_resp;
429 ct->req_count = 1; 512 ct->req_count = 1;
430 ct->resp_count = ZFCP_GPN_FT_BUFFERS; 513 ct->resp_count = ZFCP_GPN_FT_BUFFERS;
431 514
432 init_completion(&done); 515 init_completion(&compl_rec.done);
516 compl_rec.handler = NULL;
433 ret = zfcp_fsf_send_ct(ct, NULL, NULL); 517 ret = zfcp_fsf_send_ct(ct, NULL, NULL);
434 if (!ret) 518 if (!ret)
435 wait_for_completion(&done); 519 wait_for_completion(&compl_rec.done);
436 return ret; 520 return ret;
437} 521}
438 522
@@ -442,9 +526,8 @@ static void zfcp_validate_port(struct zfcp_port *port)
442 526
443 atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status); 527 atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
444 528
445 if (port == adapter->nameserver_port) 529 if ((port->supported_classes != 0) ||
446 return; 530 !list_empty(&port->unit_list_head)) {
447 if ((port->supported_classes != 0) || (port->units != 0)) {
448 zfcp_port_put(port); 531 zfcp_port_put(port);
449 return; 532 return;
450 } 533 }
@@ -460,7 +543,7 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
460 struct scatterlist *sg = gpn_ft->sg_resp; 543 struct scatterlist *sg = gpn_ft->sg_resp;
461 struct ct_hdr *hdr = sg_virt(sg); 544 struct ct_hdr *hdr = sg_virt(sg);
462 struct gpn_ft_resp_acc *acc = sg_virt(sg); 545 struct gpn_ft_resp_acc *acc = sg_virt(sg);
463 struct zfcp_adapter *adapter = ct->port->adapter; 546 struct zfcp_adapter *adapter = ct->wka_port->adapter;
464 struct zfcp_port *port, *tmp; 547 struct zfcp_port *port, *tmp;
465 u32 d_id; 548 u32 d_id;
466 int ret = 0, x, last = 0; 549 int ret = 0, x, last = 0;
@@ -490,6 +573,9 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
490 d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 | 573 d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 |
491 acc->port_id[2]; 574 acc->port_id[2];
492 575
576 /* don't attach ports with a well known address */
577 if ((d_id & ZFCP_DID_WKA) == ZFCP_DID_WKA)
578 continue;
493 /* skip the adapter's port and known remote ports */ 579 /* skip the adapter's port and known remote ports */
494 if (acc->wwpn == fc_host_port_name(adapter->scsi_host)) 580 if (acc->wwpn == fc_host_port_name(adapter->scsi_host))
495 continue; 581 continue;
@@ -528,13 +614,15 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
528 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT) 614 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
529 return 0; 615 return 0;
530 616
531 ret = zfcp_scan_get_nameserver(adapter); 617 ret = zfcp_wka_port_get(&adapter->nsp);
532 if (ret) 618 if (ret)
533 return ret; 619 return ret;
534 620
535 gpn_ft = zfcp_alloc_sg_env(); 621 gpn_ft = zfcp_alloc_sg_env();
536 if (!gpn_ft) 622 if (!gpn_ft) {
537 return -ENOMEM; 623 ret = -ENOMEM;
624 goto out;
625 }
538 626
539 for (i = 0; i < 3; i++) { 627 for (i = 0; i < 3; i++) {
540 ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter); 628 ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter);
@@ -547,7 +635,8 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
547 } 635 }
548 } 636 }
549 zfcp_free_sg_env(gpn_ft); 637 zfcp_free_sg_env(gpn_ft);
550 638out:
639 zfcp_wka_port_put(&adapter->nsp);
551 return ret; 640 return ret;
552} 641}
553 642
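
zfcp_scan_ports() above retries the GPN_FT fabric scan up to three times before giving up; the exact retry condition is outside this hunk, so the sketch below simply assumes an -EAGAIN-style "scan again" result from the evaluation step (demo_scan() and its callbacks are hypothetical):

#include <linux/errno.h>

static int demo_scan(int (*issue)(void), int (*evaluate)(void))
{
        int i, ret = -EIO;

        for (i = 0; i < 3; i++) {       /* same bound as the loop above */
                ret = issue();
                if (ret)
                        break;
                ret = evaluate();
                if (ret != -EAGAIN)     /* hypothetical "scan again" result */
                        break;
        }
        return ret;
}
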
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 49dbeb754e5f..739356a5c123 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -50,19 +50,16 @@ static u32 fsf_qtcb_type[] = {
50 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND 50 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
51}; 51};
52 52
53static const char *zfcp_act_subtable_type[] = {
54 "unknown", "OS", "WWPN", "DID", "LUN"
55};
56
57static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table) 53static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
58{ 54{
59 u16 subtable = table >> 16; 55 u16 subtable = table >> 16;
60 u16 rule = table & 0xffff; 56 u16 rule = table & 0xffff;
57 const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
61 58
62 if (subtable && subtable < ARRAY_SIZE(zfcp_act_subtable_type)) 59 if (subtable && subtable < ARRAY_SIZE(act_type))
63 dev_warn(&adapter->ccw_device->dev, 60 dev_warn(&adapter->ccw_device->dev,
64 "Access denied in subtable %s, rule %d.\n", 61 "Access denied according to ACT rule type %s, "
65 zfcp_act_subtable_type[subtable], rule); 62 "rule %d\n", act_type[subtable], rule);
66} 63}
67 64
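
The rewritten zfcp_act_eval_err() above keeps the name table local and checks the index against ARRAY_SIZE() before using it. The same bounds-checked lookup as a stand-alone sketch (demo_report_act_rule() is hypothetical; plain printk() is used to stay version-neutral):

#include <linux/kernel.h>       /* ARRAY_SIZE */
#include <linux/types.h>

static void demo_report_act_rule(u16 subtable, u16 rule)
{
        const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };

        /* index only after the bounds check; subtable 0 means "no entry" */
        if (subtable && subtable < ARRAY_SIZE(act_type))
                printk(KERN_WARNING "access denied, %s rule %d\n",
                       act_type[subtable], rule);
}
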
68static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req, 65static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
@@ -70,8 +67,8 @@ static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
70{ 67{
71 struct fsf_qtcb_header *header = &req->qtcb->header; 68 struct fsf_qtcb_header *header = &req->qtcb->header;
72 dev_warn(&req->adapter->ccw_device->dev, 69 dev_warn(&req->adapter->ccw_device->dev,
73 "Access denied, cannot send command to port 0x%016Lx.\n", 70 "Access denied to port 0x%016Lx\n",
74 port->wwpn); 71 (unsigned long long)port->wwpn);
75 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); 72 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
76 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); 73 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
77 zfcp_erp_port_access_denied(port, 55, req); 74 zfcp_erp_port_access_denied(port, 55, req);
@@ -83,8 +80,9 @@ static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
83{ 80{
84 struct fsf_qtcb_header *header = &req->qtcb->header; 81 struct fsf_qtcb_header *header = &req->qtcb->header;
85 dev_warn(&req->adapter->ccw_device->dev, 82 dev_warn(&req->adapter->ccw_device->dev,
86 "Access denied for unit 0x%016Lx on port 0x%016Lx.\n", 83 "Access denied to unit 0x%016Lx on port 0x%016Lx\n",
87 unit->fcp_lun, unit->port->wwpn); 84 (unsigned long long)unit->fcp_lun,
85 (unsigned long long)unit->port->wwpn);
88 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); 86 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
89 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); 87 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
90 zfcp_erp_unit_access_denied(unit, 59, req); 88 zfcp_erp_unit_access_denied(unit, 59, req);
@@ -93,9 +91,8 @@ static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
93 91
94static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) 92static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
95{ 93{
96 dev_err(&req->adapter->ccw_device->dev, 94 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
97 "Required FC class not supported by adapter, " 95 "operational because of an unsupported FC class\n");
98 "shutting down adapter.\n");
99 zfcp_erp_adapter_shutdown(req->adapter, 0, 123, req); 96 zfcp_erp_adapter_shutdown(req->adapter, 0, 123, req);
100 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 97 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
101} 98}
@@ -171,42 +168,6 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
171 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 168 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
172} 169}
173 170
174static void zfcp_fsf_bit_error_threshold(struct zfcp_fsf_req *req)
175{
176 struct zfcp_adapter *adapter = req->adapter;
177 struct fsf_status_read_buffer *sr_buf = req->data;
178 struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
179
180 dev_warn(&adapter->ccw_device->dev,
181 "Warning: bit error threshold data "
182 "received for the adapter: "
183 "link failures = %i, loss of sync errors = %i, "
184 "loss of signal errors = %i, "
185 "primitive sequence errors = %i, "
186 "invalid transmission word errors = %i, "
187 "CRC errors = %i).\n",
188 err->link_failure_error_count,
189 err->loss_of_sync_error_count,
190 err->loss_of_signal_error_count,
191 err->primitive_sequence_error_count,
192 err->invalid_transmission_word_error_count,
193 err->crc_error_count);
194 dev_warn(&adapter->ccw_device->dev,
195 "Additional bit error threshold data of the adapter: "
196 "primitive sequence event time-outs = %i, "
197 "elastic buffer overrun errors = %i, "
198 "advertised receive buffer-to-buffer credit = %i, "
199 "current receice buffer-to-buffer credit = %i, "
200 "advertised transmit buffer-to-buffer credit = %i, "
201 "current transmit buffer-to-buffer credit = %i).\n",
202 err->primitive_sequence_event_timeout_count,
203 err->elastic_buffer_overrun_error_count,
204 err->advertised_receive_b2b_credit,
205 err->current_receive_b2b_credit,
206 err->advertised_transmit_b2b_credit,
207 err->current_transmit_b2b_credit);
208}
209
210static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id, 171static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id,
211 struct fsf_link_down_info *link_down) 172 struct fsf_link_down_info *link_down)
212{ 173{
@@ -223,62 +184,66 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id,
223 switch (link_down->error_code) { 184 switch (link_down->error_code) {
224 case FSF_PSQ_LINK_NO_LIGHT: 185 case FSF_PSQ_LINK_NO_LIGHT:
225 dev_warn(&req->adapter->ccw_device->dev, 186 dev_warn(&req->adapter->ccw_device->dev,
226 "The local link is down: no light detected.\n"); 187 "There is no light signal from the local "
188 "fibre channel cable\n");
227 break; 189 break;
228 case FSF_PSQ_LINK_WRAP_PLUG: 190 case FSF_PSQ_LINK_WRAP_PLUG:
229 dev_warn(&req->adapter->ccw_device->dev, 191 dev_warn(&req->adapter->ccw_device->dev,
230 "The local link is down: wrap plug detected.\n"); 192 "There is a wrap plug instead of a fibre "
193 "channel cable\n");
231 break; 194 break;
232 case FSF_PSQ_LINK_NO_FCP: 195 case FSF_PSQ_LINK_NO_FCP:
233 dev_warn(&req->adapter->ccw_device->dev, 196 dev_warn(&req->adapter->ccw_device->dev,
234 "The local link is down: " 197 "The adjacent fibre channel node does not "
235 "adjacent node on link does not support FCP.\n"); 198 "support FCP\n");
236 break; 199 break;
237 case FSF_PSQ_LINK_FIRMWARE_UPDATE: 200 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
238 dev_warn(&req->adapter->ccw_device->dev, 201 dev_warn(&req->adapter->ccw_device->dev,
239 "The local link is down: " 202 "The FCP device is suspended because of a "
240 "firmware update in progress.\n"); 203 "firmware update\n");
241 break; 204 break;
242 case FSF_PSQ_LINK_INVALID_WWPN: 205 case FSF_PSQ_LINK_INVALID_WWPN:
243 dev_warn(&req->adapter->ccw_device->dev, 206 dev_warn(&req->adapter->ccw_device->dev,
244 "The local link is down: " 207 "The FCP device detected a WWPN that is "
245 "duplicate or invalid WWPN detected.\n"); 208 "duplicate or not valid\n");
246 break; 209 break;
247 case FSF_PSQ_LINK_NO_NPIV_SUPPORT: 210 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
248 dev_warn(&req->adapter->ccw_device->dev, 211 dev_warn(&req->adapter->ccw_device->dev,
249 "The local link is down: " 212 "The fibre channel fabric does not support NPIV\n");
250 "no support for NPIV by Fabric.\n");
251 break; 213 break;
252 case FSF_PSQ_LINK_NO_FCP_RESOURCES: 214 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
253 dev_warn(&req->adapter->ccw_device->dev, 215 dev_warn(&req->adapter->ccw_device->dev,
254 "The local link is down: " 216 "The FCP adapter cannot support more NPIV ports\n");
255 "out of resource in FCP daughtercard.\n");
256 break; 217 break;
257 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES: 218 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
258 dev_warn(&req->adapter->ccw_device->dev, 219 dev_warn(&req->adapter->ccw_device->dev,
259 "The local link is down: " 220 "The adjacent switch cannot support "
260 "out of resource in Fabric.\n"); 221 "more NPIV ports\n");
261 break; 222 break;
262 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE: 223 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
263 dev_warn(&req->adapter->ccw_device->dev, 224 dev_warn(&req->adapter->ccw_device->dev,
264 "The local link is down: " 225 "The FCP adapter could not log in to the "
265 "unable to login to Fabric.\n"); 226 "fibre channel fabric\n");
266 break; 227 break;
267 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED: 228 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
268 dev_warn(&req->adapter->ccw_device->dev, 229 dev_warn(&req->adapter->ccw_device->dev,
269 "WWPN assignment file corrupted on adapter.\n"); 230 "The WWPN assignment file on the FCP adapter "
231 "has been damaged\n");
270 break; 232 break;
271 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED: 233 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
272 dev_warn(&req->adapter->ccw_device->dev, 234 dev_warn(&req->adapter->ccw_device->dev,
273 "Mode table corrupted on adapter.\n"); 235 "The mode table on the FCP adapter "
236 "has been damaged\n");
274 break; 237 break;
275 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT: 238 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
276 dev_warn(&req->adapter->ccw_device->dev, 239 dev_warn(&req->adapter->ccw_device->dev,
277 "No WWPN for assignment table on adapter.\n"); 240 "All NPIV ports on the FCP adapter have "
241 "been assigned\n");
278 break; 242 break;
279 default: 243 default:
280 dev_warn(&req->adapter->ccw_device->dev, 244 dev_warn(&req->adapter->ccw_device->dev,
281 "The local link to adapter is down.\n"); 245 "The link between the FCP adapter and "
246 "the FC fabric is down\n");
282 } 247 }
283out: 248out:
284 zfcp_erp_adapter_failed(adapter, id, req); 249 zfcp_erp_adapter_failed(adapter, id, req);
@@ -286,27 +251,18 @@ out:
286 251
287static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) 252static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
288{ 253{
289 struct zfcp_adapter *adapter = req->adapter;
290 struct fsf_status_read_buffer *sr_buf = req->data; 254 struct fsf_status_read_buffer *sr_buf = req->data;
291 struct fsf_link_down_info *ldi = 255 struct fsf_link_down_info *ldi =
292 (struct fsf_link_down_info *) &sr_buf->payload; 256 (struct fsf_link_down_info *) &sr_buf->payload;
293 257
294 switch (sr_buf->status_subtype) { 258 switch (sr_buf->status_subtype) {
295 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: 259 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
296 dev_warn(&adapter->ccw_device->dev,
297 "Physical link is down.\n");
298 zfcp_fsf_link_down_info_eval(req, 38, ldi); 260 zfcp_fsf_link_down_info_eval(req, 38, ldi);
299 break; 261 break;
300 case FSF_STATUS_READ_SUB_FDISC_FAILED: 262 case FSF_STATUS_READ_SUB_FDISC_FAILED:
301 dev_warn(&adapter->ccw_device->dev,
302 "Local link is down "
303 "due to failed FDISC login.\n");
304 zfcp_fsf_link_down_info_eval(req, 39, ldi); 263 zfcp_fsf_link_down_info_eval(req, 39, ldi);
305 break; 264 break;
306 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: 265 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
307 dev_warn(&adapter->ccw_device->dev,
308 "Local link is down "
309 "due to firmware update on adapter.\n");
310 zfcp_fsf_link_down_info_eval(req, 40, NULL); 266 zfcp_fsf_link_down_info_eval(req, 40, NULL);
311 }; 267 };
312} 268}
@@ -335,14 +291,17 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
335 case FSF_STATUS_READ_SENSE_DATA_AVAIL: 291 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
336 break; 292 break;
337 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: 293 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
338 zfcp_fsf_bit_error_threshold(req); 294 dev_warn(&adapter->ccw_device->dev,
295 "The error threshold for checksum statistics "
296 "has been exceeded\n");
297 zfcp_hba_dbf_event_berr(adapter, req);
339 break; 298 break;
340 case FSF_STATUS_READ_LINK_DOWN: 299 case FSF_STATUS_READ_LINK_DOWN:
341 zfcp_fsf_status_read_link_down(req); 300 zfcp_fsf_status_read_link_down(req);
342 break; 301 break;
343 case FSF_STATUS_READ_LINK_UP: 302 case FSF_STATUS_READ_LINK_UP:
344 dev_info(&adapter->ccw_device->dev, 303 dev_info(&adapter->ccw_device->dev,
345 "Local link was replugged.\n"); 304 "The local link has been restored\n");
346 /* All ports should be marked as ready to run again */ 305 /* All ports should be marked as ready to run again */
347 zfcp_erp_modify_adapter_status(adapter, 30, NULL, 306 zfcp_erp_modify_adapter_status(adapter, 30, NULL,
348 ZFCP_STATUS_COMMON_RUNNING, 307 ZFCP_STATUS_COMMON_RUNNING,
@@ -370,7 +329,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
370 zfcp_fsf_req_free(req); 329 zfcp_fsf_req_free(req);
371 330
372 atomic_inc(&adapter->stat_miss); 331 atomic_inc(&adapter->stat_miss);
373 schedule_work(&adapter->stat_work); 332 queue_work(zfcp_data.work_queue, &adapter->stat_work);
374} 333}
375 334
376static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) 335static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
@@ -386,8 +345,8 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
386 break; 345 break;
387 case FSF_SQ_NO_RECOM: 346 case FSF_SQ_NO_RECOM:
388 dev_err(&req->adapter->ccw_device->dev, 347 dev_err(&req->adapter->ccw_device->dev,
389 "No recommendation could be given for a " 348 "The FCP adapter reported a problem "
390 "problem on the adapter.\n"); 349 "that cannot be recovered\n");
391 zfcp_erp_adapter_shutdown(req->adapter, 0, 121, req); 350 zfcp_erp_adapter_shutdown(req->adapter, 0, 121, req);
392 break; 351 break;
393 } 352 }
@@ -403,8 +362,7 @@ static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
403 switch (req->qtcb->header.fsf_status) { 362 switch (req->qtcb->header.fsf_status) {
404 case FSF_UNKNOWN_COMMAND: 363 case FSF_UNKNOWN_COMMAND:
405 dev_err(&req->adapter->ccw_device->dev, 364 dev_err(&req->adapter->ccw_device->dev,
406 "Command issued by the device driver (0x%x) is " 365 "The FCP adapter does not recognize the command 0x%x\n",
407 "not known by the adapter.\n",
408 req->qtcb->header.fsf_command); 366 req->qtcb->header.fsf_command);
409 zfcp_erp_adapter_shutdown(req->adapter, 0, 120, req); 367 zfcp_erp_adapter_shutdown(req->adapter, 0, 120, req);
410 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 368 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -435,11 +393,9 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
435 return; 393 return;
436 case FSF_PROT_QTCB_VERSION_ERROR: 394 case FSF_PROT_QTCB_VERSION_ERROR:
437 dev_err(&adapter->ccw_device->dev, 395 dev_err(&adapter->ccw_device->dev,
438 "The QTCB version requested by zfcp (0x%x) is not " 396 "QTCB version 0x%x not supported by FCP adapter "
439 "supported by the FCP adapter (lowest supported " 397 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
440 "0x%x, highest supported 0x%x).\n", 398 psq->word[0], psq->word[1]);
441 FSF_QTCB_CURRENT_VERSION, psq->word[0],
442 psq->word[1]);
443 zfcp_erp_adapter_shutdown(adapter, 0, 117, req); 399 zfcp_erp_adapter_shutdown(adapter, 0, 117, req);
444 break; 400 break;
445 case FSF_PROT_ERROR_STATE: 401 case FSF_PROT_ERROR_STATE:
@@ -449,8 +405,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
449 break; 405 break;
450 case FSF_PROT_UNSUPP_QTCB_TYPE: 406 case FSF_PROT_UNSUPP_QTCB_TYPE:
451 dev_err(&adapter->ccw_device->dev, 407 dev_err(&adapter->ccw_device->dev,
452 "Packet header type used by the device driver is " 408 "The QTCB type is not supported by the FCP adapter\n");
453 "incompatible with that used on the adapter.\n");
454 zfcp_erp_adapter_shutdown(adapter, 0, 118, req); 409 zfcp_erp_adapter_shutdown(adapter, 0, 118, req);
455 break; 410 break;
456 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 411 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
@@ -459,7 +414,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
459 break; 414 break;
460 case FSF_PROT_DUPLICATE_REQUEST_ID: 415 case FSF_PROT_DUPLICATE_REQUEST_ID:
461 dev_err(&adapter->ccw_device->dev, 416 dev_err(&adapter->ccw_device->dev,
462 "The request identifier 0x%Lx is ambiguous.\n", 417 "0x%Lx is an ambiguous request identifier\n",
463 (unsigned long long)qtcb->bottom.support.req_handle); 418 (unsigned long long)qtcb->bottom.support.req_handle);
464 zfcp_erp_adapter_shutdown(adapter, 0, 78, req); 419 zfcp_erp_adapter_shutdown(adapter, 0, 78, req);
465 break; 420 break;
@@ -479,9 +434,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
479 break; 434 break;
480 default: 435 default:
481 dev_err(&adapter->ccw_device->dev, 436 dev_err(&adapter->ccw_device->dev,
482 "Transfer protocol status information" 437 "0x%x is not a valid transfer protocol status\n",
483 "provided by the adapter (0x%x) "
484 "is not compatible with the device driver.\n",
485 qtcb->prefix.prot_status); 438 qtcb->prefix.prot_status);
486 zfcp_erp_adapter_shutdown(adapter, 0, 119, req); 439 zfcp_erp_adapter_shutdown(adapter, 0, 119, req);
487 } 440 }
@@ -559,33 +512,17 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
559 adapter->peer_wwpn = bottom->plogi_payload.wwpn; 512 adapter->peer_wwpn = bottom->plogi_payload.wwpn;
560 adapter->peer_wwnn = bottom->plogi_payload.wwnn; 513 adapter->peer_wwnn = bottom->plogi_payload.wwnn;
561 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 514 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
562 if (req->erp_action)
563 dev_info(&adapter->ccw_device->dev,
564 "Point-to-Point fibrechannel "
565 "configuration detected.\n");
566 break; 515 break;
567 case FSF_TOPO_FABRIC: 516 case FSF_TOPO_FABRIC:
568 fc_host_port_type(shost) = FC_PORTTYPE_NPORT; 517 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
569 if (req->erp_action)
570 dev_info(&adapter->ccw_device->dev,
571 "Switched fabric fibrechannel "
572 "network detected.\n");
573 break; 518 break;
574 case FSF_TOPO_AL: 519 case FSF_TOPO_AL:
575 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 520 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
576 dev_err(&adapter->ccw_device->dev,
577 "Unsupported arbitrated loop fibrechannel "
578 "topology detected, shutting down "
579 "adapter.\n");
580 zfcp_erp_adapter_shutdown(adapter, 0, 127, req);
581 return -EIO;
582 default: 521 default:
583 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
584 dev_err(&adapter->ccw_device->dev, 522 dev_err(&adapter->ccw_device->dev,
585 "The fibrechannel topology reported by the" 523 "Unknown or unsupported arbitrated loop "
586 " adapter is not known by the zfcp driver," 524 "fibre channel topology detected\n");
587 " shutting down adapter.\n"); 525 zfcp_erp_adapter_shutdown(adapter, 0, 127, req);
588 zfcp_erp_adapter_shutdown(adapter, 0, 128, req);
589 return -EIO; 526 return -EIO;
590 } 527 }
591 528
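The hunk above folds the arbitrated-loop and unknown-topology branches into one error path. A compact restatement of the resulting switch behaviour, as a sketch based only on the arms visible in this hunk (the controlling expression lies outside the shown context):

/*
 * Topology handling in zfcp_fsf_exchange_config_evaluate() after this hunk:
 *
 *   FSF_TOPO_P2P    -> fc_host_port_type = FC_PORTTYPE_PTP (peer wwpn/wwnn
 *                      recorded just above), break
 *   FSF_TOPO_FABRIC -> fc_host_port_type = FC_PORTTYPE_NPORT, break
 *   FSF_TOPO_AL     -> fc_host_port_type = FC_PORTTYPE_NLPORT, then falls
 *                      through to the default error handling
 *   default         -> "Unknown or unsupported arbitrated loop ..." error,
 *                      zfcp_erp_adapter_shutdown(adapter, 0, 127, req),
 *                      return -EIO
 */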
@@ -616,11 +553,9 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
616 553
617 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) { 554 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
618 dev_err(&adapter->ccw_device->dev, 555 dev_err(&adapter->ccw_device->dev,
619 "Maximum QTCB size (%d bytes) allowed by " 556 "FCP adapter maximum QTCB size (%d bytes) "
620 "the adapter is lower than the minimum " 557 "is too small\n",
621 "required by the driver (%ld bytes).\n", 558 bottom->max_qtcb_size);
622 bottom->max_qtcb_size,
623 sizeof(struct fsf_qtcb));
624 zfcp_erp_adapter_shutdown(adapter, 0, 129, req); 559 zfcp_erp_adapter_shutdown(adapter, 0, 129, req);
625 return; 560 return;
626 } 561 }
@@ -656,15 +591,15 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
656 591
657 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) { 592 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
658 dev_err(&adapter->ccw_device->dev, 593 dev_err(&adapter->ccw_device->dev,
659 "The adapter only supports newer control block " 594 "The FCP adapter only supports newer "
660 "versions, try updated device driver.\n"); 595 "control block versions\n");
661 zfcp_erp_adapter_shutdown(adapter, 0, 125, req); 596 zfcp_erp_adapter_shutdown(adapter, 0, 125, req);
662 return; 597 return;
663 } 598 }
664 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) { 599 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
665 dev_err(&adapter->ccw_device->dev, 600 dev_err(&adapter->ccw_device->dev,
666 "The adapter only supports older control block " 601 "The FCP adapter only supports older "
667 "versions, consider a microcode upgrade.\n"); 602 "control block versions\n");
668 zfcp_erp_adapter_shutdown(adapter, 0, 126, req); 603 zfcp_erp_adapter_shutdown(adapter, 0, 126, req);
669 } 604 }
670} 605}
@@ -688,7 +623,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
688 623
689static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) 624static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
690{ 625{
691 struct zfcp_adapter *adapter = req->adapter;
692 struct fsf_qtcb *qtcb = req->qtcb; 626 struct fsf_qtcb *qtcb = req->qtcb;
693 627
694 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 628 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
@@ -697,38 +631,47 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
697 switch (qtcb->header.fsf_status) { 631 switch (qtcb->header.fsf_status) {
698 case FSF_GOOD: 632 case FSF_GOOD:
699 zfcp_fsf_exchange_port_evaluate(req); 633 zfcp_fsf_exchange_port_evaluate(req);
700 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
701 break; 634 break;
702 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 635 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
703 zfcp_fsf_exchange_port_evaluate(req); 636 zfcp_fsf_exchange_port_evaluate(req);
704 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
705 zfcp_fsf_link_down_info_eval(req, 43, 637 zfcp_fsf_link_down_info_eval(req, 43,
706 &qtcb->header.fsf_status_qual.link_down_info); 638 &qtcb->header.fsf_status_qual.link_down_info);
707 break; 639 break;
708 } 640 }
709} 641}
710 642
711static int zfcp_fsf_sbal_check(struct zfcp_qdio_queue *queue) 643static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter)
712{ 644{
713 spin_lock_bh(&queue->lock); 645 struct zfcp_qdio_queue *req_q = &adapter->req_q;
714 if (atomic_read(&queue->count)) 646
647 spin_lock_bh(&adapter->req_q_lock);
648 if (atomic_read(&req_q->count))
715 return 1; 649 return 1;
716 spin_unlock_bh(&queue->lock); 650 spin_unlock_bh(&adapter->req_q_lock);
717 return 0; 651 return 0;
718} 652}
719 653
654static int zfcp_fsf_sbal_available(struct zfcp_adapter *adapter)
655{
656 unsigned int count = atomic_read(&adapter->req_q.count);
657 if (!count)
658 atomic_inc(&adapter->qdio_outb_full);
659 return count > 0;
660}
661
720static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter) 662static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
721{ 663{
722 long ret; 664 long ret;
723 struct zfcp_qdio_queue *req_q = &adapter->req_q;
724 665
725 spin_unlock_bh(&req_q->lock); 666 spin_unlock_bh(&adapter->req_q_lock);
726 ret = wait_event_interruptible_timeout(adapter->request_wq, 667 ret = wait_event_interruptible_timeout(adapter->request_wq,
727 zfcp_fsf_sbal_check(req_q), 5 * HZ); 668 zfcp_fsf_sbal_check(adapter), 5 * HZ);
728 if (ret > 0) 669 if (ret > 0)
729 return 0; 670 return 0;
671 if (!ret)
672 atomic_inc(&adapter->qdio_outb_full);
730 673
731 spin_lock_bh(&req_q->lock); 674 spin_lock_bh(&adapter->req_q_lock);
732 return -EIO; 675 return -EIO;
733} 676}
734 677
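The two helpers above split the old queue check: zfcp_fsf_sbal_check() is the wait condition used while sleeping on request_wq and returns with req_q_lock held once a free SBAL exists, while zfcp_fsf_sbal_available() is the non-sleeping variant that also counts queue-full events in adapter->qdio_outb_full. A minimal sketch of the calling convention assumed by the rest of this patch, mirroring the pattern in zfcp_fsf_status_read() below (illustrative only, not part of the patch):

static int zfcp_fsf_example_request(struct zfcp_adapter *adapter)
{
	int retval = -EIO;

	spin_lock_bh(&adapter->req_q_lock);
	/* sleeps up to 5 seconds for a free SBAL; the lock is dropped
	 * while waiting and re-acquired before the helper returns */
	if (zfcp_fsf_req_sbal_get(adapter))
		goto out;

	/* ... create and send the FSF request here ... */
	retval = 0;
out:
	spin_unlock_bh(&adapter->req_q_lock);
	return retval;
}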
@@ -765,7 +708,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
765 u32 fsf_cmd, int req_flags, 708 u32 fsf_cmd, int req_flags,
766 mempool_t *pool) 709 mempool_t *pool)
767{ 710{
768 volatile struct qdio_buffer_element *sbale; 711 struct qdio_buffer_element *sbale;
769 712
770 struct zfcp_fsf_req *req; 713 struct zfcp_fsf_req *req;
771 struct zfcp_qdio_queue *req_q = &adapter->req_q; 714 struct zfcp_qdio_queue *req_q = &adapter->req_q;
@@ -867,10 +810,10 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter)
867{ 810{
868 struct zfcp_fsf_req *req; 811 struct zfcp_fsf_req *req;
869 struct fsf_status_read_buffer *sr_buf; 812 struct fsf_status_read_buffer *sr_buf;
870 volatile struct qdio_buffer_element *sbale; 813 struct qdio_buffer_element *sbale;
871 int retval = -EIO; 814 int retval = -EIO;
872 815
873 spin_lock_bh(&adapter->req_q.lock); 816 spin_lock_bh(&adapter->req_q_lock);
874 if (zfcp_fsf_req_sbal_get(adapter)) 817 if (zfcp_fsf_req_sbal_get(adapter))
875 goto out; 818 goto out;
876 819
@@ -910,7 +853,7 @@ failed_buf:
910 zfcp_fsf_req_free(req); 853 zfcp_fsf_req_free(req);
911 zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); 854 zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
912out: 855out:
913 spin_unlock_bh(&adapter->req_q.lock); 856 spin_unlock_bh(&adapter->req_q_lock);
914 return retval; 857 return retval;
915} 858}
916 859
@@ -980,11 +923,11 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
980 struct zfcp_unit *unit, 923 struct zfcp_unit *unit,
981 int req_flags) 924 int req_flags)
982{ 925{
983 volatile struct qdio_buffer_element *sbale; 926 struct qdio_buffer_element *sbale;
984 struct zfcp_fsf_req *req = NULL; 927 struct zfcp_fsf_req *req = NULL;
985 928
986 spin_lock(&adapter->req_q.lock); 929 spin_lock(&adapter->req_q_lock);
987 if (!atomic_read(&adapter->req_q.count)) 930 if (!zfcp_fsf_sbal_available(adapter))
988 goto out; 931 goto out;
989 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, 932 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
990 req_flags, adapter->pool.fsf_req_abort); 933 req_flags, adapter->pool.fsf_req_abort);
@@ -1013,7 +956,7 @@ out_error_free:
1013 zfcp_fsf_req_free(req); 956 zfcp_fsf_req_free(req);
1014 req = NULL; 957 req = NULL;
1015out: 958out:
1016 spin_unlock(&adapter->req_q.lock); 959 spin_unlock(&adapter->req_q_lock);
1017 return req; 960 return req;
1018} 961}
1019 962
@@ -1021,7 +964,6 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
1021{ 964{
1022 struct zfcp_adapter *adapter = req->adapter; 965 struct zfcp_adapter *adapter = req->adapter;
1023 struct zfcp_send_ct *send_ct = req->data; 966 struct zfcp_send_ct *send_ct = req->data;
1024 struct zfcp_port *port = send_ct->port;
1025 struct fsf_qtcb_header *header = &req->qtcb->header; 967 struct fsf_qtcb_header *header = &req->qtcb->header;
1026 968
1027 send_ct->status = -EINVAL; 969 send_ct->status = -EINVAL;
@@ -1040,17 +982,14 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
1040 case FSF_ADAPTER_STATUS_AVAILABLE: 982 case FSF_ADAPTER_STATUS_AVAILABLE:
1041 switch (header->fsf_status_qual.word[0]){ 983 switch (header->fsf_status_qual.word[0]){
1042 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 984 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1043 zfcp_test_link(port);
1044 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 985 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1045 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 986 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1046 break; 987 break;
1047 } 988 }
1048 break; 989 break;
1049 case FSF_ACCESS_DENIED: 990 case FSF_ACCESS_DENIED:
1050 zfcp_fsf_access_denied_port(req, port);
1051 break; 991 break;
1052 case FSF_PORT_BOXED: 992 case FSF_PORT_BOXED:
1053 zfcp_erp_port_boxed(port, 49, req);
1054 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 993 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1055 ZFCP_STATUS_FSFREQ_RETRY; 994 ZFCP_STATUS_FSFREQ_RETRY;
1056 break; 995 break;
@@ -1101,12 +1040,12 @@ static int zfcp_fsf_setup_sbals(struct zfcp_fsf_req *req,
1101int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, 1040int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1102 struct zfcp_erp_action *erp_action) 1041 struct zfcp_erp_action *erp_action)
1103{ 1042{
1104 struct zfcp_port *port = ct->port; 1043 struct zfcp_wka_port *wka_port = ct->wka_port;
1105 struct zfcp_adapter *adapter = port->adapter; 1044 struct zfcp_adapter *adapter = wka_port->adapter;
1106 struct zfcp_fsf_req *req; 1045 struct zfcp_fsf_req *req;
1107 int ret = -EIO; 1046 int ret = -EIO;
1108 1047
1109 spin_lock_bh(&adapter->req_q.lock); 1048 spin_lock_bh(&adapter->req_q_lock);
1110 if (zfcp_fsf_req_sbal_get(adapter)) 1049 if (zfcp_fsf_req_sbal_get(adapter))
1111 goto out; 1050 goto out;
1112 1051
@@ -1123,7 +1062,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1123 goto failed_send; 1062 goto failed_send;
1124 1063
1125 req->handler = zfcp_fsf_send_ct_handler; 1064 req->handler = zfcp_fsf_send_ct_handler;
1126 req->qtcb->header.port_handle = port->handle; 1065 req->qtcb->header.port_handle = wka_port->handle;
1127 req->qtcb->bottom.support.service_class = FSF_CLASS_3; 1066 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1128 req->qtcb->bottom.support.timeout = ct->timeout; 1067 req->qtcb->bottom.support.timeout = ct->timeout;
1129 req->data = ct; 1068 req->data = ct;
@@ -1148,7 +1087,7 @@ failed_send:
1148 if (erp_action) 1087 if (erp_action)
1149 erp_action->fsf_req = NULL; 1088 erp_action->fsf_req = NULL;
1150out: 1089out:
1151 spin_unlock_bh(&adapter->req_q.lock); 1090 spin_unlock_bh(&adapter->req_q_lock);
1152 return ret; 1091 return ret;
1153} 1092}
1154 1093
@@ -1218,8 +1157,8 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
1218 ZFCP_STATUS_COMMON_UNBLOCKED))) 1157 ZFCP_STATUS_COMMON_UNBLOCKED)))
1219 return -EBUSY; 1158 return -EBUSY;
1220 1159
1221 spin_lock(&adapter->req_q.lock); 1160 spin_lock(&adapter->req_q_lock);
1222 if (!atomic_read(&adapter->req_q.count)) 1161 if (!zfcp_fsf_sbal_available(adapter))
1223 goto out; 1162 goto out;
1224 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, 1163 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
1225 ZFCP_REQ_AUTO_CLEANUP, NULL); 1164 ZFCP_REQ_AUTO_CLEANUP, NULL);
@@ -1228,8 +1167,8 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
1228 goto out; 1167 goto out;
1229 } 1168 }
1230 1169
1231 ret = zfcp_fsf_setup_sbals(req, els->req, els->resp, 1170 ret = zfcp_fsf_setup_sbals(req, els->req, els->resp, 2);
1232 FSF_MAX_SBALS_PER_ELS_REQ); 1171
1233 if (ret) 1172 if (ret)
1234 goto failed_send; 1173 goto failed_send;
1235 1174
@@ -1252,19 +1191,19 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
1252failed_send: 1191failed_send:
1253 zfcp_fsf_req_free(req); 1192 zfcp_fsf_req_free(req);
1254out: 1193out:
1255 spin_unlock(&adapter->req_q.lock); 1194 spin_unlock(&adapter->req_q_lock);
1256 return ret; 1195 return ret;
1257} 1196}
1258 1197
1259int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) 1198int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1260{ 1199{
1261 volatile struct qdio_buffer_element *sbale; 1200 struct qdio_buffer_element *sbale;
1262 struct zfcp_fsf_req *req; 1201 struct zfcp_fsf_req *req;
1263 struct zfcp_adapter *adapter = erp_action->adapter; 1202 struct zfcp_adapter *adapter = erp_action->adapter;
1264 int retval = -EIO; 1203 int retval = -EIO;
1265 1204
1266 spin_lock_bh(&adapter->req_q.lock); 1205 spin_lock_bh(&adapter->req_q_lock);
1267 if (!atomic_read(&adapter->req_q.count)) 1206 if (!zfcp_fsf_sbal_available(adapter))
1268 goto out; 1207 goto out;
1269 req = zfcp_fsf_req_create(adapter, 1208 req = zfcp_fsf_req_create(adapter,
1270 FSF_QTCB_EXCHANGE_CONFIG_DATA, 1209 FSF_QTCB_EXCHANGE_CONFIG_DATA,
@@ -1295,18 +1234,18 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1295 erp_action->fsf_req = NULL; 1234 erp_action->fsf_req = NULL;
1296 } 1235 }
1297out: 1236out:
1298 spin_unlock_bh(&adapter->req_q.lock); 1237 spin_unlock_bh(&adapter->req_q_lock);
1299 return retval; 1238 return retval;
1300} 1239}
1301 1240
1302int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, 1241int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
1303 struct fsf_qtcb_bottom_config *data) 1242 struct fsf_qtcb_bottom_config *data)
1304{ 1243{
1305 volatile struct qdio_buffer_element *sbale; 1244 struct qdio_buffer_element *sbale;
1306 struct zfcp_fsf_req *req = NULL; 1245 struct zfcp_fsf_req *req = NULL;
1307 int retval = -EIO; 1246 int retval = -EIO;
1308 1247
1309 spin_lock_bh(&adapter->req_q.lock); 1248 spin_lock_bh(&adapter->req_q_lock);
1310 if (zfcp_fsf_req_sbal_get(adapter)) 1249 if (zfcp_fsf_req_sbal_get(adapter))
1311 goto out; 1250 goto out;
1312 1251
@@ -1334,7 +1273,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
1334 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1273 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1335 retval = zfcp_fsf_req_send(req); 1274 retval = zfcp_fsf_req_send(req);
1336out: 1275out:
1337 spin_unlock_bh(&adapter->req_q.lock); 1276 spin_unlock_bh(&adapter->req_q_lock);
1338 if (!retval) 1277 if (!retval)
1339 wait_event(req->completion_wq, 1278 wait_event(req->completion_wq,
1340 req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 1279 req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
@@ -1351,7 +1290,7 @@ out:
1351 */ 1290 */
1352int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) 1291int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1353{ 1292{
1354 volatile struct qdio_buffer_element *sbale; 1293 struct qdio_buffer_element *sbale;
1355 struct zfcp_fsf_req *req; 1294 struct zfcp_fsf_req *req;
1356 struct zfcp_adapter *adapter = erp_action->adapter; 1295 struct zfcp_adapter *adapter = erp_action->adapter;
1357 int retval = -EIO; 1296 int retval = -EIO;
@@ -1359,8 +1298,8 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1359 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1298 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1360 return -EOPNOTSUPP; 1299 return -EOPNOTSUPP;
1361 1300
1362 spin_lock_bh(&adapter->req_q.lock); 1301 spin_lock_bh(&adapter->req_q_lock);
1363 if (!atomic_read(&adapter->req_q.count)) 1302 if (!zfcp_fsf_sbal_available(adapter))
1364 goto out; 1303 goto out;
1365 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 1304 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
1366 ZFCP_REQ_AUTO_CLEANUP, 1305 ZFCP_REQ_AUTO_CLEANUP,
@@ -1385,7 +1324,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1385 erp_action->fsf_req = NULL; 1324 erp_action->fsf_req = NULL;
1386 } 1325 }
1387out: 1326out:
1388 spin_unlock_bh(&adapter->req_q.lock); 1327 spin_unlock_bh(&adapter->req_q_lock);
1389 return retval; 1328 return retval;
1390} 1329}
1391 1330
@@ -1398,15 +1337,15 @@ out:
1398int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, 1337int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
1399 struct fsf_qtcb_bottom_port *data) 1338 struct fsf_qtcb_bottom_port *data)
1400{ 1339{
1401 volatile struct qdio_buffer_element *sbale; 1340 struct qdio_buffer_element *sbale;
1402 struct zfcp_fsf_req *req = NULL; 1341 struct zfcp_fsf_req *req = NULL;
1403 int retval = -EIO; 1342 int retval = -EIO;
1404 1343
1405 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1344 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1406 return -EOPNOTSUPP; 1345 return -EOPNOTSUPP;
1407 1346
1408 spin_lock_bh(&adapter->req_q.lock); 1347 spin_lock_bh(&adapter->req_q_lock);
1409 if (!atomic_read(&adapter->req_q.count)) 1348 if (!zfcp_fsf_sbal_available(adapter))
1410 goto out; 1349 goto out;
1411 1350
1412 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0, 1351 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0,
@@ -1427,7 +1366,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
1427 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1366 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1428 retval = zfcp_fsf_req_send(req); 1367 retval = zfcp_fsf_req_send(req);
1429out: 1368out:
1430 spin_unlock_bh(&adapter->req_q.lock); 1369 spin_unlock_bh(&adapter->req_q_lock);
1431 if (!retval) 1370 if (!retval)
1432 wait_event(req->completion_wq, 1371 wait_event(req->completion_wq,
1433 req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 1372 req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
@@ -1443,7 +1382,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1443 struct fsf_plogi *plogi; 1382 struct fsf_plogi *plogi;
1444 1383
1445 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1384 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1446 goto skip_fsfstatus; 1385 return;
1447 1386
1448 switch (header->fsf_status) { 1387 switch (header->fsf_status) {
1449 case FSF_PORT_ALREADY_OPEN: 1388 case FSF_PORT_ALREADY_OPEN:
@@ -1453,9 +1392,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1453 break; 1392 break;
1454 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1393 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1455 dev_warn(&req->adapter->ccw_device->dev, 1394 dev_warn(&req->adapter->ccw_device->dev,
1456 "The adapter is out of resources. The remote port " 1395 "Not enough FCP adapter resources to open "
1457 "0x%016Lx could not be opened, disabling it.\n", 1396 "remote port 0x%016Lx\n",
1458 port->wwpn); 1397 (unsigned long long)port->wwpn);
1459 zfcp_erp_port_failed(port, 31, req); 1398 zfcp_erp_port_failed(port, 31, req);
1460 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1399 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1461 break; 1400 break;
@@ -1467,8 +1406,8 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1467 break; 1406 break;
1468 case FSF_SQ_NO_RETRY_POSSIBLE: 1407 case FSF_SQ_NO_RETRY_POSSIBLE:
1469 dev_warn(&req->adapter->ccw_device->dev, 1408 dev_warn(&req->adapter->ccw_device->dev,
1470 "The remote port 0x%016Lx could not be " 1409 "Remote port 0x%016Lx could not be opened\n",
1471 "opened. Disabling it.\n", port->wwpn); 1410 (unsigned long long)port->wwpn);
1472 zfcp_erp_port_failed(port, 32, req); 1411 zfcp_erp_port_failed(port, 32, req);
1473 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1412 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1474 break; 1413 break;
@@ -1496,9 +1435,6 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1496 * another GID_PN straight after a port has been opened. 1435 * another GID_PN straight after a port has been opened.
1497 * Alternately, an ADISC/PDISC ELS should suffice, as well. 1436 * Alternately, an ADISC/PDISC ELS should suffice, as well.
1498 */ 1437 */
1499 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_NO_WWPN)
1500 break;
1501
1502 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els; 1438 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
1503 if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) { 1439 if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) {
1504 if (plogi->serv_param.wwpn != port->wwpn) 1440 if (plogi->serv_param.wwpn != port->wwpn)
@@ -1514,9 +1450,6 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1514 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1450 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1515 break; 1451 break;
1516 } 1452 }
1517
1518skip_fsfstatus:
1519 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &port->status);
1520} 1453}
1521 1454
1522/** 1455/**
@@ -1526,12 +1459,12 @@ skip_fsfstatus:
1526 */ 1459 */
1527int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) 1460int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1528{ 1461{
1529 volatile struct qdio_buffer_element *sbale; 1462 struct qdio_buffer_element *sbale;
1530 struct zfcp_adapter *adapter = erp_action->adapter; 1463 struct zfcp_adapter *adapter = erp_action->adapter;
1531 struct zfcp_fsf_req *req; 1464 struct zfcp_fsf_req *req;
1532 int retval = -EIO; 1465 int retval = -EIO;
1533 1466
1534 spin_lock_bh(&adapter->req_q.lock); 1467 spin_lock_bh(&adapter->req_q_lock);
1535 if (zfcp_fsf_req_sbal_get(adapter)) 1468 if (zfcp_fsf_req_sbal_get(adapter))
1536 goto out; 1469 goto out;
1537 1470
@@ -1553,7 +1486,6 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1553 req->data = erp_action->port; 1486 req->data = erp_action->port;
1554 req->erp_action = erp_action; 1487 req->erp_action = erp_action;
1555 erp_action->fsf_req = req; 1488 erp_action->fsf_req = req;
1556 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
1557 1489
1558 zfcp_fsf_start_erp_timer(req); 1490 zfcp_fsf_start_erp_timer(req);
1559 retval = zfcp_fsf_req_send(req); 1491 retval = zfcp_fsf_req_send(req);
@@ -1562,7 +1494,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1562 erp_action->fsf_req = NULL; 1494 erp_action->fsf_req = NULL;
1563 } 1495 }
1564out: 1496out:
1565 spin_unlock_bh(&adapter->req_q.lock); 1497 spin_unlock_bh(&adapter->req_q_lock);
1566 return retval; 1498 return retval;
1567} 1499}
1568 1500
@@ -1571,7 +1503,7 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1571 struct zfcp_port *port = req->data; 1503 struct zfcp_port *port = req->data;
1572 1504
1573 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1505 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1574 goto skip_fsfstatus; 1506 return;
1575 1507
1576 switch (req->qtcb->header.fsf_status) { 1508 switch (req->qtcb->header.fsf_status) {
1577 case FSF_PORT_HANDLE_NOT_VALID: 1509 case FSF_PORT_HANDLE_NOT_VALID:
@@ -1586,9 +1518,6 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1586 ZFCP_CLEAR); 1518 ZFCP_CLEAR);
1587 break; 1519 break;
1588 } 1520 }
1589
1590skip_fsfstatus:
1591 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &port->status);
1592} 1521}
1593 1522
1594/** 1523/**
@@ -1598,12 +1527,12 @@ skip_fsfstatus:
1598 */ 1527 */
1599int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) 1528int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1600{ 1529{
1601 volatile struct qdio_buffer_element *sbale; 1530 struct qdio_buffer_element *sbale;
1602 struct zfcp_adapter *adapter = erp_action->adapter; 1531 struct zfcp_adapter *adapter = erp_action->adapter;
1603 struct zfcp_fsf_req *req; 1532 struct zfcp_fsf_req *req;
1604 int retval = -EIO; 1533 int retval = -EIO;
1605 1534
1606 spin_lock_bh(&adapter->req_q.lock); 1535 spin_lock_bh(&adapter->req_q_lock);
1607 if (zfcp_fsf_req_sbal_get(adapter)) 1536 if (zfcp_fsf_req_sbal_get(adapter))
1608 goto out; 1537 goto out;
1609 1538
@@ -1624,7 +1553,6 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1624 req->erp_action = erp_action; 1553 req->erp_action = erp_action;
1625 req->qtcb->header.port_handle = erp_action->port->handle; 1554 req->qtcb->header.port_handle = erp_action->port->handle;
1626 erp_action->fsf_req = req; 1555 erp_action->fsf_req = req;
1627 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
1628 1556
1629 zfcp_fsf_start_erp_timer(req); 1557 zfcp_fsf_start_erp_timer(req);
1630 retval = zfcp_fsf_req_send(req); 1558 retval = zfcp_fsf_req_send(req);
@@ -1633,7 +1561,131 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1633 erp_action->fsf_req = NULL; 1561 erp_action->fsf_req = NULL;
1634 } 1562 }
1635out: 1563out:
1636 spin_unlock_bh(&adapter->req_q.lock); 1564 spin_unlock_bh(&adapter->req_q_lock);
1565 return retval;
1566}
1567
1568static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1569{
1570 struct zfcp_wka_port *wka_port = req->data;
1571 struct fsf_qtcb_header *header = &req->qtcb->header;
1572
1573 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1574 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1575 goto out;
1576 }
1577
1578 switch (header->fsf_status) {
1579 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1580 dev_warn(&req->adapter->ccw_device->dev,
1581 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1582 case FSF_ADAPTER_STATUS_AVAILABLE:
1583 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1584 case FSF_ACCESS_DENIED:
1585 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1586 break;
1587 case FSF_PORT_ALREADY_OPEN:
1588 case FSF_GOOD:
1589 wka_port->handle = header->port_handle;
1590 wka_port->status = ZFCP_WKA_PORT_ONLINE;
1591 }
1592out:
1593 wake_up(&wka_port->completion_wq);
1594}
1595
1596/**
1597 * zfcp_fsf_open_wka_port - create and send open wka-port request
1598 * @wka_port: pointer to struct zfcp_wka_port
1599 * Returns: 0 on success, error otherwise
1600 */
1601int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
1602{
1603 struct qdio_buffer_element *sbale;
1604 struct zfcp_adapter *adapter = wka_port->adapter;
1605 struct zfcp_fsf_req *req;
1606 int retval = -EIO;
1607
1608 spin_lock_bh(&adapter->req_q_lock);
1609 if (zfcp_fsf_req_sbal_get(adapter))
1610 goto out;
1611
1612 req = zfcp_fsf_req_create(adapter,
1613 FSF_QTCB_OPEN_PORT_WITH_DID,
1614 ZFCP_REQ_AUTO_CLEANUP,
1615 adapter->pool.fsf_req_erp);
1616 if (unlikely(IS_ERR(req))) {
1617 retval = PTR_ERR(req);
1618 goto out;
1619 }
1620
1621 sbale = zfcp_qdio_sbale_req(req);
1622 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1623 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1624
1625 req->handler = zfcp_fsf_open_wka_port_handler;
1626 req->qtcb->bottom.support.d_id = wka_port->d_id;
1627 req->data = wka_port;
1628
1629 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1630 retval = zfcp_fsf_req_send(req);
1631 if (retval)
1632 zfcp_fsf_req_free(req);
1633out:
1634 spin_unlock_bh(&adapter->req_q_lock);
1635 return retval;
1636}
1637
1638static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1639{
1640 struct zfcp_wka_port *wka_port = req->data;
1641
1642 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1643 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1644 zfcp_erp_adapter_reopen(wka_port->adapter, 0, 84, req);
1645 }
1646
1647 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1648 wake_up(&wka_port->completion_wq);
1649}
1650
1651/**
1652 * zfcp_fsf_close_wka_port - create and send close wka port request
1653 * @erp_action: pointer to struct zfcp_erp_action
1654 * Returns: 0 on success, error otherwise
1655 */
1656int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
1657{
1658 struct qdio_buffer_element *sbale;
1659 struct zfcp_adapter *adapter = wka_port->adapter;
1660 struct zfcp_fsf_req *req;
1661 int retval = -EIO;
1662
1663 spin_lock_bh(&adapter->req_q_lock);
1664 if (zfcp_fsf_req_sbal_get(adapter))
1665 goto out;
1666
1667 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
1668 ZFCP_REQ_AUTO_CLEANUP,
1669 adapter->pool.fsf_req_erp);
1670 if (unlikely(IS_ERR(req))) {
1671 retval = PTR_ERR(req);
1672 goto out;
1673 }
1674
1675 sbale = zfcp_qdio_sbale_req(req);
1676 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1677 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1678
1679 req->handler = zfcp_fsf_close_wka_port_handler;
1680 req->data = wka_port;
1681 req->qtcb->header.port_handle = wka_port->handle;
1682
1683 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1684 retval = zfcp_fsf_req_send(req);
1685 if (retval)
1686 zfcp_fsf_req_free(req);
1687out:
1688 spin_unlock_bh(&adapter->req_q_lock);
1637 return retval; 1689 return retval;
1638} 1690}
1639 1691
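The new open and close WKA port requests complete asynchronously: the handlers above set wka_port->status to ZFCP_WKA_PORT_ONLINE or ZFCP_WKA_PORT_OFFLINE and wake completion_wq. The real caller lives outside this patch; the sketch below only illustrates the intended sequence and assumes the port does not already sit in one of those two states when the open is issued:

static int zfcp_wka_port_example(struct zfcp_wka_port *wka_port)
{
	if (zfcp_fsf_open_wka_port(wka_port))
		return -EIO;

	/* the handler sets ONLINE or OFFLINE and wakes the waiter */
	wait_event(wka_port->completion_wq,
		   wka_port->status == ZFCP_WKA_PORT_ONLINE ||
		   wka_port->status == ZFCP_WKA_PORT_OFFLINE);
	if (wka_port->status != ZFCP_WKA_PORT_ONLINE)
		return -EIO;

	/* ... issue CT/GS requests referencing wka_port->handle ... */

	if (zfcp_fsf_close_wka_port(wka_port) == 0)
		wait_event(wka_port->completion_wq,
			   wka_port->status == ZFCP_WKA_PORT_OFFLINE);
	return 0;
}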
@@ -1695,12 +1747,12 @@ skip_fsfstatus:
1695 */ 1747 */
1696int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) 1748int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1697{ 1749{
1698 volatile struct qdio_buffer_element *sbale; 1750 struct qdio_buffer_element *sbale;
1699 struct zfcp_adapter *adapter = erp_action->adapter; 1751 struct zfcp_adapter *adapter = erp_action->adapter;
1700 struct zfcp_fsf_req *req; 1752 struct zfcp_fsf_req *req;
1701 int retval = -EIO; 1753 int retval = -EIO;
1702 1754
1703 spin_lock_bh(&adapter->req_q.lock); 1755 spin_lock_bh(&adapter->req_q_lock);
1704 if (zfcp_fsf_req_sbal_get(adapter)) 1756 if (zfcp_fsf_req_sbal_get(adapter))
1705 goto out; 1757 goto out;
1706 1758
@@ -1731,7 +1783,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1731 erp_action->fsf_req = NULL; 1783 erp_action->fsf_req = NULL;
1732 } 1784 }
1733out: 1785out:
1734 spin_unlock_bh(&adapter->req_q.lock); 1786 spin_unlock_bh(&adapter->req_q_lock);
1735 return retval; 1787 return retval;
1736} 1788}
1737 1789
@@ -1746,7 +1798,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1746 int exclusive, readwrite; 1798 int exclusive, readwrite;
1747 1799
1748 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1800 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1749 goto skip_fsfstatus; 1801 return;
1750 1802
1751 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 1803 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1752 ZFCP_STATUS_COMMON_ACCESS_BOXED | 1804 ZFCP_STATUS_COMMON_ACCESS_BOXED |
@@ -1774,14 +1826,12 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1774 case FSF_LUN_SHARING_VIOLATION: 1826 case FSF_LUN_SHARING_VIOLATION:
1775 if (header->fsf_status_qual.word[0]) 1827 if (header->fsf_status_qual.word[0])
1776 dev_warn(&adapter->ccw_device->dev, 1828 dev_warn(&adapter->ccw_device->dev,
1777 "FCP-LUN 0x%Lx at the remote port " 1829 "LUN 0x%Lx on port 0x%Lx is already in "
1778 "with WWPN 0x%Lx " 1830 "use by CSS%d, MIF Image ID %x\n",
1779 "connected to the adapter " 1831 (unsigned long long)unit->fcp_lun,
1780 "is already in use in LPAR%d, CSS%d.\n", 1832 (unsigned long long)unit->port->wwpn,
1781 unit->fcp_lun, 1833 queue_designator->cssid,
1782 unit->port->wwpn, 1834 queue_designator->hla);
1783 queue_designator->hla,
1784 queue_designator->cssid);
1785 else 1835 else
1786 zfcp_act_eval_err(adapter, 1836 zfcp_act_eval_err(adapter,
1787 header->fsf_status_qual.word[2]); 1837 header->fsf_status_qual.word[2]);
@@ -1792,9 +1842,10 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1792 break; 1842 break;
1793 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: 1843 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1794 dev_warn(&adapter->ccw_device->dev, 1844 dev_warn(&adapter->ccw_device->dev,
1795 "The adapter ran out of resources. There is no " 1845 "No handle is available for LUN "
1796 "handle available for unit 0x%016Lx on port 0x%016Lx.", 1846 "0x%016Lx on port 0x%016Lx\n",
1797 unit->fcp_lun, unit->port->wwpn); 1847 (unsigned long long)unit->fcp_lun,
1848 (unsigned long long)unit->port->wwpn);
1798 zfcp_erp_unit_failed(unit, 34, req); 1849 zfcp_erp_unit_failed(unit, 34, req);
1799 /* fall through */ 1850 /* fall through */
1800 case FSF_INVALID_COMMAND_OPTION: 1851 case FSF_INVALID_COMMAND_OPTION:
@@ -1831,26 +1882,29 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1831 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY, 1882 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
1832 &unit->status); 1883 &unit->status);
1833 dev_info(&adapter->ccw_device->dev, 1884 dev_info(&adapter->ccw_device->dev,
1834 "Read-only access for unit 0x%016Lx " 1885 "SCSI device at LUN 0x%016Lx on port "
1835 "on port 0x%016Lx.\n", 1886 "0x%016Lx opened read-only\n",
1836 unit->fcp_lun, unit->port->wwpn); 1887 (unsigned long long)unit->fcp_lun,
1888 (unsigned long long)unit->port->wwpn);
1837 } 1889 }
1838 1890
1839 if (exclusive && !readwrite) { 1891 if (exclusive && !readwrite) {
1840 dev_err(&adapter->ccw_device->dev, 1892 dev_err(&adapter->ccw_device->dev,
1841 "Exclusive access of read-only unit " 1893 "Exclusive read-only access not "
1842 "0x%016Lx on port 0x%016Lx not " 1894 "supported (unit 0x%016Lx, "
1843 "supported, disabling unit.\n", 1895 "port 0x%016Lx)\n",
1844 unit->fcp_lun, unit->port->wwpn); 1896 (unsigned long long)unit->fcp_lun,
1897 (unsigned long long)unit->port->wwpn);
1845 zfcp_erp_unit_failed(unit, 35, req); 1898 zfcp_erp_unit_failed(unit, 35, req);
1846 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1899 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1847 zfcp_erp_unit_shutdown(unit, 0, 80, req); 1900 zfcp_erp_unit_shutdown(unit, 0, 80, req);
1848 } else if (!exclusive && readwrite) { 1901 } else if (!exclusive && readwrite) {
1849 dev_err(&adapter->ccw_device->dev, 1902 dev_err(&adapter->ccw_device->dev,
1850 "Shared access of read-write unit " 1903 "Shared read-write access not "
1851 "0x%016Lx on port 0x%016Lx not " 1904 "supported (unit 0x%016Lx, port "
1852 "supported, disabling unit.\n", 1905 "0x%016Lx\n)",
1853 unit->fcp_lun, unit->port->wwpn); 1906 (unsigned long long)unit->fcp_lun,
1907 (unsigned long long)unit->port->wwpn);
1854 zfcp_erp_unit_failed(unit, 36, req); 1908 zfcp_erp_unit_failed(unit, 36, req);
1855 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1909 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1856 zfcp_erp_unit_shutdown(unit, 0, 81, req); 1910 zfcp_erp_unit_shutdown(unit, 0, 81, req);
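The reworked messages above cover the access-mode checks for a freshly opened LUN. In short, with the local flags exclusive and readwrite derived from the FSF_UNIT_ACCESS_* bits earlier in this handler (the read-only branch condition sits just above the visible hunk):

/*
 *   not readwrite            -> unit flagged ZFCP_STATUS_UNIT_READONLY and
 *                               reported as opened read-only
 *   exclusive && !readwrite  -> exclusive read-only access is unsupported:
 *                               zfcp_erp_unit_failed(.., 35, ..) and
 *                               zfcp_erp_unit_shutdown(.., 80, ..)
 *   !exclusive && readwrite  -> shared read-write access is unsupported:
 *                               zfcp_erp_unit_failed(.., 36, ..) and
 *                               zfcp_erp_unit_shutdown(.., 81, ..)
 */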
@@ -1858,9 +1912,6 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1858 } 1912 }
1859 break; 1913 break;
1860 } 1914 }
1861
1862skip_fsfstatus:
1863 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &unit->status);
1864} 1915}
1865 1916
1866/** 1917/**
@@ -1870,12 +1921,12 @@ skip_fsfstatus:
1870 */ 1921 */
1871int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) 1922int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1872{ 1923{
1873 volatile struct qdio_buffer_element *sbale; 1924 struct qdio_buffer_element *sbale;
1874 struct zfcp_adapter *adapter = erp_action->adapter; 1925 struct zfcp_adapter *adapter = erp_action->adapter;
1875 struct zfcp_fsf_req *req; 1926 struct zfcp_fsf_req *req;
1876 int retval = -EIO; 1927 int retval = -EIO;
1877 1928
1878 spin_lock_bh(&adapter->req_q.lock); 1929 spin_lock_bh(&adapter->req_q_lock);
1879 if (zfcp_fsf_req_sbal_get(adapter)) 1930 if (zfcp_fsf_req_sbal_get(adapter))
1880 goto out; 1931 goto out;
1881 1932
@@ -1901,8 +1952,6 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1901 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) 1952 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1902 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING; 1953 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1903 1954
1904 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
1905
1906 zfcp_fsf_start_erp_timer(req); 1955 zfcp_fsf_start_erp_timer(req);
1907 retval = zfcp_fsf_req_send(req); 1956 retval = zfcp_fsf_req_send(req);
1908 if (retval) { 1957 if (retval) {
@@ -1910,7 +1959,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1910 erp_action->fsf_req = NULL; 1959 erp_action->fsf_req = NULL;
1911 } 1960 }
1912out: 1961out:
1913 spin_unlock_bh(&adapter->req_q.lock); 1962 spin_unlock_bh(&adapter->req_q_lock);
1914 return retval; 1963 return retval;
1915} 1964}
1916 1965
@@ -1919,7 +1968,7 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
1919 struct zfcp_unit *unit = req->data; 1968 struct zfcp_unit *unit = req->data;
1920 1969
1921 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1970 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1922 goto skip_fsfstatus; 1971 return;
1923 1972
1924 switch (req->qtcb->header.fsf_status) { 1973 switch (req->qtcb->header.fsf_status) {
1925 case FSF_PORT_HANDLE_NOT_VALID: 1974 case FSF_PORT_HANDLE_NOT_VALID:
@@ -1949,8 +1998,6 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
1949 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 1998 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
1950 break; 1999 break;
1951 } 2000 }
1952skip_fsfstatus:
1953 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &unit->status);
1954} 2001}
1955 2002
1956/** 2003/**
@@ -1960,12 +2007,12 @@ skip_fsfstatus:
1960 */ 2007 */
1961int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) 2008int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
1962{ 2009{
1963 volatile struct qdio_buffer_element *sbale; 2010 struct qdio_buffer_element *sbale;
1964 struct zfcp_adapter *adapter = erp_action->adapter; 2011 struct zfcp_adapter *adapter = erp_action->adapter;
1965 struct zfcp_fsf_req *req; 2012 struct zfcp_fsf_req *req;
1966 int retval = -EIO; 2013 int retval = -EIO;
1967 2014
1968 spin_lock_bh(&adapter->req_q.lock); 2015 spin_lock_bh(&adapter->req_q_lock);
1969 if (zfcp_fsf_req_sbal_get(adapter)) 2016 if (zfcp_fsf_req_sbal_get(adapter))
1970 goto out; 2017 goto out;
1971 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN, 2018 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN,
@@ -1986,7 +2033,6 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
1986 req->data = erp_action->unit; 2033 req->data = erp_action->unit;
1987 req->erp_action = erp_action; 2034 req->erp_action = erp_action;
1988 erp_action->fsf_req = req; 2035 erp_action->fsf_req = req;
1989 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
1990 2036
1991 zfcp_fsf_start_erp_timer(req); 2037 zfcp_fsf_start_erp_timer(req);
1992 retval = zfcp_fsf_req_send(req); 2038 retval = zfcp_fsf_req_send(req);
@@ -1995,7 +2041,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
1995 erp_action->fsf_req = NULL; 2041 erp_action->fsf_req = NULL;
1996 } 2042 }
1997out: 2043out:
1998 spin_unlock_bh(&adapter->req_q.lock); 2044 spin_unlock_bh(&adapter->req_q_lock);
1999 return retval; 2045 return retval;
2000} 2046}
2001 2047
@@ -2156,21 +2202,21 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2156 break; 2202 break;
2157 case FSF_DIRECTION_INDICATOR_NOT_VALID: 2203 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2158 dev_err(&req->adapter->ccw_device->dev, 2204 dev_err(&req->adapter->ccw_device->dev,
2159 "Invalid data direction (%d) given for unit " 2205 "Incorrect direction %d, unit 0x%016Lx on port "
2160 "0x%016Lx on port 0x%016Lx, shutting down " 2206 "0x%016Lx closed\n",
2161 "adapter.\n",
2162 req->qtcb->bottom.io.data_direction, 2207 req->qtcb->bottom.io.data_direction,
2163 unit->fcp_lun, unit->port->wwpn); 2208 (unsigned long long)unit->fcp_lun,
2209 (unsigned long long)unit->port->wwpn);
2164 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, req); 2210 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, req);
2165 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2211 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2166 break; 2212 break;
2167 case FSF_CMND_LENGTH_NOT_VALID: 2213 case FSF_CMND_LENGTH_NOT_VALID:
2168 dev_err(&req->adapter->ccw_device->dev, 2214 dev_err(&req->adapter->ccw_device->dev,
2169 "An invalid control-data-block length field (%d) " 2215 "Incorrect CDB length %d, unit 0x%016Lx on "
2170 "was found in a command for unit 0x%016Lx on port " 2216 "port 0x%016Lx closed\n",
2171 "0x%016Lx. Shutting down adapter.\n",
2172 req->qtcb->bottom.io.fcp_cmnd_length, 2217 req->qtcb->bottom.io.fcp_cmnd_length,
2173 unit->fcp_lun, unit->port->wwpn); 2218 (unsigned long long)unit->fcp_lun,
2219 (unsigned long long)unit->port->wwpn);
2174 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, req); 2220 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, req);
2175 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2221 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2176 break; 2222 break;
@@ -2201,6 +2247,20 @@ skip_fsfstatus:
2201 } 2247 }
2202} 2248}
2203 2249
2250static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
2251{
2252 u32 *fcp_dl_ptr;
2253
2254 /*
2255 * fcp_dl_addr = start address of fcp_cmnd structure +
 2256 * size of fixed part + size of dynamically sized add_fcp_cdb field
2257 * SEE FCP-2 documentation
2258 */
2259 fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] +
2260 (fcp_cmd->add_fcp_cdb_length << 2));
2261 *fcp_dl_ptr = fcp_dl;
2262}
2263
2204/** 2264/**
2205 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) 2265 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
2206 * @adapter: adapter where scsi command is issued 2266 * @adapter: adapter where scsi command is issued
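The new zfcp_set_fcp_dl() helper writes the FCP_DL field that immediately follows the variable-length additional CDB. A hedged illustration of the same offset arithmetic, using a deliberately simplified stand-in structure (the real struct fcp_cmnd_iu is defined in the zfcp headers and carries more fields):

struct example_fcp_cmnd {
	u8  fixed_part[28];	/* stands in for the fixed FCP_CMND fields */
	u32 add_fcp_cdb_length;	/* additional CDB length in 4-byte words */
};

static unsigned long example_fcp_dl_offset(const struct example_fcp_cmnd *cmd)
{
	/* same arithmetic as zfcp_set_fcp_dl(): step past the whole fixed
	 * structure, then past add_fcp_cdb_length * 4 bytes of additional
	 * CDB; FCP_DL is the u32 stored right behind that */
	return sizeof(*cmd) + (cmd->add_fcp_cdb_length << 2);
}

For example, add_fcp_cdb_length == 4 skips 16 extra CDB bytes, so the FCP_DL store lands 16 bytes after the end of the fixed structure, as FCP-2 requires.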
@@ -2223,8 +2283,8 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
2223 ZFCP_STATUS_COMMON_UNBLOCKED))) 2283 ZFCP_STATUS_COMMON_UNBLOCKED)))
2224 return -EBUSY; 2284 return -EBUSY;
2225 2285
2226 spin_lock(&adapter->req_q.lock); 2286 spin_lock(&adapter->req_q_lock);
2227 if (!atomic_read(&adapter->req_q.count)) 2287 if (!zfcp_fsf_sbal_available(adapter))
2228 goto out; 2288 goto out;
2229 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, 2289 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
2230 adapter->pool.fsf_req_scsi); 2290 adapter->pool.fsf_req_scsi);
@@ -2286,7 +2346,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
2286 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 2346 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
2287 2347
2288 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + 2348 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2289 fcp_cmnd_iu->add_fcp_cdb_length + sizeof(fcp_dl_t); 2349 fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);
2290 2350
2291 real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype, 2351 real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype,
2292 scsi_sglist(scsi_cmnd), 2352 scsi_sglist(scsi_cmnd),
@@ -2296,10 +2356,10 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
2296 retval = -EIO; 2356 retval = -EIO;
2297 else { 2357 else {
2298 dev_err(&adapter->ccw_device->dev, 2358 dev_err(&adapter->ccw_device->dev,
2299 "SCSI request too large. " 2359 "Oversize data package, unit 0x%016Lx "
2300 "Shutting down unit 0x%016Lx on port " 2360 "on port 0x%016Lx closed\n",
2301 "0x%016Lx.\n", unit->fcp_lun, 2361 (unsigned long long)unit->fcp_lun,
2302 unit->port->wwpn); 2362 (unsigned long long)unit->port->wwpn);
2303 zfcp_erp_unit_shutdown(unit, 0, 131, req); 2363 zfcp_erp_unit_shutdown(unit, 0, 131, req);
2304 retval = -EINVAL; 2364 retval = -EINVAL;
2305 } 2365 }
@@ -2322,7 +2382,7 @@ failed_scsi_cmnd:
2322 zfcp_fsf_req_free(req); 2382 zfcp_fsf_req_free(req);
2323 scsi_cmnd->host_scribble = NULL; 2383 scsi_cmnd->host_scribble = NULL;
2324out: 2384out:
2325 spin_unlock(&adapter->req_q.lock); 2385 spin_unlock(&adapter->req_q_lock);
2326 return retval; 2386 return retval;
2327} 2387}
2328 2388
@@ -2338,7 +2398,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2338 struct zfcp_unit *unit, 2398 struct zfcp_unit *unit,
2339 u8 tm_flags, int req_flags) 2399 u8 tm_flags, int req_flags)
2340{ 2400{
2341 volatile struct qdio_buffer_element *sbale; 2401 struct qdio_buffer_element *sbale;
2342 struct zfcp_fsf_req *req = NULL; 2402 struct zfcp_fsf_req *req = NULL;
2343 struct fcp_cmnd_iu *fcp_cmnd_iu; 2403 struct fcp_cmnd_iu *fcp_cmnd_iu;
2344 2404
@@ -2346,8 +2406,8 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2346 ZFCP_STATUS_COMMON_UNBLOCKED))) 2406 ZFCP_STATUS_COMMON_UNBLOCKED)))
2347 return NULL; 2407 return NULL;
2348 2408
2349 spin_lock(&adapter->req_q.lock); 2409 spin_lock(&adapter->req_q_lock);
2350 if (!atomic_read(&adapter->req_q.count)) 2410 if (!zfcp_fsf_sbal_available(adapter))
2351 goto out; 2411 goto out;
2352 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, 2412 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
2353 adapter->pool.fsf_req_scsi); 2413 adapter->pool.fsf_req_scsi);
@@ -2362,7 +2422,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2362 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2422 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2363 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2423 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2364 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + 2424 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2365 sizeof(fcp_dl_t); 2425 sizeof(u32);
2366 2426
2367 sbale = zfcp_qdio_sbale_req(req); 2427 sbale = zfcp_qdio_sbale_req(req);
2368 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; 2428 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
@@ -2379,7 +2439,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2379 zfcp_fsf_req_free(req); 2439 zfcp_fsf_req_free(req);
2380 req = NULL; 2440 req = NULL;
2381out: 2441out:
2382 spin_unlock(&adapter->req_q.lock); 2442 spin_unlock(&adapter->req_q_lock);
2383 return req; 2443 return req;
2384} 2444}
2385 2445
@@ -2398,7 +2458,7 @@ static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
2398struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, 2458struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2399 struct zfcp_fsf_cfdc *fsf_cfdc) 2459 struct zfcp_fsf_cfdc *fsf_cfdc)
2400{ 2460{
2401 volatile struct qdio_buffer_element *sbale; 2461 struct qdio_buffer_element *sbale;
2402 struct zfcp_fsf_req *req = NULL; 2462 struct zfcp_fsf_req *req = NULL;
2403 struct fsf_qtcb_bottom_support *bottom; 2463 struct fsf_qtcb_bottom_support *bottom;
2404 int direction, retval = -EIO, bytes; 2464 int direction, retval = -EIO, bytes;
@@ -2417,7 +2477,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2417 return ERR_PTR(-EINVAL); 2477 return ERR_PTR(-EINVAL);
2418 } 2478 }
2419 2479
2420 spin_lock_bh(&adapter->req_q.lock); 2480 spin_lock_bh(&adapter->req_q_lock);
2421 if (zfcp_fsf_req_sbal_get(adapter)) 2481 if (zfcp_fsf_req_sbal_get(adapter))
2422 goto out; 2482 goto out;
2423 2483
@@ -2447,7 +2507,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2447 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 2507 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2448 retval = zfcp_fsf_req_send(req); 2508 retval = zfcp_fsf_req_send(req);
2449out: 2509out:
2450 spin_unlock_bh(&adapter->req_q.lock); 2510 spin_unlock_bh(&adapter->req_q_lock);
2451 2511
2452 if (!retval) { 2512 if (!retval) {
2453 wait_event(req->completion_wq, 2513 wait_event(req->completion_wq,
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index bf94b4da0763..fd3a88777ac8 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -71,13 +71,6 @@
71#define FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED 0x00000041 71#define FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED 0x00000041
72#define FSF_ELS_COMMAND_REJECTED 0x00000050 72#define FSF_ELS_COMMAND_REJECTED 0x00000050
73#define FSF_GENERIC_COMMAND_REJECTED 0x00000051 73#define FSF_GENERIC_COMMAND_REJECTED 0x00000051
74#define FSF_OPERATION_PARTIALLY_SUCCESSFUL 0x00000052
75#define FSF_AUTHORIZATION_FAILURE 0x00000053
76#define FSF_CFDC_ERROR_DETECTED 0x00000054
77#define FSF_CONTROL_FILE_UPDATE_ERROR 0x00000055
78#define FSF_CONTROL_FILE_TOO_LARGE 0x00000056
79#define FSF_ACCESS_CONFLICT_DETECTED 0x00000057
80#define FSF_CONFLICTS_OVERRULED 0x00000058
81#define FSF_PORT_BOXED 0x00000059 74#define FSF_PORT_BOXED 0x00000059
82#define FSF_LUN_BOXED 0x0000005A 75#define FSF_LUN_BOXED 0x0000005A
83#define FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE 0x0000005B 76#define FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE 0x0000005B
@@ -85,9 +78,7 @@
85#define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061 78#define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061
86#define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062 79#define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062
87#define FSF_SBAL_MISMATCH 0x00000063 80#define FSF_SBAL_MISMATCH 0x00000063
88#define FSF_OPEN_PORT_WITHOUT_PRLI 0x00000064
89#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD 81#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
90#define FSF_FCP_RSP_AVAILABLE 0x000000AF
91#define FSF_UNKNOWN_COMMAND 0x000000E2 82#define FSF_UNKNOWN_COMMAND 0x000000E2
92#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3 83#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
93#define FSF_INVALID_COMMAND_OPTION 0x000000E5 84#define FSF_INVALID_COMMAND_OPTION 0x000000E5
@@ -102,20 +93,9 @@
102#define FSF_SQ_RETRY_IF_POSSIBLE 0x02 93#define FSF_SQ_RETRY_IF_POSSIBLE 0x02
103#define FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED 0x03 94#define FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED 0x03
104#define FSF_SQ_INVOKE_LINK_TEST_PROCEDURE 0x04 95#define FSF_SQ_INVOKE_LINK_TEST_PROCEDURE 0x04
105#define FSF_SQ_ULP_PROGRAMMING_ERROR 0x05
106#define FSF_SQ_COMMAND_ABORTED 0x06 96#define FSF_SQ_COMMAND_ABORTED 0x06
107#define FSF_SQ_NO_RETRY_POSSIBLE 0x07 97#define FSF_SQ_NO_RETRY_POSSIBLE 0x07
108 98
109/* FSF status qualifier for CFDC commands */
110#define FSF_SQ_CFDC_HARDENED_ON_SE 0x00000000
111#define FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE 0x00000001
112#define FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE2 0x00000002
113/* CFDC subtable codes */
114#define FSF_SQ_CFDC_SUBTABLE_OS 0x0001
115#define FSF_SQ_CFDC_SUBTABLE_PORT_WWPN 0x0002
116#define FSF_SQ_CFDC_SUBTABLE_PORT_DID 0x0003
117#define FSF_SQ_CFDC_SUBTABLE_LUN 0x0004
118
119/* FSF status qualifier (most significant 4 bytes), local link down */ 99/* FSF status qualifier (most significant 4 bytes), local link down */
120#define FSF_PSQ_LINK_NO_LIGHT 0x00000004 100#define FSF_PSQ_LINK_NO_LIGHT 0x00000004
121#define FSF_PSQ_LINK_WRAP_PLUG 0x00000008 101#define FSF_PSQ_LINK_WRAP_PLUG 0x00000008
@@ -145,7 +125,6 @@
145#define FSF_STATUS_READ_LINK_UP 0x00000006 125#define FSF_STATUS_READ_LINK_UP 0x00000006
146#define FSF_STATUS_READ_NOTIFICATION_LOST 0x00000009 126#define FSF_STATUS_READ_NOTIFICATION_LOST 0x00000009
147#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A 127#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A
148#define FSF_STATUS_READ_CFDC_HARDENED 0x0000000B
149#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C 128#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C
150 129
151/* status subtypes in status read buffer */ 130/* status subtypes in status read buffer */
@@ -159,20 +138,9 @@
159 138
160/* status subtypes for unsolicited status notification lost */ 139/* status subtypes for unsolicited status notification lost */
161#define FSF_STATUS_READ_SUB_INCOMING_ELS 0x00000001 140#define FSF_STATUS_READ_SUB_INCOMING_ELS 0x00000001
162#define FSF_STATUS_READ_SUB_SENSE_DATA 0x00000002
163#define FSF_STATUS_READ_SUB_LINK_STATUS 0x00000004
164#define FSF_STATUS_READ_SUB_PORT_CLOSED 0x00000008
165#define FSF_STATUS_READ_SUB_BIT_ERROR_THRESHOLD 0x00000010
166#define FSF_STATUS_READ_SUB_ACT_UPDATED 0x00000020 141#define FSF_STATUS_READ_SUB_ACT_UPDATED 0x00000020
167#define FSF_STATUS_READ_SUB_ACT_HARDENED 0x00000040
168#define FSF_STATUS_READ_SUB_FEATURE_UPDATE_ALERT 0x00000080
169
170/* status subtypes for CFDC */
171#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE 0x00000002
172#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2 0x0000000F
173 142
174/* topology that is detected by the adapter */ 143/* topology that is detected by the adapter */
175#define FSF_TOPO_ERROR 0x00000000
176#define FSF_TOPO_P2P 0x00000001 144#define FSF_TOPO_P2P 0x00000001
177#define FSF_TOPO_FABRIC 0x00000002 145#define FSF_TOPO_FABRIC 0x00000002
178#define FSF_TOPO_AL 0x00000003 146#define FSF_TOPO_AL 0x00000003
@@ -180,17 +148,13 @@
180/* data direction for FCP commands */ 148/* data direction for FCP commands */
181#define FSF_DATADIR_WRITE 0x00000001 149#define FSF_DATADIR_WRITE 0x00000001
182#define FSF_DATADIR_READ 0x00000002 150#define FSF_DATADIR_READ 0x00000002
183#define FSF_DATADIR_READ_WRITE 0x00000003
184#define FSF_DATADIR_CMND 0x00000004 151#define FSF_DATADIR_CMND 0x00000004
185 152
186/* fc service class */ 153/* fc service class */
187#define FSF_CLASS_1 0x00000001
188#define FSF_CLASS_2 0x00000002
189#define FSF_CLASS_3 0x00000003 154#define FSF_CLASS_3 0x00000003
190 155
191/* SBAL chaining */ 156/* SBAL chaining */
192#define FSF_MAX_SBALS_PER_REQ 36 157#define FSF_MAX_SBALS_PER_REQ 36
193#define FSF_MAX_SBALS_PER_ELS_REQ 2
194 158
195/* logging space behind QTCB */ 159/* logging space behind QTCB */
196#define FSF_QTCB_LOG_SIZE 1024 160#define FSF_QTCB_LOG_SIZE 1024
@@ -200,50 +164,16 @@
200#define FSF_FEATURE_LUN_SHARING 0x00000004 164#define FSF_FEATURE_LUN_SHARING 0x00000004
201#define FSF_FEATURE_NOTIFICATION_LOST 0x00000008 165#define FSF_FEATURE_NOTIFICATION_LOST 0x00000008
202#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010 166#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
203#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
204#define FSF_FEATURE_UPDATE_ALERT 0x00000100 167#define FSF_FEATURE_UPDATE_ALERT 0x00000100
205#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200 168#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
206 169
207/* host connection features */ 170/* host connection features */
208#define FSF_FEATURE_NPIV_MODE 0x00000001 171#define FSF_FEATURE_NPIV_MODE 0x00000001
209#define FSF_FEATURE_VM_ASSIGNED_WWPN 0x00000002
210 172
211/* option */ 173/* option */
212#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001 174#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
213#define FSF_OPEN_LUN_REPLICATE_SENSE 0x00000002
214
215/* adapter types */
216#define FSF_ADAPTER_TYPE_FICON 0x00000001
217#define FSF_ADAPTER_TYPE_FICON_EXPRESS 0x00000002
218
219/* port types */
220#define FSF_HBA_PORTTYPE_UNKNOWN 0x00000001
221#define FSF_HBA_PORTTYPE_NOTPRESENT 0x00000003
222#define FSF_HBA_PORTTYPE_NPORT 0x00000005
223#define FSF_HBA_PORTTYPE_PTP 0x00000021
224/* following are not defined and used by FSF Spec
225 but are additionally defined by FC-HBA */
226#define FSF_HBA_PORTTYPE_OTHER 0x00000002
227#define FSF_HBA_PORTTYPE_NOTPRESENT 0x00000003
228#define FSF_HBA_PORTTYPE_NLPORT 0x00000006
229#define FSF_HBA_PORTTYPE_FLPORT 0x00000007
230#define FSF_HBA_PORTTYPE_FPORT 0x00000008
231#define FSF_HBA_PORTTYPE_LPORT 0x00000020
232
233/* port states */
234#define FSF_HBA_PORTSTATE_UNKNOWN 0x00000001
235#define FSF_HBA_PORTSTATE_ONLINE 0x00000002
236#define FSF_HBA_PORTSTATE_OFFLINE 0x00000003
237#define FSF_HBA_PORTSTATE_LINKDOWN 0x00000006
238#define FSF_HBA_PORTSTATE_ERROR 0x00000007
239
240/* IO states of adapter */
241#define FSF_IOSTAT_NPORT_RJT 0x00000004
242#define FSF_IOSTAT_FABRIC_RJT 0x00000005
243#define FSF_IOSTAT_LS_RJT 0x00000009
244 175
245/* open LUN access flags*/ 176/* open LUN access flags*/
246#define FSF_UNIT_ACCESS_OPEN_LUN_ALLOWED 0x01000000
247#define FSF_UNIT_ACCESS_EXCLUSIVE 0x02000000 177#define FSF_UNIT_ACCESS_EXCLUSIVE 0x02000000
248#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000 178#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000
249 179
@@ -265,11 +195,6 @@ struct fsf_queue_designator {
265 u32 res1; 195 u32 res1;
266} __attribute__ ((packed)); 196} __attribute__ ((packed));
267 197
268struct fsf_port_closed_payload {
269 struct fsf_queue_designator queue_designator;
270 u32 port_handle;
271} __attribute__ ((packed));
272
273struct fsf_bit_error_payload { 198struct fsf_bit_error_payload {
274 u32 res1; 199 u32 res1;
275 u32 link_failure_error_count; 200 u32 link_failure_error_count;
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 69d632d851d9..3e05080e62d4 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -28,7 +28,7 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
28 return 0; 28 return 0;
29} 29}
30 30
31static volatile struct qdio_buffer_element * 31static struct qdio_buffer_element *
32zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) 32zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
33{ 33{
34 return &q->sbal[sbal_idx]->element[sbale_idx]; 34 return &q->sbal[sbal_idx]->element[sbale_idx];
@@ -57,7 +57,7 @@ void zfcp_qdio_free(struct zfcp_adapter *adapter)
57 57
58static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id) 58static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id)
59{ 59{
60 dev_warn(&adapter->ccw_device->dev, "QDIO problem occurred.\n"); 60 dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
61 61
62 zfcp_erp_adapter_reopen(adapter, 62 zfcp_erp_adapter_reopen(adapter,
63 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 63 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
@@ -145,7 +145,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
145{ 145{
146 struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; 146 struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
147 struct zfcp_qdio_queue *queue = &adapter->resp_q; 147 struct zfcp_qdio_queue *queue = &adapter->resp_q;
148 volatile struct qdio_buffer_element *sbale; 148 struct qdio_buffer_element *sbale;
149 int sbal_idx, sbale_idx, sbal_no; 149 int sbal_idx, sbale_idx, sbal_no;
150 150
151 if (unlikely(qdio_err)) { 151 if (unlikely(qdio_err)) {
@@ -174,8 +174,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
174 174
175 if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY))) 175 if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
176 dev_warn(&adapter->ccw_device->dev, 176 dev_warn(&adapter->ccw_device->dev,
177 "Protocol violation by adapter. " 177 "A QDIO protocol error occurred, "
178 "Continuing operations.\n"); 178 "operations continue\n");
179 } 179 }
180 180
181 /* 181 /*
@@ -190,8 +190,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
190 * @fsf_req: pointer to struct fsf_req 190 * @fsf_req: pointer to struct fsf_req
191 * Returns: pointer to qdio_buffer_element (SBALE) structure 191 * Returns: pointer to qdio_buffer_element (SBALE) structure
192 */ 192 */
193volatile struct qdio_buffer_element * 193struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
194zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
195{ 194{
196 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0); 195 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
197} 196}
@@ -201,8 +200,7 @@ zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
201 * @fsf_req: pointer to struct fsf_req 200 * @fsf_req: pointer to struct fsf_req
202 * Returns: pointer to qdio_buffer_element (SBALE) structure 201 * Returns: pointer to qdio_buffer_element (SBALE) structure
203 */ 202 */
204volatile struct qdio_buffer_element * 203struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
205zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
206{ 204{
207 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 205 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last,
208 req->sbale_curr); 206 req->sbale_curr);
@@ -216,10 +214,10 @@ static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
216 % QDIO_MAX_BUFFERS_PER_Q; 214 % QDIO_MAX_BUFFERS_PER_Q;
217} 215}
218 216
219static volatile struct qdio_buffer_element * 217static struct qdio_buffer_element *
220zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 218zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
221{ 219{
222 volatile struct qdio_buffer_element *sbale; 220 struct qdio_buffer_element *sbale;
223 221
224 /* set last entry flag in current SBALE of current SBAL */ 222 /* set last entry flag in current SBALE of current SBAL */
225 sbale = zfcp_qdio_sbale_curr(fsf_req); 223 sbale = zfcp_qdio_sbale_curr(fsf_req);
@@ -250,7 +248,7 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
250 return sbale; 248 return sbale;
251} 249}
252 250
253static volatile struct qdio_buffer_element * 251static struct qdio_buffer_element *
254zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 252zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
255{ 253{
256 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) 254 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@@ -273,7 +271,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
273 unsigned int sbtype, void *start_addr, 271 unsigned int sbtype, void *start_addr,
274 unsigned int total_length) 272 unsigned int total_length)
275{ 273{
276 volatile struct qdio_buffer_element *sbale; 274 struct qdio_buffer_element *sbale;
277 unsigned long remaining, length; 275 unsigned long remaining, length;
278 void *addr; 276 void *addr;
279 277
@@ -282,6 +280,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
282 addr += length, remaining -= length) { 280 addr += length, remaining -= length) {
283 sbale = zfcp_qdio_sbale_next(fsf_req, sbtype); 281 sbale = zfcp_qdio_sbale_next(fsf_req, sbtype);
284 if (!sbale) { 282 if (!sbale) {
283 atomic_inc(&fsf_req->adapter->qdio_outb_full);
285 zfcp_qdio_undo_sbals(fsf_req); 284 zfcp_qdio_undo_sbals(fsf_req);
286 return -EINVAL; 285 return -EINVAL;
287 } 286 }
@@ -307,7 +306,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
307int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 306int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
308 struct scatterlist *sg, int max_sbals) 307 struct scatterlist *sg, int max_sbals)
309{ 308{
310 volatile struct qdio_buffer_element *sbale; 309 struct qdio_buffer_element *sbale;
311 int retval, bytes = 0; 310 int retval, bytes = 0;
312 311
313 /* figure out last allowed SBAL */ 312 /* figure out last allowed SBAL */
@@ -344,10 +343,10 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
344 int first = fsf_req->sbal_first; 343 int first = fsf_req->sbal_first;
345 int count = fsf_req->sbal_number; 344 int count = fsf_req->sbal_number;
346 int retval, pci, pci_batch; 345 int retval, pci, pci_batch;
347 volatile struct qdio_buffer_element *sbale; 346 struct qdio_buffer_element *sbale;
348 347
349 /* acknowledgements for transferred buffers */ 348 /* acknowledgements for transferred buffers */
350 pci_batch = req_q->pci_batch + count; 349 pci_batch = adapter->req_q_pci_batch + count;
351 if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) { 350 if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) {
352 pci_batch %= ZFCP_QDIO_PCI_INTERVAL; 351 pci_batch %= ZFCP_QDIO_PCI_INTERVAL;
353 pci = first + count - (pci_batch + 1); 352 pci = first + count - (pci_batch + 1);
@@ -367,7 +366,7 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
367 atomic_sub(count, &req_q->count); 366 atomic_sub(count, &req_q->count);
368 req_q->first += count; 367 req_q->first += count;
369 req_q->first %= QDIO_MAX_BUFFERS_PER_Q; 368 req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
370 req_q->pci_batch = pci_batch; 369 adapter->req_q_pci_batch = pci_batch;
371 return 0; 370 return 0;
372} 371}
373 372
@@ -418,14 +417,14 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
418 struct zfcp_qdio_queue *req_q; 417 struct zfcp_qdio_queue *req_q;
419 int first, count; 418 int first, count;
420 419
421 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) 420 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
422 return; 421 return;
423 422
424 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ 423 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
425 req_q = &adapter->req_q; 424 req_q = &adapter->req_q;
426 spin_lock_bh(&req_q->lock); 425 spin_lock_bh(&adapter->req_q_lock);
427 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 426 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
428 spin_unlock_bh(&req_q->lock); 427 spin_unlock_bh(&adapter->req_q_lock);
429 428
430 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); 429 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
431 430
@@ -438,7 +437,7 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
438 } 437 }
439 req_q->first = 0; 438 req_q->first = 0;
440 atomic_set(&req_q->count, 0); 439 atomic_set(&req_q->count, 0);
441 req_q->pci_batch = 0; 440 adapter->req_q_pci_batch = 0;
442 adapter->resp_q.first = 0; 441 adapter->resp_q.first = 0;
443 atomic_set(&adapter->resp_q.count, 0); 442 atomic_set(&adapter->resp_q.count, 0);
444} 443}
@@ -450,23 +449,17 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
450 */ 449 */
451int zfcp_qdio_open(struct zfcp_adapter *adapter) 450int zfcp_qdio_open(struct zfcp_adapter *adapter)
452{ 451{
453 volatile struct qdio_buffer_element *sbale; 452 struct qdio_buffer_element *sbale;
454 int cc; 453 int cc;
455 454
456 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) 455 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
457 return -EIO; 456 return -EIO;
458 457
459 if (qdio_establish(&adapter->qdio_init_data)) { 458 if (qdio_establish(&adapter->qdio_init_data))
460 dev_err(&adapter->ccw_device->dev, 459 goto failed_establish;
461 "Establish of QDIO queues failed.\n");
462 return -EIO;
463 }
464 460
465 if (qdio_activate(adapter->ccw_device)) { 461 if (qdio_activate(adapter->ccw_device))
466 dev_err(&adapter->ccw_device->dev,
467 "Activate of QDIO queues failed.\n");
468 goto failed_qdio; 462 goto failed_qdio;
469 }
470 463
471 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { 464 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
472 sbale = &(adapter->resp_q.sbal[cc]->element[0]); 465 sbale = &(adapter->resp_q.sbal[cc]->element[0]);
@@ -476,20 +469,20 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
476 } 469 }
477 470
478 if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0, 471 if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
479 QDIO_MAX_BUFFERS_PER_Q)) { 472 QDIO_MAX_BUFFERS_PER_Q))
480 dev_err(&adapter->ccw_device->dev,
481 "Init of QDIO response queue failed.\n");
482 goto failed_qdio; 473 goto failed_qdio;
483 }
484 474
485 /* set index of first available SBALS / number of available SBALS */ 475
486 adapter->req_q.first = 0; 476 adapter->req_q.first = 0;
487 atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q); 477 atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
488 adapter->req_q.pci_batch = 0; 478 adapter->req_q_pci_batch = 0;
489 479
490 return 0; 480 return 0;
491 481
492failed_qdio: 482failed_qdio:
493 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); 483 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
484failed_establish:
485 dev_err(&adapter->ccw_device->dev,
486 "Setting up the QDIO connection to the FCP adapter failed\n");
494 return -EIO; 487 return -EIO;
495} 488}
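
The reworked zfcp_qdio_open() above funnels every setup failure through two labels so that a single dev_err() is emitted at the common exit, instead of one message per failing step. A minimal, self-contained sketch of that goto-based cleanup pattern, with hypothetical step names standing in for qdio_establish()/qdio_activate():

#include <stdio.h>

/* Hypothetical setup steps; each returns 0 on success. */
static int step_establish(void) { return 0; }
static int step_activate(void)  { return 1; }	/* pretend this one fails */
static void step_teardown(void) { }

static int example_open(void)
{
	if (step_establish())
		goto failed_establish;
	if (step_activate())
		goto failed_activate;
	return 0;

failed_activate:
	step_teardown();		/* undo only the step that succeeded */
failed_establish:
	fprintf(stderr, "Setting up the connection failed\n");
	return -1;
}

int main(void)
{
	return example_open() ? 1 : 0;
}
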
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index aeae56b00b45..ca8f85f3dad4 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -21,20 +21,6 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
21 return fcp_sns_info_ptr; 21 return fcp_sns_info_ptr;
22} 22}
23 23
24void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
25{
26 fcp_dl_t *fcp_dl_ptr;
27
28 /*
29 * fcp_dl_addr = start address of fcp_cmnd structure +
30 * size of fixed part + size of dynamically sized add_dcp_cdb field
31 * SEE FCP-2 documentation
32 */
33 fcp_dl_ptr = (fcp_dl_t *) ((unsigned char *) &fcp_cmd[1] +
34 (fcp_cmd->add_fcp_cdb_length << 2));
35 *fcp_dl_ptr = fcp_dl;
36}
37
38static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) 24static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
39{ 25{
40 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 26 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
@@ -119,13 +105,17 @@ static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
119{ 105{
120 struct zfcp_port *port; 106 struct zfcp_port *port;
121 struct zfcp_unit *unit; 107 struct zfcp_unit *unit;
108 int scsi_lun;
122 109
123 list_for_each_entry(port, &adapter->port_list_head, list) { 110 list_for_each_entry(port, &adapter->port_list_head, list) {
124 if (!port->rport || (id != port->rport->scsi_target_id)) 111 if (!port->rport || (id != port->rport->scsi_target_id))
125 continue; 112 continue;
126 list_for_each_entry(unit, &port->unit_list_head, list) 113 list_for_each_entry(unit, &port->unit_list_head, list) {
127 if (lun == unit->scsi_lun) 114 scsi_lun = scsilun_to_int(
115 (struct scsi_lun *)&unit->fcp_lun);
116 if (lun == scsi_lun)
128 return unit; 117 return unit;
118 }
129 } 119 }
130 120
131 return NULL; 121 return NULL;
@@ -183,7 +173,6 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
183 return retval; 173 return retval;
184 } 174 }
185 fsf_req->data = NULL; 175 fsf_req->data = NULL;
186 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
187 176
188 /* don't access old fsf_req after releasing the abort_lock */ 177 /* don't access old fsf_req after releasing the abort_lock */
189 write_unlock_irqrestore(&adapter->abort_lock, flags); 178 write_unlock_irqrestore(&adapter->abort_lock, flags);
@@ -294,7 +283,8 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
294 sizeof (struct zfcp_adapter *)); 283 sizeof (struct zfcp_adapter *));
295 if (!adapter->scsi_host) { 284 if (!adapter->scsi_host) {
296 dev_err(&adapter->ccw_device->dev, 285 dev_err(&adapter->ccw_device->dev,
297 "registration with SCSI stack failed."); 286 "Registering the FCP device with the "
287 "SCSI stack failed\n");
298 return -EIO; 288 return -EIO;
299 } 289 }
300 290
@@ -312,7 +302,6 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
312 scsi_host_put(adapter->scsi_host); 302 scsi_host_put(adapter->scsi_host);
313 return -EIO; 303 return -EIO;
314 } 304 }
315 atomic_set_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
316 305
317 return 0; 306 return 0;
318} 307}
@@ -336,7 +325,6 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
336 scsi_remove_host(shost); 325 scsi_remove_host(shost);
337 scsi_host_put(shost); 326 scsi_host_put(shost);
338 adapter->scsi_host = NULL; 327 adapter->scsi_host = NULL;
339 atomic_clear_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
340 328
341 return; 329 return;
342} 330}
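
The lookup change above converts the unit's 8-byte FCP LUN with scsilun_to_int() before comparing it against the midlayer's integer LUN, rather than comparing the raw 64-bit value. As a rough illustration of that conversion, here is a simplified, standalone re-implementation of the folding (not the kernel's verbatim code); with flat/peripheral addressing, LUN 5 encodes as 00 05 00 00 ... and folds back to the integer 5:

#include <stdint.h>
#include <stdio.h>

/* Simplified sketch: fold the leading bytes of an 8-byte SCSI LUN into a
 * host integer, two bytes (one addressing level) at a time. */
static unsigned int lun_to_int_sketch(const uint8_t lun[8])
{
	unsigned int result = 0;
	int i;

	for (i = 0; i < 4; i += 2)
		result |= (unsigned int)((lun[i] << 8) | lun[i + 1]) << (i * 8);
	return result;
}

int main(void)
{
	const uint8_t lun5[8] = { 0x00, 0x05 };	/* remaining bytes are zero */

	printf("%u\n", lun_to_int_sketch(lun5));	/* prints 5 */
	return 0;
}
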
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 2e85c6c49e7d..2809d789b55c 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -26,9 +26,9 @@ static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
26ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n", 26ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n",
27 atomic_read(&adapter->status)); 27 atomic_read(&adapter->status));
28ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n", 28ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n",
29 adapter->peer_wwnn); 29 (unsigned long long) adapter->peer_wwnn);
30ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n", 30ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n",
31 adapter->peer_wwpn); 31 (unsigned long long) adapter->peer_wwpn);
32ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n", 32ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n",
33 adapter->peer_d_id); 33 adapter->peer_d_id);
34ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n", 34ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n",
@@ -135,8 +135,9 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
135{ 135{
136 struct zfcp_adapter *adapter = dev_get_drvdata(dev); 136 struct zfcp_adapter *adapter = dev_get_drvdata(dev);
137 struct zfcp_port *port; 137 struct zfcp_port *port;
138 wwn_t wwpn; 138 u64 wwpn;
139 int retval = 0; 139 int retval = 0;
140 LIST_HEAD(port_remove_lh);
140 141
141 down(&zfcp_data.config_sema); 142 down(&zfcp_data.config_sema);
142 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { 143 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
@@ -144,7 +145,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
144 goto out; 145 goto out;
145 } 146 }
146 147
147 if (strict_strtoull(buf, 0, &wwpn)) { 148 if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn)) {
148 retval = -EINVAL; 149 retval = -EINVAL;
149 goto out; 150 goto out;
150 } 151 }
@@ -154,7 +155,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
154 if (port && (atomic_read(&port->refcount) == 0)) { 155 if (port && (atomic_read(&port->refcount) == 0)) {
155 zfcp_port_get(port); 156 zfcp_port_get(port);
156 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); 157 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
157 list_move(&port->list, &adapter->port_remove_lh); 158 list_move(&port->list, &port_remove_lh);
158 } else 159 } else
159 port = NULL; 160 port = NULL;
160 write_unlock_irq(&zfcp_data.config_lock); 161 write_unlock_irq(&zfcp_data.config_lock);
@@ -200,7 +201,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
200{ 201{
201 struct zfcp_port *port = dev_get_drvdata(dev); 202 struct zfcp_port *port = dev_get_drvdata(dev);
202 struct zfcp_unit *unit; 203 struct zfcp_unit *unit;
203 fcp_lun_t fcp_lun; 204 u64 fcp_lun;
204 int retval = -EINVAL; 205 int retval = -EINVAL;
205 206
206 down(&zfcp_data.config_sema); 207 down(&zfcp_data.config_sema);
@@ -209,7 +210,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
209 goto out; 210 goto out;
210 } 211 }
211 212
212 if (strict_strtoull(buf, 0, &fcp_lun)) 213 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
213 goto out; 214 goto out;
214 215
215 unit = zfcp_unit_enqueue(port, fcp_lun); 216 unit = zfcp_unit_enqueue(port, fcp_lun);
@@ -233,8 +234,9 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
233{ 234{
234 struct zfcp_port *port = dev_get_drvdata(dev); 235 struct zfcp_port *port = dev_get_drvdata(dev);
235 struct zfcp_unit *unit; 236 struct zfcp_unit *unit;
236 fcp_lun_t fcp_lun; 237 u64 fcp_lun;
237 int retval = 0; 238 int retval = 0;
239 LIST_HEAD(unit_remove_lh);
238 240
239 down(&zfcp_data.config_sema); 241 down(&zfcp_data.config_sema);
240 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { 242 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
@@ -242,7 +244,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
242 goto out; 244 goto out;
243 } 245 }
244 246
245 if (strict_strtoull(buf, 0, &fcp_lun)) { 247 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) {
246 retval = -EINVAL; 248 retval = -EINVAL;
247 goto out; 249 goto out;
248 } 250 }
@@ -252,7 +254,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
252 if (unit && (atomic_read(&unit->refcount) == 0)) { 254 if (unit && (atomic_read(&unit->refcount) == 0)) {
253 zfcp_unit_get(unit); 255 zfcp_unit_get(unit);
254 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); 256 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
255 list_move(&unit->list, &port->unit_remove_lh); 257 list_move(&unit->list, &unit_remove_lh);
256 } else 258 } else
257 unit = NULL; 259 unit = NULL;
258 260
@@ -273,22 +275,7 @@ out:
273} 275}
274static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); 276static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
275 277
276static struct attribute *zfcp_port_ns_attrs[] = { 278static struct attribute *zfcp_port_attrs[] = {
277 &dev_attr_port_failed.attr,
278 &dev_attr_port_in_recovery.attr,
279 &dev_attr_port_status.attr,
280 &dev_attr_port_access_denied.attr,
281 NULL
282};
283
284/**
285 * zfcp_sysfs_ns_port_attrs - sysfs attributes for nameserver
286 */
287struct attribute_group zfcp_sysfs_ns_port_attrs = {
288 .attrs = zfcp_port_ns_attrs,
289};
290
291static struct attribute *zfcp_port_no_ns_attrs[] = {
292 &dev_attr_unit_add.attr, 279 &dev_attr_unit_add.attr,
293 &dev_attr_unit_remove.attr, 280 &dev_attr_unit_remove.attr,
294 &dev_attr_port_failed.attr, 281 &dev_attr_port_failed.attr,
@@ -302,7 +289,7 @@ static struct attribute *zfcp_port_no_ns_attrs[] = {
302 * zfcp_sysfs_port_attrs - sysfs attributes for all other ports 289 * zfcp_sysfs_port_attrs - sysfs attributes for all other ports
303 */ 290 */
304struct attribute_group zfcp_sysfs_port_attrs = { 291struct attribute_group zfcp_sysfs_port_attrs = {
305 .attrs = zfcp_port_no_ns_attrs, 292 .attrs = zfcp_port_attrs,
306}; 293};
307 294
308static struct attribute *zfcp_unit_attrs[] = { 295static struct attribute *zfcp_unit_attrs[] = {
@@ -395,8 +382,10 @@ static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
395 382
396ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", 383ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
397 unit->port->adapter->ccw_device->dev.bus_id); 384 unit->port->adapter->ccw_device->dev.bus_id);
398ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", unit->port->wwpn); 385ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
399ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", unit->fcp_lun); 386 (unsigned long long) unit->port->wwpn);
387ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n",
388 (unsigned long long) unit->fcp_lun);
400 389
401struct device_attribute *zfcp_sysfs_sdev_attrs[] = { 390struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
402 &dev_attr_fcp_lun, 391 &dev_attr_fcp_lun,
@@ -487,10 +476,23 @@ ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n",
487ZFCP_SHOST_ATTR(seconds_active, "%llu\n", 476ZFCP_SHOST_ATTR(seconds_active, "%llu\n",
488 (unsigned long long) stat_info.seconds_act); 477 (unsigned long long) stat_info.seconds_act);
489 478
479static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
480 struct device_attribute *attr,
481 char *buf)
482{
483 struct Scsi_Host *scsi_host = class_to_shost(dev);
484 struct zfcp_adapter *adapter =
485 (struct zfcp_adapter *) scsi_host->hostdata[0];
486
487 return sprintf(buf, "%d\n", atomic_read(&adapter->qdio_outb_full));
488}
489static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
490
490struct device_attribute *zfcp_sysfs_shost_attrs[] = { 491struct device_attribute *zfcp_sysfs_shost_attrs[] = {
491 &dev_attr_utilization, 492 &dev_attr_utilization,
492 &dev_attr_requests, 493 &dev_attr_requests,
493 &dev_attr_megabytes, 494 &dev_attr_megabytes,
494 &dev_attr_seconds_active, 495 &dev_attr_seconds_active,
496 &dev_attr_queue_full,
495 NULL 497 NULL
496}; 498};
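
The new queue_full attribute above follows the common pattern for a read-only sysfs file backed by an atomic counter: a show callback formats the current value and DEVICE_ATTR() ties it to a name and mode. A minimal kernel-style sketch of that pattern; the structure, counter and attribute names are hypothetical, and registration (for example via device_create_file() or an attribute array, as the zfcp patch does with zfcp_sysfs_shost_attrs[]) is omitted:

#include <linux/device.h>

/* Hypothetical driver-private data holding an event counter. */
struct mydrv_data {
	atomic_t overflow_count;
};

static ssize_t mydrv_overflows_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct mydrv_data *priv = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", atomic_read(&priv->overflow_count));
}
static DEVICE_ATTR(overflows, S_IRUGO, mydrv_overflows_show, NULL);
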
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4e0322b1c1ea..d3b211af4e1c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1325,14 +1325,6 @@ config SCSI_QLOGIC_FAS
1325 To compile this driver as a module, choose M here: the 1325 To compile this driver as a module, choose M here: the
1326 module will be called qlogicfas. 1326 module will be called qlogicfas.
1327 1327
1328config SCSI_QLOGIC_FC_FIRMWARE
1329 bool "Include loadable firmware in driver"
1330 depends on SCSI_QLOGIC_FC
1331 help
1332 Say Y to include ISP2X00 Fabric Initiator/Target Firmware, with
1333 expanded LUN addressing and FcTape (FCP-2) support, in the
1334 qlogicfc driver. This is required on some platforms.
1335
1336config SCSI_QLOGIC_1280 1328config SCSI_QLOGIC_1280
1337 tristate "Qlogic QLA 1240/1x80/1x160 SCSI support" 1329 tristate "Qlogic QLA 1240/1x80/1x160 SCSI support"
1338 depends on PCI && SCSI 1330 depends on PCI && SCSI
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index aa4e77c25273..8abfd06b5a72 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1139,7 +1139,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
1139 srbcmd->id = cpu_to_le32(scmd_id(cmd)); 1139 srbcmd->id = cpu_to_le32(scmd_id(cmd));
1140 srbcmd->lun = cpu_to_le32(cmd->device->lun); 1140 srbcmd->lun = cpu_to_le32(cmd->device->lun);
1141 srbcmd->flags = cpu_to_le32(flag); 1141 srbcmd->flags = cpu_to_le32(flag);
1142 timeout = cmd->timeout_per_command/HZ; 1142 timeout = cmd->request->timeout/HZ;
1143 if (timeout == 0) 1143 if (timeout == 0)
1144 timeout = 1; 1144 timeout = 1;
1145 srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds 1145 srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index ef693e8412e9..8f45570a8a01 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -84,7 +84,7 @@ struct clariion_dh_data {
84 /* 84 /*
85 * I/O buffer for both MODE_SELECT and INQUIRY commands. 85 * I/O buffer for both MODE_SELECT and INQUIRY commands.
86 */ 86 */
87 char buffer[CLARIION_BUFFER_SIZE]; 87 unsigned char buffer[CLARIION_BUFFER_SIZE];
88 /* 88 /*
89 * SCSI sense buffer for commands -- assumes serial issuance 89 * SCSI sense buffer for commands -- assumes serial issuance
90 * and completion sequence of all commands for same multipath. 90 * and completion sequence of all commands for same multipath.
@@ -176,7 +176,7 @@ static int parse_sp_info_reply(struct scsi_device *sdev,
176 err = SCSI_DH_DEV_TEMP_BUSY; 176 err = SCSI_DH_DEV_TEMP_BUSY;
177 goto out; 177 goto out;
178 } 178 }
179 if (csdev->buffer[4] < 0 || csdev->buffer[4] > 2) { 179 if (csdev->buffer[4] > 2) {
180 /* Invalid buffer format */ 180 /* Invalid buffer format */
181 sdev_printk(KERN_NOTICE, sdev, 181 sdev_printk(KERN_NOTICE, sdev,
182 "%s: invalid VPD page 0xC0 format\n", 182 "%s: invalid VPD page 0xC0 format\n",
@@ -278,7 +278,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
278 return NULL; 278 return NULL;
279 } 279 }
280 280
281 memset(rq->cmd, 0, BLK_MAX_CDB);
282 rq->cmd_len = COMMAND_SIZE(cmd); 281 rq->cmd_len = COMMAND_SIZE(cmd);
283 rq->cmd[0] = cmd; 282 rq->cmd[0] = cmd;
284 283
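
The buffer type change above is what makes the dropped "< 0" half of the range check redundant: an unsigned char element can never be negative, while a plain char element can be on platforms where char is signed. A tiny standalone illustration (not driver code):

#include <stdio.h>

int main(void)
{
	char plain = (char)0xC0;	/* negative where plain char is signed */
	unsigned char uns = 0xC0;	/* always in 0..255 */

	printf("plain=%d unsigned=%u\n", plain, uns);
	printf("plain < 0: %d, unsigned < 0: %d\n", plain < 0, uns < 0);
	return 0;
}
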
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index a6a4ef3ad51c..5e93c88ad66b 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -114,7 +114,6 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
114 req->cmd_type = REQ_TYPE_BLOCK_PC; 114 req->cmd_type = REQ_TYPE_BLOCK_PC;
115 req->cmd_flags |= REQ_FAILFAST; 115 req->cmd_flags |= REQ_FAILFAST;
116 req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY); 116 req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
117 memset(req->cmd, 0, MAX_COMMAND_SIZE);
118 req->cmd[0] = TEST_UNIT_READY; 117 req->cmd[0] = TEST_UNIT_READY;
119 req->timeout = HP_SW_TIMEOUT; 118 req->timeout = HP_SW_TIMEOUT;
120 req->sense = h->sense; 119 req->sense = h->sense;
@@ -207,7 +206,6 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
207 req->cmd_type = REQ_TYPE_BLOCK_PC; 206 req->cmd_type = REQ_TYPE_BLOCK_PC;
208 req->cmd_flags |= REQ_FAILFAST; 207 req->cmd_flags |= REQ_FAILFAST;
209 req->cmd_len = COMMAND_SIZE(START_STOP); 208 req->cmd_len = COMMAND_SIZE(START_STOP);
210 memset(req->cmd, 0, MAX_COMMAND_SIZE);
211 req->cmd[0] = START_STOP; 209 req->cmd[0] = START_STOP;
212 req->cmd[4] = 1; /* Start spin cycle */ 210 req->cmd[4] = 1; /* Start spin cycle */
213 req->timeout = HP_SW_TIMEOUT; 211 req->timeout = HP_SW_TIMEOUT;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 6e2f130d56de..50bf95f3b5c4 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -225,8 +225,6 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
225 return NULL; 225 return NULL;
226 } 226 }
227 227
228 memset(rq->cmd, 0, BLK_MAX_CDB);
229
230 rq->cmd_type = REQ_TYPE_BLOCK_PC; 228 rq->cmd_type = REQ_TYPE_BLOCK_PC;
231 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE; 229 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
232 rq->retries = RDAC_RETRIES; 230 rq->retries = RDAC_RETRIES;
@@ -590,6 +588,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
590 {"STK", "OPENstorage D280"}, 588 {"STK", "OPENstorage D280"},
591 {"SUN", "CSM200_R"}, 589 {"SUN", "CSM200_R"},
592 {"SUN", "LCSM100_F"}, 590 {"SUN", "LCSM100_F"},
591 {"DELL", "MD3000"},
592 {"DELL", "MD3000i"},
593 {NULL, NULL}, 593 {NULL, NULL},
594}; 594};
595 595
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 822d5214692b..c387c15a2128 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -464,7 +464,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
464 464
465 /* use request field to save the ptr. to completion struct. */ 465 /* use request field to save the ptr. to completion struct. */
466 scp->request = (struct request *)&wait; 466 scp->request = (struct request *)&wait;
467 scp->timeout_per_command = timeout*HZ;
468 scp->cmd_len = 12; 467 scp->cmd_len = 12;
469 scp->cmnd = cmnd; 468 scp->cmnd = cmnd;
470 cmndinfo.priority = IOCTL_PRI; 469 cmndinfo.priority = IOCTL_PRI;
@@ -1995,23 +1994,12 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
1995 register Scsi_Cmnd *pscp; 1994 register Scsi_Cmnd *pscp;
1996 register Scsi_Cmnd *nscp; 1995 register Scsi_Cmnd *nscp;
1997 ulong flags; 1996 ulong flags;
1998 unchar b, t;
1999 1997
2000 TRACE(("gdth_putq() priority %d\n",priority)); 1998 TRACE(("gdth_putq() priority %d\n",priority));
2001 spin_lock_irqsave(&ha->smp_lock, flags); 1999 spin_lock_irqsave(&ha->smp_lock, flags);
2002 2000
2003 if (!cmndinfo->internal_command) { 2001 if (!cmndinfo->internal_command)
2004 cmndinfo->priority = priority; 2002 cmndinfo->priority = priority;
2005 b = scp->device->channel;
2006 t = scp->device->id;
2007 if (priority >= DEFAULT_PRI) {
2008 if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
2009 (b==ha->virt_bus && t<MAX_HDRIVES && ha->hdr[t].lock)) {
2010 TRACE2(("gdth_putq(): locked IO ->update_timeout()\n"));
2011 cmndinfo->timeout = gdth_update_timeout(scp, 0);
2012 }
2013 }
2014 }
2015 2003
2016 if (ha->req_first==NULL) { 2004 if (ha->req_first==NULL) {
2017 ha->req_first = scp; /* queue was empty */ 2005 ha->req_first = scp; /* queue was empty */
@@ -3899,6 +3887,39 @@ static const char *gdth_info(struct Scsi_Host *shp)
3899 return ((const char *)ha->binfo.type_string); 3887 return ((const char *)ha->binfo.type_string);
3900} 3888}
3901 3889
3890static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
3891{
3892 gdth_ha_str *ha = shost_priv(scp->device->host);
3893 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
3894 unchar b, t;
3895 ulong flags;
3896 enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
3897
3898 TRACE(("%s() cmd 0x%x\n", __func__, scp->cmnd[0]));
3899 b = scp->device->channel;
3900 t = scp->device->id;
3901
3902 /*
3903 * We don't really honor the command timeout, but we try to
3904 * honor 6 times the actual command timeout. So reset the
3905 * timer if this is less than the 6th timeout on this command.
3906 */
3907 if (++cmndinfo->timeout_count < 6)
3908 retval = BLK_EH_RESET_TIMER;
3909
3910 /* Reset the timeout if it is locked IO */
3911 spin_lock_irqsave(&ha->smp_lock, flags);
3912 if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
3913 (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
3914 TRACE2(("%s(): locked IO, reset timeout\n", __func__));
3915 retval = BLK_EH_RESET_TIMER;
3916 }
3917 spin_unlock_irqrestore(&ha->smp_lock, flags);
3918
3919 return retval;
3920}
3921
3922
3902static int gdth_eh_bus_reset(Scsi_Cmnd *scp) 3923static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
3903{ 3924{
3904 gdth_ha_str *ha = shost_priv(scp->device->host); 3925 gdth_ha_str *ha = shost_priv(scp->device->host);
@@ -3992,7 +4013,7 @@ static int gdth_queuecommand(struct scsi_cmnd *scp,
3992 BUG_ON(!cmndinfo); 4013 BUG_ON(!cmndinfo);
3993 4014
3994 scp->scsi_done = done; 4015 scp->scsi_done = done;
3995 gdth_update_timeout(scp, scp->timeout_per_command * 6); 4016 cmndinfo->timeout_count = 0;
3996 cmndinfo->priority = DEFAULT_PRI; 4017 cmndinfo->priority = DEFAULT_PRI;
3997 4018
3998 return __gdth_queuecommand(ha, scp, cmndinfo); 4019 return __gdth_queuecommand(ha, scp, cmndinfo);
@@ -4096,12 +4117,10 @@ static int ioc_lockdrv(void __user *arg)
4096 ha->hdr[j].lock = 1; 4117 ha->hdr[j].lock = 1;
4097 spin_unlock_irqrestore(&ha->smp_lock, flags); 4118 spin_unlock_irqrestore(&ha->smp_lock, flags);
4098 gdth_wait_completion(ha, ha->bus_cnt, j); 4119 gdth_wait_completion(ha, ha->bus_cnt, j);
4099 gdth_stop_timeout(ha, ha->bus_cnt, j);
4100 } else { 4120 } else {
4101 spin_lock_irqsave(&ha->smp_lock, flags); 4121 spin_lock_irqsave(&ha->smp_lock, flags);
4102 ha->hdr[j].lock = 0; 4122 ha->hdr[j].lock = 0;
4103 spin_unlock_irqrestore(&ha->smp_lock, flags); 4123 spin_unlock_irqrestore(&ha->smp_lock, flags);
4104 gdth_start_timeout(ha, ha->bus_cnt, j);
4105 gdth_next(ha); 4124 gdth_next(ha);
4106 } 4125 }
4107 } 4126 }
@@ -4539,18 +4558,14 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
4539 spin_lock_irqsave(&ha->smp_lock, flags); 4558 spin_lock_irqsave(&ha->smp_lock, flags);
4540 ha->raw[i].lock = 1; 4559 ha->raw[i].lock = 1;
4541 spin_unlock_irqrestore(&ha->smp_lock, flags); 4560 spin_unlock_irqrestore(&ha->smp_lock, flags);
4542 for (j = 0; j < ha->tid_cnt; ++j) { 4561 for (j = 0; j < ha->tid_cnt; ++j)
4543 gdth_wait_completion(ha, i, j); 4562 gdth_wait_completion(ha, i, j);
4544 gdth_stop_timeout(ha, i, j);
4545 }
4546 } else { 4563 } else {
4547 spin_lock_irqsave(&ha->smp_lock, flags); 4564 spin_lock_irqsave(&ha->smp_lock, flags);
4548 ha->raw[i].lock = 0; 4565 ha->raw[i].lock = 0;
4549 spin_unlock_irqrestore(&ha->smp_lock, flags); 4566 spin_unlock_irqrestore(&ha->smp_lock, flags);
4550 for (j = 0; j < ha->tid_cnt; ++j) { 4567 for (j = 0; j < ha->tid_cnt; ++j)
4551 gdth_start_timeout(ha, i, j);
4552 gdth_next(ha); 4568 gdth_next(ha);
4553 }
4554 } 4569 }
4555 } 4570 }
4556 break; 4571 break;
@@ -4644,6 +4659,7 @@ static struct scsi_host_template gdth_template = {
4644 .slave_configure = gdth_slave_configure, 4659 .slave_configure = gdth_slave_configure,
4645 .bios_param = gdth_bios_param, 4660 .bios_param = gdth_bios_param,
4646 .proc_info = gdth_proc_info, 4661 .proc_info = gdth_proc_info,
4662 .eh_timed_out = gdth_timed_out,
4647 .proc_name = "gdth", 4663 .proc_name = "gdth",
4648 .can_queue = GDTH_MAXCMDS, 4664 .can_queue = GDTH_MAXCMDS,
4649 .this_id = -1, 4665 .this_id = -1,
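
gdth now reacts to command timeouts through the host template's eh_timed_out hook instead of manipulating per-command timers itself. A minimal sketch of such a handler and how it is wired up; the driver name, the use of host_scribble for private state and the retry limit are hypothetical choices, while the enum values and the .eh_timed_out field are the ones used in the patch:

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Hypothetical per-command bookkeeping kept by the driver. */
struct mydrv_cmndinfo {
	int timeout_count;
};

static enum blk_eh_timer_return mydrv_timed_out(struct scsi_cmnd *scp)
{
	/* One possible place for private state; other drivers use their
	 * own command-info arrays instead. */
	struct mydrv_cmndinfo *ci = (struct mydrv_cmndinfo *)scp->host_scribble;

	/* Give the command a few more timeout periods before handing it
	 * over to the SCSI error handler. */
	if (ci && ++ci->timeout_count < 3)
		return BLK_EH_RESET_TIMER;

	return BLK_EH_NOT_HANDLED;
}

static struct scsi_host_template mydrv_template = {
	.name		= "mydrv",
	.eh_timed_out	= mydrv_timed_out,
	/* remaining fields omitted */
};
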
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index ca92476727cf..1646444e9bd5 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -916,7 +916,7 @@ typedef struct {
916 gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/ 916 gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/
917 dma_addr_t sense_paddr; /* sense dma-addr */ 917 dma_addr_t sense_paddr; /* sense dma-addr */
918 unchar priority; 918 unchar priority;
919 int timeout; 919 int timeout_count; /* # of timeout calls */
920 volatile int wait_for_completion; 920 volatile int wait_for_completion;
921 ushort status; 921 ushort status;
922 ulong32 info; 922 ulong32 info;
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index ce0228e26aec..59349a316e13 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -748,69 +748,3 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
748 } 748 }
749 spin_unlock_irqrestore(&ha->smp_lock, flags); 749 spin_unlock_irqrestore(&ha->smp_lock, flags);
750} 750}
751
752static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id)
753{
754 ulong flags;
755 Scsi_Cmnd *scp;
756 unchar b, t;
757
758 spin_lock_irqsave(&ha->smp_lock, flags);
759
760 for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
761 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
762 if (!cmndinfo->internal_command) {
763 b = scp->device->channel;
764 t = scp->device->id;
765 if (t == (unchar)id && b == (unchar)busnum) {
766 TRACE2(("gdth_stop_timeout(): update_timeout()\n"));
767 cmndinfo->timeout = gdth_update_timeout(scp, 0);
768 }
769 }
770 }
771 spin_unlock_irqrestore(&ha->smp_lock, flags);
772}
773
774static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id)
775{
776 ulong flags;
777 Scsi_Cmnd *scp;
778 unchar b, t;
779
780 spin_lock_irqsave(&ha->smp_lock, flags);
781
782 for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
783 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
784 if (!cmndinfo->internal_command) {
785 b = scp->device->channel;
786 t = scp->device->id;
787 if (t == (unchar)id && b == (unchar)busnum) {
788 TRACE2(("gdth_start_timeout(): update_timeout()\n"));
789 gdth_update_timeout(scp, cmndinfo->timeout);
790 }
791 }
792 }
793 spin_unlock_irqrestore(&ha->smp_lock, flags);
794}
795
796static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout)
797{
798 int oldto;
799
800 oldto = scp->timeout_per_command;
801 scp->timeout_per_command = timeout;
802
803 if (timeout == 0) {
804 del_timer(&scp->eh_timeout);
805 scp->eh_timeout.data = (unsigned long) NULL;
806 scp->eh_timeout.expires = 0;
807 } else {
808 if (scp->eh_timeout.data != (unsigned long) NULL)
809 del_timer(&scp->eh_timeout);
810 scp->eh_timeout.data = (unsigned long) scp;
811 scp->eh_timeout.expires = jiffies + timeout;
812 add_timer(&scp->eh_timeout);
813 }
814
815 return oldto;
816}
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index 45e6fdacf36e..9b900cc9ebe8 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -20,9 +20,6 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
20 ulong64 *paddr); 20 ulong64 *paddr);
21static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr); 21static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr);
22static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id); 22static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
23static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id);
24static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id);
25static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout);
26 23
27#endif 24#endif
28 25
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index fed0b02ebc1d..3fdbb13e80a8 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -464,7 +464,7 @@ static int __scsi_host_match(struct device *dev, void *data)
464struct Scsi_Host *scsi_host_lookup(unsigned short hostnum) 464struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
465{ 465{
466 struct device *cdev; 466 struct device *cdev;
467 struct Scsi_Host *shost = ERR_PTR(-ENXIO); 467 struct Scsi_Host *shost = NULL;
468 468
469 cdev = class_find_device(&shost_class, NULL, &hostnum, 469 cdev = class_find_device(&shost_class, NULL, &hostnum,
470 __scsi_host_match); 470 __scsi_host_match);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 7b1502c0ab6e..87e09f35d3d4 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -756,7 +756,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
756 init_event_struct(evt_struct, 756 init_event_struct(evt_struct,
757 handle_cmd_rsp, 757 handle_cmd_rsp,
758 VIOSRP_SRP_FORMAT, 758 VIOSRP_SRP_FORMAT,
759 cmnd->timeout_per_command/HZ); 759 cmnd->request->timeout/HZ);
760 760
761 evt_struct->cmnd = cmnd; 761 evt_struct->cmnd = cmnd;
762 evt_struct->cmnd_done = done; 762 evt_struct->cmnd_done = done;
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 461331d3dc45..81c16cba5417 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -612,7 +612,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
612 pc->req_xfer = pc->buf_size = scsi_bufflen(cmd); 612 pc->req_xfer = pc->buf_size = scsi_bufflen(cmd);
613 pc->scsi_cmd = cmd; 613 pc->scsi_cmd = cmd;
614 pc->done = done; 614 pc->done = done;
615 pc->timeout = jiffies + cmd->timeout_per_command; 615 pc->timeout = jiffies + cmd->request->timeout;
616 616
617 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) { 617 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
618 printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number); 618 printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e7a3a6554425..d30eb7ba018e 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3670,7 +3670,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
3670 sdev->no_uld_attach = 1; 3670 sdev->no_uld_attach = 1;
3671 } 3671 }
3672 if (ipr_is_vset_device(res)) { 3672 if (ipr_is_vset_device(res)) {
3673 sdev->timeout = IPR_VSET_RW_TIMEOUT; 3673 blk_queue_rq_timeout(sdev->request_queue,
3674 IPR_VSET_RW_TIMEOUT);
3674 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); 3675 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3675 } 3676 }
3676 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)) 3677 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
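
With the per-command timeout field gone from struct scsi_cmnd, a driver that needs a longer limit for particular devices sets it on the device's request queue instead, which is what the ipr change above does for vset devices. A minimal sketch of the same call in a slave_configure() callback; the 120-second value is an arbitrary example, not ipr's constant:

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

static int mydrv_slave_configure(struct scsi_device *sdev)
{
	/* Requests queued to this device now time out after 120 seconds. */
	blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
	return 0;
}
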
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bc9e6ddf41df..ef683f0d2b5a 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3818,7 +3818,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
3818 scb->cmd.dcdb.segment_4G = 0; 3818 scb->cmd.dcdb.segment_4G = 0;
3819 scb->cmd.dcdb.enhanced_sg = 0; 3819 scb->cmd.dcdb.enhanced_sg = 0;
3820 3820
3821 TimeOut = scb->scsi_cmd->timeout_per_command; 3821 TimeOut = scb->scsi_cmd->request->timeout;
3822 3822
3823 if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */ 3823 if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */
3824 if (!scb->sg_len) { 3824 if (!scb->sg_len) {
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 299e075a7b34..da7b67d30d9a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1456,7 +1456,7 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
1456 if (lun == task->sc->device->lun || lun == -1) { 1456 if (lun == task->sc->device->lun || lun == -1) {
1457 debug_scsi("failing in progress sc %p itt 0x%x\n", 1457 debug_scsi("failing in progress sc %p itt 0x%x\n",
1458 task->sc, task->itt); 1458 task->sc, task->itt);
1459 fail_command(conn, task, DID_BUS_BUSY << 16); 1459 fail_command(conn, task, error << 16);
1460 } 1460 }
1461 } 1461 }
1462} 1462}
@@ -1476,12 +1476,12 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
1476 scsi_queue_work(conn->session->host, &conn->xmitwork); 1476 scsi_queue_work(conn->session->host, &conn->xmitwork);
1477} 1477}
1478 1478
1479static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) 1479static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1480{ 1480{
1481 struct iscsi_cls_session *cls_session; 1481 struct iscsi_cls_session *cls_session;
1482 struct iscsi_session *session; 1482 struct iscsi_session *session;
1483 struct iscsi_conn *conn; 1483 struct iscsi_conn *conn;
1484 enum scsi_eh_timer_return rc = EH_NOT_HANDLED; 1484 enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
1485 1485
1486 cls_session = starget_to_session(scsi_target(scmd->device)); 1486 cls_session = starget_to_session(scsi_target(scmd->device));
1487 session = cls_session->dd_data; 1487 session = cls_session->dd_data;
@@ -1494,14 +1494,14 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1494 * We are probably in the middle of iscsi recovery so let 1494 * We are probably in the middle of iscsi recovery so let
1495 * that complete and handle the error. 1495 * that complete and handle the error.
1496 */ 1496 */
1497 rc = EH_RESET_TIMER; 1497 rc = BLK_EH_RESET_TIMER;
1498 goto done; 1498 goto done;
1499 } 1499 }
1500 1500
1501 conn = session->leadconn; 1501 conn = session->leadconn;
1502 if (!conn) { 1502 if (!conn) {
1503 /* In the middle of shutting down */ 1503
1504 rc = EH_RESET_TIMER; 1504 rc = BLK_EH_RESET_TIMER;
1505 goto done; 1505 goto done;
1506 } 1506 }
1507 1507
@@ -1513,20 +1513,21 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1513 */ 1513 */
1514 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + 1514 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
1515 (conn->ping_timeout * HZ), jiffies)) 1515 (conn->ping_timeout * HZ), jiffies))
1516 rc = EH_RESET_TIMER; 1516 rc = BLK_EH_RESET_TIMER;
1517 /* 1517 /*
1518 * if we are about to check the transport then give the command 1518 * if we are about to check the transport then give the command
1519 * more time 1519 * more time
1520 */ 1520 */
1521 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ), 1521 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
1522 jiffies)) 1522 jiffies))
1523 rc = EH_RESET_TIMER; 1523 rc = BLK_EH_RESET_TIMER;
1524 /* if in the middle of checking the transport then give us more time */ 1524 /* if in the middle of checking the transport then give us more time */
1525 if (conn->ping_task) 1525 if (conn->ping_task)
1526 rc = EH_RESET_TIMER; 1526 rc = BLK_EH_RESET_TIMER;
1527done: 1527done:
1528 spin_unlock(&session->lock); 1528 spin_unlock(&session->lock);
1529 debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh"); 1529 debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ?
1530 "timer reset" : "nh");
1530 return rc; 1531 return rc;
1531} 1532}
1532 1533
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 48ee8c7f5bdd..e15501170698 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -294,10 +294,10 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
294 } 294 }
295} 295}
296 296
297static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in, 297static int sas_ata_scr_write(struct ata_link *link, unsigned int sc_reg_in,
298 u32 val) 298 u32 val)
299{ 299{
300 struct domain_device *dev = ap->private_data; 300 struct domain_device *dev = link->ap->private_data;
301 301
302 SAS_DPRINTK("STUB %s\n", __func__); 302 SAS_DPRINTK("STUB %s\n", __func__);
303 switch (sc_reg_in) { 303 switch (sc_reg_in) {
@@ -319,10 +319,10 @@ static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
319 return 0; 319 return 0;
320} 320}
321 321
322static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in, 322static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
323 u32 *val) 323 u32 *val)
324{ 324{
325 struct domain_device *dev = ap->private_data; 325 struct domain_device *dev = link->ap->private_data;
326 326
327 SAS_DPRINTK("STUB %s\n", __func__); 327 SAS_DPRINTK("STUB %s\n", __func__);
328 switch (sc_reg_in) { 328 switch (sc_reg_in) {
@@ -398,7 +398,7 @@ void sas_ata_task_abort(struct sas_task *task)
398 398
399 /* Bounce SCSI-initiated commands to the SCSI EH */ 399 /* Bounce SCSI-initiated commands to the SCSI EH */
400 if (qc->scsicmd) { 400 if (qc->scsicmd) {
401 scsi_req_abort_cmd(qc->scsicmd); 401 blk_abort_request(qc->scsicmd->request);
402 scsi_schedule_eh(qc->scsicmd->device->host); 402 scsi_schedule_eh(qc->scsicmd->device->host);
403 return; 403 return;
404 } 404 }
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index b4f9368f116a..0001374bd6b2 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -55,7 +55,7 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
55int sas_register_ports(struct sas_ha_struct *sas_ha); 55int sas_register_ports(struct sas_ha_struct *sas_ha);
56void sas_unregister_ports(struct sas_ha_struct *sas_ha); 56void sas_unregister_ports(struct sas_ha_struct *sas_ha);
57 57
58enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *); 58enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
59 59
60int sas_init_queue(struct sas_ha_struct *sas_ha); 60int sas_init_queue(struct sas_ha_struct *sas_ha);
61int sas_init_events(struct sas_ha_struct *sas_ha); 61int sas_init_events(struct sas_ha_struct *sas_ha);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index a8e3ef309070..744838780ada 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -673,43 +673,43 @@ out:
673 return; 673 return;
674} 674}
675 675
676enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) 676enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
677{ 677{
678 struct sas_task *task = TO_SAS_TASK(cmd); 678 struct sas_task *task = TO_SAS_TASK(cmd);
679 unsigned long flags; 679 unsigned long flags;
680 680
681 if (!task) { 681 if (!task) {
682 cmd->timeout_per_command /= 2; 682 cmd->request->timeout /= 2;
683 SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n", 683 SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
684 cmd, task, (cmd->timeout_per_command ? 684 cmd, task, (cmd->request->timeout ?
685 "EH_RESET_TIMER" : "EH_NOT_HANDLED")); 685 "BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED"));
686 if (!cmd->timeout_per_command) 686 if (!cmd->request->timeout)
687 return EH_NOT_HANDLED; 687 return BLK_EH_NOT_HANDLED;
688 return EH_RESET_TIMER; 688 return BLK_EH_RESET_TIMER;
689 } 689 }
690 690
691 spin_lock_irqsave(&task->task_state_lock, flags); 691 spin_lock_irqsave(&task->task_state_lock, flags);
692 BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED); 692 BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
693 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 693 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
694 spin_unlock_irqrestore(&task->task_state_lock, flags); 694 spin_unlock_irqrestore(&task->task_state_lock, flags);
695 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", 695 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: "
696 cmd, task); 696 "BLK_EH_HANDLED\n", cmd, task);
697 return EH_HANDLED; 697 return BLK_EH_HANDLED;
698 } 698 }
699 if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) { 699 if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
700 spin_unlock_irqrestore(&task->task_state_lock, flags); 700 spin_unlock_irqrestore(&task->task_state_lock, flags);
701 SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: " 701 SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
702 "EH_RESET_TIMER\n", 702 "BLK_EH_RESET_TIMER\n",
703 cmd, task); 703 cmd, task);
704 return EH_RESET_TIMER; 704 return BLK_EH_RESET_TIMER;
705 } 705 }
706 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 706 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
707 spin_unlock_irqrestore(&task->task_state_lock, flags); 707 spin_unlock_irqrestore(&task->task_state_lock, flags);
708 708
709 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n", 709 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n",
710 cmd, task); 710 cmd, task);
711 711
712 return EH_NOT_HANDLED; 712 return BLK_EH_NOT_HANDLED;
713} 713}
714 714
715int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 715int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
@@ -1039,7 +1039,7 @@ void sas_task_abort(struct sas_task *task)
1039 return; 1039 return;
1040 } 1040 }
1041 1041
1042 scsi_req_abort_cmd(sc); 1042 blk_abort_request(sc->request);
1043 scsi_schedule_eh(sc->device->host); 1043 scsi_schedule_eh(sc->device->host);
1044} 1044}
1045 1045
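
Both libsas paths above now bounce an aborted SCSI command to the error handler through the block layer instead of the removed scsi_req_abort_cmd() helper. A compact sketch of the two-step sequence; the wrapper name is hypothetical, the two calls are the ones used in the patch:

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>

static void mydrv_bounce_to_eh(struct scsi_cmnd *sc)
{
	/* Mark the underlying request as timed out at the block layer ... */
	blk_abort_request(sc->request);
	/* ... and wake the SCSI error handler on this host. */
	scsi_schedule_eh(sc->device->host);
}
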
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 97b763378e7d..afe1de998763 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -1167,7 +1167,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
1167 * cmd has not been completed within the timeout period. 1167 * cmd has not been completed within the timeout period.
1168 */ 1168 */
1169static enum 1169static enum
1170scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) 1170blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
1171{ 1171{
1172 struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr; 1172 struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
1173 struct megasas_instance *instance; 1173 struct megasas_instance *instance;
@@ -1175,7 +1175,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
1175 1175
1176 if (time_after(jiffies, scmd->jiffies_at_alloc + 1176 if (time_after(jiffies, scmd->jiffies_at_alloc +
1177 (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) { 1177 (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
1178 return EH_NOT_HANDLED; 1178 return BLK_EH_NOT_HANDLED;
1179 } 1179 }
1180 1180
1181 instance = cmd->instance; 1181 instance = cmd->instance;
@@ -1189,7 +1189,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
1189 1189
1190 spin_unlock_irqrestore(instance->host->host_lock, flags); 1190 spin_unlock_irqrestore(instance->host->host_lock, flags);
1191 } 1191 }
1192 return EH_RESET_TIMER; 1192 return BLK_EH_RESET_TIMER;
1193} 1193}
1194 1194
1195/** 1195/**
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index c57c94c0ffd2..3b7240e40819 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4170,8 +4170,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
4170 ** 4170 **
4171 **---------------------------------------------------- 4171 **----------------------------------------------------
4172 */ 4172 */
4173 if (np->settle_time && cmd->timeout_per_command >= HZ) { 4173 if (np->settle_time && cmd->request->timeout >= HZ) {
4174 u_long tlimit = jiffies + cmd->timeout_per_command - HZ; 4174 u_long tlimit = jiffies + cmd->request->timeout - HZ;
4175 if (time_after(np->settle_time, tlimit)) 4175 if (time_after(np->settle_time, tlimit))
4176 np->settle_time = tlimit; 4176 np->settle_time = tlimit;
4177 } 4177 }
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 37f9ba0cd798..b6cd12b2e996 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2845,7 +2845,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2845 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 2845 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2846 2846
2847 /* Set ISP command timeout. */ 2847 /* Set ISP command timeout. */
2848 pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ); 2848 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
2849 2849
2850 /* Set device target ID and LUN */ 2850 /* Set device target ID and LUN */
2851 pkt->lun = SCSI_LUN_32(cmd); 2851 pkt->lun = SCSI_LUN_32(cmd);
@@ -3114,7 +3114,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3114 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 3114 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3115 3115
3116 /* Set ISP command timeout. */ 3116 /* Set ISP command timeout. */
3117 pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ); 3117 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
3118 3118
3119 /* Set device target ID and LUN */ 3119 /* Set device target ID and LUN */
3120 pkt->lun = SCSI_LUN_32(cmd); 3120 pkt->lun = SCSI_LUN_32(cmd);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 45e7dcb4b34d..0ddfe7106b3b 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -292,10 +292,11 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
292 valid = 0; 292 valid = 0;
293 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) 293 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
294 valid = 1; 294 valid = 1;
295 else if (start == (FA_BOOT_CODE_ADDR*4) || 295 else if (start == (ha->flt_region_boot * 4) ||
296 start == (FA_RISC_CODE_ADDR*4)) 296 start == (ha->flt_region_fw * 4))
297 valid = 1; 297 valid = 1;
298 else if (IS_QLA25XX(ha) && start == (FA_VPD_NVRAM_ADDR*4)) 298 else if (IS_QLA25XX(ha) &&
299 start == (ha->flt_region_vpd_nvram * 4))
299 valid = 1; 300 valid = 1;
300 if (!valid) { 301 if (!valid) {
301 qla_printk(KERN_WARNING, ha, 302 qla_printk(KERN_WARNING, ha,
@@ -1065,6 +1066,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1065 pfc_host_stat->dumped_frames = stats->dumped_frames; 1066 pfc_host_stat->dumped_frames = stats->dumped_frames;
1066 pfc_host_stat->nos_count = stats->nos_rcvd; 1067 pfc_host_stat->nos_count = stats->nos_rcvd;
1067 } 1068 }
1069 pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
1070 pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
1068 1071
1069done_free: 1072done_free:
1070 dma_pool_free(ha->s_dma_pool, stats, stats_dma); 1073 dma_pool_free(ha->s_dma_pool, stats, stats_dma);
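
The two new fc_host statistics above convert byte counters to megabytes with a right shift: shifting by 20 divides by 2^20 = 1,048,576, so the values are reported in whole MiB, rounded down. A trivial standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bytes = 3 * 1048576ULL + 12345;	/* a little over 3 MiB */

	printf("%llu\n", (unsigned long long)(bytes >> 20));	/* prints 3 */
	return 0;
}
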
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 94a720eabfd8..83c819216771 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -25,7 +25,6 @@
25#include <linux/firmware.h> 25#include <linux/firmware.h>
26#include <linux/aer.h> 26#include <linux/aer.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/semaphore.h>
29 28
30#include <scsi/scsi.h> 29#include <scsi/scsi.h>
31#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
@@ -2157,6 +2156,8 @@ struct qla_chip_state_84xx {
2157 2156
2158struct qla_statistics { 2157struct qla_statistics {
2159 uint32_t total_isp_aborts; 2158 uint32_t total_isp_aborts;
2159 uint64_t input_bytes;
2160 uint64_t output_bytes;
2160}; 2161};
2161 2162
2162/* 2163/*
@@ -2238,6 +2239,7 @@ typedef struct scsi_qla_host {
2238#define FCPORT_UPDATE_NEEDED 27 2239#define FCPORT_UPDATE_NEEDED 27
2239#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */ 2240#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
2240#define UNLOADING 29 2241#define UNLOADING 29
2242#define NPIV_CONFIG_NEEDED 30
2241 2243
2242 uint32_t device_flags; 2244 uint32_t device_flags;
2243#define DFLG_LOCAL_DEVICES BIT_0 2245#define DFLG_LOCAL_DEVICES BIT_0
@@ -2507,7 +2509,6 @@ typedef struct scsi_qla_host {
2507 uint64_t fce_wr, fce_rd; 2509 uint64_t fce_wr, fce_rd;
2508 struct mutex fce_mutex; 2510 struct mutex fce_mutex;
2509 2511
2510 uint32_t hw_event_start;
2511 uint32_t hw_event_ptr; 2512 uint32_t hw_event_ptr;
2512 uint32_t hw_event_pause_errors; 2513 uint32_t hw_event_pause_errors;
2513 2514
@@ -2553,6 +2554,14 @@ typedef struct scsi_qla_host {
2553 uint32_t fdt_unprotect_sec_cmd; 2554 uint32_t fdt_unprotect_sec_cmd;
2554 uint32_t fdt_protect_sec_cmd; 2555 uint32_t fdt_protect_sec_cmd;
2555 2556
2557 uint32_t flt_region_flt;
2558 uint32_t flt_region_fdt;
2559 uint32_t flt_region_boot;
2560 uint32_t flt_region_fw;
2561 uint32_t flt_region_vpd_nvram;
2562 uint32_t flt_region_hw_event;
2563 uint32_t flt_region_npiv_conf;
2564
2556 /* Needed for BEACON */ 2565 /* Needed for BEACON */
2557 uint16_t beacon_blink_led; 2566 uint16_t beacon_blink_led;
2558 uint8_t beacon_color_state; 2567 uint8_t beacon_color_state;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index cf194517400d..d1d14202575a 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -789,14 +789,23 @@ struct device_reg_24xx {
789#define FA_RISC_CODE_ADDR 0x20000 789#define FA_RISC_CODE_ADDR 0x20000
790#define FA_RISC_CODE_SEGMENTS 2 790#define FA_RISC_CODE_SEGMENTS 2
791 791
792#define FA_FLASH_DESCR_ADDR_24 0x11000
793#define FA_FLASH_LAYOUT_ADDR_24 0x11400
794#define FA_NPIV_CONF0_ADDR_24 0x16000
795#define FA_NPIV_CONF1_ADDR_24 0x17000
796
792#define FA_FW_AREA_ADDR 0x40000 797#define FA_FW_AREA_ADDR 0x40000
793#define FA_VPD_NVRAM_ADDR 0x48000 798#define FA_VPD_NVRAM_ADDR 0x48000
794#define FA_FEATURE_ADDR 0x4C000 799#define FA_FEATURE_ADDR 0x4C000
795#define FA_FLASH_DESCR_ADDR 0x50000 800#define FA_FLASH_DESCR_ADDR 0x50000
801#define FA_FLASH_LAYOUT_ADDR 0x50400
796#define FA_HW_EVENT0_ADDR 0x54000 802#define FA_HW_EVENT0_ADDR 0x54000
797#define FA_HW_EVENT1_ADDR 0x54200 803#define FA_HW_EVENT1_ADDR 0x54400
798#define FA_HW_EVENT_SIZE 0x200 804#define FA_HW_EVENT_SIZE 0x200
799#define FA_HW_EVENT_ENTRY_SIZE 4 805#define FA_HW_EVENT_ENTRY_SIZE 4
806#define FA_NPIV_CONF0_ADDR 0x5C000
807#define FA_NPIV_CONF1_ADDR 0x5D000
808
800/* 809/*
801 * Flash Error Log Event Codes. 810 * Flash Error Log Event Codes.
802 */ 811 */
@@ -806,10 +815,6 @@ struct device_reg_24xx {
806#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023 815#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023
807#define HW_EVENT_FLASH_FW_ERR 0xF024 816#define HW_EVENT_FLASH_FW_ERR 0xF024
808 817
809#define FA_BOOT_LOG_ADDR 0x58000
810#define FA_FW_DUMP0_ADDR 0x60000
811#define FA_FW_DUMP1_ADDR 0x70000
812
813 uint32_t flash_data; /* Flash/NVRAM BIOS data. */ 818 uint32_t flash_data; /* Flash/NVRAM BIOS data. */
814 819
815 uint32_t ctrl_status; /* Control/Status. */ 820 uint32_t ctrl_status; /* Control/Status. */
@@ -1203,6 +1208,62 @@ struct qla_fdt_layout {
1203 uint8_t unused2[65]; 1208 uint8_t unused2[65];
1204}; 1209};
1205 1210
1211/* Flash Layout Table ********************************************************/
1212
1213struct qla_flt_location {
1214 uint8_t sig[4];
1215 uint32_t start_lo;
1216 uint32_t start_hi;
1217 uint16_t unused;
1218 uint16_t checksum;
1219};
1220
1221struct qla_flt_header {
1222 uint16_t version;
1223 uint16_t length;
1224 uint16_t checksum;
1225 uint16_t unused;
1226};
1227
1228#define FLT_REG_FW 0x01
1229#define FLT_REG_BOOT_CODE 0x07
1230#define FLT_REG_VPD_0 0x14
1231#define FLT_REG_NVRAM_0 0x15
1232#define FLT_REG_VPD_1 0x16
1233#define FLT_REG_NVRAM_1 0x17
1234#define FLT_REG_FDT 0x1a
1235#define FLT_REG_FLT 0x1c
1236#define FLT_REG_HW_EVENT_0 0x1d
1237#define FLT_REG_HW_EVENT_1 0x1f
1238#define FLT_REG_NPIV_CONF_0 0x29
1239#define FLT_REG_NPIV_CONF_1 0x2a
1240
1241struct qla_flt_region {
1242 uint32_t code;
1243 uint32_t size;
1244 uint32_t start;
1245 uint32_t end;
1246};
1247
1248/* Flash NPIV Configuration Table ********************************************/
1249
1250struct qla_npiv_header {
1251 uint8_t sig[2];
1252 uint16_t version;
1253 uint16_t entries;
1254 uint16_t unused[4];
1255 uint16_t checksum;
1256};
1257
1258struct qla_npiv_entry {
1259 uint16_t flags;
1260 uint16_t vf_id;
1261 uint16_t qos;
1262 uint16_t unused1;
1263 uint8_t port_name[WWN_SIZE];
1264 uint8_t node_name[WWN_SIZE];
1265};
1266
1206/* 84XX Support **************************************************************/ 1267/* 84XX Support **************************************************************/
1207 1268
1208#define MBA_ISP84XX_ALERT 0x800f /* Alert Notification. */ 1269#define MBA_ISP84XX_ALERT 0x800f /* Alert Notification. */
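Note: the new FLT and NPIV structures are validated elsewhere in this series by summing the data as little-endian 16-bit words and requiring the result to be zero, so the checksum field effectively holds the negated sum of everything else. A user-space sketch of that check against a reduced copy of struct qla_flt_location; the field values are hypothetical and a little-endian host is assumed:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct flt_location_sketch {            /* mirrors struct qla_flt_location */
    uint8_t  sig[4];
    uint32_t start_lo;
    uint32_t start_hi;
    uint16_t unused;
    uint16_t checksum;
};

/* sum the buffer as little-endian 16-bit words; consistent data sums to zero */
static uint16_t flt_sum16(const void *buf, size_t len)
{
    const uint8_t *p = buf;
    uint16_t sum = 0;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint16_t)(p[i] | (p[i + 1] << 8));
    return sum;
}

int main(void)
{
    struct flt_location_sketch fltl;

    memset(&fltl, 0, sizeof(fltl));
    memcpy(fltl.sig, "QFLT", 4);
    fltl.start_lo = 0x11400;            /* hypothetical layout-table byte address */

    /* the checksum word is whatever makes the whole structure sum to zero */
    fltl.checksum = (uint16_t)(0 - flt_sum16(&fltl, sizeof(fltl)));

    printf("FLTL %s\n",
           flt_sum16(&fltl, sizeof(fltl)) == 0 ? "consistent" : "inconsistent");
    return 0;
}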
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0b156735e9a6..753dbe6cce6e 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -313,9 +313,11 @@ extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
313extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t, 313extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
314 uint16_t, uint16_t); 314 uint16_t, uint16_t);
315 315
316extern void qla2xxx_get_flash_info(scsi_qla_host_t *); 316extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
317extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t); 317extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
318 318
319extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
320
319/* 321/*
320 * Global Function Prototypes in qla_dbg.c source file. 322 * Global Function Prototypes in qla_dbg.c source file.
321 */ 323 */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index ee89ddd64aae..a470f2d3270d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -83,6 +83,13 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
83 83
84 ha->isp_ops->reset_chip(ha); 84 ha->isp_ops->reset_chip(ha);
85 85
86 rval = qla2xxx_get_flash_info(ha);
87 if (rval) {
88 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
89 ha->host_no));
90 return (rval);
91 }
92
86 ha->isp_ops->get_flash_version(ha, ha->request_ring); 93 ha->isp_ops->get_flash_version(ha, ha->request_ring);
87 94
88 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 95 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
@@ -109,7 +116,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
109 rval = qla2x00_setup_chip(ha); 116 rval = qla2x00_setup_chip(ha);
110 if (rval) 117 if (rval)
111 return (rval); 118 return (rval);
112 qla2xxx_get_flash_info(ha);
113 } 119 }
114 if (IS_QLA84XX(ha)) { 120 if (IS_QLA84XX(ha)) {
115 ha->cs84xx = qla84xx_get_chip(ha); 121 ha->cs84xx = qla84xx_get_chip(ha);
@@ -2016,7 +2022,7 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
2016 DEBUG3(printk("%s: exiting normally\n", __func__)); 2022 DEBUG3(printk("%s: exiting normally\n", __func__));
2017 } 2023 }
2018 2024
2019 /* Restore state if a resync event occured during processing */ 2025 /* Restore state if a resync event occurred during processing */
2020 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2026 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
2021 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2027 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2022 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 2028 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
@@ -2561,7 +2567,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2561 rval = QLA_SUCCESS; 2567 rval = QLA_SUCCESS;
2562 2568
2563 /* Try GID_PT to get device list, else GAN. */ 2569 /* Try GID_PT to get device list, else GAN. */
2564 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_ATOMIC); 2570 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
2565 if (!swl) { 2571 if (!swl) {
2566 /*EMPTY*/ 2572 /*EMPTY*/
2567 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback " 2573 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
@@ -3751,7 +3757,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3751 rval = QLA_SUCCESS; 3757 rval = QLA_SUCCESS;
3752 3758
3753 segments = FA_RISC_CODE_SEGMENTS; 3759 segments = FA_RISC_CODE_SEGMENTS;
3754 faddr = FA_RISC_CODE_ADDR; 3760 faddr = ha->flt_region_fw;
3755 dcode = (uint32_t *)ha->request_ring; 3761 dcode = (uint32_t *)ha->request_ring;
3756 *srisc_addr = 0; 3762 *srisc_addr = 0;
3757 3763
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 92fafbdbbaab..e90afad120ee 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -52,7 +52,7 @@ to_qla_parent(scsi_qla_host_t *ha)
52 * @ha: HA context 52 * @ha: HA context
53 * @ha_locked: is function called with the hardware lock 53 * @ha_locked: is function called with the hardware lock
54 * 54 *
55 * Returns non-zero if a failure occured, else zero. 55 * Returns non-zero if a failure occurred, else zero.
56 */ 56 */
57static inline int 57static inline int
58qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked) 58qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d57669aa4615..85bc0a48598b 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -21,17 +21,22 @@ static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
21 * Returns the proper CF_* direction based on CDB. 21 * Returns the proper CF_* direction based on CDB.
22 */ 22 */
23static inline uint16_t 23static inline uint16_t
24qla2x00_get_cmd_direction(struct scsi_cmnd *cmd) 24qla2x00_get_cmd_direction(srb_t *sp)
25{ 25{
26 uint16_t cflags; 26 uint16_t cflags;
27 27
28 cflags = 0; 28 cflags = 0;
29 29
30 /* Set transfer direction */ 30 /* Set transfer direction */
31 if (cmd->sc_data_direction == DMA_TO_DEVICE) 31 if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
32 cflags = CF_WRITE; 32 cflags = CF_WRITE;
33 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 33 sp->fcport->ha->qla_stats.output_bytes +=
34 scsi_bufflen(sp->cmd);
35 } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
34 cflags = CF_READ; 36 cflags = CF_READ;
37 sp->fcport->ha->qla_stats.input_bytes +=
38 scsi_bufflen(sp->cmd);
39 }
35 return (cflags); 40 return (cflags);
36} 41}
37 42
@@ -169,7 +174,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
169 174
170 ha = sp->ha; 175 ha = sp->ha;
171 176
172 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd)); 177 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
173 178
174 /* Three DSDs are available in the Command Type 2 IOCB */ 179 /* Three DSDs are available in the Command Type 2 IOCB */
175 avail_dsds = 3; 180 avail_dsds = 3;
@@ -228,7 +233,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
228 233
229 ha = sp->ha; 234 ha = sp->ha;
230 235
231 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd)); 236 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
232 237
233 /* Two DSDs are available in the Command Type 3 IOCB */ 238 /* Two DSDs are available in the Command Type 3 IOCB */
234 avail_dsds = 2; 239 avail_dsds = 2;
@@ -262,7 +267,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
262 * qla2x00_start_scsi() - Send a SCSI command to the ISP 267 * qla2x00_start_scsi() - Send a SCSI command to the ISP
263 * @sp: command to send to the ISP 268 * @sp: command to send to the ISP
264 * 269 *
265 * Returns non-zero if a failure occured, else zero. 270 * Returns non-zero if a failure occurred, else zero.
266 */ 271 */
267int 272int
268qla2x00_start_scsi(srb_t *sp) 273qla2x00_start_scsi(srb_t *sp)
@@ -407,7 +412,7 @@ queuing_error:
407 * 412 *
408 * Can be called from both normal and interrupt context. 413 * Can be called from both normal and interrupt context.
409 * 414 *
410 * Returns non-zero if a failure occured, else zero. 415 * Returns non-zero if a failure occurred, else zero.
411 */ 416 */
412int 417int
413__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, 418__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
@@ -625,12 +630,17 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
625 ha = sp->ha; 630 ha = sp->ha;
626 631
627 /* Set transfer direction */ 632 /* Set transfer direction */
628 if (cmd->sc_data_direction == DMA_TO_DEVICE) 633 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
629 cmd_pkt->task_mgmt_flags = 634 cmd_pkt->task_mgmt_flags =
630 __constant_cpu_to_le16(TMF_WRITE_DATA); 635 __constant_cpu_to_le16(TMF_WRITE_DATA);
631 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 636 sp->fcport->ha->qla_stats.output_bytes +=
637 scsi_bufflen(sp->cmd);
638 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
632 cmd_pkt->task_mgmt_flags = 639 cmd_pkt->task_mgmt_flags =
633 __constant_cpu_to_le16(TMF_READ_DATA); 640 __constant_cpu_to_le16(TMF_READ_DATA);
641 sp->fcport->ha->qla_stats.input_bytes +=
642 scsi_bufflen(sp->cmd);
643 }
634 644
635 /* One DSD is available in the Command Type 3 IOCB */ 645 /* One DSD is available in the Command Type 3 IOCB */
636 avail_dsds = 1; 646 avail_dsds = 1;
@@ -666,7 +676,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
666 * qla24xx_start_scsi() - Send a SCSI command to the ISP 676 * qla24xx_start_scsi() - Send a SCSI command to the ISP
667 * @sp: command to send to the ISP 677 * @sp: command to send to the ISP
668 * 678 *
669 * Returns non-zero if a failure occured, else zero. 679 * Returns non-zero if a failure occurred, else zero.
670 */ 680 */
671int 681int
672qla24xx_start_scsi(srb_t *sp) 682qla24xx_start_scsi(srb_t *sp)
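Note: qla2x00_get_cmd_direction() now takes the srb so it can reach the host statistics and, depending on the DMA direction, add scsi_bufflen() to output_bytes or input_bytes; qla24xx_build_scsi_iocbs() gains the same accounting. A user-space sketch of the pattern with simplified stand-in types:

#include <stdint.h>
#include <stdio.h>

enum dma_dir { DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_NONE };

struct host_stats {                     /* stand-in for struct qla_statistics */
    uint64_t input_bytes;
    uint64_t output_bytes;
};

/* classify the transfer and account its length against the matching counter,
 * as the reworked qla2x00_get_cmd_direction() does with scsi_bufflen() */
static const char *account_command(struct host_stats *st,
                                   enum dma_dir dir, uint32_t len)
{
    if (dir == DMA_TO_DEVICE) {
        st->output_bytes += len;
        return "CF_WRITE";
    } else if (dir == DMA_FROM_DEVICE) {
        st->input_bytes += len;
        return "CF_READ";
    }
    return "none";
}

int main(void)
{
    struct host_stats st = { 0, 0 };

    printf("%s\n", account_command(&st, DMA_TO_DEVICE, 4096));    /* a 4 KiB write */
    printf("%s\n", account_command(&st, DMA_FROM_DEVICE, 65536)); /* a 64 KiB read */
    printf("in=%llu out=%llu\n",
           (unsigned long long)st.input_bytes,
           (unsigned long long)st.output_bytes);
    return 0;
}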
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 45a3b93eed57..fc4bfa7f839c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -391,9 +391,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
391 break; 391 break;
392 392
393 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 393 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
394 DEBUG2(printk("scsi(%ld): LIP occured (%x).\n", ha->host_no, 394 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
395 mb[1])); 395 mb[1]));
396 qla_printk(KERN_INFO, ha, "LIP occured (%x).\n", mb[1]); 396 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
397 397
398 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 398 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
399 atomic_set(&ha->loop_state, LOOP_DOWN); 399 atomic_set(&ha->loop_state, LOOP_DOWN);
@@ -460,7 +460,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
460 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", 460 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
461 ha->host_no, mb[1])); 461 ha->host_no, mb[1]));
462 qla_printk(KERN_INFO, ha, 462 qla_printk(KERN_INFO, ha,
463 "LIP reset occured (%x).\n", mb[1]); 463 "LIP reset occurred (%x).\n", mb[1]);
464 464
465 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 465 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
466 atomic_set(&ha->loop_state, LOOP_DOWN); 466 atomic_set(&ha->loop_state, LOOP_DOWN);
@@ -543,7 +543,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
543 543
544 case MBA_PORT_UPDATE: /* Port database update */ 544 case MBA_PORT_UPDATE: /* Port database update */
545 /* 545 /*
546 * If PORT UPDATE is global (recieved LIP_OCCURED/LIP_RESET 546 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
547 * event etc. earlier indicating loop is down) then process 547 * event etc. earlier indicating loop is down) then process
548 * it. Otherwise ignore it and Wait for RSCN to come in. 548 * it. Otherwise ignore it and Wait for RSCN to come in.
549 */ 549 */
@@ -589,7 +589,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
589 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n", 589 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
590 ha->host_no, mb[1], mb[2], mb[3])); 590 ha->host_no, mb[1], mb[2], mb[3]));
591 591
592 rscn_entry = (mb[1] << 16) | mb[2]; 592 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
593 host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) | 593 host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
594 ha->d_id.b.al_pa; 594 ha->d_id.b.al_pa;
595 if (rscn_entry == host_pid) { 595 if (rscn_entry == host_pid) {
@@ -600,6 +600,8 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
600 break; 600 break;
601 } 601 }
602 602
603 /* Ignore reserved bits from RSCN-payload. */
604 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
603 rscn_queue_index = ha->rscn_in_ptr + 1; 605 rscn_queue_index = ha->rscn_in_ptr + 1;
604 if (rscn_queue_index == MAX_RSCN_COUNT) 606 if (rscn_queue_index == MAX_RSCN_COUNT)
605 rscn_queue_index = 0; 607 rscn_queue_index = 0;
@@ -1060,8 +1062,9 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1060 resid = resid_len; 1062 resid = resid_len;
1061 /* Use F/W calculated residual length. */ 1063 /* Use F/W calculated residual length. */
1062 if (IS_FWI2_CAPABLE(ha)) { 1064 if (IS_FWI2_CAPABLE(ha)) {
1063 if (scsi_status & SS_RESIDUAL_UNDER && 1065 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1064 resid != fw_resid_len) { 1066 lscsi_status = 0;
1067 } else if (resid != fw_resid_len) {
1065 scsi_status &= ~SS_RESIDUAL_UNDER; 1068 scsi_status &= ~SS_RESIDUAL_UNDER;
1066 lscsi_status = 0; 1069 lscsi_status = 0;
1067 } 1070 }
@@ -1834,7 +1837,6 @@ clear_risc_ints:
1834 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT); 1837 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
1835 } 1838 }
1836 spin_unlock_irq(&ha->hardware_lock); 1839 spin_unlock_irq(&ha->hardware_lock);
1837 ha->isp_ops->enable_intrs(ha);
1838 1840
1839fail: 1841fail:
1840 return ret; 1842 return ret;
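Note: the RSCN change masks mb[1] before using it — only the domain byte is compared against the host's own port ID, and the queued entry keeps just the low 10 bits so reserved bits in the payload no longer leak into the RSCN queue. A small sketch contrasting the masked values with the old unmasked one; the mailbox values are made up:

#include <stdint.h>
#include <stdio.h>

/* 24-bit id used for comparison against the host's own port id:
 * only the domain byte of mb[1] is meaningful here */
static uint32_t rscn_compare_id(uint16_t mb1, uint16_t mb2)
{
    return ((uint32_t)(mb1 & 0xff) << 16) | mb2;
}

/* entry that gets queued: bits above the low 10 bits of mb[1] are dropped,
 * as the added "Ignore reserved bits" hunk does */
static uint32_t rscn_queue_entry(uint16_t mb1, uint16_t mb2)
{
    return ((uint32_t)(mb1 & 0x3ff) << 16) | mb2;
}

int main(void)
{
    uint16_t mb1 = 0x8c12, mb2 = 0x3456;    /* reserved bits set in mb[1] */

    printf("old unmasked = %08x\n", (unsigned)(((uint32_t)mb1 << 16) | mb2)); /* 8c123456 */
    printf("compare id   = %06x\n", (unsigned)rscn_compare_id(mb1, mb2));     /* 123456 */
    printf("queued id    = %06x\n", (unsigned)rscn_queue_entry(mb1, mb2));    /* 123456 */
    return 0;
}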
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 813bc7784c0a..36bc6851e23d 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -233,7 +233,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
233 DEBUG2_3_11(printk("%s(%ld): timeout schedule " 233 DEBUG2_3_11(printk("%s(%ld): timeout schedule "
234 "isp_abort_needed.\n", __func__, ha->host_no)); 234 "isp_abort_needed.\n", __func__, ha->host_no));
235 qla_printk(KERN_WARNING, ha, 235 qla_printk(KERN_WARNING, ha,
236 "Mailbox command timeout occured. Scheduling ISP " 236 "Mailbox command timeout occurred. Scheduling ISP "
237 "abort.\n"); 237 "abort.\n");
238 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 238 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
239 qla2xxx_wake_dpc(ha); 239 qla2xxx_wake_dpc(ha);
@@ -244,7 +244,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
244 DEBUG2_3_11(printk("%s(%ld): timeout calling " 244 DEBUG2_3_11(printk("%s(%ld): timeout calling "
245 "abort_isp\n", __func__, ha->host_no)); 245 "abort_isp\n", __func__, ha->host_no));
246 qla_printk(KERN_WARNING, ha, 246 qla_printk(KERN_WARNING, ha,
247 "Mailbox command timeout occured. Issuing ISP " 247 "Mailbox command timeout occurred. Issuing ISP "
248 "abort.\n"); 248 "abort.\n");
249 249
250 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 250 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
@@ -1995,7 +1995,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
1995 char *pmap; 1995 char *pmap;
1996 dma_addr_t pmap_dma; 1996 dma_addr_t pmap_dma;
1997 1997
1998 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &pmap_dma); 1998 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
1999 if (pmap == NULL) { 1999 if (pmap == NULL) {
2000 DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****", 2000 DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****",
2001 __func__, ha->host_no)); 2001 __func__, ha->host_no));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 26afe44265c7..3433441b956a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1517,6 +1517,7 @@ qla2xxx_scan_start(struct Scsi_Host *shost)
1517 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 1517 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
1518 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 1518 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
1519 set_bit(RSCN_UPDATE, &ha->dpc_flags); 1519 set_bit(RSCN_UPDATE, &ha->dpc_flags);
1520 set_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
1520} 1521}
1521 1522
1522static int 1523static int
@@ -1663,8 +1664,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1663 ha->gid_list_info_size = 8; 1664 ha->gid_list_info_size = 8;
1664 ha->optrom_size = OPTROM_SIZE_25XX; 1665 ha->optrom_size = OPTROM_SIZE_25XX;
1665 ha->isp_ops = &qla25xx_isp_ops; 1666 ha->isp_ops = &qla25xx_isp_ops;
1666 ha->hw_event_start = PCI_FUNC(pdev->devfn) ?
1667 FA_HW_EVENT1_ADDR: FA_HW_EVENT0_ADDR;
1668 } 1667 }
1669 host->can_queue = ha->request_q_length + 128; 1668 host->can_queue = ha->request_q_length + 128;
1670 1669
@@ -1740,6 +1739,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1740 if (ret) 1739 if (ret)
1741 goto probe_failed; 1740 goto probe_failed;
1742 1741
1742 ha->isp_ops->enable_intrs(ha);
1743
1743 scsi_scan_host(host); 1744 scsi_scan_host(host);
1744 1745
1745 qla2x00_alloc_sysfs_attr(ha); 1746 qla2x00_alloc_sysfs_attr(ha);
@@ -2431,6 +2432,12 @@ qla2x00_do_dpc(void *data)
2431 ha->host_no)); 2432 ha->host_no));
2432 } 2433 }
2433 2434
2435 if (test_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags) &&
2436 atomic_read(&ha->loop_state) == LOOP_READY) {
2437 clear_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
2438 qla2xxx_flash_npiv_conf(ha);
2439 }
2440
2434 if (!ha->interrupts_on) 2441 if (!ha->interrupts_on)
2435 ha->isp_ops->enable_intrs(ha); 2442 ha->isp_ops->enable_intrs(ha);
2436 2443
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 1bca74474935..90a13211717f 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -543,23 +543,198 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
543 } 543 }
544} 544}
545 545
546void 546static int
547qla2xxx_get_flash_info(scsi_qla_host_t *ha) 547qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
548{
549 const char *loc, *locations[] = { "DEF", "PCI" };
550 uint32_t pcihdr, pcids;
551 uint32_t *dcode;
552 uint8_t *buf, *bcode, last_image;
553 uint16_t cnt, chksum, *wptr;
554 struct qla_flt_location *fltl;
555
556 /*
557 * FLT-location structure resides after the last PCI region.
558 */
559
560 /* Begin with sane defaults. */
561 loc = locations[0];
562 *start = IS_QLA24XX_TYPE(ha) ? FA_FLASH_LAYOUT_ADDR_24:
563 FA_FLASH_LAYOUT_ADDR;
564
565 /* Begin with first PCI expansion ROM header. */
566 buf = (uint8_t *)ha->request_ring;
567 dcode = (uint32_t *)ha->request_ring;
568 pcihdr = 0;
569 last_image = 1;
570 do {
571 /* Verify PCI expansion ROM header. */
572 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20);
573 bcode = buf + (pcihdr % 4);
574 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
575 goto end;
576
577 /* Locate PCI data structure. */
578 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
579 qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20);
580 bcode = buf + (pcihdr % 4);
581
582 /* Validate signature of PCI data structure. */
583 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
584 bcode[0x2] != 'I' || bcode[0x3] != 'R')
585 goto end;
586
587 last_image = bcode[0x15] & BIT_7;
588
589 /* Locate next PCI expansion ROM. */
590 pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
591 } while (!last_image);
592
593 /* Now verify FLT-location structure. */
594 fltl = (struct qla_flt_location *)ha->request_ring;
595 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2,
596 sizeof(struct qla_flt_location) >> 2);
597 if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
598 fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
599 goto end;
600
601 wptr = (uint16_t *)ha->request_ring;
602 cnt = sizeof(struct qla_flt_location) >> 1;
603 for (chksum = 0; cnt; cnt--)
604 chksum += le16_to_cpu(*wptr++);
605 if (chksum) {
606 qla_printk(KERN_ERR, ha,
607 "Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
608 qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location));
609 return QLA_FUNCTION_FAILED;
610 }
611
612 /* Good data. Use specified location. */
613 loc = locations[1];
614 *start = le16_to_cpu(fltl->start_hi) << 16 |
615 le16_to_cpu(fltl->start_lo);
616end:
617 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
618 return QLA_SUCCESS;
619}
620
621static void
622qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr)
623{
624 const char *loc, *locations[] = { "DEF", "FLT" };
625 uint16_t *wptr;
626 uint16_t cnt, chksum;
627 uint32_t start;
628 struct qla_flt_header *flt;
629 struct qla_flt_region *region;
630
631 ha->flt_region_flt = flt_addr;
632 wptr = (uint16_t *)ha->request_ring;
633 flt = (struct qla_flt_header *)ha->request_ring;
634 region = (struct qla_flt_region *)&flt[1];
635 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
636 flt_addr << 2, OPTROM_BURST_SIZE);
637 if (*wptr == __constant_cpu_to_le16(0xffff))
638 goto no_flash_data;
639 if (flt->version != __constant_cpu_to_le16(1)) {
640 DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: "
641 "version=0x%x length=0x%x checksum=0x%x.\n",
642 le16_to_cpu(flt->version), le16_to_cpu(flt->length),
643 le16_to_cpu(flt->checksum)));
644 goto no_flash_data;
645 }
646
647 cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
648 for (chksum = 0; cnt; cnt--)
649 chksum += le16_to_cpu(*wptr++);
650 if (chksum) {
651 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: "
652 "version=0x%x length=0x%x checksum=0x%x.\n",
653 le16_to_cpu(flt->version), le16_to_cpu(flt->length),
654 chksum));
655 goto no_flash_data;
656 }
657
658 loc = locations[1];
659 cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
660 for ( ; cnt; cnt--, region++) {
661 /* Store addresses as DWORD offsets. */
662 start = le32_to_cpu(region->start) >> 2;
663
664 DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x "
665 "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start,
666 le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)));
667
668 switch (le32_to_cpu(region->code)) {
669 case FLT_REG_FW:
670 ha->flt_region_fw = start;
671 break;
672 case FLT_REG_BOOT_CODE:
673 ha->flt_region_boot = start;
674 break;
675 case FLT_REG_VPD_0:
676 ha->flt_region_vpd_nvram = start;
677 break;
678 case FLT_REG_FDT:
679 ha->flt_region_fdt = start;
680 break;
681 case FLT_REG_HW_EVENT_0:
682 if (!PCI_FUNC(ha->pdev->devfn))
683 ha->flt_region_hw_event = start;
684 break;
685 case FLT_REG_HW_EVENT_1:
686 if (PCI_FUNC(ha->pdev->devfn))
687 ha->flt_region_hw_event = start;
688 break;
689 case FLT_REG_NPIV_CONF_0:
690 if (!PCI_FUNC(ha->pdev->devfn))
691 ha->flt_region_npiv_conf = start;
692 break;
693 case FLT_REG_NPIV_CONF_1:
694 if (PCI_FUNC(ha->pdev->devfn))
695 ha->flt_region_npiv_conf = start;
696 break;
697 }
698 }
699 goto done;
700
701no_flash_data:
702 /* Use hardcoded defaults. */
703 loc = locations[0];
704 ha->flt_region_fw = FA_RISC_CODE_ADDR;
705 ha->flt_region_boot = FA_BOOT_CODE_ADDR;
706 ha->flt_region_vpd_nvram = FA_VPD_NVRAM_ADDR;
707 ha->flt_region_fdt = IS_QLA24XX_TYPE(ha) ? FA_FLASH_DESCR_ADDR_24:
708 FA_FLASH_DESCR_ADDR;
709 ha->flt_region_hw_event = !PCI_FUNC(ha->pdev->devfn) ?
710 FA_HW_EVENT0_ADDR: FA_HW_EVENT1_ADDR;
711 ha->flt_region_npiv_conf = !PCI_FUNC(ha->pdev->devfn) ?
712 (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF0_ADDR_24: FA_NPIV_CONF0_ADDR):
713 (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF1_ADDR_24: FA_NPIV_CONF1_ADDR);
714done:
715 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
716 "vpd_nvram=0x%x fdt=0x%x flt=0x%x hwe=0x%x npiv=0x%x.\n", loc,
717 ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram,
718 ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_hw_event,
719 ha->flt_region_npiv_conf));
720}
721
722static void
723qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
548{ 724{
549#define FLASH_BLK_SIZE_32K 0x8000 725#define FLASH_BLK_SIZE_32K 0x8000
550#define FLASH_BLK_SIZE_64K 0x10000 726#define FLASH_BLK_SIZE_64K 0x10000
727 const char *loc, *locations[] = { "MID", "FDT" };
551 uint16_t cnt, chksum; 728 uint16_t cnt, chksum;
552 uint16_t *wptr; 729 uint16_t *wptr;
553 struct qla_fdt_layout *fdt; 730 struct qla_fdt_layout *fdt;
554 uint8_t man_id, flash_id; 731 uint8_t man_id, flash_id;
555 732 uint16_t mid, fid;
556 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
557 return;
558 733
559 wptr = (uint16_t *)ha->request_ring; 734 wptr = (uint16_t *)ha->request_ring;
560 fdt = (struct qla_fdt_layout *)ha->request_ring; 735 fdt = (struct qla_fdt_layout *)ha->request_ring;
561 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring, 736 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
562 FA_FLASH_DESCR_ADDR << 2, OPTROM_BURST_SIZE); 737 ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
563 if (*wptr == __constant_cpu_to_le16(0xffff)) 738 if (*wptr == __constant_cpu_to_le16(0xffff))
564 goto no_flash_data; 739 goto no_flash_data;
565 if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' || 740 if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
@@ -577,7 +752,10 @@ qla2xxx_get_flash_info(scsi_qla_host_t *ha)
577 goto no_flash_data; 752 goto no_flash_data;
578 } 753 }
579 754
580 ha->fdt_odd_index = le16_to_cpu(fdt->man_id) == 0x1f; 755 loc = locations[1];
756 mid = le16_to_cpu(fdt->man_id);
757 fid = le16_to_cpu(fdt->id);
758 ha->fdt_odd_index = mid == 0x1f;
581 ha->fdt_wrt_disable = fdt->wrt_disable_bits; 759 ha->fdt_wrt_disable = fdt->wrt_disable_bits;
582 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd); 760 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd);
583 ha->fdt_block_size = le32_to_cpu(fdt->block_size); 761 ha->fdt_block_size = le32_to_cpu(fdt->block_size);
@@ -588,16 +766,12 @@ qla2xxx_get_flash_info(scsi_qla_host_t *ha)
588 flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd): 766 flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd):
589 flash_conf_to_access_addr(0x0336); 767 flash_conf_to_access_addr(0x0336);
590 } 768 }
591 769 goto done;
592 DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[FDT]: (0x%x/0x%x) erase=0x%x "
593 "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n",
594 le16_to_cpu(fdt->man_id), le16_to_cpu(fdt->id), ha->fdt_erase_cmd,
595 ha->fdt_protect_sec_cmd, ha->fdt_unprotect_sec_cmd,
596 ha->fdt_odd_index, ha->fdt_wrt_disable, ha->fdt_block_size));
597 return;
598
599no_flash_data: 770no_flash_data:
771 loc = locations[0];
600 qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id); 772 qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
773 mid = man_id;
774 fid = flash_id;
601 ha->fdt_wrt_disable = 0x9c; 775 ha->fdt_wrt_disable = 0x9c;
602 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8); 776 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8);
603 switch (man_id) { 777 switch (man_id) {
@@ -625,14 +799,117 @@ no_flash_data:
625 ha->fdt_block_size = FLASH_BLK_SIZE_64K; 799 ha->fdt_block_size = FLASH_BLK_SIZE_64K;
626 break; 800 break;
627 } 801 }
628 802done:
629 DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[MID]: (0x%x/0x%x) erase=0x%x " 803 DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
630 "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", man_id, flash_id, 804 "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
631 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, 805 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
632 ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable, 806 ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable,
633 ha->fdt_block_size)); 807 ha->fdt_block_size));
634} 808}
635 809
810int
811qla2xxx_get_flash_info(scsi_qla_host_t *ha)
812{
813 int ret;
814 uint32_t flt_addr;
815
816 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
817 return QLA_SUCCESS;
818
819 ret = qla2xxx_find_flt_start(ha, &flt_addr);
820 if (ret != QLA_SUCCESS)
821 return ret;
822
823 qla2xxx_get_flt_info(ha, flt_addr);
824 qla2xxx_get_fdt_info(ha);
825
826 return QLA_SUCCESS;
827}
828
829void
830qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
831{
832#define NPIV_CONFIG_SIZE (16*1024)
833 void *data;
834 uint16_t *wptr;
835 uint16_t cnt, chksum;
836 struct qla_npiv_header hdr;
837 struct qla_npiv_entry *entry;
838
839 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
840 return;
841
842 ha->isp_ops->read_optrom(ha, (uint8_t *)&hdr,
843 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
844 if (hdr.version == __constant_cpu_to_le16(0xffff))
845 return;
846 if (hdr.version != __constant_cpu_to_le16(1)) {
847 DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config "
848 "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
849 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
850 le16_to_cpu(hdr.checksum)));
851 return;
852 }
853
854 data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
855 if (!data) {
856 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to "
857 "allocate memory.\n"));
858 return;
859 }
860
861 ha->isp_ops->read_optrom(ha, (uint8_t *)data,
862 ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
863
864 cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) *
865 sizeof(struct qla_npiv_entry)) >> 1;
866 for (wptr = data, chksum = 0; cnt; cnt--)
867 chksum += le16_to_cpu(*wptr++);
868 if (chksum) {
869 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config "
870 "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
871 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
872 chksum));
873 goto done;
874 }
875
876 entry = data + sizeof(struct qla_npiv_header);
877 cnt = le16_to_cpu(hdr.entries);
878 for ( ; cnt; cnt--, entry++) {
879 uint16_t flags;
880 struct fc_vport_identifiers vid;
881 struct fc_vport *vport;
882
883 flags = le16_to_cpu(entry->flags);
884 if (flags == 0xffff)
885 continue;
886 if ((flags & BIT_0) == 0)
887 continue;
888
889 memset(&vid, 0, sizeof(vid));
890 vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
891 vid.vport_type = FC_PORTTYPE_NPIV;
892 vid.disable = false;
893 vid.port_name = wwn_to_u64(entry->port_name);
894 vid.node_name = wwn_to_u64(entry->node_name);
895
896 DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
897 "wwnn=%llx vf_id=0x%x qos=0x%x.\n", cnt,
898 (unsigned long long)vid.port_name,
899 (unsigned long long)vid.node_name,
900 le16_to_cpu(entry->vf_id), le16_to_cpu(entry->qos)));
901
902 vport = fc_vport_create(ha->host, 0, &vid);
903 if (!vport)
904 qla_printk(KERN_INFO, ha, "NPIV-Config: Failed to "
905 "create vport [%02x]: wwpn=%llx wwnn=%llx.\n", cnt,
906 (unsigned long long)vid.port_name,
907 (unsigned long long)vid.node_name);
908 }
909done:
910 kfree(data);
911}
912
636static void 913static void
637qla24xx_unprotect_flash(scsi_qla_host_t *ha) 914qla24xx_unprotect_flash(scsi_qla_host_t *ha)
638{ 915{
@@ -920,7 +1197,8 @@ qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
920 dwptr = (uint32_t *)buf; 1197 dwptr = (uint32_t *)buf;
921 for (i = 0; i < bytes >> 2; i++, naddr++) 1198 for (i = 0; i < bytes >> 2; i++, naddr++)
922 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, 1199 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
923 flash_data_to_access_addr(FA_VPD_NVRAM_ADDR | naddr))); 1200 flash_data_to_access_addr(ha->flt_region_vpd_nvram |
1201 naddr)));
924 1202
925 return buf; 1203 return buf;
926} 1204}
@@ -935,10 +1213,10 @@ qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
935 dbuf = vmalloc(RMW_BUFFER_SIZE); 1213 dbuf = vmalloc(RMW_BUFFER_SIZE);
936 if (!dbuf) 1214 if (!dbuf)
937 return QLA_MEMORY_ALLOC_FAILED; 1215 return QLA_MEMORY_ALLOC_FAILED;
938 ha->isp_ops->read_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2, 1216 ha->isp_ops->read_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
939 RMW_BUFFER_SIZE); 1217 RMW_BUFFER_SIZE);
940 memcpy(dbuf + (naddr << 2), buf, bytes); 1218 memcpy(dbuf + (naddr << 2), buf, bytes);
941 ha->isp_ops->write_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2, 1219 ha->isp_ops->write_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
942 RMW_BUFFER_SIZE); 1220 RMW_BUFFER_SIZE);
943 vfree(dbuf); 1221 vfree(dbuf);
944 1222
@@ -2166,7 +2444,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2166 memset(dbyte, 0, 8); 2444 memset(dbyte, 0, 8);
2167 dcode = (uint16_t *)dbyte; 2445 dcode = (uint16_t *)dbyte;
2168 2446
2169 qla2x00_read_flash_data(ha, dbyte, FA_RISC_CODE_ADDR * 4 + 10, 2447 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
2170 8); 2448 8);
2171 DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n", 2449 DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n",
2172 __func__, ha->host_no)); 2450 __func__, ha->host_no));
@@ -2177,7 +2455,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2177 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2455 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2178 dcode[3] == 0)) { 2456 dcode[3] == 0)) {
2179 DEBUG2(printk("%s(): Unrecognized fw revision at " 2457 DEBUG2(printk("%s(): Unrecognized fw revision at "
2180 "%x.\n", __func__, FA_RISC_CODE_ADDR * 4)); 2458 "%x.\n", __func__, ha->flt_region_fw * 4));
2181 } else { 2459 } else {
2182 /* values are in big endian */ 2460 /* values are in big endian */
2183 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; 2461 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
@@ -2212,7 +2490,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2212 dcode = mbuf; 2490 dcode = mbuf;
2213 2491
2214 /* Begin with first PCI expansion ROM header. */ 2492 /* Begin with first PCI expansion ROM header. */
2215 pcihdr = 0; 2493 pcihdr = ha->flt_region_boot;
2216 last_image = 1; 2494 last_image = 1;
2217 do { 2495 do {
2218 /* Verify PCI expansion ROM header. */ 2496 /* Verify PCI expansion ROM header. */
@@ -2282,7 +2560,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2282 memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); 2560 memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
2283 dcode = mbuf; 2561 dcode = mbuf;
2284 2562
2285 qla24xx_read_flash_data(ha, dcode, FA_RISC_CODE_ADDR + 4, 4); 2563 qla24xx_read_flash_data(ha, dcode, ha->flt_region_fw + 4, 4);
2286 for (i = 0; i < 4; i++) 2564 for (i = 0; i < 4; i++)
2287 dcode[i] = be32_to_cpu(dcode[i]); 2565 dcode[i] = be32_to_cpu(dcode[i]);
2288 2566
@@ -2291,7 +2569,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2291 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2569 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2292 dcode[3] == 0)) { 2570 dcode[3] == 0)) {
2293 DEBUG2(printk("%s(): Unrecognized fw version at %x.\n", 2571 DEBUG2(printk("%s(): Unrecognized fw version at %x.\n",
2294 __func__, FA_RISC_CODE_ADDR)); 2572 __func__, ha->flt_region_fw));
2295 } else { 2573 } else {
2296 ha->fw_revision[0] = dcode[0]; 2574 ha->fw_revision[0] = dcode[0];
2297 ha->fw_revision[1] = dcode[1]; 2575 ha->fw_revision[1] = dcode[1];
@@ -2355,7 +2633,7 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
2355 /* Locate first empty entry. */ 2633 /* Locate first empty entry. */
2356 for (;;) { 2634 for (;;) {
2357 if (ha->hw_event_ptr >= 2635 if (ha->hw_event_ptr >=
2358 ha->hw_event_start + FA_HW_EVENT_SIZE) { 2636 ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
2359 DEBUG2(qla_printk(KERN_WARNING, ha, 2637 DEBUG2(qla_printk(KERN_WARNING, ha,
2360 "HW event -- Log Full!\n")); 2638 "HW event -- Log Full!\n"));
2361 return QLA_MEMORY_ALLOC_FAILED; 2639 return QLA_MEMORY_ALLOC_FAILED;
@@ -2391,7 +2669,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2391 int rval; 2669 int rval;
2392 uint32_t marker[2], fdata[4]; 2670 uint32_t marker[2], fdata[4];
2393 2671
2394 if (ha->hw_event_start == 0) 2672 if (ha->flt_region_hw_event == 0)
2395 return QLA_FUNCTION_FAILED; 2673 return QLA_FUNCTION_FAILED;
2396 2674
2397 DEBUG2(qla_printk(KERN_WARNING, ha, 2675 DEBUG2(qla_printk(KERN_WARNING, ha,
@@ -2406,7 +2684,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2406 QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER); 2684 QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER);
2407 2685
2408 /* Locate marker. */ 2686 /* Locate marker. */
2409 ha->hw_event_ptr = ha->hw_event_start; 2687 ha->hw_event_ptr = ha->flt_region_hw_event;
2410 for (;;) { 2688 for (;;) {
2411 qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr, 2689 qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr,
2412 4); 2690 4);
@@ -2415,7 +2693,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2415 break; 2693 break;
2416 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE; 2694 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
2417 if (ha->hw_event_ptr >= 2695 if (ha->hw_event_ptr >=
2418 ha->hw_event_start + FA_HW_EVENT_SIZE) { 2696 ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
2419 DEBUG2(qla_printk(KERN_WARNING, ha, 2697 DEBUG2(qla_printk(KERN_WARNING, ha,
2420 "HW event -- Log Full!\n")); 2698 "HW event -- Log Full!\n"));
2421 return QLA_MEMORY_ALLOC_FAILED; 2699 return QLA_MEMORY_ALLOC_FAILED;
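Note: qla2xxx_get_flt_info() stores each region's start as a DWORD offset (start >> 2), and consumers such as the VPD/NVRAM and FDT reads above shift it back with << 2 when they need a byte address. A user-space sketch of that region walk and offset convention, with a hypothetical hard-coded table instead of a flash read:

#include <stdint.h>
#include <stdio.h>

#define FLT_REG_FW        0x01
#define FLT_REG_BOOT_CODE 0x07
#define FLT_REG_VPD_0     0x14

struct flt_region_sketch {              /* mirrors struct qla_flt_region */
    uint32_t code;
    uint32_t size;
    uint32_t start;                     /* byte address in flash */
    uint32_t end;
};

struct layout {                         /* stand-in for the ha->flt_region_* fields */
    uint32_t fw;
    uint32_t boot;
    uint32_t vpd_nvram;
};

static void parse_regions(struct layout *l,
                          const struct flt_region_sketch *r, int n)
{
    for (; n; n--, r++) {
        uint32_t start = r->start >> 2; /* byte address -> DWORD offset */

        switch (r->code) {
        case FLT_REG_FW:        l->fw = start;        break;
        case FLT_REG_BOOT_CODE: l->boot = start;      break;
        case FLT_REG_VPD_0:     l->vpd_nvram = start; break;
        }
    }
}

int main(void)
{
    /* hypothetical table; the values are chosen so the resulting DWORD
     * offsets line up with the hardcoded FA_* defaults earlier in the diff */
    const struct flt_region_sketch table[] = {
        { FLT_REG_BOOT_CODE, 0x080000, 0x000000, 0x07ffff },
        { FLT_REG_FW,        0x100000, 0x080000, 0x17ffff },
        { FLT_REG_VPD_0,     0x010000, 0x120000, 0x12ffff },
    };
    struct layout l = { 0, 0, 0 };

    parse_regions(&l, table, 3);
    /* consumers go back to a byte address with << 2, as the optrom reads do */
    printf("fw:  dword offset 0x%x, byte address 0x%x\n",
           (unsigned)l.fw, (unsigned)(l.fw << 2));
    printf("vpd: dword offset 0x%x, byte address 0x%x\n",
           (unsigned)l.vpd_nvram, (unsigned)(l.vpd_nvram << 2));
    return 0;
}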
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 4160e4caa7b9..be5e299df528 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.01-k7" 10#define QLA2XXX_VERSION "8.02.01-k8"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 88bebb13bc52..de8279ad7d89 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1542,7 +1542,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
1542 DEBUG2(printk(KERN_INFO 1542 DEBUG2(printk(KERN_INFO
1543 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," 1543 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
1544 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, 1544 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
1545 cmd, jiffies, cmd->timeout_per_command / HZ, 1545 cmd, jiffies, cmd->request->timeout / HZ,
1546 ha->dpc_flags, cmd->result, cmd->allowed)); 1546 ha->dpc_flags, cmd->result, cmd->allowed));
1547 1547
1548 /* FIXME: wait for hba to go online */ 1548 /* FIXME: wait for hba to go online */
@@ -1598,7 +1598,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
1598 DEBUG2(printk(KERN_INFO 1598 DEBUG2(printk(KERN_INFO
1599 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " 1599 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
1600 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", 1600 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
1601 ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ, 1601 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
1602 ha->dpc_flags, cmd->result, cmd->allowed)); 1602 ha->dpc_flags, cmd->result, cmd->allowed));
1603 1603
1604 stat = qla4xxx_reset_target(ha, ddb_entry); 1604 stat = qla4xxx_reset_target(ha, ddb_entry);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 4a1cf6377f6c..905350896725 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -914,6 +914,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
914 ds[i].d_count = sg_dma_len(s); 914 ds[i].d_count = sg_dma_len(s);
915 } 915 }
916 sg_count -= n; 916 sg_count -= n;
917 sg = s;
917 } 918 }
918 } else { 919 } else {
919 cmd->dataseg[0].d_base = 0; 920 cmd->dataseg[0].d_base = 0;
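Note: the one-line qlogicpti fix (sg = s) matters because the scatterlist is consumed in fixed-size batches across the continuation loop; without advancing the cursor, each batch would walk the same leading entries again. A user-space sketch of the pattern with a plain array standing in for the scatterlist and an assumed batch size:

#include <stdio.h>

#define BATCH 7                         /* entries per continuation, as an example */

struct seg { unsigned long base; unsigned int len; };

/* walk 'total' segments in batches of BATCH, carrying the cursor forward
 * between batches -- the equivalent of the added "sg = s;" in load_cmd() */
static void load_segments(const struct seg *sg, int total)
{
    int remaining = total;

    while (remaining > 0) {
        int n = remaining > BATCH ? BATCH : remaining;
        const struct seg *s = sg;
        int i;

        for (i = 0; i < n; i++, s++)    /* stands in for the for_each_sg() walk */
            printf("  map base=0x%lx len=%u\n", s->base, s->len);
        remaining -= n;
        sg = s;                         /* the fix: advance the cursor so the next
                                           batch continues instead of restarting */
    }
}

int main(void)
{
    struct seg table[10];
    int i;

    for (i = 0; i < 10; i++) {
        table[i].base = 0x1000UL * (i + 1);
        table[i].len = 512;
    }
    load_segments(table, 10);
    return 0;
}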
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index ee6be596503d..2ac3cb2b9081 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -291,7 +291,6 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
291 unsigned long flags; 291 unsigned long flags;
292 292
293 cmd->device = dev; 293 cmd->device = dev;
294 init_timer(&cmd->eh_timeout);
295 INIT_LIST_HEAD(&cmd->list); 294 INIT_LIST_HEAD(&cmd->list);
296 spin_lock_irqsave(&dev->list_lock, flags); 295 spin_lock_irqsave(&dev->list_lock, flags);
297 list_add_tail(&cmd->list, &dev->cmd_list); 296 list_add_tail(&cmd->list, &dev->cmd_list);
@@ -652,26 +651,33 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
652 unsigned long timeout; 651 unsigned long timeout;
653 int rtn = 0; 652 int rtn = 0;
654 653
654 /*
655 * We will use a queued command if possible, otherwise we will
656 * emulate the queuing and calling of completion function ourselves.
657 */
658 atomic_inc(&cmd->device->iorequest_cnt);
659
655 /* check if the device is still usable */ 660 /* check if the device is still usable */
656 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { 661 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
657 /* in SDEV_DEL we error all commands. DID_NO_CONNECT 662 /* in SDEV_DEL we error all commands. DID_NO_CONNECT
658 * returns an immediate error upwards, and signals 663 * returns an immediate error upwards, and signals
659 * that the device is no longer present */ 664 * that the device is no longer present */
660 cmd->result = DID_NO_CONNECT << 16; 665 cmd->result = DID_NO_CONNECT << 16;
661 atomic_inc(&cmd->device->iorequest_cnt); 666 scsi_done(cmd);
662 __scsi_done(cmd);
663 /* return 0 (because the command has been processed) */ 667 /* return 0 (because the command has been processed) */
664 goto out; 668 goto out;
665 } 669 }
666 670
667 /* Check to see if the scsi lld put this device into state SDEV_BLOCK. */ 671 /* Check to see if the scsi lld made this device blocked. */
668 if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) { 672 if (unlikely(scsi_device_blocked(cmd->device))) {
669 /* 673 /*
670 * in SDEV_BLOCK, the command is just put back on the device 674 * in blocked state, the command is just put back on
671 * queue. The suspend state has already blocked the queue so 675 * the device queue. The suspend state has already
672 * future requests should not occur until the device 676 * blocked the queue so future requests should not
673 * transitions out of the suspend state. 677 * occur until the device transitions out of the
678 * suspend state.
674 */ 679 */
680
675 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); 681 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
676 682
677 SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n")); 683 SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
@@ -714,21 +720,9 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
714 host->resetting = 0; 720 host->resetting = 0;
715 } 721 }
716 722
717 /*
718 * AK: unlikely race here: for some reason the timer could
719 * expire before the serial number is set up below.
720 */
721 scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);
722
723 scsi_log_send(cmd); 723 scsi_log_send(cmd);
724 724
725 /* 725 /*
726 * We will use a queued command if possible, otherwise we will
727 * emulate the queuing and calling of completion function ourselves.
728 */
729 atomic_inc(&cmd->device->iorequest_cnt);
730
731 /*
732 * Before we queue this command, check if the command 726 * Before we queue this command, check if the command
733 * length exceeds what the host adapter can handle. 727 * length exceeds what the host adapter can handle.
734 */ 728 */
@@ -744,6 +738,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
744 } 738 }
745 739
746 spin_lock_irqsave(host->host_lock, flags); 740 spin_lock_irqsave(host->host_lock, flags);
741 /*
742 * AK: unlikely race here: for some reason the timer could
743 * expire before the serial number is set up below.
744 *
745 * TODO: kill serial or move to blk layer
746 */
747 scsi_cmd_get_serial(host, cmd); 747 scsi_cmd_get_serial(host, cmd);
748 748
749 if (unlikely(host->shost_state == SHOST_DEL)) { 749 if (unlikely(host->shost_state == SHOST_DEL)) {
@@ -754,12 +754,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
754 } 754 }
755 spin_unlock_irqrestore(host->host_lock, flags); 755 spin_unlock_irqrestore(host->host_lock, flags);
756 if (rtn) { 756 if (rtn) {
757 if (scsi_delete_timer(cmd)) { 757 scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
758 atomic_inc(&cmd->device->iodone_cnt); 758 rtn : SCSI_MLQUEUE_HOST_BUSY);
759 scsi_queue_insert(cmd,
760 (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
761 rtn : SCSI_MLQUEUE_HOST_BUSY);
762 }
763 SCSI_LOG_MLQUEUE(3, 759 SCSI_LOG_MLQUEUE(3,
764 printk("queuecommand : request rejected\n")); 760 printk("queuecommand : request rejected\n"));
765 } 761 }
@@ -770,24 +766,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
770} 766}
771 767
772/** 768/**
773 * scsi_req_abort_cmd -- Request command recovery for the specified command
774 * @cmd: pointer to the SCSI command of interest
775 *
776 * This function requests that SCSI Core start recovery for the
777 * command by deleting the timer and adding the command to the eh
778 * queue. It can be called by either LLDDs or SCSI Core. LLDDs who
779 * implement their own error recovery MAY ignore the timeout event if
780 * they generated scsi_req_abort_cmd.
781 */
782void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
783{
784 if (!scsi_delete_timer(cmd))
785 return;
786 scsi_times_out(cmd);
787}
788EXPORT_SYMBOL(scsi_req_abort_cmd);
789
790/**
791 * scsi_done - Enqueue the finished SCSI command into the done queue. 769 * scsi_done - Enqueue the finished SCSI command into the done queue.
792 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives 770 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
793 * ownership back to SCSI Core -- i.e. the LLDD has finished with it. 771 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
@@ -802,42 +780,7 @@ EXPORT_SYMBOL(scsi_req_abort_cmd);
802 */ 780 */
803static void scsi_done(struct scsi_cmnd *cmd) 781static void scsi_done(struct scsi_cmnd *cmd)
804{ 782{
805 /* 783 blk_complete_request(cmd->request);
806 * We don't have to worry about this one timing out anymore.
807 * If we are unable to remove the timer, then the command
808 * has already timed out. In which case, we have no choice but to
809 * let the timeout function run, as we have no idea where in fact
810 * that function could really be. It might be on another processor,
811 * etc, etc.
812 */
813 if (!scsi_delete_timer(cmd))
814 return;
815 __scsi_done(cmd);
816}
817
818/* Private entry to scsi_done() to complete a command when the timer
819 * isn't running --- used by scsi_times_out */
820void __scsi_done(struct scsi_cmnd *cmd)
821{
822 struct request *rq = cmd->request;
823
824 /*
825 * Set the serial numbers back to zero
826 */
827 cmd->serial_number = 0;
828
829 atomic_inc(&cmd->device->iodone_cnt);
830 if (cmd->result)
831 atomic_inc(&cmd->device->ioerr_cnt);
832
833 BUG_ON(!rq);
834
835 /*
836 * The uptodate/nbytes values don't matter, as we allow partial
837 * completes and thus will check this in the softirq callback
838 */
839 rq->completion_data = cmd;
840 blk_complete_request(rq);
841} 784}
842 785
843/* Move this to a header if it becomes more generally useful */ 786/* Move this to a header if it becomes more generally useful */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 39ce3aba1dac..fecefa05cb62 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -112,69 +112,8 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
112} 112}
113 113
114/** 114/**
115 * scsi_add_timer - Start timeout timer for a single scsi command.
116 * @scmd: scsi command that is about to start running.
117 * @timeout: amount of time to allow this command to run.
118 * @complete: timeout function to call if timer isn't canceled.
119 *
120 * Notes:
121 * This should be turned into an inline function. Each scsi command
122 * has its own timer, and as it is added to the queue, we set up the
123 * timer. When the command completes, we cancel the timer.
124 */
125void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
126 void (*complete)(struct scsi_cmnd *))
127{
128
129 /*
130 * If the clock was already running for this command, then
131 * first delete the timer. The timer handling code gets rather
132 * confused if we don't do this.
133 */
134 if (scmd->eh_timeout.function)
135 del_timer(&scmd->eh_timeout);
136
137 scmd->eh_timeout.data = (unsigned long)scmd;
138 scmd->eh_timeout.expires = jiffies + timeout;
139 scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
140
141 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
142 " %d, (%p)\n", __func__,
143 scmd, timeout, complete));
144
145 add_timer(&scmd->eh_timeout);
146}
147
148/**
149 * scsi_delete_timer - Delete/cancel timer for a given function.
150 * @scmd: Cmd that we are canceling timer for
151 *
152 * Notes:
153 * This should be turned into an inline function.
154 *
155 * Return value:
156 * 1 if we were able to detach the timer. 0 if we blew it, and the
157 * timer function has already started to run.
158 */
159int scsi_delete_timer(struct scsi_cmnd *scmd)
160{
161 int rtn;
162
163 rtn = del_timer(&scmd->eh_timeout);
164
165 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
166 " rtn: %d\n", __func__,
167 scmd, rtn));
168
169 scmd->eh_timeout.data = (unsigned long)NULL;
170 scmd->eh_timeout.function = NULL;
171
172 return rtn;
173}
174
175/**
176 * scsi_times_out - Timeout function for normal scsi commands. 115 * scsi_times_out - Timeout function for normal scsi commands.
177 * @scmd: Cmd that is timing out. 116 * @req: request that is timing out.
178 * 117 *
179 * Notes: 118 * Notes:
180 * We do not need to lock this. There is the potential for a race 119 * We do not need to lock this. There is the potential for a race
@@ -182,9 +121,11 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
182 * normal completion function determines that the timer has already 121 * normal completion function determines that the timer has already
183 * fired, then it mustn't do anything. 122 * fired, then it mustn't do anything.
184 */ 123 */
185void scsi_times_out(struct scsi_cmnd *scmd) 124enum blk_eh_timer_return scsi_times_out(struct request *req)
186{ 125{
187 enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *); 126 struct scsi_cmnd *scmd = req->special;
127 enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
128 enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
188 129
189 scsi_log_completion(scmd, TIMEOUT_ERROR); 130 scsi_log_completion(scmd, TIMEOUT_ERROR);
190 131
@@ -196,22 +137,20 @@ void scsi_times_out(struct scsi_cmnd *scmd)
196 eh_timed_out = NULL; 137 eh_timed_out = NULL;
197 138
198 if (eh_timed_out) 139 if (eh_timed_out)
199 switch (eh_timed_out(scmd)) { 140 rtn = eh_timed_out(scmd);
200 case EH_HANDLED: 141 switch (rtn) {
201 __scsi_done(scmd); 142 case BLK_EH_NOT_HANDLED:
202 return;
203 case EH_RESET_TIMER:
204 scsi_add_timer(scmd, scmd->timeout_per_command,
205 scsi_times_out);
206 return;
207 case EH_NOT_HANDLED:
208 break; 143 break;
144 default:
145 return rtn;
209 } 146 }
210 147
211 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) { 148 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
212 scmd->result |= DID_TIME_OUT << 16; 149 scmd->result |= DID_TIME_OUT << 16;
213 __scsi_done(scmd); 150 return BLK_EH_HANDLED;
214 } 151 }
152
153 return BLK_EH_NOT_HANDLED;
215} 154}
216 155
217/** 156/**
@@ -1793,7 +1732,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1793 1732
1794 blk_rq_init(NULL, &req); 1733 blk_rq_init(NULL, &req);
1795 scmd->request = &req; 1734 scmd->request = &req;
1796 memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
1797 1735
1798 scmd->cmnd = req.cmd; 1736 scmd->cmnd = req.cmd;
1799 1737
@@ -1804,8 +1742,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1804 1742
1805 scmd->sc_data_direction = DMA_BIDIRECTIONAL; 1743 scmd->sc_data_direction = DMA_BIDIRECTIONAL;
1806 1744
1807 init_timer(&scmd->eh_timeout);
1808
1809 spin_lock_irqsave(shost->host_lock, flags); 1745 spin_lock_irqsave(shost->host_lock, flags);
1810 shost->tmf_in_progress = 1; 1746 shost->tmf_in_progress = 1;
1811 spin_unlock_irqrestore(shost->host_lock, flags); 1747 spin_unlock_irqrestore(shost->host_lock, flags);
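Note: with timeouts now owned by the block layer, scsi_times_out() becomes the rq_timed_out handler — it recovers the command from req->special, lets the optional eh_timed_out hook vote first, and only falls back to queueing the command for error handling when the hook returns BLK_EH_NOT_HANDLED. A user-space sketch of that dispatch shape, with the kernel types reduced to stand-ins:

#include <stdio.h>

enum blk_eh_timer_return { BLK_EH_NOT_HANDLED, BLK_EH_HANDLED,
                           BLK_EH_RESET_TIMER };

struct cmnd_sketch {
    int can_queue_for_eh;               /* stand-in for scsi_eh_scmd_add() succeeding */
    enum blk_eh_timer_return (*eh_timed_out)(struct cmnd_sketch *);
};

/* mirrors the reworked scsi_times_out(): the optional hook votes first, and
 * the fallback path hands the command to error handling */
static enum blk_eh_timer_return times_out(struct cmnd_sketch *cmd)
{
    enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;

    if (cmd->eh_timed_out)
        rtn = cmd->eh_timed_out(cmd);
    if (rtn != BLK_EH_NOT_HANDLED)
        return rtn;                     /* hook handled it or asked for more time */

    if (!cmd->can_queue_for_eh)         /* could not hand off to the EH thread */
        return BLK_EH_HANDLED;          /* complete it as timed out */
    return BLK_EH_NOT_HANDLED;          /* the EH thread owns the command now */
}

static enum blk_eh_timer_return reset_timer_hook(struct cmnd_sketch *cmd)
{
    (void)cmd;
    return BLK_EH_RESET_TIMER;          /* e.g. the transport wants more time */
}

int main(void)
{
    struct cmnd_sketch a = { 1, NULL };
    struct cmnd_sketch b = { 1, reset_timer_hook };

    printf("no hook: %d\n", times_out(&a));   /* 0: handed to error handling */
    printf("hook:    %d\n", times_out(&b));   /* 2: timer restarted */
    return 0;
}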
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ff5d56b3ee4d..98ee55ced592 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -852,7 +852,7 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
852void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 852void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
853{ 853{
854 int result = cmd->result; 854 int result = cmd->result;
855 int this_count = scsi_bufflen(cmd); 855 int this_count;
856 struct request_queue *q = cmd->device->request_queue; 856 struct request_queue *q = cmd->device->request_queue;
857 struct request *req = cmd->request; 857 struct request *req = cmd->request;
858 int error = 0; 858 int error = 0;
@@ -908,6 +908,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
908 */ 908 */
909 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) 909 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
910 return; 910 return;
911 this_count = blk_rq_bytes(req);
911 912
912 /* good_bytes = 0, or (inclusive) there were leftovers and 913 /* good_bytes = 0, or (inclusive) there were leftovers and
913 * result = 0, so scsi_end_request couldn't retry. 914 * result = 0, so scsi_end_request couldn't retry.
@@ -1180,7 +1181,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1180 1181
1181 cmd->transfersize = req->data_len; 1182 cmd->transfersize = req->data_len;
1182 cmd->allowed = req->retries; 1183 cmd->allowed = req->retries;
1183 cmd->timeout_per_command = req->timeout;
1184 return BLKPREP_OK; 1184 return BLKPREP_OK;
1185} 1185}
1186EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); 1186EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
@@ -1250,6 +1250,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1250 break; 1250 break;
1251 case SDEV_QUIESCE: 1251 case SDEV_QUIESCE:
1252 case SDEV_BLOCK: 1252 case SDEV_BLOCK:
1253 case SDEV_CREATED_BLOCK:
1253 /* 1254 /*
1254 * If the device is blocked we defer normal commands. 1255
1255 */ 1256 */
@@ -1415,17 +1416,26 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1415 spin_unlock(shost->host_lock); 1416 spin_unlock(shost->host_lock);
1416 spin_lock(sdev->request_queue->queue_lock); 1417 spin_lock(sdev->request_queue->queue_lock);
1417 1418
1418 __scsi_done(cmd); 1419 blk_complete_request(req);
1419} 1420}
1420 1421
1421static void scsi_softirq_done(struct request *rq) 1422static void scsi_softirq_done(struct request *rq)
1422{ 1423{
1423 struct scsi_cmnd *cmd = rq->completion_data; 1424 struct scsi_cmnd *cmd = rq->special;
1424 unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command; 1425 unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1425 int disposition; 1426 int disposition;
1426 1427
1427 INIT_LIST_HEAD(&cmd->eh_entry); 1428 INIT_LIST_HEAD(&cmd->eh_entry);
1428 1429
1430 /*
1431 * Set the serial numbers back to zero
1432 */
1433 cmd->serial_number = 0;
1434
1435 atomic_inc(&cmd->device->iodone_cnt);
1436 if (cmd->result)
1437 atomic_inc(&cmd->device->ioerr_cnt);
1438
1429 disposition = scsi_decide_disposition(cmd); 1439 disposition = scsi_decide_disposition(cmd);
1430 if (disposition != SUCCESS && 1440 if (disposition != SUCCESS &&
1431 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { 1441 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
@@ -1674,6 +1684,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1674 1684
1675 blk_queue_prep_rq(q, scsi_prep_fn); 1685 blk_queue_prep_rq(q, scsi_prep_fn);
1676 blk_queue_softirq_done(q, scsi_softirq_done); 1686 blk_queue_softirq_done(q, scsi_softirq_done);
1687 blk_queue_rq_timed_out(q, scsi_times_out);
1677 return q; 1688 return q;
1678} 1689}
1679 1690
@@ -2063,10 +2074,13 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2063 2074
2064 switch (state) { 2075 switch (state) {
2065 case SDEV_CREATED: 2076 case SDEV_CREATED:
2066 /* There are no legal states that come back to 2077 switch (oldstate) {
2067 * created. This is the manually initialised start 2078 case SDEV_CREATED_BLOCK:
2068 * state */ 2079 break;
2069 goto illegal; 2080 default:
2081 goto illegal;
2082 }
2083 break;
2070 2084
2071 case SDEV_RUNNING: 2085 case SDEV_RUNNING:
2072 switch (oldstate) { 2086 switch (oldstate) {
@@ -2104,8 +2118,17 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2104 2118
2105 case SDEV_BLOCK: 2119 case SDEV_BLOCK:
2106 switch (oldstate) { 2120 switch (oldstate) {
2107 case SDEV_CREATED:
2108 case SDEV_RUNNING: 2121 case SDEV_RUNNING:
2122 case SDEV_CREATED_BLOCK:
2123 break;
2124 default:
2125 goto illegal;
2126 }
2127 break;
2128
2129 case SDEV_CREATED_BLOCK:
2130 switch (oldstate) {
2131 case SDEV_CREATED:
2109 break; 2132 break;
2110 default: 2133 default:
2111 goto illegal; 2134 goto illegal;
@@ -2393,8 +2416,12 @@ scsi_internal_device_block(struct scsi_device *sdev)
2393 int err = 0; 2416 int err = 0;
2394 2417
2395 err = scsi_device_set_state(sdev, SDEV_BLOCK); 2418 err = scsi_device_set_state(sdev, SDEV_BLOCK);
2396 if (err) 2419 if (err) {
2397 return err; 2420 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2421
2422 if (err)
2423 return err;
2424 }
2398 2425
2399 /* 2426 /*
2400 * The device has transitioned to SDEV_BLOCK. Stop the 2427 * The device has transitioned to SDEV_BLOCK. Stop the
@@ -2437,8 +2464,12 @@ scsi_internal_device_unblock(struct scsi_device *sdev)
2437 * and goose the device queue if successful. 2464 * and goose the device queue if successful.
2438 */ 2465 */
2439 err = scsi_device_set_state(sdev, SDEV_RUNNING); 2466 err = scsi_device_set_state(sdev, SDEV_RUNNING);
2440 if (err) 2467 if (err) {
2441 return err; 2468 err = scsi_device_set_state(sdev, SDEV_CREATED);
2469
2470 if (err)
2471 return err;
2472 }
2442 2473
2443 spin_lock_irqsave(q->queue_lock, flags); 2474 spin_lock_irqsave(q->queue_lock, flags);
2444 blk_start_queue(q); 2475 blk_start_queue(q);
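The scsi_scan.c hunks below test for both "not yet running" states through a scsi_device_created() helper. That helper is added in include/scsi/scsi_device.h, which is outside the drivers/ diffstat shown here; it is assumed to look roughly like this:

    static inline int scsi_device_created(struct scsi_device *sdev)
    {
            /* A device counts as freshly created whether or not it was
             * blocked while still being set up. */
            return sdev->sdev_state == SDEV_CREATED ||
                   sdev->sdev_state == SDEV_CREATED_BLOCK;
    }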
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index ae7ed9a22662..b37e133de805 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -21,6 +21,7 @@
21#include <linux/time.h> 21#include <linux/time.h>
22#include <linux/jiffies.h> 22#include <linux/jiffies.h>
23#include <linux/security.h> 23#include <linux/security.h>
24#include <linux/delay.h>
24#include <net/sock.h> 25#include <net/sock.h>
25#include <net/netlink.h> 26#include <net/netlink.h>
26 27
@@ -30,6 +31,39 @@
30struct sock *scsi_nl_sock = NULL; 31struct sock *scsi_nl_sock = NULL;
31EXPORT_SYMBOL_GPL(scsi_nl_sock); 32EXPORT_SYMBOL_GPL(scsi_nl_sock);
32 33
34static DEFINE_SPINLOCK(scsi_nl_lock);
35static struct list_head scsi_nl_drivers;
36
37static u32 scsi_nl_state;
38#define STATE_EHANDLER_BSY 0x00000001
39
40struct scsi_nl_transport {
41 int (*msg_handler)(struct sk_buff *);
42 void (*event_handler)(struct notifier_block *, unsigned long, void *);
43 unsigned int refcnt;
44 int flags;
45};
46
47/* flags values (bit flags) */
48#define HANDLER_DELETING 0x1
49
50static struct scsi_nl_transport transports[SCSI_NL_MAX_TRANSPORTS] =
51 { {NULL, }, };
52
53
54struct scsi_nl_drvr {
55 struct list_head next;
56 int (*dmsg_handler)(struct Scsi_Host *shost, void *payload,
57 u32 len, u32 pid);
58 void (*devt_handler)(struct notifier_block *nb,
59 unsigned long event, void *notify_ptr);
60 struct scsi_host_template *hostt;
61 u64 vendor_id;
62 unsigned int refcnt;
63 int flags;
64};
65
66
33 67
34/** 68/**
35 * scsi_nl_rcv_msg - Receive message handler. 69 * scsi_nl_rcv_msg - Receive message handler.
@@ -45,8 +79,9 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
45{ 79{
46 struct nlmsghdr *nlh; 80 struct nlmsghdr *nlh;
47 struct scsi_nl_hdr *hdr; 81 struct scsi_nl_hdr *hdr;
48 uint32_t rlen; 82 unsigned long flags;
49 int err; 83 u32 rlen;
84 int err, tport;
50 85
51 while (skb->len >= NLMSG_SPACE(0)) { 86 while (skb->len >= NLMSG_SPACE(0)) {
52 err = 0; 87 err = 0;
@@ -65,7 +100,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
65 100
66 if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) { 101 if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) {
67 err = -EBADMSG; 102 err = -EBADMSG;
68 return; 103 goto next_msg;
69 } 104 }
70 105
71 hdr = NLMSG_DATA(nlh); 106 hdr = NLMSG_DATA(nlh);
@@ -83,12 +118,27 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
83 if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) { 118 if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
84 printk(KERN_WARNING "%s: discarding partial message\n", 119 printk(KERN_WARNING "%s: discarding partial message\n",
85 __func__); 120 __func__);
86 return; 121 goto next_msg;
87 } 122 }
88 123
89 /* 124 /*
90 * We currently don't support anyone sending us a message 125 * Deliver message to the appropriate transport
91 */ 126 */
127 spin_lock_irqsave(&scsi_nl_lock, flags);
128
129 tport = hdr->transport;
130 if ((tport < SCSI_NL_MAX_TRANSPORTS) &&
131 !(transports[tport].flags & HANDLER_DELETING) &&
132 (transports[tport].msg_handler)) {
133 transports[tport].refcnt++;
134 spin_unlock_irqrestore(&scsi_nl_lock, flags);
135 err = transports[tport].msg_handler(skb);
136 spin_lock_irqsave(&scsi_nl_lock, flags);
137 transports[tport].refcnt--;
138 } else
139 err = -ENOENT;
140
141 spin_unlock_irqrestore(&scsi_nl_lock, flags);
92 142
93next_msg: 143next_msg:
94 if ((err) || (nlh->nlmsg_flags & NLM_F_ACK)) 144 if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
@@ -110,14 +160,42 @@ static int
110scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr) 160scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr)
111{ 161{
112 struct netlink_notify *n = ptr; 162 struct netlink_notify *n = ptr;
163 struct scsi_nl_drvr *driver;
164 unsigned long flags;
165 int tport;
113 166
114 if (n->protocol != NETLINK_SCSITRANSPORT) 167 if (n->protocol != NETLINK_SCSITRANSPORT)
115 return NOTIFY_DONE; 168 return NOTIFY_DONE;
116 169
170 spin_lock_irqsave(&scsi_nl_lock, flags);
171 scsi_nl_state |= STATE_EHANDLER_BSY;
172
117 /* 173 /*
118 * Currently, we are not tracking PID's, etc. There is nothing 174 * Pass event on to any transports that may be listening
119 * to handle.
120 */ 175 */
176 for (tport = 0; tport < SCSI_NL_MAX_TRANSPORTS; tport++) {
177 if (!(transports[tport].flags & HANDLER_DELETING) &&
178 (transports[tport].event_handler)) {
179 spin_unlock_irqrestore(&scsi_nl_lock, flags);
180 transports[tport].event_handler(this, event, ptr);
181 spin_lock_irqsave(&scsi_nl_lock, flags);
182 }
183 }
184
185 /*
186 * Pass event on to any drivers that may be listening
187 */
188 list_for_each_entry(driver, &scsi_nl_drivers, next) {
189 if (!(driver->flags & HANDLER_DELETING) &&
190 (driver->devt_handler)) {
191 spin_unlock_irqrestore(&scsi_nl_lock, flags);
192 driver->devt_handler(this, event, ptr);
193 spin_lock_irqsave(&scsi_nl_lock, flags);
194 }
195 }
196
197 scsi_nl_state &= ~STATE_EHANDLER_BSY;
198 spin_unlock_irqrestore(&scsi_nl_lock, flags);
121 199
122 return NOTIFY_DONE; 200 return NOTIFY_DONE;
123} 201}
@@ -128,7 +206,281 @@ static struct notifier_block scsi_netlink_notifier = {
128 206
129 207
130/** 208/**
131 * scsi_netlink_init - Called by SCSI subsystem to intialize the SCSI transport netlink interface 209 * GENERIC SCSI transport receive and event handlers
210 **/
211
212/**
213 * scsi_generic_msg_handler - receive message handler for GENERIC transport
214 * messages
215 *
216 * @skb: socket receive buffer
217 *
218 **/
219static int
220scsi_generic_msg_handler(struct sk_buff *skb)
221{
222 struct nlmsghdr *nlh = nlmsg_hdr(skb);
223 struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh);
224 struct scsi_nl_drvr *driver;
225 struct Scsi_Host *shost;
226 unsigned long flags;
227 int err = 0, match, pid;
228
229 pid = NETLINK_CREDS(skb)->pid;
230
231 switch (snlh->msgtype) {
232 case SCSI_NL_SHOST_VENDOR:
233 {
234 struct scsi_nl_host_vendor_msg *msg = NLMSG_DATA(nlh);
235
236 /* Locate the driver that corresponds to the message */
237 spin_lock_irqsave(&scsi_nl_lock, flags);
238 match = 0;
239 list_for_each_entry(driver, &scsi_nl_drivers, next) {
240 if (driver->vendor_id == msg->vendor_id) {
241 match = 1;
242 break;
243 }
244 }
245
246 if ((!match) || (!driver->dmsg_handler)) {
247 spin_unlock_irqrestore(&scsi_nl_lock, flags);
248 err = -ESRCH;
249 goto rcv_exit;
250 }
251
252 if (driver->flags & HANDLER_DELETING) {
253 spin_unlock_irqrestore(&scsi_nl_lock, flags);
254 err = -ESHUTDOWN;
255 goto rcv_exit;
256 }
257
258 driver->refcnt++;
259 spin_unlock_irqrestore(&scsi_nl_lock, flags);
260
261
262 /* if successful, scsi_host_lookup takes a shost reference */
263 shost = scsi_host_lookup(msg->host_no);
264 if (!shost) {
265 err = -ENODEV;
266 goto driver_exit;
267 }
268
269 /* is this host owned by the vendor ? */
270 if (shost->hostt != driver->hostt) {
271 err = -EINVAL;
272 goto vendormsg_put;
273 }
274
275 /* pass message on to the driver */
276 err = driver->dmsg_handler(shost, (void *)&msg[1],
277 msg->vmsg_datalen, pid);
278
279vendormsg_put:
280 /* release reference by scsi_host_lookup */
281 scsi_host_put(shost);
282
283driver_exit:
284 /* release our own reference on the registration object */
285 spin_lock_irqsave(&scsi_nl_lock, flags);
286 driver->refcnt--;
287 spin_unlock_irqrestore(&scsi_nl_lock, flags);
288 break;
289 }
290
291 default:
292 err = -EBADR;
293 break;
294 }
295
296rcv_exit:
297 if (err)
298 printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
299 __func__, snlh->msgtype, err);
300 return err;
301}
302
303
304/**
305 * scsi_nl_add_transport -
306 * Registers message and event handlers for a transport. Enables
307 * receipt of netlink messages and events by the transport.
308 *
309 * @tport: transport registering handlers
310 * @msg_handler: receive message handler callback
311 * @event_handler: receive event handler callback
312 **/
313int
314scsi_nl_add_transport(u8 tport,
315 int (*msg_handler)(struct sk_buff *),
316 void (*event_handler)(struct notifier_block *, unsigned long, void *))
317{
318 unsigned long flags;
319 int err = 0;
320
321 if (tport >= SCSI_NL_MAX_TRANSPORTS)
322 return -EINVAL;
323
324 spin_lock_irqsave(&scsi_nl_lock, flags);
325
326 if (scsi_nl_state & STATE_EHANDLER_BSY) {
327 spin_unlock_irqrestore(&scsi_nl_lock, flags);
328 msleep(1);
329 spin_lock_irqsave(&scsi_nl_lock, flags);
330 }
331
332 if (transports[tport].msg_handler || transports[tport].event_handler) {
333 err = -EALREADY;
334 goto register_out;
335 }
336
337 transports[tport].msg_handler = msg_handler;
338 transports[tport].event_handler = event_handler;
339 transports[tport].flags = 0;
340 transports[tport].refcnt = 0;
341
342register_out:
343 spin_unlock_irqrestore(&scsi_nl_lock, flags);
344
345 return err;
346}
347EXPORT_SYMBOL_GPL(scsi_nl_add_transport);
348
349
350/**
351 * scsi_nl_remove_transport -
352 * Disable transport reception of messages and events
353 *
354 * @tport: transport deregistering handlers
355 *
356 **/
357void
358scsi_nl_remove_transport(u8 tport)
359{
360 unsigned long flags;
361
362 spin_lock_irqsave(&scsi_nl_lock, flags);
363 if (scsi_nl_state & STATE_EHANDLER_BSY) {
364 spin_unlock_irqrestore(&scsi_nl_lock, flags);
365 msleep(1);
366 spin_lock_irqsave(&scsi_nl_lock, flags);
367 }
368
369 if (tport < SCSI_NL_MAX_TRANSPORTS) {
370 transports[tport].flags |= HANDLER_DELETING;
371
372 while (transports[tport].refcnt != 0) {
373 spin_unlock_irqrestore(&scsi_nl_lock, flags);
374 schedule_timeout_uninterruptible(HZ/4);
375 spin_lock_irqsave(&scsi_nl_lock, flags);
376 }
377 transports[tport].msg_handler = NULL;
378 transports[tport].event_handler = NULL;
379 transports[tport].flags = 0;
380 }
381
382 spin_unlock_irqrestore(&scsi_nl_lock, flags);
383
384 return;
385}
386EXPORT_SYMBOL_GPL(scsi_nl_remove_transport);
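A transport would normally pair these two calls in its module init and exit paths. A hedged usage sketch (the slot constant and handler names are illustrative, not part of this patch):

    static int example_nl_msg(struct sk_buff *skb)
    {
            /* decode the scsi_nl_hdr payload delivered by scsi_nl_rcv_msg() */
            return 0;
    }

    static int __init example_transport_init(void)
    {
            /* EXAMPLE_NL_TRANSPORT is a placeholder slot index below
             * SCSI_NL_MAX_TRANSPORTS */
            return scsi_nl_add_transport(EXAMPLE_NL_TRANSPORT,
                                         example_nl_msg, NULL);
    }

    static void __exit example_transport_exit(void)
    {
            scsi_nl_remove_transport(EXAMPLE_NL_TRANSPORT);
    }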
387
388
389/**
390 * scsi_nl_add_driver -
391 * A driver is registering its interfaces for SCSI netlink messages
392 *
393 * @vendor_id: A unique identification value for the driver.
394 * @hostt: address of the driver's host template. Used
395 * to verify an shost is bound to the driver
396 * @nlmsg_handler: receive message handler callback
397 * @nlevt_handler: receive event handler callback
398 *
399 * Returns:
400 * 0 on Success
401 * error result otherwise
402 **/
403int
404scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
405 int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
406 u32 len, u32 pid),
407 void (*nlevt_handler)(struct notifier_block *nb,
408 unsigned long event, void *notify_ptr))
409{
410 struct scsi_nl_drvr *driver;
411 unsigned long flags;
412
413 driver = kzalloc(sizeof(*driver), GFP_KERNEL);
414 if (unlikely(!driver)) {
415 printk(KERN_ERR "%s: allocation failure\n", __func__);
416 return -ENOMEM;
417 }
418
419 driver->dmsg_handler = nlmsg_handler;
420 driver->devt_handler = nlevt_handler;
421 driver->hostt = hostt;
422 driver->vendor_id = vendor_id;
423
424 spin_lock_irqsave(&scsi_nl_lock, flags);
425 if (scsi_nl_state & STATE_EHANDLER_BSY) {
426 spin_unlock_irqrestore(&scsi_nl_lock, flags);
427 msleep(1);
428 spin_lock_irqsave(&scsi_nl_lock, flags);
429 }
430 list_add_tail(&driver->next, &scsi_nl_drivers);
431 spin_unlock_irqrestore(&scsi_nl_lock, flags);
432
433 return 0;
434}
435EXPORT_SYMBOL_GPL(scsi_nl_add_driver);
436
437
438/**
439 * scsi_nl_remove_driver -
440 * A driver is unregistering its interfaces for SCSI netlink messages
441 *
442 * @vendor_id: The unique identification value for the driver.
443 **/
444void
445scsi_nl_remove_driver(u64 vendor_id)
446{
447 struct scsi_nl_drvr *driver;
448 unsigned long flags;
449
450 spin_lock_irqsave(&scsi_nl_lock, flags);
451 if (scsi_nl_state & STATE_EHANDLER_BSY) {
452 spin_unlock_irqrestore(&scsi_nl_lock, flags);
453 msleep(1);
454 spin_lock_irqsave(&scsi_nl_lock, flags);
455 }
456
457 list_for_each_entry(driver, &scsi_nl_drivers, next) {
458 if (driver->vendor_id == vendor_id) {
459 driver->flags |= HANDLER_DELETING;
460 while (driver->refcnt != 0) {
461 spin_unlock_irqrestore(&scsi_nl_lock, flags);
462 schedule_timeout_uninterruptible(HZ/4);
463 spin_lock_irqsave(&scsi_nl_lock, flags);
464 }
465 list_del(&driver->next);
466 kfree(driver);
467 spin_unlock_irqrestore(&scsi_nl_lock, flags);
468 return;
469 }
470 }
471
472 spin_unlock_irqrestore(&scsi_nl_lock, flags);
473
474 printk(KERN_ERR "%s: removal of driver failed - vendor_id 0x%llx\n",
475 __func__, (unsigned long long)vendor_id);
476 return;
477}
478EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
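On the LLDD side, the corresponding registration is against the driver's own host template and vendor id. A minimal sketch (the vendor id, template and handler names are made up for illustration):

    static int example_nl_dmsg(struct Scsi_Host *shost, void *payload,
                               u32 len, u32 pid)
    {
            /* payload/len is the vendor-unique data that followed the
             * scsi_nl_host_vendor_msg header; pid identifies the sender */
            return 0;
    }

    static int example_attach_nl(void)
    {
            return scsi_nl_add_driver(EXAMPLE_VENDOR_ID,
                                      &example_host_template,
                                      example_nl_dmsg, NULL);
    }

    static void example_detach_nl(void)
    {
            scsi_nl_remove_driver(EXAMPLE_VENDOR_ID);
    }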
479
480
481/**
482 * scsi_netlink_init - Called by SCSI subsystem to initialize
483 * the SCSI transport netlink interface
132 * 484 *
133 **/ 485 **/
134void 486void
@@ -136,6 +488,8 @@ scsi_netlink_init(void)
136{ 488{
137 int error; 489 int error;
138 490
491 INIT_LIST_HEAD(&scsi_nl_drivers);
492
139 error = netlink_register_notifier(&scsi_netlink_notifier); 493 error = netlink_register_notifier(&scsi_netlink_notifier);
140 if (error) { 494 if (error) {
141 printk(KERN_ERR "%s: register of event handler failed - %d\n", 495 printk(KERN_ERR "%s: register of event handler failed - %d\n",
@@ -150,8 +504,15 @@ scsi_netlink_init(void)
150 printk(KERN_ERR "%s: register of receive handler failed\n", 504
151 __func__); 505 __func__);
152 netlink_unregister_notifier(&scsi_netlink_notifier); 506 netlink_unregister_notifier(&scsi_netlink_notifier);
507 return;
153 } 508 }
154 509
510 /* Register the entry points for the generic SCSI transport */
511 error = scsi_nl_add_transport(SCSI_NL_TRANSPORT,
512 scsi_generic_msg_handler, NULL);
513 if (error)
514 printk(KERN_ERR "%s: register of GENERIC transport handler"
515 " failed - %d\n", __func__, error);
155 return; 516 return;
156} 517}
157 518
@@ -163,6 +524,8 @@ scsi_netlink_init(void)
163void 524void
164scsi_netlink_exit(void) 525scsi_netlink_exit(void)
165{ 526{
527 scsi_nl_remove_transport(SCSI_NL_TRANSPORT);
528
166 if (scsi_nl_sock) { 529 if (scsi_nl_sock) {
167 netlink_kernel_release(scsi_nl_sock); 530 netlink_kernel_release(scsi_nl_sock);
168 netlink_unregister_notifier(&scsi_netlink_notifier); 531 netlink_unregister_notifier(&scsi_netlink_notifier);
@@ -172,3 +535,147 @@ scsi_netlink_exit(void)
172} 535}
173 536
174 537
538/*
539 * Exported Interfaces
540 */
541
542/**
543 * scsi_nl_send_transport_msg -
544 * Generic function to send a single message from a SCSI transport to
545 * a single process
546 *
547 * @pid: receiving pid
548 * @hdr: message payload
549 *
550 **/
551void
552scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr)
553{
554 struct sk_buff *skb;
555 struct nlmsghdr *nlh;
556 const char *fn;
557 char *datab;
558 u32 len, skblen;
559 int err;
560
561 if (!scsi_nl_sock) {
562 err = -ENOENT;
563 fn = "netlink socket";
564 goto msg_fail;
565 }
566
567 len = NLMSG_SPACE(hdr->msglen);
568 skblen = NLMSG_SPACE(len);
569
570 skb = alloc_skb(skblen, GFP_KERNEL);
571 if (!skb) {
572 err = -ENOBUFS;
573 fn = "alloc_skb";
574 goto msg_fail;
575 }
576
577 nlh = nlmsg_put(skb, pid, 0, SCSI_TRANSPORT_MSG, len - sizeof(*nlh), 0);
578 if (!nlh) {
579 err = -ENOBUFS;
580 fn = "nlmsg_put";
581 goto msg_fail_skb;
582 }
583 datab = NLMSG_DATA(nlh);
584 memcpy(datab, hdr, hdr->msglen);
585
586 err = nlmsg_unicast(scsi_nl_sock, skb, pid);
587 if (err < 0) {
588 fn = "nlmsg_unicast";
589 /* nlmsg_unicast already kfree_skb'd */
590 goto msg_fail;
591 }
592
593 return;
594
595msg_fail_skb:
596 kfree_skb(skb);
597msg_fail:
598 printk(KERN_WARNING
599 "%s: Dropped Message : pid %d Transport %d, msgtype x%x, "
600 "msglen %d: %s : err %d\n",
601 __func__, pid, hdr->transport, hdr->msgtype, hdr->msglen,
602 fn, err);
603 return;
604}
605EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg);
606
607
608/**
609 * scsi_nl_send_vendor_msg - called to send a shost vendor unique message
610 * to a specific process id.
611 *
612 * @pid: process id of the receiver
613 * @host_no: host # sending the message
614 * @vendor_id: unique identifier for the driver's vendor
615 * @data_len: amount, in bytes, of vendor unique payload data
616 * @data_buf: pointer to vendor unique data buffer
617 *
618 * Returns:
619 * 0 on successful return
620 * otherwise, failing error code
621 *
622 * Notes:
623 * This routine assumes no locks are held on entry.
624 */
625int
626scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
627 char *data_buf, u32 data_len)
628{
629 struct sk_buff *skb;
630 struct nlmsghdr *nlh;
631 struct scsi_nl_host_vendor_msg *msg;
632 u32 len, skblen;
633 int err;
634
635 if (!scsi_nl_sock) {
636 err = -ENOENT;
637 goto send_vendor_fail;
638 }
639
640 len = SCSI_NL_MSGALIGN(sizeof(*msg) + data_len);
641 skblen = NLMSG_SPACE(len);
642
643 skb = alloc_skb(skblen, GFP_KERNEL);
644 if (!skb) {
645 err = -ENOBUFS;
646 goto send_vendor_fail;
647 }
648
649 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
650 skblen - sizeof(*nlh), 0);
651 if (!nlh) {
652 err = -ENOBUFS;
653 goto send_vendor_fail_skb;
654 }
655 msg = NLMSG_DATA(nlh);
656
657 INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT,
658 SCSI_NL_SHOST_VENDOR, len);
659 msg->vendor_id = vendor_id;
660 msg->host_no = host_no;
661 msg->vmsg_datalen = data_len; /* bytes */
662 memcpy(&msg[1], data_buf, data_len);
663
664 err = nlmsg_unicast(scsi_nl_sock, skb, pid);
665 if (err)
666 /* nlmsg_unicast already kfree_skb'd */
667 goto send_vendor_fail;
668
669 return 0;
670
671send_vendor_fail_skb:
672 kfree_skb(skb);
673send_vendor_fail:
674 printk(KERN_WARNING
675 "%s: Dropped SCSI Msg : host %d vendor_unique - err %d\n",
676 __func__, host_no, err);
677 return err;
678}
679EXPORT_SYMBOL(scsi_nl_send_vendor_msg);
680
681
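Going the other way - pushing a vendor-unique event from the kernel to a listening process - is a single call; the pid would typically have been learned from a message received through the dmsg handler above. A hedged sketch (vendor id and names are illustrative):

    static void example_notify(struct Scsi_Host *shost, u32 listener_pid)
    {
            char payload[] = "example-event";

            /* failures are logged by scsi_nl_send_vendor_msg() itself */
            scsi_nl_send_vendor_msg(listener_pid, shost->host_no,
                                    EXAMPLE_VENDOR_ID,
                                    payload, sizeof(payload));
    }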
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 79f0f7511204..6cddd5dd323c 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -4,6 +4,7 @@
4#include <linux/device.h> 4#include <linux/device.h>
5 5
6struct request_queue; 6struct request_queue;
7struct request;
7struct scsi_cmnd; 8struct scsi_cmnd;
8struct scsi_device; 9struct scsi_device;
9struct scsi_host_template; 10struct scsi_host_template;
@@ -27,7 +28,6 @@ extern void scsi_exit_hosts(void);
27extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd); 28extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd);
28extern int scsi_setup_command_freelist(struct Scsi_Host *shost); 29extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
29extern void scsi_destroy_command_freelist(struct Scsi_Host *shost); 30extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
30extern void __scsi_done(struct scsi_cmnd *cmd);
31#ifdef CONFIG_SCSI_LOGGING 31#ifdef CONFIG_SCSI_LOGGING
32void scsi_log_send(struct scsi_cmnd *cmd); 32void scsi_log_send(struct scsi_cmnd *cmd);
33void scsi_log_completion(struct scsi_cmnd *cmd, int disposition); 33void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
@@ -49,10 +49,7 @@ extern int __init scsi_init_devinfo(void);
49extern void scsi_exit_devinfo(void); 49extern void scsi_exit_devinfo(void);
50 50
51/* scsi_error.c */ 51/* scsi_error.c */
52extern void scsi_add_timer(struct scsi_cmnd *, int, 52extern enum blk_eh_timer_return scsi_times_out(struct request *req);
53 void (*)(struct scsi_cmnd *));
54extern int scsi_delete_timer(struct scsi_cmnd *);
55extern void scsi_times_out(struct scsi_cmnd *cmd);
56extern int scsi_error_handler(void *host); 53extern int scsi_error_handler(void *host);
57extern int scsi_decide_disposition(struct scsi_cmnd *cmd); 54extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
58extern void scsi_eh_wakeup(struct Scsi_Host *shost); 55extern void scsi_eh_wakeup(struct Scsi_Host *shost);
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index c6a904a45bf9..82f7b2dd08a2 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -259,8 +259,8 @@ static int scsi_add_single_device(uint host, uint channel, uint id, uint lun)
259 int error = -ENXIO; 259 int error = -ENXIO;
260 260
261 shost = scsi_host_lookup(host); 261 shost = scsi_host_lookup(host);
262 if (IS_ERR(shost)) 262 if (!shost)
263 return PTR_ERR(shost); 263 return error;
264 264
265 if (shost->transportt->user_scan) 265 if (shost->transportt->user_scan)
266 error = shost->transportt->user_scan(shost, channel, id, lun); 266 error = shost->transportt->user_scan(shost, channel, id, lun);
@@ -287,8 +287,8 @@ static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun)
287 int error = -ENXIO; 287 int error = -ENXIO;
288 288
289 shost = scsi_host_lookup(host); 289 shost = scsi_host_lookup(host);
290 if (IS_ERR(shost)) 290 if (!shost)
291 return PTR_ERR(shost); 291 return error;
292 sdev = scsi_device_lookup(shost, channel, id, lun); 292 sdev = scsi_device_lookup(shost, channel, id, lun);
293 if (sdev) { 293 if (sdev) {
294 scsi_remove_device(sdev); 294 scsi_remove_device(sdev);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 34d0de6cd511..334862e26a1b 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -730,6 +730,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
730static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, 730static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
731 int *bflags, int async) 731 int *bflags, int async)
732{ 732{
733 int ret;
734
733 /* 735 /*
734 * XXX do not save the inquiry, since it can change underneath us, 736 * XXX do not save the inquiry, since it can change underneath us,
735 * save just vendor/model/rev. 737 * save just vendor/model/rev.
@@ -885,7 +887,17 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
885 887
886 /* set the device running here so that slave configure 888 /* set the device running here so that slave configure
887 * may do I/O */ 889 * may do I/O */
888 scsi_device_set_state(sdev, SDEV_RUNNING); 890 ret = scsi_device_set_state(sdev, SDEV_RUNNING);
891 if (ret) {
892 ret = scsi_device_set_state(sdev, SDEV_BLOCK);
893
894 if (ret) {
895 sdev_printk(KERN_ERR, sdev,
896 "in wrong state %s to complete scan\n",
897 scsi_device_state_name(sdev->sdev_state));
898 return SCSI_SCAN_NO_RESPONSE;
899 }
900 }
889 901
890 if (*bflags & BLIST_MS_192_BYTES_FOR_3F) 902 if (*bflags & BLIST_MS_192_BYTES_FOR_3F)
891 sdev->use_192_bytes_for_3f = 1; 903 sdev->use_192_bytes_for_3f = 1;
@@ -899,7 +911,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
899 transport_configure_device(&sdev->sdev_gendev); 911 transport_configure_device(&sdev->sdev_gendev);
900 912
901 if (sdev->host->hostt->slave_configure) { 913 if (sdev->host->hostt->slave_configure) {
902 int ret = sdev->host->hostt->slave_configure(sdev); 914 ret = sdev->host->hostt->slave_configure(sdev);
903 if (ret) { 915 if (ret) {
904 /* 916 /*
905 * if LLDD reports slave not present, don't clutter 917 * if LLDD reports slave not present, don't clutter
@@ -994,7 +1006,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
994 */ 1006 */
995 sdev = scsi_device_lookup_by_target(starget, lun); 1007 sdev = scsi_device_lookup_by_target(starget, lun);
996 if (sdev) { 1008 if (sdev) {
997 if (rescan || sdev->sdev_state != SDEV_CREATED) { 1009 if (rescan || !scsi_device_created(sdev)) {
998 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO 1010 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
999 "scsi scan: device exists on %s\n", 1011 "scsi scan: device exists on %s\n",
1000 sdev->sdev_gendev.bus_id)); 1012 sdev->sdev_gendev.bus_id));
@@ -1467,7 +1479,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1467 kfree(lun_data); 1479 kfree(lun_data);
1468 out: 1480 out:
1469 scsi_device_put(sdev); 1481 scsi_device_put(sdev);
1470 if (sdev->sdev_state == SDEV_CREATED) 1482 if (scsi_device_created(sdev))
1471 /* 1483 /*
1472 * the sdev we used didn't appear in the report luns scan 1484 * the sdev we used didn't appear in the report luns scan
1473 */ 1485 */
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index ab3c71869be5..93c28f30bbd7 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -34,6 +34,7 @@ static const struct {
34 { SDEV_QUIESCE, "quiesce" }, 34 { SDEV_QUIESCE, "quiesce" },
35 { SDEV_OFFLINE, "offline" }, 35 { SDEV_OFFLINE, "offline" },
36 { SDEV_BLOCK, "blocked" }, 36 { SDEV_BLOCK, "blocked" },
37 { SDEV_CREATED_BLOCK, "created-blocked" },
37}; 38};
38 39
39const char *scsi_device_state_name(enum scsi_device_state state) 40const char *scsi_device_state_name(enum scsi_device_state state)
@@ -560,12 +561,15 @@ sdev_rd_attr (vendor, "%.8s\n");
560sdev_rd_attr (model, "%.16s\n"); 561sdev_rd_attr (model, "%.16s\n");
561sdev_rd_attr (rev, "%.4s\n"); 562sdev_rd_attr (rev, "%.4s\n");
562 563
564/*
565 * TODO: can we make these symlinks to the block layer ones?
566 */
563static ssize_t 567static ssize_t
564sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf) 568sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
565{ 569{
566 struct scsi_device *sdev; 570 struct scsi_device *sdev;
567 sdev = to_scsi_device(dev); 571 sdev = to_scsi_device(dev);
568 return snprintf (buf, 20, "%d\n", sdev->timeout / HZ); 572 return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
569} 573}
570 574
571static ssize_t 575static ssize_t
@@ -576,7 +580,7 @@ sdev_store_timeout (struct device *dev, struct device_attribute *attr,
576 int timeout; 580 int timeout;
577 sdev = to_scsi_device(dev); 581 sdev = to_scsi_device(dev);
578 sscanf (buf, "%d\n", &timeout); 582 sscanf (buf, "%d\n", &timeout);
579 sdev->timeout = timeout * HZ; 583 blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
580 return count; 584 return count;
581} 585}
582static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout); 586static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
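With sdev->timeout gone, drivers that want a default command timeout now set it on the request queue instead, exactly as sd_probe() does further down. A sketch of the same pattern in a ->slave_configure() hook (the 30-second value is purely illustrative):

    static int example_slave_configure(struct scsi_device *sdev)
    {
            /* only install a default if nothing else has set one yet */
            if (!sdev->request_queue->rq_timeout)
                    blk_queue_rq_timeout(sdev->request_queue, 30 * HZ);
            return 0;
    }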
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 257e097c39af..48ba413f7f6a 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
362 int err; 362 int err;
363 363
364 dprintk("%lx %u\n", uaddr, len); 364 dprintk("%lx %u\n", uaddr, len);
365 err = blk_rq_map_user(q, rq, (void *)uaddr, len); 365 err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
366 if (err) { 366 if (err) {
367 /* 367 /*
368 * TODO: need to fixup sg_tablesize, max_segment_size, 368 * TODO: need to fixup sg_tablesize, max_segment_size,
@@ -460,7 +460,7 @@ int scsi_tgt_kspace_exec(int host_no, u64 itn_id, int result, u64 tag,
460 460
461 /* TODO: replace with a O(1) alg */ 461 /* TODO: replace with a O(1) alg */
462 shost = scsi_host_lookup(host_no); 462 shost = scsi_host_lookup(host_no);
463 if (IS_ERR(shost)) { 463 if (!shost) {
464 printk(KERN_ERR "Could not find host no %d\n", host_no); 464 printk(KERN_ERR "Could not find host no %d\n", host_no);
465 return -EINVAL; 465 return -EINVAL;
466 } 466 }
@@ -550,7 +550,7 @@ int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 itn_id, u64 mid, int result)
550 dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid); 550 dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);
551 551
552 shost = scsi_host_lookup(host_no); 552 shost = scsi_host_lookup(host_no);
553 if (IS_ERR(shost)) { 553 if (!shost) {
554 printk(KERN_ERR "Could not find host no %d\n", host_no); 554 printk(KERN_ERR "Could not find host no %d\n", host_no);
555 return err; 555 return err;
556 } 556 }
@@ -603,7 +603,7 @@ int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 itn_id, int result)
603 dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id); 603 dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id);
604 604
605 shost = scsi_host_lookup(host_no); 605 shost = scsi_host_lookup(host_no);
606 if (IS_ERR(shost)) { 606 if (!shost) {
607 printk(KERN_ERR "Could not find host no %d\n", host_no); 607 printk(KERN_ERR "Could not find host no %d\n", host_no);
608 return err; 608 return err;
609 } 609 }
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 56823fd1fb84..d5f7653bb94b 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -40,31 +40,7 @@
40 40
41static int fc_queue_work(struct Scsi_Host *, struct work_struct *); 41static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
42static void fc_vport_sched_delete(struct work_struct *work); 42static void fc_vport_sched_delete(struct work_struct *work);
43 43static int fc_vport_setup(struct Scsi_Host *shost, int channel,
44/*
45 * This is a temporary carrier for creating a vport. It will eventually
46 * be replaced by a real message definition for sgio or netlink.
47 *
48 * fc_vport_identifiers: This set of data contains all elements
49 * to uniquely identify and instantiate a FC virtual port.
50 *
51 * Notes:
52 * symbolic_name: The driver is to append the symbolic_name string data
53 * to the symbolic_node_name data that it generates by default.
54 * the resulting combination should then be registered with the switch.
55 * It is expected that things like Xen may stuff a VM title into
56 * this field.
57 */
58struct fc_vport_identifiers {
59 u64 node_name;
60 u64 port_name;
61 u32 roles;
62 bool disable;
63 enum fc_port_type vport_type; /* only FC_PORTTYPE_NPIV allowed */
64 char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN];
65};
66
67static int fc_vport_create(struct Scsi_Host *shost, int channel,
68 struct device *pdev, struct fc_vport_identifiers *ids, 44 struct device *pdev, struct fc_vport_identifiers *ids,
69 struct fc_vport **vport); 45 struct fc_vport **vport);
70 46
@@ -1760,7 +1736,7 @@ store_fc_host_vport_create(struct device *dev, struct device_attribute *attr,
1760 vid.disable = false; /* always enabled */ 1736 vid.disable = false; /* always enabled */
1761 1737
1762 /* we only allow support on Channel 0 !!! */ 1738 /* we only allow support on Channel 0 !!! */
1763 stat = fc_vport_create(shost, 0, &shost->shost_gendev, &vid, &vport); 1739 stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport);
1764 return stat ? stat : count; 1740 return stat ? stat : count;
1765} 1741}
1766static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL, 1742static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
@@ -1950,15 +1926,15 @@ static int fc_vport_match(struct attribute_container *cont,
1950 * Notes: 1926 * Notes:
1951 * This routine assumes no locks are held on entry. 1927 * This routine assumes no locks are held on entry.
1952 */ 1928 */
1953static enum scsi_eh_timer_return 1929static enum blk_eh_timer_return
1954fc_timed_out(struct scsi_cmnd *scmd) 1930fc_timed_out(struct scsi_cmnd *scmd)
1955{ 1931{
1956 struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); 1932 struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
1957 1933
1958 if (rport->port_state == FC_PORTSTATE_BLOCKED) 1934 if (rport->port_state == FC_PORTSTATE_BLOCKED)
1959 return EH_RESET_TIMER; 1935 return BLK_EH_RESET_TIMER;
1960 1936
1961 return EH_NOT_HANDLED; 1937 return BLK_EH_NOT_HANDLED;
1962} 1938}
1963 1939
1964/* 1940/*
@@ -3103,7 +3079,7 @@ fc_scsi_scan_rport(struct work_struct *work)
3103 3079
3104 3080
3105/** 3081/**
3106 * fc_vport_create - allocates and creates a FC virtual port. 3082 * fc_vport_setup - allocates and creates a FC virtual port.
3107 * @shost: scsi host the virtual port is connected to. 3083 * @shost: scsi host the virtual port is connected to.
3108 * @channel: Channel on shost port connected to. 3084 * @channel: Channel on shost port connected to.
3109 * @pdev: parent device for vport 3085 * @pdev: parent device for vport
@@ -3118,7 +3094,7 @@ fc_scsi_scan_rport(struct work_struct *work)
3118 * This routine assumes no locks are held on entry. 3094 * This routine assumes no locks are held on entry.
3119 */ 3095 */
3120static int 3096static int
3121fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev, 3097fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
3122 struct fc_vport_identifiers *ids, struct fc_vport **ret_vport) 3098 struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
3123{ 3099{
3124 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 3100 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
@@ -3231,6 +3207,28 @@ delete_vport:
3231 return error; 3207 return error;
3232} 3208}
3233 3209
3210/**
3211 * fc_vport_create - Admin App or LLDD requests creation of a vport
3212 * @shost: scsi host the virtual port is connected to.
3213 * @channel: channel on shost port connected to.
3214 * @ids: The world wide names, FC4 port roles, etc for
3215 * the virtual port.
3216 *
3217 * Notes:
3218 * This routine assumes no locks are held on entry.
3219 */
3220struct fc_vport *
3221fc_vport_create(struct Scsi_Host *shost, int channel,
3222 struct fc_vport_identifiers *ids)
3223{
3224 int stat;
3225 struct fc_vport *vport;
3226
3227 stat = fc_vport_setup(shost, channel, &shost->shost_gendev,
3228 ids, &vport);
3229 return stat ? NULL : vport;
3230}
3231EXPORT_SYMBOL(fc_vport_create);
3234 3232
3235/** 3233/**
3236 * fc_vport_terminate - Admin App or LLDD requests termination of a vport 3234 * fc_vport_terminate - Admin App or LLDD requests termination of a vport
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 043c3921164f..0ce5f7cdfe2a 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1361,7 +1361,7 @@ iscsi_tgt_dscvr(struct iscsi_transport *transport,
1361 return -EINVAL; 1361 return -EINVAL;
1362 1362
1363 shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no); 1363 shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no);
1364 if (IS_ERR(shost)) { 1364 if (!shost) {
1365 printk(KERN_ERR "target discovery could not find host no %u\n", 1365 printk(KERN_ERR "target discovery could not find host no %u\n",
1366 ev->u.tgt_dscvr.host_no); 1366 ev->u.tgt_dscvr.host_no);
1367 return -ENODEV; 1367 return -ENODEV;
@@ -1387,7 +1387,7 @@ iscsi_set_host_param(struct iscsi_transport *transport,
1387 return -ENOSYS; 1387 return -ENOSYS;
1388 1388
1389 shost = scsi_host_lookup(ev->u.set_host_param.host_no); 1389 shost = scsi_host_lookup(ev->u.set_host_param.host_no);
1390 if (IS_ERR(shost)) { 1390 if (!shost) {
1391 printk(KERN_ERR "set_host_param could not find host no %u\n", 1391 printk(KERN_ERR "set_host_param could not find host no %u\n",
1392 ev->u.set_host_param.host_no); 1392 ev->u.set_host_param.host_no);
1393 return -ENODEV; 1393 return -ENODEV;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e5e7d7856454..a7b53be63367 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -47,6 +47,7 @@
47#include <linux/blkpg.h> 47#include <linux/blkpg.h>
48#include <linux/delay.h> 48#include <linux/delay.h>
49#include <linux/mutex.h> 49#include <linux/mutex.h>
50#include <linux/string_helpers.h>
50#include <asm/uaccess.h> 51#include <asm/uaccess.h>
51 52
52#include <scsi/scsi.h> 53#include <scsi/scsi.h>
@@ -86,6 +87,12 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
86MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD); 87MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
87MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); 88MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
88 89
90#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
91#define SD_MINORS 16
92#else
93#define SD_MINORS 0
94#endif
95
89static int sd_revalidate_disk(struct gendisk *); 96static int sd_revalidate_disk(struct gendisk *);
90static int sd_probe(struct device *); 97static int sd_probe(struct device *);
91static int sd_remove(struct device *); 98static int sd_remove(struct device *);
@@ -159,7 +166,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
159 sd_print_sense_hdr(sdkp, &sshdr); 166 sd_print_sense_hdr(sdkp, &sshdr);
160 return -EINVAL; 167 return -EINVAL;
161 } 168 }
162 sd_revalidate_disk(sdkp->disk); 169 revalidate_disk(sdkp->disk);
163 return count; 170 return count;
164} 171}
165 172
@@ -377,7 +384,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
377 sector_t block = rq->sector; 384 sector_t block = rq->sector;
378 sector_t threshold; 385 sector_t threshold;
379 unsigned int this_count = rq->nr_sectors; 386 unsigned int this_count = rq->nr_sectors;
380 unsigned int timeout = sdp->timeout;
381 int ret; 387 int ret;
382 388
383 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 389 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -578,7 +584,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
578 SCpnt->transfersize = sdp->sector_size; 584 SCpnt->transfersize = sdp->sector_size;
579 SCpnt->underflow = this_count << 9; 585 SCpnt->underflow = this_count << 9;
580 SCpnt->allowed = SD_MAX_RETRIES; 586 SCpnt->allowed = SD_MAX_RETRIES;
581 SCpnt->timeout_per_command = timeout;
582 587
583 /* 588 /*
584 * This indicates that the command is ready from our end to be 589 * This indicates that the command is ready from our end to be
@@ -910,7 +915,7 @@ static void sd_rescan(struct device *dev)
910 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); 915 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
911 916
912 if (sdkp) { 917 if (sdkp) {
913 sd_revalidate_disk(sdkp->disk); 918 revalidate_disk(sdkp->disk);
914 scsi_disk_put(sdkp); 919 scsi_disk_put(sdkp);
915 } 920 }
916} 921}
@@ -1429,27 +1434,21 @@ got_data:
1429 */ 1434 */
1430 sector_size = 512; 1435 sector_size = 512;
1431 } 1436 }
1437 blk_queue_hardsect_size(sdp->request_queue, sector_size);
1438
1432 { 1439 {
1433 /* 1440 char cap_str_2[10], cap_str_10[10];
1434 * The msdos fs needs to know the hardware sector size 1441 u64 sz = sdkp->capacity << ffz(~sector_size);
1435 * So I have created this table. See ll_rw_blk.c
1436 * Jacques Gelinas (Jacques@solucorp.qc.ca)
1437 */
1438 int hard_sector = sector_size;
1439 sector_t sz = (sdkp->capacity/2) * (hard_sector/256);
1440 struct request_queue *queue = sdp->request_queue;
1441 sector_t mb = sz;
1442 1442
1443 blk_queue_hardsect_size(queue, hard_sector); 1443 string_get_size(sz, STRING_UNITS_2, cap_str_2,
1444 /* avoid 64-bit division on 32-bit platforms */ 1444 sizeof(cap_str_2));
1445 sector_div(sz, 625); 1445 string_get_size(sz, STRING_UNITS_10, cap_str_10,
1446 mb -= sz - 974; 1446 sizeof(cap_str_10));
1447 sector_div(mb, 1950);
1448 1447
1449 sd_printk(KERN_NOTICE, sdkp, 1448 sd_printk(KERN_NOTICE, sdkp,
1450 "%llu %d-byte hardware sectors (%llu MB)\n", 1449 "%llu %d-byte hardware sectors: (%s/%s)\n",
1451 (unsigned long long)sdkp->capacity, 1450 (unsigned long long)sdkp->capacity,
1452 hard_sector, (unsigned long long)mb); 1451 sector_size, cap_str_10, cap_str_2);
1453 } 1452 }
1454 1453
1455 /* Rescale capacity to 512-byte units */ 1454 /* Rescale capacity to 512-byte units */
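string_get_size() comes from the <linux/string_helpers.h> include added at the top of this file and renders a byte count in either decimal or binary units. A quick illustration of what feeds the new log line (the capacity value is invented and the output strings are approximate):

    char cap_str_2[10], cap_str_10[10];
    u64 bytes = 80026361856ULL;        /* e.g. an 80 GB disk */

    string_get_size(bytes, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
    string_get_size(bytes, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
    /* cap_str_10 ends up roughly "80.0 GB", cap_str_2 roughly "74.5 GiB" */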
@@ -1764,6 +1763,52 @@ static int sd_revalidate_disk(struct gendisk *disk)
1764} 1763}
1765 1764
1766/** 1765/**
1766 * sd_format_disk_name - format disk name
1767 * @prefix: name prefix - ie. "sd" for SCSI disks
1768 * @index: index of the disk to format name for
1769 * @buf: output buffer
1770 * @buflen: length of the output buffer
1771 *
1772 * SCSI disk names start at sda. The 26th device is sdz and the
1773 * 27th is sdaa. The last name with a two-letter suffix is sdzz,
1774 * which is followed by sdaaa.
1775 *
1776 * This is essentially base-26 counting with an extra 'nil' entry
1777 * at the beginning of every digit after the first; it can be
1778 * computed like an ordinary base-26 conversion with the index
1779 * decremented by one after each digit is produced.
1780 *
1781 * CONTEXT:
1782 * Don't care.
1783 *
1784 * RETURNS:
1785 * 0 on success, -errno on failure.
1786 */
1787static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
1788{
1789 const int base = 'z' - 'a' + 1;
1790 char *begin = buf + strlen(prefix);
1791 char *end = buf + buflen;
1792 char *p;
1793 int unit;
1794
1795 p = end - 1;
1796 *p = '\0';
1797 unit = base;
1798 do {
1799 if (p == begin)
1800 return -EINVAL;
1801 *--p = 'a' + (index % unit);
1802 index = (index / unit) - 1;
1803 } while (index >= 0);
1804
1805 memmove(begin, p, end - p);
1806 memcpy(buf, prefix, strlen(prefix));
1807
1808 return 0;
1809}
1810
1811/**
1767 * sd_probe - called during driver initialization and whenever a 1812 * sd_probe - called during driver initialization and whenever a
1768 * new scsi device is attached to the system. It is called once 1813 * new scsi device is attached to the system. It is called once
1769 * for each scsi device (not just disks) present. 1814 * for each scsi device (not just disks) present.
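For reference, a few names the new sd_format_disk_name() helper is expected to produce with the "sd" prefix, worked out from the bijective base-26 scheme described in its comment (not taken from output in this patch):

    char name[DISK_NAME_LEN];

    sd_format_disk_name("sd", 0,   name, sizeof(name));   /* "sda"   */
    sd_format_disk_name("sd", 25,  name, sizeof(name));   /* "sdz"   */
    sd_format_disk_name("sd", 26,  name, sizeof(name));   /* "sdaa"  */
    sd_format_disk_name("sd", 701, name, sizeof(name));   /* "sdzz"  */
    sd_format_disk_name("sd", 702, name, sizeof(name));   /* "sdaaa" */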
@@ -1801,7 +1846,7 @@ static int sd_probe(struct device *dev)
1801 if (!sdkp) 1846 if (!sdkp)
1802 goto out; 1847 goto out;
1803 1848
1804 gd = alloc_disk(16); 1849 gd = alloc_disk(SD_MINORS);
1805 if (!gd) 1850 if (!gd)
1806 goto out_free; 1851 goto out_free;
1807 1852
@@ -1815,8 +1860,8 @@ static int sd_probe(struct device *dev)
1815 if (error) 1860 if (error)
1816 goto out_put; 1861 goto out_put;
1817 1862
1818 error = -EBUSY; 1863 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
1819 if (index >= SD_MAX_DISKS) 1864 if (error)
1820 goto out_free_index; 1865 goto out_free_index;
1821 1866
1822 sdkp->device = sdp; 1867 sdkp->device = sdp;
@@ -1826,11 +1871,12 @@ static int sd_probe(struct device *dev)
1826 sdkp->openers = 0; 1871 sdkp->openers = 0;
1827 sdkp->previous_state = 1; 1872 sdkp->previous_state = 1;
1828 1873
1829 if (!sdp->timeout) { 1874 if (!sdp->request_queue->rq_timeout) {
1830 if (sdp->type != TYPE_MOD) 1875 if (sdp->type != TYPE_MOD)
1831 sdp->timeout = SD_TIMEOUT; 1876 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
1832 else 1877 else
1833 sdp->timeout = SD_MOD_TIMEOUT; 1878 blk_queue_rq_timeout(sdp->request_queue,
1879 SD_MOD_TIMEOUT);
1834 } 1880 }
1835 1881
1836 device_initialize(&sdkp->dev); 1882 device_initialize(&sdkp->dev);
@@ -1843,24 +1889,12 @@ static int sd_probe(struct device *dev)
1843 1889
1844 get_device(&sdp->sdev_gendev); 1890 get_device(&sdp->sdev_gendev);
1845 1891
1846 gd->major = sd_major((index & 0xf0) >> 4); 1892 if (index < SD_MAX_DISKS) {
1847 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 1893 gd->major = sd_major((index & 0xf0) >> 4);
1848 gd->minors = 16; 1894 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
1849 gd->fops = &sd_fops; 1895 gd->minors = SD_MINORS;
1850
1851 if (index < 26) {
1852 sprintf(gd->disk_name, "sd%c", 'a' + index % 26);
1853 } else if (index < (26 + 1) * 26) {
1854 sprintf(gd->disk_name, "sd%c%c",
1855 'a' + index / 26 - 1,'a' + index % 26);
1856 } else {
1857 const unsigned int m1 = (index / 26 - 1) / 26 - 1;
1858 const unsigned int m2 = (index / 26 - 1) % 26;
1859 const unsigned int m3 = index % 26;
1860 sprintf(gd->disk_name, "sd%c%c%c",
1861 'a' + m1, 'a' + m2, 'a' + m3);
1862 } 1896 }
1863 1897 gd->fops = &sd_fops;
1864 gd->private_data = &sdkp->driver; 1898 gd->private_data = &sdkp->driver;
1865 gd->queue = sdkp->device->request_queue; 1899 gd->queue = sdkp->device->request_queue;
1866 1900
@@ -1869,7 +1903,7 @@ static int sd_probe(struct device *dev)
1869 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); 1903 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
1870 1904
1871 gd->driverfs_dev = &sdp->sdev_gendev; 1905 gd->driverfs_dev = &sdp->sdev_gendev;
1872 gd->flags = GENHD_FL_DRIVERFS; 1906 gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS;
1873 if (sdp->removable) 1907 if (sdp->removable)
1874 gd->flags |= GENHD_FL_REMOVABLE; 1908 gd->flags |= GENHD_FL_REMOVABLE;
1875 1909
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 661f9f21650a..ba9b9bbd4e73 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -47,7 +47,6 @@ static int sg_version_num = 30534; /* 2 digits for each component */
47#include <linux/seq_file.h> 47#include <linux/seq_file.h>
48#include <linux/blkdev.h> 48#include <linux/blkdev.h>
49#include <linux/delay.h> 49#include <linux/delay.h>
50#include <linux/scatterlist.h>
51#include <linux/blktrace_api.h> 50#include <linux/blktrace_api.h>
52#include <linux/smp_lock.h> 51#include <linux/smp_lock.h>
53 52
@@ -69,7 +68,6 @@ static void sg_proc_cleanup(void);
69#endif 68#endif
70 69
71#define SG_ALLOW_DIO_DEF 0 70#define SG_ALLOW_DIO_DEF 0
72#define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
73 71
74#define SG_MAX_DEVS 32768 72#define SG_MAX_DEVS 32768
75 73
@@ -118,8 +116,8 @@ typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
118 unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */ 116 unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
119 unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */ 117 unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
120 unsigned bufflen; /* Size of (aggregate) data buffer */ 118 unsigned bufflen; /* Size of (aggregate) data buffer */
121 unsigned b_malloc_len; /* actual len malloc'ed in buffer */ 119 struct page **pages;
122 struct scatterlist *buffer;/* scatter list */ 120 int page_order;
123 char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */ 121 char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
124 unsigned char cmd_opcode; /* first byte of command */ 122 unsigned char cmd_opcode; /* first byte of command */
125} Sg_scatter_hold; 123} Sg_scatter_hold;
@@ -137,6 +135,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
137 char orphan; /* 1 -> drop on sight, 0 -> normal */ 135 char orphan; /* 1 -> drop on sight, 0 -> normal */
138 char sg_io_owned; /* 1 -> packet belongs to SG_IO */ 136 char sg_io_owned; /* 1 -> packet belongs to SG_IO */
139 volatile char done; /* 0->before bh, 1->before read, 2->read */ 137 volatile char done; /* 0->before bh, 1->before read, 2->read */
138 struct request *rq;
139 struct bio *bio;
140} Sg_request; 140} Sg_request;
141 141
142typedef struct sg_fd { /* holds the state of a file descriptor */ 142typedef struct sg_fd { /* holds the state of a file descriptor */
@@ -175,8 +175,8 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
175 175
176static int sg_fasync(int fd, struct file *filp, int mode); 176static int sg_fasync(int fd, struct file *filp, int mode);
177/* tasklet or soft irq callback */ 177/* tasklet or soft irq callback */
178static void sg_cmd_done(void *data, char *sense, int result, int resid); 178static void sg_rq_end_io(struct request *rq, int uptodate);
179static int sg_start_req(Sg_request * srp); 179static int sg_start_req(Sg_request *srp, unsigned char *cmd);
180static void sg_finish_rem_req(Sg_request * srp); 180static void sg_finish_rem_req(Sg_request * srp);
181static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); 181static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
182static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, 182static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
@@ -188,17 +188,11 @@ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
188 int read_only, Sg_request **o_srp); 188 int read_only, Sg_request **o_srp);
189static int sg_common_write(Sg_fd * sfp, Sg_request * srp, 189static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
190 unsigned char *cmnd, int timeout, int blocking); 190 unsigned char *cmnd, int timeout, int blocking);
191static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
192 int wr_xf, int *countp, unsigned char __user **up);
193static int sg_write_xfer(Sg_request * srp);
194static int sg_read_xfer(Sg_request * srp);
195static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); 191static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
196static void sg_remove_scat(Sg_scatter_hold * schp); 192static void sg_remove_scat(Sg_scatter_hold * schp);
197static void sg_build_reserve(Sg_fd * sfp, int req_size); 193static void sg_build_reserve(Sg_fd * sfp, int req_size);
198static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); 194static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
199static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); 195static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
200static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
201static void sg_page_free(struct page *page, int size);
202static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev); 196static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
203static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); 197static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
204static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); 198static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
@@ -206,7 +200,6 @@ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
206static Sg_request *sg_add_request(Sg_fd * sfp); 200static Sg_request *sg_add_request(Sg_fd * sfp);
207static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); 201static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
208static int sg_res_in_use(Sg_fd * sfp); 202static int sg_res_in_use(Sg_fd * sfp);
209static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
210static Sg_device *sg_get_dev(int dev); 203static Sg_device *sg_get_dev(int dev);
211#ifdef CONFIG_SCSI_PROC_FS 204#ifdef CONFIG_SCSI_PROC_FS
212static int sg_last_dev(void); 205static int sg_last_dev(void);
@@ -529,8 +522,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
529 err = -EFAULT; 522 err = -EFAULT;
530 goto err_out; 523 goto err_out;
531 } 524 }
532 err = sg_read_xfer(srp); 525err_out:
533 err_out:
534 sg_finish_rem_req(srp); 526 sg_finish_rem_req(srp);
535 return (0 == err) ? count : err; 527 return (0 == err) ? count : err;
536} 528}
@@ -612,7 +604,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
612 else 604 else
613 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; 605 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
614 hp->dxfer_len = mxsize; 606 hp->dxfer_len = mxsize;
615 hp->dxferp = (char __user *)buf + cmd_size; 607 if (hp->dxfer_direction == SG_DXFER_TO_DEV)
608 hp->dxferp = (char __user *)buf + cmd_size;
609 else
610 hp->dxferp = NULL;
616 hp->sbp = NULL; 611 hp->sbp = NULL;
617 hp->timeout = old_hdr.reply_len; /* structure abuse ... */ 612 hp->timeout = old_hdr.reply_len; /* structure abuse ... */
618 hp->flags = input_size; /* structure abuse ... */ 613 hp->flags = input_size; /* structure abuse ... */
@@ -732,16 +727,12 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
732 SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", 727 SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
733 (int) cmnd[0], (int) hp->cmd_len)); 728 (int) cmnd[0], (int) hp->cmd_len));
734 729
735 if ((k = sg_start_req(srp))) { 730 k = sg_start_req(srp, cmnd);
731 if (k) {
736 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k)); 732 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
737 sg_finish_rem_req(srp); 733 sg_finish_rem_req(srp);
738 return k; /* probably out of space --> ENOMEM */ 734 return k; /* probably out of space --> ENOMEM */
739 } 735 }
740 if ((k = sg_write_xfer(srp))) {
741 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: write_xfer, bad address\n"));
742 sg_finish_rem_req(srp);
743 return k;
744 }
745 if (sdp->detached) { 736 if (sdp->detached) {
746 sg_finish_rem_req(srp); 737 sg_finish_rem_req(srp);
747 return -ENODEV; 738 return -ENODEV;
@@ -763,20 +754,11 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
763 break; 754 break;
764 } 755 }
765 hp->duration = jiffies_to_msecs(jiffies); 756 hp->duration = jiffies_to_msecs(jiffies);
766/* Now send everything of to mid-level. The next time we hear about this 757
767 packet is when sg_cmd_done() is called (i.e. a callback). */ 758 srp->rq->timeout = timeout;
768 if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer, 759 blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
769 hp->dxfer_len, srp->data.k_use_sg, timeout, 760 srp->rq, 1, sg_rq_end_io);
770 SG_DEFAULT_RETRIES, srp, sg_cmd_done, 761 return 0;
771 GFP_ATOMIC)) {
772 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: scsi_execute_async failed\n"));
773 /*
774 * most likely out of mem, but could also be a bad map
775 */
776 sg_finish_rem_req(srp);
777 return -ENOMEM;
778 } else
779 return 0;
780} 762}
781 763
782static int 764static int
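
The sg_common_write() hunk above drops scsi_execute_async() in favour of building a block-layer request and firing it off with blk_execute_rq_nowait(). A minimal sketch of that submission pattern for a kernel of this vintage follows; the function names (my_pc_done, my_submit_cdb) and the context pointer are illustrative, not part of the patch.

#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/string.h>

/* Completion callback: the block layer calls this when the request finishes. */
static void my_pc_done(struct request *rq, int uptodate)
{
	/*
	 * rq->end_io_data carries whatever context was attached at submit
	 * time; the owner eventually releases the request with
	 * blk_put_request().
	 */
}

/* Submit a SCSI CDB asynchronously as a REQ_TYPE_BLOCK_PC request. */
static int my_submit_cdb(struct request_queue *q, struct gendisk *disk,
			 unsigned char *cdb, unsigned int cdb_len,
			 void *ctx, unsigned int timeout_jiffies)
{
	struct request *rq = blk_get_request(q, READ, GFP_ATOMIC);

	if (!rq)
		return -ENOMEM;

	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->end_io_data = ctx;		/* e.g. the driver's per-command state */
	rq->retries = 1;
	rq->timeout = timeout_jiffies;	/* a sense buffer can be attached via rq->sense */

	/* '1' requests insertion at the queue head; my_pc_done runs on completion */
	blk_execute_rq_nowait(q, disk, rq, 1, my_pc_done);
	return 0;
}
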
@@ -1192,8 +1174,7 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1192 Sg_fd *sfp; 1174 Sg_fd *sfp;
1193 unsigned long offset, len, sa; 1175 unsigned long offset, len, sa;
1194 Sg_scatter_hold *rsv_schp; 1176 Sg_scatter_hold *rsv_schp;
1195 struct scatterlist *sg; 1177 int k, length;
1196 int k;
1197 1178
1198 if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) 1179 if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
1199 return VM_FAULT_SIGBUS; 1180 return VM_FAULT_SIGBUS;
@@ -1203,15 +1184,14 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1203 return VM_FAULT_SIGBUS; 1184 return VM_FAULT_SIGBUS;
1204 SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n", 1185 SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
1205 offset, rsv_schp->k_use_sg)); 1186 offset, rsv_schp->k_use_sg));
1206 sg = rsv_schp->buffer;
1207 sa = vma->vm_start; 1187 sa = vma->vm_start;
1208 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); 1188 length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1209 ++k, sg = sg_next(sg)) { 1189 for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
1210 len = vma->vm_end - sa; 1190 len = vma->vm_end - sa;
1211 len = (len < sg->length) ? len : sg->length; 1191 len = (len < length) ? len : length;
1212 if (offset < len) { 1192 if (offset < len) {
1213 struct page *page; 1193 struct page *page = nth_page(rsv_schp->pages[k],
1214 page = virt_to_page(page_address(sg_page(sg)) + offset); 1194 offset >> PAGE_SHIFT);
1215 get_page(page); /* increment page count */ 1195 get_page(page); /* increment page count */
1216 vmf->page = page; 1196 vmf->page = page;
1217 return 0; /* success */ 1197 return 0; /* success */
@@ -1233,8 +1213,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1233 Sg_fd *sfp; 1213 Sg_fd *sfp;
1234 unsigned long req_sz, len, sa; 1214 unsigned long req_sz, len, sa;
1235 Sg_scatter_hold *rsv_schp; 1215 Sg_scatter_hold *rsv_schp;
1236 int k; 1216 int k, length;
1237 struct scatterlist *sg;
1238 1217
1239 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) 1218 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
1240 return -ENXIO; 1219 return -ENXIO;
@@ -1248,11 +1227,10 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1248 return -ENOMEM; /* cannot map more than reserved buffer */ 1227 return -ENOMEM; /* cannot map more than reserved buffer */
1249 1228
1250 sa = vma->vm_start; 1229 sa = vma->vm_start;
1251 sg = rsv_schp->buffer; 1230 length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1252 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); 1231 for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
1253 ++k, sg = sg_next(sg)) {
1254 len = vma->vm_end - sa; 1232 len = vma->vm_end - sa;
1255 len = (len < sg->length) ? len : sg->length; 1233 len = (len < length) ? len : length;
1256 sa += len; 1234 sa += len;
1257 } 1235 }
1258 1236
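
Both sg_vma_fault() and sg_mmap() now walk an array of higher-order page allocations instead of a scatterlist: every element covers 1 << (PAGE_SHIFT + page_order) bytes, and the single page backing a byte offset inside an element is reached with nth_page(). A small sketch of that lookup, assuming a pages[]/page_order pair like the one kept in Sg_scatter_hold (the helper itself is hypothetical):

#include <linux/mm.h>

/*
 * Return the PAGE_SIZE page that backs byte 'offset' of a buffer built
 * from 'nents' allocations of order 'page_order' (a sketch, not the
 * driver's exact code).
 */
static struct page *lookup_backing_page(struct page **pages, int nents,
					int page_order, unsigned long offset)
{
	unsigned long seg = 1UL << (PAGE_SHIFT + page_order);
	int k;

	for (k = 0; k < nents; k++) {
		if (offset < seg)
			return nth_page(pages[k], offset >> PAGE_SHIFT);
		offset -= seg;
	}
	return NULL;		/* offset lies past the end of the buffer */
}
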
@@ -1263,16 +1241,19 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1263 return 0; 1241 return 0;
1264} 1242}
1265 1243
1266/* This function is a "bottom half" handler that is called by the 1244/*
1267 * mid level when a command is completed (or has failed). */ 1245 * This function is a "bottom half" handler that is called by the mid
1268static void 1246 * level when a command is completed (or has failed).
1269sg_cmd_done(void *data, char *sense, int result, int resid) 1247 */
1248static void sg_rq_end_io(struct request *rq, int uptodate)
1270{ 1249{
1271 Sg_request *srp = data; 1250 struct sg_request *srp = rq->end_io_data;
1272 Sg_device *sdp = NULL; 1251 Sg_device *sdp = NULL;
1273 Sg_fd *sfp; 1252 Sg_fd *sfp;
1274 unsigned long iflags; 1253 unsigned long iflags;
1275 unsigned int ms; 1254 unsigned int ms;
1255 char *sense;
1256 int result, resid;
1276 1257
1277 if (NULL == srp) { 1258 if (NULL == srp) {
1278 printk(KERN_ERR "sg_cmd_done: NULL request\n"); 1259 printk(KERN_ERR "sg_cmd_done: NULL request\n");
@@ -1286,6 +1267,9 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
1286 return; 1267 return;
1287 } 1268 }
1288 1269
1270 sense = rq->sense;
1271 result = rq->errors;
1272 resid = rq->data_len;
1289 1273
1290 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", 1274 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1291 sdp->disk->disk_name, srp->header.pack_id, result)); 1275 sdp->disk->disk_name, srp->header.pack_id, result));
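
With blk_execute_rq_nowait() the completion data that scsi_execute_async() used to pass as callback arguments is instead read back from the request inside sg_rq_end_io(). A condensed sketch of that mapping onto 2.6.2x struct request fields (the surrounding handler name is hypothetical):

#include <linux/blkdev.h>

static void my_pc_end_io(struct request *rq, int uptodate)
{
	void *ctx   = rq->end_io_data;	/* context attached at submit time */
	char *sense = rq->sense;	/* sense data, if rq->sense was set */
	int result  = rq->errors;	/* SCSI status/result word */
	int resid   = rq->data_len;	/* bytes not transferred */

	/*
	 * ... hand sense/result/resid back to whoever owns ctx; the owner
	 * later releases the request with blk_put_request() ...
	 */
}
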
@@ -1296,7 +1280,6 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
1296 if (0 != result) { 1280 if (0 != result) {
1297 struct scsi_sense_hdr sshdr; 1281 struct scsi_sense_hdr sshdr;
1298 1282
1299 memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
1300 srp->header.status = 0xff & result; 1283 srp->header.status = 0xff & result;
1301 srp->header.masked_status = status_byte(result); 1284 srp->header.masked_status = status_byte(result);
1302 srp->header.msg_status = msg_byte(result); 1285 srp->header.msg_status = msg_byte(result);
@@ -1634,37 +1617,79 @@ exit_sg(void)
1634 idr_destroy(&sg_index_idr); 1617 idr_destroy(&sg_index_idr);
1635} 1618}
1636 1619
1637static int 1620static int sg_start_req(Sg_request *srp, unsigned char *cmd)
1638sg_start_req(Sg_request * srp)
1639{ 1621{
1640 int res; 1622 int res;
1623 struct request *rq;
1641 Sg_fd *sfp = srp->parentfp; 1624 Sg_fd *sfp = srp->parentfp;
1642 sg_io_hdr_t *hp = &srp->header; 1625 sg_io_hdr_t *hp = &srp->header;
1643 int dxfer_len = (int) hp->dxfer_len; 1626 int dxfer_len = (int) hp->dxfer_len;
1644 int dxfer_dir = hp->dxfer_direction; 1627 int dxfer_dir = hp->dxfer_direction;
1628 unsigned int iov_count = hp->iovec_count;
1645 Sg_scatter_hold *req_schp = &srp->data; 1629 Sg_scatter_hold *req_schp = &srp->data;
1646 Sg_scatter_hold *rsv_schp = &sfp->reserve; 1630 Sg_scatter_hold *rsv_schp = &sfp->reserve;
1631 struct request_queue *q = sfp->parentdp->device->request_queue;
1632 struct rq_map_data *md, map_data;
1633 int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
1634
1635 SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n",
1636 dxfer_len));
1637
1638 rq = blk_get_request(q, rw, GFP_ATOMIC);
1639 if (!rq)
1640 return -ENOMEM;
1641
1642 memcpy(rq->cmd, cmd, hp->cmd_len);
1643
1644 rq->cmd_len = hp->cmd_len;
1645 rq->cmd_type = REQ_TYPE_BLOCK_PC;
1646
1647 srp->rq = rq;
1648 rq->end_io_data = srp;
1649 rq->sense = srp->sense_b;
1650 rq->retries = SG_DEFAULT_RETRIES;
1647 1651
1648 SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
1649 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) 1652 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1650 return 0; 1653 return 0;
1651 if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) && 1654
1652 (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) && 1655 if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
1653 (!sfp->parentdp->device->host->unchecked_isa_dma)) { 1656 dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
1654 res = sg_build_direct(srp, sfp, dxfer_len); 1657 !sfp->parentdp->device->host->unchecked_isa_dma &&
1655 if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */ 1658 blk_rq_aligned(q, hp->dxferp, dxfer_len))
1656 return res; 1659 md = NULL;
1657 } 1660 else
1658 if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen)) 1661 md = &map_data;
1659 sg_link_reserve(sfp, srp, dxfer_len); 1662
1660 else { 1663 if (md) {
1661 res = sg_build_indirect(req_schp, sfp, dxfer_len); 1664 if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
1662 if (res) { 1665 sg_link_reserve(sfp, srp, dxfer_len);
1663 sg_remove_scat(req_schp); 1666 else {
1664 return res; 1667 res = sg_build_indirect(req_schp, sfp, dxfer_len);
1668 if (res)
1669 return res;
1665 } 1670 }
1671
1672 md->pages = req_schp->pages;
1673 md->page_order = req_schp->page_order;
1674 md->nr_entries = req_schp->k_use_sg;
1666 } 1675 }
1667 return 0; 1676
1677 if (iov_count)
1678 res = blk_rq_map_user_iov(q, rq, md, hp->dxferp, iov_count,
1679 hp->dxfer_len, GFP_ATOMIC);
1680 else
1681 res = blk_rq_map_user(q, rq, md, hp->dxferp,
1682 hp->dxfer_len, GFP_ATOMIC);
1683
1684 if (!res) {
1685 srp->bio = rq->bio;
1686
1687 if (!md) {
1688 req_schp->dio_in_use = 1;
1689 hp->info |= SG_INFO_DIRECT_IO;
1690 }
1691 }
1692 return res;
1668} 1693}
1669 1694
1670static void 1695static void
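
The new sg_start_req() chooses between mapping the user buffer directly (md == NULL, allowed only when sg_allow_dio is set, the direction is known, there is no iovec and blk_rq_aligned() accepts the address) and bouncing through driver-owned pages described by a struct rq_map_data. A trimmed sketch of that decision and the blk_rq_map_user() call, written against the same 2.6.2x block API the hunk uses; the helper name and the direct_ok flag are illustrative:

#include <linux/blkdev.h>
#include <linux/types.h>

/*
 * Map 'ubuf'/'len' into 'rq'. When the buffer qualifies for direct I/O we
 * let the block layer pin the user pages; otherwise rq_map_data points at
 * pages we allocated ourselves and blk_rq_map_user() copies through them.
 */
static int map_user_buffer(struct request_queue *q, struct request *rq,
			   void __user *ubuf, unsigned long len,
			   struct page **own_pages, int own_order,
			   int own_nents, bool direct_ok)
{
	struct rq_map_data map_data, *md = NULL;

	if (!direct_ok || !blk_rq_aligned(q, ubuf, len)) {
		map_data.pages = own_pages;
		map_data.page_order = own_order;
		map_data.nr_entries = own_nents;
		md = &map_data;
	}

	return blk_rq_map_user(q, rq, md, ubuf, len, GFP_ATOMIC);
}

On success the caller remembers rq->bio so the mapping can later be torn down with blk_rq_unmap_user(), exactly as sg_finish_rem_req() now does.
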
@@ -1678,186 +1703,37 @@ sg_finish_rem_req(Sg_request * srp)
1678 sg_unlink_reserve(sfp, srp); 1703 sg_unlink_reserve(sfp, srp);
1679 else 1704 else
1680 sg_remove_scat(req_schp); 1705 sg_remove_scat(req_schp);
1706
1707 if (srp->rq) {
1708 if (srp->bio)
1709 blk_rq_unmap_user(srp->bio);
1710
1711 blk_put_request(srp->rq);
1712 }
1713
1681 sg_remove_request(sfp, srp); 1714 sg_remove_request(sfp, srp);
1682} 1715}
1683 1716
1684static int 1717static int
1685sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) 1718sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
1686{ 1719{
1687 int sg_bufflen = tablesize * sizeof(struct scatterlist); 1720 int sg_bufflen = tablesize * sizeof(struct page *);
1688 gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN; 1721 gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
1689 1722
1690 /* 1723 schp->pages = kzalloc(sg_bufflen, gfp_flags);
1691 * TODO: test without low_dma, we should not need it since 1724 if (!schp->pages)
1692 * the block layer will bounce the buffer for us
1693 *
1694 * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list.
1695 */
1696 if (sfp->low_dma)
1697 gfp_flags |= GFP_DMA;
1698 schp->buffer = kzalloc(sg_bufflen, gfp_flags);
1699 if (!schp->buffer)
1700 return -ENOMEM; 1725 return -ENOMEM;
1701 sg_init_table(schp->buffer, tablesize);
1702 schp->sglist_len = sg_bufflen; 1726 schp->sglist_len = sg_bufflen;
1703 return tablesize; /* number of scat_gath elements allocated */ 1727 return tablesize; /* number of scat_gath elements allocated */
1704} 1728}
1705 1729
1706#ifdef SG_ALLOW_DIO_CODE
1707/* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */
1708 /* TODO: hopefully we can use the generic block layer code */
1709
1710/* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
1711 - mapping of all pages not successful
1712 (i.e., either completely successful or fails)
1713*/
1714static int
1715st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
1716 unsigned long uaddr, size_t count, int rw)
1717{
1718 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
1719 unsigned long start = uaddr >> PAGE_SHIFT;
1720 const int nr_pages = end - start;
1721 int res, i, j;
1722 struct page **pages;
1723
1724 /* User attempted Overflow! */
1725 if ((uaddr + count) < uaddr)
1726 return -EINVAL;
1727
1728 /* Too big */
1729 if (nr_pages > max_pages)
1730 return -ENOMEM;
1731
1732 /* Hmm? */
1733 if (count == 0)
1734 return 0;
1735
1736 if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
1737 return -ENOMEM;
1738
1739 /* Try to fault in all of the necessary pages */
1740 down_read(&current->mm->mmap_sem);
1741 /* rw==READ means read from drive, write into memory area */
1742 res = get_user_pages(
1743 current,
1744 current->mm,
1745 uaddr,
1746 nr_pages,
1747 rw == READ,
1748 0, /* don't force */
1749 pages,
1750 NULL);
1751 up_read(&current->mm->mmap_sem);
1752
1753 /* Errors and no page mapped should return here */
1754 if (res < nr_pages)
1755 goto out_unmap;
1756
1757 for (i=0; i < nr_pages; i++) {
1758 /* FIXME: flush superflous for rw==READ,
1759 * probably wrong function for rw==WRITE
1760 */
1761 flush_dcache_page(pages[i]);
1762 /* ?? Is locking needed? I don't think so */
1763 /* if (!trylock_page(pages[i]))
1764 goto out_unlock; */
1765 }
1766
1767 sg_set_page(sgl, pages[0], 0, uaddr & ~PAGE_MASK);
1768 if (nr_pages > 1) {
1769 sgl[0].length = PAGE_SIZE - sgl[0].offset;
1770 count -= sgl[0].length;
1771 for (i=1; i < nr_pages ; i++)
1772 sg_set_page(&sgl[i], pages[i], count < PAGE_SIZE ? count : PAGE_SIZE, 0);
1773 }
1774 else {
1775 sgl[0].length = count;
1776 }
1777
1778 kfree(pages);
1779 return nr_pages;
1780
1781 out_unmap:
1782 if (res > 0) {
1783 for (j=0; j < res; j++)
1784 page_cache_release(pages[j]);
1785 res = 0;
1786 }
1787 kfree(pages);
1788 return res;
1789}
1790
1791
1792/* And unmap them... */
1793static int
1794st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
1795 int dirtied)
1796{
1797 int i;
1798
1799 for (i=0; i < nr_pages; i++) {
1800 struct page *page = sg_page(&sgl[i]);
1801
1802 if (dirtied)
1803 SetPageDirty(page);
1804 /* unlock_page(page); */
1805 /* FIXME: cache flush missing for rw==READ
1806 * FIXME: call the correct reference counting function
1807 */
1808 page_cache_release(page);
1809 }
1810
1811 return 0;
1812}
1813
1814/* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */
1815#endif
1816
1817
1818/* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
1819static int
1820sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
1821{
1822#ifdef SG_ALLOW_DIO_CODE
1823 sg_io_hdr_t *hp = &srp->header;
1824 Sg_scatter_hold *schp = &srp->data;
1825 int sg_tablesize = sfp->parentdp->sg_tablesize;
1826 int mx_sc_elems, res;
1827 struct scsi_device *sdev = sfp->parentdp->device;
1828
1829 if (((unsigned long)hp->dxferp &
1830 queue_dma_alignment(sdev->request_queue)) != 0)
1831 return 1;
1832
1833 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1834 if (mx_sc_elems <= 0) {
1835 return 1;
1836 }
1837 res = st_map_user_pages(schp->buffer, mx_sc_elems,
1838 (unsigned long)hp->dxferp, dxfer_len,
1839 (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
1840 if (res <= 0) {
1841 sg_remove_scat(schp);
1842 return 1;
1843 }
1844 schp->k_use_sg = res;
1845 schp->dio_in_use = 1;
1846 hp->info |= SG_INFO_DIRECT_IO;
1847 return 0;
1848#else
1849 return 1;
1850#endif
1851}
1852
1853static int 1730static int
1854sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) 1731sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1855{ 1732{
1856 struct scatterlist *sg; 1733 int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
1857 int ret_sz = 0, k, rem_sz, num, mx_sc_elems;
1858 int sg_tablesize = sfp->parentdp->sg_tablesize; 1734 int sg_tablesize = sfp->parentdp->sg_tablesize;
1859 int blk_size = buff_size; 1735 int blk_size = buff_size, order;
1860 struct page *p = NULL; 1736 gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
1861 1737
1862 if (blk_size < 0) 1738 if (blk_size < 0)
1863 return -EFAULT; 1739 return -EFAULT;
@@ -1881,15 +1757,26 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1881 } else 1757 } else
1882 scatter_elem_sz_prev = num; 1758 scatter_elem_sz_prev = num;
1883 } 1759 }
1884 for (k = 0, sg = schp->buffer, rem_sz = blk_size; 1760
1885 (rem_sz > 0) && (k < mx_sc_elems); 1761 if (sfp->low_dma)
1886 ++k, rem_sz -= ret_sz, sg = sg_next(sg)) { 1762 gfp_mask |= GFP_DMA;
1887 1763
1764 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1765 gfp_mask |= __GFP_ZERO;
1766
1767 order = get_order(num);
1768retry:
1769 ret_sz = 1 << (PAGE_SHIFT + order);
1770
1771 for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
1772 k++, rem_sz -= ret_sz) {
1773
1888 num = (rem_sz > scatter_elem_sz_prev) ? 1774 num = (rem_sz > scatter_elem_sz_prev) ?
1889 scatter_elem_sz_prev : rem_sz; 1775 scatter_elem_sz_prev : rem_sz;
1890 p = sg_page_malloc(num, sfp->low_dma, &ret_sz); 1776
1891 if (!p) 1777 schp->pages[k] = alloc_pages(gfp_mask, order);
1892 return -ENOMEM; 1778 if (!schp->pages[k])
1779 goto out;
1893 1780
1894 if (num == scatter_elem_sz_prev) { 1781 if (num == scatter_elem_sz_prev) {
1895 if (unlikely(ret_sz > scatter_elem_sz_prev)) { 1782 if (unlikely(ret_sz > scatter_elem_sz_prev)) {
@@ -1897,12 +1784,12 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1897 scatter_elem_sz_prev = ret_sz; 1784 scatter_elem_sz_prev = ret_sz;
1898 } 1785 }
1899 } 1786 }
1900 sg_set_page(sg, p, (ret_sz > num) ? num : ret_sz, 0);
1901 1787
1902 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, " 1788 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
1903 "ret_sz=%d\n", k, num, ret_sz)); 1789 "ret_sz=%d\n", k, num, ret_sz));
1904 } /* end of for loop */ 1790 } /* end of for loop */
1905 1791
1792 schp->page_order = order;
1906 schp->k_use_sg = k; 1793 schp->k_use_sg = k;
1907 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, " 1794 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
1908 "rem_sz=%d\n", k, rem_sz)); 1795 "rem_sz=%d\n", k, rem_sz));
@@ -1910,223 +1797,42 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1910 schp->bufflen = blk_size; 1797 schp->bufflen = blk_size;
1911 if (rem_sz > 0) /* must have failed */ 1798 if (rem_sz > 0) /* must have failed */
1912 return -ENOMEM; 1799 return -ENOMEM;
1913
1914 return 0; 1800 return 0;
1915} 1801out:
1916 1802 for (i = 0; i < k; i++)
1917static int 1803 __free_pages(schp->pages[i], order);
1918sg_write_xfer(Sg_request * srp)
1919{
1920 sg_io_hdr_t *hp = &srp->header;
1921 Sg_scatter_hold *schp = &srp->data;
1922 struct scatterlist *sg = schp->buffer;
1923 int num_xfer = 0;
1924 int j, k, onum, usglen, ksglen, res;
1925 int iovec_count = (int) hp->iovec_count;
1926 int dxfer_dir = hp->dxfer_direction;
1927 unsigned char *p;
1928 unsigned char __user *up;
1929 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
1930
1931 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
1932 (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
1933 num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
1934 if (schp->bufflen < num_xfer)
1935 num_xfer = schp->bufflen;
1936 }
1937 if ((num_xfer <= 0) || (schp->dio_in_use) ||
1938 (new_interface
1939 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
1940 return 0;
1941
1942 SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
1943 num_xfer, iovec_count, schp->k_use_sg));
1944 if (iovec_count) {
1945 onum = iovec_count;
1946 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
1947 return -EFAULT;
1948 } else
1949 onum = 1;
1950
1951 ksglen = sg->length;
1952 p = page_address(sg_page(sg));
1953 for (j = 0, k = 0; j < onum; ++j) {
1954 res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
1955 if (res)
1956 return res;
1957
1958 for (; p; sg = sg_next(sg), ksglen = sg->length,
1959 p = page_address(sg_page(sg))) {
1960 if (usglen <= 0)
1961 break;
1962 if (ksglen > usglen) {
1963 if (usglen >= num_xfer) {
1964 if (__copy_from_user(p, up, num_xfer))
1965 return -EFAULT;
1966 return 0;
1967 }
1968 if (__copy_from_user(p, up, usglen))
1969 return -EFAULT;
1970 p += usglen;
1971 ksglen -= usglen;
1972 break;
1973 } else {
1974 if (ksglen >= num_xfer) {
1975 if (__copy_from_user(p, up, num_xfer))
1976 return -EFAULT;
1977 return 0;
1978 }
1979 if (__copy_from_user(p, up, ksglen))
1980 return -EFAULT;
1981 up += ksglen;
1982 usglen -= ksglen;
1983 }
1984 ++k;
1985 if (k >= schp->k_use_sg)
1986 return 0;
1987 }
1988 }
1989
1990 return 0;
1991}
1992 1804
1993static int 1805 if (--order >= 0)
1994sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind, 1806 goto retry;
1995 int wr_xf, int *countp, unsigned char __user **up)
1996{
1997 int num_xfer = (int) hp->dxfer_len;
1998 unsigned char __user *p = hp->dxferp;
1999 int count;
2000 1807
2001 if (0 == sg_num) { 1808 return -ENOMEM;
2002 if (wr_xf && ('\0' == hp->interface_id))
2003 count = (int) hp->flags; /* holds "old" input_size */
2004 else
2005 count = num_xfer;
2006 } else {
2007 sg_iovec_t iovec;
2008 if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
2009 return -EFAULT;
2010 p = iovec.iov_base;
2011 count = (int) iovec.iov_len;
2012 }
2013 if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
2014 return -EFAULT;
2015 if (up)
2016 *up = p;
2017 if (countp)
2018 *countp = count;
2019 return 0;
2020} 1809}
2021 1810
2022static void 1811static void
2023sg_remove_scat(Sg_scatter_hold * schp) 1812sg_remove_scat(Sg_scatter_hold * schp)
2024{ 1813{
2025 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); 1814 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
2026 if (schp->buffer && (schp->sglist_len > 0)) { 1815 if (schp->pages && schp->sglist_len > 0) {
2027 struct scatterlist *sg = schp->buffer; 1816 if (!schp->dio_in_use) {
2028
2029 if (schp->dio_in_use) {
2030#ifdef SG_ALLOW_DIO_CODE
2031 st_unmap_user_pages(sg, schp->k_use_sg, TRUE);
2032#endif
2033 } else {
2034 int k; 1817 int k;
2035 1818
2036 for (k = 0; (k < schp->k_use_sg) && sg_page(sg); 1819 for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
2037 ++k, sg = sg_next(sg)) {
2038 SCSI_LOG_TIMEOUT(5, printk( 1820 SCSI_LOG_TIMEOUT(5, printk(
2039 "sg_remove_scat: k=%d, pg=0x%p, len=%d\n", 1821 "sg_remove_scat: k=%d, pg=0x%p\n",
2040 k, sg_page(sg), sg->length)); 1822 k, schp->pages[k]));
2041 sg_page_free(sg_page(sg), sg->length); 1823 __free_pages(schp->pages[k], schp->page_order);
2042 } 1824 }
2043 }
2044 kfree(schp->buffer);
2045 }
2046 memset(schp, 0, sizeof (*schp));
2047}
2048 1825
2049static int 1826 kfree(schp->pages);
2050sg_read_xfer(Sg_request * srp)
2051{
2052 sg_io_hdr_t *hp = &srp->header;
2053 Sg_scatter_hold *schp = &srp->data;
2054 struct scatterlist *sg = schp->buffer;
2055 int num_xfer = 0;
2056 int j, k, onum, usglen, ksglen, res;
2057 int iovec_count = (int) hp->iovec_count;
2058 int dxfer_dir = hp->dxfer_direction;
2059 unsigned char *p;
2060 unsigned char __user *up;
2061 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
2062
2063 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
2064 || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
2065 num_xfer = hp->dxfer_len;
2066 if (schp->bufflen < num_xfer)
2067 num_xfer = schp->bufflen;
2068 }
2069 if ((num_xfer <= 0) || (schp->dio_in_use) ||
2070 (new_interface
2071 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
2072 return 0;
2073
2074 SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
2075 num_xfer, iovec_count, schp->k_use_sg));
2076 if (iovec_count) {
2077 onum = iovec_count;
2078 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
2079 return -EFAULT;
2080 } else
2081 onum = 1;
2082
2083 p = page_address(sg_page(sg));
2084 ksglen = sg->length;
2085 for (j = 0, k = 0; j < onum; ++j) {
2086 res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2087 if (res)
2088 return res;
2089
2090 for (; p; sg = sg_next(sg), ksglen = sg->length,
2091 p = page_address(sg_page(sg))) {
2092 if (usglen <= 0)
2093 break;
2094 if (ksglen > usglen) {
2095 if (usglen >= num_xfer) {
2096 if (__copy_to_user(up, p, num_xfer))
2097 return -EFAULT;
2098 return 0;
2099 }
2100 if (__copy_to_user(up, p, usglen))
2101 return -EFAULT;
2102 p += usglen;
2103 ksglen -= usglen;
2104 break;
2105 } else {
2106 if (ksglen >= num_xfer) {
2107 if (__copy_to_user(up, p, num_xfer))
2108 return -EFAULT;
2109 return 0;
2110 }
2111 if (__copy_to_user(up, p, ksglen))
2112 return -EFAULT;
2113 up += ksglen;
2114 usglen -= ksglen;
2115 }
2116 ++k;
2117 if (k >= schp->k_use_sg)
2118 return 0;
2119 } 1827 }
2120 } 1828 }
2121 1829 memset(schp, 0, sizeof (*schp));
2122 return 0;
2123} 1830}
2124 1831
2125static int 1832static int
2126sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) 1833sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2127{ 1834{
2128 Sg_scatter_hold *schp = &srp->data; 1835 Sg_scatter_hold *schp = &srp->data;
2129 struct scatterlist *sg = schp->buffer;
2130 int k, num; 1836 int k, num;
2131 1837
2132 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n", 1838 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
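
sg_build_indirect() now backs the scatter buffer with alloc_pages() at a single order derived from the preferred element size, zeroing pages for unprivileged callers, and on failure (the out: path) it frees what was obtained and retries one order lower. A compact sketch of that allocate-or-fall-back loop; the function name and parameters are illustrative:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Fill pages[0..nents) with allocations of the largest order that succeeds. */
static int alloc_segments(struct page **pages, int nents, int order,
			  gfp_t gfp_mask)
{
	int k, i;

retry:
	for (k = 0; k < nents; k++) {
		pages[k] = alloc_pages(gfp_mask, order);
		if (!pages[k])
			goto undo;
	}
	return order;			/* caller records the order actually used */

undo:
	for (i = 0; i < k; i++)
		__free_pages(pages[i], order);
	if (--order >= 0)
		goto retry;		/* try again with smaller segments */
	return -ENOMEM;
}
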
@@ -2134,15 +1840,15 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2134 if ((!outp) || (num_read_xfer <= 0)) 1840 if ((!outp) || (num_read_xfer <= 0))
2135 return 0; 1841 return 0;
2136 1842
2137 for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) { 1843 num = 1 << (PAGE_SHIFT + schp->page_order);
2138 num = sg->length; 1844 for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
2139 if (num > num_read_xfer) { 1845 if (num > num_read_xfer) {
2140 if (__copy_to_user(outp, page_address(sg_page(sg)), 1846 if (__copy_to_user(outp, page_address(schp->pages[k]),
2141 num_read_xfer)) 1847 num_read_xfer))
2142 return -EFAULT; 1848 return -EFAULT;
2143 break; 1849 break;
2144 } else { 1850 } else {
2145 if (__copy_to_user(outp, page_address(sg_page(sg)), 1851 if (__copy_to_user(outp, page_address(schp->pages[k]),
2146 num)) 1852 num))
2147 return -EFAULT; 1853 return -EFAULT;
2148 num_read_xfer -= num; 1854 num_read_xfer -= num;
@@ -2177,24 +1883,21 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2177{ 1883{
2178 Sg_scatter_hold *req_schp = &srp->data; 1884 Sg_scatter_hold *req_schp = &srp->data;
2179 Sg_scatter_hold *rsv_schp = &sfp->reserve; 1885 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2180 struct scatterlist *sg = rsv_schp->buffer;
2181 int k, num, rem; 1886 int k, num, rem;
2182 1887
2183 srp->res_used = 1; 1888 srp->res_used = 1;
2184 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); 1889 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
2185 rem = size; 1890 rem = size;
2186 1891
2187 for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) { 1892 num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
2188 num = sg->length; 1893 for (k = 0; k < rsv_schp->k_use_sg; k++) {
2189 if (rem <= num) { 1894 if (rem <= num) {
2190 sfp->save_scat_len = num;
2191 sg->length = rem;
2192 req_schp->k_use_sg = k + 1; 1895 req_schp->k_use_sg = k + 1;
2193 req_schp->sglist_len = rsv_schp->sglist_len; 1896 req_schp->sglist_len = rsv_schp->sglist_len;
2194 req_schp->buffer = rsv_schp->buffer; 1897 req_schp->pages = rsv_schp->pages;
2195 1898
2196 req_schp->bufflen = size; 1899 req_schp->bufflen = size;
2197 req_schp->b_malloc_len = rsv_schp->b_malloc_len; 1900 req_schp->page_order = rsv_schp->page_order;
2198 break; 1901 break;
2199 } else 1902 } else
2200 rem -= num; 1903 rem -= num;
@@ -2208,22 +1911,13 @@ static void
2208sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) 1911sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2209{ 1912{
2210 Sg_scatter_hold *req_schp = &srp->data; 1913 Sg_scatter_hold *req_schp = &srp->data;
2211 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2212 1914
2213 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n", 1915 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
2214 (int) req_schp->k_use_sg)); 1916 (int) req_schp->k_use_sg));
2215 if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
2216 struct scatterlist *sg = rsv_schp->buffer;
2217
2218 if (sfp->save_scat_len > 0)
2219 (sg + (req_schp->k_use_sg - 1))->length =
2220 (unsigned) sfp->save_scat_len;
2221 else
2222 SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
2223 }
2224 req_schp->k_use_sg = 0; 1917 req_schp->k_use_sg = 0;
2225 req_schp->bufflen = 0; 1918 req_schp->bufflen = 0;
2226 req_schp->buffer = NULL; 1919 req_schp->pages = NULL;
1920 req_schp->page_order = 0;
2227 req_schp->sglist_len = 0; 1921 req_schp->sglist_len = 0;
2228 sfp->save_scat_len = 0; 1922 sfp->save_scat_len = 0;
2229 srp->res_used = 0; 1923 srp->res_used = 0;
@@ -2481,53 +2175,6 @@ sg_res_in_use(Sg_fd * sfp)
2481 return srp ? 1 : 0; 2175 return srp ? 1 : 0;
2482} 2176}
2483 2177
2484/* The size fetched (value output via retSzp) set when non-NULL return */
2485static struct page *
2486sg_page_malloc(int rqSz, int lowDma, int *retSzp)
2487{
2488 struct page *resp = NULL;
2489 gfp_t page_mask;
2490 int order, a_size;
2491 int resSz;
2492
2493 if ((rqSz <= 0) || (NULL == retSzp))
2494 return resp;
2495
2496 if (lowDma)
2497 page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
2498 else
2499 page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
2500
2501 for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
2502 order++, a_size <<= 1) ;
2503 resSz = a_size; /* rounded up if necessary */
2504 resp = alloc_pages(page_mask, order);
2505 while ((!resp) && order) {
2506 --order;
2507 a_size >>= 1; /* divide by 2, until PAGE_SIZE */
2508 resp = alloc_pages(page_mask, order); /* try half */
2509 resSz = a_size;
2510 }
2511 if (resp) {
2512 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2513 memset(page_address(resp), 0, resSz);
2514 *retSzp = resSz;
2515 }
2516 return resp;
2517}
2518
2519static void
2520sg_page_free(struct page *page, int size)
2521{
2522 int order, a_size;
2523
2524 if (!page)
2525 return;
2526 for (order = 0, a_size = PAGE_SIZE; a_size < size;
2527 order++, a_size <<= 1) ;
2528 __free_pages(page, order);
2529}
2530
2531#ifdef CONFIG_SCSI_PROC_FS 2178#ifdef CONFIG_SCSI_PROC_FS
2532static int 2179static int
2533sg_idr_max_id(int id, void *p, void *data) 2180sg_idr_max_id(int id, void *p, void *data)
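
None of the sg.c hunks above change the character-device ABI: a user program still fills in a struct sg_io_hdr and issues SG_IO (or the older write/read pair), regardless of whether the driver maps its buffer directly or through the new rq_map_data bounce pages. A minimal, runnable user-space example issuing a 6-byte INQUIRY; the device node and buffer sizes are arbitrary:

/* cc -o sg_inq sg_inq.c ; run against e.g. /dev/sg0 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(int argc, char **argv)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };  /* INQUIRY, 96 bytes */
	unsigned char buf[96], sense[32];
	struct sg_io_hdr hdr;
	int fd = open(argc > 1 ? argv[1] : "/dev/sg0", O_RDWR);

	if (fd < 0)
		return perror("open"), 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.cmdp = cdb;
	hdr.cmd_len = sizeof(cdb);
	hdr.dxferp = buf;
	hdr.dxfer_len = sizeof(buf);
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 5000;			/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		return perror("SG_IO"), 1;

	printf("status=0x%x, vendor=%.8s\n", hdr.status, (char *)buf + 8);
	return close(fd), 0;
}
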
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 27f5bfd1def3..0f17009c99d2 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -331,7 +331,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)
331 331
332static int sr_prep_fn(struct request_queue *q, struct request *rq) 332static int sr_prep_fn(struct request_queue *q, struct request *rq)
333{ 333{
334 int block=0, this_count, s_size, timeout = SR_TIMEOUT; 334 int block = 0, this_count, s_size;
335 struct scsi_cd *cd; 335 struct scsi_cd *cd;
336 struct scsi_cmnd *SCpnt; 336 struct scsi_cmnd *SCpnt;
337 struct scsi_device *sdp = q->queuedata; 337 struct scsi_device *sdp = q->queuedata;
@@ -461,7 +461,6 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
461 SCpnt->transfersize = cd->device->sector_size; 461 SCpnt->transfersize = cd->device->sector_size;
462 SCpnt->underflow = this_count << 9; 462 SCpnt->underflow = this_count << 9;
463 SCpnt->allowed = MAX_RETRIES; 463 SCpnt->allowed = MAX_RETRIES;
464 SCpnt->timeout_per_command = timeout;
465 464
466 /* 465 /*
467 * This indicates that the command is ready from our end to be 466 * This indicates that the command is ready from our end to be
@@ -620,6 +619,8 @@ static int sr_probe(struct device *dev)
620 disk->fops = &sr_bdops; 619 disk->fops = &sr_bdops;
621 disk->flags = GENHD_FL_CD; 620 disk->flags = GENHD_FL_CD;
622 621
622 blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
623
623 cd->device = sdev; 624 cd->device = sdev;
624 cd->disk = disk; 625 cd->disk = disk;
625 cd->driver = &sr_template; 626 cd->driver = &sr_template;
@@ -878,7 +879,7 @@ static void sr_kref_release(struct kref *kref)
878 struct gendisk *disk = cd->disk; 879 struct gendisk *disk = cd->disk;
879 880
880 spin_lock(&sr_index_lock); 881 spin_lock(&sr_index_lock);
881 clear_bit(disk->first_minor, sr_index_bits); 882 clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
882 spin_unlock(&sr_index_lock); 883 spin_unlock(&sr_index_lock);
883 884
884 unregister_cdrom(&cd->cdi); 885 unregister_cdrom(&cd->cdi);
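
The sr changes move the per-command timeout from SCpnt->timeout_per_command in the prep function to a queue-wide default set once at probe time: with block-layer timeout handling, every request on the queue inherits that value unless rq->timeout is overridden explicitly. A one-call sketch of the probe-time setup; the surrounding function and the 30-second value are illustrative:

#include <linux/blkdev.h>
#include <linux/jiffies.h>

#define MY_DEFAULT_TIMEOUT	(30 * HZ)	/* jiffies */

static void my_setup_queue_timeout(struct request_queue *q)
{
	/* every request queued here now times out after 30 s by default */
	blk_queue_rq_timeout(q, MY_DEFAULT_TIMEOUT);
}
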
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index d39107b7669b..f4e6cde1fd0d 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -519,8 +519,8 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd,
519 * Shorten our settle_time if needed for 519 * Shorten our settle_time if needed for
520 * this command not to time out. 520 * this command not to time out.
521 */ 521 */
522 if (np->s.settle_time_valid && cmd->timeout_per_command) { 522 if (np->s.settle_time_valid && cmd->request->timeout) {
523 unsigned long tlimit = jiffies + cmd->timeout_per_command; 523 unsigned long tlimit = jiffies + cmd->request->timeout;
524 tlimit -= SYM_CONF_TIMER_INTERVAL*2; 524 tlimit -= SYM_CONF_TIMER_INTERVAL*2;
525 if (time_after(np->s.settle_time, tlimit)) { 525 if (time_after(np->s.settle_time, tlimit)) {
526 np->s.settle_time = tlimit; 526 np->s.settle_time = tlimit;
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 1723d71cbf3f..69ac6e590f1d 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -2573,8 +2573,8 @@ static struct pci_driver dc390_driver = {
2573static int __init dc390_module_init(void) 2573static int __init dc390_module_init(void)
2574{ 2574{
2575 if (!disable_clustering) 2575 if (!disable_clustering)
2576 printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n" 2576 printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n");
2577 "\twith \"disable_clustering=1\" and report to maintainers\n"); 2577 printk(KERN_INFO " with \"disable_clustering=1\" and report to maintainers\n");
2578 2578
2579 if (tmscsim[0] == -1 || tmscsim[0] > 15) { 2579 if (tmscsim[0] == -1 || tmscsim[0] > 15) {
2580 tmscsim[0] = 7; 2580 tmscsim[0] = 7;
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 3a6da80b081c..61fb8b6d19af 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -131,7 +131,8 @@ struct atmel_uart_char {
131struct atmel_uart_port { 131struct atmel_uart_port {
132 struct uart_port uart; /* uart */ 132 struct uart_port uart; /* uart */
133 struct clk *clk; /* uart clock */ 133 struct clk *clk; /* uart clock */
134 unsigned short suspended; /* is port suspended? */ 134 int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */
135 u32 backup_imr; /* IMR saved during suspend */
135 int break_active; /* break being received */ 136 int break_active; /* break being received */
136 137
137 short use_dma_rx; /* enable PDC receiver */ 138 short use_dma_rx; /* enable PDC receiver */
@@ -984,8 +985,15 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state,
984 * This is called on uart_open() or a resume event. 985 * This is called on uart_open() or a resume event.
985 */ 986 */
986 clk_enable(atmel_port->clk); 987 clk_enable(atmel_port->clk);
988
989 /* re-enable interrupts if we disabled some on suspend */
990 UART_PUT_IER(port, atmel_port->backup_imr);
987 break; 991 break;
988 case 3: 992 case 3:
993 /* Back up the interrupt mask and disable all interrupts */
994 atmel_port->backup_imr = UART_GET_IMR(port);
995 UART_PUT_IDR(port, -1);
996
989 /* 997 /*
990 * Disable the peripheral clock for this serial port. 998 * Disable the peripheral clock for this serial port.
991 * This is called on uart_close() or a suspend event. 999 * This is called on uart_close() or a suspend event.
@@ -1475,13 +1483,12 @@ static int atmel_serial_suspend(struct platform_device *pdev,
1475 cpu_relax(); 1483 cpu_relax();
1476 } 1484 }
1477 1485
1478 if (device_may_wakeup(&pdev->dev) 1486 /* we can not wake up if we're running on slow clock */
1479 && !atmel_serial_clk_will_stop()) 1487 atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
1480 enable_irq_wake(port->irq); 1488 if (atmel_serial_clk_will_stop())
1481 else { 1489 device_set_wakeup_enable(&pdev->dev, 0);
1482 uart_suspend_port(&atmel_uart, port); 1490
1483 atmel_port->suspended = 1; 1491 uart_suspend_port(&atmel_uart, port);
1484 }
1485 1492
1486 return 0; 1493 return 0;
1487} 1494}
@@ -1491,11 +1498,8 @@ static int atmel_serial_resume(struct platform_device *pdev)
1491 struct uart_port *port = platform_get_drvdata(pdev); 1498 struct uart_port *port = platform_get_drvdata(pdev);
1492 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1499 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1493 1500
1494 if (atmel_port->suspended) { 1501 uart_resume_port(&atmel_uart, port);
1495 uart_resume_port(&atmel_uart, port); 1502 device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
1496 atmel_port->suspended = 0;
1497 } else
1498 disable_irq_wake(port->irq);
1499 1503
1500 return 0; 1504 return 0;
1501} 1505}
@@ -1513,6 +1517,8 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev)
1513 BUILD_BUG_ON(!is_power_of_2(ATMEL_SERIAL_RINGSIZE)); 1517 BUILD_BUG_ON(!is_power_of_2(ATMEL_SERIAL_RINGSIZE));
1514 1518
1515 port = &atmel_ports[pdev->id]; 1519 port = &atmel_ports[pdev->id];
1520 port->backup_imr = 0;
1521
1516 atmel_init_port(port, pdev); 1522 atmel_init_port(port, pdev);
1517 1523
1518 if (!atmel_use_dma_rx(&port->uart)) { 1524 if (!atmel_use_dma_rx(&port->uart)) {
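
The atmel_serial rework stops juggling enable_irq_wake() and a suspended flag: the port is always suspended, the interrupt mask register is saved on the way down (a write to IDR then masks everything) and written back to IER on resume, and device_set_wakeup_enable() is cleared whenever running from the slow clock would make wakeup impossible. A schematic sketch of the save/restore idea for a memory-mapped UART; the register offsets, struct and helpers below are made up, not Atmel's:

#include <linux/io.h>
#include <linux/types.h>

#define MY_UART_IER	0x08	/* interrupt enable (write-only), illustrative */
#define MY_UART_IDR	0x0c	/* interrupt disable (write-only), illustrative */
#define MY_UART_IMR	0x10	/* interrupt mask (read-only), illustrative */

struct my_uart {
	void __iomem *regs;
	u32 backup_imr;		/* mask saved across suspend */
};

static void my_uart_suspend_irqs(struct my_uart *p)
{
	p->backup_imr = readl(p->regs + MY_UART_IMR);	/* remember what was enabled */
	writel(~0u, p->regs + MY_UART_IDR);		/* then mask everything */
}

static void my_uart_resume_irqs(struct my_uart *p)
{
	writel(p->backup_imr, p->regs + MY_UART_IER);	/* re-enable the saved set */
}
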
diff --git a/drivers/spi/orion_spi.c b/drivers/spi/orion_spi.c
index c4eaacd6e553..b872bfaf4bd2 100644
--- a/drivers/spi/orion_spi.c
+++ b/drivers/spi/orion_spi.c
@@ -427,7 +427,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m)
427 goto msg_rejected; 427 goto msg_rejected;
428 } 428 }
429 429
430 if (t->speed_hz < orion_spi->min_speed) { 430 if (t->speed_hz && t->speed_hz < orion_spi->min_speed) {
431 dev_err(&spi->dev, 431 dev_err(&spi->dev,
432 "message rejected : " 432 "message rejected : "
433 "device min speed (%d Hz) exceeds " 433 "device min speed (%d Hz) exceeds "
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 0e53354c1cfe..d47d3636227f 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -49,7 +49,7 @@ MODULE_ALIAS("platform:pxa2xx-spi");
49 49
50#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) 50#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
51#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) 51#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
52#define IS_DMA_ALIGNED(x) (((x) & 0x07) == 0) 52#define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0)
53#define MAX_DMA_LEN 8191 53#define MAX_DMA_LEN 8191
54 54
55/* 55/*
@@ -896,7 +896,7 @@ static void pump_transfers(unsigned long data)
896 || transfer->rx_dma || transfer->tx_dma) { 896 || transfer->rx_dma || transfer->tx_dma) {
897 dev_err(&drv_data->pdev->dev, 897 dev_err(&drv_data->pdev->dev,
898 "pump_transfers: mapped transfer length " 898 "pump_transfers: mapped transfer length "
899 "of %lu is greater than %d\n", 899 "of %u is greater than %d\n",
900 transfer->len, MAX_DMA_LEN); 900 transfer->len, MAX_DMA_LEN);
901 message->status = -EINVAL; 901 message->status = -EINVAL;
902 giveback(drv_data); 902 giveback(drv_data);
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 87ab2443e66d..0ffabf5c0b60 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -471,6 +471,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
471#endif 471#endif
472 break; 472 break;
473 case SSB_BUSTYPE_SSB: 473 case SSB_BUSTYPE_SSB:
474 dev->dma_mask = &dev->coherent_dma_mask;
474 break; 475 break;
475 } 476 }
476 477
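
For SSB_BUSTYPE_SSB devices the core now points dev->dma_mask at the device's own coherent_dma_mask, so dma_set_mask() and the streaming DMA API have valid storage to work with. A two-line sketch of the idiom; the 32-bit default and the surrounding helper are assumptions for illustration:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static void my_init_dma_mask(struct device *dev)
{
	/* let the streaming and coherent masks share one storage location */
	dev->coherent_dma_mask = DMA_BIT_MASK(32);	/* illustrative default */
	dev->dma_mask = &dev->coherent_dma_mask;
}
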
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 8abd4e59bf4a..8ab389dca2b9 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1876,7 +1876,8 @@ int usb_add_hcd(struct usb_hcd *hcd,
1876 * with IRQF_SHARED. As usb_hcd_irq() will always disable 1876 * with IRQF_SHARED. As usb_hcd_irq() will always disable
1877 * interrupts we can remove it here. 1877 * interrupts we can remove it here.
1878 */ 1878 */
1879 irqflags &= ~IRQF_DISABLED; 1879 if (irqflags & IRQF_SHARED)
1880 irqflags &= ~IRQF_DISABLED;
1880 1881
1881 snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d", 1882 snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
1882 hcd->driver->description, hcd->self.busnum); 1883 hcd->driver->description, hcd->self.busnum);
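
The hcd.c hunk keeps IRQF_DISABLED for exclusive interrupt lines and strips it only when the line is shared, since the flag cannot be honoured reliably once another handler registers on the same IRQ without it. A small sketch of that flag handling around request_irq(); the wrapper and the "my-hcd" name are hypothetical:

#include <linux/interrupt.h>

static int my_request_hcd_irq(unsigned int irq, irq_handler_t handler,
			      unsigned long irqflags, void *ctx)
{
	/*
	 * On a shared line IRQF_DISABLED cannot be guaranteed, so drop it
	 * there; keep it for an exclusive line.
	 */
	if (irqflags & IRQF_SHARED)
		irqflags &= ~IRQF_DISABLED;

	return request_irq(irq, handler, irqflags, "my-hcd", ctx);
}
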
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 6a5cb018383d..d99963873e37 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2683,35 +2683,17 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
2683 USB_PORT_STAT_C_ENABLE); 2683 USB_PORT_STAT_C_ENABLE);
2684#endif 2684#endif
2685 2685
2686 /* Try to use the debounce delay for protection against
2687 * port-enable changes caused, for example, by EMI.
2688 */
2689 if (portchange & (USB_PORT_STAT_C_CONNECTION |
2690 USB_PORT_STAT_C_ENABLE)) {
2691 status = hub_port_debounce(hub, port1);
2692 if (status < 0) {
2693 if (printk_ratelimit())
2694 dev_err (hub_dev, "connect-debounce failed, "
2695 "port %d disabled\n", port1);
2696 portstatus &= ~USB_PORT_STAT_CONNECTION;
2697 } else {
2698 portstatus = status;
2699 }
2700 }
2701
2702 /* Try to resuscitate an existing device */ 2686 /* Try to resuscitate an existing device */
2703 udev = hdev->children[port1-1]; 2687 udev = hdev->children[port1-1];
2704 if ((portstatus & USB_PORT_STAT_CONNECTION) && udev && 2688 if ((portstatus & USB_PORT_STAT_CONNECTION) && udev &&
2705 udev->state != USB_STATE_NOTATTACHED) { 2689 udev->state != USB_STATE_NOTATTACHED) {
2706
2707 usb_lock_device(udev); 2690 usb_lock_device(udev);
2708 if (portstatus & USB_PORT_STAT_ENABLE) { 2691 if (portstatus & USB_PORT_STAT_ENABLE) {
2709 status = 0; /* Nothing to do */ 2692 status = 0; /* Nothing to do */
2710 } else if (!udev->persist_enabled) {
2711 status = -ENODEV; /* Mustn't resuscitate */
2712 2693
2713#ifdef CONFIG_USB_SUSPEND 2694#ifdef CONFIG_USB_SUSPEND
2714 } else if (udev->state == USB_STATE_SUSPENDED) { 2695 } else if (udev->state == USB_STATE_SUSPENDED &&
2696 udev->persist_enabled) {
2715 /* For a suspended device, treat this as a 2697 /* For a suspended device, treat this as a
2716 * remote wakeup event. 2698 * remote wakeup event.
2717 */ 2699 */
@@ -2726,7 +2708,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
2726#endif 2708#endif
2727 2709
2728 } else { 2710 } else {
2729 status = usb_reset_device(udev); 2711 status = -ENODEV; /* Don't resuscitate */
2730 } 2712 }
2731 usb_unlock_device(udev); 2713 usb_unlock_device(udev);
2732 2714
@@ -2741,6 +2723,19 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
2741 usb_disconnect(&hdev->children[port1-1]); 2723 usb_disconnect(&hdev->children[port1-1]);
2742 clear_bit(port1, hub->change_bits); 2724 clear_bit(port1, hub->change_bits);
2743 2725
2726 if (portchange & (USB_PORT_STAT_C_CONNECTION |
2727 USB_PORT_STAT_C_ENABLE)) {
2728 status = hub_port_debounce(hub, port1);
2729 if (status < 0) {
2730 if (printk_ratelimit())
2731 dev_err(hub_dev, "connect-debounce failed, "
2732 "port %d disabled\n", port1);
2733 portstatus &= ~USB_PORT_STAT_CONNECTION;
2734 } else {
2735 portstatus = status;
2736 }
2737 }
2738
2744 /* Return now if debouncing failed or nothing is connected */ 2739 /* Return now if debouncing failed or nothing is connected */
2745 if (!(portstatus & USB_PORT_STAT_CONNECTION)) { 2740 if (!(portstatus & USB_PORT_STAT_CONNECTION)) {
2746 2741
@@ -2748,7 +2743,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
2748 if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2 2743 if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2
2749 && !(portstatus & (1 << USB_PORT_FEAT_POWER))) 2744 && !(portstatus & (1 << USB_PORT_FEAT_POWER)))
2750 set_port_feature(hdev, port1, USB_PORT_FEAT_POWER); 2745 set_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
2751 2746
2752 if (portstatus & USB_PORT_STAT_ENABLE) 2747 if (portstatus & USB_PORT_STAT_ENABLE)
2753 goto done; 2748 goto done;
2754 return; 2749 return;
diff --git a/drivers/usb/gadget/fsl_usb2_udc.c b/drivers/usb/gadget/fsl_usb2_udc.c
index 1cfccf102a2d..45ad556169f1 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.c
+++ b/drivers/usb/gadget/fsl_usb2_udc.c
@@ -223,7 +223,7 @@ static int dr_controller_setup(struct fsl_udc *udc)
223 fsl_writel(tmp, &dr_regs->endpointlistaddr); 223 fsl_writel(tmp, &dr_regs->endpointlistaddr);
224 224
225 VDBG("vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x", 225 VDBG("vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x",
226 (int)udc->ep_qh, (int)tmp, 226 udc->ep_qh, (int)tmp,
227 fsl_readl(&dr_regs->endpointlistaddr)); 227 fsl_readl(&dr_regs->endpointlistaddr));
228 228
229 /* Config PHY interface */ 229 /* Config PHY interface */
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 574c53831a05..bb54cca4c543 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -787,7 +787,7 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
787 omap_set_dma_dest_params(ep->lch, 787 omap_set_dma_dest_params(ep->lch,
788 OMAP_DMA_PORT_TIPB, 788 OMAP_DMA_PORT_TIPB,
789 OMAP_DMA_AMODE_CONSTANT, 789 OMAP_DMA_AMODE_CONSTANT,
790 (unsigned long) io_v2p(UDC_DATA_DMA), 790 UDC_DATA_DMA,
791 0, 0); 791 0, 0);
792 } 792 }
793 } else { 793 } else {
@@ -804,7 +804,7 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
804 omap_set_dma_src_params(ep->lch, 804 omap_set_dma_src_params(ep->lch,
805 OMAP_DMA_PORT_TIPB, 805 OMAP_DMA_PORT_TIPB,
806 OMAP_DMA_AMODE_CONSTANT, 806 OMAP_DMA_AMODE_CONSTANT,
807 (unsigned long) io_v2p(UDC_DATA_DMA), 807 UDC_DATA_DMA,
808 0, 0); 808 0, 0);
809 /* EMIFF or SDRC */ 809 /* EMIFF or SDRC */
810 omap_set_dma_dest_burst_mode(ep->lch, 810 omap_set_dma_dest_burst_mode(ep->lch,
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index d9d53f289caf..8409e0705d63 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -145,16 +145,6 @@ static int handshake (struct ehci_hcd *ehci, void __iomem *ptr,
145 return -ETIMEDOUT; 145 return -ETIMEDOUT;
146} 146}
147 147
148static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
149 u32 mask, u32 done, int usec)
150{
151 int error = handshake(ehci, ptr, mask, done, usec);
152 if (error)
153 ehci_to_hcd(ehci)->state = HC_STATE_HALT;
154
155 return error;
156}
157
158/* force HC to halt state from unknown (EHCI spec section 2.3) */ 148/* force HC to halt state from unknown (EHCI spec section 2.3) */
159static int ehci_halt (struct ehci_hcd *ehci) 149static int ehci_halt (struct ehci_hcd *ehci)
160{ 150{
@@ -173,6 +163,22 @@ static int ehci_halt (struct ehci_hcd *ehci)
173 STS_HALT, STS_HALT, 16 * 125); 163 STS_HALT, STS_HALT, 16 * 125);
174} 164}
175 165
166static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
167 u32 mask, u32 done, int usec)
168{
169 int error;
170
171 error = handshake(ehci, ptr, mask, done, usec);
172 if (error) {
173 ehci_halt(ehci);
174 ehci_to_hcd(ehci)->state = HC_STATE_HALT;
 175 ehci_err(ehci, "force halt; handshake %p %08x %08x -> %d\n",
176 ptr, mask, done, error);
177 }
178
179 return error;
180}
181
176/* put TDI/ARC silicon into EHCI mode */ 182/* put TDI/ARC silicon into EHCI mode */
177static void tdi_reset (struct ehci_hcd *ehci) 183static void tdi_reset (struct ehci_hcd *ehci)
178{ 184{
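
handshake(), and the relocated handshake_on_error_set_halt() wrapper that now halts the controller and logs before reporting the error, is the usual poll-a-status-register-with-timeout pattern. A generic sketch of that pattern (not the EHCI code itself):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Poll *ptr until (value & mask) == done, or give up after 'usec' microseconds. */
static int my_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* device removed or bus error */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);

	return -ETIMEDOUT;
}
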
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index b7853c8bac0f..4a0c5a78b2ed 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -437,6 +437,9 @@ static int enable_periodic (struct ehci_hcd *ehci)
437 u32 cmd; 437 u32 cmd;
438 int status; 438 int status;
439 439
440 if (ehci->periodic_sched++)
441 return 0;
442
440 /* did clearing PSE did take effect yet? 443 /* did clearing PSE did take effect yet?
441 * takes effect only at frame boundaries... 444 * takes effect only at frame boundaries...
442 */ 445 */
@@ -461,6 +464,9 @@ static int disable_periodic (struct ehci_hcd *ehci)
461 u32 cmd; 464 u32 cmd;
462 int status; 465 int status;
463 466
467 if (--ehci->periodic_sched)
468 return 0;
469
464 /* did setting PSE not take effect yet? 470 /* did setting PSE not take effect yet?
465 * takes effect only at frame boundaries... 471 * takes effect only at frame boundaries...
466 */ 472 */
@@ -544,13 +550,10 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
544 : (qh->usecs * 8); 550 : (qh->usecs * 8);
545 551
546 /* maybe enable periodic schedule processing */ 552 /* maybe enable periodic schedule processing */
547 if (!ehci->periodic_sched++) 553 return enable_periodic(ehci);
548 return enable_periodic (ehci);
549
550 return 0;
551} 554}
552 555
553static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) 556static int qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
554{ 557{
555 unsigned i; 558 unsigned i;
556 unsigned period; 559 unsigned period;
@@ -586,9 +589,7 @@ static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
586 qh_put (qh); 589 qh_put (qh);
587 590
588 /* maybe turn off periodic schedule */ 591 /* maybe turn off periodic schedule */
589 ehci->periodic_sched--; 592 return disable_periodic(ehci);
590 if (!ehci->periodic_sched)
591 (void) disable_periodic (ehci);
592} 593}
593 594
594static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh) 595static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
@@ -1562,9 +1563,7 @@ itd_link_urb (
1562 urb->hcpriv = NULL; 1563 urb->hcpriv = NULL;
1563 1564
1564 timer_action (ehci, TIMER_IO_WATCHDOG); 1565 timer_action (ehci, TIMER_IO_WATCHDOG);
1565 if (unlikely (!ehci->periodic_sched++)) 1566 return enable_periodic(ehci);
1566 return enable_periodic (ehci);
1567 return 0;
1568} 1567}
1569 1568
1570#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR) 1569#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
@@ -1642,7 +1641,7 @@ itd_complete (
1642 ehci_urb_done(ehci, urb, 0); 1641 ehci_urb_done(ehci, urb, 0);
1643 retval = true; 1642 retval = true;
1644 urb = NULL; 1643 urb = NULL;
1645 ehci->periodic_sched--; 1644 (void) disable_periodic(ehci);
1646 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; 1645 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
1647 1646
1648 if (unlikely (list_empty (&stream->td_list))) { 1647 if (unlikely (list_empty (&stream->td_list))) {
@@ -1951,9 +1950,7 @@ sitd_link_urb (
1951 urb->hcpriv = NULL; 1950 urb->hcpriv = NULL;
1952 1951
1953 timer_action (ehci, TIMER_IO_WATCHDOG); 1952 timer_action (ehci, TIMER_IO_WATCHDOG);
1954 if (!ehci->periodic_sched++) 1953 return enable_periodic(ehci);
1955 return enable_periodic (ehci);
1956 return 0;
1957} 1954}
1958 1955
1959/*-------------------------------------------------------------------------*/ 1956/*-------------------------------------------------------------------------*/
@@ -2019,7 +2016,7 @@ sitd_complete (
2019 ehci_urb_done(ehci, urb, 0); 2016 ehci_urb_done(ehci, urb, 0);
2020 retval = true; 2017 retval = true;
2021 urb = NULL; 2018 urb = NULL;
2022 ehci->periodic_sched--; 2019 (void) disable_periodic(ehci);
2023 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; 2020 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
2024 2021
2025 if (list_empty (&stream->td_list)) { 2022 if (list_empty (&stream->td_list)) {
@@ -2243,8 +2240,7 @@ restart:
2243 if (unlikely (modified)) { 2240 if (unlikely (modified)) {
2244 if (likely(ehci->periodic_sched > 0)) 2241 if (likely(ehci->periodic_sched > 0))
2245 goto restart; 2242 goto restart;
2246 /* maybe we can short-circuit this scan! */ 2243 /* short-circuit this scan */
2247 disable_periodic(ehci);
2248 now_uframe = clock; 2244 now_uframe = clock;
2249 break; 2245 break;
2250 } 2246 }
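
The ehci-sched changes turn enable_periodic()/disable_periodic() into reference-counted operations: the first user switches the periodic schedule on, the last one switches it off, and callers no longer open-code the ehci->periodic_sched bookkeeping. A stripped-down sketch of that counting scheme; the struct and the my_hw_*() toggles are placeholders for the real hardware operations:

static int my_hw_turn_on(void)  { return 0; }	/* stands in for the real enable */
static int my_hw_turn_off(void) { return 0; }	/* stands in for the real disable */

struct my_sched {
	unsigned refcount;		/* how many users need the schedule on */
};

static int my_enable(struct my_sched *s)
{
	if (s->refcount++)		/* already running: nothing to do */
		return 0;
	return my_hw_turn_on();		/* first user actually enables it */
}

static int my_disable(struct my_sched *s)
{
	if (--s->refcount)		/* still in use by someone else */
		return 0;
	return my_hw_turn_off();	/* last user actually disables it */
}
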
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index a0017486ad4e..58b2b8fc9439 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -9,6 +9,7 @@ comment "Enable Host or Gadget support to see Inventra options"
9# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller 9# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
10config USB_MUSB_HDRC 10config USB_MUSB_HDRC
11 depends on (USB || USB_GADGET) && HAVE_CLK 11 depends on (USB || USB_GADGET) && HAVE_CLK
12 depends on !SUPERH
12 select TWL4030_USB if MACH_OMAP_3430SDP 13 select TWL4030_USB if MACH_OMAP_3430SDP
13 tristate 'Inventra Highspeed Dual Role Controller (TI, ...)' 14 tristate 'Inventra Highspeed Dual Role Controller (TI, ...)'
14 help 15 help
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c5b8f0296fcf..128e949db47c 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -100,8 +100,8 @@
100#include <linux/io.h> 100#include <linux/io.h>
101 101
102#ifdef CONFIG_ARM 102#ifdef CONFIG_ARM
103#include <asm/arch/hardware.h> 103#include <mach/hardware.h>
104#include <asm/arch/memory.h> 104#include <mach/memory.h>
105#include <asm/mach-types.h> 105#include <asm/mach-types.h>
106#endif 106#endif
107 107
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 298b22e6ad0d..9d2dcb121c5e 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -35,8 +35,8 @@
35#include <linux/io.h> 35#include <linux/io.h>
36 36
37#include <asm/mach-types.h> 37#include <asm/mach-types.h>
38#include <asm/arch/hardware.h> 38#include <mach/hardware.h>
39#include <asm/arch/mux.h> 39#include <mach/mux.h>
40 40
41#include "musb_core.h" 41#include "musb_core.h"
42#include "omap2430.h" 42#include "omap2430.h"
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
index 786a62071f72..dc7670718cd2 100644
--- a/drivers/usb/musb/omap2430.h
+++ b/drivers/usb/musb/omap2430.h
@@ -11,8 +11,8 @@
11#define __MUSB_OMAP243X_H__ 11#define __MUSB_OMAP243X_H__
12 12
13#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) 13#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
14#include <asm/arch/hardware.h> 14#include <mach/hardware.h>
15#include <asm/arch/usb.h> 15#include <mach/usb.h>
16 16
17/* 17/*
18 * OMAP2430-specific definitions 18 * OMAP2430-specific definitions
diff --git a/drivers/usb/serial/cp2101.c b/drivers/usb/serial/cp2101.c
index 442cba69cce5..1279553381e3 100644
--- a/drivers/usb/serial/cp2101.c
+++ b/drivers/usb/serial/cp2101.c
@@ -72,6 +72,7 @@ static struct usb_device_id id_table [] = {
72 { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ 72 { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
73 { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ 73 { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
74 { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ 74 { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
75 { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
75 { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ 76 { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
76 { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ 77 { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
77 { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ 78 { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
@@ -83,6 +84,7 @@ static struct usb_device_id id_table [] = {
83 { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */ 84 { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
84 { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */ 85 { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
85 { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ 86 { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
87 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
86 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 88 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
87 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 89 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
88 { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ 90 { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
@@ -93,6 +95,7 @@ static struct usb_device_id id_table [] = {
 	{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
 	{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
 	{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
+	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
 	{ } /* Terminating Entry */
 };
 
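The cp2101 additions above all follow the same pattern: a new VID/PID pair is appended to the driver's id_table before the terminating entry, and MODULE_DEVICE_TABLE() exports the table so hotplug/modprobe can bind the driver. A minimal sketch of that pattern, with a placeholder product ID (0x10C4/0xBEEF is not a real device) and a made-up table name:

	#include <linux/module.h>
	#include <linux/usb.h>

	static struct usb_device_id example_id_table[] = {
		{ USB_DEVICE(0x10C4, 0xBEEF) },	/* hypothetical CP210x-based device */
		{ }				/* Terminating Entry */
	};
	MODULE_DEVICE_TABLE(usb, example_id_table);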
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 984f6eff4c47..3dc93b542b30 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -654,6 +654,9 @@ static struct usb_device_id id_table_combined [] = {
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
 	{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
+	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
 	{ }, /* Optional parameter entry */
 	{ } /* Terminating entry */
 };
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 382265bba969..8a5b6df3a976 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -750,6 +750,7 @@
 
 #define PAPOUCH_VID 0x5050 /* Vendor ID */
 #define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */
+#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */
 
 /*
  * ACG Identification Technologies GmbH products (http://www.acg.de/).
@@ -838,6 +839,10 @@
 /* Rig Expert Ukraine devices */
 #define FTDI_REU_TINY_PID 0xED22 /* RigExpert Tiny */
 
+/* Domintell products http://www.domintell.com */
+#define FTDI_DOMINTELL_DGQG_PID 0xEF50 /* Master */
+#define FTDI_DOMINTELL_DUSB_PID 0xEF51 /* DUSB01 module */
+
 /* Commands */
 #define FTDI_SIO_RESET 0 /* Reset the port */
 #define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 9f9cd36455f4..73f8277f88f2 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -218,6 +218,7 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
 /* ZTE PRODUCTS */
 #define ZTE_VENDOR_ID 0x19d2
 #define ZTE_PRODUCT_MF628 0x0015
+#define ZTE_PRODUCT_CDMA_TECH 0xfffe
 
 static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -347,6 +348,7 @@ static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
 	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 706033753adb..ea1a103c99be 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -14,7 +14,7 @@
   Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
 */
 
-#define DRIVER_VERSION "v.1.2.13a"
+#define DRIVER_VERSION "v.1.3.2"
 #define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>"
 #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
 
@@ -30,9 +30,6 @@
 
 #define SWIMS_USB_REQUEST_SetPower 0x00
 #define SWIMS_USB_REQUEST_SetNmea 0x07
-#define SWIMS_USB_REQUEST_SetMode 0x0B
-#define SWIMS_USB_REQUEST_GetSwocInfo 0x0A
-#define SWIMS_SET_MODE_Modem 0x0001
 
 /* per port private data */
 #define N_IN_URB 4
@@ -163,7 +160,7 @@ static struct usb_device_id id_table [] = {
 	{ USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
 	{ USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */
 	{ USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
-	{ USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */
+	{ USB_DEVICE(0x03f0, 0x1b1d) }, /* HP ev2200 a.k.a MC5720 */
 	{ USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
 	{ USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */
 	{ USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */
@@ -175,6 +172,8 @@ static struct usb_device_id id_table [] = {
 	/* Sierra Wireless Device */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) },
 	{ USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless Device */
+	{ USB_DEVICE(0x1199, 0x0027) }, /* Sierra Wireless Device */
+	{ USB_DEVICE(0x1199, 0x0028) }, /* Sierra Wireless Device */
 
 	{ USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */
 	{ USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
@@ -187,6 +186,7 @@ static struct usb_device_id id_table [] = {
 	{ USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */
 	{ USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */
 	{ USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */
+	{ USB_DEVICE(0x1199, 0x683A) }, /* Sierra Wireless MC8785 */
 	{ USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */
 	{ USB_DEVICE(0x1199, 0x683C) }, /* Sierra Wireless MC8790 */
 	{ USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8790 */
@@ -204,6 +204,8 @@ static struct usb_device_id id_table [] = {
 	/* Sierra Wireless Device */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)},
 	/* Sierra Wireless Device */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6891, 0xFF, 0xFF, 0xFF)},
+	/* Sierra Wireless Device */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
 
 	{ USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
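Two kinds of match are mixed in the sierra table above: a plain USB_DEVICE() entry claims every interface of the device, while USB_DEVICE_AND_INTERFACE_INFO() also checks the interface class/subclass/protocol, so the serial driver only binds to the vendor-specific (0xFF/0xFF/0xFF) interfaces of a composite device. A rough illustration of the difference, using a made-up product ID (0x1199/0x1234):

	/* claim every interface of the device */
	{ USB_DEVICE(0x1199, 0x1234) },

	/* claim only interfaces whose class/subclass/protocol are all 0xFF */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x1234, 0xFF, 0xFF, 0xFF) },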
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index e39c779e4160..9a3e495c769c 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1744,7 +1744,7 @@ static int ti_download_firmware(struct ti_device *tdev, int type)
 	if (buffer) {
 		memcpy(buffer, fw_p->data, fw_p->size);
 		memset(buffer + fw_p->size, 0xff, buffer_size - fw_p->size);
-		ti_do_download(dev, pipe, buffer, fw_p->size);
+		status = ti_do_download(dev, pipe, buffer, fw_p->size);
 		kfree(buffer);
 	}
 	release_firmware(fw_p);
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index b157c48e8b78..4f7f9e3ae0a4 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -733,7 +733,9 @@ int usb_serial_probe(struct usb_interface *interface,
 	    ((le16_to_cpu(dev->descriptor.idVendor) == ATEN_VENDOR_ID) &&
 	     (le16_to_cpu(dev->descriptor.idProduct) == ATEN_PRODUCT_ID)) ||
 	    ((le16_to_cpu(dev->descriptor.idVendor) == ALCOR_VENDOR_ID) &&
-	     (le16_to_cpu(dev->descriptor.idProduct) == ALCOR_PRODUCT_ID))) {
+	     (le16_to_cpu(dev->descriptor.idProduct) == ALCOR_PRODUCT_ID)) ||
+	    ((le16_to_cpu(dev->descriptor.idVendor) == SIEMENS_VENDOR_ID) &&
+	     (le16_to_cpu(dev->descriptor.idProduct) == SIEMENS_PRODUCT_ID_EF81))) {
 		if (interface != dev->actconfig->interface[0]) {
 			/* check out the endpoints of the other interface*/
 			iface_desc = dev->actconfig->interface[0]->cur_altsetting;
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index c76034672c18..3d9249632ae1 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -146,18 +146,6 @@ config USB_STORAGE_KARMA
 	  on the resulting scsi device node returns the Karma to normal
 	  operation.
 
-config USB_STORAGE_SIERRA
-	bool "Sierra Wireless TRU-Install Feature Support"
-	depends on USB_STORAGE
-	help
-	  Say Y here to include additional code to support Sierra Wireless
-	  products with the TRU-Install feature (e.g., AC597E, AC881U).
-
-	  This code switches the Sierra Wireless device from being in
-	  Mass Storage mode to Modem mode. It also has the ability to
-	  support host software upgrades should full Linux support be added
-	  to TRU-Install.
-
 config USB_STORAGE_CYPRESS_ATACB
 	bool "SAT emulation on Cypress USB/ATA Bridge with ATACB"
 	depends on USB_STORAGE
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile
index bc3415b475c9..7f8beb5366ae 100644
--- a/drivers/usb/storage/Makefile
+++ b/drivers/usb/storage/Makefile
@@ -21,11 +21,10 @@ usb-storage-obj-$(CONFIG_USB_STORAGE_JUMPSHOT) += jumpshot.o
 usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o
 usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o
 usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o
-usb-storage-obj-$(CONFIG_USB_STORAGE_SIERRA) += sierra_ms.o
 usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o
 
 usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \
-		initializers.o $(usb-storage-obj-y)
+		initializers.o sierra_ms.o $(usb-storage-obj-y)
 
 ifneq ($(CONFIG_USB_LIBUSUAL),)
 	obj-$(CONFIG_USB) += libusual.o
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ba412e68d474..cd155475cb6e 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -160,6 +160,13 @@ UNUSUAL_DEV( 0x0421, 0x0019, 0x0592, 0x0592,
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_MAX_SECTORS_64 ),
 
+/* Reported by Filip Joelsson <filip@blueturtle.nu> */
+UNUSUAL_DEV(  0x0421, 0x005d, 0x0001, 0x0600,
+		"Nokia",
+		"Nokia 3110c",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_FIX_CAPACITY ),
+
 /* Reported by Mario Rettig <mariorettig@web.de> */
 UNUSUAL_DEV(  0x0421, 0x042e, 0x0100, 0x0100,
 		"Nokia",
@@ -232,6 +239,20 @@ UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551,
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_FIX_CAPACITY ),
 
+/* Reported by Richard Nauber <RichardNauber@web.de> */
+UNUSUAL_DEV(  0x0421, 0x04fa, 0x0601, 0x0601,
+		"Nokia",
+		"6300",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_FIX_CAPACITY ),
+
+/* Patch for Nokia 5310 capacity */
+UNUSUAL_DEV(  0x0421, 0x006a, 0x0000, 0x0591,
+		"Nokia",
+		"5310",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_FIX_CAPACITY ),
+
 /* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */
 UNUSUAL_DEV(  0x0424, 0x0fdc, 0x0210, 0x0210,
 		"SMSC",
@@ -987,6 +1008,13 @@ UNUSUAL_DEV( 0x069b, 0x3004, 0x0001, 0x0001,
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_FIX_CAPACITY ),
 
+/* Reported by Adrian Pilchowiec <adi1981@epf.pl> */
+UNUSUAL_DEV(  0x071b, 0x3203, 0x0000, 0x0000,
+		"RockChip",
+		"MP3",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_NO_WP_DETECT | US_FL_MAX_SECTORS_64),
+
 /* Reported by Massimiliano Ghilardi <massimiliano.ghilardi@gmail.com>
  * This USB MP3/AVI player device fails and disconnects if more than 128
  * sectors (64kB) are read/written in a single command, and may be present
@@ -1576,7 +1604,6 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		0),
 
-#ifdef CONFIG_USB_STORAGE_SIERRA
 /* Reported by Kevin Lloyd <linux@sierrawireless.com>
  * Entry is needed for the initializer function override,
  * which instructs the device to load as a modem
@@ -1587,7 +1614,6 @@ UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999,
1587 "USB MMC Storage", 1614 "USB MMC Storage",
1588 US_SC_DEVICE, US_PR_DEVICE, sierra_ms_init, 1615 US_SC_DEVICE, US_PR_DEVICE, sierra_ms_init,
1589 0), 1616 0),
1590#endif
1591 1617
1592/* Reported by Jaco Kroon <jaco@kroon.co.za> 1618/* Reported by Jaco Kroon <jaco@kroon.co.za>
1593 * The usb-storage module found on the Digitech GNX4 (and supposedly other 1619 * The usb-storage module found on the Digitech GNX4 (and supposedly other
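For reference, each unusual_devs.h record is one UNUSUAL_DEV() entry: vendor ID, product ID, a bcdDevice (firmware revision) range, the vendor and product strings to report, the SCSI subclass and transport protocol (US_SC_DEVICE/US_PR_DEVICE mean "use whatever the descriptors say"), an optional init callback such as sierra_ms_init above, and the quirk flags. A sketch with made-up IDs, not a real device:

	/* hypothetical player that reports its capacity one sector too high */
	UNUSUAL_DEV(  0x1234, 0x5678, 0x0000, 0x9999,
			"Example Vendor",
			"Example MP3 Player",
			US_SC_DEVICE, US_PR_DEVICE, NULL,
			US_FL_FIX_CAPACITY ),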
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 73679aa506de..27016fd2cad1 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -102,9 +102,7 @@
 #ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB
 #include "cypress_atacb.h"
 #endif
-#ifdef CONFIG_USB_STORAGE_SIERRA
 #include "sierra_ms.h"
-#endif
 
 /* Some informational data */
 MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>");
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index c6299e8a041d..9cbff84b787d 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -2400,11 +2400,15 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
 
 	if (!fbcon_is_inactive(vc, info)) {
 		if (ops->blank_state != blank) {
+			int ret = 1;
+
 			ops->blank_state = blank;
 			fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
 			ops->cursor_flash = (!blank);
 
-			if (fb_blank(info, blank))
+			if (info->fbops->fb_blank)
+				ret = info->fbops->fb_blank(blank, info);
+			if (ret)
 				fbcon_generic_blank(vc, info, blank);
 		}
 
diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
index a6e38e9ea73f..89a346880ec0 100644
--- a/drivers/video/console/fbcon.h
+++ b/drivers/video/console/fbcon.h
@@ -110,7 +110,7 @@ static inline int mono_col(const struct fb_info *info)
 	__u32 max_len;
 	max_len = max(info->var.green.length, info->var.red.length);
 	max_len = max(info->var.blue.length, max_len);
-	return ~(0xfff << (max_len & 0xff));
+	return (~(0xfff << max_len)) & 0xff;
 }
 
 static inline int attr_col_ec(int shift, struct vc_data *vc,
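The fbcon.h change moves the 0xff mask outside the bit inversion, so mono_col() now yields the maximum value a color channel can hold instead of a sign-extended mess. Working the arithmetic for a 5-bit channel (max_len = 5) on a 32-bit int:

	old:  ~(0xfff << (5 & 0xff))   = ~0x1ffe0          = 0xfffe001f   (not a usable color value)
	new:  (~(0xfff << 5)) & 0xff   = 0xfffe001f & 0xff = 0x1f         (the expected 5-bit maximum)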
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 614a5c7017b6..6799a6de66fe 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -130,8 +130,8 @@ static ssize_t geodewdt_write(struct file *file, const char __user *data,
 	return len;
 }
 
-static int geodewdt_ioctl(struct inode *inode, struct file *file,
-			unsigned int cmd, unsigned long arg)
+static long geodewdt_ioctl(struct file *file, unsigned int cmd,
+			unsigned long arg)
 {
 	void __user *argp = (void __user *)arg;
 	int __user *p = argp;
@@ -198,7 +198,7 @@ static const struct file_operations geodewdt_fops = {
 	.owner = THIS_MODULE,
 	.llseek = no_llseek,
 	.write = geodewdt_write,
-	.ioctl = geodewdt_ioctl,
+	.unlocked_ioctl = geodewdt_ioctl,
 	.open = geodewdt_open,
 	.release = geodewdt_release,
 };
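The geodewdt change is the same conversion applied to the rc32434, rdc321x and pnx4008 drivers further down: the old inode-taking .ioctl handler, which ran under the big kernel lock, becomes an .unlocked_ioctl handler returning long. A generic sketch of the pattern (the example_wdt_* names are placeholders, not code from any of these drivers):

	#include <linux/fs.h>
	#include <linux/module.h>

	/* before: static int example_wdt_ioctl(struct inode *inode, struct file *file,
	 *                                      unsigned int cmd, unsigned long arg);
	 * called with the big kernel lock held
	 */
	static long example_wdt_ioctl(struct file *file, unsigned int cmd,
				      unsigned long arg);

	static const struct file_operations example_wdt_fops = {
		.owner          = THIS_MODULE,
		.unlocked_ioctl = example_wdt_ioctl,	/* was .ioctl = ... */
	};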
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c
index b82405cfb4cd..89fcefcc8510 100644
--- a/drivers/watchdog/ibmasr.c
+++ b/drivers/watchdog/ibmasr.c
@@ -85,7 +85,6 @@ static void __asr_toggle(void)
 
 	outb(reg & ~asr_toggle_mask, asr_write_addr);
 	reg = inb(asr_read_addr);
-	spin_unlock(&asr_lock);
 }
 
 static void asr_toggle(void)
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 0ed84162437b..6d9f3d4a9987 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -173,8 +173,8 @@ static const struct watchdog_info ident = {
 	.identity = "PNX4008 Watchdog",
 };
 
-static long pnx4008_wdt_ioctl(struct inode *inode, struct file *file,
-	unsigned int cmd, unsigned long arg)
+static long pnx4008_wdt_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
 {
 	int ret = -ENOTTY;
 	int time;
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index 6756bcb009ed..c9c73b69c5e5 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -182,8 +182,8 @@ static ssize_t rc32434_wdt_write(struct file *file, const char *data,
 	return 0;
 }
 
-static int rc32434_wdt_ioctl(struct inode *inode, struct file *file,
-	unsigned int cmd, unsigned long arg)
+static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
 {
 	void __user *argp = (void __user *)arg;
 	int new_timeout;
@@ -242,7 +242,7 @@ static struct file_operations rc32434_wdt_fops = {
 	.owner = THIS_MODULE,
 	.llseek = no_llseek,
 	.write = rc32434_wdt_write,
-	.ioctl = rc32434_wdt_ioctl,
+	.unlocked_ioctl = rc32434_wdt_ioctl,
 	.open = rc32434_wdt_open,
 	.release = rc32434_wdt_release,
 };
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 9108efa73e7d..bf92802f2bbe 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -144,8 +144,8 @@ static int rdc321x_wdt_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static int rdc321x_wdt_ioctl(struct inode *inode, struct file *file,
-	unsigned int cmd, unsigned long arg)
+static long rdc321x_wdt_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
 {
 	void __user *argp = (void __user *)arg;
 	unsigned int value;
@@ -204,7 +204,7 @@ static ssize_t rdc321x_wdt_write(struct file *file, const char __user *buf,
 static const struct file_operations rdc321x_wdt_fops = {
 	.owner = THIS_MODULE,
 	.llseek = no_llseek,
-	.ioctl = rdc321x_wdt_ioctl,
+	.unlocked_ioctl = rdc321x_wdt_ioctl,
 	.open = rdc321x_wdt_open,
 	.write = rdc321x_wdt_write,
 	.release = rdc321x_wdt_release,
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index db362c34958b..191ea6302107 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -115,8 +115,8 @@ static int watchdog_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static ssize_t watchdog_write(struct file *file, const char *data,
+static ssize_t watchdog_write(struct file *file, const char __user *data,
 			      size_t len, loff_t *ppos)
 {
 	/*
 	 * Refresh the timer.
@@ -133,21 +133,22 @@ static const struct watchdog_info ident = {
 };
 
 static long watchdog_ioctl(struct file *file, unsigned int cmd,
 			   unsigned long arg)
 {
 	unsigned int new_margin;
+	int __user *int_arg = (int __user *)arg;
 	int ret = -ENOTTY;
 
 	switch (cmd) {
 	case WDIOC_GETSUPPORT:
 		ret = 0;
-		if (copy_to_user((void *)arg, &ident, sizeof(ident)))
+		if (copy_to_user((void __user *)arg, &ident, sizeof(ident)))
 			ret = -EFAULT;
 		break;
 
 	case WDIOC_GETSTATUS:
 	case WDIOC_GETBOOTSTATUS:
-		ret = put_user(0, (int *)arg);
+		ret = put_user(0, int_arg);
 		break;
 
 	case WDIOC_KEEPALIVE:
@@ -156,7 +157,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
 		break;
 
 	case WDIOC_SETTIMEOUT:
-		ret = get_user(new_margin, (int *)arg);
+		ret = get_user(new_margin, int_arg);
 		if (ret)
 			break;
 
@@ -171,7 +172,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
 		watchdog_ping();
 		/* Fall */
 	case WDIOC_GETTIMEOUT:
-		ret = put_user(soft_margin, (int *)arg);
+		ret = put_user(soft_margin, int_arg);
 		break;
 	}
 	return ret;
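The wdt285 hunks also tighten the user-pointer handling: the raw ioctl argument is cast once to an int __user * and then only touched through copy_to_user()/get_user()/put_user(), which keeps sparse's address-space checking honest. A condensed sketch of that access pattern (example_ioctl is a placeholder, not the driver's function):

	static long example_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
	{
		int __user *p = (int __user *)arg;	/* mirrors int_arg above */
		int val;

		if (get_user(val, p))			/* read one int from user space */
			return -EFAULT;
		return put_user(val + 1, p);		/* write one back */
	}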