author    David Vrabel <david.vrabel@csr.com>  2009-02-02 12:52:39 -0500
committer David Vrabel <david.vrabel@csr.com>  2009-02-02 12:52:39 -0500
commit    8f04915532485d81e7f6c580a396ea7b01094221 (patch)
tree      c5740e961a025f2fb6b520a2bc5937f19d4345ab /drivers
parent    8f5140a6a0b1a9aa79585b0008e88c5d266c5c1d (diff)
parent    45c82b5a770be66845687a7d027c8b52946d59af (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-upstream
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/Kconfig | 6
-rw-r--r--  drivers/ata/ahci.c | 32
-rw-r--r--  drivers/ata/ata_piix.c | 34
-rw-r--r--  drivers/ata/libata-scsi.c | 20
-rw-r--r--  drivers/ata/libata-sff.c | 12
-rw-r--r--  drivers/ata/pata_rb532_cf.c | 2
-rw-r--r--  drivers/ata/pata_via.c | 22
-rw-r--r--  drivers/ata/sata_mv.c | 56
-rw-r--r--  drivers/ata/sata_nv.c | 70
-rw-r--r--  drivers/ata/sata_sil.c | 37
-rw-r--r--  drivers/base/core.c | 8
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 2
-rw-r--r--  drivers/char/selection.c | 2
-rw-r--r--  drivers/char/tty_io.c | 4
-rw-r--r--  drivers/dma/Kconfig | 19
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/dmaengine.c | 8
-rw-r--r--  drivers/dma/dmatest.c | 35
-rw-r--r--  drivers/dma/fsldma.c | 8
-rw-r--r--  drivers/dma/ipu/Makefile | 1
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 1740
-rw-r--r--  drivers/dma/ipu/ipu_intern.h | 176
-rw-r--r--  drivers/dma/ipu/ipu_irq.c | 413
-rw-r--r--  drivers/edac/cell_edac.c | 8
-rw-r--r--  drivers/firewire/fw-card.c | 4
-rw-r--r--  drivers/firewire/fw-device.c | 123
-rw-r--r--  drivers/firewire/fw-device.h | 1
-rw-r--r--  drivers/firewire/fw-ohci.c | 6
-rw-r--r--  drivers/firewire/fw-sbp2.c | 89
-rw-r--r--  drivers/firewire/fw-topology.c | 12
-rw-r--r--  drivers/firewire/fw-transaction.h | 9
-rw-r--r--  drivers/firmware/dmi_scan.c | 74
-rw-r--r--  drivers/gpio/gpiolib.c | 1
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 3
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 14
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 2
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 161
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 15
-rw-r--r--  drivers/hid/hid-core.c | 4
-rw-r--r--  drivers/hid/hid-ids.h | 2
-rw-r--r--  drivers/hid/hid-microsoft.c | 13
-rw-r--r--  drivers/hid/usbhid/hiddev.c | 2
-rw-r--r--  drivers/hwmon/applesmc.c | 30
-rw-r--r--  drivers/i2c/busses/i2c-acorn.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-ali1535.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-ali1563.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-ali15x3.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-amd756.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-amd8111.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-au1550.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-bfin-twi.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-elektor.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-hydra.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-ibm_iic.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-iop3xx.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-ixp2000.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-mpc.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-mv64xxx.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-nforce2.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-parport-light.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-parport.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-pca-isa.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-piix4.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-sibyte.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-sis5595.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-sis630.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-sis96x.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-via.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-viapro.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-voodoo3.c | 2
-rw-r--r--  drivers/i2c/busses/scx200_acb.c | 1
-rw-r--r--  drivers/i2c/busses/scx200_i2c.c | 1
-rw-r--r--  drivers/i2c/chips/Kconfig | 37
-rw-r--r--  drivers/i2c/chips/Makefile | 2
-rw-r--r--  drivers/ide/falconide.c | 2
-rw-r--r--  drivers/ide/ide-probe.c | 3
-rw-r--r--  drivers/ide/palm_bk3710.c | 11
-rw-r--r--  drivers/ieee1394/ieee1394.h | 4
-rw-r--r--  drivers/ieee1394/ieee1394_core.c | 16
-rw-r--r--  drivers/ieee1394/ohci1394.h | 2
-rw-r--r--  drivers/ieee1394/pcilynx.c | 1
-rw-r--r--  drivers/ieee1394/sbp2.c | 54
-rw-r--r--  drivers/isdn/i4l/isdn_ppp.c | 2
-rw-r--r--  drivers/lguest/core.c | 2
-rw-r--r--  drivers/lguest/lguest_user.c | 5
-rw-r--r--  drivers/misc/Kconfig | 9
-rw-r--r--  drivers/misc/Makefile | 2
-rw-r--r--  drivers/misc/eeprom/Kconfig | 59
-rw-r--r--  drivers/misc/eeprom/Makefile | 4
-rw-r--r--  drivers/misc/eeprom/at24.c (renamed from drivers/i2c/chips/at24.c) | 0
-rw-r--r--  drivers/misc/eeprom/at25.c (renamed from drivers/spi/at25.c) | 0
-rw-r--r--  drivers/misc/eeprom/eeprom.c (renamed from drivers/i2c/chips/eeprom.c) | 0
-rw-r--r--  drivers/misc/eeprom/eeprom_93cx6.c (renamed from drivers/misc/eeprom_93cx6.c) | 0
-rw-r--r--  drivers/misc/hpilo.c | 2
-rw-r--r--  drivers/misc/sgi-xp/xpc_channel.c | 3
-rw-r--r--  drivers/misc/sgi-xp/xpc_sn2.c | 34
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c | 2
-rw-r--r--  drivers/mmc/host/Kconfig | 10
-rw-r--r--  drivers/mmc/host/Makefile | 1
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 1242
-rw-r--r--  drivers/mmc/host/s3cmci.c | 2
-rw-r--r--  drivers/mtd/nand/fsl_elbc_nand.c | 8
-rw-r--r--  drivers/mtd/nand/pasemi_nand.c | 4
-rw-r--r--  drivers/mtd/onenand/omap2.c | 6
-rw-r--r--  drivers/mtd/ubi/Kconfig.debug | 10
-rw-r--r--  drivers/mtd/ubi/build.c | 21
-rw-r--r--  drivers/mtd/ubi/cdev.c | 184
-rw-r--r--  drivers/mtd/ubi/gluebi.c | 11
-rw-r--r--  drivers/mtd/ubi/scan.c | 8
-rw-r--r--  drivers/mtd/ubi/ubi.h | 11
-rw-r--r--  drivers/mtd/ubi/upd.c | 21
-rw-r--r--  drivers/mtd/ubi/vmt.c | 17
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/arm/am79c961a.c | 20
-rw-r--r--  drivers/net/bnx2x.h | 11
-rw-r--r--  drivers/net/bnx2x_link.c | 64
-rw-r--r--  drivers/net/bnx2x_main.c | 302
-rw-r--r--  drivers/net/bnx2x_reg.h | 2
-rw-r--r--  drivers/net/cxgb3/sge.c | 1
-rw-r--r--  drivers/net/e1000/e1000_main.c | 4
-rw-r--r--  drivers/net/e1000e/82571.c | 6
-rw-r--r--  drivers/net/e1000e/hw.h | 1
-rw-r--r--  drivers/net/fec.c | 2
-rw-r--r--  drivers/net/gianfar.c | 6
-rw-r--r--  drivers/net/gianfar_mii.c | 2
-rw-r--r--  drivers/net/igb/e1000_82575.c | 11
-rw-r--r--  drivers/net/igb/igb.h | 9
-rw-r--r--  drivers/net/igb/igb_main.c | 24
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 62
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h | 3
-rw-r--r--  drivers/net/korina.c | 15
-rw-r--r--  drivers/net/macb.c | 8
-rw-r--r--  drivers/net/mv643xx_eth.c | 17
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 15
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 10
-rw-r--r--  drivers/net/netxen/netxen_nic_ethtool.c | 5
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 4
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 31
-rw-r--r--  drivers/net/phy/mdio_bus.c | 8
-rw-r--r--  drivers/net/phy/smsc.c | 12
-rw-r--r--  drivers/net/r6040.c | 1
-rw-r--r--  drivers/net/sfc/efx.c | 59
-rw-r--r--  drivers/net/sfc/efx.h | 9
-rw-r--r--  drivers/net/sfc/ethtool.c | 3
-rw-r--r--  drivers/net/sfc/falcon.c | 32
-rw-r--r--  drivers/net/sfc/mdio_10g.c | 191
-rw-r--r--  drivers/net/sfc/mdio_10g.h | 3
-rw-r--r--  drivers/net/sfc/net_driver.h | 9
-rw-r--r--  drivers/net/sfc/phy.h | 1
-rw-r--r--  drivers/net/sfc/selftest.c | 7
-rw-r--r--  drivers/net/sfc/sfe4001.c | 42
-rw-r--r--  drivers/net/sfc/tenxpress.c | 213
-rw-r--r--  drivers/net/sfc/workarounds.h | 12
-rw-r--r--  drivers/net/skfp/skfddi.c | 4
-rw-r--r--  drivers/net/sky2.c | 6
-rw-r--r--  drivers/net/smsc911x.c | 2
-rw-r--r--  drivers/net/smsc9420.c | 3
-rw-r--r--  drivers/net/tg3.c | 81
-rw-r--r--  drivers/net/tg3.h | 1
-rw-r--r--  drivers/net/tulip/21142.c | 23
-rw-r--r--  drivers/net/ucc_geth.c | 20
-rw-r--r--  drivers/net/ucc_geth.h | 2
-rw-r--r--  drivers/net/ucc_geth_mii.c | 12
-rw-r--r--  drivers/net/ucc_geth_mii.h | 1
-rw-r--r--  drivers/net/usb/mcs7830.c | 20
-rw-r--r--  drivers/net/via-velocity.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 9
-rw-r--r--  drivers/net/wimax/i2400m/control.c | 2
-rw-r--r--  drivers/net/wimax/i2400m/debugfs.c | 14
-rw-r--r--  drivers/net/wimax/i2400m/driver.c | 16
-rw-r--r--  drivers/net/wimax/i2400m/usb-rx.c | 9
-rw-r--r--  drivers/net/wireless/ath5k/base.c | 10
-rw-r--r--  drivers/net/wireless/ath9k/rc.c | 2
-rw-r--r--  drivers/net/wireless/ath9k/regd_common.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-rs.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rs.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-hcmd.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 4
-rw-r--r--  drivers/net/wireless/libertas/hostcmd.h | 91
-rw-r--r--  drivers/net/wireless/orinoco/orinoco.c | 32
-rw-r--r--  drivers/net/wireless/p54/p54common.c | 30
-rw-r--r--  drivers/net/wireless/p54/p54usb.c | 41
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 3
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00rfkill.c | 2
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187_dev.c | 1
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187_rtl8225.c | 10
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 1
-rw-r--r--  drivers/oprofile/cpu_buffer.c | 5
-rw-r--r--  drivers/oprofile/cpu_buffer.h | 7
-rw-r--r--  drivers/parisc/sba_iommu.c | 18
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 4
-rw-r--r--  drivers/pci/msi.c | 16
-rw-r--r--  drivers/pci/pci-driver.c | 91
-rw-r--r--  drivers/pci/pci.c | 63
-rw-r--r--  drivers/pci/pci.h | 6
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 6
-rw-r--r--  drivers/power/pda_power.c | 2
-rw-r--r--  drivers/regulator/bq24022.c | 6
-rw-r--r--  drivers/regulator/wm8350-regulator.c | 2
-rw-r--r--  drivers/s390/net/lcs.c | 8
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 4
-rw-r--r--  drivers/scsi/libiscsi.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 53
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 16
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 23
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 1
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 10
-rw-r--r--  drivers/serial/8250_pci.c | 6
-rw-r--r--  drivers/serial/Kconfig | 2
-rw-r--r--  drivers/serial/jsm/jsm_driver.c | 2
-rw-r--r--  drivers/serial/jsm/jsm_tty.c | 6
-rw-r--r--  drivers/serial/mcf.c | 11
-rw-r--r--  drivers/serial/sh-sci.h | 5
-rw-r--r--  drivers/spi/Kconfig | 11
-rw-r--r--  drivers/spi/Makefile | 1
-rw-r--r--  drivers/staging/agnx/agnx.h | 2
-rw-r--r--  drivers/staging/altpciechdma/altpciechdma.c | 4
-rw-r--r--  drivers/staging/android/binder.c | 16
-rw-r--r--  drivers/staging/android/lowmemorykiller.txt | 16
-rw-r--r--  drivers/staging/android/timed_gpio.c | 5
-rw-r--r--  drivers/staging/comedi/Kconfig | 1
-rw-r--r--  drivers/staging/meilhaus/Kconfig | 21
-rw-r--r--  drivers/staging/poch/poch.c | 2
-rw-r--r--  drivers/staging/usbip/usbip_common.c | 16
-rw-r--r--  drivers/usb/Makefile | 1
-rw-r--r--  drivers/usb/class/cdc-acm.c | 9
-rw-r--r--  drivers/usb/class/usblp.c | 1
-rw-r--r--  drivers/usb/core/devio.c | 20
-rw-r--r--  drivers/usb/core/driver.c | 2
-rw-r--r--  drivers/usb/core/hcd-pci.c | 117
-rw-r--r--  drivers/usb/core/hcd.h | 1
-rw-r--r--  drivers/usb/core/hub.c | 4
-rw-r--r--  drivers/usb/core/inode.c | 1
-rw-r--r--  drivers/usb/core/message.c | 40
-rw-r--r--  drivers/usb/core/usb.h | 6
-rw-r--r--  drivers/usb/gadget/composite.c | 9
-rw-r--r--  drivers/usb/gadget/imx_udc.c | 2
-rw-r--r--  drivers/usb/host/Kconfig | 18
-rw-r--r--  drivers/usb/host/Makefile | 6
-rw-r--r--  drivers/usb/host/ehci-pci.c | 3
-rw-r--r--  drivers/usb/host/fhci-dbg.c | 139
-rw-r--r--  drivers/usb/host/fhci-hcd.c | 836
-rw-r--r--  drivers/usb/host/fhci-hub.c | 345
-rw-r--r--  drivers/usb/host/fhci-mem.c | 113
-rw-r--r--  drivers/usb/host/fhci-q.c | 284
-rw-r--r--  drivers/usb/host/fhci-sched.c | 888
-rw-r--r--  drivers/usb/host/fhci-tds.c | 626
-rw-r--r--  drivers/usb/host/fhci.h | 607
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 8
-rw-r--r--  drivers/usb/host/ohci-omap.c | 6
-rw-r--r--  drivers/usb/host/ohci-pci.c | 1
-rw-r--r--  drivers/usb/host/uhci-hcd.c | 1
-rw-r--r--  drivers/usb/misc/ldusb.c | 2
-rw-r--r--  drivers/usb/mon/mon_bin.c | 105
-rw-r--r--  drivers/usb/musb/Kconfig | 3
-rw-r--r--  drivers/usb/musb/cppi_dma.c | 10
-rw-r--r--  drivers/usb/musb/davinci.c | 13
-rw-r--r--  drivers/usb/musb/musb_core.c | 5
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 6
-rw-r--r--  drivers/usb/musb/musb_host.c | 4
-rw-r--r--  drivers/usb/musb/tusb6010_omap.c | 4
-rw-r--r--  drivers/usb/otg/Kconfig | 4
-rw-r--r--  drivers/usb/serial/cp2101.c | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio.h | 9
-rw-r--r--  drivers/usb/serial/option.c | 14
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 3
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.h | 2
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 36
-rw-r--r--  drivers/video/Kconfig | 14
-rw-r--r--  drivers/video/Makefile | 1
-rw-r--r--  drivers/video/aty/radeon_i2c.c | 1
-rw-r--r--  drivers/video/i810/i810-i2c.c | 1
-rw-r--r--  drivers/video/intelfb/intelfb_i2c.c | 1
-rw-r--r--  drivers/video/mx3fb.c | 1555
-rw-r--r--  drivers/video/nvidia/nv_i2c.c | 1
-rw-r--r--  drivers/video/omap/lcdc.c | 4
-rw-r--r--  drivers/video/savage/savagefb-i2c.c | 1
-rw-r--r--  drivers/watchdog/Kconfig | 6
-rw-r--r--  drivers/watchdog/at91rm9200_wdt.c | 1
-rw-r--r--  drivers/xen/balloon.c | 41
-rw-r--r--  drivers/xen/xenfs/xenbus.c | 11
296 files changed, 11585 insertions, 1763 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 503a908afc80..0bcf26464670 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -112,11 +112,11 @@ config ATA_PIIX
 	  If unsure, say N.
 
 config SATA_MV
-	tristate "Marvell SATA support (HIGHLY EXPERIMENTAL)"
-	depends on EXPERIMENTAL
+	tristate "Marvell SATA support"
 	help
 	  This option enables support for the Marvell Serial ATA family.
-	  Currently supports 88SX[56]0[48][01] chips.
+	  Currently supports 88SX[56]0[48][01] PCI(-X) chips,
+	  as well as the newer [67]042 PCI-X/PCIe and SOC devices.
 
 	  If unsure, say N.
 
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 96039671e3b9..77bba4c083cb 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -2548,6 +2548,32 @@ static void ahci_p5wdh_workaround(struct ata_host *host)
 	}
 }
 
+static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
+{
+	static const struct dmi_system_id broken_systems[] = {
+		{
+			.ident = "HP Compaq nx6310",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
+			},
+			/* PCI slot number of the controller */
+			.driver_data = (void *)0x1FUL,
+		},
+
+		{ }	/* terminate list */
+	};
+	const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
+
+	if (dmi) {
+		unsigned long slot = (unsigned long)dmi->driver_data;
+		/* apply the quirk only to on-board controllers */
+		return slot == PCI_SLOT(pdev->devfn);
+	}
+
+	return false;
+}
+
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
@@ -2647,6 +2673,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	}
 
+	if (ahci_broken_system_poweroff(pdev)) {
+		pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
+		dev_info(&pdev->dev,
+			"quirky BIOS, skipping spindown on poweroff\n");
+	}
+
 	/* CAP.NP sometimes indicate the index of the last enabled
 	 * port, at other times, that of the last possible port, so
 	 * determining the maximum port number requires looking at
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 887d8f46a287..54961c0b2c73 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1387,6 +1387,32 @@ static void piix_iocfg_bit18_quirk(struct ata_host *host)
 	}
 }
 
+static bool piix_broken_system_poweroff(struct pci_dev *pdev)
+{
+	static const struct dmi_system_id broken_systems[] = {
+		{
+			.ident = "HP Compaq 2510p",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 2510p"),
+			},
+			/* PCI slot number of the controller */
+			.driver_data = (void *)0x1FUL,
+		},
+
+		{ }	/* terminate list */
+	};
+	const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
+
+	if (dmi) {
+		unsigned long slot = (unsigned long)dmi->driver_data;
+		/* apply the quirk only to on-board controllers */
+		return slot == PCI_SLOT(pdev->devfn);
+	}
+
+	return false;
+}
+
 /**
  *	piix_init_one - Register PIIX ATA PCI device with kernel services
  *	@pdev: PCI device to register
@@ -1422,6 +1448,14 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
 	if (!in_module_init)
 		return -ENODEV;
 
+	if (piix_broken_system_poweroff(pdev)) {
+		piix_port_info[ent->driver_data].flags |=
+				ATA_FLAG_NO_POWEROFF_SPINDOWN |
+				ATA_FLAG_NO_HIBERNATE_SPINDOWN;
+		dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
+				"on poweroff and hibernation\n");
+	}
+
 	port_info[0] = piix_port_info[ent->driver_data];
 	port_info[1] = piix_port_info[ent->driver_data];
 
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a1a6e6298c33..3c4c5ae277ba 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -46,6 +46,7 @@
 #include <linux/libata.h>
 #include <linux/hdreg.h>
 #include <linux/uaccess.h>
+#include <linux/suspend.h>
 
 #include "libata.h"
 
@@ -1303,6 +1304,17 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
 
 		tf->command = ATA_CMD_VERIFY;	/* READ VERIFY */
 	} else {
+		/* Some odd clown BIOSen issue spindown on power off (ACPI S4
+		 * or S5) causing some drives to spin up and down again.
+		 */
+		if ((qc->ap->flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
+		    system_state == SYSTEM_POWER_OFF)
+			goto skip;
+
+		if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
+		     system_entering_hibernation())
+			goto skip;
+
 		/* XXX: This is for backward compatibility, will be
 		 * removed.  Read Documentation/feature-removal-schedule.txt
 		 * for more info.
@@ -1326,8 +1338,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
 			scmd->scsi_done = qc->scsidone;
 			qc->scsidone = ata_delayed_done;
 		}
-		scmd->result = SAM_STAT_GOOD;
-		return 1;
+		goto skip;
 	}
 
 	/* Issue ATA STANDBY IMMEDIATE command */
@@ -1343,10 +1354,13 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
 
 	return 0;
 
-invalid_fld:
+ invalid_fld:
 	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
 	/* "Invalid field in cbd" */
 	return 1;
+ skip:
+	scmd->result = SAM_STAT_GOOD;
+	return 1;
 }
 
 
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 5a4aad123c42..0b299b0f8172 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1322,7 +1322,7 @@ fsm_start:
 				 * condition.  Mark hint.
 				 */
 				ata_ehi_push_desc(ehi, "ST-ATA: "
-					"DRQ=1 with device error, "
+					"DRQ=0 without device error, "
 					"dev_stat 0x%X", status);
 				qc->err_mask |= AC_ERR_HSM |
 					AC_ERR_NODEV_HINT;
@@ -1358,6 +1358,16 @@ fsm_start:
 				qc->err_mask |= AC_ERR_HSM;
 			}
 
+			/* There are oddball controllers with
+			 * status register stuck at 0x7f and
+			 * lbal/m/h at zero which makes it
+			 * pass all other presence detection
+			 * mechanisms we have.  Set NODEV_HINT
+			 * for it.  Kernel bz#7241.
+			 */
+			if (status == 0x7f)
+				qc->err_mask |= AC_ERR_NODEV_HINT;
+
 			/* ata_pio_sectors() might change the
 			 * state to HSM_ST_LAST. so, the state
 			 * is changed after ata_pio_sectors().
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index c2e6fb9f2ef9..ebfcda26d639 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -63,8 +63,6 @@ static inline void rb532_pata_finish_io(struct ata_port *ap)
 	   ata_sff_sync might be sufficient. */
 	ata_sff_dma_pause(ap);
 	ndelay(RB500_CF_IO_DELAY);
-
-	set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
 }
 
 static void rb532_pata_exec_command(struct ata_port *ap,
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 681169c9c640..79a6c9a0b721 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -86,6 +86,10 @@ enum {
 	VIA_SATA_PATA	= 0x800, /* SATA/PATA combined configuration */
 };
 
+enum {
+	VIA_IDFLAG_SINGLE	= (1 << 0), /* single channel controller) */
+};
+
 /*
  * VIA SouthBridge chips.
  */
@@ -97,8 +101,12 @@ static const struct via_isa_bridge {
 	u8 rev_max;
 	u16 flags;
 } via_isa_bridges[] = {
+	{ "vx855",	PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f,
+	  VIA_UDMA_133 | VIA_BAD_AST | VIA_SATA_PATA },
 	{ "vx800",	PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, VIA_UDMA_133 |
 	VIA_BAD_AST | VIA_SATA_PATA },
+	{ "vt8261",	PCI_DEVICE_ID_VIA_8261, 0x00, 0x2f,
+	  VIA_UDMA_133 | VIA_BAD_AST },
 	{ "vt8237s",	PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
 	{ "vt8251",	PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
 	{ "cx700",	PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_SATA_PATA },
@@ -122,6 +130,8 @@ static const struct via_isa_bridge {
 	{ "vt82c586",	PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, VIA_UDMA_NONE | VIA_SET_FIFO },
 	{ "vt82c576",	PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK },
 	{ "vt82c576",	PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
+	{ "vtxxxx",	PCI_DEVICE_ID_VIA_ANON, 0x00, 0x2f,
+	  VIA_UDMA_133 | VIA_BAD_AST },
 	{ NULL }
 };
 
@@ -460,6 +470,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	static int printed_version;
 	u8 enable;
 	u32 timing;
+	unsigned long flags = id->driver_data;
 	int rc;
 
 	if (!printed_version++)
@@ -469,9 +480,13 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (rc)
 		return rc;
 
+	if (flags & VIA_IDFLAG_SINGLE)
+		ppi[1] = &ata_dummy_port_info;
+
 	/* To find out how the IDE will behave and what features we
 	   actually have to look at the bridge not the IDE controller */
-	for (config = via_isa_bridges; config->id; config++)
+	for (config = via_isa_bridges; config->id != PCI_DEVICE_ID_VIA_ANON;
+	     config++)
 		if ((isa = pci_get_device(PCI_VENDOR_ID_VIA +
 					  !!(config->flags & VIA_BAD_ID),
 					  config->id, NULL))) {
@@ -482,10 +497,6 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 			pci_dev_put(isa);
 		}
 
-	if (!config->id) {
-		printk(KERN_WARNING "via: Unknown VIA SouthBridge, disabling.\n");
-		return -ENODEV;
-	}
 	pci_dev_put(isa);
 
 	if (!(config->flags & VIA_NO_ENABLES)) {
@@ -587,6 +598,7 @@ static const struct pci_device_id via[] = {
 	{ PCI_VDEVICE(VIA, 0x1571), },
 	{ PCI_VDEVICE(VIA, 0x3164), },
 	{ PCI_VDEVICE(VIA, 0x5324), },
+	{ PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE },
 
 	{ },
 };
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 86918634a4c5..f2d8a020ea53 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -33,10 +33,6 @@
  *
  * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it).
  *
- * --> Investigate problems with PCI Message Signalled Interrupts (MSI).
- *
- * --> Cache frequently-accessed registers in mv_port_priv to reduce overhead.
- *
  * --> Develop a low-power-consumption strategy, and implement it.
  *
  * --> [Experiment, low priority] Investigate interrupt coalescing.
@@ -72,7 +68,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_mv"
-#define DRV_VERSION	"1.24"
+#define DRV_VERSION	"1.25"
 
 enum {
 	/* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -351,8 +347,6 @@ enum {
 
 	EDMA_HALTCOND_OFS	= 0x60,		/* GenIIe halt conditions */
 
-	GEN_II_NCQ_MAX_SECTORS	= 256,		/* max sects/io on Gen2 w/NCQ */
-
 	/* Host private flags (hp_flags) */
 	MV_HP_FLAG_MSI		= (1 << 0),
 	MV_HP_ERRATA_50XXB0	= (1 << 1),
@@ -883,19 +877,15 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
 		struct mv_host_priv *hpriv = ap->host->private_data;
 		int hardport = mv_hardport_from_port(ap->port_no);
 		void __iomem *hc_mmio = mv_hc_base_from_port(
-					mv_host_base(ap->host), hardport);
-		u32 hc_irq_cause, ipending;
+					mv_host_base(ap->host), ap->port_no);
+		u32 hc_irq_cause;
 
 		/* clear EDMA event indicators, if any */
 		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
-		/* clear EDMA interrupt indicator, if any */
-		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
-		ipending = (DEV_IRQ | DMA_IRQ) << hardport;
-		if (hc_irq_cause & ipending) {
-			writelfl(hc_irq_cause & ~ipending,
-				 hc_mmio + HC_IRQ_CAUSE_OFS);
-		}
+		/* clear pending irq events */
+		hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
+		writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
 
 		mv_edma_cfg(ap, want_ncq);
 
@@ -1099,20 +1089,12 @@ static void mv6_dev_config(struct ata_device *adev)
 	 *
 	 * Gen-II does not support NCQ over a port multiplier
 	 *  (no FIS-based switching).
-	 *
-	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
-	 * See mv_qc_prep() for more info.
 	 */
 	if (adev->flags & ATA_DFLAG_NCQ) {
 		if (sata_pmp_attached(adev->link->ap)) {
 			adev->flags &= ~ATA_DFLAG_NCQ;
 			ata_dev_printk(adev, KERN_INFO,
 				"NCQ disabled for command-based switching\n");
-		} else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
-			adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
-			ata_dev_printk(adev, KERN_INFO,
-				"max_sectors limited to %u for NCQ\n",
-				adev->max_sectors);
 		}
 	}
 }
@@ -1450,7 +1432,8 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	 * only 11 bytes...so we must pick and choose required
 	 * registers based on the command.  So, we drop feature and
 	 * hob_feature for [RW] DMA commands, but they are needed for
-	 * NCQ.  NCQ will drop hob_nsect.
+	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
+	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
 	 */
 	switch (tf->command) {
 	case ATA_CMD_READ:
@@ -2214,9 +2197,15 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
 	struct ata_host *host = dev_instance;
 	struct mv_host_priv *hpriv = host->private_data;
 	unsigned int handled = 0;
+	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
 	u32 main_irq_cause, pending_irqs;
 
 	spin_lock(&host->lock);
+
+	/* for MSI:  block new interrupts while in here */
+	if (using_msi)
+		writel(0, hpriv->main_irq_mask_addr);
+
 	main_irq_cause = readl(hpriv->main_irq_cause_addr);
 	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
 	/*
@@ -2230,6 +2219,11 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
 		handled = mv_host_intr(host, pending_irqs);
 	}
 	spin_unlock(&host->lock);
+
+	/* for MSI: unmask; interrupt cause bits will retrigger now */
+	if (using_msi)
+		writel(hpriv->main_irq_mask, hpriv->main_irq_mask_addr);
+
 	return IRQ_RETVAL(handled);
 }
 
@@ -2821,8 +2815,7 @@ static void mv_eh_thaw(struct ata_port *ap)
 	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
 	/* clear pending irq events */
-	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
-	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
+	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
 	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
 
 	mv_enable_port_irqs(ap, ERR_IRQ);
@@ -3075,6 +3068,9 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
 	}
 
+	/* initialize shadow irq mask with register's value */
+	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
+
 	/* global interrupt mask: 0 == mask everything */
 	mv_set_main_irq_mask(host, ~0, 0);
 
@@ -3430,9 +3426,9 @@ static int mv_pci_init_one(struct pci_dev *pdev,
 	if (rc)
 		return rc;
 
-	/* Enable interrupts */
-	if (msi && pci_enable_msi(pdev))
-		pci_intx(pdev, 1);
+	/* Enable message-switched interrupts, if requested */
+	if (msi && pci_enable_msi(pdev) == 0)
+		hpriv->hp_flags |= MV_HP_FLAG_MSI;
 
 	mv_dump_pci_cfg(pdev, 0x68);
 	mv_print_info(host);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 6f1460614325..c49ad0e61b6f 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -305,10 +305,10 @@ static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 
+static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
+				   unsigned long deadline);
 static void nv_nf2_freeze(struct ata_port *ap);
 static void nv_nf2_thaw(struct ata_port *ap);
-static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
-			    unsigned long deadline);
 static void nv_ck804_freeze(struct ata_port *ap);
 static void nv_ck804_thaw(struct ata_port *ap);
 static int nv_adma_slave_config(struct scsi_device *sdev);
@@ -352,6 +352,7 @@ enum nv_host_type
 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
 	CK804,
 	ADMA,
+	MCP5x,
 	SWNCQ,
 };
 
@@ -363,10 +364,10 @@ static const struct pci_device_id nv_pci_tbl[] = {
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
-	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
-	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
-	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
-	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
@@ -432,7 +433,7 @@ static struct ata_port_operations nv_nf2_ops = {
 	.inherits		= &nv_common_ops,
 	.freeze			= nv_nf2_freeze,
 	.thaw			= nv_nf2_thaw,
-	.hardreset		= nv_nf2_hardreset,
+	.hardreset		= nv_noclassify_hardreset,
 };
 
 /* CK804 finally gets hardreset right */
@@ -467,8 +468,19 @@ static struct ata_port_operations nv_adma_ops = {
 	.host_stop		= nv_adma_host_stop,
 };
 
+/* Kernel bz#12351 reports that when SWNCQ is enabled, for hotplug to
+ * work, hardreset should be used and hardreset can't report proper
+ * signature, which suggests that mcp5x is closer to nf2 as long as
+ * reset quirkiness is concerned.  Define separate ops for mcp5x with
+ * nv_noclassify_hardreset().
+ */
+static struct ata_port_operations nv_mcp5x_ops = {
+	.inherits		= &nv_common_ops,
+	.hardreset		= nv_noclassify_hardreset,
+};
+
 static struct ata_port_operations nv_swncq_ops = {
-	.inherits		= &nv_generic_ops,
+	.inherits		= &nv_mcp5x_ops,
 
 	.qc_defer		= ata_std_qc_defer,
 	.qc_prep		= nv_swncq_qc_prep,
@@ -531,6 +543,15 @@ static const struct ata_port_info nv_port_info[] = {
 		.port_ops	= &nv_adma_ops,
 		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
 	},
+	/* MCP5x */
+	{
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+		.pio_mask	= NV_PIO_MASK,
+		.mwdma_mask	= NV_MWDMA_MASK,
+		.udma_mask	= NV_UDMA_MASK,
+		.port_ops	= &nv_mcp5x_ops,
+		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
+	},
 	/* SWNCQ */
 	{
 		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
@@ -1530,6 +1551,17 @@ static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
 	return 0;
 }
 
+static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
+				   unsigned long deadline)
+{
+	bool online;
+	int rc;
+
+	rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
+				 &online, NULL);
+	return online ? -EAGAIN : rc;
+}
+
 static void nv_nf2_freeze(struct ata_port *ap)
 {
 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
@@ -1554,17 +1586,6 @@ static void nv_nf2_thaw(struct ata_port *ap)
 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
 }
 
-static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
-			    unsigned long deadline)
-{
-	bool online;
-	int rc;
-
-	rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
-				 &online, NULL);
-	return online ? -EAGAIN : rc;
-}
-
 static void nv_ck804_freeze(struct ata_port *ap)
 {
 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
@@ -2355,14 +2376,9 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (type == CK804 && adma_enabled) {
 		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
 		type = ADMA;
-	}
-
-	if (type == SWNCQ) {
-		if (swncq_enabled)
-			dev_printk(KERN_NOTICE, &pdev->dev,
-				   "Using SWNCQ mode\n");
-		else
-			type = GENERIC;
+	} else if (type == MCP5x && swncq_enabled) {
+		dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
+		type = SWNCQ;
 	}
 
 	ppi[0] = &nv_port_info[type];
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 564c142b03b0..9f029595f454 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -44,6 +44,7 @@
 #include <linux/device.h>
 #include <scsi/scsi_host.h>
 #include <linux/libata.h>
+#include <linux/dmi.h>
 
 #define DRV_NAME	"sata_sil"
 #define DRV_VERSION	"2.4"
@@ -695,11 +696,38 @@ static void sil_init_controller(struct ata_host *host)
 	}
 }
 
+static bool sil_broken_system_poweroff(struct pci_dev *pdev)
+{
+	static const struct dmi_system_id broken_systems[] = {
+		{
+			.ident = "HP Compaq nx6325",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
+			},
+			/* PCI slot number of the controller */
+			.driver_data = (void *)0x12UL,
+		},
+
+		{ }	/* terminate list */
+	};
+	const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
+
+	if (dmi) {
+		unsigned long slot = (unsigned long)dmi->driver_data;
+		/* apply the quirk only to on-board controllers */
+		return slot == PCI_SLOT(pdev->devfn);
+	}
+
+	return false;
+}
+
 static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
 	int board_id = ent->driver_data;
-	const struct ata_port_info *ppi[] = { &sil_port_info[board_id], NULL };
+	struct ata_port_info pi = sil_port_info[board_id];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
 	struct ata_host *host;
 	void __iomem *mmio_base;
 	int n_ports, rc;
@@ -713,6 +741,13 @@ static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (board_id == sil_3114)
 		n_ports = 4;
 
+	if (sil_broken_system_poweroff(pdev)) {
+		pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN |
+					ATA_FLAG_NO_HIBERNATE_SPINDOWN;
+		dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
+				"on poweroff and hibernation\n");
+	}
+
 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
 	if (!host)
 		return -ENOMEM;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 8079afca4972..f3eae630e589 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -777,10 +777,16 @@ static void device_remove_class_symlinks(struct device *dev)
 int dev_set_name(struct device *dev, const char *fmt, ...)
 {
 	va_list vargs;
+	char *s;
 
 	va_start(vargs, fmt);
 	vsnprintf(dev->bus_id, sizeof(dev->bus_id), fmt, vargs);
 	va_end(vargs);
+
+	/* ewww... some of these buggers have / in the name... */
+	while ((s = strchr(dev->bus_id, '/')))
+		*s = '!';
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dev_set_name);
@@ -1274,7 +1280,7 @@ EXPORT_SYMBOL_GPL(__root_device_register);
 
 /**
  * root_device_unregister - unregister and free a root device
- * @root: device going away.
+ * @dev: device going away
  *
  * This function unregisters and cleans up a device that was created by
  * root_device_register().
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index d4e7dca06e4f..ba68a4671cb5 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -102,7 +102,7 @@ static int __init omap_rng_probe(struct platform_device *pdev)
 		return -EBUSY;
 
 	if (cpu_is_omap24xx()) {
-		rng_ick = clk_get(NULL, "rng_ick");
+		rng_ick = clk_get(&pdev->dev, "rng_ick");
 		if (IS_ERR(rng_ick)) {
 			dev_err(&pdev->dev, "Could not get rng_ick\n");
 			ret = PTR_ERR(rng_ick);
diff --git a/drivers/char/selection.c b/drivers/char/selection.c
index f29fbe9b8ed7..cb8ca5698963 100644
--- a/drivers/char/selection.c
+++ b/drivers/char/selection.c
@@ -268,7 +268,7 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
 
 	/* Allocate a new buffer before freeing the old one ... */
 	multiplier = use_unicode ? 3 : 1;  /* chars can take up to 3 bytes */
-	bp = kmalloc((sel_end-sel_start)/2*multiplier+1, GFP_KERNEL);
+	bp = kmalloc(((sel_end-sel_start)/2+1)*multiplier, GFP_KERNEL);
 	if (!bp) {
 		printk(KERN_WARNING "selection: kmalloc() failed\n");
 		clear_selection();
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index d33e5ab06177..bc84e125c6bc 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1817,8 +1817,10 @@ got_driver:
 		/* check whether we're reopening an existing tty */
 		tty = tty_driver_lookup_tty(driver, inode, index);
 
-		if (IS_ERR(tty))
+		if (IS_ERR(tty)) {
+			mutex_unlock(&tty_mutex);
 			return PTR_ERR(tty);
+		}
 	}
 
 	if (tty) {
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e34b06420816..48ea59e79672 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -62,6 +62,25 @@ config MV_XOR
 	---help---
 	  Enable support for the Marvell XOR engine.
 
+config MX3_IPU
+	bool "MX3x Image Processing Unit support"
+	depends on ARCH_MX3
+	select DMA_ENGINE
+	default y
+	help
+	  If you plan to use the Image Processing unit in the i.MX3x, say
+	  Y here. If unsure, select Y.
+
+config MX3_IPU_IRQS
+	int "Number of dynamically mapped interrupts for IPU"
+	depends on MX3_IPU
+	range 2 137
+	default 4
+	help
+	  Out of 137 interrupt sources on i.MX31 IPU only very few are used.
+	  To avoid bloating the irq_desc[] array we allocate a sufficient
+	  number of IRQ slots and map them dynamically to specific sources.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 14f59527d4f6..2e5dc96700d2 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o
 obj-$(CONFIG_DW_DMAC) += dw_dmac.o
+obj-$(CONFIG_MX3_IPU) += ipu/
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 403dbe781122..a58993011edb 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -329,9 +329,6 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 	struct dma_chan *chan;
 	int cpu;
 
-	WARN_ONCE(dmaengine_ref_count == 0,
-		  "client called %s without a reference", __func__);
-
 	cpu = get_cpu();
 	chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
 	put_cpu();
@@ -348,9 +345,6 @@ void dma_issue_pending_all(void)
 	struct dma_device *device;
 	struct dma_chan *chan;
 
-	WARN_ONCE(dmaengine_ref_count == 0,
-		  "client called %s without a reference", __func__);
-
 	rcu_read_lock();
 	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
@@ -961,6 +955,8 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
 	if (!dep)
 		return;
 
+	/* we'll submit tx->next now, so clear the link */
+	tx->next = NULL;
 	chan = dep->chan;
 
 	/* keep submitting up until a channel switch is detected
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 3603f1ea5b28..732fa1ec36ab 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -217,6 +217,10 @@ static int dmatest_func(void *data)
 	chan = thread->chan;
 
 	while (!kthread_should_stop()) {
+		struct dma_device *dev = chan->device;
+		struct dma_async_tx_descriptor *tx;
+		dma_addr_t dma_src, dma_dest;
+
 		total_tests++;
 
 		len = dmatest_random() % test_buf_size + 1;
@@ -226,10 +230,30 @@ static int dmatest_func(void *data)
 		dmatest_init_srcbuf(thread->srcbuf, src_off, len);
 		dmatest_init_dstbuf(thread->dstbuf, dst_off, len);
 
-		cookie = dma_async_memcpy_buf_to_buf(chan,
-				thread->dstbuf + dst_off,
-				thread->srcbuf + src_off,
-				len);
+		dma_src = dma_map_single(dev->dev, thread->srcbuf + src_off,
+					 len, DMA_TO_DEVICE);
+		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
+		dma_dest = dma_map_single(dev->dev, thread->dstbuf,
+					  test_buf_size, DMA_BIDIRECTIONAL);
+
+		tx = dev->device_prep_dma_memcpy(chan, dma_dest + dst_off,
+						 dma_src, len,
+						 DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP);
+		if (!tx) {
+			dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+			dma_unmap_single(dev->dev, dma_dest,
+					 test_buf_size, DMA_BIDIRECTIONAL);
+			pr_warning("%s: #%u: prep error with src_off=0x%x "
+					"dst_off=0x%x len=0x%x\n",
+					thread_name, total_tests - 1,
+					src_off, dst_off, len);
+			msleep(100);
+			failed_tests++;
+			continue;
+		}
+		tx->callback = NULL;
+		cookie = tx->tx_submit(tx);
+
 		if (dma_submit_error(cookie)) {
 			pr_warning("%s: #%u: submit error %d with src_off=0x%x "
 					"dst_off=0x%x len=0x%x\n",
@@ -253,6 +277,9 @@ static int dmatest_func(void *data)
 			failed_tests++;
 			continue;
 		}
+		/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
+		dma_unmap_single(dev->dev, dma_dest,
+				 test_buf_size, DMA_BIDIRECTIONAL);
 
 		error_count = 0;
 
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index ca70a21afc68..70126a606239 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -822,7 +822,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
 	 */
 	WARN_ON(fdev->feature != new_fsl_chan->feature);
 
-	new_fsl_chan->dev = &new_fsl_chan->common.dev->device;
+	new_fsl_chan->dev = fdev->dev;
 	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
 			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
 
@@ -875,7 +875,8 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
 	}
 
 	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
-		 compatible, new_fsl_chan->irq);
+		 compatible,
+		 new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);
 
 	return 0;
 
@@ -890,7 +891,8 @@ err_no_reg:
 
 static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
 {
-	free_irq(fchan->irq, fchan);
+	if (fchan->irq != NO_IRQ)
+		free_irq(fchan->irq, fchan);
 	list_del(&fchan->common.device_node);
 	iounmap(fchan->reg_base);
 	kfree(fchan);
diff --git a/drivers/dma/ipu/Makefile b/drivers/dma/ipu/Makefile
new file mode 100644
index 000000000000..6704cf48326d
--- /dev/null
+++ b/drivers/dma/ipu/Makefile
@@ -0,0 +1 @@
+obj-y += ipu_irq.o ipu_idmac.o
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
new file mode 100644
index 000000000000..1f154d08e98f
--- /dev/null
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -0,0 +1,1740 @@
+/*
+ * Copyright (C) 2008
+ * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
+ *
+ * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <mach/ipu.h>
+
+#include "ipu_intern.h"
+
+#define FS_VF_IN_VALID	0x00000002
+#define FS_ENC_IN_VALID	0x00000001
+
+/*
+ * There can be only one, we could allocate it dynamically, but then we'd have
+ * to add an extra parameter to some functions, and use something as ugly as
+ *	struct ipu *ipu = to_ipu(to_idmac(ichan->dma_chan.device));
+ * in the ISR
+ */
+static struct ipu ipu_data;
+
+#define to_ipu(id) container_of(id, struct ipu, idmac)
+
+static u32 __idmac_read_icreg(struct ipu *ipu, unsigned long reg)
+{
+	return __raw_readl(ipu->reg_ic + reg);
+}
+
+#define idmac_read_icreg(ipu, reg) __idmac_read_icreg(ipu, reg - IC_CONF)
+
+static void __idmac_write_icreg(struct ipu *ipu, u32 value, unsigned long reg)
+{
+	__raw_writel(value, ipu->reg_ic + reg);
+}
+
+#define idmac_write_icreg(ipu, v, reg) __idmac_write_icreg(ipu, v, reg - IC_CONF)
+
+static u32 idmac_read_ipureg(struct ipu *ipu, unsigned long reg)
+{
+	return __raw_readl(ipu->reg_ipu + reg);
+}
+
+static void idmac_write_ipureg(struct ipu *ipu, u32 value, unsigned long reg)
+{
+	__raw_writel(value, ipu->reg_ipu + reg);
+}
+
+/*****************************************************************************
+ * IPU / IC common functions
+ */
+static void dump_idmac_reg(struct ipu *ipu)
+{
+	dev_dbg(ipu->dev, "IDMAC_CONF 0x%x, IC_CONF 0x%x, IDMAC_CHA_EN 0x%x, "
+		"IDMAC_CHA_PRI 0x%x, IDMAC_CHA_BUSY 0x%x\n",
+		idmac_read_icreg(ipu, IDMAC_CONF),
+		idmac_read_icreg(ipu, IC_CONF),
+		idmac_read_icreg(ipu, IDMAC_CHA_EN),
+		idmac_read_icreg(ipu, IDMAC_CHA_PRI),
+		idmac_read_icreg(ipu, IDMAC_CHA_BUSY));
+	dev_dbg(ipu->dev, "BUF0_RDY 0x%x, BUF1_RDY 0x%x, CUR_BUF 0x%x, "
+		"DB_MODE 0x%x, TASKS_STAT 0x%x\n",
+		idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
+		idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
+		idmac_read_ipureg(ipu, IPU_CHA_CUR_BUF),
+		idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL),
+		idmac_read_ipureg(ipu, IPU_TASKS_STAT));
+}
+
+static uint32_t bytes_per_pixel(enum pixel_fmt fmt)
+{
+	switch (fmt) {
+	case IPU_PIX_FMT_GENERIC:	/* generic data */
+	case IPU_PIX_FMT_RGB332:
+	case IPU_PIX_FMT_YUV420P:
+	case IPU_PIX_FMT_YUV422P:
+	default:
+		return 1;
+	case IPU_PIX_FMT_RGB565:
+	case IPU_PIX_FMT_YUYV:
+	case IPU_PIX_FMT_UYVY:
+		return 2;
+	case IPU_PIX_FMT_BGR24:
+	case IPU_PIX_FMT_RGB24:
+		return 3;
+	case IPU_PIX_FMT_GENERIC_32:	/* generic data */
+	case IPU_PIX_FMT_BGR32:
+	case IPU_PIX_FMT_RGB32:
+	case IPU_PIX_FMT_ABGR32:
+		return 4;
+	}
+}
+
+/* Enable / disable direct write to memory by the Camera Sensor Interface */
+static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
+{
+	uint32_t ic_conf, mask;
+
+	switch (channel) {
+	case IDMAC_IC_0:
+		mask = IC_CONF_PRPENC_EN;
+		break;
+	case IDMAC_IC_7:
+		mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
+		break;
+	default:
+		return;
+	}
+	ic_conf = idmac_read_icreg(ipu, IC_CONF) | mask;
+	idmac_write_icreg(ipu, ic_conf, IC_CONF);
+}
+
+static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel)
+{
+	uint32_t ic_conf, mask;
+
+	switch (channel) {
+	case IDMAC_IC_0:
+		mask = IC_CONF_PRPENC_EN;
+		break;
+	case IDMAC_IC_7:
+		mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
+		break;
+	default:
+		return;
+	}
+	ic_conf = idmac_read_icreg(ipu, IC_CONF) & ~mask;
+	idmac_write_icreg(ipu, ic_conf, IC_CONF);
+}
+
+static uint32_t ipu_channel_status(struct ipu *ipu, enum ipu_channel channel)
+{
+	uint32_t stat = TASK_STAT_IDLE;
+	uint32_t task_stat_reg = idmac_read_ipureg(ipu, IPU_TASKS_STAT);
+
+	switch (channel) {
+	case IDMAC_IC_7:
+		stat = (task_stat_reg & TSTAT_CSI2MEM_MASK) >>
+			TSTAT_CSI2MEM_OFFSET;
+		break;
+	case IDMAC_IC_0:
+	case IDMAC_SDC_0:
+	case IDMAC_SDC_1:
+	default:
+		break;
+	}
+	return stat;
+}
+
+struct chan_param_mem_planar {
+	/* Word 0 */
+	u32	xv:10;
+	u32	yv:10;
+	u32	xb:12;
+
+	u32	yb:12;
+	u32	res1:2;
+	u32	nsb:1;
+	u32	lnpb:6;
+	u32	ubo_l:11;
+
+	u32	ubo_h:15;
+	u32	vbo_l:17;
+
+	u32	vbo_h:9;
+	u32	res2:3;
+	u32	fw:12;
+	u32	fh_l:8;
+
+	u32	fh_h:4;
+	u32	res3:28;
+
+	/* Word 1 */
+	u32	eba0;
+
+	u32	eba1;
+
+	u32	bpp:3;
+	u32	sl:14;
+	u32	pfs:3;
+	u32	bam:3;
+	u32	res4:2;
+	u32	npb:6;
+	u32	res5:1;
+
+	u32	sat:2;
+	u32	res6:30;
+} __attribute__ ((packed));
+
+struct chan_param_mem_interleaved {
+	/* Word 0 */
+	u32	xv:10;
+	u32	yv:10;
+	u32	xb:12;
+
+	u32	yb:12;
+	u32	sce:1;
+	u32	res1:1;
+	u32	nsb:1;
+	u32	lnpb:6;
+	u32	sx:10;
+	u32	sy_l:1;
+
+	u32	sy_h:9;
+	u32	ns:10;
+	u32	sm:10;
+	u32	sdx_l:3;
+
+	u32	sdx_h:2;
+	u32	sdy:5;
+	u32	sdrx:1;
+	u32	sdry:1;
+	u32	sdr1:1;
+	u32	res2:2;
+	u32	fw:12;
+	u32	fh_l:8;
+
+	u32	fh_h:4;
+	u32	res3:28;
+
+	/* Word 1 */
+	u32	eba0;
+
+	u32	eba1;
+
+	u32	bpp:3;
+	u32	sl:14;
+	u32	pfs:3;
+	u32	bam:3;
+	u32	res4:2;
+	u32	npb:6;
+	u32	res5:1;
+
+	u32	sat:2;
+	u32	scc:1;
+	u32	ofs0:5;
+	u32	ofs1:5;
+	u32	ofs2:5;
+	u32	ofs3:5;
+	u32	wid0:3;
+	u32	wid1:3;
+	u32	wid2:3;
+
+	u32	wid3:3;
+	u32	dec_sel:1;
262 u32 res6:28;
263} __attribute__ ((packed));
264
265union chan_param_mem {
266 struct chan_param_mem_planar pp;
267 struct chan_param_mem_interleaved ip;
268};
269
270static void ipu_ch_param_set_plane_offset(union chan_param_mem *params,
271 u32 u_offset, u32 v_offset)
272{
273 params->pp.ubo_l = u_offset & 0x7ff;
274 params->pp.ubo_h = u_offset >> 11;
275 params->pp.vbo_l = v_offset & 0x1ffff;
276 params->pp.vbo_h = v_offset >> 17;
277}
278
279static void ipu_ch_param_set_size(union chan_param_mem *params,
280 uint32_t pixel_fmt, uint16_t width,
281 uint16_t height, uint16_t stride)
282{
283 u32 u_offset;
284 u32 v_offset;
285
286 params->pp.fw = width - 1;
287 params->pp.fh_l = height - 1;
288 params->pp.fh_h = (height - 1) >> 8;
289 params->pp.sl = stride - 1;
290
291 switch (pixel_fmt) {
292 case IPU_PIX_FMT_GENERIC:
293 /*Represents 8-bit Generic data */
294 params->pp.bpp = 3;
295 params->pp.pfs = 7;
296 params->pp.npb = 31;
297 params->pp.sat = 2; /* SAT = use 32-bit access */
298 break;
299 case IPU_PIX_FMT_GENERIC_32:
300 /*Represents 32-bit Generic data */
301 params->pp.bpp = 0;
302 params->pp.pfs = 7;
303 params->pp.npb = 7;
304 params->pp.sat = 2; /* SAT = use 32-bit access */
305 break;
306 case IPU_PIX_FMT_RGB565:
307 params->ip.bpp = 2;
308 params->ip.pfs = 4;
309 params->ip.npb = 7;
310 params->ip.sat = 2; /* SAT = 32-bit access */
311 params->ip.ofs0 = 0; /* Red bit offset */
312 params->ip.ofs1 = 5; /* Green bit offset */
313 params->ip.ofs2 = 11; /* Blue bit offset */
314 params->ip.ofs3 = 16; /* Alpha bit offset */
315 params->ip.wid0 = 4; /* Red bit width - 1 */
316 params->ip.wid1 = 5; /* Green bit width - 1 */
317 params->ip.wid2 = 4; /* Blue bit width - 1 */
318 break;
319 case IPU_PIX_FMT_BGR24:
320 params->ip.bpp = 1; /* 24 BPP & RGB PFS */
321 params->ip.pfs = 4;
322 params->ip.npb = 7;
323 params->ip.sat = 2; /* SAT = 32-bit access */
324 params->ip.ofs0 = 0; /* Red bit offset */
325 params->ip.ofs1 = 8; /* Green bit offset */
326 params->ip.ofs2 = 16; /* Blue bit offset */
327 params->ip.ofs3 = 24; /* Alpha bit offset */
328 params->ip.wid0 = 7; /* Red bit width - 1 */
329 params->ip.wid1 = 7; /* Green bit width - 1 */
330 params->ip.wid2 = 7; /* Blue bit width - 1 */
331 break;
332 case IPU_PIX_FMT_RGB24:
333 params->ip.bpp = 1; /* 24 BPP & RGB PFS */
334 params->ip.pfs = 4;
335 params->ip.npb = 7;
336 params->ip.sat = 2; /* SAT = 32-bit access */
337 params->ip.ofs0 = 16; /* Red bit offset */
338 params->ip.ofs1 = 8; /* Green bit offset */
339 params->ip.ofs2 = 0; /* Blue bit offset */
340 params->ip.ofs3 = 24; /* Alpha bit offset */
341 params->ip.wid0 = 7; /* Red bit width - 1 */
342 params->ip.wid1 = 7; /* Green bit width - 1 */
343 params->ip.wid2 = 7; /* Blue bit width - 1 */
344 break;
345 case IPU_PIX_FMT_BGRA32:
346 case IPU_PIX_FMT_BGR32:
347 params->ip.bpp = 0;
348 params->ip.pfs = 4;
349 params->ip.npb = 7;
350 params->ip.sat = 2; /* SAT = 32-bit access */
351 params->ip.ofs0 = 8; /* Red bit offset */
352 params->ip.ofs1 = 16; /* Green bit offset */
353 params->ip.ofs2 = 24; /* Blue bit offset */
354 params->ip.ofs3 = 0; /* Alpha bit offset */
355 params->ip.wid0 = 7; /* Red bit width - 1 */
356 params->ip.wid1 = 7; /* Green bit width - 1 */
357 params->ip.wid2 = 7; /* Blue bit width - 1 */
358 params->ip.wid3 = 7; /* Alpha bit width - 1 */
359 break;
360 case IPU_PIX_FMT_RGBA32:
361 case IPU_PIX_FMT_RGB32:
362 params->ip.bpp = 0;
363 params->ip.pfs = 4;
364 params->ip.npb = 7;
365 params->ip.sat = 2; /* SAT = 32-bit access */
366 params->ip.ofs0 = 24; /* Red bit offset */
367 params->ip.ofs1 = 16; /* Green bit offset */
368 params->ip.ofs2 = 8; /* Blue bit offset */
369 params->ip.ofs3 = 0; /* Alpha bit offset */
370 params->ip.wid0 = 7; /* Red bit width - 1 */
371 params->ip.wid1 = 7; /* Green bit width - 1 */
372 params->ip.wid2 = 7; /* Blue bit width - 1 */
373 params->ip.wid3 = 7; /* Alpha bit width - 1 */
374 break;
375 case IPU_PIX_FMT_ABGR32:
376 params->ip.bpp = 0;
377 params->ip.pfs = 4;
378 params->ip.npb = 7;
379 params->ip.sat = 2; /* SAT = 32-bit access */
380 params->ip.ofs0 = 8; /* Red bit offset */
381 params->ip.ofs1 = 16; /* Green bit offset */
382 params->ip.ofs2 = 24; /* Blue bit offset */
383 params->ip.ofs3 = 0; /* Alpha bit offset */
384 params->ip.wid0 = 7; /* Red bit width - 1 */
385 params->ip.wid1 = 7; /* Green bit width - 1 */
386 params->ip.wid2 = 7; /* Blue bit width - 1 */
387 params->ip.wid3 = 7; /* Alpha bit width - 1 */
388 break;
389 case IPU_PIX_FMT_UYVY:
390 params->ip.bpp = 2;
391 params->ip.pfs = 6;
392 params->ip.npb = 7;
393 params->ip.sat = 2; /* SAT = 32-bit access */
394 break;
395 case IPU_PIX_FMT_YUV420P2:
396 case IPU_PIX_FMT_YUV420P:
397 params->ip.bpp = 3;
398 params->ip.pfs = 3;
399 params->ip.npb = 7;
400 params->ip.sat = 2; /* SAT = 32-bit access */
401 u_offset = stride * height;
402 v_offset = u_offset + u_offset / 4;
403 ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
404 break;
405 case IPU_PIX_FMT_YVU422P:
406 params->ip.bpp = 3;
407 params->ip.pfs = 2;
408 params->ip.npb = 7;
409 params->ip.sat = 2; /* SAT = 32-bit access */
410 v_offset = stride * height;
411 u_offset = v_offset + v_offset / 2;
412 ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
413 break;
414 case IPU_PIX_FMT_YUV422P:
415 params->ip.bpp = 3;
416 params->ip.pfs = 2;
417 params->ip.npb = 7;
418 params->ip.sat = 2; /* SAT = 32-bit access */
419 u_offset = stride * height;
420 v_offset = u_offset + u_offset / 2;
421 ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
422 break;
423 default:
424 dev_err(ipu_data.dev,
425 "mxc ipu: unimplemented pixel format %d\n", pixel_fmt);
426 break;
427 }
428
429 params->pp.nsb = 1;
430}
431
432static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
433 uint16_t burst_pixels)
434{
435 params->pp.npb = burst_pixels - 1;
436};
437
438static void ipu_ch_param_set_buffer(union chan_param_mem *params,
439 dma_addr_t buf0, dma_addr_t buf1)
440{
441 params->pp.eba0 = buf0;
442 params->pp.eba1 = buf1;
443};
444
445static void ipu_ch_param_set_rotation(union chan_param_mem *params,
446 enum ipu_rotate_mode rotate)
447{
448 params->pp.bam = rotate;
449};
450
451static void ipu_write_param_mem(uint32_t addr, uint32_t *data,
452 uint32_t num_words)
453{
454 for (; num_words > 0; num_words--) {
455 dev_dbg(ipu_data.dev,
456 "write param mem - addr = 0x%08X, data = 0x%08X\n",
457 addr, *data);
458 idmac_write_ipureg(&ipu_data, addr, IPU_IMA_ADDR);
459 idmac_write_ipureg(&ipu_data, *data++, IPU_IMA_DATA);
460 addr++;
461 if ((addr & 0x7) == 5) {
462 addr &= ~0x7; /* set to word 0 */
463 addr += 8; /* increment to next row */
464 }
465 }
466}
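/*
 * Editorial note (inferred from the code above, not part of the patch): each
 * parameter-memory row holds five valid 32-bit slots at offsets 0-4, after
 * which the "addr += 8" above skips to the next row. So the ten words of a
 * union chan_param_mem written through this helper end up as
 *
 *	ipu_write_param_mem(dma_param_addr(channel), (uint32_t *)&params, 10);
 *	// word 0 -> offsets 0..4 of the channel's first row
 *	// word 1 -> offsets 0..4 of the second row (base + 8), which is why
 *	//           ipu_update_channel_buffer() later writes the two buffer
 *	//           addresses at dma_param_addr(channel) + 0x0008 and + 0x0009
 */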
467
468static int calc_resize_coeffs(uint32_t in_size, uint32_t out_size,
469 uint32_t *resize_coeff,
470 uint32_t *downsize_coeff)
471{
472 uint32_t temp_size;
473 uint32_t temp_downsize;
474
475 *resize_coeff = 1 << 13;
476 *downsize_coeff = 1 << 13;
477
478 /* Cannot downsize more than 8:1 */
479 if (out_size << 3 < in_size)
480 return -EINVAL;
481
482 /* compute downsizing coefficient */
483 temp_downsize = 0;
484 temp_size = in_size;
485 while (temp_size >= out_size * 2 && temp_downsize < 2) {
486 temp_size >>= 1;
487 temp_downsize++;
488 }
489 *downsize_coeff = temp_downsize;
490
491 /*
492 * compute resizing coefficient using the following formula:
493 * resize_coeff = M*(SI -1)/(SO - 1)
494 * where M = 2^13, SI - input size, SO - output size
495 */
496 *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1);
497 if (*resize_coeff >= 16384L) {
498 dev_err(ipu_data.dev, "Warning! Overflow on resize coeff.\n");
499 *resize_coeff = 0x3FFF;
500 }
501
502 dev_dbg(ipu_data.dev, "resizing from %u -> %u pixels, "
503 "downsize=%u, resize=%u.%lu (reg=%u)\n", in_size, out_size,
504 *downsize_coeff, *resize_coeff >= 8192L ? 1 : 0,
505 ((*resize_coeff & 0x1FFF) * 10000L) / 8192L, *resize_coeff);
506
507 return 0;
508}
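/*
 * Editorial note (worked example, not part of the patch): downscaling
 * 640 -> 320 pixels with the function above gives
 *
 *	uint32_t resize, downsize;
 *	calc_resize_coeffs(640, 320, &resize, &downsize);
 *	// downsize == 1: one /2 pre-decimation step, 640 -> 320
 *	// resize   == 8192 * (320 - 1) / (320 - 1) == 8192, i.e. a 1.0 ratio
 *	//            after pre-decimation (M = 2^13 fixed point)
 *
 * ipu_ic_init_prpenc() below packs these coefficients into IC_PRP_ENC_RSC.
 */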
509
510static enum ipu_color_space format_to_colorspace(enum pixel_fmt fmt)
511{
512 switch (fmt) {
513 case IPU_PIX_FMT_RGB565:
514 case IPU_PIX_FMT_BGR24:
515 case IPU_PIX_FMT_RGB24:
516 case IPU_PIX_FMT_BGR32:
517 case IPU_PIX_FMT_RGB32:
518 return IPU_COLORSPACE_RGB;
519 default:
520 return IPU_COLORSPACE_YCBCR;
521 }
522}
523
524static int ipu_ic_init_prpenc(struct ipu *ipu,
525 union ipu_channel_param *params, bool src_is_csi)
526{
527 uint32_t reg, ic_conf;
528 uint32_t downsize_coeff, resize_coeff;
529 enum ipu_color_space in_fmt, out_fmt;
530
531 /* Setup vertical resizing */
532 calc_resize_coeffs(params->video.in_height,
533 params->video.out_height,
534 &resize_coeff, &downsize_coeff);
535 reg = (downsize_coeff << 30) | (resize_coeff << 16);
536
537 /* Setup horizontal resizing */
538 calc_resize_coeffs(params->video.in_width,
539 params->video.out_width,
540 &resize_coeff, &downsize_coeff);
541 reg |= (downsize_coeff << 14) | resize_coeff;
542
543 /* Setup color space conversion */
544 in_fmt = format_to_colorspace(params->video.in_pixel_fmt);
545 out_fmt = format_to_colorspace(params->video.out_pixel_fmt);
546
547 /*
548 * Colourspace conversion unsupported yet - see _init_csc() in
549 * Freescale sources
550 */
551 if (in_fmt != out_fmt) {
552 dev_err(ipu->dev, "Colourspace conversion unsupported!\n");
553 return -EOPNOTSUPP;
554 }
555
556 idmac_write_icreg(ipu, reg, IC_PRP_ENC_RSC);
557
558 ic_conf = idmac_read_icreg(ipu, IC_CONF);
559
560 if (src_is_csi)
561 ic_conf &= ~IC_CONF_RWS_EN;
562 else
563 ic_conf |= IC_CONF_RWS_EN;
564
565 idmac_write_icreg(ipu, ic_conf, IC_CONF);
566
567 return 0;
568}
569
570static uint32_t dma_param_addr(uint32_t dma_ch)
571{
572 /* Channel Parameter Memory */
573 return 0x10000 | (dma_ch << 4);
574};
575
576static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel,
577 bool prio)
578{
579 u32 reg = idmac_read_icreg(ipu, IDMAC_CHA_PRI);
580
581 if (prio)
582 reg |= 1UL << channel;
583 else
584 reg &= ~(1UL << channel);
585
586 idmac_write_icreg(ipu, reg, IDMAC_CHA_PRI);
587
588 dump_idmac_reg(ipu);
589}
590
591static uint32_t ipu_channel_conf_mask(enum ipu_channel channel)
592{
593 uint32_t mask;
594
595 switch (channel) {
596 case IDMAC_IC_0:
597 case IDMAC_IC_7:
598 mask = IPU_CONF_CSI_EN | IPU_CONF_IC_EN;
599 break;
600 case IDMAC_SDC_0:
601 case IDMAC_SDC_1:
602 mask = IPU_CONF_SDC_EN | IPU_CONF_DI_EN;
603 break;
604 default:
605 mask = 0;
606 break;
607 }
608
609 return mask;
610}
611
612/**
613 * ipu_enable_channel() - enable an IPU channel.
614 * @channel: channel ID.
615 * @return: 0 on success or negative error code on failure.
616 */
617static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
618{
619 struct ipu *ipu = to_ipu(idmac);
620 enum ipu_channel channel = ichan->dma_chan.chan_id;
621 uint32_t reg;
622 unsigned long flags;
623
624 spin_lock_irqsave(&ipu->lock, flags);
625
626 /* Reset to buffer 0 */
627 idmac_write_ipureg(ipu, 1UL << channel, IPU_CHA_CUR_BUF);
628 ichan->active_buffer = 0;
629 ichan->status = IPU_CHANNEL_ENABLED;
630
631 switch (channel) {
632 case IDMAC_SDC_0:
633 case IDMAC_SDC_1:
634 case IDMAC_IC_7:
635 ipu_channel_set_priority(ipu, channel, true);
636 default:
637 break;
638 }
639
640 reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
641
642 idmac_write_icreg(ipu, reg | (1UL << channel), IDMAC_CHA_EN);
643
644 ipu_ic_enable_task(ipu, channel);
645
646 spin_unlock_irqrestore(&ipu->lock, flags);
647 return 0;
648}
649
650/**
651 * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
652 * @channel: channel ID.
653 * @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code.
654 * @width: width of buffer in pixels.
655 * @height: height of buffer in pixels.
656 * @stride: stride length of buffer in pixels.
657 * @rot_mode: rotation mode of buffer. A rotation setting other than
658 * IPU_ROTATE_VERT_FLIP should only be used for input buffers of
659 * rotation channels.
660 * @phyaddr_0: buffer 0 physical address.
661 * @phyaddr_1: buffer 1 physical address. Setting this to a value other than
662 * NULL enables double buffering mode.
663 * @return: 0 on success or negative error code on failure.
664 */
665static int ipu_init_channel_buffer(struct idmac_channel *ichan,
666 enum pixel_fmt pixel_fmt,
667 uint16_t width, uint16_t height,
668 uint32_t stride,
669 enum ipu_rotate_mode rot_mode,
670 dma_addr_t phyaddr_0, dma_addr_t phyaddr_1)
671{
672 enum ipu_channel channel = ichan->dma_chan.chan_id;
673 struct idmac *idmac = to_idmac(ichan->dma_chan.device);
674 struct ipu *ipu = to_ipu(idmac);
675 union chan_param_mem params = {};
676 unsigned long flags;
677 uint32_t reg;
678 uint32_t stride_bytes;
679
680 stride_bytes = stride * bytes_per_pixel(pixel_fmt);
681
682 if (stride_bytes % 4) {
683 dev_err(ipu->dev,
684 "Stride length must be 32-bit aligned, stride = %d, bytes = %d\n",
685 stride, stride_bytes);
686 return -EINVAL;
687 }
688
689 /* IC channel's stride must be a multiple of 8 pixels */
690 if ((channel <= 13) && (stride % 8)) {
691 dev_err(ipu->dev, "Stride must be 8 pixel multiple\n");
692 return -EINVAL;
693 }
694
695 /* Build parameter memory data for DMA channel */
696 ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
697 ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
698 ipu_ch_param_set_rotation(&params, rot_mode);
699 /* Some channels (rotation) have restriction on burst length */
700 switch (channel) {
701 case IDMAC_IC_7: /* Hangs with burst 8, 16, other values
702 invalid - Table 44-30 */
703/*
704 ipu_ch_param_set_burst_size(&params, 8);
705 */
706 break;
707 case IDMAC_SDC_0:
708 case IDMAC_SDC_1:
709 /* In original code only IPU_PIX_FMT_RGB565 was setting burst */
710 ipu_ch_param_set_burst_size(&params, 16);
711 break;
712 case IDMAC_IC_0:
713 default:
714 break;
715 }
716
717 spin_lock_irqsave(&ipu->lock, flags);
718
719 ipu_write_param_mem(dma_param_addr(channel), (uint32_t *)&params, 10);
720
721 reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
722
723 if (phyaddr_1)
724 reg |= 1UL << channel;
725 else
726 reg &= ~(1UL << channel);
727
728 idmac_write_ipureg(ipu, reg, IPU_CHA_DB_MODE_SEL);
729
730 ichan->status = IPU_CHANNEL_READY;
731
732	spin_unlock_irqrestore(&ipu->lock, flags);
733
734 return 0;
735}
736
737/**
738 * ipu_select_buffer() - mark a channel's buffer as ready.
739 * @channel: channel ID.
740 * @buffer_n: buffer number to mark ready.
741 */
742static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
743{
744 /* No locking - this is a write-one-to-set register, cleared by IPU */
745 if (buffer_n == 0)
746 /* Mark buffer 0 as ready. */
747 idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF0_RDY);
748 else
749 /* Mark buffer 1 as ready. */
750 idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF1_RDY);
751}
752
753/**
754 * ipu_update_channel_buffer() - update physical address of a channel buffer.
755 * @channel: channel ID.
756 * @buffer_n: buffer number to update.
757 * 0 or 1 are the only valid values.
758 * @phyaddr: buffer physical address.
759 * @return: Returns 0 on success or negative error code on failure. This
760 * function will fail if the buffer is set to ready.
761 */
762/* Called under spin_lock(_irqsave)(&ichan->lock) */
763static int ipu_update_channel_buffer(enum ipu_channel channel,
764 int buffer_n, dma_addr_t phyaddr)
765{
766 uint32_t reg;
767 unsigned long flags;
768
769 spin_lock_irqsave(&ipu_data.lock, flags);
770
771 if (buffer_n == 0) {
772 reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
773 if (reg & (1UL << channel)) {
774 spin_unlock_irqrestore(&ipu_data.lock, flags);
775 return -EACCES;
776 }
777
778 /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */
779 idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
780 0x0008UL, IPU_IMA_ADDR);
781 idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
782 } else {
783 reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
784 if (reg & (1UL << channel)) {
785 spin_unlock_irqrestore(&ipu_data.lock, flags);
786 return -EACCES;
787 }
788
789 /* Check if double-buffering is already enabled */
790 reg = idmac_read_ipureg(&ipu_data, IPU_CHA_DB_MODE_SEL);
791
792 if (!(reg & (1UL << channel)))
793 idmac_write_ipureg(&ipu_data, reg | (1UL << channel),
794 IPU_CHA_DB_MODE_SEL);
795
796 /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 1) */
797 idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
798 0x0009UL, IPU_IMA_ADDR);
799 idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
800 }
801
802 spin_unlock_irqrestore(&ipu_data.lock, flags);
803
804 return 0;
805}
806
807/* Called under spin_lock_irqsave(&ichan->lock) */
808static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
809 struct idmac_tx_desc *desc)
810{
811 struct scatterlist *sg;
812 int i, ret = 0;
813
814 for (i = 0, sg = desc->sg; i < 2 && sg; i++) {
815 if (!ichan->sg[i]) {
816 ichan->sg[i] = sg;
817
818 /*
819 * On first invocation this shouldn't be necessary, the
820 * call to ipu_init_channel_buffer() above will set
821 * addresses for us, so we could make it conditional
822 * on status >= IPU_CHANNEL_ENABLED, but doing it again
823 * shouldn't hurt either.
824 */
825 ret = ipu_update_channel_buffer(ichan->dma_chan.chan_id, i,
826 sg_dma_address(sg));
827 if (ret < 0)
828 return ret;
829
830 ipu_select_buffer(ichan->dma_chan.chan_id, i);
831
832 sg = sg_next(sg);
833 }
834 }
835
836 return ret;
837}
838
839static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
840{
841 struct idmac_tx_desc *desc = to_tx_desc(tx);
842 struct idmac_channel *ichan = to_idmac_chan(tx->chan);
843 struct idmac *idmac = to_idmac(tx->chan->device);
844 struct ipu *ipu = to_ipu(idmac);
845 dma_cookie_t cookie;
846 unsigned long flags;
847
848 /* Sanity check */
849 if (!list_empty(&desc->list)) {
850 /* The descriptor doesn't belong to client */
851 dev_err(&ichan->dma_chan.dev->device,
852 "Descriptor %p not prepared!\n", tx);
853 return -EBUSY;
854 }
855
856 mutex_lock(&ichan->chan_mutex);
857
858 if (ichan->status < IPU_CHANNEL_READY) {
859 struct idmac_video_param *video = &ichan->params.video;
860 /*
861 * Initial buffer assignment - the first two sg-entries from
862 * the descriptor will end up in the IDMAC buffers
863 */
864 dma_addr_t dma_1 = sg_is_last(desc->sg) ? 0 :
865 sg_dma_address(&desc->sg[1]);
866
867 WARN_ON(ichan->sg[0] || ichan->sg[1]);
868
869 cookie = ipu_init_channel_buffer(ichan,
870 video->out_pixel_fmt,
871 video->out_width,
872 video->out_height,
873 video->out_stride,
874 IPU_ROTATE_NONE,
875 sg_dma_address(&desc->sg[0]),
876 dma_1);
877 if (cookie < 0)
878 goto out;
879 }
880
881 /* ipu->lock can be taken under ichan->lock, but not v.v. */
882 spin_lock_irqsave(&ichan->lock, flags);
883
884 /* submit_buffers() atomically verifies and fills empty sg slots */
885 cookie = ipu_submit_channel_buffers(ichan, desc);
886
887 spin_unlock_irqrestore(&ichan->lock, flags);
888
889 if (cookie < 0)
890 goto out;
891
892 cookie = ichan->dma_chan.cookie;
893
894 if (++cookie < 0)
895 cookie = 1;
896
897 /* from dmaengine.h: "last cookie value returned to client" */
898 ichan->dma_chan.cookie = cookie;
899 tx->cookie = cookie;
900 spin_lock_irqsave(&ichan->lock, flags);
901 list_add_tail(&desc->list, &ichan->queue);
902 spin_unlock_irqrestore(&ichan->lock, flags);
903
904 if (ichan->status < IPU_CHANNEL_ENABLED) {
905 int ret = ipu_enable_channel(idmac, ichan);
906 if (ret < 0) {
907 cookie = ret;
908 spin_lock_irqsave(&ichan->lock, flags);
909 list_del_init(&desc->list);
910 spin_unlock_irqrestore(&ichan->lock, flags);
911 tx->cookie = cookie;
912 ichan->dma_chan.cookie = cookie;
913 }
914 }
915
916 dump_idmac_reg(ipu);
917
918out:
919 mutex_unlock(&ichan->chan_mutex);
920
921 return cookie;
922}
923
924/* Called with ichan->chan_mutex held */
925static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
926{
927 struct idmac_tx_desc *desc = vmalloc(n * sizeof(struct idmac_tx_desc));
928 struct idmac *idmac = to_idmac(ichan->dma_chan.device);
929
930 if (!desc)
931 return -ENOMEM;
932
933 /* No interrupts, just disable the tasklet for a moment */
934 tasklet_disable(&to_ipu(idmac)->tasklet);
935
936 ichan->n_tx_desc = n;
937 ichan->desc = desc;
938 INIT_LIST_HEAD(&ichan->queue);
939 INIT_LIST_HEAD(&ichan->free_list);
940
941 while (n--) {
942 struct dma_async_tx_descriptor *txd = &desc->txd;
943
944 memset(txd, 0, sizeof(*txd));
945 dma_async_tx_descriptor_init(txd, &ichan->dma_chan);
946 txd->tx_submit = idmac_tx_submit;
947 txd->chan = &ichan->dma_chan;
948 INIT_LIST_HEAD(&txd->tx_list);
949
950 list_add(&desc->list, &ichan->free_list);
951
952 desc++;
953 }
954
955 tasklet_enable(&to_ipu(idmac)->tasklet);
956
957 return 0;
958}
959
960/**
961 * ipu_init_channel() - initialize an IPU channel.
962 * @idmac: IPU DMAC context.
963 * @ichan: pointer to the channel object.
964 * @return 0 on success or negative error code on failure.
965 */
966static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan)
967{
968 union ipu_channel_param *params = &ichan->params;
969 uint32_t ipu_conf;
970 enum ipu_channel channel = ichan->dma_chan.chan_id;
971 unsigned long flags;
972 uint32_t reg;
973 struct ipu *ipu = to_ipu(idmac);
974 int ret = 0, n_desc = 0;
975
976 dev_dbg(ipu->dev, "init channel = %d\n", channel);
977
978 if (channel != IDMAC_SDC_0 && channel != IDMAC_SDC_1 &&
979 channel != IDMAC_IC_7)
980 return -EINVAL;
981
982 spin_lock_irqsave(&ipu->lock, flags);
983
984 switch (channel) {
985 case IDMAC_IC_7:
986 n_desc = 16;
987 reg = idmac_read_icreg(ipu, IC_CONF);
988 idmac_write_icreg(ipu, reg & ~IC_CONF_CSI_MEM_WR_EN, IC_CONF);
989 break;
990 case IDMAC_IC_0:
991 n_desc = 16;
992 reg = idmac_read_ipureg(ipu, IPU_FS_PROC_FLOW);
993 idmac_write_ipureg(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW);
994 ret = ipu_ic_init_prpenc(ipu, params, true);
995 break;
996 case IDMAC_SDC_0:
997 case IDMAC_SDC_1:
998 n_desc = 4;
999 default:
1000 break;
1001 }
1002
1003 ipu->channel_init_mask |= 1L << channel;
1004
1005 /* Enable IPU sub module */
1006 ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) |
1007 ipu_channel_conf_mask(channel);
1008 idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);
1009
1010 spin_unlock_irqrestore(&ipu->lock, flags);
1011
1012 if (n_desc && !ichan->desc)
1013 ret = idmac_desc_alloc(ichan, n_desc);
1014
1015 dump_idmac_reg(ipu);
1016
1017 return ret;
1018}
1019
1020/**
1021 * ipu_uninit_channel() - uninitialize an IPU channel.
1022 * @idmac: IPU DMAC context.
1023 * @ichan: pointer to the channel object.
1024 */
1025static void ipu_uninit_channel(struct idmac *idmac, struct idmac_channel *ichan)
1026{
1027 enum ipu_channel channel = ichan->dma_chan.chan_id;
1028 unsigned long flags;
1029 uint32_t reg;
1030 unsigned long chan_mask = 1UL << channel;
1031 uint32_t ipu_conf;
1032 struct ipu *ipu = to_ipu(idmac);
1033
1034 spin_lock_irqsave(&ipu->lock, flags);
1035
1036 if (!(ipu->channel_init_mask & chan_mask)) {
1037 dev_err(ipu->dev, "Channel already uninitialized %d\n",
1038 channel);
1039 spin_unlock_irqrestore(&ipu->lock, flags);
1040 return;
1041 }
1042
1043 /* Reset the double buffer */
1044 reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
1045 idmac_write_ipureg(ipu, reg & ~chan_mask, IPU_CHA_DB_MODE_SEL);
1046
1047 ichan->sec_chan_en = false;
1048
1049 switch (channel) {
1050 case IDMAC_IC_7:
1051 reg = idmac_read_icreg(ipu, IC_CONF);
1052 idmac_write_icreg(ipu, reg & ~(IC_CONF_RWS_EN | IC_CONF_PRPENC_EN),
1053 IC_CONF);
1054 break;
1055 case IDMAC_IC_0:
1056 reg = idmac_read_icreg(ipu, IC_CONF);
1057 idmac_write_icreg(ipu, reg & ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1),
1058 IC_CONF);
1059 break;
1060 case IDMAC_SDC_0:
1061 case IDMAC_SDC_1:
1062 default:
1063 break;
1064 }
1065
1066 ipu->channel_init_mask &= ~(1L << channel);
1067
1068 ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) &
1069 ~ipu_channel_conf_mask(channel);
1070 idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);
1071
1072 spin_unlock_irqrestore(&ipu->lock, flags);
1073
1074 ichan->n_tx_desc = 0;
1075 vfree(ichan->desc);
1076 ichan->desc = NULL;
1077}
1078
1079/**
1080 * ipu_disable_channel() - disable an IPU channel.
1081 * @idmac: IPU DMAC context.
1082 * @ichan: channel object pointer.
1083 * @wait_for_stop: flag to set whether to wait for channel end of frame or
1084 * return immediately.
1085 * @return: 0 on success or negative error code on failure.
1086 */
1087static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
1088 bool wait_for_stop)
1089{
1090 enum ipu_channel channel = ichan->dma_chan.chan_id;
1091 struct ipu *ipu = to_ipu(idmac);
1092 uint32_t reg;
1093 unsigned long flags;
1094 unsigned long chan_mask = 1UL << channel;
1095 unsigned int timeout;
1096
1097 if (wait_for_stop && channel != IDMAC_SDC_1 && channel != IDMAC_SDC_0) {
1098 timeout = 40;
1099 /* This waiting always fails. Related to spurious irq problem */
1100 while ((idmac_read_icreg(ipu, IDMAC_CHA_BUSY) & chan_mask) ||
1101 (ipu_channel_status(ipu, channel) == TASK_STAT_ACTIVE)) {
1102 timeout--;
1103 msleep(10);
1104
1105 if (!timeout) {
1106 dev_dbg(ipu->dev,
1107 "Warning: timeout waiting for channel %u to "
1108 "stop: buf0_rdy = 0x%08X, buf1_rdy = 0x%08X, "
1109 "busy = 0x%08X, tstat = 0x%08X\n", channel,
1110 idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
1111 idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
1112 idmac_read_icreg(ipu, IDMAC_CHA_BUSY),
1113 idmac_read_ipureg(ipu, IPU_TASKS_STAT));
1114 break;
1115 }
1116 }
1117 dev_dbg(ipu->dev, "timeout = %d * 10ms\n", 40 - timeout);
1118 }
1119 /* SDC BG and FG must be disabled before DMA is disabled */
1120 if (wait_for_stop && (channel == IDMAC_SDC_0 ||
1121 channel == IDMAC_SDC_1)) {
1122 for (timeout = 5;
1123 timeout && !ipu_irq_status(ichan->eof_irq); timeout--)
1124 msleep(5);
1125 }
1126
1127 spin_lock_irqsave(&ipu->lock, flags);
1128
1129 /* Disable IC task */
1130 ipu_ic_disable_task(ipu, channel);
1131
1132 /* Disable DMA channel(s) */
1133 reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
1134 idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
1135
1136 /*
1137 * Problem (observed with channel DMAIC_7): after enabling the channel
1138 * and initialising buffers, there comes an interrupt with current still
1139 * pointing at buffer 0, whereas it should use buffer 0 first and only
1140 * generate an interrupt when it is done, then current should already
1141 * point to buffer 1. This spurious interrupt also comes on channel
1142 * DMASDC_0. With DMAIC_7 normally, if we just leave the ISR after the
1143 * first interrupt, there comes the second with current correctly
1144 * pointing to buffer 1 this time. But sometimes this second interrupt
1145 * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling
1146 * the channel seems to prevent the channel from hanging, but it doesn't
1147 * prevent the spurious interrupt. This might also be unsafe. Think
1148 * about the IDMAC controller trying to switch to a buffer, when we
1149 * clear the ready bit, and re-enable it a moment later.
1150 */
1151 reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY);
1152 idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY);
1153 idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY);
1154
1155 reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY);
1156 idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY);
1157 idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY);
1158
1159 spin_unlock_irqrestore(&ipu->lock, flags);
1160
1161 return 0;
1162}
1163
1164/*
1165 * We have several possibilities here:
1166 * current BUF next BUF
1167 *
1168 * not last sg next not last sg
1169 * not last sg next last sg
1170 * last sg first sg from next descriptor
1171 * last sg NULL
1172 *
1173 * Besides, the descriptor queue might be empty or not. We process all these
1174 * cases carefully.
1175 */
1176static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1177{
1178 struct idmac_channel *ichan = dev_id;
1179 unsigned int chan_id = ichan->dma_chan.chan_id;
1180 struct scatterlist **sg, *sgnext, *sgnew = NULL;
1181 /* Next transfer descriptor */
1182 struct idmac_tx_desc *desc = NULL, *descnew;
1183 dma_async_tx_callback callback;
1184 void *callback_param;
1185 bool done = false;
1186 u32 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY),
1187 ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY),
1188 curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
1189
1190 /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
1191
1192 pr_debug("IDMAC irq %d\n", irq);
1193 /* Other interrupts do not interfere with this channel */
1194 spin_lock(&ichan->lock);
1195
1196 if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
1197 ((curbuf >> chan_id) & 1) == ichan->active_buffer)) {
1198 int i = 100;
1199
1200 /* This doesn't help. See comment in ipu_disable_channel() */
1201 while (--i) {
1202 curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
1203 if (((curbuf >> chan_id) & 1) != ichan->active_buffer)
1204 break;
1205 cpu_relax();
1206 }
1207
1208 if (!i) {
1209 spin_unlock(&ichan->lock);
1210 dev_dbg(ichan->dma_chan.device->dev,
1211 "IRQ on active buffer on channel %x, active "
1212 "%d, ready %x, %x, current %x!\n", chan_id,
1213 ichan->active_buffer, ready0, ready1, curbuf);
1214 return IRQ_NONE;
1215 }
1216 }
1217
1218 if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
1219 (!ichan->active_buffer && (ready0 >> chan_id) & 1)
1220 )) {
1221 spin_unlock(&ichan->lock);
1222 dev_dbg(ichan->dma_chan.device->dev,
1223 "IRQ with active buffer still ready on channel %x, "
1224 "active %d, ready %x, %x!\n", chan_id,
1225 ichan->active_buffer, ready0, ready1);
1226 return IRQ_NONE;
1227 }
1228
1229 if (unlikely(list_empty(&ichan->queue))) {
1230 spin_unlock(&ichan->lock);
1231 dev_err(ichan->dma_chan.device->dev,
1232 "IRQ without queued buffers on channel %x, active %d, "
1233 "ready %x, %x!\n", chan_id,
1234 ichan->active_buffer, ready0, ready1);
1235 return IRQ_NONE;
1236 }
1237
1238 /*
1239 * active_buffer is a software flag, it shows which buffer we are
1240 * currently expecting back from the hardware, IDMAC should be
1241 * processing the other buffer already
1242 */
1243 sg = &ichan->sg[ichan->active_buffer];
1244 sgnext = ichan->sg[!ichan->active_buffer];
1245
1246 /*
1247 * if sgnext == NULL sg must be the last element in a scatterlist and
1248 * queue must be empty
1249 */
1250 if (unlikely(!sgnext)) {
1251 if (unlikely(sg_next(*sg))) {
1252 dev_err(ichan->dma_chan.device->dev,
1253 "Broken buffer-update locking on channel %x!\n",
1254 chan_id);
1255 /* We'll let the user catch up */
1256 } else {
1257 /* Underrun */
1258 ipu_ic_disable_task(&ipu_data, chan_id);
1259 dev_dbg(ichan->dma_chan.device->dev,
1260 "Underrun on channel %x\n", chan_id);
1261 ichan->status = IPU_CHANNEL_READY;
1262 /* Continue to check for complete descriptor */
1263 }
1264 }
1265
1266 desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
1267
1268 /* First calculate and submit the next sg element */
1269 if (likely(sgnext))
1270 sgnew = sg_next(sgnext);
1271
1272 if (unlikely(!sgnew)) {
1273 /* Start a new scatterlist, if any queued */
1274 if (likely(desc->list.next != &ichan->queue)) {
1275 descnew = list_entry(desc->list.next,
1276 struct idmac_tx_desc, list);
1277 sgnew = &descnew->sg[0];
1278 }
1279 }
1280
1281 if (unlikely(!sg_next(*sg)) || !sgnext) {
1282 /*
1283 * Last element in scatterlist done, remove from the queue,
1284 * _init for debugging
1285 */
1286 list_del_init(&desc->list);
1287 done = true;
1288 }
1289
1290 *sg = sgnew;
1291
1292 if (likely(sgnew)) {
1293 int ret;
1294
1295 ret = ipu_update_channel_buffer(chan_id, ichan->active_buffer,
1296 sg_dma_address(*sg));
1297 if (ret < 0)
1298 dev_err(ichan->dma_chan.device->dev,
1299 "Failed to update buffer on channel %x buffer %d!\n",
1300 chan_id, ichan->active_buffer);
1301 else
1302 ipu_select_buffer(chan_id, ichan->active_buffer);
1303 }
1304
1305 /* Flip the active buffer - even if update above failed */
1306 ichan->active_buffer = !ichan->active_buffer;
1307 if (done)
1308 ichan->completed = desc->txd.cookie;
1309
1310 callback = desc->txd.callback;
1311 callback_param = desc->txd.callback_param;
1312
1313 spin_unlock(&ichan->lock);
1314
1315 if (done && (desc->txd.flags & DMA_PREP_INTERRUPT) && callback)
1316 callback(callback_param);
1317
1318 return IRQ_HANDLED;
1319}
1320
1321static void ipu_gc_tasklet(unsigned long arg)
1322{
1323 struct ipu *ipu = (struct ipu *)arg;
1324 int i;
1325
1326 for (i = 0; i < IPU_CHANNELS_NUM; i++) {
1327 struct idmac_channel *ichan = ipu->channel + i;
1328 struct idmac_tx_desc *desc;
1329 unsigned long flags;
1330 int j;
1331
1332 for (j = 0; j < ichan->n_tx_desc; j++) {
1333 desc = ichan->desc + j;
1334 spin_lock_irqsave(&ichan->lock, flags);
1335 if (async_tx_test_ack(&desc->txd)) {
1336 list_move(&desc->list, &ichan->free_list);
1337 async_tx_clear_ack(&desc->txd);
1338 }
1339 spin_unlock_irqrestore(&ichan->lock, flags);
1340 }
1341 }
1342}
1343
1344/*
1345 * At the time the .device_alloc_chan_resources() method is called, we cannot
1346 * know whether the client will accept the channel. Thus we can only check
1347 * whether we can satisfy the client's request; the only real criterion for
1348 * whether the client has accepted our offer is the client_count. That's why we
1349 * have to perform the rest of our allocation tasks on the first call to this function.
1350 */
1351static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
1352 struct scatterlist *sgl, unsigned int sg_len,
1353 enum dma_data_direction direction, unsigned long tx_flags)
1354{
1355 struct idmac_channel *ichan = to_idmac_chan(chan);
1356 struct idmac_tx_desc *desc = NULL;
1357 struct dma_async_tx_descriptor *txd = NULL;
1358 unsigned long flags;
1359
1360 /* We only can handle these three channels so far */
1361 if (ichan->dma_chan.chan_id != IDMAC_SDC_0 && ichan->dma_chan.chan_id != IDMAC_SDC_1 &&
1362 ichan->dma_chan.chan_id != IDMAC_IC_7)
1363 return NULL;
1364
1365 if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
1366 dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
1367 return NULL;
1368 }
1369
1370 mutex_lock(&ichan->chan_mutex);
1371
1372 spin_lock_irqsave(&ichan->lock, flags);
1373 if (!list_empty(&ichan->free_list)) {
1374 desc = list_entry(ichan->free_list.next,
1375 struct idmac_tx_desc, list);
1376
1377 list_del_init(&desc->list);
1378
1379 desc->sg_len = sg_len;
1380 desc->sg = sgl;
1381 txd = &desc->txd;
1382 txd->flags = tx_flags;
1383 }
1384 spin_unlock_irqrestore(&ichan->lock, flags);
1385
1386 mutex_unlock(&ichan->chan_mutex);
1387
1388 tasklet_schedule(&to_ipu(to_idmac(chan->device))->tasklet);
1389
1390 return txd;
1391}
1392
1393/* Re-select the current buffer and re-activate the channel */
1394static void idmac_issue_pending(struct dma_chan *chan)
1395{
1396 struct idmac_channel *ichan = to_idmac_chan(chan);
1397 struct idmac *idmac = to_idmac(chan->device);
1398 struct ipu *ipu = to_ipu(idmac);
1399 unsigned long flags;
1400
1401 /* This is not always needed, but doesn't hurt either */
1402 spin_lock_irqsave(&ipu->lock, flags);
1403 ipu_select_buffer(ichan->dma_chan.chan_id, ichan->active_buffer);
1404 spin_unlock_irqrestore(&ipu->lock, flags);
1405
1406 /*
1407 * Might need to perform some parts of initialisation from
1408 * ipu_enable_channel(), but not all, we do not want to reset to buffer
1409 * 0, don't need to set priority again either, but re-enabling the task
1410 * and the channel might be a good idea.
1411 */
1412}
1413
1414static void __idmac_terminate_all(struct dma_chan *chan)
1415{
1416 struct idmac_channel *ichan = to_idmac_chan(chan);
1417 struct idmac *idmac = to_idmac(chan->device);
1418 unsigned long flags;
1419 int i;
1420
1421 ipu_disable_channel(idmac, ichan,
1422 ichan->status >= IPU_CHANNEL_ENABLED);
1423
1424 tasklet_disable(&to_ipu(idmac)->tasklet);
1425
1426 /* ichan->queue is modified in ISR, have to spinlock */
1427 spin_lock_irqsave(&ichan->lock, flags);
1428 list_splice_init(&ichan->queue, &ichan->free_list);
1429
1430 if (ichan->desc)
1431 for (i = 0; i < ichan->n_tx_desc; i++) {
1432 struct idmac_tx_desc *desc = ichan->desc + i;
1433 if (list_empty(&desc->list))
1434 /* Descriptor was prepared, but not submitted */
1435 list_add(&desc->list,
1436 &ichan->free_list);
1437
1438 async_tx_clear_ack(&desc->txd);
1439 }
1440
1441 ichan->sg[0] = NULL;
1442 ichan->sg[1] = NULL;
1443 spin_unlock_irqrestore(&ichan->lock, flags);
1444
1445 tasklet_enable(&to_ipu(idmac)->tasklet);
1446
1447 ichan->status = IPU_CHANNEL_INITIALIZED;
1448}
1449
1450static void idmac_terminate_all(struct dma_chan *chan)
1451{
1452 struct idmac_channel *ichan = to_idmac_chan(chan);
1453
1454 mutex_lock(&ichan->chan_mutex);
1455
1456 __idmac_terminate_all(chan);
1457
1458 mutex_unlock(&ichan->chan_mutex);
1459}
1460
1461static int idmac_alloc_chan_resources(struct dma_chan *chan)
1462{
1463 struct idmac_channel *ichan = to_idmac_chan(chan);
1464 struct idmac *idmac = to_idmac(chan->device);
1465 int ret;
1466
1467 /* dmaengine.c now guarantees to only offer free channels */
1468 BUG_ON(chan->client_count > 1);
1469 WARN_ON(ichan->status != IPU_CHANNEL_FREE);
1470
1471 chan->cookie = 1;
1472 ichan->completed = -ENXIO;
1473
1474 ret = ipu_irq_map(ichan->dma_chan.chan_id);
1475 if (ret < 0)
1476 goto eimap;
1477
1478 ichan->eof_irq = ret;
1479 ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
1480 ichan->eof_name, ichan);
1481 if (ret < 0)
1482 goto erirq;
1483
1484 ret = ipu_init_channel(idmac, ichan);
1485 if (ret < 0)
1486 goto eichan;
1487
1488 ichan->status = IPU_CHANNEL_INITIALIZED;
1489
1490 dev_dbg(&ichan->dma_chan.dev->device, "Found channel 0x%x, irq %d\n",
1491 ichan->dma_chan.chan_id, ichan->eof_irq);
1492
1493 return ret;
1494
1495eichan:
1496 free_irq(ichan->eof_irq, ichan);
1497erirq:
1498 ipu_irq_unmap(ichan->dma_chan.chan_id);
1499eimap:
1500 return ret;
1501}
1502
1503static void idmac_free_chan_resources(struct dma_chan *chan)
1504{
1505 struct idmac_channel *ichan = to_idmac_chan(chan);
1506 struct idmac *idmac = to_idmac(chan->device);
1507
1508 mutex_lock(&ichan->chan_mutex);
1509
1510 __idmac_terminate_all(chan);
1511
1512 if (ichan->status > IPU_CHANNEL_FREE) {
1513 free_irq(ichan->eof_irq, ichan);
1514 ipu_irq_unmap(ichan->dma_chan.chan_id);
1515 }
1516
1517 ichan->status = IPU_CHANNEL_FREE;
1518
1519 ipu_uninit_channel(idmac, ichan);
1520
1521 mutex_unlock(&ichan->chan_mutex);
1522
1523 tasklet_schedule(&to_ipu(idmac)->tasklet);
1524}
1525
1526static enum dma_status idmac_is_tx_complete(struct dma_chan *chan,
1527 dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
1528{
1529 struct idmac_channel *ichan = to_idmac_chan(chan);
1530
1531 if (done)
1532 *done = ichan->completed;
1533 if (used)
1534 *used = chan->cookie;
1535 if (cookie != chan->cookie)
1536 return DMA_ERROR;
1537 return DMA_SUCCESS;
1538}
1539
1540static int __init ipu_idmac_init(struct ipu *ipu)
1541{
1542 struct idmac *idmac = &ipu->idmac;
1543 struct dma_device *dma = &idmac->dma;
1544 int i;
1545
1546 dma_cap_set(DMA_SLAVE, dma->cap_mask);
1547 dma_cap_set(DMA_PRIVATE, dma->cap_mask);
1548
1549 /* Compulsory common fields */
1550 dma->dev = ipu->dev;
1551 dma->device_alloc_chan_resources = idmac_alloc_chan_resources;
1552 dma->device_free_chan_resources = idmac_free_chan_resources;
1553 dma->device_is_tx_complete = idmac_is_tx_complete;
1554 dma->device_issue_pending = idmac_issue_pending;
1555
1556 /* Compulsory for DMA_SLAVE fields */
1557 dma->device_prep_slave_sg = idmac_prep_slave_sg;
1558 dma->device_terminate_all = idmac_terminate_all;
1559
1560 INIT_LIST_HEAD(&dma->channels);
1561 for (i = 0; i < IPU_CHANNELS_NUM; i++) {
1562 struct idmac_channel *ichan = ipu->channel + i;
1563 struct dma_chan *dma_chan = &ichan->dma_chan;
1564
1565 spin_lock_init(&ichan->lock);
1566 mutex_init(&ichan->chan_mutex);
1567
1568 ichan->status = IPU_CHANNEL_FREE;
1569 ichan->sec_chan_en = false;
1570 ichan->completed = -ENXIO;
1571 snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i);
1572
1573 dma_chan->device = &idmac->dma;
1574 dma_chan->cookie = 1;
1575 dma_chan->chan_id = i;
1576 list_add_tail(&ichan->dma_chan.device_node, &dma->channels);
1577 }
1578
1579 idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF);
1580
1581 return dma_async_device_register(&idmac->dma);
1582}
1583
1584static void ipu_idmac_exit(struct ipu *ipu)
1585{
1586 int i;
1587 struct idmac *idmac = &ipu->idmac;
1588
1589 for (i = 0; i < IPU_CHANNELS_NUM; i++) {
1590 struct idmac_channel *ichan = ipu->channel + i;
1591
1592 idmac_terminate_all(&ichan->dma_chan);
1593 idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0);
1594 }
1595
1596 dma_async_device_unregister(&idmac->dma);
1597}
1598
1599/*****************************************************************************
1600 * IPU common probe / remove
1601 */
1602
1603static int ipu_probe(struct platform_device *pdev)
1604{
1605 struct ipu_platform_data *pdata = pdev->dev.platform_data;
1606 struct resource *mem_ipu, *mem_ic;
1607 int ret;
1608
1609 spin_lock_init(&ipu_data.lock);
1610
1611 mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1612 mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1613 if (!pdata || !mem_ipu || !mem_ic)
1614 return -EINVAL;
1615
1616 ipu_data.dev = &pdev->dev;
1617
1618 platform_set_drvdata(pdev, &ipu_data);
1619
1620 ret = platform_get_irq(pdev, 0);
1621 if (ret < 0)
1622 goto err_noirq;
1623
1624 ipu_data.irq_fn = ret;
1625 ret = platform_get_irq(pdev, 1);
1626 if (ret < 0)
1627 goto err_noirq;
1628
1629 ipu_data.irq_err = ret;
1630 ipu_data.irq_base = pdata->irq_base;
1631
1632 dev_dbg(&pdev->dev, "fn irq %u, err irq %u, irq-base %u\n",
1633 ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base);
1634
1635 /* Remap IPU common registers */
1636 ipu_data.reg_ipu = ioremap(mem_ipu->start,
1637 mem_ipu->end - mem_ipu->start + 1);
1638 if (!ipu_data.reg_ipu) {
1639 ret = -ENOMEM;
1640 goto err_ioremap_ipu;
1641 }
1642
1643 /* Remap Image Converter and Image DMA Controller registers */
1644 ipu_data.reg_ic = ioremap(mem_ic->start,
1645 mem_ic->end - mem_ic->start + 1);
1646 if (!ipu_data.reg_ic) {
1647 ret = -ENOMEM;
1648 goto err_ioremap_ic;
1649 }
1650
1651 /* Get IPU clock */
1652 ipu_data.ipu_clk = clk_get(&pdev->dev, "ipu_clk");
1653 if (IS_ERR(ipu_data.ipu_clk)) {
1654 ret = PTR_ERR(ipu_data.ipu_clk);
1655 goto err_clk_get;
1656 }
1657
1658 /* Make sure IPU HSP clock is running */
1659 clk_enable(ipu_data.ipu_clk);
1660
1661 /* Disable all interrupts */
1662 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1);
1663 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_2);
1664 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_3);
1665 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_4);
1666 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_5);
1667
1668 dev_dbg(&pdev->dev, "%s @ 0x%08lx, fn irq %u, err irq %u\n", pdev->name,
1669 (unsigned long)mem_ipu->start, ipu_data.irq_fn, ipu_data.irq_err);
1670
1671 ret = ipu_irq_attach_irq(&ipu_data, pdev);
1672 if (ret < 0)
1673 goto err_attach_irq;
1674
1675 /* Initialize DMA engine */
1676 ret = ipu_idmac_init(&ipu_data);
1677 if (ret < 0)
1678 goto err_idmac_init;
1679
1680 tasklet_init(&ipu_data.tasklet, ipu_gc_tasklet, (unsigned long)&ipu_data);
1681
1682 ipu_data.dev = &pdev->dev;
1683
1684 dev_dbg(ipu_data.dev, "IPU initialized\n");
1685
1686 return 0;
1687
1688err_idmac_init:
1689err_attach_irq:
1690 ipu_irq_detach_irq(&ipu_data, pdev);
1691 clk_disable(ipu_data.ipu_clk);
1692 clk_put(ipu_data.ipu_clk);
1693err_clk_get:
1694 iounmap(ipu_data.reg_ic);
1695err_ioremap_ic:
1696 iounmap(ipu_data.reg_ipu);
1697err_ioremap_ipu:
1698err_noirq:
1699 dev_err(&pdev->dev, "Failed to probe IPU: %d\n", ret);
1700 return ret;
1701}
1702
1703static int ipu_remove(struct platform_device *pdev)
1704{
1705 struct ipu *ipu = platform_get_drvdata(pdev);
1706
1707 ipu_idmac_exit(ipu);
1708 ipu_irq_detach_irq(ipu, pdev);
1709 clk_disable(ipu->ipu_clk);
1710 clk_put(ipu->ipu_clk);
1711 iounmap(ipu->reg_ic);
1712 iounmap(ipu->reg_ipu);
1713 tasklet_kill(&ipu->tasklet);
1714 platform_set_drvdata(pdev, NULL);
1715
1716 return 0;
1717}
1718
1719/*
1720 * We need two MEM resources - with IPU-common and Image Converter registers,
1721 * including PF_CONF and IDMAC_* registers, and two IRQs - function and error
1722 */
1723static struct platform_driver ipu_platform_driver = {
1724 .driver = {
1725 .name = "ipu-core",
1726 .owner = THIS_MODULE,
1727 },
1728 .remove = ipu_remove,
1729};
1730
1731static int __init ipu_init(void)
1732{
1733 return platform_driver_probe(&ipu_platform_driver, ipu_probe);
1734}
1735subsys_initcall(ipu_init);
1736
1737MODULE_DESCRIPTION("IPU core driver");
1738MODULE_LICENSE("GPL v2");
1739MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
1740MODULE_ALIAS("platform:ipu-core");
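Editorial note: the driver above only registers with the dmaengine core and is driven entirely through the generic slave API. A hedged sketch of how a client might use one of these channels follows; it is not taken from the patch (real users such as the mx3 framebuffer and camera drivers also fill in the channel's idmac_video_param before submitting, and the filter criterion shown is only an assumption):

	#include <linux/dmaengine.h>
	#include <linux/scatterlist.h>

	static bool idmac_chan_filter(struct dma_chan *chan, void *arg)
	{
		int *wanted = arg;

		/* Pick one specific IDMAC channel, e.g. IDMAC_SDC_0 */
		return chan->chan_id == *wanted;
	}

	static int example_queue_frame(struct scatterlist *sgl, unsigned int sg_len)
	{
		dma_cap_mask_t mask;
		struct dma_chan *chan;
		struct dma_async_tx_descriptor *desc;
		int wanted = IDMAC_SDC_0;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, idmac_chan_filter, &wanted);
		if (!chan)
			return -ENODEV;

		/* Ends up in idmac_prep_slave_sg() above */
		desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
							  DMA_FROM_DEVICE,
							  DMA_PREP_INTERRUPT);
		if (!desc) {
			dma_release_channel(chan);
			return -EINVAL;
		}

		/* The first submission also initialises the channel buffers,
		 * see idmac_tx_submit() above */
		desc->tx_submit(desc);
		chan->device->device_issue_pending(chan);

		return 0;
	}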
diff --git a/drivers/dma/ipu/ipu_intern.h b/drivers/dma/ipu/ipu_intern.h
new file mode 100644
index 000000000000..545cf11a94ab
--- /dev/null
+++ b/drivers/dma/ipu/ipu_intern.h
@@ -0,0 +1,176 @@
1/*
2 * Copyright (C) 2008
3 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
4 *
5 * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _IPU_INTERN_H_
13#define _IPU_INTERN_H_
14
15#include <linux/dmaengine.h>
16#include <linux/platform_device.h>
17#include <linux/interrupt.h>
18
19/* IPU Common registers */
20#define IPU_CONF 0x00
21#define IPU_CHA_BUF0_RDY 0x04
22#define IPU_CHA_BUF1_RDY 0x08
23#define IPU_CHA_DB_MODE_SEL 0x0C
24#define IPU_CHA_CUR_BUF 0x10
25#define IPU_FS_PROC_FLOW 0x14
26#define IPU_FS_DISP_FLOW 0x18
27#define IPU_TASKS_STAT 0x1C
28#define IPU_IMA_ADDR 0x20
29#define IPU_IMA_DATA 0x24
30#define IPU_INT_CTRL_1 0x28
31#define IPU_INT_CTRL_2 0x2C
32#define IPU_INT_CTRL_3 0x30
33#define IPU_INT_CTRL_4 0x34
34#define IPU_INT_CTRL_5 0x38
35#define IPU_INT_STAT_1 0x3C
36#define IPU_INT_STAT_2 0x40
37#define IPU_INT_STAT_3 0x44
38#define IPU_INT_STAT_4 0x48
39#define IPU_INT_STAT_5 0x4C
40#define IPU_BRK_CTRL_1 0x50
41#define IPU_BRK_CTRL_2 0x54
42#define IPU_BRK_STAT 0x58
43#define IPU_DIAGB_CTRL 0x5C
44
45/* IPU_CONF Register bits */
46#define IPU_CONF_CSI_EN 0x00000001
47#define IPU_CONF_IC_EN 0x00000002
48#define IPU_CONF_ROT_EN 0x00000004
49#define IPU_CONF_PF_EN 0x00000008
50#define IPU_CONF_SDC_EN 0x00000010
51#define IPU_CONF_ADC_EN 0x00000020
52#define IPU_CONF_DI_EN 0x00000040
53#define IPU_CONF_DU_EN 0x00000080
54#define IPU_CONF_PXL_ENDIAN 0x00000100
55
56/* Image Converter Registers */
57#define IC_CONF 0x88
58#define IC_PRP_ENC_RSC 0x8C
59#define IC_PRP_VF_RSC 0x90
60#define IC_PP_RSC 0x94
61#define IC_CMBP_1 0x98
62#define IC_CMBP_2 0x9C
63#define PF_CONF 0xA0
64#define IDMAC_CONF 0xA4
65#define IDMAC_CHA_EN 0xA8
66#define IDMAC_CHA_PRI 0xAC
67#define IDMAC_CHA_BUSY 0xB0
68
69/* Image Converter Register bits */
70#define IC_CONF_PRPENC_EN 0x00000001
71#define IC_CONF_PRPENC_CSC1 0x00000002
72#define IC_CONF_PRPENC_ROT_EN 0x00000004
73#define IC_CONF_PRPVF_EN 0x00000100
74#define IC_CONF_PRPVF_CSC1 0x00000200
75#define IC_CONF_PRPVF_CSC2 0x00000400
76#define IC_CONF_PRPVF_CMB 0x00000800
77#define IC_CONF_PRPVF_ROT_EN 0x00001000
78#define IC_CONF_PP_EN 0x00010000
79#define IC_CONF_PP_CSC1 0x00020000
80#define IC_CONF_PP_CSC2 0x00040000
81#define IC_CONF_PP_CMB 0x00080000
82#define IC_CONF_PP_ROT_EN 0x00100000
83#define IC_CONF_IC_GLB_LOC_A 0x10000000
84#define IC_CONF_KEY_COLOR_EN 0x20000000
85#define IC_CONF_RWS_EN 0x40000000
86#define IC_CONF_CSI_MEM_WR_EN 0x80000000
87
88#define IDMA_CHAN_INVALID 0x000000FF
89#define IDMA_IC_0 0x00000001
90#define IDMA_IC_1 0x00000002
91#define IDMA_IC_2 0x00000004
92#define IDMA_IC_3 0x00000008
93#define IDMA_IC_4 0x00000010
94#define IDMA_IC_5 0x00000020
95#define IDMA_IC_6 0x00000040
96#define IDMA_IC_7 0x00000080
97#define IDMA_IC_8 0x00000100
98#define IDMA_IC_9 0x00000200
99#define IDMA_IC_10 0x00000400
100#define IDMA_IC_11 0x00000800
101#define IDMA_IC_12 0x00001000
102#define IDMA_IC_13 0x00002000
103#define IDMA_SDC_BG 0x00004000
104#define IDMA_SDC_FG 0x00008000
105#define IDMA_SDC_MASK 0x00010000
106#define IDMA_SDC_PARTIAL 0x00020000
107#define IDMA_ADC_SYS1_WR 0x00040000
108#define IDMA_ADC_SYS2_WR 0x00080000
109#define IDMA_ADC_SYS1_CMD 0x00100000
110#define IDMA_ADC_SYS2_CMD 0x00200000
111#define IDMA_ADC_SYS1_RD 0x00400000
112#define IDMA_ADC_SYS2_RD 0x00800000
113#define IDMA_PF_QP 0x01000000
114#define IDMA_PF_BSP 0x02000000
115#define IDMA_PF_Y_IN 0x04000000
116#define IDMA_PF_U_IN 0x08000000
117#define IDMA_PF_V_IN 0x10000000
118#define IDMA_PF_Y_OUT 0x20000000
119#define IDMA_PF_U_OUT 0x40000000
120#define IDMA_PF_V_OUT 0x80000000
121
122#define TSTAT_PF_H264_PAUSE 0x00000001
123#define TSTAT_CSI2MEM_MASK 0x0000000C
124#define TSTAT_CSI2MEM_OFFSET 2
125#define TSTAT_VF_MASK 0x00000600
126#define TSTAT_VF_OFFSET 9
127#define TSTAT_VF_ROT_MASK 0x000C0000
128#define TSTAT_VF_ROT_OFFSET 18
129#define TSTAT_ENC_MASK 0x00000180
130#define TSTAT_ENC_OFFSET 7
131#define TSTAT_ENC_ROT_MASK 0x00030000
132#define TSTAT_ENC_ROT_OFFSET 16
133#define TSTAT_PP_MASK 0x00001800
134#define TSTAT_PP_OFFSET 11
135#define TSTAT_PP_ROT_MASK 0x00300000
136#define TSTAT_PP_ROT_OFFSET 20
137#define TSTAT_PF_MASK 0x00C00000
138#define TSTAT_PF_OFFSET 22
139#define TSTAT_ADCSYS1_MASK 0x03000000
140#define TSTAT_ADCSYS1_OFFSET 24
141#define TSTAT_ADCSYS2_MASK 0x0C000000
142#define TSTAT_ADCSYS2_OFFSET 26
143
144#define TASK_STAT_IDLE 0
145#define TASK_STAT_ACTIVE 1
146#define TASK_STAT_WAIT4READY 2
147
148struct idmac {
149 struct dma_device dma;
150};
151
152struct ipu {
153 void __iomem *reg_ipu;
154 void __iomem *reg_ic;
155 unsigned int irq_fn; /* IPU Function IRQ to the CPU */
156 unsigned int irq_err; /* IPU Error IRQ to the CPU */
157 unsigned int irq_base; /* Beginning of the IPU IRQ range */
158 unsigned long channel_init_mask;
159 spinlock_t lock;
160 struct clk *ipu_clk;
161 struct device *dev;
162 struct idmac idmac;
163 struct idmac_channel channel[IPU_CHANNELS_NUM];
164 struct tasklet_struct tasklet;
165};
166
167#define to_idmac(d) container_of(d, struct idmac, dma)
168
169extern int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev);
170extern void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev);
171
172extern bool ipu_irq_status(uint32_t irq);
173extern int ipu_irq_map(unsigned int source);
174extern int ipu_irq_unmap(unsigned int source);
175
176#endif
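Editorial note: the TSTAT_*_MASK/OFFSET pairs above decode fields of IPU_TASKS_STAT into the TASK_STAT_* values. A minimal sketch, mirroring ipu_channel_status() in ipu_idmac.c:

	u32 tstat = idmac_read_ipureg(ipu, IPU_TASKS_STAT);
	u32 enc = (tstat & TSTAT_ENC_MASK) >> TSTAT_ENC_OFFSET;

	if (enc == TASK_STAT_ACTIVE)
		; /* the encoder task is still running */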
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
new file mode 100644
index 000000000000..83f532cc767f
--- /dev/null
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -0,0 +1,413 @@
1/*
2 * Copyright (C) 2008
3 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/init.h>
11#include <linux/err.h>
12#include <linux/spinlock.h>
13#include <linux/delay.h>
14#include <linux/clk.h>
15#include <linux/irq.h>
16#include <linux/io.h>
17
18#include <mach/ipu.h>
19
20#include "ipu_intern.h"
21
22/*
23 * Register read / write - shall be inlined by the compiler
24 */
25static u32 ipu_read_reg(struct ipu *ipu, unsigned long reg)
26{
27 return __raw_readl(ipu->reg_ipu + reg);
28}
29
30static void ipu_write_reg(struct ipu *ipu, u32 value, unsigned long reg)
31{
32 __raw_writel(value, ipu->reg_ipu + reg);
33}
34
35
36/*
37 * IPU IRQ chip driver
38 */
39
40#define IPU_IRQ_NR_FN_BANKS 3
41#define IPU_IRQ_NR_ERR_BANKS 2
42#define IPU_IRQ_NR_BANKS (IPU_IRQ_NR_FN_BANKS + IPU_IRQ_NR_ERR_BANKS)
43
44struct ipu_irq_bank {
45 unsigned int control;
46 unsigned int status;
47 spinlock_t lock;
48 struct ipu *ipu;
49};
50
51static struct ipu_irq_bank irq_bank[IPU_IRQ_NR_BANKS] = {
52 /* 3 groups of functional interrupts */
53 {
54 .control = IPU_INT_CTRL_1,
55 .status = IPU_INT_STAT_1,
56 }, {
57 .control = IPU_INT_CTRL_2,
58 .status = IPU_INT_STAT_2,
59 }, {
60 .control = IPU_INT_CTRL_3,
61 .status = IPU_INT_STAT_3,
62 },
63 /* 2 groups of error interrupts */
64 {
65 .control = IPU_INT_CTRL_4,
66 .status = IPU_INT_STAT_4,
67 }, {
68 .control = IPU_INT_CTRL_5,
69 .status = IPU_INT_STAT_5,
70 },
71};
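/*
 * Editorial note (not part of the patch): an IPU interrupt "source" is a bit
 * position across these five registers, so a source maps onto a bank and a
 * bit exactly as ipu_irq_map() and ipu_irq_unmask() below do it:
 *
 *	bank = irq_bank + source / 32;	// e.g. source 96 -> irq_bank[3] (IPU_INT_CTRL_4)
 *	bit  = source & 31;		//      source 96 -> bit 0
 */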
72
73struct ipu_irq_map {
74 unsigned int irq;
75 int source;
76 struct ipu_irq_bank *bank;
77 struct ipu *ipu;
78};
79
80static struct ipu_irq_map irq_map[CONFIG_MX3_IPU_IRQS];
81/* Protects allocations from the above array of maps */
82static DEFINE_MUTEX(map_lock);
83/* Protects register accesses and individual mappings */
84static DEFINE_SPINLOCK(bank_lock);
85
86static struct ipu_irq_map *src2map(unsigned int src)
87{
88 int i;
89
90 for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++)
91 if (irq_map[i].source == src)
92 return irq_map + i;
93
94 return NULL;
95}
96
97static void ipu_irq_unmask(unsigned int irq)
98{
99 struct ipu_irq_map *map = get_irq_chip_data(irq);
100 struct ipu_irq_bank *bank;
101 uint32_t reg;
102 unsigned long lock_flags;
103
104 spin_lock_irqsave(&bank_lock, lock_flags);
105
106 bank = map->bank;
107 if (!bank) {
108 spin_unlock_irqrestore(&bank_lock, lock_flags);
109 pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq);
110 return;
111 }
112
113 reg = ipu_read_reg(bank->ipu, bank->control);
114 reg |= (1UL << (map->source & 31));
115 ipu_write_reg(bank->ipu, reg, bank->control);
116
117 spin_unlock_irqrestore(&bank_lock, lock_flags);
118}
119
120static void ipu_irq_mask(unsigned int irq)
121{
122 struct ipu_irq_map *map = get_irq_chip_data(irq);
123 struct ipu_irq_bank *bank;
124 uint32_t reg;
125 unsigned long lock_flags;
126
127 spin_lock_irqsave(&bank_lock, lock_flags);
128
129 bank = map->bank;
130 if (!bank) {
131 spin_unlock_irqrestore(&bank_lock, lock_flags);
132 pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq);
133 return;
134 }
135
136 reg = ipu_read_reg(bank->ipu, bank->control);
137 reg &= ~(1UL << (map->source & 31));
138 ipu_write_reg(bank->ipu, reg, bank->control);
139
140 spin_unlock_irqrestore(&bank_lock, lock_flags);
141}
142
143static void ipu_irq_ack(unsigned int irq)
144{
145 struct ipu_irq_map *map = get_irq_chip_data(irq);
146 struct ipu_irq_bank *bank;
147 unsigned long lock_flags;
148
149 spin_lock_irqsave(&bank_lock, lock_flags);
150
151 bank = map->bank;
152 if (!bank) {
153 spin_unlock_irqrestore(&bank_lock, lock_flags);
154 pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq);
155 return;
156 }
157
158 ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status);
159 spin_unlock_irqrestore(&bank_lock, lock_flags);
160}
161
162/**
163 * ipu_irq_status() - returns the current interrupt status of the specified IRQ.
164 * @irq: interrupt line to get status for.
165 * @return: true if the interrupt is pending/asserted or false if the
166 * interrupt is not pending.
167 */
168bool ipu_irq_status(unsigned int irq)
169{
170 struct ipu_irq_map *map = get_irq_chip_data(irq);
171 struct ipu_irq_bank *bank;
172 unsigned long lock_flags;
173 bool ret;
174
175 spin_lock_irqsave(&bank_lock, lock_flags);
176 bank = map->bank;
177 ret = bank && ipu_read_reg(bank->ipu, bank->status) &
178 (1UL << (map->source & 31));
179 spin_unlock_irqrestore(&bank_lock, lock_flags);
180
181 return ret;
182}
183
184/**
185 * ipu_irq_map() - map an IPU interrupt source to an IRQ number
186 * @source: interrupt source bit position (see below)
187 * @return: mapped IRQ number or negative error code
188 *
189 * The source parameter needs some explanation. On the i.MX31 the IPU has 137
190 * IRQ sources, spread over 5 32-bit registers as 32, 32, 24, 32 and 17 bits.
191 * However, the source argument of this function is not the sequential number of
192 * the interrupt, but rather its bit position. So, the first interrupt in the
193 * fourth register has source number 96, not 88. This makes calculations easier,
194 * and also provides forward compatibility with future IPU implementations,
195 * whatever their interrupt bit assignments turn out to be.
196 */
197int ipu_irq_map(unsigned int source)
198{
199 int i, ret = -ENOMEM;
200 struct ipu_irq_map *map;
201
202 might_sleep();
203
204 mutex_lock(&map_lock);
205 map = src2map(source);
206 if (map) {
207 pr_err("IPU: Source %u already mapped to IRQ %u\n", source, map->irq);
208 ret = -EBUSY;
209 goto out;
210 }
211
212 for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
213 if (irq_map[i].source < 0) {
214 unsigned long lock_flags;
215
216 spin_lock_irqsave(&bank_lock, lock_flags);
217 irq_map[i].source = source;
218 irq_map[i].bank = irq_bank + source / 32;
219 spin_unlock_irqrestore(&bank_lock, lock_flags);
220
221 ret = irq_map[i].irq;
222 pr_debug("IPU: mapped source %u to IRQ %u\n",
223 source, ret);
224 break;
225 }
226 }
227out:
228 mutex_unlock(&map_lock);
229
230 if (ret < 0)
231 pr_err("IPU: couldn't map source %u: %d\n", source, ret);
232
233 return ret;
234}
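As a quick sanity check of the bit-position scheme described above: the bank index is source / 32 and the bit within that bank is source & 31, exactly as used by ipu_irq_map() and the mask/unmask/ack helpers. A standalone snippet (illustrative only, not part of the patch; builds with any C compiler):

#include <stdio.h>

int main(void)
{
	unsigned int sources[] = { 0, 31, 88, 96, 127 };
	unsigned int i;

	for (i = 0; i < sizeof(sources) / sizeof(sources[0]); i++) {
		unsigned int src = sources[i];

		/* same arithmetic as ipu_irq_map() and ipu_irq_unmask() */
		printf("source %3u -> bank %u, bit %2u, mask 0x%08lx\n",
		       src, src / 32, src & 31, 1UL << (src & 31));
	}
	/* e.g. source 96 -> bank 3, bit 0: first interrupt of the fourth register */
	return 0;
}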
235
236/**
237 * ipu_irq_unmap() - unmap a previously mapped IPU interrupt source
238 * @source: interrupt source bit position (see ipu_irq_map())
239 * @return: 0 or negative error code
240 */
241int ipu_irq_unmap(unsigned int source)
242{
243 int i, ret = -EINVAL;
244
245 might_sleep();
246
247 mutex_lock(&map_lock);
248 for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
249 if (irq_map[i].source == source) {
250 unsigned long lock_flags;
251
252 pr_debug("IPU: unmapped source %u from IRQ %u\n",
253 source, irq_map[i].irq);
254
255 spin_lock_irqsave(&bank_lock, lock_flags);
256 irq_map[i].source = -EINVAL;
257 irq_map[i].bank = NULL;
258 spin_unlock_irqrestore(&bank_lock, lock_flags);
259
260 ret = 0;
261 break;
262 }
263 }
264 mutex_unlock(&map_lock);
265
266 return ret;
267}
268
269/* Chained IRQ handler for IPU error interrupt */
270static void ipu_irq_err(unsigned int irq, struct irq_desc *desc)
271{
272 struct ipu *ipu = get_irq_data(irq);
273 u32 status;
274 int i, line;
275
276 for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) {
277 struct ipu_irq_bank *bank = irq_bank + i;
278
279 spin_lock(&bank_lock);
280 status = ipu_read_reg(ipu, bank->status);
281 /*
282 * We probably don't have to clear all interrupts here; they will
283 * be acked by ->handle_irq() (handle_level_irq). However, we
284 * might want to clear unhandled interrupts after the loop...
285 */
286 status &= ipu_read_reg(ipu, bank->control);
287 spin_unlock(&bank_lock);
288 while ((line = ffs(status))) {
289 struct ipu_irq_map *map;
290
291 line--;
292 status &= ~(1UL << line);
293
294 spin_lock(&bank_lock);
295 map = src2map(32 * i + line);
296 if (map)
297 irq = map->irq;
298 spin_unlock(&bank_lock);
299
300 if (!map) {
301 pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
302 line, i);
303 continue;
304 }
305 generic_handle_irq(irq);
306 }
307 }
308}
309
310/* Chained IRQ handler for IPU function interrupt */
311static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc)
312{
313 struct ipu *ipu = get_irq_data(irq);
314 u32 status;
315 int i, line;
316
317 for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) {
318 struct ipu_irq_bank *bank = irq_bank + i;
319
320 spin_lock(&bank_lock);
321 status = ipu_read_reg(ipu, bank->status);
322 /* Not clearing all interrupts, see above */
323 status &= ipu_read_reg(ipu, bank->control);
324 spin_unlock(&bank_lock);
325 while ((line = ffs(status))) {
326 struct ipu_irq_map *map;
327
328 line--;
329 status &= ~(1UL << line);
330
331 spin_lock(&bank_lock);
332 map = src2map(32 * i + line);
333 if (map)
334 irq = map->irq;
335 spin_unlock(&bank_lock);
336
337 if (!map) {
338 pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
339 line, i);
340 continue;
341 }
342 generic_handle_irq(irq);
343 }
344 }
345}
346
347static struct irq_chip ipu_irq_chip = {
348 .name = "ipu_irq",
349 .ack = ipu_irq_ack,
350 .mask = ipu_irq_mask,
351 .unmask = ipu_irq_unmask,
352};
353
354/* Install the IRQ handler */
355int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
356{
357 struct ipu_platform_data *pdata = dev->dev.platform_data;
358 unsigned int irq, irq_base, i;
359
360 irq_base = pdata->irq_base;
361
362 for (i = 0; i < IPU_IRQ_NR_BANKS; i++)
363 irq_bank[i].ipu = ipu;
364
365 for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
366 int ret;
367
368 irq = irq_base + i;
369 ret = set_irq_chip(irq, &ipu_irq_chip);
370 if (ret < 0)
371 return ret;
372 ret = set_irq_chip_data(irq, irq_map + i);
373 if (ret < 0)
374 return ret;
375 irq_map[i].ipu = ipu;
376 irq_map[i].irq = irq;
377 irq_map[i].source = -EINVAL;
378 set_irq_handler(irq, handle_level_irq);
379#ifdef CONFIG_ARM
380 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
381#endif
382 }
383
384 set_irq_data(ipu->irq_fn, ipu);
385 set_irq_chained_handler(ipu->irq_fn, ipu_irq_fn);
386
387 set_irq_data(ipu->irq_err, ipu);
388 set_irq_chained_handler(ipu->irq_err, ipu_irq_err);
389
390 return 0;
391}
392
393void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev)
394{
395 struct ipu_platform_data *pdata = dev->dev.platform_data;
396 unsigned int irq, irq_base;
397
398 irq_base = pdata->irq_base;
399
400 set_irq_chained_handler(ipu->irq_fn, NULL);
401 set_irq_data(ipu->irq_fn, NULL);
402
403 set_irq_chained_handler(ipu->irq_err, NULL);
404 set_irq_data(ipu->irq_err, NULL);
405
406 for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) {
407#ifdef CONFIG_ARM
408 set_irq_flags(irq, 0);
409#endif
410 set_irq_chip(irq, NULL);
411 set_irq_chip_data(irq, NULL);
412 }
413}
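For context, a hedged sketch of how a client (for instance the IDMAC driver elsewhere in this series) might consume these helpers, mapping a source and installing a handler on the returned Linux IRQ; the function names, flags and label below are illustrative, not taken from the patch:

#include <linux/interrupt.h>

static irqreturn_t example_eof_handler(int irq, void *dev_id)
{
	/* handle the channel's end-of-frame event */
	return IRQ_HANDLED;
}

static int example_attach_source(unsigned int source, void *ctx)
{
	/* source is a bit position, as documented at ipu_irq_map() */
	int irq = ipu_irq_map(source);

	if (irq < 0)
		return irq;

	if (request_irq(irq, example_eof_handler, 0, "example-ipu", ctx)) {
		ipu_irq_unmap(source);
		return -EBUSY;
	}

	return irq;
}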
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index cd2e3b8087e7..24f3ca851523 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -36,7 +36,7 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
36 struct csrow_info *csrow = &mci->csrows[0]; 36 struct csrow_info *csrow = &mci->csrows[0];
37 unsigned long address, pfn, offset, syndrome; 37 unsigned long address, pfn, offset, syndrome;
38 38
39 dev_dbg(mci->dev, "ECC CE err on node %d, channel %d, ar = 0x%016lx\n", 39 dev_dbg(mci->dev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n",
40 priv->node, chan, ar); 40 priv->node, chan, ar);
41 41
42 /* Address decoding is likely a bit bogus, to dbl check */ 42 /* Address decoding is likely a bit bogus, to dbl check */
@@ -58,7 +58,7 @@ static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
58 struct csrow_info *csrow = &mci->csrows[0]; 58 struct csrow_info *csrow = &mci->csrows[0];
59 unsigned long address, pfn, offset; 59 unsigned long address, pfn, offset;
60 60
61 dev_dbg(mci->dev, "ECC UE err on node %d, channel %d, ar = 0x%016lx\n", 61 dev_dbg(mci->dev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n",
62 priv->node, chan, ar); 62 priv->node, chan, ar);
63 63
64 /* Address decoding is likely a bit bogus, to dbl check */ 64 /* Address decoding is likely a bit bogus, to dbl check */
@@ -169,7 +169,7 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
169 169
170 /* Get channel population */ 170 /* Get channel population */
171 reg = in_be64(&regs->mic_mnt_cfg); 171 reg = in_be64(&regs->mic_mnt_cfg);
172 dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016lx\n", reg); 172 dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016llx\n", reg);
173 chanmask = 0; 173 chanmask = 0;
174 if (reg & CBE_MIC_MNT_CFG_CHAN_0_POP) 174 if (reg & CBE_MIC_MNT_CFG_CHAN_0_POP)
175 chanmask |= 0x1; 175 chanmask |= 0x1;
@@ -180,7 +180,7 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
180 "Yuck ! No channel populated ? Aborting !\n"); 180 "Yuck ! No channel populated ? Aborting !\n");
181 return -ENODEV; 181 return -ENODEV;
182 } 182 }
183 dev_dbg(&pdev->dev, "Initial FIR = 0x%016lx\n", 183 dev_dbg(&pdev->dev, "Initial FIR = 0x%016llx\n",
184 in_be64(&regs->mic_fir)); 184 in_be64(&regs->mic_fir));
185 185
186 /* Allocate & init EDAC MC data structure */ 186 /* Allocate & init EDAC MC data structure */
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index 6bd91a15d5e6..7be2cf3514e7 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -232,7 +232,7 @@ fw_card_bm_work(struct work_struct *work)
232 root_id = root_node->node_id; 232 root_id = root_node->node_id;
233 grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); 233 grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
234 234
235 if (card->bm_generation + 1 == generation || 235 if (is_next_generation(generation, card->bm_generation) ||
236 (card->bm_generation != generation && grace)) { 236 (card->bm_generation != generation && grace)) {
237 /* 237 /*
238 * This first step is to figure out who is IRM and 238 * This first step is to figure out who is IRM and
@@ -512,7 +512,7 @@ fw_core_remove_card(struct fw_card *card)
512 fw_core_initiate_bus_reset(card, 1); 512 fw_core_initiate_bus_reset(card, 1);
513 513
514 mutex_lock(&card_mutex); 514 mutex_lock(&card_mutex);
515 list_del(&card->link); 515 list_del_init(&card->link);
516 mutex_unlock(&card_mutex); 516 mutex_unlock(&card_mutex);
517 517
518 /* Set up the dummy driver. */ 518 /* Set up the dummy driver. */
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index 2af5a8d1e012..bf53acb45652 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -25,6 +25,7 @@
25#include <linux/device.h> 25#include <linux/device.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/idr.h> 27#include <linux/idr.h>
28#include <linux/jiffies.h>
28#include <linux/string.h> 29#include <linux/string.h>
29#include <linux/rwsem.h> 30#include <linux/rwsem.h>
30#include <linux/semaphore.h> 31#include <linux/semaphore.h>
@@ -634,12 +635,39 @@ struct fw_device *fw_device_get_by_devt(dev_t devt)
634 return device; 635 return device;
635} 636}
636 637
638/*
639 * These defines control the retry behavior for reading the config
640 * rom. It shouldn't be necessary to tweak these; if the device
641 * doesn't respond to a config rom read within 10 seconds, it's not
642 * going to respond at all. As for the initial delay, a lot of
643 * devices will be able to respond within half a second after bus
644 * reset. On the other hand, it's not really worth being more
645 * aggressive than that, since it scales pretty well; if 10 devices
646 * are plugged in, they're all getting read within one second.
647 */
648
649#define MAX_RETRIES 10
650#define RETRY_DELAY (3 * HZ)
651#define INITIAL_DELAY (HZ / 2)
652#define SHUTDOWN_DELAY (2 * HZ)
653
637static void fw_device_shutdown(struct work_struct *work) 654static void fw_device_shutdown(struct work_struct *work)
638{ 655{
639 struct fw_device *device = 656 struct fw_device *device =
640 container_of(work, struct fw_device, work.work); 657 container_of(work, struct fw_device, work.work);
641 int minor = MINOR(device->device.devt); 658 int minor = MINOR(device->device.devt);
642 659
660 if (time_is_after_jiffies(device->card->reset_jiffies + SHUTDOWN_DELAY)
661 && !list_empty(&device->card->link)) {
662 schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
663 return;
664 }
665
666 if (atomic_cmpxchg(&device->state,
667 FW_DEVICE_GONE,
668 FW_DEVICE_SHUTDOWN) != FW_DEVICE_GONE)
669 return;
670
643 fw_device_cdev_remove(device); 671 fw_device_cdev_remove(device);
644 device_for_each_child(&device->device, NULL, shutdown_unit); 672 device_for_each_child(&device->device, NULL, shutdown_unit);
645 device_unregister(&device->device); 673 device_unregister(&device->device);
@@ -647,6 +675,7 @@ static void fw_device_shutdown(struct work_struct *work)
647 down_write(&fw_device_rwsem); 675 down_write(&fw_device_rwsem);
648 idr_remove(&fw_device_idr, minor); 676 idr_remove(&fw_device_idr, minor);
649 up_write(&fw_device_rwsem); 677 up_write(&fw_device_rwsem);
678
650 fw_device_put(device); 679 fw_device_put(device);
651} 680}
652 681
@@ -654,25 +683,63 @@ static struct device_type fw_device_type = {
654 .release = fw_device_release, 683 .release = fw_device_release,
655}; 684};
656 685
686static void fw_device_update(struct work_struct *work);
687
657/* 688/*
658 * These defines control the retry behavior for reading the config 689 * If a device was pending for deletion because its node went away but its
659 rom. It shouldn't be necessary to tweak these; if the device 690 * bus info block and root directory header match that of a newly discovered
660 * doesn't respond to a config rom read within 10 seconds, it's not 691 * device, revive the existing fw_device.
661 * going to respond at all. As for the initial delay, a lot of 692 * The newly allocated fw_device becomes obsolete instead.
662 * devices will be able to respond within half a second after bus
663 * reset. On the other hand, it's not really worth being more
664 * aggressive than that, since it scales pretty well; if 10 devices
665 * are plugged in, they're all getting read within one second.
666 */ 693 */
694static int lookup_existing_device(struct device *dev, void *data)
695{
696 struct fw_device *old = fw_device(dev);
697 struct fw_device *new = data;
698 struct fw_card *card = new->card;
699 int match = 0;
700
701 down_read(&fw_device_rwsem); /* serialize config_rom access */
702 spin_lock_irq(&card->lock); /* serialize node access */
703
704 if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
705 atomic_cmpxchg(&old->state,
706 FW_DEVICE_GONE,
707 FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
708 struct fw_node *current_node = new->node;
709 struct fw_node *obsolete_node = old->node;
710
711 new->node = obsolete_node;
712 new->node->data = new;
713 old->node = current_node;
714 old->node->data = old;
715
716 old->max_speed = new->max_speed;
717 old->node_id = current_node->node_id;
718 smp_wmb(); /* update node_id before generation */
719 old->generation = card->generation;
720 old->config_rom_retries = 0;
721 fw_notify("rediscovered device %s\n", dev_name(dev));
667 722
668#define MAX_RETRIES 10 723 PREPARE_DELAYED_WORK(&old->work, fw_device_update);
669#define RETRY_DELAY (3 * HZ) 724 schedule_delayed_work(&old->work, 0);
670#define INITIAL_DELAY (HZ / 2) 725
726 if (current_node == card->root_node)
727 fw_schedule_bm_work(card, 0);
728
729 match = 1;
730 }
731
732 spin_unlock_irq(&card->lock);
733 up_read(&fw_device_rwsem);
734
735 return match;
736}
671 737
672static void fw_device_init(struct work_struct *work) 738static void fw_device_init(struct work_struct *work)
673{ 739{
674 struct fw_device *device = 740 struct fw_device *device =
675 container_of(work, struct fw_device, work.work); 741 container_of(work, struct fw_device, work.work);
742 struct device *revived_dev;
676 int minor, err; 743 int minor, err;
677 744
678 /* 745 /*
@@ -696,6 +763,15 @@ static void fw_device_init(struct work_struct *work)
696 return; 763 return;
697 } 764 }
698 765
766 revived_dev = device_find_child(device->card->device,
767 device, lookup_existing_device);
768 if (revived_dev) {
769 put_device(revived_dev);
770 fw_device_release(&device->device);
771
772 return;
773 }
774
699 device_initialize(&device->device); 775 device_initialize(&device->device);
700 776
701 fw_device_get(device); 777 fw_device_get(device);
@@ -734,9 +810,10 @@ static void fw_device_init(struct work_struct *work)
734 * fw_node_event(). 810 * fw_node_event().
735 */ 811 */
736 if (atomic_cmpxchg(&device->state, 812 if (atomic_cmpxchg(&device->state,
737 FW_DEVICE_INITIALIZING, 813 FW_DEVICE_INITIALIZING,
738 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) { 814 FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
739 fw_device_shutdown(work); 815 PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
816 schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
740 } else { 817 } else {
741 if (device->config_rom_retries) 818 if (device->config_rom_retries)
742 fw_notify("created device %s: GUID %08x%08x, S%d00, " 819 fw_notify("created device %s: GUID %08x%08x, S%d00, "
@@ -847,8 +924,8 @@ static void fw_device_refresh(struct work_struct *work)
847 924
848 case REREAD_BIB_UNCHANGED: 925 case REREAD_BIB_UNCHANGED:
849 if (atomic_cmpxchg(&device->state, 926 if (atomic_cmpxchg(&device->state,
850 FW_DEVICE_INITIALIZING, 927 FW_DEVICE_INITIALIZING,
851 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) 928 FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
852 goto gone; 929 goto gone;
853 930
854 fw_device_update(work); 931 fw_device_update(work);
@@ -879,8 +956,8 @@ static void fw_device_refresh(struct work_struct *work)
879 create_units(device); 956 create_units(device);
880 957
881 if (atomic_cmpxchg(&device->state, 958 if (atomic_cmpxchg(&device->state,
882 FW_DEVICE_INITIALIZING, 959 FW_DEVICE_INITIALIZING,
883 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) 960 FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
884 goto gone; 961 goto gone;
885 962
886 fw_notify("refreshed device %s\n", dev_name(&device->device)); 963 fw_notify("refreshed device %s\n", dev_name(&device->device));
@@ -890,8 +967,9 @@ static void fw_device_refresh(struct work_struct *work)
890 give_up: 967 give_up:
891 fw_notify("giving up on refresh of device %s\n", dev_name(&device->device)); 968 fw_notify("giving up on refresh of device %s\n", dev_name(&device->device));
892 gone: 969 gone:
893 atomic_set(&device->state, FW_DEVICE_SHUTDOWN); 970 atomic_set(&device->state, FW_DEVICE_GONE);
894 fw_device_shutdown(work); 971 PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
972 schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
895 out: 973 out:
896 if (node_id == card->root_node->node_id) 974 if (node_id == card->root_node->node_id)
897 fw_schedule_bm_work(card, 0); 975 fw_schedule_bm_work(card, 0);
@@ -995,9 +1073,10 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
995 */ 1073 */
996 device = node->data; 1074 device = node->data;
997 if (atomic_xchg(&device->state, 1075 if (atomic_xchg(&device->state,
998 FW_DEVICE_SHUTDOWN) == FW_DEVICE_RUNNING) { 1076 FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
999 PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); 1077 PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
1000 schedule_delayed_work(&device->work, 0); 1078 schedule_delayed_work(&device->work,
1079 list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
1001 } 1080 }
1002 break; 1081 break;
1003 } 1082 }
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index df51732608d9..8ef6ec2ca21c 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -28,6 +28,7 @@
28enum fw_device_state { 28enum fw_device_state {
29 FW_DEVICE_INITIALIZING, 29 FW_DEVICE_INITIALIZING,
30 FW_DEVICE_RUNNING, 30 FW_DEVICE_RUNNING,
31 FW_DEVICE_GONE,
31 FW_DEVICE_SHUTDOWN, 32 FW_DEVICE_SHUTDOWN,
32}; 33};
33 34
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index ab9c01e462ef..6d19828a93a5 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -226,7 +226,7 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
226#define CONTEXT_DEAD 0x0800 226#define CONTEXT_DEAD 0x0800
227#define CONTEXT_ACTIVE 0x0400 227#define CONTEXT_ACTIVE 0x0400
228 228
229#define OHCI1394_MAX_AT_REQ_RETRIES 0x2 229#define OHCI1394_MAX_AT_REQ_RETRIES 0xf
230#define OHCI1394_MAX_AT_RESP_RETRIES 0x2 230#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
231#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 231#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
232 232
@@ -896,11 +896,11 @@ static void context_stop(struct context *ctx)
896 for (i = 0; i < 10; i++) { 896 for (i = 0; i < 10; i++) {
897 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs)); 897 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
898 if ((reg & CONTEXT_ACTIVE) == 0) 898 if ((reg & CONTEXT_ACTIVE) == 0)
899 break; 899 return;
900 900
901 fw_notify("context_stop: still active (0x%08x)\n", reg);
902 mdelay(1); 901 mdelay(1);
903 } 902 }
903 fw_error("Error: DMA context still active (0x%08x)\n", reg);
904} 904}
905 905
906struct driver_data { 906struct driver_data {
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index e88d5067448c..c71c4419d9e8 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -168,6 +168,7 @@ struct sbp2_target {
168 int address_high; 168 int address_high;
169 unsigned int workarounds; 169 unsigned int workarounds;
170 unsigned int mgt_orb_timeout; 170 unsigned int mgt_orb_timeout;
171 unsigned int max_payload;
171 172
172 int dont_block; /* counter for each logical unit */ 173 int dont_block; /* counter for each logical unit */
173 int blocked; /* ditto */ 174 int blocked; /* ditto */
@@ -310,14 +311,16 @@ struct sbp2_command_orb {
310 dma_addr_t page_table_bus; 311 dma_addr_t page_table_bus;
311}; 312};
312 313
314#define SBP2_ROM_VALUE_WILDCARD ~0 /* match all */
315#define SBP2_ROM_VALUE_MISSING 0xff000000 /* not present in the unit dir. */
316
313/* 317/*
314 * List of devices with known bugs. 318 * List of devices with known bugs.
315 * 319 *
316 * The firmware_revision field, masked with 0xffff00, is the best 320 * The firmware_revision field, masked with 0xffff00, is the best
317 * indicator for the type of bridge chip of a device. It yields a few 321 * indicator for the type of bridge chip of a device. It yields a few
318 * false positives but this did not break correctly behaving devices 322 * false positives but this did not break correctly behaving devices
319 * so far. We use ~0 as a wildcard, since the 24 bit values we get 323 * so far.
320 * from the config rom can never match that.
321 */ 324 */
322static const struct { 325static const struct {
323 u32 firmware_revision; 326 u32 firmware_revision;
@@ -339,33 +342,35 @@ static const struct {
339 }, 342 },
340 /* Initio bridges, actually only needed for some older ones */ { 343 /* Initio bridges, actually only needed for some older ones */ {
341 .firmware_revision = 0x000200, 344 .firmware_revision = 0x000200,
342 .model = ~0, 345 .model = SBP2_ROM_VALUE_WILDCARD,
343 .workarounds = SBP2_WORKAROUND_INQUIRY_36, 346 .workarounds = SBP2_WORKAROUND_INQUIRY_36,
344 }, 347 },
345 /* PL-3507 bridge with Prolific firmware */ { 348 /* PL-3507 bridge with Prolific firmware */ {
346 .firmware_revision = 0x012800, 349 .firmware_revision = 0x012800,
347 .model = ~0, 350 .model = SBP2_ROM_VALUE_WILDCARD,
348 .workarounds = SBP2_WORKAROUND_POWER_CONDITION, 351 .workarounds = SBP2_WORKAROUND_POWER_CONDITION,
349 }, 352 },
350 /* Symbios bridge */ { 353 /* Symbios bridge */ {
351 .firmware_revision = 0xa0b800, 354 .firmware_revision = 0xa0b800,
352 .model = ~0, 355 .model = SBP2_ROM_VALUE_WILDCARD,
353 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, 356 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
354 }, 357 },
355 /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ { 358 /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ {
356 .firmware_revision = 0x002600, 359 .firmware_revision = 0x002600,
357 .model = ~0, 360 .model = SBP2_ROM_VALUE_WILDCARD,
358 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, 361 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
359 }, 362 },
360
361 /* 363 /*
362 * There are iPods (2nd gen, 3rd gen) with model_id == 0, but 364 * iPod 2nd generation: needs 128k max transfer size workaround
363 * these iPods do not feature the read_capacity bug according 365 * iPod 3rd generation: needs fix capacity workaround
364 * to one report. Read_capacity behaviour as well as model_id
365 * could change due to Apple-supplied firmware updates though.
366 */ 366 */
367 367 {
368 /* iPod 4th generation. */ { 368 .firmware_revision = 0x0a2700,
369 .model = 0x000000,
370 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS |
371 SBP2_WORKAROUND_FIX_CAPACITY,
372 },
373 /* iPod 4th generation */ {
369 .firmware_revision = 0x0a2700, 374 .firmware_revision = 0x0a2700,
370 .model = 0x000021, 375 .model = 0x000021,
371 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, 376 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
@@ -1092,7 +1097,7 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
1092 continue; 1097 continue;
1093 1098
1094 if (sbp2_workarounds_table[i].model != model && 1099 if (sbp2_workarounds_table[i].model != model &&
1095 sbp2_workarounds_table[i].model != ~0) 1100 sbp2_workarounds_table[i].model != SBP2_ROM_VALUE_WILDCARD)
1096 continue; 1101 continue;
1097 1102
1098 w |= sbp2_workarounds_table[i].workarounds; 1103 w |= sbp2_workarounds_table[i].workarounds;
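A worked illustration of this matching loop, using the two iPod entries added above with stand-in workaround flag values; the firmware_revision below is assumed to have already been masked as described in the table's header comment:

#include <stdio.h>

#define ROM_VALUE_WILDCARD ~0		/* stands in for SBP2_ROM_VALUE_WILDCARD */

static const struct {
	unsigned int firmware_revision;
	unsigned int model;
	unsigned int workarounds;
} table[] = {
	{ 0x0a2700, 0x000000, 0x1 },	/* iPod 2nd/3rd generation entry */
	{ 0x0a2700, 0x000021, 0x2 },	/* iPod 4th generation entry */
};

int main(void)
{
	unsigned int firmware_revision = 0x0a2700;	/* already masked */
	unsigned int model = 0x000021;
	unsigned int w = 0, i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].firmware_revision != firmware_revision)
			continue;
		if (table[i].model != model &&
		    table[i].model != ROM_VALUE_WILDCARD)
			continue;
		w |= table[i].workarounds;
	}
	printf("workarounds = 0x%x\n", w);	/* 0x2: only the 4th gen entry matches */
	return 0;
}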
@@ -1142,20 +1147,28 @@ static int sbp2_probe(struct device *dev)
1142 fw_device_get(device); 1147 fw_device_get(device);
1143 fw_unit_get(unit); 1148 fw_unit_get(unit);
1144 1149
1145 /* Initialize to values that won't match anything in our table. */
1146 firmware_revision = 0xff000000;
1147 model = 0xff000000;
1148
1149 /* implicit directory ID */ 1150 /* implicit directory ID */
1150 tgt->directory_id = ((unit->directory - device->config_rom) * 4 1151 tgt->directory_id = ((unit->directory - device->config_rom) * 4
1151 + CSR_CONFIG_ROM) & 0xffffff; 1152 + CSR_CONFIG_ROM) & 0xffffff;
1152 1153
1154 firmware_revision = SBP2_ROM_VALUE_MISSING;
1155 model = SBP2_ROM_VALUE_MISSING;
1156
1153 if (sbp2_scan_unit_dir(tgt, unit->directory, &model, 1157 if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
1154 &firmware_revision) < 0) 1158 &firmware_revision) < 0)
1155 goto fail_tgt_put; 1159 goto fail_tgt_put;
1156 1160
1157 sbp2_init_workarounds(tgt, model, firmware_revision); 1161 sbp2_init_workarounds(tgt, model, firmware_revision);
1158 1162
1163 /*
1164 * At S100 we can do 512 bytes per packet, at S200 1024 bytes,
1165 * and so on up to 4096 bytes. The SBP-2 max_payload field
1166 * specifies the max payload size as 2 ^ (max_payload + 2), so
1167 * if we set this to max_speed + 7, we get the right value.
1168 */
1169 tgt->max_payload = min(device->max_speed + 7, 10U);
1170 tgt->max_payload = min(tgt->max_payload, device->card->max_receive - 1);
1171
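To make the computation above concrete, a standalone run-through of the speed-code-to-payload mapping (the additional clamp against the card's max_receive is left out here):

#include <stdio.h>

int main(void)
{
	unsigned int speed;

	/* speed code 0 = S100, 1 = S200, ...; payload = 2 ^ (max_payload + 2) */
	for (speed = 0; speed <= 4; speed++) {
		unsigned int max_payload = speed + 7;

		if (max_payload > 10)	/* SBP-2 caps the field at 4096 bytes */
			max_payload = 10;
		printf("S%u00: max_payload = %2u -> %u bytes\n",
		       1U << speed, max_payload, 1U << (max_payload + 2));
	}
	return 0;
}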
1159 /* Do the login in a workqueue so we can easily reschedule retries. */ 1172 /* Do the login in a workqueue so we can easily reschedule retries. */
1160 list_for_each_entry(lu, &tgt->lu_list, link) 1173 list_for_each_entry(lu, &tgt->lu_list, link)
1161 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); 1174 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
@@ -1273,6 +1286,19 @@ static struct fw_driver sbp2_driver = {
1273 .id_table = sbp2_id_table, 1286 .id_table = sbp2_id_table,
1274}; 1287};
1275 1288
1289static void sbp2_unmap_scatterlist(struct device *card_device,
1290 struct sbp2_command_orb *orb)
1291{
1292 if (scsi_sg_count(orb->cmd))
1293 dma_unmap_sg(card_device, scsi_sglist(orb->cmd),
1294 scsi_sg_count(orb->cmd),
1295 orb->cmd->sc_data_direction);
1296
1297 if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT))
1298 dma_unmap_single(card_device, orb->page_table_bus,
1299 sizeof(orb->page_table), DMA_TO_DEVICE);
1300}
1301
1276static unsigned int 1302static unsigned int
1277sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) 1303sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
1278{ 1304{
@@ -1352,15 +1378,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
1352 1378
1353 dma_unmap_single(device->card->device, orb->base.request_bus, 1379 dma_unmap_single(device->card->device, orb->base.request_bus,
1354 sizeof(orb->request), DMA_TO_DEVICE); 1380 sizeof(orb->request), DMA_TO_DEVICE);
1355 1381 sbp2_unmap_scatterlist(device->card->device, orb);
1356 if (scsi_sg_count(orb->cmd) > 0)
1357 dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
1358 scsi_sg_count(orb->cmd),
1359 orb->cmd->sc_data_direction);
1360
1361 if (orb->page_table_bus != 0)
1362 dma_unmap_single(device->card->device, orb->page_table_bus,
1363 sizeof(orb->page_table), DMA_TO_DEVICE);
1364 1382
1365 orb->cmd->result = result; 1383 orb->cmd->result = result;
1366 orb->done(orb->cmd); 1384 orb->done(orb->cmd);
@@ -1434,7 +1452,6 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1434 struct sbp2_logical_unit *lu = cmd->device->hostdata; 1452 struct sbp2_logical_unit *lu = cmd->device->hostdata;
1435 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 1453 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
1436 struct sbp2_command_orb *orb; 1454 struct sbp2_command_orb *orb;
1437 unsigned int max_payload;
1438 int generation, retval = SCSI_MLQUEUE_HOST_BUSY; 1455 int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
1439 1456
1440 /* 1457 /*
@@ -1462,17 +1479,9 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1462 orb->done = done; 1479 orb->done = done;
1463 orb->cmd = cmd; 1480 orb->cmd = cmd;
1464 1481
1465 orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL); 1482 orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
1466 /*
1467 * At speed 100 we can do 512 bytes per packet, at speed 200,
1468 * 1024 bytes per packet etc. The SBP-2 max_payload field
1469 * specifies the max payload size as 2 ^ (max_payload + 2), so
1470 * if we set this to max_speed + 7, we get the right value.
1471 */
1472 max_payload = min(device->max_speed + 7,
1473 device->card->max_receive - 1);
1474 orb->request.misc = cpu_to_be32( 1483 orb->request.misc = cpu_to_be32(
1475 COMMAND_ORB_MAX_PAYLOAD(max_payload) | 1484 COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) |
1476 COMMAND_ORB_SPEED(device->max_speed) | 1485 COMMAND_ORB_SPEED(device->max_speed) |
1477 COMMAND_ORB_NOTIFY); 1486 COMMAND_ORB_NOTIFY);
1478 1487
@@ -1491,8 +1500,10 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1491 orb->base.request_bus = 1500 orb->base.request_bus =
1492 dma_map_single(device->card->device, &orb->request, 1501 dma_map_single(device->card->device, &orb->request,
1493 sizeof(orb->request), DMA_TO_DEVICE); 1502 sizeof(orb->request), DMA_TO_DEVICE);
1494 if (dma_mapping_error(device->card->device, orb->base.request_bus)) 1503 if (dma_mapping_error(device->card->device, orb->base.request_bus)) {
1504 sbp2_unmap_scatterlist(device->card->device, orb);
1495 goto out; 1505 goto out;
1506 }
1496 1507
1497 sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation, 1508 sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation,
1498 lu->command_block_agent_address + SBP2_ORB_POINTER); 1509 lu->command_block_agent_address + SBP2_ORB_POINTER);
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index c9be6e6948c4..8dd6703b55cd 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -518,6 +518,18 @@ fw_core_handle_bus_reset(struct fw_card *card,
518 struct fw_node *local_node; 518 struct fw_node *local_node;
519 unsigned long flags; 519 unsigned long flags;
520 520
521 /*
522 * If the selfID buffer is not the immediate successor of the
523 * previously processed one, we cannot reliably compare the
524 * old and new topologies.
525 */
526 if (!is_next_generation(generation, card->generation) &&
527 card->local_node != NULL) {
528 fw_notify("skipped bus generations, destroying all nodes\n");
529 fw_destroy_nodes(card);
530 card->bm_retries = 0;
531 }
532
521 spin_lock_irqsave(&card->lock, flags); 533 spin_lock_irqsave(&card->lock, flags);
522 534
523 card->node_id = node_id; 535 card->node_id = node_id;
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index c9ab12a15f6e..1d78e9cc5940 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -276,6 +276,15 @@ static inline void fw_card_put(struct fw_card *card)
276extern void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); 276extern void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
277 277
278/* 278/*
279 * Check whether new_generation is the immediate successor of old_generation.
280 * Take counter roll-over at 255 (as per OHCI) into account.
281 */
282static inline bool is_next_generation(int new_generation, int old_generation)
283{
284 return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
285}
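A standalone check of the roll-over behaviour the helper is meant to capture (it simply mirrors the inline function above):

#include <stdbool.h>
#include <stdio.h>

static bool is_next_generation(int new_generation, int old_generation)
{
	return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
}

int main(void)
{
	printf("%d\n", is_next_generation(6, 5));	/* 1: immediate successor */
	printf("%d\n", is_next_generation(0, 255));	/* 1: roll-over at 255 */
	printf("%d\n", is_next_generation(7, 5));	/* 0: a generation was skipped */
	return 0;
}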
286
287/*
279 * The iso packet format allows for an immediate header/payload part 288 * The iso packet format allows for an immediate header/payload part
280 * stored in 'header' immediately after the packet info plus an 289 * stored in 'header' immediately after the packet info plus an
281 * indirect payload part that is pointed to by the 'payload' field. 290 * indirect payload part that is pointed to by the 'payload' field.
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index d76adfea5df7..8f0f7c449305 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -415,6 +415,29 @@ void __init dmi_scan_machine(void)
415} 415}
416 416
417/** 417/**
418 * dmi_matches - check if dmi_system_id structure matches system DMI data
419 * @dmi: pointer to the dmi_system_id structure to check
420 */
421static bool dmi_matches(const struct dmi_system_id *dmi)
422{
423 int i;
424
425 WARN(!dmi_initialized, KERN_ERR "dmi check: not initialized yet.\n");
426
427 for (i = 0; i < ARRAY_SIZE(dmi->matches); i++) {
428 int s = dmi->matches[i].slot;
429 if (s == DMI_NONE)
430 continue;
431 if (dmi_ident[s]
432 && strstr(dmi_ident[s], dmi->matches[i].substr))
433 continue;
434 /* No match */
435 return false;
436 }
437 return true;
438}
439
440/**
418 * dmi_check_system - check system DMI data 441 * dmi_check_system - check system DMI data
419 * @list: array of dmi_system_id structures to match against 442 * @list: array of dmi_system_id structures to match against
420 * All non-null elements of the list must match 443 * All non-null elements of the list must match
@@ -429,32 +452,45 @@ void __init dmi_scan_machine(void)
429 */ 452 */
430int dmi_check_system(const struct dmi_system_id *list) 453int dmi_check_system(const struct dmi_system_id *list)
431{ 454{
432 int i, count = 0; 455 int count = 0;
433 const struct dmi_system_id *d = list; 456 const struct dmi_system_id *d;
434 457
435 WARN(!dmi_initialized, KERN_ERR "dmi check: not initialized yet.\n"); 458 for (d = list; d->ident; d++)
436 459 if (dmi_matches(d)) {
437 while (d->ident) { 460 count++;
438 for (i = 0; i < ARRAY_SIZE(d->matches); i++) { 461 if (d->callback && d->callback(d))
439 int s = d->matches[i].slot; 462 break;
440 if (s == DMI_NONE)
441 continue;
442 if (dmi_ident[s] && strstr(dmi_ident[s], d->matches[i].substr))
443 continue;
444 /* No match */
445 goto fail;
446 } 463 }
447 count++;
448 if (d->callback && d->callback(d))
449 break;
450fail: d++;
451 }
452 464
453 return count; 465 return count;
454} 466}
455EXPORT_SYMBOL(dmi_check_system); 467EXPORT_SYMBOL(dmi_check_system);
456 468
457/** 469/**
470 * dmi_first_match - find dmi_system_id structure matching system DMI data
471 * @list: array of dmi_system_id structures to match against
472 * All non-null elements of the list must match
473 * their slot's (field index's) data (i.e., each
474 * list string must be a substring of the specified
475 * DMI slot's string data) to be considered a
476 * successful match.
477 *
478 * Walk the blacklist table until the first match is found. Return the
479 * pointer to the matching entry or NULL if there's no match.
480 */
481const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list)
482{
483 const struct dmi_system_id *d;
484
485 for (d = list; d->ident; d++)
486 if (dmi_matches(d))
487 return d;
488
489 return NULL;
490}
491EXPORT_SYMBOL(dmi_first_match);
492
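A hedged sketch of how a caller might use the new helper with a quirk table; the vendor, product and table below are purely illustrative:

#include <linux/dmi.h>

static const struct dmi_system_id example_quirks[] = {
	{
		.ident = "Example Laptop",		/* hypothetical entry */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Model X"),
		},
	},
	{ }	/* terminator: d->ident == NULL ends the walk */
};

static bool example_has_quirk(void)
{
	return dmi_first_match(example_quirks) != NULL;
}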
493/**
458 * dmi_get_system_info - return DMI data value 494 * dmi_get_system_info - return DMI data value
459 * @field: data index (see enum dmi_field) 495 * @field: data index (see enum dmi_field)
460 * 496 *
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 35e7aea4222c..42fb2fd24c0c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -789,6 +789,7 @@ int gpio_request(unsigned gpio, const char *label)
789 } else { 789 } else {
790 status = -EBUSY; 790 status = -EBUSY;
791 module_put(chip->owner); 791 module_put(chip->owner);
792 goto done;
792 } 793 }
793 794
794 if (chip->request) { 795 if (chip->request) {
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 3d33b8252b58..14796594e5d9 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -33,10 +33,11 @@
33 33
34#include "drmP.h" 34#include "drmP.h"
35#include <linux/module.h> 35#include <linux/module.h>
36#include <asm/agp.h>
37 36
38#if __OS_HAS_AGP 37#if __OS_HAS_AGP
39 38
39#include <asm/agp.h>
40
40/** 41/**
41 * Get AGP information. 42 * Get AGP information.
42 * 43 *
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5b2cbb778162..bfce0992fefb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -194,7 +194,6 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
194 * @type: object type 194 * @type: object type
195 * 195 *
196 * LOCKING: 196 * LOCKING:
197 * Caller must hold DRM mode_config lock.
198 * 197 *
199 * Create a unique identifier based on @ptr in @dev's identifier space. Used 198 * Create a unique identifier based on @ptr in @dev's identifier space. Used
200 * for tracking modes, CRTCs and connectors. 199 * for tracking modes, CRTCs and connectors.
@@ -209,15 +208,15 @@ static int drm_mode_object_get(struct drm_device *dev,
209 int new_id = 0; 208 int new_id = 0;
210 int ret; 209 int ret;
211 210
212 WARN(!mutex_is_locked(&dev->mode_config.mutex),
213 "%s called w/o mode_config lock\n", __func__);
214again: 211again:
215 if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) { 212 if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
216 DRM_ERROR("Ran out of memory getting a mode number\n"); 213 DRM_ERROR("Ran out of memory getting a mode number\n");
217 return -EINVAL; 214 return -EINVAL;
218 } 215 }
219 216
217 mutex_lock(&dev->mode_config.idr_mutex);
220 ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id); 218 ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
219 mutex_unlock(&dev->mode_config.idr_mutex);
221 if (ret == -EAGAIN) 220 if (ret == -EAGAIN)
222 goto again; 221 goto again;
223 222
@@ -239,16 +238,20 @@ again:
239static void drm_mode_object_put(struct drm_device *dev, 238static void drm_mode_object_put(struct drm_device *dev,
240 struct drm_mode_object *object) 239 struct drm_mode_object *object)
241{ 240{
241 mutex_lock(&dev->mode_config.idr_mutex);
242 idr_remove(&dev->mode_config.crtc_idr, object->id); 242 idr_remove(&dev->mode_config.crtc_idr, object->id);
243 mutex_unlock(&dev->mode_config.idr_mutex);
243} 244}
244 245
245void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type) 246void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
246{ 247{
247 struct drm_mode_object *obj; 248 struct drm_mode_object *obj = NULL;
248 249
250 mutex_lock(&dev->mode_config.idr_mutex);
249 obj = idr_find(&dev->mode_config.crtc_idr, id); 251 obj = idr_find(&dev->mode_config.crtc_idr, id);
250 if (!obj || (obj->type != type) || (obj->id != id)) 252 if (!obj || (obj->type != type) || (obj->id != id))
251 return NULL; 253 obj = NULL;
254 mutex_unlock(&dev->mode_config.idr_mutex);
252 255
253 return obj; 256 return obj;
254} 257}
@@ -786,6 +789,7 @@ EXPORT_SYMBOL(drm_mode_create_dithering_property);
786void drm_mode_config_init(struct drm_device *dev) 789void drm_mode_config_init(struct drm_device *dev)
787{ 790{
788 mutex_init(&dev->mode_config.mutex); 791 mutex_init(&dev->mode_config.mutex);
792 mutex_init(&dev->mode_config.idr_mutex);
789 INIT_LIST_HEAD(&dev->mode_config.fb_list); 793 INIT_LIST_HEAD(&dev->mode_config.fb_list);
790 INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list); 794 INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
791 INIT_LIST_HEAD(&dev->mode_config.crtc_list); 795 INIT_LIST_HEAD(&dev->mode_config.crtc_list);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 5ff88d952226..14c7a23dc157 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -294,6 +294,7 @@ EXPORT_SYMBOL(drm_init);
294 */ 294 */
295static void drm_cleanup(struct drm_device * dev) 295static void drm_cleanup(struct drm_device * dev)
296{ 296{
297 struct drm_map_list *r_list, *list_temp;
297 DRM_DEBUG("\n"); 298 DRM_DEBUG("\n");
298 299
299 if (!dev) { 300 if (!dev) {
@@ -325,6 +326,9 @@ static void drm_cleanup(struct drm_device * dev)
325 drm_ht_remove(&dev->map_hash); 326 drm_ht_remove(&dev->map_hash);
326 drm_ctxbitmap_cleanup(dev); 327 drm_ctxbitmap_cleanup(dev);
327 328
329 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
330 drm_rmmap(dev, r_list->map);
331
328 if (drm_core_check_feature(dev, DRIVER_MODESET)) 332 if (drm_core_check_feature(dev, DRIVER_MODESET))
329 drm_put_minor(&dev->control); 333 drm_put_minor(&dev->control);
330 334
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0fbb0da342cb..5a4d3244758a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -660,7 +660,7 @@ struct edid *drm_get_edid(struct drm_connector *connector,
660 660
661 edid = (struct edid *)drm_ddc_read(adapter); 661 edid = (struct edid *)drm_ddc_read(adapter);
662 if (!edid) { 662 if (!edid) {
663 dev_warn(&connector->dev->pdev->dev, "%s: no EDID data\n", 663 dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n",
664 drm_get_connector_name(connector)); 664 drm_get_connector_name(connector));
665 return NULL; 665 return NULL;
666 } 666 }
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 9da581452874..6915fb82d0b0 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -136,7 +136,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
136 obj = kcalloc(1, sizeof(*obj), GFP_KERNEL); 136 obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
137 137
138 obj->dev = dev; 138 obj->dev = dev;
139 obj->filp = shmem_file_setup("drm mm object", size, 0); 139 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
140 if (IS_ERR(obj->filp)) { 140 if (IS_ERR(obj->filp)) {
141 kfree(obj); 141 kfree(obj);
142 return NULL; 142 return NULL;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 477caa1b1e4b..69aa0ab28403 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -106,8 +106,6 @@ void drm_vblank_cleanup(struct drm_device *dev)
106 106
107 drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs, 107 drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
108 DRM_MEM_DRIVER); 108 DRM_MEM_DRIVER);
109 drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
110 DRM_MEM_DRIVER);
111 drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) * 109 drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
112 dev->num_crtcs, DRM_MEM_DRIVER); 110 dev->num_crtcs, DRM_MEM_DRIVER);
113 drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) * 111 drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
@@ -132,7 +130,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
132 setup_timer(&dev->vblank_disable_timer, vblank_disable_fn, 130 setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
133 (unsigned long)dev); 131 (unsigned long)dev);
134 spin_lock_init(&dev->vbl_lock); 132 spin_lock_init(&dev->vbl_lock);
135 atomic_set(&dev->vbl_signal_pending, 0);
136 dev->num_crtcs = num_crtcs; 133 dev->num_crtcs = num_crtcs;
137 134
138 dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs, 135 dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
@@ -140,11 +137,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
140 if (!dev->vbl_queue) 137 if (!dev->vbl_queue)
141 goto err; 138 goto err;
142 139
143 dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
144 DRM_MEM_DRIVER);
145 if (!dev->vbl_sigs)
146 goto err;
147
148 dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs, 140 dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
149 DRM_MEM_DRIVER); 141 DRM_MEM_DRIVER);
150 if (!dev->_vblank_count) 142 if (!dev->_vblank_count)
@@ -177,7 +169,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
177 /* Zero per-crtc vblank stuff */ 169 /* Zero per-crtc vblank stuff */
178 for (i = 0; i < num_crtcs; i++) { 170 for (i = 0; i < num_crtcs; i++) {
179 init_waitqueue_head(&dev->vbl_queue[i]); 171 init_waitqueue_head(&dev->vbl_queue[i]);
180 INIT_LIST_HEAD(&dev->vbl_sigs[i]);
181 atomic_set(&dev->_vblank_count[i], 0); 172 atomic_set(&dev->_vblank_count[i], 0);
182 atomic_set(&dev->vblank_refcount[i], 0); 173 atomic_set(&dev->vblank_refcount[i], 0);
183 } 174 }
@@ -540,15 +531,10 @@ out:
540 * \param data user argument, pointing to a drm_wait_vblank structure. 531 * \param data user argument, pointing to a drm_wait_vblank structure.
541 * \return zero on success or a negative number on failure. 532 * \return zero on success or a negative number on failure.
542 * 533 *
543 * Verifies the IRQ is installed. 534 * This function enables the vblank interrupt on the pipe requested, then
544 * 535 * sleeps waiting for the requested sequence number to occur, and drops
545 * If a signal is requested checks if this task has already scheduled the same signal 536 * the vblank interrupt refcount afterwards. (vblank irq disable follows that
546 * for the same vblank sequence number - nothing to be done in 537 * after a timeout with no further vblank waits scheduled).
547 * that case. If the number of tasks waiting for the interrupt exceeds 100 the
548 * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this
549 * task.
550 *
551 * If a signal is not requested, then calls vblank_wait().
552 */ 538 */
553int drm_wait_vblank(struct drm_device *dev, void *data, 539int drm_wait_vblank(struct drm_device *dev, void *data,
554 struct drm_file *file_priv) 540 struct drm_file *file_priv)
@@ -560,6 +546,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
560 if ((!dev->pdev->irq) || (!dev->irq_enabled)) 546 if ((!dev->pdev->irq) || (!dev->irq_enabled))
561 return -EINVAL; 547 return -EINVAL;
562 548
549 if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
550 return -EINVAL;
551
563 if (vblwait->request.type & 552 if (vblwait->request.type &
564 ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { 553 ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
565 DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", 554 DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
@@ -597,89 +586,26 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
597 vblwait->request.sequence = seq + 1; 586 vblwait->request.sequence = seq + 1;
598 } 587 }
599 588
600 if (flags & _DRM_VBLANK_SIGNAL) { 589 DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
601 unsigned long irqflags; 590 vblwait->request.sequence, crtc);
602 struct list_head *vbl_sigs = &dev->vbl_sigs[crtc]; 591 dev->last_vblank_wait[crtc] = vblwait->request.sequence;
603 struct drm_vbl_sig *vbl_sig; 592 DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
604 593 (((drm_vblank_count(dev, crtc) -
605 spin_lock_irqsave(&dev->vbl_lock, irqflags); 594 vblwait->request.sequence) <= (1 << 23)) ||
606 595 !dev->irq_enabled));
607 /* Check if this task has already scheduled the same signal
608 * for the same vblank sequence number; nothing to be done in
609 * that case
610 */
611 list_for_each_entry(vbl_sig, vbl_sigs, head) {
612 if (vbl_sig->sequence == vblwait->request.sequence
613 && vbl_sig->info.si_signo ==
614 vblwait->request.signal
615 && vbl_sig->task == current) {
616 spin_unlock_irqrestore(&dev->vbl_lock,
617 irqflags);
618 vblwait->reply.sequence = seq;
619 goto done;
620 }
621 }
622
623 if (atomic_read(&dev->vbl_signal_pending) >= 100) {
624 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
625 ret = -EBUSY;
626 goto done;
627 }
628
629 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
630
631 vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
632 DRM_MEM_DRIVER);
633 if (!vbl_sig) {
634 ret = -ENOMEM;
635 goto done;
636 }
637
638 /* Get a refcount on the vblank, which will be released by
639 * drm_vbl_send_signals().
640 */
641 ret = drm_vblank_get(dev, crtc);
642 if (ret) {
643 drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
644 DRM_MEM_DRIVER);
645 goto done;
646 }
647
648 atomic_inc(&dev->vbl_signal_pending);
649
650 vbl_sig->sequence = vblwait->request.sequence;
651 vbl_sig->info.si_signo = vblwait->request.signal;
652 vbl_sig->task = current;
653 596
654 spin_lock_irqsave(&dev->vbl_lock, irqflags); 597 if (ret != -EINTR) {
655 598 struct timeval now;
656 list_add_tail(&vbl_sig->head, vbl_sigs);
657 599
658 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 600 do_gettimeofday(&now);
659 601
660 vblwait->reply.sequence = seq; 602 vblwait->reply.tval_sec = now.tv_sec;
603 vblwait->reply.tval_usec = now.tv_usec;
604 vblwait->reply.sequence = drm_vblank_count(dev, crtc);
605 DRM_DEBUG("returning %d to client\n",
606 vblwait->reply.sequence);
661 } else { 607 } else {
662 DRM_DEBUG("waiting on vblank count %d, crtc %d\n", 608 DRM_DEBUG("vblank wait interrupted by signal\n");
663 vblwait->request.sequence, crtc);
664 dev->last_vblank_wait[crtc] = vblwait->request.sequence;
665 DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
666 (((drm_vblank_count(dev, crtc) -
667 vblwait->request.sequence) <= (1 << 23)) ||
668 !dev->irq_enabled));
669
670 if (ret != -EINTR) {
671 struct timeval now;
672
673 do_gettimeofday(&now);
674
675 vblwait->reply.tval_sec = now.tv_sec;
676 vblwait->reply.tval_usec = now.tv_usec;
677 vblwait->reply.sequence = drm_vblank_count(dev, crtc);
678 DRM_DEBUG("returning %d to client\n",
679 vblwait->reply.sequence);
680 } else {
681 DRM_DEBUG("vblank wait interrupted by signal\n");
682 }
683 } 609 }
684 610
685done: 611done:
@@ -688,46 +614,6 @@ done:
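As a side note on the DRM_WAIT_ON condition used above, the unsigned subtraction keeps the sequence comparison well defined across counter wrap; a standalone illustration (values are arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int cases[][2] = {
		{ 100, 99 },		/* count has passed the request */
		{ 99, 100 },		/* request still in the future */
		{ 5, 0xfffffffbU },	/* count wrapped past the request */
	};
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int diff = cases[i][0] - cases[i][1];

		printf("count %u, requested %u -> %s\n",
		       cases[i][0], cases[i][1],
		       diff <= (1U << 23) ? "done" : "keep waiting");
	}
	return 0;
}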
688} 614}
689 615
690/** 616/**
691 * Send the VBLANK signals.
692 *
693 * \param dev DRM device.
694 * \param crtc CRTC where the vblank event occurred
695 *
696 * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
697 *
698 * If a signal is not requested, then calls vblank_wait().
699 */
700static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
701{
702 struct drm_vbl_sig *vbl_sig, *tmp;
703 struct list_head *vbl_sigs;
704 unsigned int vbl_seq;
705 unsigned long flags;
706
707 spin_lock_irqsave(&dev->vbl_lock, flags);
708
709 vbl_sigs = &dev->vbl_sigs[crtc];
710 vbl_seq = drm_vblank_count(dev, crtc);
711
712 list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
713 if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
714 vbl_sig->info.si_code = vbl_seq;
715 send_sig_info(vbl_sig->info.si_signo,
716 &vbl_sig->info, vbl_sig->task);
717
718 list_del(&vbl_sig->head);
719
720 drm_free(vbl_sig, sizeof(*vbl_sig),
721 DRM_MEM_DRIVER);
722 atomic_dec(&dev->vbl_signal_pending);
723 drm_vblank_put(dev, crtc);
724 }
725 }
726
727 spin_unlock_irqrestore(&dev->vbl_lock, flags);
728}
729
730/**
731 * drm_handle_vblank - handle a vblank event 617 * drm_handle_vblank - handle a vblank event
732 * @dev: DRM device 618 * @dev: DRM device
733 * @crtc: where this event occurred 619 * @crtc: where this event occurred
@@ -739,6 +625,5 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
739{ 625{
740 atomic_inc(&dev->_vblank_count[crtc]); 626 atomic_inc(&dev->_vblank_count[crtc]);
741 DRM_WAKEUP(&dev->vbl_queue[crtc]); 627 DRM_WAKEUP(&dev->vbl_queue[crtc]);
742 drm_vbl_send_signals(dev, crtc);
743} 628}
744EXPORT_SYMBOL(drm_handle_vblank); 629EXPORT_SYMBOL(drm_handle_vblank);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 5ca132afa4f2..46bb923b097c 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -118,12 +118,20 @@ static void drm_master_destroy(struct kref *kref)
118 struct drm_master *master = container_of(kref, struct drm_master, refcount); 118 struct drm_master *master = container_of(kref, struct drm_master, refcount);
119 struct drm_magic_entry *pt, *next; 119 struct drm_magic_entry *pt, *next;
120 struct drm_device *dev = master->minor->dev; 120 struct drm_device *dev = master->minor->dev;
121 struct drm_map_list *r_list, *list_temp;
121 122
122 list_del(&master->head); 123 list_del(&master->head);
123 124
124 if (dev->driver->master_destroy) 125 if (dev->driver->master_destroy)
125 dev->driver->master_destroy(dev, master); 126 dev->driver->master_destroy(dev, master);
126 127
128 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
129 if (r_list->master == master) {
130 drm_rmmap_locked(dev, r_list->map);
131 r_list = NULL;
132 }
133 }
134
127 if (master->unique) { 135 if (master->unique) {
128 drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER); 136 drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER);
129 master->unique = NULL; 137 master->unique = NULL;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index bbadf1c04142..ee64b7301f67 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -944,13 +944,14 @@ static int i915_load_modeset_init(struct drm_device *dev)
944 dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & 944 dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
945 0xff000000; 945 0xff000000;
946 946
947 DRM_DEBUG("*** fb base 0x%08lx\n", dev->mode_config.fb_base); 947 if (IS_MOBILE(dev) || IS_I9XX(dev))
948
949 if (IS_MOBILE(dev) || (IS_I9XX(dev) && !IS_I965G(dev) && !IS_G33(dev)))
950 dev_priv->cursor_needs_physical = true; 948 dev_priv->cursor_needs_physical = true;
951 else 949 else
952 dev_priv->cursor_needs_physical = false; 950 dev_priv->cursor_needs_physical = false;
953 951
952 if (IS_I965G(dev) || IS_G33(dev))
953 dev_priv->cursor_needs_physical = false;
954
954 ret = i915_probe_agp(dev, &agp_size, &prealloc_size); 955 ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
955 if (ret) 956 if (ret)
956 goto kfree_devname; 957 goto kfree_devname;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 96316fd47233..debad5c04cc0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3364,7 +3364,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
3364{ 3364{
3365 int i; 3365 int i;
3366 3366
3367 for (i = 0; i < I915_MAX_PHYS_OBJECT; i++) 3367 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3368 i915_gem_free_phys_object(dev, i); 3368 i915_gem_free_phys_object(dev, i);
3369} 3369}
3370 3370
@@ -3427,7 +3427,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
3427 ret = i915_gem_init_phys_object(dev, id, 3427 ret = i915_gem_init_phys_object(dev, id,
3428 obj->size); 3428 obj->size);
3429 if (ret) { 3429 if (ret) {
3430 DRM_ERROR("failed to init phys object %d size: %d\n", id, obj->size); 3430 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
3431 goto out; 3431 goto out;
3432 } 3432 }
3433 } 3433 }
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index a5a2f5339e9e..5ee9d4c25753 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -137,10 +137,6 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
137 chan->reg = reg; 137 chan->reg = reg;
138 snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name); 138 snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
139 chan->adapter.owner = THIS_MODULE; 139 chan->adapter.owner = THIS_MODULE;
140#ifndef I2C_HW_B_INTELFB
141#define I2C_HW_B_INTELFB I2C_HW_B_I810
142#endif
143 chan->adapter.id = I2C_HW_B_INTELFB;
144 chan->adapter.algo_data = &chan->algo; 140 chan->adapter.algo_data = &chan->algo;
145 chan->adapter.dev.parent = &dev->pdev->dev; 141 chan->adapter.dev.parent = &dev->pdev->dev;
146 chan->algo.setsda = set_data; 142 chan->algo.setsda = set_data;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 2fafdcc108fe..b36a5214d8df 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -311,7 +311,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
311 if (dev_priv->panel_fixed_mode != NULL) { 311 if (dev_priv->panel_fixed_mode != NULL) {
312 struct drm_display_mode *mode; 312 struct drm_display_mode *mode;
313 313
314 mutex_unlock(&dev->mode_config.mutex); 314 mutex_lock(&dev->mode_config.mutex);
315 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); 315 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
316 drm_mode_probed_add(connector, mode); 316 drm_mode_probed_add(connector, mode);
317 mutex_unlock(&dev->mode_config.mutex); 317 mutex_unlock(&dev->mode_config.mutex);
@@ -340,6 +340,18 @@ static void intel_lvds_destroy(struct drm_connector *connector)
340 kfree(connector); 340 kfree(connector);
341} 341}
342 342
343static int intel_lvds_set_property(struct drm_connector *connector,
344 struct drm_property *property,
345 uint64_t value)
346{
347 struct drm_device *dev = connector->dev;
348
349 if (property == dev->mode_config.dpms_property && connector->encoder)
350 intel_lvds_dpms(connector->encoder, (uint32_t)(value & 0xf));
351
352 return 0;
353}
354
343static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { 355static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
344 .dpms = intel_lvds_dpms, 356 .dpms = intel_lvds_dpms,
345 .mode_fixup = intel_lvds_mode_fixup, 357 .mode_fixup = intel_lvds_mode_fixup,
@@ -359,6 +371,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
359 .restore = intel_lvds_restore, 371 .restore = intel_lvds_restore,
360 .detect = intel_lvds_detect, 372 .detect = intel_lvds_detect,
361 .fill_modes = drm_helper_probe_single_connector_modes, 373 .fill_modes = drm_helper_probe_single_connector_modes,
374 .set_property = intel_lvds_set_property,
362 .destroy = intel_lvds_destroy, 375 .destroy = intel_lvds_destroy,
363}; 376};
364 377
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 5d7640e49dc5..6cad69ed21c5 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1218,6 +1218,7 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
1218} 1218}
1219EXPORT_SYMBOL_GPL(hid_connect); 1219EXPORT_SYMBOL_GPL(hid_connect);
1220 1220
1221/* a list of devices for which there is a specialized driver on HID bus */
1221static const struct hid_device_id hid_blacklist[] = { 1222static const struct hid_device_id hid_blacklist[] = {
1222 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, 1223 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
1223 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, 1224 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
@@ -1476,6 +1477,7 @@ static struct bus_type hid_bus_type = {
1476 .uevent = hid_uevent, 1477 .uevent = hid_uevent,
1477}; 1478};
1478 1479
1480/* a list of devices that shouldn't be handled by HID core at all */
1479static const struct hid_device_id hid_ignore_list[] = { 1481static const struct hid_device_id hid_ignore_list[] = {
1480 { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR) }, 1482 { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR) },
1481 { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302) }, 1483 { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302) },
@@ -1606,6 +1608,8 @@ static const struct hid_device_id hid_ignore_list[] = {
1606 { HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD) }, 1608 { HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD) },
1607 { HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD2) }, 1609 { HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD2) },
1608 { HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD3) }, 1610 { HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD3) },
1611 { HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD4) },
1612 { HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD5) },
1609 { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY1) }, 1613 { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY1) },
1610 { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY2) }, 1614 { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY2) },
1611 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, 1615 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index acc1abc834a4..e899f510ebeb 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -362,6 +362,8 @@
362#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD 0x0038 362#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD 0x0038
363#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD2 0x0036 363#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD2 0x0036
364#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD3 0x0034 364#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD3 0x0034
365#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD4 0x0044
366#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD5 0x0045
365 367
366#define USB_VENDOR_ID_SUN 0x0430 368#define USB_VENDOR_ID_SUN 0x0430
367#define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab 369#define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index d718b1607d0f..25b10dcad90d 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -30,7 +30,7 @@
30#define MS_NOGET 0x10 30#define MS_NOGET 0x10
31 31
32/* 32/*
33 * Microsoft Wireless Desktop Receiver (Model 1028) has several 33 * Microsoft Wireless Desktop Receiver (Model 1028) has
34 * 'Usage Min/Max' where it ought to have 'Physical Min/Max' 34 * 'Usage Min/Max' where it ought to have 'Physical Min/Max'
35 */ 35 */
36static void ms_report_fixup(struct hid_device *hdev, __u8 *rdesc, 36static void ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -38,17 +38,12 @@ static void ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
38{ 38{
39 unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); 39 unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
40 40
41 if ((quirks & MS_RDESC) && rsize == 571 && rdesc[284] == 0x19 && 41 if ((quirks & MS_RDESC) && rsize == 571 && rdesc[557] == 0x19 &&
42 rdesc[286] == 0x2a && rdesc[304] == 0x19 &&
43 rdesc[306] == 0x29 && rdesc[352] == 0x1a &&
44 rdesc[355] == 0x2a && rdesc[557] == 0x19 &&
45 rdesc[559] == 0x29) { 42 rdesc[559] == 0x29) {
46 dev_info(&hdev->dev, "fixing up Microsoft Wireless Receiver " 43 dev_info(&hdev->dev, "fixing up Microsoft Wireless Receiver "
47 "Model 1028 report descriptor\n"); 44 "Model 1028 report descriptor\n");
48 rdesc[284] = rdesc[304] = rdesc[557] = 0x35; 45 rdesc[557] = 0x35;
49 rdesc[352] = 0x36; 46 rdesc[559] = 0x45;
50 rdesc[286] = rdesc[355] = 0x46;
51 rdesc[306] = rdesc[559] = 0x45;
52 } 47 }
53} 48}
54 49
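
The fixup above now rewrites only the last pair of offsets it used to touch, but the guarded-rewrite pattern is unchanged: verify the descriptor size and the exact buggy bytes before patching anything. A self-contained sketch of that pattern using the values from the hunk (0x19/0x29 are the HID Usage Minimum/Maximum tags, 0x35/0x45 the Physical Minimum/Maximum tags); the harness around it is illustrative.

#include <stdio.h>
#include <string.h>

static void fixup_desc(unsigned char *rdesc, size_t rsize)
{
	/* Only touch descriptors that look exactly like the buggy one. */
	if (rsize == 571 && rdesc[557] == 0x19 && rdesc[559] == 0x29) {
		rdesc[557] = 0x35;	/* Usage Minimum -> Physical Minimum */
		rdesc[559] = 0x45;	/* Usage Maximum -> Physical Maximum */
	}
}

int main(void)
{
	unsigned char rdesc[571];

	memset(rdesc, 0, sizeof(rdesc));
	rdesc[557] = 0x19;
	rdesc[559] = 0x29;
	fixup_desc(rdesc, sizeof(rdesc));
	printf("%02x %02x\n", rdesc[557], rdesc[559]);	/* prints: 35 45 */
	return 0;
}
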
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index d73eea382ab3..4940e4d70c2d 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -656,7 +656,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
656 656
657 case HIDIOCGSTRING: 657 case HIDIOCGSTRING:
658 mutex_lock(&hiddev->existancelock); 658 mutex_lock(&hiddev->existancelock);
659 if (!hiddev->exist) 659 if (hiddev->exist)
660 r = hiddev_ioctl_string(hiddev, cmd, user_arg); 660 r = hiddev_ioctl_string(hiddev, cmd, user_arg);
661 else 661 else
662 r = -ENODEV; 662 r = -ENODEV;
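
The one-character fix above restores the usual pattern: the existence flag is only trustworthy while the lock that serializes disconnect is held, and the request is issued only if the device is still present. A userspace analogue of that pattern, with illustrative names:

#include <errno.h>
#include <pthread.h>

struct dev_state {
	pthread_mutex_t lock;	/* serializes use against disconnect */
	int exists;
};

static int do_string_request(struct dev_state *d)
{
	int r;

	pthread_mutex_lock(&d->lock);
	if (d->exists)
		r = 0;		/* issue the real request while locked */
	else
		r = -ENODEV;
	pthread_mutex_unlock(&d->lock);
	return r;
}

int main(void)
{
	struct dev_state d = { PTHREAD_MUTEX_INITIALIZER, 1 };

	return do_string_request(&d) ? 1 : 0;
}
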
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index e30186236588..678e34b01e52 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -83,7 +83,7 @@
83/* 83/*
84 * Temperature sensors keys (sp78 - 2 bytes). 84 * Temperature sensors keys (sp78 - 2 bytes).
85 */ 85 */
86static const char* temperature_sensors_sets[][36] = { 86static const char *temperature_sensors_sets[][41] = {
87/* Set 0: Macbook Pro */ 87/* Set 0: Macbook Pro */
88 { "TA0P", "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "Th0H", 88 { "TA0P", "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "Th0H",
89 "Th1H", "Tm0P", "Ts0P", "Ts1P", NULL }, 89 "Th1H", "Tm0P", "Ts0P", "Ts1P", NULL },
@@ -135,6 +135,13 @@ static const char* temperature_sensors_sets[][36] = {
135 { "TB0T", "TB1S", "TB1T", "TB2S", "TB2T", "TC0D", "TN0D", "TTF0", 135 { "TB0T", "TB1S", "TB1T", "TB2S", "TB2T", "TC0D", "TN0D", "TTF0",
136 "TV0P", "TVFP", "TW0P", "Th0P", "Tp0P", "Tp1P", "TpFP", "Ts0P", 136 "TV0P", "TVFP", "TW0P", "Th0P", "Tp0P", "Tp1P", "TpFP", "Ts0P",
137 "Ts0S", NULL }, 137 "Ts0S", NULL },
138/* Set 16: Mac Pro 3,1 (2 x Quad-Core) */
139 { "TA0P", "TCAG", "TCAH", "TCBG", "TCBH", "TC0C", "TC0D", "TC0P",
140 "TC1C", "TC1D", "TC2C", "TC2D", "TC3C", "TC3D", "TH0P", "TH1P",
141 "TH2P", "TH3P", "TMAP", "TMAS", "TMBS", "TM0P", "TM0S", "TM1P",
142 "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S",
143 "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S",
144 NULL },
138}; 145};
139 146
140/* List of keys used to read/write fan speeds */ 147/* List of keys used to read/write fan speeds */
@@ -1153,6 +1160,16 @@ static SENSOR_DEVICE_ATTR(temp34_input, S_IRUGO,
1153 applesmc_show_temperature, NULL, 33); 1160 applesmc_show_temperature, NULL, 33);
1154static SENSOR_DEVICE_ATTR(temp35_input, S_IRUGO, 1161static SENSOR_DEVICE_ATTR(temp35_input, S_IRUGO,
1155 applesmc_show_temperature, NULL, 34); 1162 applesmc_show_temperature, NULL, 34);
1163static SENSOR_DEVICE_ATTR(temp36_input, S_IRUGO,
1164 applesmc_show_temperature, NULL, 35);
1165static SENSOR_DEVICE_ATTR(temp37_input, S_IRUGO,
1166 applesmc_show_temperature, NULL, 36);
1167static SENSOR_DEVICE_ATTR(temp38_input, S_IRUGO,
1168 applesmc_show_temperature, NULL, 37);
1169static SENSOR_DEVICE_ATTR(temp39_input, S_IRUGO,
1170 applesmc_show_temperature, NULL, 38);
1171static SENSOR_DEVICE_ATTR(temp40_input, S_IRUGO,
1172 applesmc_show_temperature, NULL, 39);
1156 1173
1157static struct attribute *temperature_attributes[] = { 1174static struct attribute *temperature_attributes[] = {
1158 &sensor_dev_attr_temp1_input.dev_attr.attr, 1175 &sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -1190,6 +1207,11 @@ static struct attribute *temperature_attributes[] = {
1190 &sensor_dev_attr_temp33_input.dev_attr.attr, 1207 &sensor_dev_attr_temp33_input.dev_attr.attr,
1191 &sensor_dev_attr_temp34_input.dev_attr.attr, 1208 &sensor_dev_attr_temp34_input.dev_attr.attr,
1192 &sensor_dev_attr_temp35_input.dev_attr.attr, 1209 &sensor_dev_attr_temp35_input.dev_attr.attr,
1210 &sensor_dev_attr_temp36_input.dev_attr.attr,
1211 &sensor_dev_attr_temp37_input.dev_attr.attr,
1212 &sensor_dev_attr_temp38_input.dev_attr.attr,
1213 &sensor_dev_attr_temp39_input.dev_attr.attr,
1214 &sensor_dev_attr_temp40_input.dev_attr.attr,
1193 NULL 1215 NULL
1194}; 1216};
1195 1217
@@ -1312,6 +1334,8 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
1312 { .accelerometer = 0, .light = 0, .temperature_set = 14 }, 1334 { .accelerometer = 0, .light = 0, .temperature_set = 14 },
1313/* MacBook Air 2,1: accelerometer, backlight and temperature set 15 */ 1335/* MacBook Air 2,1: accelerometer, backlight and temperature set 15 */
1314 { .accelerometer = 1, .light = 1, .temperature_set = 15 }, 1336 { .accelerometer = 1, .light = 1, .temperature_set = 15 },
1337/* MacPro3,1: temperature set 16 */
1338 { .accelerometer = 0, .light = 0, .temperature_set = 16 },
1315}; 1339};
1316 1340
1317/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". 1341/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@@ -1369,6 +1393,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
1369 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1393 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1370 DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") }, 1394 DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") },
1371 &applesmc_dmi_data[4]}, 1395 &applesmc_dmi_data[4]},
1396 { applesmc_dmi_match, "Apple MacPro3", {
1397 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1398 DMI_MATCH(DMI_PRODUCT_NAME, "MacPro3") },
1399 &applesmc_dmi_data[16]},
1372 { applesmc_dmi_match, "Apple MacPro", { 1400 { applesmc_dmi_match, "Apple MacPro", {
1373 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1401 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1374 DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, 1402 DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") },
diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
index 75089febbc13..9fee3ca17344 100644
--- a/drivers/i2c/busses/i2c-acorn.c
+++ b/drivers/i2c/busses/i2c-acorn.c
@@ -83,7 +83,6 @@ static struct i2c_algo_bit_data ioc_data = {
83}; 83};
84 84
85static struct i2c_adapter ioc_ops = { 85static struct i2c_adapter ioc_ops = {
86 .id = I2C_HW_B_IOC,
87 .algo_data = &ioc_data, 86 .algo_data = &ioc_data,
88}; 87};
89 88
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 9cead9b9458e..981e080b32ae 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -476,7 +476,6 @@ static const struct i2c_algorithm smbus_algorithm = {
476 476
477static struct i2c_adapter ali1535_adapter = { 477static struct i2c_adapter ali1535_adapter = {
478 .owner = THIS_MODULE, 478 .owner = THIS_MODULE,
479 .id = I2C_HW_SMBUS_ALI1535,
480 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 479 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
481 .algo = &smbus_algorithm, 480 .algo = &smbus_algorithm,
482}; 481};
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index dd9e796fad69..f70f46582c6c 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -386,7 +386,6 @@ static const struct i2c_algorithm ali1563_algorithm = {
386 386
387static struct i2c_adapter ali1563_adapter = { 387static struct i2c_adapter ali1563_adapter = {
388 .owner = THIS_MODULE, 388 .owner = THIS_MODULE,
389 .id = I2C_HW_SMBUS_ALI1563,
390 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 389 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
391 .algo = &ali1563_algorithm, 390 .algo = &ali1563_algorithm,
392}; 391};
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 234fdde7d40e..39066dee46e3 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -473,7 +473,6 @@ static const struct i2c_algorithm smbus_algorithm = {
473 473
474static struct i2c_adapter ali15x3_adapter = { 474static struct i2c_adapter ali15x3_adapter = {
475 .owner = THIS_MODULE, 475 .owner = THIS_MODULE,
476 .id = I2C_HW_SMBUS_ALI15X3,
477 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 476 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
478 .algo = &smbus_algorithm, 477 .algo = &smbus_algorithm,
479}; 478};
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 36bee5b9c952..220f4a1eee1d 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -298,7 +298,6 @@ static const struct i2c_algorithm smbus_algorithm = {
298 298
299struct i2c_adapter amd756_smbus = { 299struct i2c_adapter amd756_smbus = {
300 .owner = THIS_MODULE, 300 .owner = THIS_MODULE,
301 .id = I2C_HW_SMBUS_AMD756,
302 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 301 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
303 .algo = &smbus_algorithm, 302 .algo = &smbus_algorithm,
304}; 303};
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 3972208876b3..edab51973bf5 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -387,7 +387,6 @@ static int __devinit amd8111_probe(struct pci_dev *dev,
387 smbus->adapter.owner = THIS_MODULE; 387 smbus->adapter.owner = THIS_MODULE;
388 snprintf(smbus->adapter.name, sizeof(smbus->adapter.name), 388 snprintf(smbus->adapter.name, sizeof(smbus->adapter.name),
389 "SMBus2 AMD8111 adapter at %04x", smbus->base); 389 "SMBus2 AMD8111 adapter at %04x", smbus->base);
390 smbus->adapter.id = I2C_HW_SMBUS_AMD8111;
391 smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 390 smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
392 smbus->adapter.algo = &smbus_algorithm; 391 smbus->adapter.algo = &smbus_algorithm;
393 smbus->adapter.algo_data = smbus; 392 smbus->adapter.algo_data = smbus;
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 66a04c2c660f..f78ce523e3db 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -400,7 +400,6 @@ i2c_au1550_probe(struct platform_device *pdev)
400 priv->xfer_timeout = 200; 400 priv->xfer_timeout = 200;
401 priv->ack_timeout = 200; 401 priv->ack_timeout = 200;
402 402
403 priv->adap.id = I2C_HW_AU1550_PSC;
404 priv->adap.nr = pdev->id; 403 priv->adap.nr = pdev->id;
405 priv->adap.algo = &au1550_algo; 404 priv->adap.algo = &au1550_algo;
406 priv->adap.algo_data = priv; 405 priv->adap.algo_data = priv;
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 3fd2c417c1e0..fc548b3d002e 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -651,7 +651,6 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
651 iface->timeout_timer.data = (unsigned long)iface; 651 iface->timeout_timer.data = (unsigned long)iface;
652 652
653 p_adap = &iface->adap; 653 p_adap = &iface->adap;
654 p_adap->id = I2C_HW_BLACKFIN;
655 p_adap->nr = pdev->id; 654 p_adap->nr = pdev->id;
656 strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name)); 655 strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name));
657 p_adap->algo = &bfin_twi_algorithm; 656 p_adap->algo = &bfin_twi_algorithm;
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 0ed3ccb81b63..448b4bf35eb7 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -202,7 +202,6 @@ static struct i2c_algo_pcf_data pcf_isa_data = {
202static struct i2c_adapter pcf_isa_ops = { 202static struct i2c_adapter pcf_isa_ops = {
203 .owner = THIS_MODULE, 203 .owner = THIS_MODULE,
204 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 204 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
205 .id = I2C_HW_P_ELEK,
206 .algo_data = &pcf_isa_data, 205 .algo_data = &pcf_isa_data,
207 .name = "i2c-elektor", 206 .name = "i2c-elektor",
208}; 207};
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index 648aa7baff83..bec9b845dd16 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -102,7 +102,6 @@ static struct i2c_algo_bit_data hydra_bit_data = {
102static struct i2c_adapter hydra_adap = { 102static struct i2c_adapter hydra_adap = {
103 .owner = THIS_MODULE, 103 .owner = THIS_MODULE,
104 .name = "Hydra i2c", 104 .name = "Hydra i2c",
105 .id = I2C_HW_B_HYDRA,
106 .algo_data = &hydra_bit_data, 105 .algo_data = &hydra_bit_data,
107}; 106};
108 107
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 526625eaa84b..230238df56c4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -556,7 +556,6 @@ static const struct i2c_algorithm smbus_algorithm = {
556 556
557static struct i2c_adapter i801_adapter = { 557static struct i2c_adapter i801_adapter = {
558 .owner = THIS_MODULE, 558 .owner = THIS_MODULE,
559 .id = I2C_HW_SMBUS_I801,
560 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 559 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
561 .algo = &smbus_algorithm, 560 .algo = &smbus_algorithm,
562}; 561};
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 651f2f1ae5b7..88f0db73b364 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -746,7 +746,6 @@ static int __devinit iic_probe(struct of_device *ofdev,
746 adap->dev.parent = &ofdev->dev; 746 adap->dev.parent = &ofdev->dev;
747 strlcpy(adap->name, "IBM IIC", sizeof(adap->name)); 747 strlcpy(adap->name, "IBM IIC", sizeof(adap->name));
748 i2c_set_adapdata(adap, dev); 748 i2c_set_adapdata(adap, dev);
749 adap->id = I2C_HW_OCP;
750 adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 749 adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
751 adap->algo = &iic_algo; 750 adap->algo = &iic_algo;
752 adap->timeout = 1; 751 adap->timeout = 1;
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index fc2714ac0c0f..3190690c26ce 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -480,7 +480,6 @@ iop3xx_i2c_probe(struct platform_device *pdev)
480 } 480 }
481 481
482 memcpy(new_adapter->name, pdev->name, strlen(pdev->name)); 482 memcpy(new_adapter->name, pdev->name, strlen(pdev->name));
483 new_adapter->id = I2C_HW_IOP3XX;
484 new_adapter->owner = THIS_MODULE; 483 new_adapter->owner = THIS_MODULE;
485 new_adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 484 new_adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
486 new_adapter->dev.parent = &pdev->dev; 485 new_adapter->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
index 05d72e981353..8e8467970481 100644
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ b/drivers/i2c/busses/i2c-ixp2000.c
@@ -116,7 +116,6 @@ static int ixp2000_i2c_probe(struct platform_device *plat_dev)
116 drv_data->algo_data.udelay = 6; 116 drv_data->algo_data.udelay = 6;
117 drv_data->algo_data.timeout = 100; 117 drv_data->algo_data.timeout = 100;
118 118
119 drv_data->adapter.id = I2C_HW_B_IXP2000,
120 strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name, 119 strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name,
121 sizeof(drv_data->adapter.name)); 120 sizeof(drv_data->adapter.name));
122 drv_data->adapter.algo_data = &drv_data->algo_data, 121 drv_data->adapter.algo_data = &drv_data->algo_data,
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index a9a45fcc8544..aedbbe6618db 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -310,7 +310,6 @@ static const struct i2c_algorithm mpc_algo = {
310static struct i2c_adapter mpc_ops = { 310static struct i2c_adapter mpc_ops = {
311 .owner = THIS_MODULE, 311 .owner = THIS_MODULE,
312 .name = "MPC adapter", 312 .name = "MPC adapter",
313 .id = I2C_HW_MPC107,
314 .algo = &mpc_algo, 313 .algo = &mpc_algo,
315 .timeout = 1, 314 .timeout = 1,
316}; 315};
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 9e8118d2fe64..eeda276f8f16 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -527,7 +527,6 @@ mv64xxx_i2c_probe(struct platform_device *pd)
527 goto exit_unmap_regs; 527 goto exit_unmap_regs;
528 } 528 }
529 drv_data->adapter.dev.parent = &pd->dev; 529 drv_data->adapter.dev.parent = &pd->dev;
530 drv_data->adapter.id = I2C_HW_MV64XXX;
531 drv_data->adapter.algo = &mv64xxx_i2c_algo; 530 drv_data->adapter.algo = &mv64xxx_i2c_algo;
532 drv_data->adapter.owner = THIS_MODULE; 531 drv_data->adapter.owner = THIS_MODULE;
533 drv_data->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 532 drv_data->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 3b19bc41a60b..05af6cd7f270 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -355,7 +355,6 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
355 return -EBUSY; 355 return -EBUSY;
356 } 356 }
357 smbus->adapter.owner = THIS_MODULE; 357 smbus->adapter.owner = THIS_MODULE;
358 smbus->adapter.id = I2C_HW_SMBUS_NFORCE2;
359 smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 358 smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
360 smbus->adapter.algo = &smbus_algorithm; 359 smbus->adapter.algo = &smbus_algorithm;
361 smbus->adapter.algo_data = smbus; 360 smbus->adapter.algo_data = smbus;
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index b2b8380f6602..322c5691e38e 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -115,7 +115,6 @@ static struct i2c_algo_bit_data parport_algo_data = {
115static struct i2c_adapter parport_adapter = { 115static struct i2c_adapter parport_adapter = {
116 .owner = THIS_MODULE, 116 .owner = THIS_MODULE,
117 .class = I2C_CLASS_HWMON, 117 .class = I2C_CLASS_HWMON,
118 .id = I2C_HW_B_LP,
119 .algo_data = &parport_algo_data, 118 .algo_data = &parport_algo_data,
120 .name = "Parallel port adapter (light)", 119 .name = "Parallel port adapter (light)",
121}; 120};
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index a257cd5cd134..0d8998610c74 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -164,7 +164,6 @@ static void i2c_parport_attach (struct parport *port)
164 /* Fill the rest of the structure */ 164 /* Fill the rest of the structure */
165 adapter->adapter.owner = THIS_MODULE; 165 adapter->adapter.owner = THIS_MODULE;
166 adapter->adapter.class = I2C_CLASS_HWMON; 166 adapter->adapter.class = I2C_CLASS_HWMON;
167 adapter->adapter.id = I2C_HW_B_LP;
168 strlcpy(adapter->adapter.name, "Parallel port adapter", 167 strlcpy(adapter->adapter.name, "Parallel port adapter",
169 sizeof(adapter->adapter.name)); 168 sizeof(adapter->adapter.name));
170 adapter->algo_data = parport_algo_data; 169 adapter->algo_data = parport_algo_data;
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 9eb76268ec78..4aa8138cb0a9 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -101,7 +101,6 @@ static struct i2c_algo_pca_data pca_isa_data = {
101 101
102static struct i2c_adapter pca_isa_ops = { 102static struct i2c_adapter pca_isa_ops = {
103 .owner = THIS_MODULE, 103 .owner = THIS_MODULE,
104 .id = I2C_HW_A_ISA,
105 .algo_data = &pca_isa_data, 104 .algo_data = &pca_isa_data,
106 .name = "PCA9564 ISA Adapter", 105 .name = "PCA9564 ISA Adapter",
107 .timeout = 100, 106 .timeout = 100,
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index eaa9b387543e..761f9dd53620 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -403,7 +403,6 @@ static const struct i2c_algorithm smbus_algorithm = {
403 403
404static struct i2c_adapter piix4_adapter = { 404static struct i2c_adapter piix4_adapter = {
405 .owner = THIS_MODULE, 405 .owner = THIS_MODULE,
406 .id = I2C_HW_SMBUS_PIIX4,
407 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 406 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
408 .algo = &smbus_algorithm, 407 .algo = &smbus_algorithm,
409}; 408};
diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c
index 4ddefbf238e9..98b1ec489159 100644
--- a/drivers/i2c/busses/i2c-sibyte.c
+++ b/drivers/i2c/busses/i2c-sibyte.c
@@ -155,7 +155,6 @@ static struct i2c_algo_sibyte_data sibyte_board_data[2] = {
155static struct i2c_adapter sibyte_board_adapter[2] = { 155static struct i2c_adapter sibyte_board_adapter[2] = {
156 { 156 {
157 .owner = THIS_MODULE, 157 .owner = THIS_MODULE,
158 .id = I2C_HW_SIBYTE,
159 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 158 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
160 .algo = NULL, 159 .algo = NULL,
161 .algo_data = &sibyte_board_data[0], 160 .algo_data = &sibyte_board_data[0],
@@ -164,7 +163,6 @@ static struct i2c_adapter sibyte_board_adapter[2] = {
164 }, 163 },
165 { 164 {
166 .owner = THIS_MODULE, 165 .owner = THIS_MODULE,
167 .id = I2C_HW_SIBYTE,
168 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 166 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
169 .algo = NULL, 167 .algo = NULL,
170 .algo_data = &sibyte_board_data[1], 168 .algo_data = &sibyte_board_data[1],
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 8ce2daff985c..f320ab27da46 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -365,7 +365,6 @@ static const struct i2c_algorithm smbus_algorithm = {
365 365
366static struct i2c_adapter sis5595_adapter = { 366static struct i2c_adapter sis5595_adapter = {
367 .owner = THIS_MODULE, 367 .owner = THIS_MODULE,
368 .id = I2C_HW_SMBUS_SIS5595,
369 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 368 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
370 .algo = &smbus_algorithm, 369 .algo = &smbus_algorithm,
371}; 370};
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 9c9c016ff2b5..50c3610e6028 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -464,7 +464,6 @@ static const struct i2c_algorithm smbus_algorithm = {
464 464
465static struct i2c_adapter sis630_adapter = { 465static struct i2c_adapter sis630_adapter = {
466 .owner = THIS_MODULE, 466 .owner = THIS_MODULE,
467 .id = I2C_HW_SMBUS_SIS630,
468 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 467 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
469 .algo = &smbus_algorithm, 468 .algo = &smbus_algorithm,
470}; 469};
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index f1bba6396641..7e1594b40579 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -241,7 +241,6 @@ static const struct i2c_algorithm smbus_algorithm = {
241 241
242static struct i2c_adapter sis96x_adapter = { 242static struct i2c_adapter sis96x_adapter = {
243 .owner = THIS_MODULE, 243 .owner = THIS_MODULE,
244 .id = I2C_HW_SMBUS_SIS96X,
245 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 244 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
246 .algo = &smbus_algorithm, 245 .algo = &smbus_algorithm,
247}; 246};
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 29cef0433f34..8b24f192103a 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -83,7 +83,6 @@ static struct i2c_algo_bit_data bit_data = {
83 83
84static struct i2c_adapter vt586b_adapter = { 84static struct i2c_adapter vt586b_adapter = {
85 .owner = THIS_MODULE, 85 .owner = THIS_MODULE,
86 .id = I2C_HW_B_VIA,
87 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 86 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
88 .name = "VIA i2c", 87 .name = "VIA i2c",
89 .algo_data = &bit_data, 88 .algo_data = &bit_data,
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 9f194d9efd91..02e6f724b05f 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -321,7 +321,6 @@ static const struct i2c_algorithm smbus_algorithm = {
321 321
322static struct i2c_adapter vt596_adapter = { 322static struct i2c_adapter vt596_adapter = {
323 .owner = THIS_MODULE, 323 .owner = THIS_MODULE,
324 .id = I2C_HW_SMBUS_VIA2,
325 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 324 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
326 .algo = &smbus_algorithm, 325 .algo = &smbus_algorithm,
327}; 326};
diff --git a/drivers/i2c/busses/i2c-voodoo3.c b/drivers/i2c/busses/i2c-voodoo3.c
index 1d4ae26ba73d..1a474acc0ddd 100644
--- a/drivers/i2c/busses/i2c-voodoo3.c
+++ b/drivers/i2c/busses/i2c-voodoo3.c
@@ -163,7 +163,6 @@ static struct i2c_algo_bit_data voo_i2c_bit_data = {
163 163
164static struct i2c_adapter voodoo3_i2c_adapter = { 164static struct i2c_adapter voodoo3_i2c_adapter = {
165 .owner = THIS_MODULE, 165 .owner = THIS_MODULE,
166 .id = I2C_HW_B_VOO,
167 .class = I2C_CLASS_TV_ANALOG, 166 .class = I2C_CLASS_TV_ANALOG,
168 .name = "I2C Voodoo3/Banshee adapter", 167 .name = "I2C Voodoo3/Banshee adapter",
169 .algo_data = &voo_i2c_bit_data, 168 .algo_data = &voo_i2c_bit_data,
@@ -180,7 +179,6 @@ static struct i2c_algo_bit_data voo_ddc_bit_data = {
180 179
181static struct i2c_adapter voodoo3_ddc_adapter = { 180static struct i2c_adapter voodoo3_ddc_adapter = {
182 .owner = THIS_MODULE, 181 .owner = THIS_MODULE,
183 .id = I2C_HW_B_VOO,
184 .class = I2C_CLASS_DDC, 182 .class = I2C_CLASS_DDC,
185 .name = "DDC Voodoo3/Banshee adapter", 183 .name = "DDC Voodoo3/Banshee adapter",
186 .algo_data = &voo_ddc_bit_data, 184 .algo_data = &voo_ddc_bit_data,
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index ed794b145a11..648ecc6f60e6 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -440,7 +440,6 @@ static __init struct scx200_acb_iface *scx200_create_iface(const char *text,
440 i2c_set_adapdata(adapter, iface); 440 i2c_set_adapdata(adapter, iface);
441 snprintf(adapter->name, sizeof(adapter->name), "%s ACB%d", text, index); 441 snprintf(adapter->name, sizeof(adapter->name), "%s ACB%d", text, index);
442 adapter->owner = THIS_MODULE; 442 adapter->owner = THIS_MODULE;
443 adapter->id = I2C_HW_SMBUS_SCX200;
444 adapter->algo = &scx200_acb_algorithm; 443 adapter->algo = &scx200_acb_algorithm;
445 adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 444 adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
446 adapter->dev.parent = dev; 445 adapter->dev.parent = dev;
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
index e4c98539c517..162b74a04886 100644
--- a/drivers/i2c/busses/scx200_i2c.c
+++ b/drivers/i2c/busses/scx200_i2c.c
@@ -82,7 +82,6 @@ static struct i2c_algo_bit_data scx200_i2c_data = {
82static struct i2c_adapter scx200_i2c_ops = { 82static struct i2c_adapter scx200_i2c_ops = {
83 .owner = THIS_MODULE, 83 .owner = THIS_MODULE,
84 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 84 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
85 .id = I2C_HW_B_SCX200,
86 .algo_data = &scx200_i2c_data, 85 .algo_data = &scx200_i2c_data,
87 .name = "NatSemi SCx200 I2C", 86 .name = "NatSemi SCx200 I2C",
88}; 87};
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index b9bef04b7be4..c80312c1f382 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -16,43 +16,6 @@ config DS1682
16 This driver can also be built as a module. If so, the module 16 This driver can also be built as a module. If so, the module
17 will be called ds1682. 17 will be called ds1682.
18 18
19config AT24
20 tristate "EEPROMs from most vendors"
21 depends on SYSFS && EXPERIMENTAL
22 help
23 Enable this driver to get read/write support to most I2C EEPROMs,
24 after you configure the driver to know about each EEPROM on
25 your target board. Use these generic chip names, instead of
26 vendor-specific ones like at24c64 or 24lc02:
27
28 24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
29 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
30
31 Unless you like data loss puzzles, always be sure that any chip
32 you configure as a 24c32 (32 kbit) or larger is NOT really a
33 24c16 (16 kbit) or smaller, and vice versa. Marking the chip
34 as read-only won't help recover from this. Also, if your chip
35 has any software write-protect mechanism you may want to review the
36 code to make sure this driver won't turn it on by accident.
37
38 If you use this with an SMBus adapter instead of an I2C adapter,
39 full functionality is not available. Only smaller devices are
40 supported (24c16 and below, max 4 kByte).
41
42 This driver can also be built as a module. If so, the module
43 will be called at24.
44
45config SENSORS_EEPROM
46 tristate "EEPROM reader"
47 depends on EXPERIMENTAL
48 help
49 If you say yes here you get read-only access to the EEPROM data
50 available on modern memory DIMMs and Sony Vaio laptops. Such
51 EEPROMs could theoretically be available on other devices as well.
52
53 This driver can also be built as a module. If so, the module
54 will be called eeprom.
55
56config SENSORS_PCF8574 19config SENSORS_PCF8574
57 tristate "Philips PCF8574 and PCF8574A (DEPRECATED)" 20 tristate "Philips PCF8574 and PCF8574A (DEPRECATED)"
58 depends on EXPERIMENTAL && GPIO_PCF857X = "n" 21 depends on EXPERIMENTAL && GPIO_PCF857X = "n"
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index 00fcb5193ac2..d142f238a2de 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -11,8 +11,6 @@
11# 11#
12 12
13obj-$(CONFIG_DS1682) += ds1682.o 13obj-$(CONFIG_DS1682) += ds1682.o
14obj-$(CONFIG_AT24) += at24.o
15obj-$(CONFIG_SENSORS_EEPROM) += eeprom.o
16obj-$(CONFIG_SENSORS_MAX6875) += max6875.o 14obj-$(CONFIG_SENSORS_MAX6875) += max6875.o
17obj-$(CONFIG_SENSORS_PCA9539) += pca9539.o 15obj-$(CONFIG_SENSORS_PCA9539) += pca9539.o
18obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o 16obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o
diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c
index a5ba820d69bb..a638e952d67a 100644
--- a/drivers/ide/falconide.c
+++ b/drivers/ide/falconide.c
@@ -82,7 +82,7 @@ static const struct ide_tp_ops falconide_tp_ops = {
82 82
83static const struct ide_port_info falconide_port_info = { 83static const struct ide_port_info falconide_port_info = {
84 .tp_ops = &falconide_tp_ops, 84 .tp_ops = &falconide_tp_ops,
85 .host_flags = IDE_HFLAG_NO_DMA, 85 .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_SERIALIZE,
86}; 86};
87 87
88static void __init falconide_setup_ports(hw_regs_t *hw) 88static void __init falconide_setup_ports(hw_regs_t *hw)
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 312127ea443a..0db1ed9f5fc2 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -649,7 +649,8 @@ static int ide_register_port(ide_hwif_t *hwif)
649 /* register with global device tree */ 649 /* register with global device tree */
650 dev_set_name(&hwif->gendev, hwif->name); 650 dev_set_name(&hwif->gendev, hwif->name);
651 hwif->gendev.driver_data = hwif; 651 hwif->gendev.driver_data = hwif;
652 hwif->gendev.parent = hwif->dev; 652 if (hwif->gendev.parent == NULL)
653 hwif->gendev.parent = hwif->dev;
653 hwif->gendev.release = hwif_release_dev; 654 hwif->gendev.release = hwif_release_dev;
654 655
655 ret = device_register(&hwif->gendev); 656 ret = device_register(&hwif->gendev);
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index a7ac490c9ae3..f38aac78044c 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -346,7 +346,8 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
346{ 346{
347 struct clk *clk; 347 struct clk *clk;
348 struct resource *mem, *irq; 348 struct resource *mem, *irq;
349 unsigned long base, rate; 349 void __iomem *base;
350 unsigned long rate;
350 int i, rc; 351 int i, rc;
351 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 352 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
352 353
@@ -382,11 +383,13 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
382 base = IO_ADDRESS(mem->start); 383 base = IO_ADDRESS(mem->start);
383 384
384 /* Configure the Palm Chip controller */ 385 /* Configure the Palm Chip controller */
385 palm_bk3710_chipinit((void __iomem *)base); 386 palm_bk3710_chipinit(base);
386 387
387 for (i = 0; i < IDE_NR_PORTS - 2; i++) 388 for (i = 0; i < IDE_NR_PORTS - 2; i++)
388 hw.io_ports_array[i] = base + IDE_PALM_ATA_PRI_REG_OFFSET + i; 389 hw.io_ports_array[i] = (unsigned long)
389 hw.io_ports.ctl_addr = base + IDE_PALM_ATA_PRI_CTL_OFFSET; 390 (base + IDE_PALM_ATA_PRI_REG_OFFSET + i);
391 hw.io_ports.ctl_addr = (unsigned long)
392 (base + IDE_PALM_ATA_PRI_CTL_OFFSET);
390 hw.irq = irq->start; 393 hw.irq = irq->start;
391 hw.dev = &pdev->dev; 394 hw.dev = &pdev->dev;
392 hw.chipset = ide_palm3710; 395 hw.chipset = ide_palm3710;
diff --git a/drivers/ieee1394/ieee1394.h b/drivers/ieee1394/ieee1394.h
index e0ae0d3d747f..af320e2c5079 100644
--- a/drivers/ieee1394/ieee1394.h
+++ b/drivers/ieee1394/ieee1394.h
@@ -54,9 +54,7 @@
54#define IEEE1394_SPEED_800 0x03 54#define IEEE1394_SPEED_800 0x03
55#define IEEE1394_SPEED_1600 0x04 55#define IEEE1394_SPEED_1600 0x04
56#define IEEE1394_SPEED_3200 0x05 56#define IEEE1394_SPEED_3200 0x05
57 57#define IEEE1394_SPEED_MAX IEEE1394_SPEED_3200
58/* The current highest tested speed supported by the subsystem */
59#define IEEE1394_SPEED_MAX IEEE1394_SPEED_800
60 58
61/* Maps speed values above to a string representation */ 59/* Maps speed values above to a string representation */
62extern const char *hpsb_speedto_str[]; 60extern const char *hpsb_speedto_str[];
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index dcdb71a7718d..2beb8d94f7bd 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -338,6 +338,7 @@ static void build_speed_map(struct hpsb_host *host, int nodecount)
338 u8 cldcnt[nodecount]; 338 u8 cldcnt[nodecount];
339 u8 *map = host->speed_map; 339 u8 *map = host->speed_map;
340 u8 *speedcap = host->speed; 340 u8 *speedcap = host->speed;
341 u8 local_link_speed = host->csr.lnk_spd;
341 struct selfid *sid; 342 struct selfid *sid;
342 struct ext_selfid *esid; 343 struct ext_selfid *esid;
343 int i, j, n; 344 int i, j, n;
@@ -373,8 +374,8 @@ static void build_speed_map(struct hpsb_host *host, int nodecount)
373 if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++; 374 if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++;
374 375
375 speedcap[n] = sid->speed; 376 speedcap[n] = sid->speed;
376 if (speedcap[n] > host->csr.lnk_spd) 377 if (speedcap[n] > local_link_speed)
377 speedcap[n] = host->csr.lnk_spd; 378 speedcap[n] = local_link_speed;
378 n--; 379 n--;
379 } 380 }
380 } 381 }
@@ -407,12 +408,11 @@ static void build_speed_map(struct hpsb_host *host, int nodecount)
407 } 408 }
408 } 409 }
409 410
410#if SELFID_SPEED_UNKNOWN != IEEE1394_SPEED_MAX 411 /* assume a maximum speed for 1394b PHYs, nodemgr will correct it */
411 /* assume maximum speed for 1394b PHYs, nodemgr will correct it */ 412 if (local_link_speed > SELFID_SPEED_UNKNOWN)
412 for (n = 0; n < nodecount; n++) 413 for (i = 0; i < nodecount; i++)
413 if (speedcap[n] == SELFID_SPEED_UNKNOWN) 414 if (speedcap[i] == SELFID_SPEED_UNKNOWN)
414 speedcap[n] = IEEE1394_SPEED_MAX; 415 speedcap[i] = local_link_speed;
415#endif
416} 416}
417 417
418 418
diff --git a/drivers/ieee1394/ohci1394.h b/drivers/ieee1394/ohci1394.h
index 4320bf010495..7fb8ab9780ae 100644
--- a/drivers/ieee1394/ohci1394.h
+++ b/drivers/ieee1394/ohci1394.h
@@ -26,7 +26,7 @@
26 26
27#define OHCI1394_DRIVER_NAME "ohci1394" 27#define OHCI1394_DRIVER_NAME "ohci1394"
28 28
29#define OHCI1394_MAX_AT_REQ_RETRIES 0x2 29#define OHCI1394_MAX_AT_REQ_RETRIES 0xf
30#define OHCI1394_MAX_AT_RESP_RETRIES 0x2 30#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
31#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 31#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
32#define OHCI1394_MAX_SELF_ID_ERRORS 16 32#define OHCI1394_MAX_SELF_ID_ERRORS 16
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index dc15cadb06ef..38f712036201 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -1419,7 +1419,6 @@ static int __devinit add_card(struct pci_dev *dev,
1419 i2c_ad = kzalloc(sizeof(*i2c_ad), GFP_KERNEL); 1419 i2c_ad = kzalloc(sizeof(*i2c_ad), GFP_KERNEL);
1420 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory"); 1420 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
1421 1421
1422 i2c_ad->id = I2C_HW_B_PCILYNX;
1423 strlcpy(i2c_ad->name, "PCILynx I2C", sizeof(i2c_ad->name)); 1422 strlcpy(i2c_ad->name, "PCILynx I2C", sizeof(i2c_ad->name));
1424 i2c_adapter_data = bit_data; 1423 i2c_adapter_data = bit_data;
1425 i2c_ad->algo_data = &i2c_adapter_data; 1424 i2c_ad->algo_data = &i2c_adapter_data;
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index ab1034ccb7fb..f3fd8657ce4b 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -115,8 +115,8 @@
115 */ 115 */
116static int sbp2_max_speed = IEEE1394_SPEED_MAX; 116static int sbp2_max_speed = IEEE1394_SPEED_MAX;
117module_param_named(max_speed, sbp2_max_speed, int, 0644); 117module_param_named(max_speed, sbp2_max_speed, int, 0644);
118MODULE_PARM_DESC(max_speed, "Force max speed " 118MODULE_PARM_DESC(max_speed, "Limit data transfer speed (5 <= 3200, "
119 "(3 = 800Mb/s, 2 = 400Mb/s, 1 = 200Mb/s, 0 = 100Mb/s)"); 119 "4 <= 1600, 3 <= 800, 2 <= 400, 1 <= 200, 0 = 100 Mb/s)");
120 120
121/* 121/*
122 * Set serialize_io to 0 or N to use dynamically appended lists of command ORBs. 122 * Set serialize_io to 0 or N to use dynamically appended lists of command ORBs.
@@ -256,7 +256,7 @@ static int sbp2_set_busy_timeout(struct sbp2_lu *);
256static int sbp2_max_speed_and_size(struct sbp2_lu *); 256static int sbp2_max_speed_and_size(struct sbp2_lu *);
257 257
258 258
259static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC }; 259static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xa, 0xa, 0xa };
260 260
261static DEFINE_RWLOCK(sbp2_hi_logical_units_lock); 261static DEFINE_RWLOCK(sbp2_hi_logical_units_lock);
262 262
@@ -347,8 +347,8 @@ static struct scsi_host_template sbp2_shost_template = {
347 .sdev_attrs = sbp2_sysfs_sdev_attrs, 347 .sdev_attrs = sbp2_sysfs_sdev_attrs,
348}; 348};
349 349
350/* for match-all entries in sbp2_workarounds_table */ 350#define SBP2_ROM_VALUE_WILDCARD ~0 /* match all */
351#define SBP2_ROM_VALUE_WILDCARD 0x1000000 351#define SBP2_ROM_VALUE_MISSING 0xff000000 /* not present in the unit dir. */
352 352
353/* 353/*
354 * List of devices with known bugs. 354 * List of devices with known bugs.
@@ -359,60 +359,70 @@ static struct scsi_host_template sbp2_shost_template = {
359 */ 359 */
360static const struct { 360static const struct {
361 u32 firmware_revision; 361 u32 firmware_revision;
362 u32 model_id; 362 u32 model;
363 unsigned workarounds; 363 unsigned workarounds;
364} sbp2_workarounds_table[] = { 364} sbp2_workarounds_table[] = {
365 /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { 365 /* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
366 .firmware_revision = 0x002800, 366 .firmware_revision = 0x002800,
367 .model_id = 0x001010, 367 .model = 0x001010,
368 .workarounds = SBP2_WORKAROUND_INQUIRY_36 | 368 .workarounds = SBP2_WORKAROUND_INQUIRY_36 |
369 SBP2_WORKAROUND_MODE_SENSE_8 | 369 SBP2_WORKAROUND_MODE_SENSE_8 |
370 SBP2_WORKAROUND_POWER_CONDITION, 370 SBP2_WORKAROUND_POWER_CONDITION,
371 }, 371 },
372 /* DViCO Momobay FX-3A with TSB42AA9A bridge */ { 372 /* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
373 .firmware_revision = 0x002800, 373 .firmware_revision = 0x002800,
374 .model_id = 0x000000, 374 .model = 0x000000,
375 .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY | 375 .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY |
376 SBP2_WORKAROUND_POWER_CONDITION, 376 SBP2_WORKAROUND_POWER_CONDITION,
377 }, 377 },
378 /* Initio bridges, actually only needed for some older ones */ { 378 /* Initio bridges, actually only needed for some older ones */ {
379 .firmware_revision = 0x000200, 379 .firmware_revision = 0x000200,
380 .model_id = SBP2_ROM_VALUE_WILDCARD, 380 .model = SBP2_ROM_VALUE_WILDCARD,
381 .workarounds = SBP2_WORKAROUND_INQUIRY_36, 381 .workarounds = SBP2_WORKAROUND_INQUIRY_36,
382 }, 382 },
383 /* PL-3507 bridge with Prolific firmware */ { 383 /* PL-3507 bridge with Prolific firmware */ {
384 .firmware_revision = 0x012800, 384 .firmware_revision = 0x012800,
385 .model_id = SBP2_ROM_VALUE_WILDCARD, 385 .model = SBP2_ROM_VALUE_WILDCARD,
386 .workarounds = SBP2_WORKAROUND_POWER_CONDITION, 386 .workarounds = SBP2_WORKAROUND_POWER_CONDITION,
387 }, 387 },
388 /* Symbios bridge */ { 388 /* Symbios bridge */ {
389 .firmware_revision = 0xa0b800, 389 .firmware_revision = 0xa0b800,
390 .model_id = SBP2_ROM_VALUE_WILDCARD, 390 .model = SBP2_ROM_VALUE_WILDCARD,
391 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, 391 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
392 }, 392 },
393 /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ { 393 /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ {
394 .firmware_revision = 0x002600, 394 .firmware_revision = 0x002600,
395 .model_id = SBP2_ROM_VALUE_WILDCARD, 395 .model = SBP2_ROM_VALUE_WILDCARD,
396 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, 396 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
397 }, 397 },
398 /*
399 * iPod 2nd generation: needs 128k max transfer size workaround
400 * iPod 3rd generation: needs fix capacity workaround
401 */
402 {
403 .firmware_revision = 0x0a2700,
404 .model = 0x000000,
405 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS |
406 SBP2_WORKAROUND_FIX_CAPACITY,
407 },
398 /* iPod 4th generation */ { 408 /* iPod 4th generation */ {
399 .firmware_revision = 0x0a2700, 409 .firmware_revision = 0x0a2700,
400 .model_id = 0x000021, 410 .model = 0x000021,
401 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, 411 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
402 }, 412 },
403 /* iPod mini */ { 413 /* iPod mini */ {
404 .firmware_revision = 0x0a2700, 414 .firmware_revision = 0x0a2700,
405 .model_id = 0x000022, 415 .model = 0x000022,
406 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, 416 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
407 }, 417 },
408 /* iPod mini */ { 418 /* iPod mini */ {
409 .firmware_revision = 0x0a2700, 419 .firmware_revision = 0x0a2700,
410 .model_id = 0x000023, 420 .model = 0x000023,
411 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, 421 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
412 }, 422 },
413 /* iPod Photo */ { 423 /* iPod Photo */ {
414 .firmware_revision = 0x0a2700, 424 .firmware_revision = 0x0a2700,
415 .model_id = 0x00007e, 425 .model = 0x00007e,
416 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, 426 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
417 } 427 }
418}; 428};
@@ -1341,13 +1351,15 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
1341 struct csr1212_keyval *kv; 1351 struct csr1212_keyval *kv;
1342 struct csr1212_dentry *dentry; 1352 struct csr1212_dentry *dentry;
1343 u64 management_agent_addr; 1353 u64 management_agent_addr;
1344 u32 unit_characteristics, firmware_revision; 1354 u32 unit_characteristics, firmware_revision, model;
1345 unsigned workarounds; 1355 unsigned workarounds;
1346 int i; 1356 int i;
1347 1357
1348 management_agent_addr = 0; 1358 management_agent_addr = 0;
1349 unit_characteristics = 0; 1359 unit_characteristics = 0;
1350 firmware_revision = 0; 1360 firmware_revision = SBP2_ROM_VALUE_MISSING;
1361 model = ud->flags & UNIT_DIRECTORY_MODEL_ID ?
1362 ud->model_id : SBP2_ROM_VALUE_MISSING;
1351 1363
1352 csr1212_for_each_dir_entry(ud->ne->csr, kv, ud->ud_kv, dentry) { 1364 csr1212_for_each_dir_entry(ud->ne->csr, kv, ud->ud_kv, dentry) {
1353 switch (kv->key.id) { 1365 switch (kv->key.id) {
@@ -1388,9 +1400,9 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
1388 sbp2_workarounds_table[i].firmware_revision != 1400 sbp2_workarounds_table[i].firmware_revision !=
1389 (firmware_revision & 0xffff00)) 1401 (firmware_revision & 0xffff00))
1390 continue; 1402 continue;
1391 if (sbp2_workarounds_table[i].model_id != 1403 if (sbp2_workarounds_table[i].model !=
1392 SBP2_ROM_VALUE_WILDCARD && 1404 SBP2_ROM_VALUE_WILDCARD &&
1393 sbp2_workarounds_table[i].model_id != ud->model_id) 1405 sbp2_workarounds_table[i].model != model)
1394 continue; 1406 continue;
1395 workarounds |= sbp2_workarounds_table[i].workarounds; 1407 workarounds |= sbp2_workarounds_table[i].workarounds;
1396 break; 1408 break;
@@ -1403,7 +1415,7 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
1403 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), 1415 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
1404 workarounds, firmware_revision, 1416 workarounds, firmware_revision,
1405 ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id, 1417 ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
1406 ud->model_id); 1418 model);
1407 1419
1408 /* We would need one SCSI host template for each target to adjust 1420 /* We would need one SCSI host template for each target to adjust
1409 * max_sectors on the fly, therefore warn only. */ 1421 * max_sectors on the fly, therefore warn only. */
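
With the new sentinels above, the quirk lookup distinguishes three cases: an exact model match, a wildcard entry that matches any model, and a device whose unit directory carried no model id at all (represented by the MISSING value so it can never satisfy an exact entry). A standalone sketch of that matching logic; the table contents and flag values are made up for illustration.

#include <stdio.h>
#include <stdint.h>

#define ROM_VALUE_WILDCARD (~0u)	/* entry matches any model        */
#define ROM_VALUE_MISSING  0xff000000u	/* device exposed no model id     */

struct quirk {
	uint32_t firmware_revision;	/* compared on its upper 16 bits  */
	uint32_t model;
	unsigned workarounds;
};

static const struct quirk quirks[] = {
	{ 0x0a2700, 0x000021,           0x1 },	/* exact model match */
	{ 0x000200, ROM_VALUE_WILDCARD, 0x2 },	/* any model         */
};

static unsigned lookup(uint32_t firmware_revision, uint32_t model)
{
	unsigned workarounds = 0;
	size_t i;

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		if (quirks[i].firmware_revision != (firmware_revision & 0xffff00))
			continue;
		if (quirks[i].model != ROM_VALUE_WILDCARD &&
		    quirks[i].model != model)
			continue;
		workarounds |= quirks[i].workarounds;
		break;
	}
	return workarounds;
}

int main(void)
{
	printf("0x%x\n", lookup(0x0a2700, 0x000021));		/* 0x1 */
	printf("0x%x\n", lookup(0x000234, ROM_VALUE_MISSING));	/* 0x2, via wildcard */
	printf("0x%x\n", lookup(0x0a2700, ROM_VALUE_MISSING));	/* 0x0, no exact hit */
	return 0;
}
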
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index a3551dd0324d..aa30b5cb3513 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -431,6 +431,7 @@ set_arg(void __user *b, void *val,int len)
431 return 0; 431 return 0;
432} 432}
433 433
434#ifdef CONFIG_IPPP_FILTER
434static int get_filter(void __user *arg, struct sock_filter **p) 435static int get_filter(void __user *arg, struct sock_filter **p)
435{ 436{
436 struct sock_fprog uprog; 437 struct sock_fprog uprog;
@@ -465,6 +466,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
465 *p = code; 466 *p = code;
466 return uprog.len; 467 return uprog.len;
467} 468}
469#endif /* CONFIG_IPPP_FILTER */
468 470
469/* 471/*
470 * ippp device ioctl 472 * ippp device ioctl
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 90663e01a56e..60156dfdc608 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -224,7 +224,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
224 break; 224 break;
225 225
226 /* If the Guest asked to be stopped, we sleep. The Guest's 226 /* If the Guest asked to be stopped, we sleep. The Guest's
227 * clock timer or LHCALL_BREAK from the Waker will wake us. */ 227 * clock timer or LHREQ_BREAK from the Waker will wake us. */
228 if (cpu->halted) { 228 if (cpu->halted) {
229 set_current_state(TASK_INTERRUPTIBLE); 229 set_current_state(TASK_INTERRUPTIBLE);
230 schedule(); 230 schedule();
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 34bc017b8b3c..b8ee103eed5f 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -307,9 +307,8 @@ static int close(struct inode *inode, struct file *file)
307 * kmalloc()ed string, either of which is ok to hand to kfree(). */ 307 * kmalloc()ed string, either of which is ok to hand to kfree(). */
308 if (!IS_ERR(lg->dead)) 308 if (!IS_ERR(lg->dead))
309 kfree(lg->dead); 309 kfree(lg->dead);
310 /* We clear the entire structure, which also marks it as free for the 310 /* Free the memory allocated to the lguest_struct */
311 * next user. */ 311 kfree(lg);
312 memset(lg, 0, sizeof(*lg));
313 /* Release lock and exit. */ 312 /* Release lock and exit. */
314 mutex_unlock(&lguest_lock); 313 mutex_unlock(&lguest_lock);
315 314
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 419c378bd24b..56073199ceba 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -87,14 +87,6 @@ config PHANTOM
87 If you choose to build module, its name will be phantom. If unsure, 87 If you choose to build module, its name will be phantom. If unsure,
88 say N here. 88 say N here.
89 89
90config EEPROM_93CX6
91 tristate "EEPROM 93CX6 support"
92 ---help---
93 This is a driver for the EEPROM chipsets 93c46 and 93c66.
94 The driver supports both read as well as write commands.
95
96 If unsure, say N.
97
98config SGI_IOC4 90config SGI_IOC4
99 tristate "SGI IOC4 Base IO support" 91 tristate "SGI IOC4 Base IO support"
100 depends on PCI 92 depends on PCI
@@ -231,5 +223,6 @@ config DELL_LAPTOP
231 laptops. 223 laptops.
232 224
233source "drivers/misc/c2port/Kconfig" 225source "drivers/misc/c2port/Kconfig"
226source "drivers/misc/eeprom/Kconfig"
234 227
235endif # MISC_DEVICES 228endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d5749a7bc777..bc1199830554 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -13,10 +13,10 @@ obj-$(CONFIG_TIFM_CORE) += tifm_core.o
13obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o 13obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
14obj-$(CONFIG_PHANTOM) += phantom.o 14obj-$(CONFIG_PHANTOM) += phantom.o
15obj-$(CONFIG_SGI_IOC4) += ioc4.o 15obj-$(CONFIG_SGI_IOC4) += ioc4.o
16obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
17obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o 16obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
18obj-$(CONFIG_KGDB_TESTS) += kgdbts.o 17obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
19obj-$(CONFIG_SGI_XP) += sgi-xp/ 18obj-$(CONFIG_SGI_XP) += sgi-xp/
20obj-$(CONFIG_SGI_GRU) += sgi-gru/ 19obj-$(CONFIG_SGI_GRU) += sgi-gru/
21obj-$(CONFIG_HP_ILO) += hpilo.o 20obj-$(CONFIG_HP_ILO) += hpilo.o
22obj-$(CONFIG_C2PORT) += c2port/ 21obj-$(CONFIG_C2PORT) += c2port/
22obj-y += eeprom/
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
new file mode 100644
index 000000000000..c76df8cda5ef
--- /dev/null
+++ b/drivers/misc/eeprom/Kconfig
@@ -0,0 +1,59 @@
1menu "EEPROM support"
2
3config EEPROM_AT24
4 tristate "I2C EEPROMs from most vendors"
5 depends on I2C && SYSFS && EXPERIMENTAL
6 help
7 Enable this driver to get read/write support to most I2C EEPROMs,
8 after you configure the driver to know about each EEPROM on
9 your target board. Use these generic chip names, instead of
10 vendor-specific ones like at24c64 or 24lc02:
11
12 24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
13 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
14
15 Unless you like data loss puzzles, always be sure that any chip
16 you configure as a 24c32 (32 kbit) or larger is NOT really a
17 24c16 (16 kbit) or smaller, and vice versa. Marking the chip
18 as read-only won't help recover from this. Also, if your chip
19 has any software write-protect mechanism you may want to review the
20 code to make sure this driver won't turn it on by accident.
21
22 If you use this with an SMBus adapter instead of an I2C adapter,
23 full functionality is not available. Only smaller devices are
24 supported (24c16 and below, max 4 kByte).
25
26 This driver can also be built as a module. If so, the module
27 will be called at24.
28
29config EEPROM_AT25
30 tristate "SPI EEPROMs from most vendors"
31 depends on SPI && SYSFS
32 help
33 Enable this driver to get read/write support to most SPI EEPROMs,
34 after you configure the board init code to know about each eeprom
35 on your target board.
36
37 This driver can also be built as a module. If so, the module
38 will be called at25.
39
40config EEPROM_LEGACY
41 tristate "Old I2C EEPROM reader"
42 depends on I2C && SYSFS
43 help
44 If you say yes here you get read-only access to the EEPROM data
45 available on modern memory DIMMs and Sony Vaio laptops via I2C. Such
46 EEPROMs could theoretically be available on other devices as well.
47
48 This driver can also be built as a module. If so, the module
49 will be called eeprom.
50
51config EEPROM_93CX6
52 tristate "EEPROM 93CX6 support"
53 help
54 This is a driver for the EEPROM chipsets 93c46 and 93c66.
55 The driver supports both read as well as write commands.
56
57 If unsure, say N.
58
59endmenu
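
The EEPROM_AT24 help text above assumes that board code tells the driver which chips sit on which bus. A minimal sketch of such a board declaration follows; the bus number, I2C address and chip size are made-up example values, not taken from this patch.

#include <linux/i2c.h>
#include <linux/i2c/at24.h>

/* Hypothetical 24c02 at address 0x50 on I2C bus 1, exported read-only. */
static struct at24_platform_data board_eeprom_pdata = {
	.byte_len	= 256,			/* 24c02: 2 kbit */
	.page_size	= 8,
	.flags		= AT24_FLAG_READONLY,
};

static struct i2c_board_info board_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("24c02", 0x50),
		.platform_data = &board_eeprom_pdata,
	},
};

static void __init board_declare_eeprom(void)
{
	/* the at24 driver binds by the generic "24c02" name, as the help text asks */
	i2c_register_board_info(1, board_i2c_devices,
				ARRAY_SIZE(board_i2c_devices));
}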
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
new file mode 100644
index 000000000000..539dd8f88128
--- /dev/null
+++ b/drivers/misc/eeprom/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_EEPROM_AT24) += at24.o
2obj-$(CONFIG_EEPROM_AT25) += at25.o
3obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o
4obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
diff --git a/drivers/i2c/chips/at24.c b/drivers/misc/eeprom/at24.c
index d4775528abc6..d4775528abc6 100644
--- a/drivers/i2c/chips/at24.c
+++ b/drivers/misc/eeprom/at24.c
diff --git a/drivers/spi/at25.c b/drivers/misc/eeprom/at25.c
index 290dbe99647a..290dbe99647a 100644
--- a/drivers/spi/at25.c
+++ b/drivers/misc/eeprom/at25.c
diff --git a/drivers/i2c/chips/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 2c27193aeaa0..2c27193aeaa0 100644
--- a/drivers/i2c/chips/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
diff --git a/drivers/misc/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 15b1780025c8..15b1780025c8 100644
--- a/drivers/misc/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
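
eeprom_93cx6.c, moved above, is a helper library rather than a standalone driver: a consumer embeds a struct eeprom_93cx6, supplies callbacks that bit-bang the chip's lines, and calls the read helpers. The sketch below shows the shape of such a consumer; the demo_* names are placeholders and the callback bodies are intentionally left empty.

#include <linux/eeprom_93cx6.h>

/* Both callbacks mirror the chip-select/clock/data state between the
 * eeprom->reg_* fields and the parent device's registers; the bodies
 * are device specific and omitted here. */
static void demo_eeprom_register_read(struct eeprom_93cx6 *eeprom)
{
	/* sample the hardware lines into eeprom->reg_data_in/out, reg_data_clock, reg_chip_select */
}

static void demo_eeprom_register_write(struct eeprom_93cx6 *eeprom)
{
	/* drive the eeprom->reg_* fields back out to the hardware */
}

static u16 demo_read_eeprom_word(void *priv, u8 word)
{
	struct eeprom_93cx6 eeprom = {
		.data		= priv,
		.register_read	= demo_eeprom_register_read,
		.register_write	= demo_eeprom_register_write,
		.width		= PCI_EEPROM_WIDTH_93C46,
	};
	u16 value;

	eeprom_93cx6_read(&eeprom, word, &value);
	return value;
}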
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 05e298289238..10c421b73eaf 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -758,7 +758,7 @@ static void __exit ilo_exit(void)
758 class_destroy(ilo_class); 758 class_destroy(ilo_class);
759} 759}
760 760
761MODULE_VERSION("0.05"); 761MODULE_VERSION("0.06");
762MODULE_ALIAS(ILO_NAME); 762MODULE_ALIAS(ILO_NAME);
763MODULE_DESCRIPTION(ILO_NAME); 763MODULE_DESCRIPTION(ILO_NAME);
764MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>"); 764MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>");
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 9cd2ebe2a3b6..45fd653dbe31 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -49,9 +49,6 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
49 49
50 if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) 50 if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
51 return; 51 return;
52
53 DBUG_ON(ch->local_msgqueue == NULL);
54 DBUG_ON(ch->remote_msgqueue == NULL);
55 } 52 }
56 53
57 if (!(ch->flags & XPC_C_OPENREPLY)) { 54 if (!(ch->flags & XPC_C_OPENREPLY)) {
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 82fb9958f22f..2e975762c32b 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -1106,8 +1106,6 @@ xpc_process_activate_IRQ_rcvd_sn2(void)
1106 int n_IRQs_expected; 1106 int n_IRQs_expected;
1107 int n_IRQs_detected; 1107 int n_IRQs_detected;
1108 1108
1109 DBUG_ON(xpc_activate_IRQ_rcvd == 0);
1110
1111 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); 1109 spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
1112 n_IRQs_expected = xpc_activate_IRQ_rcvd; 1110 n_IRQs_expected = xpc_activate_IRQ_rcvd;
1113 xpc_activate_IRQ_rcvd = 0; 1111 xpc_activate_IRQ_rcvd = 0;
@@ -1726,6 +1724,7 @@ xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch)
1726 msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue + 1724 msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue +
1727 (get % ch->local_nentries) * 1725 (get % ch->local_nentries) *
1728 ch->entry_size); 1726 ch->entry_size);
1727 DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
1729 msg->flags = 0; 1728 msg->flags = 0;
1730 } while (++get < ch_sn2->remote_GP.get); 1729 } while (++get < ch_sn2->remote_GP.get);
1731} 1730}
@@ -1740,11 +1739,18 @@ xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch)
1740 struct xpc_msg_sn2 *msg; 1739 struct xpc_msg_sn2 *msg;
1741 s64 put; 1740 s64 put;
1742 1741
1743 put = ch_sn2->w_remote_GP.put; 1742 /* flags are zeroed when the buffer is allocated */
1743 if (ch_sn2->remote_GP.put < ch->remote_nentries)
1744 return;
1745
1746 put = max(ch_sn2->w_remote_GP.put, ch->remote_nentries);
1744 do { 1747 do {
1745 msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + 1748 msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
1746 (put % ch->remote_nentries) * 1749 (put % ch->remote_nentries) *
1747 ch->entry_size); 1750 ch->entry_size);
1751 DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
1752 DBUG_ON(!(msg->flags & XPC_M_SN2_DONE));
1753 DBUG_ON(msg->number != put - ch->remote_nentries);
1748 msg->flags = 0; 1754 msg->flags = 0;
1749 } while (++put < ch_sn2->remote_GP.put); 1755 } while (++put < ch_sn2->remote_GP.put);
1750} 1756}
@@ -1836,6 +1842,7 @@ xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number)
1836 */ 1842 */
1837 xpc_clear_remote_msgqueue_flags_sn2(ch); 1843 xpc_clear_remote_msgqueue_flags_sn2(ch);
1838 1844
1845 smp_wmb(); /* ensure flags have been cleared before bte_copy */
1839 ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put; 1846 ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put;
1840 1847
1841 dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, " 1848 dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
@@ -1934,7 +1941,7 @@ xpc_get_deliverable_payload_sn2(struct xpc_channel *ch)
1934 break; 1941 break;
1935 1942
1936 get = ch_sn2->w_local_GP.get; 1943 get = ch_sn2->w_local_GP.get;
1937 rmb(); /* guarantee that .get loads before .put */ 1944 smp_rmb(); /* guarantee that .get loads before .put */
1938 if (get == ch_sn2->w_remote_GP.put) 1945 if (get == ch_sn2->w_remote_GP.put)
1939 break; 1946 break;
1940 1947
@@ -1956,11 +1963,13 @@ xpc_get_deliverable_payload_sn2(struct xpc_channel *ch)
1956 1963
1957 msg = xpc_pull_remote_msg_sn2(ch, get); 1964 msg = xpc_pull_remote_msg_sn2(ch, get);
1958 1965
1959 DBUG_ON(msg != NULL && msg->number != get); 1966 if (msg != NULL) {
1960 DBUG_ON(msg != NULL && (msg->flags & XPC_M_SN2_DONE)); 1967 DBUG_ON(msg->number != get);
1961 DBUG_ON(msg != NULL && !(msg->flags & XPC_M_SN2_READY)); 1968 DBUG_ON(msg->flags & XPC_M_SN2_DONE);
1969 DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
1962 1970
1963 payload = &msg->payload; 1971 payload = &msg->payload;
1972 }
1964 break; 1973 break;
1965 } 1974 }
1966 1975
@@ -2053,7 +2062,7 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
2053 while (1) { 2062 while (1) {
2054 2063
2055 put = ch_sn2->w_local_GP.put; 2064 put = ch_sn2->w_local_GP.put;
2056 rmb(); /* guarantee that .put loads before .get */ 2065 smp_rmb(); /* guarantee that .put loads before .get */
2057 if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) { 2066 if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) {
2058 2067
2059 /* There are available message entries. We need to try 2068 /* There are available message entries. We need to try
@@ -2186,7 +2195,7 @@ xpc_send_payload_sn2(struct xpc_channel *ch, u32 flags, void *payload,
2186 * The preceding store of msg->flags must occur before the following 2195 * The preceding store of msg->flags must occur before the following
2187 * load of local_GP->put. 2196 * load of local_GP->put.
2188 */ 2197 */
2189 mb(); 2198 smp_mb();
2190 2199
2191 /* see if the message is next in line to be sent, if so send it */ 2200 /* see if the message is next in line to be sent, if so send it */
2192 2201
@@ -2277,8 +2286,9 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
2277 dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n", 2286 dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
2278 (void *)msg, msg_number, ch->partid, ch->number); 2287 (void *)msg, msg_number, ch->partid, ch->number);
2279 2288
2280 DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->entry_size) != 2289 DBUG_ON((((u64)msg - (u64)ch->sn.sn2.remote_msgqueue) / ch->entry_size) !=
2281 msg_number % ch->remote_nentries); 2290 msg_number % ch->remote_nentries);
2291 DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
2282 DBUG_ON(msg->flags & XPC_M_SN2_DONE); 2292 DBUG_ON(msg->flags & XPC_M_SN2_DONE);
2283 2293
2284 msg->flags |= XPC_M_SN2_DONE; 2294 msg->flags |= XPC_M_SN2_DONE;
@@ -2287,7 +2297,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
2287 * The preceding store of msg->flags must occur before the following 2297 * The preceding store of msg->flags must occur before the following
2288 * load of local_GP->get. 2298 * load of local_GP->get.
2289 */ 2299 */
2290 mb(); 2300 smp_mb();
2291 2301
2292 /* 2302 /*
2293 * See if this message is next in line to be acknowledged as having 2303 * See if this message is next in line to be acknowledged as having
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 91a55b1b1037..f17f7d40ea2c 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -1423,7 +1423,7 @@ xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
1423 atomic_inc(&ch->n_to_notify); 1423 atomic_inc(&ch->n_to_notify);
1424 1424
1425 msg_slot->key = key; 1425 msg_slot->key = key;
1426 wmb(); /* a non-NULL func must hit memory after the key */ 1426 smp_wmb(); /* a non-NULL func must hit memory after the key */
1427 msg_slot->func = func; 1427 msg_slot->func = func;
1428 1428
1429 if (ch->flags & XPC_C_DISCONNECTING) { 1429 if (ch->flags & XPC_C_DISCONNECTING) {
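
The sgi-xp hunks above replace bare mb()/rmb()/wmb() with the smp_* variants, since the ordering only has to be visible to other CPUs, and the in-line comments spell out which store must reach memory before which load. The fragment below is an editor's illustration of that pairing in its generic producer/consumer form; it is not taken from the driver and all names are invented.

#include <asm/system.h>		/* smp_* barrier definitions at this point in the tree */

#define DEMO_RING_SIZE	16

struct demo_ring {
	unsigned long head;		/* advanced by the producer */
	unsigned long tail;		/* advanced by the consumer */
	void *slot[DEMO_RING_SIZE];
};

static void demo_produce(struct demo_ring *r, void *item)
{
	r->slot[r->head % DEMO_RING_SIZE] = item;
	smp_wmb();	/* publish the payload before the index that exposes it */
	r->head++;
}

static void *demo_consume(struct demo_ring *r)
{
	if (r->tail == r->head)
		return NULL;
	smp_rmb();	/* order the head load above before the slot load below */
	return r->slot[r->tail++ % DEMO_RING_SIZE];
}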
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index dfa585f7feaf..0efa390978bd 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -76,6 +76,16 @@ config MMC_OMAP
76 76
77 If unsure, say N. 77 If unsure, say N.
78 78
79config MMC_OMAP_HS
80 tristate "TI OMAP High Speed Multimedia Card Interface support"
81 depends on ARCH_OMAP2430 || ARCH_OMAP3
82 help
 83	  This selects the TI OMAP High Speed Multimedia Card Interface.
84 If you have an OMAP2430 or OMAP3 board with a Multimedia Card slot,
85 say Y or M here.
86
87 If unsure, say N.
88
79config MMC_WBSD 89config MMC_WBSD
80 tristate "Winbond W83L51xD SD/MMC Card Interface support" 90 tristate "Winbond W83L51xD SD/MMC Card Interface support"
81 depends on ISA_DMA_API 91 depends on ISA_DMA_API
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index f4853288bbb1..98cab84829b8 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
15obj-$(CONFIG_MMC_WBSD) += wbsd.o 15obj-$(CONFIG_MMC_WBSD) += wbsd.o
16obj-$(CONFIG_MMC_AU1X) += au1xmmc.o 16obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
17obj-$(CONFIG_MMC_OMAP) += omap.o 17obj-$(CONFIG_MMC_OMAP) += omap.o
18obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o
18obj-$(CONFIG_MMC_AT91) += at91_mci.o 19obj-$(CONFIG_MMC_AT91) += at91_mci.o
19obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o 20obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
20obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o 21obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
new file mode 100644
index 000000000000..db37490f67ec
--- /dev/null
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -0,0 +1,1242 @@
1/*
2 * drivers/mmc/host/omap_hsmmc.c
3 *
4 * Driver for OMAP2430/3430 MMC controller.
5 *
6 * Copyright (C) 2007 Texas Instruments.
7 *
8 * Authors:
9 * Syed Mohammed Khasim <x0khasim@ti.com>
10 * Madhusudhan <madhu.cr@ti.com>
11 * Mohit Jalori <mjalori@ti.com>
12 *
13 * This file is licensed under the terms of the GNU General Public License
14 * version 2. This program is licensed "as is" without any warranty of any
15 * kind, whether express or implied.
16 */
17
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/delay.h>
22#include <linux/dma-mapping.h>
23#include <linux/platform_device.h>
24#include <linux/workqueue.h>
25#include <linux/timer.h>
26#include <linux/clk.h>
27#include <linux/mmc/host.h>
28#include <linux/io.h>
29#include <linux/semaphore.h>
30#include <mach/dma.h>
31#include <mach/hardware.h>
32#include <mach/board.h>
33#include <mach/mmc.h>
34#include <mach/cpu.h>
35
36/* OMAP HSMMC Host Controller Registers */
37#define OMAP_HSMMC_SYSCONFIG 0x0010
38#define OMAP_HSMMC_CON 0x002C
39#define OMAP_HSMMC_BLK 0x0104
40#define OMAP_HSMMC_ARG 0x0108
41#define OMAP_HSMMC_CMD 0x010C
42#define OMAP_HSMMC_RSP10 0x0110
43#define OMAP_HSMMC_RSP32 0x0114
44#define OMAP_HSMMC_RSP54 0x0118
45#define OMAP_HSMMC_RSP76 0x011C
46#define OMAP_HSMMC_DATA 0x0120
47#define OMAP_HSMMC_HCTL 0x0128
48#define OMAP_HSMMC_SYSCTL 0x012C
49#define OMAP_HSMMC_STAT 0x0130
50#define OMAP_HSMMC_IE 0x0134
51#define OMAP_HSMMC_ISE 0x0138
52#define OMAP_HSMMC_CAPA 0x0140
53
54#define VS18 (1 << 26)
55#define VS30 (1 << 25)
56#define SDVS18 (0x5 << 9)
57#define SDVS30 (0x6 << 9)
58#define SDVSCLR 0xFFFFF1FF
59#define SDVSDET 0x00000400
60#define AUTOIDLE 0x1
61#define SDBP (1 << 8)
62#define DTO 0xe
63#define ICE 0x1
64#define ICS 0x2
65#define CEN (1 << 2)
66#define CLKD_MASK 0x0000FFC0
67#define CLKD_SHIFT 6
68#define DTO_MASK 0x000F0000
69#define DTO_SHIFT 16
70#define INT_EN_MASK 0x307F0033
71#define INIT_STREAM (1 << 1)
72#define DP_SELECT (1 << 21)
73#define DDIR (1 << 4)
74#define DMA_EN 0x1
75#define MSBS (1 << 5)
76#define BCE (1 << 1)
77#define FOUR_BIT (1 << 1)
78#define CC 0x1
79#define TC 0x02
80#define OD 0x1
81#define ERR (1 << 15)
82#define CMD_TIMEOUT (1 << 16)
83#define DATA_TIMEOUT (1 << 20)
84#define CMD_CRC (1 << 17)
85#define DATA_CRC (1 << 21)
86#define CARD_ERR (1 << 28)
87#define STAT_CLEAR 0xFFFFFFFF
88#define INIT_STREAM_CMD 0x00000000
89#define DUAL_VOLT_OCR_BIT 7
90#define SRC (1 << 25)
91#define SRD (1 << 26)
92
93/*
94 * FIXME: Most likely all the data using these _DEVID defines should come
 95 * from the platform_data, or be implemented in controller- and slot-specific
96 * functions.
97 */
98#define OMAP_MMC1_DEVID 0
99#define OMAP_MMC2_DEVID 1
100
101#define OMAP_MMC_DATADIR_NONE 0
102#define OMAP_MMC_DATADIR_READ 1
103#define OMAP_MMC_DATADIR_WRITE 2
104#define MMC_TIMEOUT_MS 20
105#define OMAP_MMC_MASTER_CLOCK 96000000
106#define DRIVER_NAME "mmci-omap-hs"
107
108/*
109 * One controller can have multiple slots, like on some omap boards using
110 * omap.c controller driver. Luckily this is not currently done on any known
111 * omap_hsmmc.c device.
112 */
113#define mmc_slot(host) (host->pdata->slots[host->slot_id])
114
115/*
116 * MMC Host controller read/write API's
117 */
118#define OMAP_HSMMC_READ(base, reg) \
119 __raw_readl((base) + OMAP_HSMMC_##reg)
120
121#define OMAP_HSMMC_WRITE(base, reg, val) \
122 __raw_writel((val), (base) + OMAP_HSMMC_##reg)
123
124struct mmc_omap_host {
125 struct device *dev;
126 struct mmc_host *mmc;
127 struct mmc_request *mrq;
128 struct mmc_command *cmd;
129 struct mmc_data *data;
130 struct clk *fclk;
131 struct clk *iclk;
132 struct clk *dbclk;
133 struct semaphore sem;
134 struct work_struct mmc_carddetect_work;
135 void __iomem *base;
136 resource_size_t mapbase;
137 unsigned int id;
138 unsigned int dma_len;
139 unsigned int dma_dir;
140 unsigned char bus_mode;
141 unsigned char datadir;
142 u32 *buffer;
143 u32 bytesleft;
144 int suspended;
145 int irq;
146 int carddetect;
147 int use_dma, dma_ch;
148 int initstr;
149 int slot_id;
150 int dbclk_enabled;
151 struct omap_mmc_platform_data *pdata;
152};
153
154/*
155 * Stop clock to the card
156 */
157static void omap_mmc_stop_clock(struct mmc_omap_host *host)
158{
159 OMAP_HSMMC_WRITE(host->base, SYSCTL,
160 OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
161 if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0)
 162	dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
163}
164
165/*
166 * Send init stream sequence to card
167 * before sending IDLE command
168 */
169static void send_init_stream(struct mmc_omap_host *host)
170{
171 int reg = 0;
172 unsigned long timeout;
173
174 disable_irq(host->irq);
175 OMAP_HSMMC_WRITE(host->base, CON,
176 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
177 OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);
178
179 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
180 while ((reg != CC) && time_before(jiffies, timeout))
181 reg = OMAP_HSMMC_READ(host->base, STAT) & CC;
182
183 OMAP_HSMMC_WRITE(host->base, CON,
184 OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM);
185 enable_irq(host->irq);
186}
187
188static inline
189int mmc_omap_cover_is_closed(struct mmc_omap_host *host)
190{
191 int r = 1;
192
193 if (host->pdata->slots[host->slot_id].get_cover_state)
194 r = host->pdata->slots[host->slot_id].get_cover_state(host->dev,
195 host->slot_id);
196 return r;
197}
198
199static ssize_t
200mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr,
201 char *buf)
202{
203 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
204 struct mmc_omap_host *host = mmc_priv(mmc);
205
206 return sprintf(buf, "%s\n", mmc_omap_cover_is_closed(host) ? "closed" :
207 "open");
208}
209
210static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
211
212static ssize_t
213mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr,
214 char *buf)
215{
216 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
217 struct mmc_omap_host *host = mmc_priv(mmc);
218 struct omap_mmc_slot_data slot = host->pdata->slots[host->slot_id];
219
220 return sprintf(buf, "slot:%s\n", slot.name);
221}
222
223static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL);
224
225/*
226 * Configure the response type and send the cmd.
227 */
228static void
229mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd,
230 struct mmc_data *data)
231{
232 int cmdreg = 0, resptype = 0, cmdtype = 0;
233
234 dev_dbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n",
235 mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
236 host->cmd = cmd;
237
238 /*
239 * Clear status bits and enable interrupts
240 */
241 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
242 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
243 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
244
245 if (cmd->flags & MMC_RSP_PRESENT) {
246 if (cmd->flags & MMC_RSP_136)
247 resptype = 1;
248 else
249 resptype = 2;
250 }
251
252 /*
253 * Unlike OMAP1 controller, the cmdtype does not seem to be based on
254 * ac, bc, adtc, bcr. Only commands ending an open ended transfer need
255 * a val of 0x3, rest 0x0.
256 */
257 if (cmd == host->mrq->stop)
258 cmdtype = 0x3;
259
260 cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);
261
262 if (data) {
263 cmdreg |= DP_SELECT | MSBS | BCE;
264 if (data->flags & MMC_DATA_READ)
265 cmdreg |= DDIR;
266 else
267 cmdreg &= ~(DDIR);
268 }
269
270 if (host->use_dma)
271 cmdreg |= DMA_EN;
272
273 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
274 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
275}
276
277/*
278 * Notify the transfer complete to MMC core
279 */
280static void
281mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
282{
283 host->data = NULL;
284
285 if (host->use_dma && host->dma_ch != -1)
286 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
287 host->dma_dir);
288
289 host->datadir = OMAP_MMC_DATADIR_NONE;
290
291 if (!data->error)
292 data->bytes_xfered += data->blocks * (data->blksz);
293 else
294 data->bytes_xfered = 0;
295
296 if (!data->stop) {
297 host->mrq = NULL;
298 mmc_request_done(host->mmc, data->mrq);
299 return;
300 }
301 mmc_omap_start_command(host, data->stop, NULL);
302}
303
304/*
305 * Notify the core about command completion
306 */
307static void
308mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
309{
310 host->cmd = NULL;
311
312 if (cmd->flags & MMC_RSP_PRESENT) {
313 if (cmd->flags & MMC_RSP_136) {
314 /* response type 2 */
315 cmd->resp[3] = OMAP_HSMMC_READ(host->base, RSP10);
316 cmd->resp[2] = OMAP_HSMMC_READ(host->base, RSP32);
317 cmd->resp[1] = OMAP_HSMMC_READ(host->base, RSP54);
318 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP76);
319 } else {
320 /* response types 1, 1b, 3, 4, 5, 6 */
321 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
322 }
323 }
324 if (host->data == NULL || cmd->error) {
325 host->mrq = NULL;
326 mmc_request_done(host->mmc, cmd->mrq);
327 }
328}
329
330/*
331 * DMA clean up for command errors
332 */
333static void mmc_dma_cleanup(struct mmc_omap_host *host)
334{
335 host->data->error = -ETIMEDOUT;
336
337 if (host->use_dma && host->dma_ch != -1) {
338 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
339 host->dma_dir);
340 omap_free_dma(host->dma_ch);
341 host->dma_ch = -1;
342 up(&host->sem);
343 }
344 host->data = NULL;
345 host->datadir = OMAP_MMC_DATADIR_NONE;
346}
347
348/*
349 * Readable error output
350 */
351#ifdef CONFIG_MMC_DEBUG
352static void mmc_omap_report_irq(struct mmc_omap_host *host, u32 status)
353{
354 /* --- means reserved bit without definition at documentation */
355 static const char *mmc_omap_status_bits[] = {
356 "CC", "TC", "BGE", "---", "BWR", "BRR", "---", "---", "CIRQ",
357 "OBI", "---", "---", "---", "---", "---", "ERRI", "CTO", "CCRC",
358 "CEB", "CIE", "DTO", "DCRC", "DEB", "---", "ACE", "---",
359 "---", "---", "---", "CERR", "CERR", "BADA", "---", "---", "---"
360 };
361 char res[256];
362 char *buf = res;
363 int len, i;
364
365 len = sprintf(buf, "MMC IRQ 0x%x :", status);
366 buf += len;
367
368 for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
369 if (status & (1 << i)) {
370 len = sprintf(buf, " %s", mmc_omap_status_bits[i]);
371 buf += len;
372 }
373
374 dev_dbg(mmc_dev(host->mmc), "%s\n", res);
375}
376#endif /* CONFIG_MMC_DEBUG */
377
378
379/*
380 * MMC controller IRQ handler
381 */
382static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
383{
384 struct mmc_omap_host *host = dev_id;
385 struct mmc_data *data;
386 int end_cmd = 0, end_trans = 0, status;
387
388 if (host->cmd == NULL && host->data == NULL) {
389 OMAP_HSMMC_WRITE(host->base, STAT,
390 OMAP_HSMMC_READ(host->base, STAT));
391 return IRQ_HANDLED;
392 }
393
394 data = host->data;
395 status = OMAP_HSMMC_READ(host->base, STAT);
396 dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
397
398 if (status & ERR) {
399#ifdef CONFIG_MMC_DEBUG
400 mmc_omap_report_irq(host, status);
401#endif
402 if ((status & CMD_TIMEOUT) ||
403 (status & CMD_CRC)) {
404 if (host->cmd) {
405 if (status & CMD_TIMEOUT) {
406 OMAP_HSMMC_WRITE(host->base, SYSCTL,
407 OMAP_HSMMC_READ(host->base,
408 SYSCTL) | SRC);
409 while (OMAP_HSMMC_READ(host->base,
410 SYSCTL) & SRC)
411 ;
412
413 host->cmd->error = -ETIMEDOUT;
414 } else {
415 host->cmd->error = -EILSEQ;
416 }
417 end_cmd = 1;
418 }
419 if (host->data)
420 mmc_dma_cleanup(host);
421 }
422 if ((status & DATA_TIMEOUT) ||
423 (status & DATA_CRC)) {
424 if (host->data) {
425 if (status & DATA_TIMEOUT)
426 mmc_dma_cleanup(host);
427 else
428 host->data->error = -EILSEQ;
429 OMAP_HSMMC_WRITE(host->base, SYSCTL,
430 OMAP_HSMMC_READ(host->base,
431 SYSCTL) | SRD);
432 while (OMAP_HSMMC_READ(host->base,
433 SYSCTL) & SRD)
434 ;
435 end_trans = 1;
436 }
437 }
438 if (status & CARD_ERR) {
439 dev_dbg(mmc_dev(host->mmc),
440 "Ignoring card err CMD%d\n", host->cmd->opcode);
441 if (host->cmd)
442 end_cmd = 1;
443 if (host->data)
444 end_trans = 1;
445 }
446 }
447
448 OMAP_HSMMC_WRITE(host->base, STAT, status);
449
450 if (end_cmd || (status & CC))
451 mmc_omap_cmd_done(host, host->cmd);
452 if (end_trans || (status & TC))
453 mmc_omap_xfer_done(host, data);
454
455 return IRQ_HANDLED;
456}
457
458/*
459 * Switch MMC operating voltage
460 */
461static int omap_mmc_switch_opcond(struct mmc_omap_host *host, int vdd)
462{
463 u32 reg_val = 0;
464 int ret;
465
466 /* Disable the clocks */
467 clk_disable(host->fclk);
468 clk_disable(host->iclk);
469 clk_disable(host->dbclk);
470
471 /* Turn the power off */
472 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
473 if (ret != 0)
474 goto err;
475
476 /* Turn the power ON with given VDD 1.8 or 3.0v */
477 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, vdd);
478 if (ret != 0)
479 goto err;
480
481 clk_enable(host->fclk);
482 clk_enable(host->iclk);
483 clk_enable(host->dbclk);
484
485 OMAP_HSMMC_WRITE(host->base, HCTL,
486 OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR);
487 reg_val = OMAP_HSMMC_READ(host->base, HCTL);
488 /*
489 * If a MMC dual voltage card is detected, the set_ios fn calls
490 * this fn with VDD bit set for 1.8V. Upon card removal from the
491 * slot, omap_mmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF.
492 *
493 * Only MMC1 supports 3.0V. MMC2 will not function if SDVS30 is
494 * set in HCTL.
495 */
496 if (host->id == OMAP_MMC1_DEVID && (((1 << vdd) == MMC_VDD_32_33) ||
497 ((1 << vdd) == MMC_VDD_33_34)))
498 reg_val |= SDVS30;
499 if ((1 << vdd) == MMC_VDD_165_195)
500 reg_val |= SDVS18;
501
502 OMAP_HSMMC_WRITE(host->base, HCTL, reg_val);
503
504 OMAP_HSMMC_WRITE(host->base, HCTL,
505 OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
506
507 return 0;
508err:
509 dev_dbg(mmc_dev(host->mmc), "Unable to switch operating voltage\n");
510 return ret;
511}
512
513/*
514 * Work Item to notify the core about card insertion/removal
515 */
516static void mmc_omap_detect(struct work_struct *work)
517{
518 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
519 mmc_carddetect_work);
520
521 sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
522 if (host->carddetect) {
523 mmc_detect_change(host->mmc, (HZ * 200) / 1000);
524 } else {
525 OMAP_HSMMC_WRITE(host->base, SYSCTL,
526 OMAP_HSMMC_READ(host->base, SYSCTL) | SRD);
527 while (OMAP_HSMMC_READ(host->base, SYSCTL) & SRD)
528 ;
529
530 mmc_detect_change(host->mmc, (HZ * 50) / 1000);
531 }
532}
533
534/*
535 * ISR for handling card insertion and removal
536 */
537static irqreturn_t omap_mmc_cd_handler(int irq, void *dev_id)
538{
539 struct mmc_omap_host *host = (struct mmc_omap_host *)dev_id;
540
541 host->carddetect = mmc_slot(host).card_detect(irq);
542 schedule_work(&host->mmc_carddetect_work);
543
544 return IRQ_HANDLED;
545}
546
547/*
548 * DMA call back function
549 */
550static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
551{
552 struct mmc_omap_host *host = data;
553
554 if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ)
555 dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n");
556
557 if (host->dma_ch < 0)
558 return;
559
560 omap_free_dma(host->dma_ch);
561 host->dma_ch = -1;
562 /*
 563	 * DMA Callback: runs in interrupt context.
 564	 * mutex_unlock will throw a kernel warning if used.
565 */
566 up(&host->sem);
567}
568
569/*
570 * Configure dma src and destination parameters
571 */
572static int mmc_omap_config_dma_param(int sync_dir, struct mmc_omap_host *host,
573 struct mmc_data *data)
574{
575 if (sync_dir == 0) {
576 omap_set_dma_dest_params(host->dma_ch, 0,
577 OMAP_DMA_AMODE_CONSTANT,
578 (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
579 omap_set_dma_src_params(host->dma_ch, 0,
580 OMAP_DMA_AMODE_POST_INC,
581 sg_dma_address(&data->sg[0]), 0, 0);
582 } else {
583 omap_set_dma_src_params(host->dma_ch, 0,
584 OMAP_DMA_AMODE_CONSTANT,
585 (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
586 omap_set_dma_dest_params(host->dma_ch, 0,
587 OMAP_DMA_AMODE_POST_INC,
588 sg_dma_address(&data->sg[0]), 0, 0);
589 }
590 return 0;
591}
592/*
593 * Routine to configure and start DMA for the MMC card
594 */
595static int
596mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req)
597{
598 int sync_dev, sync_dir = 0;
599 int dma_ch = 0, ret = 0, err = 1;
600 struct mmc_data *data = req->data;
601
602 /*
603 * If for some reason the DMA transfer is still active,
604 * we wait for timeout period and free the dma
605 */
606 if (host->dma_ch != -1) {
607 set_current_state(TASK_UNINTERRUPTIBLE);
608 schedule_timeout(100);
609 if (down_trylock(&host->sem)) {
610 omap_free_dma(host->dma_ch);
611 host->dma_ch = -1;
612 up(&host->sem);
613 return err;
614 }
615 } else {
616 if (down_trylock(&host->sem))
617 return err;
618 }
619
620 if (!(data->flags & MMC_DATA_WRITE)) {
621 host->dma_dir = DMA_FROM_DEVICE;
622 if (host->id == OMAP_MMC1_DEVID)
623 sync_dev = OMAP24XX_DMA_MMC1_RX;
624 else
625 sync_dev = OMAP24XX_DMA_MMC2_RX;
626 } else {
627 host->dma_dir = DMA_TO_DEVICE;
628 if (host->id == OMAP_MMC1_DEVID)
629 sync_dev = OMAP24XX_DMA_MMC1_TX;
630 else
631 sync_dev = OMAP24XX_DMA_MMC2_TX;
632 }
633
634 ret = omap_request_dma(sync_dev, "MMC/SD", mmc_omap_dma_cb,
635 host, &dma_ch);
636 if (ret != 0) {
637 dev_dbg(mmc_dev(host->mmc),
638 "%s: omap_request_dma() failed with %d\n",
639 mmc_hostname(host->mmc), ret);
640 return ret;
641 }
642
643 host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
644 data->sg_len, host->dma_dir);
645 host->dma_ch = dma_ch;
646
647 if (!(data->flags & MMC_DATA_WRITE))
648 mmc_omap_config_dma_param(1, host, data);
649 else
650 mmc_omap_config_dma_param(0, host, data);
651
652 if ((data->blksz % 4) == 0)
653 omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
654 (data->blksz / 4), data->blocks, OMAP_DMA_SYNC_FRAME,
655 sync_dev, sync_dir);
656 else
657 /* REVISIT: The MMC buffer increments only when MSB is written.
 658	 * Return an error for a blksz that is not a multiple of four.
659 */
660 return -EINVAL;
661
662 omap_start_dma(dma_ch);
663 return 0;
664}
665
666static void set_data_timeout(struct mmc_omap_host *host,
667 struct mmc_request *req)
668{
669 unsigned int timeout, cycle_ns;
670 uint32_t reg, clkd, dto = 0;
671
672 reg = OMAP_HSMMC_READ(host->base, SYSCTL);
673 clkd = (reg & CLKD_MASK) >> CLKD_SHIFT;
674 if (clkd == 0)
675 clkd = 1;
676
677 cycle_ns = 1000000000 / (clk_get_rate(host->fclk) / clkd);
678 timeout = req->data->timeout_ns / cycle_ns;
679 timeout += req->data->timeout_clks;
680 if (timeout) {
681 while ((timeout & 0x80000000) == 0) {
682 dto += 1;
683 timeout <<= 1;
684 }
685 dto = 31 - dto;
686 timeout <<= 1;
687 if (timeout && dto)
688 dto += 1;
689 if (dto >= 13)
690 dto -= 13;
691 else
692 dto = 0;
693 if (dto > 14)
694 dto = 14;
695 }
696
697 reg &= ~DTO_MASK;
698 reg |= dto << DTO_SHIFT;
699 OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);
700}
701
702/*
703 * Configure block length for MMC/SD cards and initiate the transfer.
704 */
705static int
706mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
707{
708 int ret;
709 host->data = req->data;
710
711 if (req->data == NULL) {
712 host->datadir = OMAP_MMC_DATADIR_NONE;
713 OMAP_HSMMC_WRITE(host->base, BLK, 0);
714 return 0;
715 }
716
717 OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
718 | (req->data->blocks << 16));
719 set_data_timeout(host, req);
720
721 host->datadir = (req->data->flags & MMC_DATA_WRITE) ?
722 OMAP_MMC_DATADIR_WRITE : OMAP_MMC_DATADIR_READ;
723
724 if (host->use_dma) {
725 ret = mmc_omap_start_dma_transfer(host, req);
726 if (ret != 0) {
727 dev_dbg(mmc_dev(host->mmc), "MMC start dma failure\n");
728 return ret;
729 }
730 }
731 return 0;
732}
733
734/*
 735 * Request function for read/write operations.
736 */
737static void omap_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
738{
739 struct mmc_omap_host *host = mmc_priv(mmc);
740
741 WARN_ON(host->mrq != NULL);
742 host->mrq = req;
743 mmc_omap_prepare_data(host, req);
744 mmc_omap_start_command(host, req->cmd, req->data);
745}
746
747
748/* Routine to configure clock values. Exposed API to core */
749static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
750{
751 struct mmc_omap_host *host = mmc_priv(mmc);
752 u16 dsor = 0;
753 unsigned long regval;
754 unsigned long timeout;
755
756 switch (ios->power_mode) {
757 case MMC_POWER_OFF:
758 mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
759 /*
760 * Reset bus voltage to 3V if it got set to 1.8V earlier.
761 * REVISIT: If we are able to detect cards after unplugging
762 * a 1.8V card, this code should not be needed.
763 */
764 if (!(OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET)) {
765 int vdd = fls(host->mmc->ocr_avail) - 1;
766 if (omap_mmc_switch_opcond(host, vdd) != 0)
767 host->mmc->ios.vdd = vdd;
768 }
769 break;
770 case MMC_POWER_UP:
771 mmc_slot(host).set_power(host->dev, host->slot_id, 1, ios->vdd);
772 break;
773 }
774
775 switch (mmc->ios.bus_width) {
776 case MMC_BUS_WIDTH_4:
777 OMAP_HSMMC_WRITE(host->base, HCTL,
778 OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
779 break;
780 case MMC_BUS_WIDTH_1:
781 OMAP_HSMMC_WRITE(host->base, HCTL,
782 OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
783 break;
784 }
785
786 if (host->id == OMAP_MMC1_DEVID) {
787 /* Only MMC1 can operate at 3V/1.8V */
788 if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) &&
789 (ios->vdd == DUAL_VOLT_OCR_BIT)) {
790 /*
791 * The mmc_select_voltage fn of the core does
792 * not seem to set the power_mode to
793 * MMC_POWER_UP upon recalculating the voltage.
794 * vdd 1.8v.
795 */
796 if (omap_mmc_switch_opcond(host, ios->vdd) != 0)
797 dev_dbg(mmc_dev(host->mmc),
798 "Switch operation failed\n");
799 }
800 }
801
802 if (ios->clock) {
803 dsor = OMAP_MMC_MASTER_CLOCK / ios->clock;
804 if (dsor < 1)
805 dsor = 1;
806
807 if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock)
808 dsor++;
809
810 if (dsor > 250)
811 dsor = 250;
812 }
813 omap_mmc_stop_clock(host);
814 regval = OMAP_HSMMC_READ(host->base, SYSCTL);
815 regval = regval & ~(CLKD_MASK);
816 regval = regval | (dsor << 6) | (DTO << 16);
817 OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
818 OMAP_HSMMC_WRITE(host->base, SYSCTL,
819 OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
820
821 /* Wait till the ICS bit is set */
822 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
823 while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != 0x2
824 && time_before(jiffies, timeout))
825 msleep(1);
826
827 OMAP_HSMMC_WRITE(host->base, SYSCTL,
828 OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
829
830 if (ios->power_mode == MMC_POWER_ON)
831 send_init_stream(host);
832
833 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
834 OMAP_HSMMC_WRITE(host->base, CON,
835 OMAP_HSMMC_READ(host->base, CON) | OD);
836}
837
838static int omap_hsmmc_get_cd(struct mmc_host *mmc)
839{
840 struct mmc_omap_host *host = mmc_priv(mmc);
841 struct omap_mmc_platform_data *pdata = host->pdata;
842
843 if (!pdata->slots[0].card_detect)
844 return -ENOSYS;
845 return pdata->slots[0].card_detect(pdata->slots[0].card_detect_irq);
846}
847
848static int omap_hsmmc_get_ro(struct mmc_host *mmc)
849{
850 struct mmc_omap_host *host = mmc_priv(mmc);
851 struct omap_mmc_platform_data *pdata = host->pdata;
852
853 if (!pdata->slots[0].get_ro)
854 return -ENOSYS;
855 return pdata->slots[0].get_ro(host->dev, 0);
856}
857
858static struct mmc_host_ops mmc_omap_ops = {
859 .request = omap_mmc_request,
860 .set_ios = omap_mmc_set_ios,
861 .get_cd = omap_hsmmc_get_cd,
862 .get_ro = omap_hsmmc_get_ro,
863 /* NYET -- enable_sdio_irq */
864};
865
866static int __init omap_mmc_probe(struct platform_device *pdev)
867{
868 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
869 struct mmc_host *mmc;
870 struct mmc_omap_host *host = NULL;
871 struct resource *res;
872 int ret = 0, irq;
873 u32 hctl, capa;
874
875 if (pdata == NULL) {
876 dev_err(&pdev->dev, "Platform Data is missing\n");
877 return -ENXIO;
878 }
879
880 if (pdata->nr_slots == 0) {
881 dev_err(&pdev->dev, "No Slots\n");
882 return -ENXIO;
883 }
884
885 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
886 irq = platform_get_irq(pdev, 0);
887 if (res == NULL || irq < 0)
888 return -ENXIO;
889
890 res = request_mem_region(res->start, res->end - res->start + 1,
891 pdev->name);
892 if (res == NULL)
893 return -EBUSY;
894
895 mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev);
896 if (!mmc) {
897 ret = -ENOMEM;
898 goto err;
899 }
900
901 host = mmc_priv(mmc);
902 host->mmc = mmc;
903 host->pdata = pdata;
904 host->dev = &pdev->dev;
905 host->use_dma = 1;
906 host->dev->dma_mask = &pdata->dma_mask;
907 host->dma_ch = -1;
908 host->irq = irq;
909 host->id = pdev->id;
910 host->slot_id = 0;
911 host->mapbase = res->start;
912 host->base = ioremap(host->mapbase, SZ_4K);
913
914 platform_set_drvdata(pdev, host);
915 INIT_WORK(&host->mmc_carddetect_work, mmc_omap_detect);
916
917 mmc->ops = &mmc_omap_ops;
918 mmc->f_min = 400000;
919 mmc->f_max = 52000000;
920
921 sema_init(&host->sem, 1);
922
923 host->iclk = clk_get(&pdev->dev, "mmchs_ick");
924 if (IS_ERR(host->iclk)) {
925 ret = PTR_ERR(host->iclk);
926 host->iclk = NULL;
927 goto err1;
928 }
929 host->fclk = clk_get(&pdev->dev, "mmchs_fck");
930 if (IS_ERR(host->fclk)) {
931 ret = PTR_ERR(host->fclk);
932 host->fclk = NULL;
933 clk_put(host->iclk);
934 goto err1;
935 }
936
937 if (clk_enable(host->fclk) != 0) {
938 clk_put(host->iclk);
939 clk_put(host->fclk);
940 goto err1;
941 }
942
943 if (clk_enable(host->iclk) != 0) {
944 clk_disable(host->fclk);
945 clk_put(host->iclk);
946 clk_put(host->fclk);
947 goto err1;
948 }
949
950 host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
951 /*
952 * MMC can still work without debounce clock.
953 */
954 if (IS_ERR(host->dbclk))
955 dev_warn(mmc_dev(host->mmc), "Failed to get debounce clock\n");
956 else
957 if (clk_enable(host->dbclk) != 0)
958 dev_dbg(mmc_dev(host->mmc), "Enabling debounce"
959 " clk failed\n");
960 else
961 host->dbclk_enabled = 1;
962
963#ifdef CONFIG_MMC_BLOCK_BOUNCE
964 mmc->max_phys_segs = 1;
965 mmc->max_hw_segs = 1;
966#endif
967 mmc->max_blk_size = 512; /* Block Length at max can be 1024 */
968 mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */
969 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
970 mmc->max_seg_size = mmc->max_req_size;
971
972 mmc->ocr_avail = mmc_slot(host).ocr_mask;
973 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
974
975 if (pdata->slots[host->slot_id].wires >= 4)
976 mmc->caps |= MMC_CAP_4_BIT_DATA;
977
978 /* Only MMC1 supports 3.0V */
979 if (host->id == OMAP_MMC1_DEVID) {
980 hctl = SDVS30;
981 capa = VS30 | VS18;
982 } else {
983 hctl = SDVS18;
984 capa = VS18;
985 }
986
987 OMAP_HSMMC_WRITE(host->base, HCTL,
988 OMAP_HSMMC_READ(host->base, HCTL) | hctl);
989
990 OMAP_HSMMC_WRITE(host->base, CAPA,
991 OMAP_HSMMC_READ(host->base, CAPA) | capa);
992
993 /* Set the controller to AUTO IDLE mode */
994 OMAP_HSMMC_WRITE(host->base, SYSCONFIG,
995 OMAP_HSMMC_READ(host->base, SYSCONFIG) | AUTOIDLE);
996
997 /* Set SD bus power bit */
998 OMAP_HSMMC_WRITE(host->base, HCTL,
999 OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
1000
1001 /* Request IRQ for MMC operations */
1002 ret = request_irq(host->irq, mmc_omap_irq, IRQF_DISABLED,
1003 mmc_hostname(mmc), host);
1004 if (ret) {
1005 dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
1006 goto err_irq;
1007 }
1008
1009 if (pdata->init != NULL) {
1010 if (pdata->init(&pdev->dev) != 0) {
1011 dev_dbg(mmc_dev(host->mmc),
1012 "Unable to configure MMC IRQs\n");
1013 goto err_irq_cd_init;
1014 }
1015 }
1016
1017 /* Request IRQ for card detect */
1018 if ((mmc_slot(host).card_detect_irq) && (mmc_slot(host).card_detect)) {
1019 ret = request_irq(mmc_slot(host).card_detect_irq,
1020 omap_mmc_cd_handler,
1021 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
1022 | IRQF_DISABLED,
1023 mmc_hostname(mmc), host);
1024 if (ret) {
1025 dev_dbg(mmc_dev(host->mmc),
1026 "Unable to grab MMC CD IRQ\n");
1027 goto err_irq_cd;
1028 }
1029 }
1030
1031 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
1032 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
1033
1034 mmc_add_host(mmc);
1035
1036 if (host->pdata->slots[host->slot_id].name != NULL) {
1037 ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
1038 if (ret < 0)
1039 goto err_slot_name;
1040 }
1041 if (mmc_slot(host).card_detect_irq && mmc_slot(host).card_detect &&
1042 host->pdata->slots[host->slot_id].get_cover_state) {
1043 ret = device_create_file(&mmc->class_dev,
1044 &dev_attr_cover_switch);
1045 if (ret < 0)
1046 goto err_cover_switch;
1047 }
1048
1049 return 0;
1050
1051err_cover_switch:
1052 device_remove_file(&mmc->class_dev, &dev_attr_cover_switch);
1053err_slot_name:
1054 mmc_remove_host(mmc);
1055err_irq_cd:
1056 free_irq(mmc_slot(host).card_detect_irq, host);
1057err_irq_cd_init:
1058 free_irq(host->irq, host);
1059err_irq:
1060 clk_disable(host->fclk);
1061 clk_disable(host->iclk);
1062 clk_put(host->fclk);
1063 clk_put(host->iclk);
1064 if (host->dbclk_enabled) {
1065 clk_disable(host->dbclk);
1066 clk_put(host->dbclk);
1067 }
1068
1069err1:
1070 iounmap(host->base);
1071err:
 1072	dev_dbg(&pdev->dev, "Probe Failed\n");
1073 release_mem_region(res->start, res->end - res->start + 1);
1074 if (host)
1075 mmc_free_host(mmc);
1076 return ret;
1077}
1078
1079static int omap_mmc_remove(struct platform_device *pdev)
1080{
1081 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1082 struct resource *res;
1083
1084 if (host) {
1085 mmc_remove_host(host->mmc);
1086 if (host->pdata->cleanup)
1087 host->pdata->cleanup(&pdev->dev);
1088 free_irq(host->irq, host);
1089 if (mmc_slot(host).card_detect_irq)
1090 free_irq(mmc_slot(host).card_detect_irq, host);
1091 flush_scheduled_work();
1092
1093 clk_disable(host->fclk);
1094 clk_disable(host->iclk);
1095 clk_put(host->fclk);
1096 clk_put(host->iclk);
1097 if (host->dbclk_enabled) {
1098 clk_disable(host->dbclk);
1099 clk_put(host->dbclk);
1100 }
1101
1102 mmc_free_host(host->mmc);
1103 iounmap(host->base);
1104 }
1105
1106 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1107 if (res)
1108 release_mem_region(res->start, res->end - res->start + 1);
1109 platform_set_drvdata(pdev, NULL);
1110
1111 return 0;
1112}
1113
1114#ifdef CONFIG_PM
1115static int omap_mmc_suspend(struct platform_device *pdev, pm_message_t state)
1116{
1117 int ret = 0;
1118 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1119
1120 if (host && host->suspended)
1121 return 0;
1122
1123 if (host) {
1124 ret = mmc_suspend_host(host->mmc, state);
1125 if (ret == 0) {
1126 host->suspended = 1;
1127
1128 OMAP_HSMMC_WRITE(host->base, ISE, 0);
1129 OMAP_HSMMC_WRITE(host->base, IE, 0);
1130
1131 if (host->pdata->suspend) {
1132 ret = host->pdata->suspend(&pdev->dev,
1133 host->slot_id);
1134 if (ret)
1135 dev_dbg(mmc_dev(host->mmc),
1136 "Unable to handle MMC board"
1137 " level suspend\n");
1138 }
1139
1140 if (!(OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET)) {
1141 OMAP_HSMMC_WRITE(host->base, HCTL,
1142 OMAP_HSMMC_READ(host->base, HCTL)
1143 & SDVSCLR);
1144 OMAP_HSMMC_WRITE(host->base, HCTL,
1145 OMAP_HSMMC_READ(host->base, HCTL)
1146 | SDVS30);
1147 OMAP_HSMMC_WRITE(host->base, HCTL,
1148 OMAP_HSMMC_READ(host->base, HCTL)
1149 | SDBP);
1150 }
1151
1152 clk_disable(host->fclk);
1153 clk_disable(host->iclk);
1154 clk_disable(host->dbclk);
1155 }
1156
1157 }
1158 return ret;
1159}
1160
1161/* Routine to resume the MMC device */
1162static int omap_mmc_resume(struct platform_device *pdev)
1163{
1164 int ret = 0;
1165 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1166
1167 if (host && !host->suspended)
1168 return 0;
1169
1170 if (host) {
1171
1172 ret = clk_enable(host->fclk);
1173 if (ret)
1174 goto clk_en_err;
1175
1176 ret = clk_enable(host->iclk);
1177 if (ret) {
1178 clk_disable(host->fclk);
1179 clk_put(host->fclk);
1180 goto clk_en_err;
1181 }
1182
1183 if (clk_enable(host->dbclk) != 0)
1184 dev_dbg(mmc_dev(host->mmc),
1185 "Enabling debounce clk failed\n");
1186
1187 if (host->pdata->resume) {
1188 ret = host->pdata->resume(&pdev->dev, host->slot_id);
1189 if (ret)
1190 dev_dbg(mmc_dev(host->mmc),
1191 "Unmask interrupt failed\n");
1192 }
1193
1194 /* Notify the core to resume the host */
1195 ret = mmc_resume_host(host->mmc);
1196 if (ret == 0)
1197 host->suspended = 0;
1198 }
1199
1200 return ret;
1201
1202clk_en_err:
1203 dev_dbg(mmc_dev(host->mmc),
1204 "Failed to enable MMC clocks during resume\n");
1205 return ret;
1206}
1207
1208#else
1209#define omap_mmc_suspend NULL
1210#define omap_mmc_resume NULL
1211#endif
1212
1213static struct platform_driver omap_mmc_driver = {
1214 .probe = omap_mmc_probe,
1215 .remove = omap_mmc_remove,
1216 .suspend = omap_mmc_suspend,
1217 .resume = omap_mmc_resume,
1218 .driver = {
1219 .name = DRIVER_NAME,
1220 .owner = THIS_MODULE,
1221 },
1222};
1223
1224static int __init omap_mmc_init(void)
1225{
1226 /* Register the MMC driver */
1227 return platform_driver_register(&omap_mmc_driver);
1228}
1229
1230static void __exit omap_mmc_cleanup(void)
1231{
1232 /* Unregister MMC driver */
1233 platform_driver_unregister(&omap_mmc_driver);
1234}
1235
1236module_init(omap_mmc_init);
1237module_exit(omap_mmc_cleanup);
1238
1239MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");
1240MODULE_LICENSE("GPL");
1241MODULE_ALIAS("platform:" DRIVER_NAME);
1242MODULE_AUTHOR("Texas Instruments Inc");
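
A worked example of the divisor computation in omap_mmc_set_ios() above may help: the reference clock is OMAP_MMC_MASTER_CLOCK (96 MHz) and the divider is rounded up so the card never runs faster than requested. The helper below is an editor's restatement of that logic, not an addition to the driver.

static unsigned int hsmmc_clk_divisor(unsigned int req_hz)
{
	unsigned int dsor = OMAP_MMC_MASTER_CLOCK / req_hz;

	if (dsor < 1)
		dsor = 1;
	if (OMAP_MMC_MASTER_CLOCK / dsor > req_hz)
		dsor++;			/* round up: never exceed req_hz */
	if (dsor > 250)
		dsor = 250;		/* driver caps the divisor at 250 */
	return dsor;
}

/*
 * 400 kHz enumeration clock: 96000000 / 400000   = 240, and 96 MHz / 240 = 400 kHz exactly.
 * 52 MHz high-speed clock:   96000000 / 52000000 = 1, but 96 MHz > 52 MHz, so dsor
 *                            becomes 2 and the card runs at 48 MHz.
 */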
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index fcc98a4cce3c..35a98eec7414 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -20,7 +20,7 @@
20#include <linux/irq.h> 20#include <linux/irq.h>
21#include <linux/io.h> 21#include <linux/io.h>
22 22
23#include <asm/dma.h> 23#include <mach/dma.h>
24 24
25#include <mach/regs-sdi.h> 25#include <mach/regs-sdi.h>
26#include <mach/regs-gpio.h> 26#include <mach/regs-gpio.h>
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 65929db29446..1f6eb2578717 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -676,7 +676,7 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
676 676
677 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->numchips = %d\n", 677 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->numchips = %d\n",
678 chip->numchips); 678 chip->numchips);
679 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chipsize = %ld\n", 679 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chipsize = %lld\n",
680 chip->chipsize); 680 chip->chipsize);
681 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->pagemask = %8x\n", 681 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
682 chip->pagemask); 682 chip->pagemask);
@@ -703,7 +703,7 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
703 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.layout = %p\n", 703 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.layout = %p\n",
704 chip->ecc.layout); 704 chip->ecc.layout);
705 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags); 705 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
706 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->size = %d\n", mtd->size); 706 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
707 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->erasesize = %d\n", 707 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
708 mtd->erasesize); 708 mtd->erasesize);
709 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->writesize = %d\n", 709 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->writesize = %d\n",
@@ -932,8 +932,8 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
932#endif 932#endif
933 add_mtd_device(&priv->mtd); 933 add_mtd_device(&priv->mtd);
934 934
935 printk(KERN_INFO "eLBC NAND device at 0x%zx, bank %d\n", 935 printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
936 res.start, priv->bank); 936 (unsigned long long)res.start, priv->bank);
937 return 0; 937 return 0;
938 938
939err: 939err:
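
The fsl_elbc_nand hunks above are pure printk format fixes: mtd->size and chip->chipsize are now 64-bit, and res.start is a resource_size_t that may be 32 or 64 bits depending on the configuration. A small editor's sketch of the portable idiom, with invented names:

#include <linux/kernel.h>
#include <linux/ioport.h>

static void demo_report_flash(resource_size_t start, uint64_t size)
{
	/* cast to unsigned long long so the "%ll" formats are correct on every config */
	printk(KERN_INFO "flash at 0x%llx, size %llu bytes\n",
	       (unsigned long long)start, (unsigned long long)size);
}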
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 9bd6c9ac8443..a8b9376cf324 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -107,7 +107,7 @@ static int __devinit pasemi_nand_probe(struct of_device *ofdev,
107 if (pasemi_nand_mtd) 107 if (pasemi_nand_mtd)
108 return -ENODEV; 108 return -ENODEV;
109 109
110 pr_debug("pasemi_nand at %lx-%lx\n", res.start, res.end); 110 pr_debug("pasemi_nand at %llx-%llx\n", res.start, res.end);
111 111
112 /* Allocate memory for MTD device structure and private data */ 112 /* Allocate memory for MTD device structure and private data */
113 pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) + 113 pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) +
@@ -170,7 +170,7 @@ static int __devinit pasemi_nand_probe(struct of_device *ofdev,
170 goto out_lpc; 170 goto out_lpc;
171 } 171 }
172 172
173 printk(KERN_INFO "PA Semi NAND flash at %08lx, control at I/O %x\n", 173 printk(KERN_INFO "PA Semi NAND flash at %08llx, control at I/O %x\n",
174 res.start, lpcctl); 174 res.start, lpcctl);
175 175
176 return 0; 176 return 0;
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 96ecc1766fa8..77a4f1446156 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -629,7 +629,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
629 } 629 }
630 630
631 if (c->gpio_irq) { 631 if (c->gpio_irq) {
632 if ((r = omap_request_gpio(c->gpio_irq)) < 0) { 632 if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
633 dev_err(&pdev->dev, "Failed to request GPIO%d for " 633 dev_err(&pdev->dev, "Failed to request GPIO%d for "
634 "OneNAND\n", c->gpio_irq); 634 "OneNAND\n", c->gpio_irq);
635 goto err_iounmap; 635 goto err_iounmap;
@@ -726,7 +726,7 @@ err_release_dma:
726 free_irq(gpio_to_irq(c->gpio_irq), c); 726 free_irq(gpio_to_irq(c->gpio_irq), c);
727err_release_gpio: 727err_release_gpio:
728 if (c->gpio_irq) 728 if (c->gpio_irq)
729 omap_free_gpio(c->gpio_irq); 729 gpio_free(c->gpio_irq);
730err_iounmap: 730err_iounmap:
731 iounmap(c->onenand.base); 731 iounmap(c->onenand.base);
732err_release_mem_region: 732err_release_mem_region:
@@ -761,7 +761,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
761 platform_set_drvdata(pdev, NULL); 761 platform_set_drvdata(pdev, NULL);
762 if (c->gpio_irq) { 762 if (c->gpio_irq) {
763 free_irq(gpio_to_irq(c->gpio_irq), c); 763 free_irq(gpio_to_irq(c->gpio_irq), c);
764 omap_free_gpio(c->gpio_irq); 764 gpio_free(c->gpio_irq);
765 } 765 }
766 iounmap(c->onenand.base); 766 iounmap(c->onenand.base);
767 release_mem_region(c->phys_base, ONENAND_IO_SIZE); 767 release_mem_region(c->phys_base, ONENAND_IO_SIZE);
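
The omap2 OneNAND hunks above drop the OMAP-private omap_request_gpio()/omap_free_gpio() calls in favour of the generic gpiolib API. A sketch of the request/irq/free sequence they converge on is shown below; the demo_* names and the trigger flag are placeholders, not values from this driver.

#include <linux/gpio.h>
#include <linux/interrupt.h>

static int demo_claim_irq_gpio(unsigned int gpio, irq_handler_t handler, void *ctx)
{
	int err = gpio_request(gpio, "demo irq");

	if (err)
		return err;

	err = gpio_direction_input(gpio);
	if (err)
		goto out_free;

	/* the GPIO doubles as an interrupt line, as in the OneNAND case above */
	err = request_irq(gpio_to_irq(gpio), handler,
			  IRQF_TRIGGER_RISING, "demo", ctx);
	if (err)
		goto out_free;

	return 0;

out_free:
	gpio_free(gpio);
	return err;
}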
diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug
index 1e2ee22edeff..2246f154e2f7 100644
--- a/drivers/mtd/ubi/Kconfig.debug
+++ b/drivers/mtd/ubi/Kconfig.debug
@@ -33,16 +33,6 @@ config MTD_UBI_DEBUG_DISABLE_BGT
33 This option switches the background thread off by default. The thread 33 This option switches the background thread off by default. The thread
 34 may also be enabled/disabled via UBI sysfs. 34 may also be enabled/disabled via UBI sysfs.
35 35
36config MTD_UBI_DEBUG_USERSPACE_IO
37 bool "Direct user-space write/erase support"
38 default n
39 depends on MTD_UBI_DEBUG
40 help
41 By default, users cannot directly write and erase individual
42 eraseblocks of dynamic volumes, and have to use update operation
43 instead. This option enables this capability - it is very useful for
44 debugging and testing.
45
46config MTD_UBI_DEBUG_EMULATE_BITFLIPS 36config MTD_UBI_DEBUG_EMULATE_BITFLIPS
47 bool "Emulate flash bit-flips" 37 bool "Emulate flash bit-flips"
48 depends on MTD_UBI_DEBUG 38 depends on MTD_UBI_DEBUG
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 9082768cc6c3..4048db83aef6 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -263,8 +263,12 @@ static ssize_t dev_attribute_show(struct device *dev,
263 return ret; 263 return ret;
264} 264}
265 265
266/* Fake "release" method for UBI devices */ 266static void dev_release(struct device *dev)
267static void dev_release(struct device *dev) { } 267{
268 struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
269
270 kfree(ubi);
271}
268 272
269/** 273/**
270 * ubi_sysfs_init - initialize sysfs for an UBI device. 274 * ubi_sysfs_init - initialize sysfs for an UBI device.
@@ -380,7 +384,7 @@ static void free_user_volumes(struct ubi_device *ubi)
380 */ 384 */
381static int uif_init(struct ubi_device *ubi) 385static int uif_init(struct ubi_device *ubi)
382{ 386{
383 int i, err, do_free = 0; 387 int i, err;
384 dev_t dev; 388 dev_t dev;
385 389
386 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); 390 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
@@ -427,13 +431,10 @@ static int uif_init(struct ubi_device *ubi)
427 431
428out_volumes: 432out_volumes:
429 kill_volumes(ubi); 433 kill_volumes(ubi);
430 do_free = 0;
431out_sysfs: 434out_sysfs:
432 ubi_sysfs_close(ubi); 435 ubi_sysfs_close(ubi);
433 cdev_del(&ubi->cdev); 436 cdev_del(&ubi->cdev);
434out_unreg: 437out_unreg:
435 if (do_free)
436 free_user_volumes(ubi);
437 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); 438 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
438 ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err); 439 ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
439 return err; 440 return err;
@@ -947,6 +948,12 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
947 if (ubi->bgt_thread) 948 if (ubi->bgt_thread)
948 kthread_stop(ubi->bgt_thread); 949 kthread_stop(ubi->bgt_thread);
949 950
951 /*
952 * Get a reference to the device in order to prevent 'dev_release()'
953 * from freeing @ubi object.
954 */
955 get_device(&ubi->dev);
956
950 uif_close(ubi); 957 uif_close(ubi);
951 ubi_wl_close(ubi); 958 ubi_wl_close(ubi);
952 free_internal_volumes(ubi); 959 free_internal_volumes(ubi);
@@ -958,7 +965,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
958 vfree(ubi->dbg_peb_buf); 965 vfree(ubi->dbg_peb_buf);
959#endif 966#endif
960 ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); 967 ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
961 kfree(ubi); 968 put_device(&ubi->dev);
962 return 0; 969 return 0;
963} 970}
964 971
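
The build.c change above stops treating dev_release() as a stub: struct ubi_device is now freed by its embedded struct device's release callback, and ubi_detach_mtd_dev() takes an extra reference so teardown can finish before that happens. In generic form, and with placeholder names, the pattern looks roughly like this:

#include <linux/device.h>
#include <linux/slab.h>

struct demo_obj {
	struct device dev;
	/* ... driver state ... */
};

static void demo_release(struct device *dev)
{
	kfree(container_of(dev, struct demo_obj, dev));
}

static struct demo_obj *demo_create(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	obj->dev.release = demo_release;	/* frees obj on the final put */
	device_initialize(&obj->dev);
	dev_set_name(&obj->dev, "demo0");
	if (device_add(&obj->dev)) {
		put_device(&obj->dev);		/* release callback frees obj */
		return NULL;
	}
	return obj;
}

static void demo_detach(struct demo_obj *obj)
{
	get_device(&obj->dev);		/* pin obj across teardown */
	device_del(&obj->dev);		/* may drop the last external reference */
	/* ... tear down state that still dereferences obj ... */
	put_device(&obj->dev);		/* demo_release() may now run */
}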
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 98cf31ed0814..e63c8fc3df3a 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -40,9 +40,9 @@
40#include <linux/ioctl.h> 40#include <linux/ioctl.h>
41#include <linux/capability.h> 41#include <linux/capability.h>
42#include <linux/uaccess.h> 42#include <linux/uaccess.h>
43#include <linux/smp_lock.h> 43#include <linux/compat.h>
44#include <linux/math64.h>
44#include <mtd/ubi-user.h> 45#include <mtd/ubi-user.h>
45#include <asm/div64.h>
46#include "ubi.h" 46#include "ubi.h"
47 47
48/** 48/**
@@ -195,7 +195,6 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
195 int err, lnum, off, len, tbuf_size; 195 int err, lnum, off, len, tbuf_size;
196 size_t count_save = count; 196 size_t count_save = count;
197 void *tbuf; 197 void *tbuf;
198 uint64_t tmp;
199 198
200 dbg_gen("read %zd bytes from offset %lld of volume %d", 199 dbg_gen("read %zd bytes from offset %lld of volume %d",
201 count, *offp, vol->vol_id); 200 count, *offp, vol->vol_id);
@@ -225,10 +224,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
225 return -ENOMEM; 224 return -ENOMEM;
226 225
227 len = count > tbuf_size ? tbuf_size : count; 226 len = count > tbuf_size ? tbuf_size : count;
228 227 lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
229 tmp = *offp;
230 off = do_div(tmp, vol->usable_leb_size);
231 lnum = tmp;
232 228
233 do { 229 do {
234 cond_resched(); 230 cond_resched();
@@ -263,12 +259,9 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
263 return err ? err : count_save - count; 259 return err ? err : count_save - count;
264} 260}
265 261
266#ifdef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
267
268/* 262/*
269 * This function allows to directly write to dynamic UBI volumes, without 263 * This function allows to directly write to dynamic UBI volumes, without
270 * issuing the volume update operation. Available only as a debugging feature. 264 * issuing the volume update operation.
271 * Very useful for testing UBI.
272 */ 265 */
273static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, 266static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
274 size_t count, loff_t *offp) 267 size_t count, loff_t *offp)
@@ -279,7 +272,9 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
279 int lnum, off, len, tbuf_size, err = 0; 272 int lnum, off, len, tbuf_size, err = 0;
280 size_t count_save = count; 273 size_t count_save = count;
281 char *tbuf; 274 char *tbuf;
282 uint64_t tmp; 275
276 if (!vol->direct_writes)
277 return -EPERM;
283 278
284 dbg_gen("requested: write %zd bytes to offset %lld of volume %u", 279 dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
285 count, *offp, vol->vol_id); 280 count, *offp, vol->vol_id);
@@ -287,10 +282,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
287 if (vol->vol_type == UBI_STATIC_VOLUME) 282 if (vol->vol_type == UBI_STATIC_VOLUME)
288 return -EROFS; 283 return -EROFS;
289 284
290 tmp = *offp; 285 lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
291 off = do_div(tmp, vol->usable_leb_size);
292 lnum = tmp;
293
294 if (off & (ubi->min_io_size - 1)) { 286 if (off & (ubi->min_io_size - 1)) {
295 dbg_err("unaligned position"); 287 dbg_err("unaligned position");
296 return -EINVAL; 288 return -EINVAL;
@@ -347,10 +339,6 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
347 return err ? err : count_save - count; 339 return err ? err : count_save - count;
348} 340}
349 341
350#else
351#define vol_cdev_direct_write(file, buf, count, offp) (-EPERM)
352#endif /* CONFIG_MTD_UBI_DEBUG_USERSPACE_IO */
353
354static ssize_t vol_cdev_write(struct file *file, const char __user *buf, 342static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
355 size_t count, loff_t *offp) 343 size_t count, loff_t *offp)
356{ 344{
@@ -402,8 +390,8 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
402 return count; 390 return count;
403} 391}
404 392
405static int vol_cdev_ioctl(struct inode *inode, struct file *file, 393static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
406 unsigned int cmd, unsigned long arg) 394 unsigned long arg)
407{ 395{
408 int err = 0; 396 int err = 0;
409 struct ubi_volume_desc *desc = file->private_data; 397 struct ubi_volume_desc *desc = file->private_data;
@@ -487,7 +475,6 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
487 break; 475 break;
488 } 476 }
489 477
490#ifdef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
491 /* Logical eraseblock erasure command */ 478 /* Logical eraseblock erasure command */
492 case UBI_IOCEBER: 479 case UBI_IOCEBER:
493 { 480 {
@@ -518,13 +505,77 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
518 err = ubi_wl_flush(ubi); 505 err = ubi_wl_flush(ubi);
519 break; 506 break;
520 } 507 }
521#endif 508
509 /* Logical eraseblock map command */
510 case UBI_IOCEBMAP:
511 {
512 struct ubi_map_req req;
513
514 err = copy_from_user(&req, argp, sizeof(struct ubi_map_req));
515 if (err) {
516 err = -EFAULT;
517 break;
518 }
519 err = ubi_leb_map(desc, req.lnum, req.dtype);
520 break;
521 }
522
523 /* Logical eraseblock un-map command */
524 case UBI_IOCEBUNMAP:
525 {
526 int32_t lnum;
527
528 err = get_user(lnum, (__user int32_t *)argp);
529 if (err) {
530 err = -EFAULT;
531 break;
532 }
533 err = ubi_leb_unmap(desc, lnum);
534 break;
535 }
536
537 /* Check if logical eraseblock is mapped command */
538 case UBI_IOCEBISMAP:
539 {
540 int32_t lnum;
541
542 err = get_user(lnum, (__user int32_t *)argp);
543 if (err) {
544 err = -EFAULT;
545 break;
546 }
547 err = ubi_is_mapped(desc, lnum);
548 break;
549 }
550
551 /* Set volume property command*/
552 case UBI_IOCSETPROP:
553 {
554 struct ubi_set_prop_req req;
555
556 err = copy_from_user(&req, argp,
557 sizeof(struct ubi_set_prop_req));
558 if (err) {
559 err = -EFAULT;
560 break;
561 }
562 switch (req.property) {
563 case UBI_PROP_DIRECT_WRITE:
564 mutex_lock(&ubi->volumes_mutex);
565 desc->vol->direct_writes = !!req.value;
566 mutex_unlock(&ubi->volumes_mutex);
567 break;
568 default:
569 err = -EINVAL;
570 break;
571 }
572 break;
573 }
522 574
523 default: 575 default:
524 err = -ENOTTY; 576 err = -ENOTTY;
525 break; 577 break;
526 } 578 }
527
528 return err; 579 return err;
529} 580}
530 581
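With the UBI_IOCSETPROP handling added above, direct writes become a per-volume run-time property instead of a compile-time debug option. A hypothetical user-space sketch of switching it on (UBI_IOCSETPROP, UBI_PROP_DIRECT_WRITE and struct ubi_set_prop_req are assumed to come from <mtd/ubi-user.h> as extended by this patch; the helper name and error handling are made up):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

static int enable_direct_writes(const char *vol_node)
{
        struct ubi_set_prop_req req = {
                .property = UBI_PROP_DIRECT_WRITE,
                .value    = 1,
        };
        int fd = open(vol_node, O_RDWR);

        if (fd < 0)
                return -1;
        if (ioctl(fd, UBI_IOCSETPROP, &req) < 0) {
                close(fd);
                return -1;
        }
        return fd;      /* subsequent write()s go straight to the LEBs */
}

Something like enable_direct_writes("/dev/ubi0_0") would then let vol_cdev_direct_write() above accept the data instead of returning -EPERM.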
@@ -762,8 +813,8 @@ out_free:
762 return err; 813 return err;
763} 814}
764 815
765static int ubi_cdev_ioctl(struct inode *inode, struct file *file, 816static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
766 unsigned int cmd, unsigned long arg) 817 unsigned long arg)
767{ 818{
768 int err = 0; 819 int err = 0;
769 struct ubi_device *ubi; 820 struct ubi_device *ubi;
@@ -773,7 +824,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
773 if (!capable(CAP_SYS_RESOURCE)) 824 if (!capable(CAP_SYS_RESOURCE))
774 return -EPERM; 825 return -EPERM;
775 826
776 ubi = ubi_get_by_major(imajor(inode)); 827 ubi = ubi_get_by_major(imajor(file->f_mapping->host));
777 if (!ubi) 828 if (!ubi)
778 return -ENODEV; 829 return -ENODEV;
779 830
@@ -843,7 +894,6 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
843 case UBI_IOCRSVOL: 894 case UBI_IOCRSVOL:
844 { 895 {
845 int pebs; 896 int pebs;
846 uint64_t tmp;
847 struct ubi_rsvol_req req; 897 struct ubi_rsvol_req req;
848 898
849 dbg_gen("re-size volume"); 899 dbg_gen("re-size volume");
@@ -863,9 +913,8 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
863 break; 913 break;
864 } 914 }
865 915
866 tmp = req.bytes; 916 pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
867 pebs = !!do_div(tmp, desc->vol->usable_leb_size); 917 desc->vol->usable_leb_size);
868 pebs += tmp;
869 918
870 mutex_lock(&ubi->volumes_mutex); 919 mutex_lock(&ubi->volumes_mutex);
871 err = ubi_resize_volume(desc, pebs); 920 err = ubi_resize_volume(desc, pebs);
@@ -909,8 +958,8 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
909 return err; 958 return err;
910} 959}
911 960
912static int ctrl_cdev_ioctl(struct inode *inode, struct file *file, 961static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
913 unsigned int cmd, unsigned long arg) 962 unsigned long arg)
914{ 963{
915 int err = 0; 964 int err = 0;
916 void __user *argp = (void __user *)arg; 965 void __user *argp = (void __user *)arg;
@@ -986,26 +1035,59 @@ static int ctrl_cdev_ioctl(struct inode *inode, struct file *file,
986 return err; 1035 return err;
987} 1036}
988 1037
989/* UBI control character device operations */ 1038#ifdef CONFIG_COMPAT
990struct file_operations ubi_ctrl_cdev_operations = { 1039static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd,
991 .ioctl = ctrl_cdev_ioctl, 1040 unsigned long arg)
992 .owner = THIS_MODULE, 1041{
1042 unsigned long translated_arg = (unsigned long)compat_ptr(arg);
1043
1044 return vol_cdev_ioctl(file, cmd, translated_arg);
1045}
1046
1047static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd,
1048 unsigned long arg)
1049{
1050 unsigned long translated_arg = (unsigned long)compat_ptr(arg);
1051
1052 return ubi_cdev_ioctl(file, cmd, translated_arg);
1053}
1054
1055static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd,
1056 unsigned long arg)
1057{
1058 unsigned long translated_arg = (unsigned long)compat_ptr(arg);
1059
1060 return ctrl_cdev_ioctl(file, cmd, translated_arg);
1061}
1062#else
1063#define vol_cdev_compat_ioctl NULL
1064#define ubi_cdev_compat_ioctl NULL
1065#define ctrl_cdev_compat_ioctl NULL
1066#endif
1067
1068/* UBI volume character device operations */
1069const struct file_operations ubi_vol_cdev_operations = {
1070 .owner = THIS_MODULE,
1071 .open = vol_cdev_open,
1072 .release = vol_cdev_release,
1073 .llseek = vol_cdev_llseek,
1074 .read = vol_cdev_read,
1075 .write = vol_cdev_write,
1076 .unlocked_ioctl = vol_cdev_ioctl,
1077 .compat_ioctl = vol_cdev_compat_ioctl,
993}; 1078};
994 1079
995/* UBI character device operations */ 1080/* UBI character device operations */
996struct file_operations ubi_cdev_operations = { 1081const struct file_operations ubi_cdev_operations = {
997 .owner = THIS_MODULE, 1082 .owner = THIS_MODULE,
998 .ioctl = ubi_cdev_ioctl, 1083 .llseek = no_llseek,
999 .llseek = no_llseek, 1084 .unlocked_ioctl = ubi_cdev_ioctl,
1085 .compat_ioctl = ubi_cdev_compat_ioctl,
1000}; 1086};
1001 1087
1002/* UBI volume character device operations */ 1088/* UBI control character device operations */
1003struct file_operations ubi_vol_cdev_operations = { 1089const struct file_operations ubi_ctrl_cdev_operations = {
1004 .owner = THIS_MODULE, 1090 .owner = THIS_MODULE,
1005 .open = vol_cdev_open, 1091 .unlocked_ioctl = ctrl_cdev_ioctl,
1006 .release = vol_cdev_release, 1092 .compat_ioctl = ctrl_cdev_compat_ioctl,
1007 .llseek = vol_cdev_llseek,
1008 .read = vol_cdev_read,
1009 .write = vol_cdev_write,
1010 .ioctl = vol_cdev_ioctl,
1011}; 1093};
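Note that all three file_operations tables are now const and use .unlocked_ioctl/.compat_ioctl rather than the old BKL-taking .ioctl hook. A generic sketch of how such a const table is attached to a character device (not the exact UBI registration code, which lives in build.c and vmt.c):

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/module.h>

static struct cdev my_cdev;

static int my_register_chardev(dev_t devt, const struct file_operations *fops)
{
        cdev_init(&my_cdev, fops);
        my_cdev.owner = THIS_MODULE;
        return cdev_add(&my_cdev, devt, 1);
}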
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 6dd4f5e77f82..49cd55ade9c8 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -28,7 +28,7 @@
28 * eraseblock size is equivalent to the logical eraseblock size of the volume. 28 * eraseblock size is equivalent to the logical eraseblock size of the volume.
29 */ 29 */
30 30
31#include <asm/div64.h> 31#include <linux/math64.h>
32#include "ubi.h" 32#include "ubi.h"
33 33
34/** 34/**
@@ -109,7 +109,6 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
109 int err = 0, lnum, offs, total_read; 109 int err = 0, lnum, offs, total_read;
110 struct ubi_volume *vol; 110 struct ubi_volume *vol;
111 struct ubi_device *ubi; 111 struct ubi_device *ubi;
112 uint64_t tmp = from;
113 112
114 dbg_gen("read %zd bytes from offset %lld", len, from); 113 dbg_gen("read %zd bytes from offset %lld", len, from);
115 114
@@ -119,9 +118,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
119 vol = container_of(mtd, struct ubi_volume, gluebi_mtd); 118 vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
120 ubi = vol->ubi; 119 ubi = vol->ubi;
121 120
122 offs = do_div(tmp, mtd->erasesize); 121 lnum = div_u64_rem(from, mtd->erasesize, &offs);
123 lnum = tmp;
124
125 total_read = len; 122 total_read = len;
126 while (total_read) { 123 while (total_read) {
127 size_t to_read = mtd->erasesize - offs; 124 size_t to_read = mtd->erasesize - offs;
@@ -160,7 +157,6 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
160 int err = 0, lnum, offs, total_written; 157 int err = 0, lnum, offs, total_written;
161 struct ubi_volume *vol; 158 struct ubi_volume *vol;
162 struct ubi_device *ubi; 159 struct ubi_device *ubi;
163 uint64_t tmp = to;
164 160
165 dbg_gen("write %zd bytes to offset %lld", len, to); 161 dbg_gen("write %zd bytes to offset %lld", len, to);
166 162
@@ -173,8 +169,7 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
173 if (ubi->ro_mode) 169 if (ubi->ro_mode)
174 return -EROFS; 170 return -EROFS;
175 171
176 offs = do_div(tmp, mtd->erasesize); 172 lnum = div_u64_rem(to, mtd->erasesize, &offs);
177 lnum = tmp;
178 173
179 if (len % mtd->writesize || offs % mtd->writesize) 174 if (len % mtd->writesize || offs % mtd->writesize)
180 return -EINVAL; 175 return -EINVAL;
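The gluebi.c hunks above, like the cdev.c ones earlier, drop the do_div() idiom, which divides a 64-bit lvalue in place and returns only the remainder, in favour of div_u64_rem() from <linux/math64.h>, which returns the quotient and hands the remainder back through a pointer. A minimal sketch of the new idiom with a hypothetical helper name:

#include <linux/math64.h>

/* Split a byte offset into (logical eraseblock number, offset within it). */
static int offset_to_leb(u64 offset, u32 leb_size, u32 *offs)
{
        /* The quotient is the LEB number; LEB counts comfortably fit an int. */
        return div_u64_rem(offset, leb_size, offs);
}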
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index ecde202a5a12..c3d653ba5ca0 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -42,7 +42,7 @@
42 42
43#include <linux/err.h> 43#include <linux/err.h>
44#include <linux/crc32.h> 44#include <linux/crc32.h>
45#include <asm/div64.h> 45#include <linux/math64.h>
46#include "ubi.h" 46#include "ubi.h"
47 47
48#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID 48#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
@@ -904,10 +904,8 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
904 dbg_msg("scanning is finished"); 904 dbg_msg("scanning is finished");
905 905
906 /* Calculate mean erase counter */ 906 /* Calculate mean erase counter */
907 if (si->ec_count) { 907 if (si->ec_count)
908 do_div(si->ec_sum, si->ec_count); 908 si->mean_ec = div_u64(si->ec_sum, si->ec_count);
909 si->mean_ec = si->ec_sum;
910 }
911 909
912 if (si->is_empty) 910 if (si->is_empty)
913 ubi_msg("empty MTD device detected"); 911 ubi_msg("empty MTD device detected");
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 4a8ec485c91d..c055511bb1b2 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -206,6 +206,7 @@ struct ubi_volume_desc;
206 * @upd_marker: %1 if the update marker is set for this volume 206 * @upd_marker: %1 if the update marker is set for this volume
207 * @updating: %1 if the volume is being updated 207 * @updating: %1 if the volume is being updated
208 * @changing_leb: %1 if the atomic LEB change ioctl command is in progress 208 * @changing_leb: %1 if the atomic LEB change ioctl command is in progress
209 * @direct_writes: %1 if direct writes are enabled for this volume
209 * 210 *
210 * @gluebi_desc: gluebi UBI volume descriptor 211 * @gluebi_desc: gluebi UBI volume descriptor
211 * @gluebi_refcount: reference count of the gluebi MTD device 212 * @gluebi_refcount: reference count of the gluebi MTD device
@@ -253,6 +254,7 @@ struct ubi_volume {
253 unsigned int upd_marker:1; 254 unsigned int upd_marker:1;
254 unsigned int updating:1; 255 unsigned int updating:1;
255 unsigned int changing_leb:1; 256 unsigned int changing_leb:1;
257 unsigned int direct_writes:1;
256 258
257#ifdef CONFIG_MTD_UBI_GLUEBI 259#ifdef CONFIG_MTD_UBI_GLUEBI
258 /* 260 /*
@@ -304,7 +306,8 @@ struct ubi_wl_entry;
304 * @vtbl_size: size of the volume table in bytes 306 * @vtbl_size: size of the volume table in bytes
305 * @vtbl: in-RAM volume table copy 307 * @vtbl: in-RAM volume table copy
306 * @volumes_mutex: protects on-flash volume table and serializes volume 308 * @volumes_mutex: protects on-flash volume table and serializes volume
307 * changes, like creation, deletion, update, re-size and re-name 309 * changes, like creation, deletion, update, re-size,
310 * re-name and set property
308 * 311 *
309 * @max_ec: current highest erase counter value 312 * @max_ec: current highest erase counter value
310 * @mean_ec: current mean erase counter value 313 * @mean_ec: current mean erase counter value
@@ -449,9 +452,9 @@ struct ubi_device {
449}; 452};
450 453
451extern struct kmem_cache *ubi_wl_entry_slab; 454extern struct kmem_cache *ubi_wl_entry_slab;
452extern struct file_operations ubi_ctrl_cdev_operations; 455extern const struct file_operations ubi_ctrl_cdev_operations;
453extern struct file_operations ubi_cdev_operations; 456extern const struct file_operations ubi_cdev_operations;
454extern struct file_operations ubi_vol_cdev_operations; 457extern const struct file_operations ubi_vol_cdev_operations;
455extern struct class *ubi_class; 458extern struct class *ubi_class;
456extern struct mutex ubi_devices_mutex; 459extern struct mutex ubi_devices_mutex;
457 460
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 8b89cc18ff0b..6b4d1ae891ae 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -40,7 +40,7 @@
40 40
41#include <linux/err.h> 41#include <linux/err.h>
42#include <linux/uaccess.h> 42#include <linux/uaccess.h>
43#include <asm/div64.h> 43#include <linux/math64.h>
44#include "ubi.h" 44#include "ubi.h"
45 45
46/** 46/**
@@ -89,7 +89,6 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
89 long long bytes) 89 long long bytes)
90{ 90{
91 int err; 91 int err;
92 uint64_t tmp;
93 struct ubi_vtbl_record vtbl_rec; 92 struct ubi_vtbl_record vtbl_rec;
94 93
95 dbg_gen("clear update marker for volume %d", vol->vol_id); 94 dbg_gen("clear update marker for volume %d", vol->vol_id);
@@ -101,9 +100,9 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
101 100
102 if (vol->vol_type == UBI_STATIC_VOLUME) { 101 if (vol->vol_type == UBI_STATIC_VOLUME) {
103 vol->corrupted = 0; 102 vol->corrupted = 0;
104 vol->used_bytes = tmp = bytes; 103 vol->used_bytes = bytes;
105 vol->last_eb_bytes = do_div(tmp, vol->usable_leb_size); 104 vol->used_ebs = div_u64_rem(bytes, vol->usable_leb_size,
106 vol->used_ebs = tmp; 105 &vol->last_eb_bytes);
107 if (vol->last_eb_bytes) 106 if (vol->last_eb_bytes)
108 vol->used_ebs += 1; 107 vol->used_ebs += 1;
109 else 108 else
@@ -131,7 +130,6 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
131 long long bytes) 130 long long bytes)
132{ 131{
133 int i, err; 132 int i, err;
134 uint64_t tmp;
135 133
136 dbg_gen("start update of volume %d, %llu bytes", vol->vol_id, bytes); 134 dbg_gen("start update of volume %d, %llu bytes", vol->vol_id, bytes);
137 ubi_assert(!vol->updating && !vol->changing_leb); 135 ubi_assert(!vol->updating && !vol->changing_leb);
@@ -161,9 +159,8 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
161 if (!vol->upd_buf) 159 if (!vol->upd_buf)
162 return -ENOMEM; 160 return -ENOMEM;
163 161
164 tmp = bytes; 162 vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
165 vol->upd_ebs = !!do_div(tmp, vol->usable_leb_size); 163 vol->usable_leb_size);
166 vol->upd_ebs += tmp;
167 vol->upd_bytes = bytes; 164 vol->upd_bytes = bytes;
168 vol->upd_received = 0; 165 vol->upd_received = 0;
169 return 0; 166 return 0;
@@ -282,7 +279,6 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
282int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, 279int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
283 const void __user *buf, int count) 280 const void __user *buf, int count)
284{ 281{
285 uint64_t tmp;
286 int lnum, offs, err = 0, len, to_write = count; 282 int lnum, offs, err = 0, len, to_write = count;
287 283
288 dbg_gen("write %d of %lld bytes, %lld already passed", 284 dbg_gen("write %d of %lld bytes, %lld already passed",
@@ -291,10 +287,7 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
291 if (ubi->ro_mode) 287 if (ubi->ro_mode)
292 return -EROFS; 288 return -EROFS;
293 289
294 tmp = vol->upd_received; 290 lnum = div_u64_rem(vol->upd_received, vol->usable_leb_size, &offs);
295 offs = do_div(tmp, vol->usable_leb_size);
296 lnum = tmp;
297
298 if (vol->upd_received + count > vol->upd_bytes) 291 if (vol->upd_received + count > vol->upd_bytes)
299 to_write = count = vol->upd_bytes - vol->upd_received; 292 to_write = count = vol->upd_bytes - vol->upd_received;
300 293
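upd.c now computes the number of eraseblocks needed for an update with a single rounding-up division instead of the old do_div()-plus-remainder dance. DIV_ROUND_UP() cannot be used directly here because the dividend is 64 bits wide and 32-bit architectures have no native 64-by-32 divide, hence div_u64(). A sketch with a hypothetical helper name:

#include <linux/math64.h>

/* How many eraseblocks of @leb_size bytes are needed to hold @bytes. */
static u32 bytes_to_lebs(u64 bytes, u32 leb_size)
{
        return div_u64(bytes + leb_size - 1, leb_size);
}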
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 22e1d7398fce..df5483562b7a 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -24,7 +24,7 @@
24 */ 24 */
25 25
26#include <linux/err.h> 26#include <linux/err.h>
27#include <asm/div64.h> 27#include <linux/math64.h>
28#include "ubi.h" 28#include "ubi.h"
29 29
30#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID 30#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
@@ -205,7 +205,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
205 int i, err, vol_id = req->vol_id, do_free = 1; 205 int i, err, vol_id = req->vol_id, do_free = 1;
206 struct ubi_volume *vol; 206 struct ubi_volume *vol;
207 struct ubi_vtbl_record vtbl_rec; 207 struct ubi_vtbl_record vtbl_rec;
208 uint64_t bytes;
209 dev_t dev; 208 dev_t dev;
210 209
211 if (ubi->ro_mode) 210 if (ubi->ro_mode)
@@ -255,10 +254,8 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
255 254
256 /* Calculate how many eraseblocks are requested */ 255 /* Calculate how many eraseblocks are requested */
257 vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment; 256 vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
258 bytes = req->bytes; 257 vol->reserved_pebs += div_u64(req->bytes + vol->usable_leb_size - 1,
259 if (do_div(bytes, vol->usable_leb_size)) 258 vol->usable_leb_size);
260 vol->reserved_pebs = 1;
261 vol->reserved_pebs += bytes;
262 259
263 /* Reserve physical eraseblocks */ 260 /* Reserve physical eraseblocks */
264 if (vol->reserved_pebs > ubi->avail_pebs) { 261 if (vol->reserved_pebs > ubi->avail_pebs) {
@@ -301,10 +298,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
301 vol->used_bytes = 298 vol->used_bytes =
302 (long long)vol->used_ebs * vol->usable_leb_size; 299 (long long)vol->used_ebs * vol->usable_leb_size;
303 } else { 300 } else {
304 bytes = vol->used_bytes; 301 vol->used_ebs = div_u64_rem(vol->used_bytes,
305 vol->last_eb_bytes = do_div(bytes, vol->usable_leb_size); 302 vol->usable_leb_size,
306 vol->used_ebs = bytes; 303 &vol->last_eb_bytes);
307 if (vol->last_eb_bytes) 304 if (vol->last_eb_bytes != 0)
308 vol->used_ebs += 1; 305 vol->used_ebs += 1;
309 else 306 else
310 vol->last_eb_bytes = vol->usable_leb_size; 307 vol->last_eb_bytes = vol->usable_leb_size;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 9fe8cb7d43ac..6bdfd47d679d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1829,7 +1829,7 @@ config 68360_ENET
1829 1829
1830config FEC 1830config FEC
1831 bool "FEC ethernet controller (of ColdFire CPUs)" 1831 bool "FEC ethernet controller (of ColdFire CPUs)"
1832 depends on M523x || M527x || M5272 || M528x || M520x 1832 depends on M523x || M527x || M5272 || M528x || M520x || M532x
1833 help 1833 help
1834 Say Y here if you want to use the built-in 10/100 Fast ethernet 1834 Say Y here if you want to use the built-in 10/100 Fast ethernet
1835 controller on some Motorola ColdFire processors. 1835 controller on some Motorola ColdFire processors.
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 0c628a9e5339..c2d012fcc29b 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -208,9 +208,9 @@ am79c961_init_for_open(struct net_device *dev)
208 /* 208 /*
209 * Stop the chip. 209 * Stop the chip.
210 */ 210 */
211 spin_lock_irqsave(priv->chip_lock, flags); 211 spin_lock_irqsave(&priv->chip_lock, flags);
212 write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP); 212 write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP);
213 spin_unlock_irqrestore(priv->chip_lock, flags); 213 spin_unlock_irqrestore(&priv->chip_lock, flags);
214 214
215 write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */ 215 write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */
216 write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */ 216 write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */
@@ -332,10 +332,10 @@ am79c961_close(struct net_device *dev)
332 netif_stop_queue(dev); 332 netif_stop_queue(dev);
333 netif_carrier_off(dev); 333 netif_carrier_off(dev);
334 334
335 spin_lock_irqsave(priv->chip_lock, flags); 335 spin_lock_irqsave(&priv->chip_lock, flags);
336 write_rreg (dev->base_addr, CSR0, CSR0_STOP); 336 write_rreg (dev->base_addr, CSR0, CSR0_STOP);
337 write_rreg (dev->base_addr, CSR3, CSR3_MASKALL); 337 write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
338 spin_unlock_irqrestore(priv->chip_lock, flags); 338 spin_unlock_irqrestore(&priv->chip_lock, flags);
339 339
340 free_irq (dev->irq, dev); 340 free_irq (dev->irq, dev);
341 341
@@ -391,7 +391,7 @@ static void am79c961_setmulticastlist (struct net_device *dev)
391 am79c961_mc_hash(dmi, multi_hash); 391 am79c961_mc_hash(dmi, multi_hash);
392 } 392 }
393 393
394 spin_lock_irqsave(priv->chip_lock, flags); 394 spin_lock_irqsave(&priv->chip_lock, flags);
395 395
396 stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP; 396 stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
397 397
@@ -405,9 +405,9 @@ static void am79c961_setmulticastlist (struct net_device *dev)
405 * Spin waiting for chip to report suspend mode 405 * Spin waiting for chip to report suspend mode
406 */ 406 */
407 while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) { 407 while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) {
408 spin_unlock_irqrestore(priv->chip_lock, flags); 408 spin_unlock_irqrestore(&priv->chip_lock, flags);
409 nop(); 409 nop();
410 spin_lock_irqsave(priv->chip_lock, flags); 410 spin_lock_irqsave(&priv->chip_lock, flags);
411 } 411 }
412 } 412 }
413 413
@@ -429,7 +429,7 @@ static void am79c961_setmulticastlist (struct net_device *dev)
429 write_rreg(dev->base_addr, CTRL1, 0); 429 write_rreg(dev->base_addr, CTRL1, 0);
430 } 430 }
431 431
432 spin_unlock_irqrestore(priv->chip_lock, flags); 432 spin_unlock_irqrestore(&priv->chip_lock, flags);
433} 433}
434 434
435static void am79c961_timeout(struct net_device *dev) 435static void am79c961_timeout(struct net_device *dev)
@@ -467,10 +467,10 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
467 am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP); 467 am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP);
468 priv->txhead = head; 468 priv->txhead = head;
469 469
470 spin_lock_irqsave(priv->chip_lock, flags); 470 spin_lock_irqsave(&priv->chip_lock, flags);
471 write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA); 471 write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
472 dev->trans_start = jiffies; 472 dev->trans_start = jiffies;
473 spin_unlock_irqrestore(priv->chip_lock, flags); 473 spin_unlock_irqrestore(&priv->chip_lock, flags);
474 474
475 /* 475 /*
476 * If the next packet is owned by the ethernet device, 476 * If the next packet is owned by the ethernet device,
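Every am79c961a.c hunk above is the same one-character fix: spin_lock_irqsave() and spin_unlock_irqrestore() take a pointer to the spinlock, so priv->chip_lock needs a leading '&' (the old form presumably only built in configurations where the lock argument is compiled away). Minimal illustration with a hypothetical private structure:

#include <linux/spinlock.h>

struct am_priv {
        spinlock_t chip_lock;
};

static void am_touch_registers(struct am_priv *priv)
{
        unsigned long flags;

        spin_lock_irqsave(&priv->chip_lock, flags);     /* note the '&' */
        /* ... program the chip registers ... */
        spin_unlock_irqrestore(&priv->chip_lock, flags);
}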
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 6fcccef4cf3d..15a5cf0f676b 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -1,6 +1,6 @@
1/* bnx2x.h: Broadcom Everest network driver. 1/* bnx2x.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2008 Broadcom Corporation 3 * Copyright (c) 2007-2009 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -271,14 +271,7 @@ struct bnx2x_fastpath {
271 271
272#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) 272#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
273 273
274#define BNX2X_HAS_TX_WORK(fp) \ 274#define BNX2X_HAS_WORK(fp) (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))
275 ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || \
276 (fp->tx_pkt_prod != fp->tx_pkt_cons))
277
278#define BNX2X_HAS_RX_WORK(fp) \
279 (fp->rx_comp_cons != rx_cons_sb)
280
281#define BNX2X_HAS_WORK(fp) (BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
282 275
283 276
284/* MC hsi */ 277/* MC hsi */
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index fefa6ab13064..aea26b4dc453 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -1,4 +1,4 @@
1/* Copyright 2008 Broadcom Corporation 1/* Copyright 2008-2009 Broadcom Corporation
2 * 2 *
3 * Unless you and Broadcom execute a separate written software license 3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you 4 * agreement governing use of this software, this software is licensed to you
@@ -317,6 +317,9 @@ static u8 bnx2x_emac_enable(struct link_params *params,
317 val &= ~0x810; 317 val &= ~0x810;
318 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val); 318 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
319 319
320 /* enable emac */
321 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
322
320 /* enable emac for jumbo packets */ 323 /* enable emac for jumbo packets */
321 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, 324 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
322 (EMAC_RX_MTU_SIZE_JUMBO_ENA | 325 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
@@ -1609,7 +1612,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1609 u32 gp_status) 1612 u32 gp_status)
1610{ 1613{
1611 struct bnx2x *bp = params->bp; 1614 struct bnx2x *bp = params->bp;
1612 1615 u16 new_line_speed;
1613 u8 rc = 0; 1616 u8 rc = 0;
1614 vars->link_status = 0; 1617 vars->link_status = 0;
1615 1618
@@ -1629,7 +1632,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1629 1632
1630 switch (gp_status & GP_STATUS_SPEED_MASK) { 1633 switch (gp_status & GP_STATUS_SPEED_MASK) {
1631 case GP_STATUS_10M: 1634 case GP_STATUS_10M:
1632 vars->line_speed = SPEED_10; 1635 new_line_speed = SPEED_10;
1633 if (vars->duplex == DUPLEX_FULL) 1636 if (vars->duplex == DUPLEX_FULL)
1634 vars->link_status |= LINK_10TFD; 1637 vars->link_status |= LINK_10TFD;
1635 else 1638 else
@@ -1637,7 +1640,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1637 break; 1640 break;
1638 1641
1639 case GP_STATUS_100M: 1642 case GP_STATUS_100M:
1640 vars->line_speed = SPEED_100; 1643 new_line_speed = SPEED_100;
1641 if (vars->duplex == DUPLEX_FULL) 1644 if (vars->duplex == DUPLEX_FULL)
1642 vars->link_status |= LINK_100TXFD; 1645 vars->link_status |= LINK_100TXFD;
1643 else 1646 else
@@ -1646,7 +1649,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1646 1649
1647 case GP_STATUS_1G: 1650 case GP_STATUS_1G:
1648 case GP_STATUS_1G_KX: 1651 case GP_STATUS_1G_KX:
1649 vars->line_speed = SPEED_1000; 1652 new_line_speed = SPEED_1000;
1650 if (vars->duplex == DUPLEX_FULL) 1653 if (vars->duplex == DUPLEX_FULL)
1651 vars->link_status |= LINK_1000TFD; 1654 vars->link_status |= LINK_1000TFD;
1652 else 1655 else
@@ -1654,7 +1657,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1654 break; 1657 break;
1655 1658
1656 case GP_STATUS_2_5G: 1659 case GP_STATUS_2_5G:
1657 vars->line_speed = SPEED_2500; 1660 new_line_speed = SPEED_2500;
1658 if (vars->duplex == DUPLEX_FULL) 1661 if (vars->duplex == DUPLEX_FULL)
1659 vars->link_status |= LINK_2500TFD; 1662 vars->link_status |= LINK_2500TFD;
1660 else 1663 else
@@ -1671,32 +1674,32 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1671 case GP_STATUS_10G_KX4: 1674 case GP_STATUS_10G_KX4:
1672 case GP_STATUS_10G_HIG: 1675 case GP_STATUS_10G_HIG:
1673 case GP_STATUS_10G_CX4: 1676 case GP_STATUS_10G_CX4:
1674 vars->line_speed = SPEED_10000; 1677 new_line_speed = SPEED_10000;
1675 vars->link_status |= LINK_10GTFD; 1678 vars->link_status |= LINK_10GTFD;
1676 break; 1679 break;
1677 1680
1678 case GP_STATUS_12G_HIG: 1681 case GP_STATUS_12G_HIG:
1679 vars->line_speed = SPEED_12000; 1682 new_line_speed = SPEED_12000;
1680 vars->link_status |= LINK_12GTFD; 1683 vars->link_status |= LINK_12GTFD;
1681 break; 1684 break;
1682 1685
1683 case GP_STATUS_12_5G: 1686 case GP_STATUS_12_5G:
1684 vars->line_speed = SPEED_12500; 1687 new_line_speed = SPEED_12500;
1685 vars->link_status |= LINK_12_5GTFD; 1688 vars->link_status |= LINK_12_5GTFD;
1686 break; 1689 break;
1687 1690
1688 case GP_STATUS_13G: 1691 case GP_STATUS_13G:
1689 vars->line_speed = SPEED_13000; 1692 new_line_speed = SPEED_13000;
1690 vars->link_status |= LINK_13GTFD; 1693 vars->link_status |= LINK_13GTFD;
1691 break; 1694 break;
1692 1695
1693 case GP_STATUS_15G: 1696 case GP_STATUS_15G:
1694 vars->line_speed = SPEED_15000; 1697 new_line_speed = SPEED_15000;
1695 vars->link_status |= LINK_15GTFD; 1698 vars->link_status |= LINK_15GTFD;
1696 break; 1699 break;
1697 1700
1698 case GP_STATUS_16G: 1701 case GP_STATUS_16G:
1699 vars->line_speed = SPEED_16000; 1702 new_line_speed = SPEED_16000;
1700 vars->link_status |= LINK_16GTFD; 1703 vars->link_status |= LINK_16GTFD;
1701 break; 1704 break;
1702 1705
@@ -1708,6 +1711,15 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1708 break; 1711 break;
1709 } 1712 }
1710 1713
1714 /* Upon link speed change set the NIG into drain mode.
1715 Comes to deals with possible FIFO glitch due to clk change
1716 when speed is decreased without link down indicator */
1717 if (new_line_speed != vars->line_speed) {
1718 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
1719 + params->port*4, 0);
1720 msleep(1);
1721 }
1722 vars->line_speed = new_line_speed;
1711 vars->link_status |= LINK_STATUS_SERDES_LINK; 1723 vars->link_status |= LINK_STATUS_SERDES_LINK;
1712 1724
1713 if ((params->req_line_speed == SPEED_AUTO_NEG) && 1725 if ((params->req_line_speed == SPEED_AUTO_NEG) &&
@@ -3571,7 +3583,7 @@ static void bnx2x_set_xgxs_loopback(struct link_params *params,
3571 (MDIO_REG_BANK_CL73_IEEEB0 + 3583 (MDIO_REG_BANK_CL73_IEEEB0 +
3572 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), 3584 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
3573 0x6041); 3585 0x6041);
3574 3586 msleep(200);
3575 /* set aer mmd back */ 3587 /* set aer mmd back */
3576 bnx2x_set_aer_mmd(params, vars); 3588 bnx2x_set_aer_mmd(params, vars);
3577 3589
@@ -3870,9 +3882,15 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3870 } 3882 }
3871 3883
3872 if (vars->phy_flags & PHY_XGXS_FLAG) { 3884 if (vars->phy_flags & PHY_XGXS_FLAG) {
3873 if (params->req_line_speed && 3885 if ((params->req_line_speed &&
3874 ((params->req_line_speed == SPEED_100) || 3886 ((params->req_line_speed == SPEED_100) ||
3875 (params->req_line_speed == SPEED_10))) { 3887 (params->req_line_speed == SPEED_10))) ||
3888 (!params->req_line_speed &&
3889 (params->speed_cap_mask >=
3890 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
3891 (params->speed_cap_mask <
3892 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
3893 )) {
3876 vars->phy_flags |= PHY_SGMII_FLAG; 3894 vars->phy_flags |= PHY_SGMII_FLAG;
3877 } else { 3895 } else {
3878 vars->phy_flags &= ~PHY_SGMII_FLAG; 3896 vars->phy_flags &= ~PHY_SGMII_FLAG;
@@ -4194,6 +4212,11 @@ static u8 bnx2x_update_link_down(struct link_params *params,
4194 /* activate nig drain */ 4212 /* activate nig drain */
4195 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 4213 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
4196 4214
4215 /* disable emac */
4216 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
4217
4218 msleep(10);
4219
4197 /* reset BigMac */ 4220 /* reset BigMac */
4198 bnx2x_bmac_rx_disable(bp, params->port); 4221 bnx2x_bmac_rx_disable(bp, params->port);
4199 REG_WR(bp, GRCBASE_MISC + 4222 REG_WR(bp, GRCBASE_MISC +
@@ -4238,6 +4261,7 @@ static u8 bnx2x_update_link_up(struct link_params *params,
4238 4261
4239 /* update shared memory */ 4262 /* update shared memory */
4240 bnx2x_update_mng(params, vars->link_status); 4263 bnx2x_update_mng(params, vars->link_status);
4264 msleep(20);
4241 return rc; 4265 return rc;
4242} 4266}
4243/* This function should called upon link interrupt */ 4267/* This function should called upon link interrupt */
@@ -4276,6 +4300,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
4276 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), 4300 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
4277 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); 4301 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
4278 4302
4303 /* disable emac */
4304 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
4305
4279 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 4306 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
4280 4307
4281 /* Check external link change only for non-direct */ 4308 /* Check external link change only for non-direct */
@@ -4377,10 +4404,11 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
4377 ext_phy_addr[port], 4404 ext_phy_addr[port],
4378 MDIO_PMA_DEVAD, 4405 MDIO_PMA_DEVAD,
4379 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 4406 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
4380 if (fw_ver1 == 0) { 4407 if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
4381 DP(NETIF_MSG_LINK, 4408 DP(NETIF_MSG_LINK,
4382 "bnx2x_8073_common_init_phy port %x " 4409 "bnx2x_8073_common_init_phy port %x:"
4383 "fw Download failed\n", port); 4410 "Download failed. fw version = 0x%x\n",
4411 port, fw_ver1);
4384 return -EINVAL; 4412 return -EINVAL;
4385 } 4413 }
4386 4414
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 7c533797c064..d3e7775a9ccf 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1,6 +1,6 @@
1/* bnx2x_main.c: Broadcom Everest network driver. 1/* bnx2x_main.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2008 Broadcom Corporation 3 * Copyright (c) 2007-2009 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -57,8 +57,8 @@
57#include "bnx2x.h" 57#include "bnx2x.h"
58#include "bnx2x_init.h" 58#include "bnx2x_init.h"
59 59
60#define DRV_MODULE_VERSION "1.45.23" 60#define DRV_MODULE_VERSION "1.45.26"
61#define DRV_MODULE_RELDATE "2008/11/03" 61#define DRV_MODULE_RELDATE "2009/01/26"
62#define BNX2X_BC_VER 0x040200 62#define BNX2X_BC_VER 0x040200
63 63
64/* Time in jiffies before concluding the transmitter is hung */ 64/* Time in jiffies before concluding the transmitter is hung */
@@ -69,7 +69,7 @@ static char version[] __devinitdata =
69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 70
71MODULE_AUTHOR("Eliezer Tamir"); 71MODULE_AUTHOR("Eliezer Tamir");
72MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver"); 72MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
73MODULE_LICENSE("GPL"); 73MODULE_LICENSE("GPL");
74MODULE_VERSION(DRV_MODULE_VERSION); 74MODULE_VERSION(DRV_MODULE_VERSION);
75 75
@@ -733,6 +733,24 @@ static u16 bnx2x_ack_int(struct bnx2x *bp)
733 * fast path service functions 733 * fast path service functions
734 */ 734 */
735 735
736static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
737{
738 u16 tx_cons_sb;
739
740 /* Tell compiler that status block fields can change */
741 barrier();
742 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
743 return (fp->tx_pkt_cons != tx_cons_sb);
744}
745
746static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
747{
748 /* Tell compiler that consumer and producer can change */
749 barrier();
750 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
751
752}
753
736/* free skb in the packet ring at pos idx 754/* free skb in the packet ring at pos idx
737 * return idx of last bd freed 755 * return idx of last bd freed
738 */ 756 */
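The bnx2x_has_tx_work() helpers added above replace the BNX2X_HAS_TX_WORK/BNX2X_HAS_RX_WORK macros removed from bnx2x.h and insert a compiler barrier before reading the status block, so a polling loop cannot keep spinning on a value the compiler cached in a register. A generic sketch of the same idea; the structure and names are hypothetical, while barrier(), le16_to_cpu() and msleep() are real kernel APIs:

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct ring {
        __le16 *cons_sb;        /* consumer index, updated by the hardware */
        u16 pkt_cons;           /* last index the driver has processed */
};

static int wait_for_tx_done(struct ring *r)
{
        int cnt = 1000;

        while (cnt--) {
                barrier();      /* force a fresh load of *r->cons_sb below */
                if (r->pkt_cons == le16_to_cpu(*r->cons_sb))
                        return 0;
                msleep(1);
        }
        return -ETIMEDOUT;
}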
@@ -5137,12 +5155,21 @@ static void enable_blocks_attention(struct bnx2x *bp)
5137} 5155}
5138 5156
5139 5157
5158static void bnx2x_reset_common(struct bnx2x *bp)
5159{
5160 /* reset_common */
5161 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5162 0xd3ffff7f);
5163 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5164}
5165
5140static int bnx2x_init_common(struct bnx2x *bp) 5166static int bnx2x_init_common(struct bnx2x *bp)
5141{ 5167{
5142 u32 val, i; 5168 u32 val, i;
5143 5169
5144 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); 5170 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5145 5171
5172 bnx2x_reset_common(bp);
5146 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 5173 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5147 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); 5174 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5148 5175
@@ -6123,8 +6150,8 @@ static void bnx2x_netif_start(struct bnx2x *bp)
6123static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) 6150static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6124{ 6151{
6125 bnx2x_int_disable_sync(bp, disable_hw); 6152 bnx2x_int_disable_sync(bp, disable_hw);
6153 bnx2x_napi_disable(bp);
6126 if (netif_running(bp->dev)) { 6154 if (netif_running(bp->dev)) {
6127 bnx2x_napi_disable(bp);
6128 netif_tx_disable(bp->dev); 6155 netif_tx_disable(bp->dev);
6129 bp->dev->trans_start = jiffies; /* prevent tx timeout */ 6156 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6130 } 6157 }
@@ -6144,7 +6171,7 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6144 * multicast 64-127:port0 128-191:port1 6171 * multicast 64-127:port0 128-191:port1
6145 */ 6172 */
6146 config->hdr.length_6b = 2; 6173 config->hdr.length_6b = 2;
6147 config->hdr.offset = port ? 31 : 0; 6174 config->hdr.offset = port ? 32 : 0;
6148 config->hdr.client_id = BP_CL_ID(bp); 6175 config->hdr.client_id = BP_CL_ID(bp);
6149 config->hdr.reserved1 = 0; 6176 config->hdr.reserved1 = 0;
6150 6177
@@ -6308,7 +6335,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev);
6308static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 6335static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6309{ 6336{
6310 u32 load_code; 6337 u32 load_code;
6311 int i, rc; 6338 int i, rc = 0;
6312#ifdef BNX2X_STOP_ON_ERROR 6339#ifdef BNX2X_STOP_ON_ERROR
6313 if (unlikely(bp->panic)) 6340 if (unlikely(bp->panic))
6314 return -EPERM; 6341 return -EPERM;
@@ -6316,48 +6343,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6316 6343
6317 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 6344 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6318 6345
6319 /* Send LOAD_REQUEST command to MCP
6320 Returns the type of LOAD command:
6321 if it is the first port to be initialized
6322 common blocks should be initialized, otherwise - not
6323 */
6324 if (!BP_NOMCP(bp)) {
6325 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6326 if (!load_code) {
6327 BNX2X_ERR("MCP response failure, aborting\n");
6328 return -EBUSY;
6329 }
6330 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6331 return -EBUSY; /* other port in diagnostic mode */
6332
6333 } else {
6334 int port = BP_PORT(bp);
6335
6336 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6337 load_count[0], load_count[1], load_count[2]);
6338 load_count[0]++;
6339 load_count[1 + port]++;
6340 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6341 load_count[0], load_count[1], load_count[2]);
6342 if (load_count[0] == 1)
6343 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6344 else if (load_count[1 + port] == 1)
6345 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6346 else
6347 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6348 }
6349
6350 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6351 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6352 bp->port.pmf = 1;
6353 else
6354 bp->port.pmf = 0;
6355 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6356
6357 /* if we can't use MSI-X we only need one fp,
6358 * so try to enable MSI-X with the requested number of fp's
6359 * and fallback to inta with one fp
6360 */
6361 if (use_inta) { 6346 if (use_inta) {
6362 bp->num_queues = 1; 6347 bp->num_queues = 1;
6363 6348
@@ -6372,7 +6357,15 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6372 else 6357 else
6373 bp->num_queues = 1; 6358 bp->num_queues = 1;
6374 6359
6375 if (bnx2x_enable_msix(bp)) { 6360 DP(NETIF_MSG_IFUP,
6361 "set number of queues to %d\n", bp->num_queues);
6362
6363 /* if we can't use MSI-X we only need one fp,
6364 * so try to enable MSI-X with the requested number of fp's
6365 * and fallback to MSI or legacy INTx with one fp
6366 */
6367 rc = bnx2x_enable_msix(bp);
6368 if (rc) {
6376 /* failed to enable MSI-X */ 6369 /* failed to enable MSI-X */
6377 bp->num_queues = 1; 6370 bp->num_queues = 1;
6378 if (use_multi) 6371 if (use_multi)
@@ -6380,8 +6373,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6380 " to enable MSI-X\n"); 6373 " to enable MSI-X\n");
6381 } 6374 }
6382 } 6375 }
6383 DP(NETIF_MSG_IFUP,
6384 "set number of queues to %d\n", bp->num_queues);
6385 6376
6386 if (bnx2x_alloc_mem(bp)) 6377 if (bnx2x_alloc_mem(bp))
6387 return -ENOMEM; 6378 return -ENOMEM;
@@ -6390,30 +6381,85 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6390 bnx2x_fp(bp, i, disable_tpa) = 6381 bnx2x_fp(bp, i, disable_tpa) =
6391 ((bp->flags & TPA_ENABLE_FLAG) == 0); 6382 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6392 6383
6384 for_each_queue(bp, i)
6385 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6386 bnx2x_poll, 128);
6387
6388#ifdef BNX2X_STOP_ON_ERROR
6389 for_each_queue(bp, i) {
6390 struct bnx2x_fastpath *fp = &bp->fp[i];
6391
6392 fp->poll_no_work = 0;
6393 fp->poll_calls = 0;
6394 fp->poll_max_calls = 0;
6395 fp->poll_complete = 0;
6396 fp->poll_exit = 0;
6397 }
6398#endif
6399 bnx2x_napi_enable(bp);
6400
6393 if (bp->flags & USING_MSIX_FLAG) { 6401 if (bp->flags & USING_MSIX_FLAG) {
6394 rc = bnx2x_req_msix_irqs(bp); 6402 rc = bnx2x_req_msix_irqs(bp);
6395 if (rc) { 6403 if (rc) {
6396 pci_disable_msix(bp->pdev); 6404 pci_disable_msix(bp->pdev);
6397 goto load_error; 6405 goto load_error1;
6398 } 6406 }
6407 printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name);
6399 } else { 6408 } else {
6400 bnx2x_ack_int(bp); 6409 bnx2x_ack_int(bp);
6401 rc = bnx2x_req_irq(bp); 6410 rc = bnx2x_req_irq(bp);
6402 if (rc) { 6411 if (rc) {
6403 BNX2X_ERR("IRQ request failed, aborting\n"); 6412 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6404 goto load_error; 6413 goto load_error1;
6405 } 6414 }
6406 } 6415 }
6407 6416
6408 for_each_queue(bp, i) 6417 /* Send LOAD_REQUEST command to MCP
6409 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 6418 Returns the type of LOAD command:
6410 bnx2x_poll, 128); 6419 if it is the first port to be initialized
6420 common blocks should be initialized, otherwise - not
6421 */
6422 if (!BP_NOMCP(bp)) {
6423 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6424 if (!load_code) {
6425 BNX2X_ERR("MCP response failure, aborting\n");
6426 rc = -EBUSY;
6427 goto load_error2;
6428 }
6429 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6430 rc = -EBUSY; /* other port in diagnostic mode */
6431 goto load_error2;
6432 }
6433
6434 } else {
6435 int port = BP_PORT(bp);
6436
6437 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6438 load_count[0], load_count[1], load_count[2]);
6439 load_count[0]++;
6440 load_count[1 + port]++;
6441 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6442 load_count[0], load_count[1], load_count[2]);
6443 if (load_count[0] == 1)
6444 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6445 else if (load_count[1 + port] == 1)
6446 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6447 else
6448 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6449 }
6450
6451 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6452 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6453 bp->port.pmf = 1;
6454 else
6455 bp->port.pmf = 0;
6456 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6411 6457
6412 /* Initialize HW */ 6458 /* Initialize HW */
6413 rc = bnx2x_init_hw(bp, load_code); 6459 rc = bnx2x_init_hw(bp, load_code);
6414 if (rc) { 6460 if (rc) {
6415 BNX2X_ERR("HW init failed, aborting\n"); 6461 BNX2X_ERR("HW init failed, aborting\n");
6416 goto load_int_disable; 6462 goto load_error2;
6417 } 6463 }
6418 6464
6419 /* Setup NIC internals and enable interrupts */ 6465 /* Setup NIC internals and enable interrupts */
@@ -6425,7 +6471,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6425 if (!load_code) { 6471 if (!load_code) {
6426 BNX2X_ERR("MCP response failure, aborting\n"); 6472 BNX2X_ERR("MCP response failure, aborting\n");
6427 rc = -EBUSY; 6473 rc = -EBUSY;
6428 goto load_rings_free; 6474 goto load_error3;
6429 } 6475 }
6430 } 6476 }
6431 6477
@@ -6434,7 +6480,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6434 rc = bnx2x_setup_leading(bp); 6480 rc = bnx2x_setup_leading(bp);
6435 if (rc) { 6481 if (rc) {
6436 BNX2X_ERR("Setup leading failed!\n"); 6482 BNX2X_ERR("Setup leading failed!\n");
6437 goto load_netif_stop; 6483 goto load_error3;
6438 } 6484 }
6439 6485
6440 if (CHIP_IS_E1H(bp)) 6486 if (CHIP_IS_E1H(bp))
@@ -6447,7 +6493,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6447 for_each_nondefault_queue(bp, i) { 6493 for_each_nondefault_queue(bp, i) {
6448 rc = bnx2x_setup_multi(bp, i); 6494 rc = bnx2x_setup_multi(bp, i);
6449 if (rc) 6495 if (rc)
6450 goto load_netif_stop; 6496 goto load_error3;
6451 } 6497 }
6452 6498
6453 if (CHIP_IS_E1(bp)) 6499 if (CHIP_IS_E1(bp))
@@ -6463,18 +6509,18 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6463 case LOAD_NORMAL: 6509 case LOAD_NORMAL:
6464 /* Tx queue should be only reenabled */ 6510 /* Tx queue should be only reenabled */
6465 netif_wake_queue(bp->dev); 6511 netif_wake_queue(bp->dev);
6512 /* Initialize the receive filter. */
6466 bnx2x_set_rx_mode(bp->dev); 6513 bnx2x_set_rx_mode(bp->dev);
6467 break; 6514 break;
6468 6515
6469 case LOAD_OPEN: 6516 case LOAD_OPEN:
6470 netif_start_queue(bp->dev); 6517 netif_start_queue(bp->dev);
6518 /* Initialize the receive filter. */
6471 bnx2x_set_rx_mode(bp->dev); 6519 bnx2x_set_rx_mode(bp->dev);
6472 if (bp->flags & USING_MSIX_FLAG)
6473 printk(KERN_INFO PFX "%s: using MSI-X\n",
6474 bp->dev->name);
6475 break; 6520 break;
6476 6521
6477 case LOAD_DIAG: 6522 case LOAD_DIAG:
6523 /* Initialize the receive filter. */
6478 bnx2x_set_rx_mode(bp->dev); 6524 bnx2x_set_rx_mode(bp->dev);
6479 bp->state = BNX2X_STATE_DIAG; 6525 bp->state = BNX2X_STATE_DIAG;
6480 break; 6526 break;
@@ -6492,20 +6538,25 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6492 6538
6493 return 0; 6539 return 0;
6494 6540
6495load_netif_stop: 6541load_error3:
6496 bnx2x_napi_disable(bp); 6542 bnx2x_int_disable_sync(bp, 1);
6497load_rings_free: 6543 if (!BP_NOMCP(bp)) {
6544 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6545 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6546 }
6547 bp->port.pmf = 0;
6498 /* Free SKBs, SGEs, TPA pool and driver internals */ 6548 /* Free SKBs, SGEs, TPA pool and driver internals */
6499 bnx2x_free_skbs(bp); 6549 bnx2x_free_skbs(bp);
6500 for_each_queue(bp, i) 6550 for_each_queue(bp, i)
6501 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 6551 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6502load_int_disable: 6552load_error2:
6503 bnx2x_int_disable_sync(bp, 1);
6504 /* Release IRQs */ 6553 /* Release IRQs */
6505 bnx2x_free_irq(bp); 6554 bnx2x_free_irq(bp);
6506load_error: 6555load_error1:
6556 bnx2x_napi_disable(bp);
6557 for_each_queue(bp, i)
6558 netif_napi_del(&bnx2x_fp(bp, i, napi));
6507 bnx2x_free_mem(bp); 6559 bnx2x_free_mem(bp);
6508 bp->port.pmf = 0;
6509 6560
6510 /* TBD we really need to reset the chip 6561 /* TBD we really need to reset the chip
6511 if we want to recover from this */ 6562 if we want to recover from this */
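The relabelling above (load_netif_stop/load_rings_free/load_int_disable/load_error becoming load_error1..3) reorders the unwind so that each numbered label releases exactly what had been set up before the step that failed. This is the usual kernel goto-unwind pattern; a generic sketch with entirely hypothetical names:

static int dev_load(struct mydev *d)
{
        int rc;

        rc = setup_napi(d);
        if (rc)
                return rc;

        rc = request_irqs(d);
        if (rc)
                goto err_napi;

        rc = init_hw(d);
        if (rc)
                goto err_irqs;

        return 0;

err_irqs:
        free_irqs(d);           /* undo request_irqs() */
err_napi:
        teardown_napi(d);       /* undo setup_napi() */
        return rc;
}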
@@ -6578,6 +6629,7 @@ static int bnx2x_stop_leading(struct bnx2x *bp)
6578 } 6629 }
6579 cnt--; 6630 cnt--;
6580 msleep(1); 6631 msleep(1);
6632 rmb(); /* Refresh the dsb_sp_prod */
6581 } 6633 }
6582 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; 6634 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6583 bp->fp[0].state = BNX2X_FP_STATE_CLOSED; 6635 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
@@ -6629,14 +6681,6 @@ static void bnx2x_reset_port(struct bnx2x *bp)
6629 /* TODO: Close Doorbell port? */ 6681 /* TODO: Close Doorbell port? */
6630} 6682}
6631 6683
6632static void bnx2x_reset_common(struct bnx2x *bp)
6633{
6634 /* reset_common */
6635 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6636 0xd3ffff7f);
6637 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6638}
6639
6640static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) 6684static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6641{ 6685{
6642 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", 6686 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
@@ -6677,20 +6721,22 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6677 bnx2x_set_storm_rx_mode(bp); 6721 bnx2x_set_storm_rx_mode(bp);
6678 6722
6679 bnx2x_netif_stop(bp, 1); 6723 bnx2x_netif_stop(bp, 1);
6680 if (!netif_running(bp->dev)) 6724
6681 bnx2x_napi_disable(bp);
6682 del_timer_sync(&bp->timer); 6725 del_timer_sync(&bp->timer);
6683 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, 6726 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6684 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); 6727 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6685 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 6728 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6686 6729
6730 /* Release IRQs */
6731 bnx2x_free_irq(bp);
6732
6687 /* Wait until tx fast path tasks complete */ 6733 /* Wait until tx fast path tasks complete */
6688 for_each_queue(bp, i) { 6734 for_each_queue(bp, i) {
6689 struct bnx2x_fastpath *fp = &bp->fp[i]; 6735 struct bnx2x_fastpath *fp = &bp->fp[i];
6690 6736
6691 cnt = 1000; 6737 cnt = 1000;
6692 smp_rmb(); 6738 smp_rmb();
6693 while (BNX2X_HAS_TX_WORK(fp)) { 6739 while (bnx2x_has_tx_work_unload(fp)) {
6694 6740
6695 bnx2x_tx_int(fp, 1000); 6741 bnx2x_tx_int(fp, 1000);
6696 if (!cnt) { 6742 if (!cnt) {
@@ -6711,9 +6757,6 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6711 /* Give HW time to discard old tx messages */ 6757 /* Give HW time to discard old tx messages */
6712 msleep(1); 6758 msleep(1);
6713 6759
6714 /* Release IRQs */
6715 bnx2x_free_irq(bp);
6716
6717 if (CHIP_IS_E1(bp)) { 6760 if (CHIP_IS_E1(bp)) {
6718 struct mac_configuration_cmd *config = 6761 struct mac_configuration_cmd *config =
6719 bnx2x_sp(bp, mcast_config); 6762 bnx2x_sp(bp, mcast_config);
@@ -6822,6 +6865,8 @@ unload_error:
6822 bnx2x_free_skbs(bp); 6865 bnx2x_free_skbs(bp);
6823 for_each_queue(bp, i) 6866 for_each_queue(bp, i)
6824 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 6867 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6868 for_each_queue(bp, i)
6869 netif_napi_del(&bnx2x_fp(bp, i, napi));
6825 bnx2x_free_mem(bp); 6870 bnx2x_free_mem(bp);
6826 6871
6827 bp->state = BNX2X_STATE_CLOSED; 6872 bp->state = BNX2X_STATE_CLOSED;
@@ -6874,10 +6919,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6874 */ 6919 */
6875 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 6920 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6876 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 6921 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6877 if (val == 0x7)
6878 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6879 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6880
6881 if (val == 0x7) { 6922 if (val == 0x7) {
6882 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 6923 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6883 /* save our func */ 6924 /* save our func */
@@ -6885,6 +6926,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6885 u32 swap_en; 6926 u32 swap_en;
6886 u32 swap_val; 6927 u32 swap_val;
6887 6928
6929 /* clear the UNDI indication */
6930 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6931
6888 BNX2X_DEV_INFO("UNDI is active! reset device\n"); 6932 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6889 6933
6890 /* try unload UNDI on port 0 */ 6934 /* try unload UNDI on port 0 */
@@ -6910,6 +6954,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6910 bnx2x_fw_command(bp, reset_code); 6954 bnx2x_fw_command(bp, reset_code);
6911 } 6955 }
6912 6956
6957 /* now it's safe to release the lock */
6958 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6959
6913 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 : 6960 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6914 HC_REG_CONFIG_0), 0x1000); 6961 HC_REG_CONFIG_0), 0x1000);
6915 6962
@@ -6954,7 +7001,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6954 bp->fw_seq = 7001 bp->fw_seq =
6955 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 7002 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6956 DRV_MSG_SEQ_NUMBER_MASK); 7003 DRV_MSG_SEQ_NUMBER_MASK);
6957 } 7004
7005 } else
7006 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6958 } 7007 }
6959} 7008}
6960 7009
@@ -6971,7 +7020,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6971 id |= ((val & 0xf) << 12); 7020 id |= ((val & 0xf) << 12);
6972 val = REG_RD(bp, MISC_REG_CHIP_METAL); 7021 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6973 id |= ((val & 0xff) << 4); 7022 id |= ((val & 0xff) << 4);
6974 REG_RD(bp, MISC_REG_BOND_ID); 7023 val = REG_RD(bp, MISC_REG_BOND_ID);
6975 id |= (val & 0xf); 7024 id |= (val & 0xf);
6976 bp->common.chip_id = id; 7025 bp->common.chip_id = id;
6977 bp->link_params.chip_id = bp->common.chip_id; 7026 bp->link_params.chip_id = bp->common.chip_id;
@@ -8103,6 +8152,9 @@ static int bnx2x_get_eeprom(struct net_device *dev,
8103 struct bnx2x *bp = netdev_priv(dev); 8152 struct bnx2x *bp = netdev_priv(dev);
8104 int rc; 8153 int rc;
8105 8154
8155 if (!netif_running(dev))
8156 return -EAGAIN;
8157
8106 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" 8158 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8107 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", 8159 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8108 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, 8160 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
@@ -8705,18 +8757,17 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8705 8757
8706 if (loopback_mode == BNX2X_MAC_LOOPBACK) { 8758 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8707 bp->link_params.loopback_mode = LOOPBACK_BMAC; 8759 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8708 bnx2x_acquire_phy_lock(bp);
8709 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8760 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8710 bnx2x_release_phy_lock(bp);
8711 8761
8712 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) { 8762 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8763 u16 cnt = 1000;
8713 bp->link_params.loopback_mode = LOOPBACK_XGXS_10; 8764 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8714 bnx2x_acquire_phy_lock(bp);
8715 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8765 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8716 bnx2x_release_phy_lock(bp);
8717 /* wait until link state is restored */ 8766 /* wait until link state is restored */
8718 bnx2x_wait_for_link(bp, link_up); 8767 if (link_up)
8719 8768 while (cnt-- && bnx2x_test_link(&bp->link_params,
8769 &bp->link_vars))
8770 msleep(10);
8720 } else 8771 } else
8721 return -EINVAL; 8772 return -EINVAL;
8722 8773
@@ -8822,6 +8873,7 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8822 return BNX2X_LOOPBACK_FAILED; 8873 return BNX2X_LOOPBACK_FAILED;
8823 8874
8824 bnx2x_netif_stop(bp, 1); 8875 bnx2x_netif_stop(bp, 1);
8876 bnx2x_acquire_phy_lock(bp);
8825 8877
8826 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) { 8878 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8827 DP(NETIF_MSG_PROBE, "MAC loopback failed\n"); 8879 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
@@ -8833,6 +8885,7 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8833 rc |= BNX2X_PHY_LOOPBACK_FAILED; 8885 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8834 } 8886 }
8835 8887
8888 bnx2x_release_phy_lock(bp);
8836 bnx2x_netif_start(bp); 8889 bnx2x_netif_start(bp);
8837 8890
8838 return rc; 8891 return rc;
@@ -8906,7 +8959,10 @@ static int bnx2x_test_intr(struct bnx2x *bp)
8906 return -ENODEV; 8959 return -ENODEV;
8907 8960
8908 config->hdr.length_6b = 0; 8961 config->hdr.length_6b = 0;
8909 config->hdr.offset = 0; 8962 if (CHIP_IS_E1(bp))
8963 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
8964 else
8965 config->hdr.offset = BP_FUNC(bp);
8910 config->hdr.client_id = BP_CL_ID(bp); 8966 config->hdr.client_id = BP_CL_ID(bp);
8911 config->hdr.reserved1 = 0; 8967 config->hdr.reserved1 = 0;
8912 8968
@@ -9271,6 +9327,18 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9271 return 0; 9327 return 0;
9272} 9328}
9273 9329
9330static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9331{
9332 u16 rx_cons_sb;
9333
9334 /* Tell compiler that status block fields can change */
9335 barrier();
9336 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9337 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9338 rx_cons_sb++;
9339 return (fp->rx_comp_cons != rx_cons_sb);
9340}
9341
9274/* 9342/*
9275 * net_device service functions 9343 * net_device service functions
9276 */ 9344 */
@@ -9281,7 +9349,6 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
9281 napi); 9349 napi);
9282 struct bnx2x *bp = fp->bp; 9350 struct bnx2x *bp = fp->bp;
9283 int work_done = 0; 9351 int work_done = 0;
9284 u16 rx_cons_sb;
9285 9352
9286#ifdef BNX2X_STOP_ON_ERROR 9353#ifdef BNX2X_STOP_ON_ERROR
9287 if (unlikely(bp->panic)) 9354 if (unlikely(bp->panic))
@@ -9294,19 +9361,12 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
9294 9361
9295 bnx2x_update_fpsb_idx(fp); 9362 bnx2x_update_fpsb_idx(fp);
9296 9363
9297 if (BNX2X_HAS_TX_WORK(fp)) 9364 if (bnx2x_has_tx_work(fp))
9298 bnx2x_tx_int(fp, budget); 9365 bnx2x_tx_int(fp, budget);
9299 9366
9300 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); 9367 if (bnx2x_has_rx_work(fp))
9301 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9302 rx_cons_sb++;
9303 if (BNX2X_HAS_RX_WORK(fp))
9304 work_done = bnx2x_rx_int(fp, budget); 9368 work_done = bnx2x_rx_int(fp, budget);
9305
9306 rmb(); /* BNX2X_HAS_WORK() reads the status block */ 9369 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9307 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9308 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9309 rx_cons_sb++;
9310 9370
9311 /* must not complete if we consumed full budget */ 9371 /* must not complete if we consumed full budget */
9312 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) { 9372 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
@@ -9417,6 +9477,7 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9417 return rc; 9477 return rc;
9418} 9478}
9419 9479
9480#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9420/* check if packet requires linearization (packet is too fragmented) */ 9481/* check if packet requires linearization (packet is too fragmented) */
9421static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, 9482static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9422 u32 xmit_type) 9483 u32 xmit_type)
@@ -9494,6 +9555,7 @@ exit_lbl:
9494 9555
9495 return to_copy; 9556 return to_copy;
9496} 9557}
9558#endif
9497 9559
9498/* called with netif_tx_lock 9560/* called with netif_tx_lock
9499 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call 9561 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
@@ -9534,6 +9596,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9534 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 9596 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9535 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); 9597 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9536 9598
9599#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9537 /* First, check if we need to linearize the skb 9600 /* First, check if we need to linearize the skb
9538 (due to FW restrictions) */ 9601 (due to FW restrictions) */
9539 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { 9602 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
@@ -9546,6 +9609,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9546 return NETDEV_TX_OK; 9609 return NETDEV_TX_OK;
9547 } 9610 }
9548 } 9611 }
9612#endif
9549 9613
9550 /* 9614 /*
9551 Please read carefully. First we use one BD which we mark as start, 9615 Please read carefully. First we use one BD which we mark as start,
@@ -9776,6 +9840,8 @@ static int bnx2x_open(struct net_device *dev)
9776{ 9840{
9777 struct bnx2x *bp = netdev_priv(dev); 9841 struct bnx2x *bp = netdev_priv(dev);
9778 9842
9843 netif_carrier_off(dev);
9844
9779 bnx2x_set_power_state(bp, PCI_D0); 9845 bnx2x_set_power_state(bp, PCI_D0);
9780 9846
9781 return bnx2x_nic_load(bp, LOAD_OPEN); 9847 return bnx2x_nic_load(bp, LOAD_OPEN);
@@ -9859,7 +9925,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
9859 for (; i < old; i++) { 9925 for (; i < old; i++) {
9860 if (CAM_IS_INVALID(config-> 9926 if (CAM_IS_INVALID(config->
9861 config_table[i])) { 9927 config_table[i])) {
9862 i--; /* already invalidated */ 9928 /* already invalidated */
9863 break; 9929 break;
9864 } 9930 }
9865 /* invalidate */ 9931 /* invalidate */
@@ -10269,22 +10335,18 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10269 return rc; 10335 return rc;
10270 } 10336 }
10271 10337
10272 rc = register_netdev(dev);
10273 if (rc) {
10274 dev_err(&pdev->dev, "Cannot register net device\n");
10275 goto init_one_exit;
10276 }
10277
10278 pci_set_drvdata(pdev, dev); 10338 pci_set_drvdata(pdev, dev);
10279 10339
10280 rc = bnx2x_init_bp(bp); 10340 rc = bnx2x_init_bp(bp);
10341 if (rc)
10342 goto init_one_exit;
10343
10344 rc = register_netdev(dev);
10281 if (rc) { 10345 if (rc) {
10282 unregister_netdev(dev); 10346 dev_err(&pdev->dev, "Cannot register net device\n");
10283 goto init_one_exit; 10347 goto init_one_exit;
10284 } 10348 }
10285 10349
10286 netif_carrier_off(dev);
10287
10288 bp->common.name = board_info[ent->driver_data].name; 10350 bp->common.name = board_info[ent->driver_data].name;
10289 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx," 10351 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10290 " IRQ %d, ", dev->name, bp->common.name, 10352 " IRQ %d, ", dev->name, bp->common.name,
@@ -10432,6 +10494,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10432 bnx2x_free_skbs(bp); 10494 bnx2x_free_skbs(bp);
10433 for_each_queue(bp, i) 10495 for_each_queue(bp, i)
10434 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 10496 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10497 for_each_queue(bp, i)
10498 netif_napi_del(&bnx2x_fp(bp, i, napi));
10435 bnx2x_free_mem(bp); 10499 bnx2x_free_mem(bp);
10436 10500
10437 bp->state = BNX2X_STATE_CLOSED; 10501 bp->state = BNX2X_STATE_CLOSED;
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index a67b0c358ae4..d084e5fc4b51 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -1,6 +1,6 @@
1/* bnx2x_reg.h: Broadcom Everest network driver. 1/* bnx2x_reg.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2008 Broadcom Corporation 3 * Copyright (c) 2007-2009 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 14f9fb3e8795..379a1324db4e 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2104,6 +2104,7 @@ static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
2104{ 2104{
2105 lro_mgr->dev = qs->netdev; 2105 lro_mgr->dev = qs->netdev;
2106 lro_mgr->features = LRO_F_NAPI; 2106 lro_mgr->features = LRO_F_NAPI;
2107 lro_mgr->frag_align_pad = NET_IP_ALIGN;
2107 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY; 2108 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
2108 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; 2109 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
2109 lro_mgr->max_desc = T3_MAX_LRO_SES; 2110 lro_mgr->max_desc = T3_MAX_LRO_SES;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 26474c92193f..c986978ce761 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
31 31
32char e1000_driver_name[] = "e1000"; 32char e1000_driver_name[] = "e1000";
33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; 33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
34#define DRV_VERSION "7.3.20-k3-NAPI" 34#define DRV_VERSION "7.3.21-k3-NAPI"
35const char e1000_driver_version[] = DRV_VERSION; 35const char e1000_driver_version[] = DRV_VERSION;
36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
37 37
@@ -3712,7 +3712,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
3712 struct e1000_hw *hw = &adapter->hw; 3712 struct e1000_hw *hw = &adapter->hw;
3713 u32 rctl, icr = er32(ICR); 3713 u32 rctl, icr = er32(ICR);
3714 3714
3715 if (unlikely(!icr)) 3715 if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags)))
3716 return IRQ_NONE; /* Not our interrupt */ 3716 return IRQ_NONE; /* Not our interrupt */
3717 3717
3718 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 3718 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index cf43ee743b3c..0890162953e9 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -981,11 +981,15 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
981 ew32(PBA_ECC, reg); 981 ew32(PBA_ECC, reg);
982 } 982 }
983 983
984 /* PCI-Ex Control Register */ 984 /* PCI-Ex Control Registers */
985 if (hw->mac.type == e1000_82574) { 985 if (hw->mac.type == e1000_82574) {
986 reg = er32(GCR); 986 reg = er32(GCR);
987 reg |= (1 << 22); 987 reg |= (1 << 22);
988 ew32(GCR, reg); 988 ew32(GCR, reg);
989
990 reg = er32(GCR2);
991 reg |= 1;
992 ew32(GCR2, reg);
989 } 993 }
990 994
991 return; 995 return;
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index f25e961c6b3b..2d4ce0492df0 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -206,6 +206,7 @@ enum e1e_registers {
206 E1000_MANC2H = 0x05860, /* Management Control To Host - RW */ 206 E1000_MANC2H = 0x05860, /* Management Control To Host - RW */
207 E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */ 207 E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
208 E1000_GCR = 0x05B00, /* PCI-Ex Control */ 208 E1000_GCR = 0x05B00, /* PCI-Ex Control */
209 E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */
209 E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ 210 E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */
210 E1000_SWSM = 0x05B50, /* SW Semaphore */ 211 E1000_SWSM = 0x05B50, /* SW Semaphore */
211 E1000_FWSM = 0x05B54, /* FW Semaphore */ 212 E1000_FWSM = 0x05B54, /* FW Semaphore */
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 7e33c129d51c..2769083bfe83 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1698,7 +1698,7 @@ static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_priva
1698 /* 1698 /*
1699 * Set MII speed to 2.5 MHz 1699 * Set MII speed to 2.5 MHz
1700 */ 1700 */
1701 fep->phy_speed = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2; 1701 fep->phy_speed = (MCF_CLK / 3) / (2500000 * 2 ) * 2;
1702 fecp->fec_mii_speed = fep->phy_speed; 1702 fecp->fec_mii_speed = fep->phy_speed;
1703 1703
1704 fec_restart(dev, 0); 1704 fec_restart(dev, 0);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index ea530673236e..3f7eab42aef1 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1423,15 +1423,11 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1423{ 1423{
1424 struct gfar_private *priv = netdev_priv(dev); 1424 struct gfar_private *priv = netdev_priv(dev);
1425 unsigned long flags; 1425 unsigned long flags;
1426 struct vlan_group *old_grp;
1427 u32 tempval; 1426 u32 tempval;
1428 1427
1429 spin_lock_irqsave(&priv->rxlock, flags); 1428 spin_lock_irqsave(&priv->rxlock, flags);
1430 1429
1431 old_grp = priv->vlgrp; 1430 priv->vlgrp = grp;
1432
1433 if (old_grp == grp)
1434 return;
1435 1431
1436 if (grp) { 1432 if (grp) {
1437 /* Enable VLAN tag insertion */ 1433 /* Enable VLAN tag insertion */
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index f3706e415b45..f49a426ad681 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -234,6 +234,8 @@ static int gfar_mdio_probe(struct of_device *ofdev,
234 if (NULL == new_bus) 234 if (NULL == new_bus)
235 return -ENOMEM; 235 return -ENOMEM;
236 236
237 device_init_wakeup(&ofdev->dev, 1);
238
237 new_bus->name = "Gianfar MII Bus", 239 new_bus->name = "Gianfar MII Bus",
238 new_bus->read = &gfar_mdio_read, 240 new_bus->read = &gfar_mdio_read,
239 new_bus->write = &gfar_mdio_write, 241 new_bus->write = &gfar_mdio_write,
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index f5e2e7235fcb..13ca73f96ec6 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -699,11 +699,18 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
699 699
700 /* SGMII link check is done through the PCS register. */ 700 /* SGMII link check is done through the PCS register. */
701 if ((hw->phy.media_type != e1000_media_type_copper) || 701 if ((hw->phy.media_type != e1000_media_type_copper) ||
702 (igb_sgmii_active_82575(hw))) 702 (igb_sgmii_active_82575(hw))) {
703 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, 703 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
704 &duplex); 704 &duplex);
705 else 705 /*
706 * Use this flag to determine if link needs to be checked or
 707 * not. If we have link, clear the flag so that we do not
708 * continue to check for link.
709 */
710 hw->mac.get_link_status = !hw->mac.serdes_has_link;
711 } else {
706 ret_val = igb_check_for_copper_link(hw); 712 ret_val = igb_check_for_copper_link(hw);
713 }
707 714
708 return ret_val; 715 return ret_val;
709} 716}
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 5a27825cc48a..aebef8e48e76 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -300,11 +300,10 @@ struct igb_adapter {
300 300
301#define IGB_FLAG_HAS_MSI (1 << 0) 301#define IGB_FLAG_HAS_MSI (1 << 0)
302#define IGB_FLAG_MSI_ENABLE (1 << 1) 302#define IGB_FLAG_MSI_ENABLE (1 << 1)
303#define IGB_FLAG_HAS_DCA (1 << 2) 303#define IGB_FLAG_DCA_ENABLED (1 << 2)
304#define IGB_FLAG_DCA_ENABLED (1 << 3) 304#define IGB_FLAG_IN_NETPOLL (1 << 3)
305#define IGB_FLAG_IN_NETPOLL (1 << 5) 305#define IGB_FLAG_QUAD_PORT_A (1 << 4)
306#define IGB_FLAG_QUAD_PORT_A (1 << 6) 306#define IGB_FLAG_NEED_CTX_IDX (1 << 5)
307#define IGB_FLAG_NEED_CTX_IDX (1 << 7)
308 307
309enum e1000_state_t { 308enum e1000_state_t {
310 __IGB_TESTING, 309 __IGB_TESTING,
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index b82b0fb2056c..a50db5398fa5 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -206,10 +206,11 @@ static int __init igb_init_module(void)
206 206
207 global_quad_port_a = 0; 207 global_quad_port_a = 0;
208 208
209 ret = pci_register_driver(&igb_driver);
210#ifdef CONFIG_IGB_DCA 209#ifdef CONFIG_IGB_DCA
211 dca_register_notify(&dca_notifier); 210 dca_register_notify(&dca_notifier);
212#endif 211#endif
212
213 ret = pci_register_driver(&igb_driver);
213 return ret; 214 return ret;
214} 215}
215 216
@@ -1156,11 +1157,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1156 1157
1157 /* set flags */ 1158 /* set flags */
1158 switch (hw->mac.type) { 1159 switch (hw->mac.type) {
1159 case e1000_82576:
1160 case e1000_82575: 1160 case e1000_82575:
1161 adapter->flags |= IGB_FLAG_HAS_DCA;
1162 adapter->flags |= IGB_FLAG_NEED_CTX_IDX; 1161 adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
1163 break; 1162 break;
1163 case e1000_82576:
1164 default: 1164 default:
1165 break; 1165 break;
1166 } 1166 }
@@ -1310,8 +1310,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1310 goto err_register; 1310 goto err_register;
1311 1311
1312#ifdef CONFIG_IGB_DCA 1312#ifdef CONFIG_IGB_DCA
1313 if ((adapter->flags & IGB_FLAG_HAS_DCA) && 1313 if (dca_add_requester(&pdev->dev) == 0) {
1314 (dca_add_requester(&pdev->dev) == 0)) {
1315 adapter->flags |= IGB_FLAG_DCA_ENABLED; 1314 adapter->flags |= IGB_FLAG_DCA_ENABLED;
1316 dev_info(&pdev->dev, "DCA enabled\n"); 1315 dev_info(&pdev->dev, "DCA enabled\n");
1317 /* Always use CB2 mode, difference is masked 1316 /* Always use CB2 mode, difference is masked
@@ -1835,11 +1834,11 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
1835 rctl |= E1000_RCTL_SECRC; 1834 rctl |= E1000_RCTL_SECRC;
1836 1835
1837 /* 1836 /*
1838 * disable store bad packets, long packet enable, and clear size bits. 1837 * disable store bad packets and clear size bits.
1839 */ 1838 */
1840 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_LPE | E1000_RCTL_SZ_256); 1839 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
1841 1840
1842 if (adapter->netdev->mtu > ETH_DATA_LEN) 1841 /* enable LPE to prevent packets larger than max_frame_size */
1843 rctl |= E1000_RCTL_LPE; 1842 rctl |= E1000_RCTL_LPE;
1844 1843
1845 /* Setup buffer sizes */ 1844 /* Setup buffer sizes */
@@ -1865,7 +1864,7 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
1865 */ 1864 */
1866 /* allocations using alloc_page take too long for regular MTU 1865 /* allocations using alloc_page take too long for regular MTU
1867 * so only enable packet split for jumbo frames */ 1866 * so only enable packet split for jumbo frames */
1868 if (rctl & E1000_RCTL_LPE) { 1867 if (adapter->netdev->mtu > ETH_DATA_LEN) {
1869 adapter->rx_ps_hdr_size = IGB_RXBUFFER_128; 1868 adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
1870 srrctl |= adapter->rx_ps_hdr_size << 1869 srrctl |= adapter->rx_ps_hdr_size <<
1871 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 1870 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
@@ -3473,19 +3472,16 @@ static int __igb_notify_dca(struct device *dev, void *data)
3473 struct e1000_hw *hw = &adapter->hw; 3472 struct e1000_hw *hw = &adapter->hw;
3474 unsigned long event = *(unsigned long *)data; 3473 unsigned long event = *(unsigned long *)data;
3475 3474
3476 if (!(adapter->flags & IGB_FLAG_HAS_DCA))
3477 goto out;
3478
3479 switch (event) { 3475 switch (event) {
3480 case DCA_PROVIDER_ADD: 3476 case DCA_PROVIDER_ADD:
3481 /* if already enabled, don't do it again */ 3477 /* if already enabled, don't do it again */
3482 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3478 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3483 break; 3479 break;
3484 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3485 /* Always use CB2 mode, difference is masked 3480 /* Always use CB2 mode, difference is masked
3486 * in the CB driver. */ 3481 * in the CB driver. */
3487 wr32(E1000_DCA_CTRL, 2); 3482 wr32(E1000_DCA_CTRL, 2);
3488 if (dca_add_requester(dev) == 0) { 3483 if (dca_add_requester(dev) == 0) {
3484 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3489 dev_info(&adapter->pdev->dev, "DCA enabled\n"); 3485 dev_info(&adapter->pdev->dev, "DCA enabled\n");
3490 igb_setup_dca(adapter); 3486 igb_setup_dca(adapter);
3491 break; 3487 break;
@@ -3502,7 +3498,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
3502 } 3498 }
3503 break; 3499 break;
3504 } 3500 }
3505out: 3501
3506 return 0; 3502 return 0;
3507} 3503}
3508 3504
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index acef3c65cd2c..d2f4d5f508b7 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -318,6 +318,9 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
318 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 318 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
319 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; 319 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
320 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; 320 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
321 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
322 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
323 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
321 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); 324 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
322 rx_ring->cpu = cpu; 325 rx_ring->cpu = cpu;
323 } 326 }
@@ -1741,6 +1744,32 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1741 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 1744 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1742} 1745}
1743 1746
1747static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1748{
1749 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1750 struct ixgbe_hw *hw = &adapter->hw;
1751
1752 /* add VID to filter table */
1753 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
1754}
1755
1756static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1757{
1758 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1759 struct ixgbe_hw *hw = &adapter->hw;
1760
1761 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1762 ixgbe_irq_disable(adapter);
1763
1764 vlan_group_set_device(adapter->vlgrp, vid, NULL);
1765
1766 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1767 ixgbe_irq_enable(adapter);
1768
1769 /* remove VID from filter table */
1770 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
1771}
1772
1744static void ixgbe_vlan_rx_register(struct net_device *netdev, 1773static void ixgbe_vlan_rx_register(struct net_device *netdev,
1745 struct vlan_group *grp) 1774 struct vlan_group *grp)
1746{ 1775{
@@ -1760,6 +1789,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
1760 ctrl |= IXGBE_VLNCTRL_VME; 1789 ctrl |= IXGBE_VLNCTRL_VME;
1761 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1790 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1762 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); 1791 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1792 ixgbe_vlan_rx_add_vid(netdev, 0);
1763 1793
1764 if (grp) { 1794 if (grp) {
1765 /* enable VLAN tag insert/strip */ 1795 /* enable VLAN tag insert/strip */
@@ -1773,32 +1803,6 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
1773 ixgbe_irq_enable(adapter); 1803 ixgbe_irq_enable(adapter);
1774} 1804}
1775 1805
1776static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1777{
1778 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1779 struct ixgbe_hw *hw = &adapter->hw;
1780
1781 /* add VID to filter table */
1782 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
1783}
1784
1785static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1786{
1787 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1788 struct ixgbe_hw *hw = &adapter->hw;
1789
1790 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1791 ixgbe_irq_disable(adapter);
1792
1793 vlan_group_set_device(adapter->vlgrp, vid, NULL);
1794
1795 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1796 ixgbe_irq_enable(adapter);
1797
1798 /* remove VID from filter table */
1799 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
1800}
1801
1802static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) 1806static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
1803{ 1807{
1804 ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp); 1808 ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
@@ -2074,6 +2078,9 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2074 2078
2075 ixgbe_irq_enable(adapter); 2079 ixgbe_irq_enable(adapter);
2076 2080
2081 /* enable transmits */
2082 netif_tx_start_all_queues(netdev);
2083
2077 /* bring the link up in the watchdog, this could race with our first 2084 /* bring the link up in the watchdog, this could race with our first
2078 * link up interrupt but shouldn't be a problem */ 2085 * link up interrupt but shouldn't be a problem */
2079 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2086 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -3475,7 +3482,6 @@ static void ixgbe_watchdog_task(struct work_struct *work)
3475 (FLOW_TX ? "TX" : "None")))); 3482 (FLOW_TX ? "TX" : "None"))));
3476 3483
3477 netif_carrier_on(netdev); 3484 netif_carrier_on(netdev);
3478 netif_tx_wake_all_queues(netdev);
3479 } else { 3485 } else {
3480 /* Force detection of hung controller */ 3486 /* Force detection of hung controller */
3481 adapter->detect_tx_hung = true; 3487 adapter->detect_tx_hung = true;
@@ -3487,7 +3493,6 @@ static void ixgbe_watchdog_task(struct work_struct *work)
3487 printk(KERN_INFO "ixgbe: %s NIC Link is Down\n", 3493 printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
3488 netdev->name); 3494 netdev->name);
3489 netif_carrier_off(netdev); 3495 netif_carrier_off(netdev);
3490 netif_tx_stop_all_queues(netdev);
3491 } 3496 }
3492 } 3497 }
3493 3498
@@ -4218,7 +4223,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
4218 } 4223 }
4219 4224
4220 netif_carrier_off(netdev); 4225 netif_carrier_off(netdev);
4221 netif_tx_stop_all_queues(netdev);
4222 4226
4223 strcpy(netdev->name, "eth%d"); 4227 strcpy(netdev->name, "eth%d");
4224 err = register_netdev(netdev); 4228 err = register_netdev(netdev);
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 83a11ff9ffd1..f011c57c9205 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -404,6 +404,9 @@
404#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ 404#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
405#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ 405#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
406#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ 406#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
407#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
408#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */
409#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */
407 410
408#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ 411#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
409#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ 412#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 1d6e48e13366..75010cac76ac 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -416,6 +416,9 @@ static int korina_rx(struct net_device *dev, int limit)
416 if (devcs & ETH_RX_MP) 416 if (devcs & ETH_RX_MP)
417 dev->stats.multicast++; 417 dev->stats.multicast++;
418 418
419 /* 16 bit align */
420 skb_reserve(skb_new, 2);
421
419 lp->rx_skb[lp->rx_next_done] = skb_new; 422 lp->rx_skb[lp->rx_next_done] = skb_new;
420 } 423 }
421 424
@@ -740,6 +743,7 @@ static struct ethtool_ops netdev_ethtool_ops = {
740static void korina_alloc_ring(struct net_device *dev) 743static void korina_alloc_ring(struct net_device *dev)
741{ 744{
742 struct korina_private *lp = netdev_priv(dev); 745 struct korina_private *lp = netdev_priv(dev);
746 struct sk_buff *skb;
743 int i; 747 int i;
744 748
745 /* Initialize the transmit descriptors */ 749 /* Initialize the transmit descriptors */
@@ -755,8 +759,6 @@ static void korina_alloc_ring(struct net_device *dev)
755 759
756 /* Initialize the receive descriptors */ 760 /* Initialize the receive descriptors */
757 for (i = 0; i < KORINA_NUM_RDS; i++) { 761 for (i = 0; i < KORINA_NUM_RDS; i++) {
758 struct sk_buff *skb = lp->rx_skb[i];
759
760 skb = dev_alloc_skb(KORINA_RBSIZE + 2); 762 skb = dev_alloc_skb(KORINA_RBSIZE + 2);
761 if (!skb) 763 if (!skb)
762 break; 764 break;
@@ -769,11 +771,12 @@ static void korina_alloc_ring(struct net_device *dev)
769 lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]); 771 lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
770 } 772 }
771 773
772 /* loop back */ 774 /* loop back receive descriptors, so the last
773 lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[0]); 775 * descriptor points to the first one */
774 lp->rx_next_done = 0; 776 lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]);
777 lp->rd_ring[i - 1].control |= DMA_DESC_COD;
775 778
776 lp->rd_ring[i].control |= DMA_DESC_COD; 779 lp->rx_next_done = 0;
777 lp->rx_chain_head = 0; 780 lp->rx_chain_head = 0;
778 lp->rx_chain_tail = 0; 781 lp->rx_chain_tail = 0;
779 lp->rx_chain_status = desc_empty; 782 lp->rx_chain_status = desc_empty;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index a04da4ecaa88..f6c4936e2fa8 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -321,6 +321,10 @@ static void macb_tx(struct macb *bp)
321 printk(KERN_ERR "%s: TX underrun, resetting buffers\n", 321 printk(KERN_ERR "%s: TX underrun, resetting buffers\n",
322 bp->dev->name); 322 bp->dev->name);
323 323
 324 /* Transfer ongoing, disable transmitter to avoid confusion */
325 if (status & MACB_BIT(TGO))
326 macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));
327
324 head = bp->tx_head; 328 head = bp->tx_head;
325 329
326 /*Mark all the buffer as used to avoid sending a lost buffer*/ 330 /*Mark all the buffer as used to avoid sending a lost buffer*/
@@ -343,6 +347,10 @@ static void macb_tx(struct macb *bp)
343 } 347 }
344 348
345 bp->tx_head = bp->tx_tail = 0; 349 bp->tx_head = bp->tx_tail = 0;
350
351 /* Enable the transmitter again */
352 if (status & MACB_BIT(TGO))
353 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
346 } 354 }
347 355
348 if (!(status & MACB_BIT(COMP))) 356 if (!(status & MACB_BIT(COMP)))
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 7253a499d9c8..5f31bbb614af 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -136,21 +136,23 @@ static char mv643xx_eth_driver_version[] = "1.4";
136/* 136/*
137 * SDMA configuration register. 137 * SDMA configuration register.
138 */ 138 */
139#define RX_BURST_SIZE_4_64BIT (2 << 1)
139#define RX_BURST_SIZE_16_64BIT (4 << 1) 140#define RX_BURST_SIZE_16_64BIT (4 << 1)
140#define BLM_RX_NO_SWAP (1 << 4) 141#define BLM_RX_NO_SWAP (1 << 4)
141#define BLM_TX_NO_SWAP (1 << 5) 142#define BLM_TX_NO_SWAP (1 << 5)
143#define TX_BURST_SIZE_4_64BIT (2 << 22)
142#define TX_BURST_SIZE_16_64BIT (4 << 22) 144#define TX_BURST_SIZE_16_64BIT (4 << 22)
143 145
144#if defined(__BIG_ENDIAN) 146#if defined(__BIG_ENDIAN)
145#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 147#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
146 (RX_BURST_SIZE_16_64BIT | \ 148 (RX_BURST_SIZE_4_64BIT | \
147 TX_BURST_SIZE_16_64BIT) 149 TX_BURST_SIZE_4_64BIT)
148#elif defined(__LITTLE_ENDIAN) 150#elif defined(__LITTLE_ENDIAN)
149#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 151#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
150 (RX_BURST_SIZE_16_64BIT | \ 152 (RX_BURST_SIZE_4_64BIT | \
151 BLM_RX_NO_SWAP | \ 153 BLM_RX_NO_SWAP | \
152 BLM_TX_NO_SWAP | \ 154 BLM_TX_NO_SWAP | \
153 TX_BURST_SIZE_16_64BIT) 155 TX_BURST_SIZE_4_64BIT)
154#else 156#else
155#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined 157#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
156#endif 158#endif
@@ -1594,7 +1596,7 @@ oom:
1594 entry = addr_crc(a); 1596 entry = addr_crc(a);
1595 } 1597 }
1596 1598
1597 table[entry >> 2] |= 1 << (entry & 3); 1599 table[entry >> 2] |= 1 << (8 * (entry & 3));
1598 } 1600 }
1599 1601
1600 for (i = 0; i < 0x100; i += 4) { 1602 for (i = 0; i < 0x100; i += 4) {
@@ -2210,6 +2212,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
2210 struct mv643xx_eth_private *mp = netdev_priv(dev); 2212 struct mv643xx_eth_private *mp = netdev_priv(dev);
2211 int i; 2213 int i;
2212 2214
2215 wrlp(mp, INT_MASK_EXT, 0x00000000);
2213 wrlp(mp, INT_MASK, 0x00000000); 2216 wrlp(mp, INT_MASK, 0x00000000);
2214 rdlp(mp, INT_MASK); 2217 rdlp(mp, INT_MASK);
2215 2218
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 6bb71b687f7b..e9c1296b267e 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1,7 +1,7 @@
1/************************************************************************* 1/*************************************************************************
2 * myri10ge.c: Myricom Myri-10G Ethernet driver. 2 * myri10ge.c: Myricom Myri-10G Ethernet driver.
3 * 3 *
4 * Copyright (C) 2005 - 2007 Myricom, Inc. 4 * Copyright (C) 2005 - 2009 Myricom, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
@@ -75,7 +75,7 @@
75#include "myri10ge_mcp.h" 75#include "myri10ge_mcp.h"
76#include "myri10ge_mcp_gen_header.h" 76#include "myri10ge_mcp_gen_header.h"
77 77
78#define MYRI10GE_VERSION_STR "1.4.4-1.398" 78#define MYRI10GE_VERSION_STR "1.4.4-1.401"
79 79
80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
81MODULE_AUTHOR("Maintainer: help@myri.com"); 81MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -3786,7 +3786,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3786 if (status != 0) { 3786 if (status != 0) {
3787 dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n", 3787 dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
3788 status); 3788 status);
3789 goto abort_with_netdev; 3789 goto abort_with_enabled;
3790 } 3790 }
3791 3791
3792 pci_set_master(pdev); 3792 pci_set_master(pdev);
@@ -3801,13 +3801,13 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3801 } 3801 }
3802 if (status != 0) { 3802 if (status != 0) {
3803 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); 3803 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
3804 goto abort_with_netdev; 3804 goto abort_with_enabled;
3805 } 3805 }
3806 (void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3806 (void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3807 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), 3807 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
3808 &mgp->cmd_bus, GFP_KERNEL); 3808 &mgp->cmd_bus, GFP_KERNEL);
3809 if (mgp->cmd == NULL) 3809 if (mgp->cmd == NULL)
3810 goto abort_with_netdev; 3810 goto abort_with_enabled;
3811 3811
3812 mgp->board_span = pci_resource_len(pdev, 0); 3812 mgp->board_span = pci_resource_len(pdev, 0);
3813 mgp->iomem_base = pci_resource_start(pdev, 0); 3813 mgp->iomem_base = pci_resource_start(pdev, 0);
@@ -3943,8 +3943,10 @@ abort_with_mtrr:
3943 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 3943 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
3944 mgp->cmd, mgp->cmd_bus); 3944 mgp->cmd, mgp->cmd_bus);
3945 3945
3946abort_with_netdev: 3946abort_with_enabled:
3947 pci_disable_device(pdev);
3947 3948
3949abort_with_netdev:
3948 free_netdev(netdev); 3950 free_netdev(netdev);
3949 return status; 3951 return status;
3950} 3952}
@@ -3990,6 +3992,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
3990 mgp->cmd, mgp->cmd_bus); 3992 mgp->cmd, mgp->cmd_bus);
3991 3993
3992 free_netdev(netdev); 3994 free_netdev(netdev);
3995 pci_disable_device(pdev);
3993 pci_set_drvdata(pdev, NULL); 3996 pci_set_drvdata(pdev, NULL);
3994} 3997}
3995 3998
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index c11c568fd7db..9c78c963b721 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -146,7 +146,7 @@
146 146
147#define MAX_RX_BUFFER_LENGTH 1760 147#define MAX_RX_BUFFER_LENGTH 1760
148#define MAX_RX_JUMBO_BUFFER_LENGTH 8062 148#define MAX_RX_JUMBO_BUFFER_LENGTH 8062
149#define MAX_RX_LRO_BUFFER_LENGTH ((48*1024)-512) 149#define MAX_RX_LRO_BUFFER_LENGTH (8062)
150#define RX_DMA_MAP_LEN (MAX_RX_BUFFER_LENGTH - 2) 150#define RX_DMA_MAP_LEN (MAX_RX_BUFFER_LENGTH - 2)
151#define RX_JUMBO_DMA_MAP_LEN \ 151#define RX_JUMBO_DMA_MAP_LEN \
152 (MAX_RX_JUMBO_BUFFER_LENGTH - 2) 152 (MAX_RX_JUMBO_BUFFER_LENGTH - 2)
@@ -207,11 +207,11 @@
207 207
208#define MAX_CMD_DESCRIPTORS 4096 208#define MAX_CMD_DESCRIPTORS 4096
209#define MAX_RCV_DESCRIPTORS 16384 209#define MAX_RCV_DESCRIPTORS 16384
210#define MAX_CMD_DESCRIPTORS_HOST (MAX_CMD_DESCRIPTORS / 4) 210#define MAX_CMD_DESCRIPTORS_HOST 1024
211#define MAX_RCV_DESCRIPTORS_1G (MAX_RCV_DESCRIPTORS / 4) 211#define MAX_RCV_DESCRIPTORS_1G 2048
212#define MAX_RCV_DESCRIPTORS_10G 8192 212#define MAX_RCV_DESCRIPTORS_10G 4096
213#define MAX_JUMBO_RCV_DESCRIPTORS 1024 213#define MAX_JUMBO_RCV_DESCRIPTORS 1024
214#define MAX_LRO_RCV_DESCRIPTORS 64 214#define MAX_LRO_RCV_DESCRIPTORS 8
215#define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS 215#define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS
216#define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS 216#define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS
217#define MAX_RCV_DESC MAX_RCV_DESCRIPTORS 217#define MAX_RCV_DESC MAX_RCV_DESCRIPTORS
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index c0bd40fcf708..0894a7be0225 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -561,7 +561,10 @@ netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
561 } 561 }
562 ring->tx_pending = adapter->max_tx_desc_count; 562 ring->tx_pending = adapter->max_tx_desc_count;
563 563
564 ring->rx_max_pending = MAX_RCV_DESCRIPTORS; 564 if (adapter->ahw.board_type == NETXEN_NIC_GBE)
565 ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
566 else
567 ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
565 ring->tx_max_pending = MAX_CMD_DESCRIPTORS_HOST; 568 ring->tx_max_pending = MAX_CMD_DESCRIPTORS_HOST;
566 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS; 569 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS;
567 ring->rx_mini_max_pending = 0; 570 ring->rx_mini_max_pending = 0;
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index ca7c8d8050c9..ffd37bea1628 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -947,8 +947,10 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
947 } 947 }
948 for (i = 0; i < n; i++) { 948 for (i = 0; i < n; i++) {
949 if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || 949 if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
950 netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) 950 netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
951 kfree(buf);
951 return -EIO; 952 return -EIO;
953 }
952 954
953 buf[i].addr = addr; 955 buf[i].addr = addr;
954 buf[i].data = val; 956 buf[i].data = val;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index d854f07ef4d3..645d384fe87e 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -735,17 +735,18 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
735 735
736 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); 736 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
737 737
738 /* ScatterGather support */ 738 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
739 netdev->features = NETIF_F_SG; 739 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
740 netdev->features |= NETIF_F_IP_CSUM; 740
741 netdev->features |= NETIF_F_TSO;
742 if (NX_IS_REVISION_P3(revision_id)) { 741 if (NX_IS_REVISION_P3(revision_id)) {
743 netdev->features |= NETIF_F_IPV6_CSUM; 742 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
744 netdev->features |= NETIF_F_TSO6; 743 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
745 } 744 }
746 745
747 if (adapter->pci_using_dac) 746 if (adapter->pci_using_dac) {
748 netdev->features |= NETIF_F_HIGHDMA; 747 netdev->features |= NETIF_F_HIGHDMA;
748 netdev->vlan_features |= NETIF_F_HIGHDMA;
749 }
749 750
750 /* 751 /*
751 * Set the CRB window to invalid. If any register in window 0 is 752 * Set the CRB window to invalid. If any register in window 0 is
@@ -1166,6 +1167,14 @@ static bool netxen_tso_check(struct net_device *netdev,
1166{ 1167{
1167 bool tso = false; 1168 bool tso = false;
1168 u8 opcode = TX_ETHER_PKT; 1169 u8 opcode = TX_ETHER_PKT;
1170 __be16 protocol = skb->protocol;
1171 u16 flags = 0;
1172
1173 if (protocol == __constant_htons(ETH_P_8021Q)) {
1174 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)skb->data;
1175 protocol = vh->h_vlan_encapsulated_proto;
1176 flags = FLAGS_VLAN_TAGGED;
1177 }
1169 1178
1170 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && 1179 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1171 skb_shinfo(skb)->gso_size > 0) { 1180 skb_shinfo(skb)->gso_size > 0) {
@@ -1174,21 +1183,21 @@ static bool netxen_tso_check(struct net_device *netdev,
1174 desc->total_hdr_length = 1183 desc->total_hdr_length =
1175 skb_transport_offset(skb) + tcp_hdrlen(skb); 1184 skb_transport_offset(skb) + tcp_hdrlen(skb);
1176 1185
1177 opcode = (skb->protocol == htons(ETH_P_IPV6)) ? 1186 opcode = (protocol == __constant_htons(ETH_P_IPV6)) ?
1178 TX_TCP_LSO6 : TX_TCP_LSO; 1187 TX_TCP_LSO6 : TX_TCP_LSO;
1179 tso = true; 1188 tso = true;
1180 1189
1181 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 1190 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1182 u8 l4proto; 1191 u8 l4proto;
1183 1192
1184 if (skb->protocol == htons(ETH_P_IP)) { 1193 if (protocol == __constant_htons(ETH_P_IP)) {
1185 l4proto = ip_hdr(skb)->protocol; 1194 l4proto = ip_hdr(skb)->protocol;
1186 1195
1187 if (l4proto == IPPROTO_TCP) 1196 if (l4proto == IPPROTO_TCP)
1188 opcode = TX_TCP_PKT; 1197 opcode = TX_TCP_PKT;
1189 else if(l4proto == IPPROTO_UDP) 1198 else if(l4proto == IPPROTO_UDP)
1190 opcode = TX_UDP_PKT; 1199 opcode = TX_UDP_PKT;
1191 } else if (skb->protocol == htons(ETH_P_IPV6)) { 1200 } else if (protocol == __constant_htons(ETH_P_IPV6)) {
1192 l4proto = ipv6_hdr(skb)->nexthdr; 1201 l4proto = ipv6_hdr(skb)->nexthdr;
1193 1202
1194 if (l4proto == IPPROTO_TCP) 1203 if (l4proto == IPPROTO_TCP)
@@ -1199,7 +1208,7 @@ static bool netxen_tso_check(struct net_device *netdev,
1199 } 1208 }
1200 desc->tcp_hdr_offset = skb_transport_offset(skb); 1209 desc->tcp_hdr_offset = skb_transport_offset(skb);
1201 desc->ip_hdr_offset = skb_network_offset(skb); 1210 desc->ip_hdr_offset = skb_network_offset(skb);
1202 netxen_set_tx_flags_opcode(desc, 0, opcode); 1211 netxen_set_tx_flags_opcode(desc, flags, opcode);
1203 return tso; 1212 return tso;
1204} 1213}
1205 1214
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 11adf6ed4628..811a637695ca 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -296,9 +296,8 @@ static int mdio_bus_suspend(struct device * dev, pm_message_t state)
296 struct phy_driver *phydrv = to_phy_driver(drv); 296 struct phy_driver *phydrv = to_phy_driver(drv);
297 struct phy_device *phydev = to_phy_device(dev); 297 struct phy_device *phydev = to_phy_device(dev);
298 298
299 if ((!device_may_wakeup(phydev->dev.parent)) && 299 if (drv && phydrv->suspend && !device_may_wakeup(phydev->dev.parent))
300 (phydrv && phydrv->suspend)) 300 ret = phydrv->suspend(phydev);
301 ret = phydrv->suspend(phydev);
302 301
303 return ret; 302 return ret;
304} 303}
@@ -310,8 +309,7 @@ static int mdio_bus_resume(struct device * dev)
310 struct phy_driver *phydrv = to_phy_driver(drv); 309 struct phy_driver *phydrv = to_phy_driver(drv);
311 struct phy_device *phydev = to_phy_device(dev); 310 struct phy_device *phydev = to_phy_device(dev);
312 311
313 if ((!device_may_wakeup(phydev->dev.parent)) && 312 if (drv && phydrv->resume && !device_may_wakeup(phydev->dev.parent))
314 (phydrv && phydrv->resume))
315 ret = phydrv->resume(phydev); 313 ret = phydrv->resume(phydev);
316 314
317 return ret; 315 return ret;
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c05d38d46350..1387187543e4 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -81,6 +81,9 @@ static struct phy_driver lan83c185_driver = {
81 .ack_interrupt = smsc_phy_ack_interrupt, 81 .ack_interrupt = smsc_phy_ack_interrupt,
82 .config_intr = smsc_phy_config_intr, 82 .config_intr = smsc_phy_config_intr,
83 83
84 .suspend = genphy_suspend,
85 .resume = genphy_resume,
86
84 .driver = { .owner = THIS_MODULE, } 87 .driver = { .owner = THIS_MODULE, }
85}; 88};
86 89
@@ -102,6 +105,9 @@ static struct phy_driver lan8187_driver = {
102 .ack_interrupt = smsc_phy_ack_interrupt, 105 .ack_interrupt = smsc_phy_ack_interrupt,
103 .config_intr = smsc_phy_config_intr, 106 .config_intr = smsc_phy_config_intr,
104 107
108 .suspend = genphy_suspend,
109 .resume = genphy_resume,
110
105 .driver = { .owner = THIS_MODULE, } 111 .driver = { .owner = THIS_MODULE, }
106}; 112};
107 113
@@ -123,6 +129,9 @@ static struct phy_driver lan8700_driver = {
123 .ack_interrupt = smsc_phy_ack_interrupt, 129 .ack_interrupt = smsc_phy_ack_interrupt,
124 .config_intr = smsc_phy_config_intr, 130 .config_intr = smsc_phy_config_intr,
125 131
132 .suspend = genphy_suspend,
133 .resume = genphy_resume,
134
126 .driver = { .owner = THIS_MODULE, } 135 .driver = { .owner = THIS_MODULE, }
127}; 136};
128 137
@@ -144,6 +153,9 @@ static struct phy_driver lan911x_int_driver = {
144 .ack_interrupt = smsc_phy_ack_interrupt, 153 .ack_interrupt = smsc_phy_ack_interrupt,
145 .config_intr = smsc_phy_config_intr, 154 .config_intr = smsc_phy_config_intr,
146 155
156 .suspend = genphy_suspend,
157 .resume = genphy_resume,
158
147 .driver = { .owner = THIS_MODULE, } 159 .driver = { .owner = THIS_MODULE, }
148}; 160};
149 161
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 72fd9e97c190..b2dcdb5ed8bd 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -438,7 +438,6 @@ static void r6040_down(struct net_device *dev)
438{ 438{
439 struct r6040_private *lp = netdev_priv(dev); 439 struct r6040_private *lp = netdev_priv(dev);
440 void __iomem *ioaddr = lp->base; 440 void __iomem *ioaddr = lp->base;
441 struct pci_dev *pdev = lp->pdev;
442 int limit = 2048; 441 int limit = 2048;
443 u16 *adrp; 442 u16 *adrp;
444 u16 cmd; 443 u16 cmd;
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 7673fd92eaf5..ab0e09bf154d 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -676,9 +676,8 @@ static int efx_init_port(struct efx_nic *efx)
676 rc = efx->phy_op->init(efx); 676 rc = efx->phy_op->init(efx);
677 if (rc) 677 if (rc)
678 return rc; 678 return rc;
679 efx->phy_op->reconfigure(efx);
680
681 mutex_lock(&efx->mac_lock); 679 mutex_lock(&efx->mac_lock);
680 efx->phy_op->reconfigure(efx);
682 rc = falcon_switch_mac(efx); 681 rc = falcon_switch_mac(efx);
683 mutex_unlock(&efx->mac_lock); 682 mutex_unlock(&efx->mac_lock);
684 if (rc) 683 if (rc)
@@ -686,7 +685,7 @@ static int efx_init_port(struct efx_nic *efx)
686 efx->mac_op->reconfigure(efx); 685 efx->mac_op->reconfigure(efx);
687 686
688 efx->port_initialized = true; 687 efx->port_initialized = true;
689 efx->stats_enabled = true; 688 efx_stats_enable(efx);
690 return 0; 689 return 0;
691 690
692fail: 691fail:
@@ -735,6 +734,7 @@ static void efx_fini_port(struct efx_nic *efx)
735 if (!efx->port_initialized) 734 if (!efx->port_initialized)
736 return; 735 return;
737 736
737 efx_stats_disable(efx);
738 efx->phy_op->fini(efx); 738 efx->phy_op->fini(efx);
739 efx->port_initialized = false; 739 efx->port_initialized = false;
740 740
@@ -1361,6 +1361,20 @@ static int efx_net_stop(struct net_device *net_dev)
1361 return 0; 1361 return 0;
1362} 1362}
1363 1363
1364void efx_stats_disable(struct efx_nic *efx)
1365{
1366 spin_lock(&efx->stats_lock);
1367 ++efx->stats_disable_count;
1368 spin_unlock(&efx->stats_lock);
1369}
1370
1371void efx_stats_enable(struct efx_nic *efx)
1372{
1373 spin_lock(&efx->stats_lock);
1374 --efx->stats_disable_count;
1375 spin_unlock(&efx->stats_lock);
1376}
1377
1364/* Context: process, dev_base_lock or RTNL held, non-blocking. */ 1378/* Context: process, dev_base_lock or RTNL held, non-blocking. */
1365static struct net_device_stats *efx_net_stats(struct net_device *net_dev) 1379static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1366{ 1380{
@@ -1369,12 +1383,12 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1369 struct net_device_stats *stats = &net_dev->stats; 1383 struct net_device_stats *stats = &net_dev->stats;
1370 1384
1371 /* Update stats if possible, but do not wait if another thread 1385 /* Update stats if possible, but do not wait if another thread
1372 * is updating them (or resetting the NIC); slightly stale 1386 * is updating them or if MAC stats fetches are temporarily
1373 * stats are acceptable. 1387 * disabled; slightly stale stats are acceptable.
1374 */ 1388 */
1375 if (!spin_trylock(&efx->stats_lock)) 1389 if (!spin_trylock(&efx->stats_lock))
1376 return stats; 1390 return stats;
1377 if (efx->stats_enabled) { 1391 if (!efx->stats_disable_count) {
1378 efx->mac_op->update_stats(efx); 1392 efx->mac_op->update_stats(efx);
1379 falcon_update_nic_stats(efx); 1393 falcon_update_nic_stats(efx);
1380 } 1394 }
@@ -1622,16 +1636,12 @@ static void efx_unregister_netdev(struct efx_nic *efx)
1622 1636
1623/* Tears down the entire software state and most of the hardware state 1637/* Tears down the entire software state and most of the hardware state
1624 * before reset. */ 1638 * before reset. */
1625void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd) 1639void efx_reset_down(struct efx_nic *efx, enum reset_type method,
1640 struct ethtool_cmd *ecmd)
1626{ 1641{
1627 EFX_ASSERT_RESET_SERIALISED(efx); 1642 EFX_ASSERT_RESET_SERIALISED(efx);
1628 1643
1629 /* The net_dev->get_stats handler is quite slow, and will fail 1644 efx_stats_disable(efx);
1630 * if a fetch is pending over reset. Serialise against it. */
1631 spin_lock(&efx->stats_lock);
1632 efx->stats_enabled = false;
1633 spin_unlock(&efx->stats_lock);
1634
1635 efx_stop_all(efx); 1645 efx_stop_all(efx);
1636 mutex_lock(&efx->mac_lock); 1646 mutex_lock(&efx->mac_lock);
1637 mutex_lock(&efx->spi_lock); 1647 mutex_lock(&efx->spi_lock);
@@ -1639,6 +1649,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1639 efx->phy_op->get_settings(efx, ecmd); 1649 efx->phy_op->get_settings(efx, ecmd);
1640 1650
1641 efx_fini_channels(efx); 1651 efx_fini_channels(efx);
1652 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
1653 efx->phy_op->fini(efx);
1642} 1654}
1643 1655
1644/* This function will always ensure that the locks acquired in 1656/* This function will always ensure that the locks acquired in
@@ -1646,7 +1658,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1646 * that we were unable to reinitialise the hardware, and the 1658 * that we were unable to reinitialise the hardware, and the
1647 * driver should be disabled. If ok is false, then the rx and tx 1659 * driver should be disabled. If ok is false, then the rx and tx
1648 * engines are not restarted, pending a RESET_DISABLE. */ 1660 * engines are not restarted, pending a RESET_DISABLE. */
1649int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok) 1661int efx_reset_up(struct efx_nic *efx, enum reset_type method,
1662 struct ethtool_cmd *ecmd, bool ok)
1650{ 1663{
1651 int rc; 1664 int rc;
1652 1665
@@ -1658,6 +1671,15 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
1658 ok = false; 1671 ok = false;
1659 } 1672 }
1660 1673
1674 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
1675 if (ok) {
1676 rc = efx->phy_op->init(efx);
1677 if (rc)
1678 ok = false;
1679 } else
1680 efx->port_initialized = false;
1681 }
1682
1661 if (ok) { 1683 if (ok) {
1662 efx_init_channels(efx); 1684 efx_init_channels(efx);
1663 1685
@@ -1670,7 +1692,7 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
1670 1692
1671 if (ok) { 1693 if (ok) {
1672 efx_start_all(efx); 1694 efx_start_all(efx);
1673 efx->stats_enabled = true; 1695 efx_stats_enable(efx);
1674 } 1696 }
1675 return rc; 1697 return rc;
1676} 1698}
@@ -1702,7 +1724,7 @@ static int efx_reset(struct efx_nic *efx)
1702 1724
1703 EFX_INFO(efx, "resetting (%d)\n", method); 1725 EFX_INFO(efx, "resetting (%d)\n", method);
1704 1726
1705 efx_reset_down(efx, &ecmd); 1727 efx_reset_down(efx, method, &ecmd);
1706 1728
1707 rc = falcon_reset_hw(efx, method); 1729 rc = falcon_reset_hw(efx, method);
1708 if (rc) { 1730 if (rc) {
@@ -1721,10 +1743,10 @@ static int efx_reset(struct efx_nic *efx)
1721 1743
1722 /* Leave device stopped if necessary */ 1744 /* Leave device stopped if necessary */
1723 if (method == RESET_TYPE_DISABLE) { 1745 if (method == RESET_TYPE_DISABLE) {
1724 efx_reset_up(efx, &ecmd, false); 1746 efx_reset_up(efx, method, &ecmd, false);
1725 rc = -EIO; 1747 rc = -EIO;
1726 } else { 1748 } else {
1727 rc = efx_reset_up(efx, &ecmd, true); 1749 rc = efx_reset_up(efx, method, &ecmd, true);
1728 } 1750 }
1729 1751
1730out_disable: 1752out_disable:
@@ -1876,6 +1898,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1876 efx->rx_checksum_enabled = true; 1898 efx->rx_checksum_enabled = true;
1877 spin_lock_init(&efx->netif_stop_lock); 1899 spin_lock_init(&efx->netif_stop_lock);
1878 spin_lock_init(&efx->stats_lock); 1900 spin_lock_init(&efx->stats_lock);
1901 efx->stats_disable_count = 1;
1879 mutex_init(&efx->mac_lock); 1902 mutex_init(&efx->mac_lock);
1880 efx->mac_op = &efx_dummy_mac_operations; 1903 efx->mac_op = &efx_dummy_mac_operations;
1881 efx->phy_op = &efx_dummy_phy_operations; 1904 efx->phy_op = &efx_dummy_phy_operations;
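The efx.c hunks above replace the boolean stats_enabled flag with a nest count (stats_disable_count, initialised to 1) behind efx_stats_disable()/efx_stats_enable(), so that reset handling, MAC switching and board code can each suppress MAC statistics fetches independently. Below is a minimal, self-contained userspace sketch of that nesting pattern; the struct, the pthread mutex and the function names are illustrative stand-ins, not the driver's own code (which serialises on stats_lock in efx.c).

/* Minimal userspace sketch of a nested "disable count", analogous to the
 * stats_disable_count introduced above.  Names and the pthread mutex are
 * illustrative assumptions; the driver itself uses a spinlock in efx.c. */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct stats_gate {
	pthread_mutex_t lock;
	unsigned int disable_count;	/* starts at 1: disabled until probe finishes */
};

static void stats_disable(struct stats_gate *g)
{
	pthread_mutex_lock(&g->lock);
	g->disable_count++;
	pthread_mutex_unlock(&g->lock);
}

static void stats_enable(struct stats_gate *g)
{
	pthread_mutex_lock(&g->lock);
	assert(g->disable_count > 0);
	g->disable_count--;
	pthread_mutex_unlock(&g->lock);
}

static bool stats_allowed(struct stats_gate *g)
{
	bool ok;

	pthread_mutex_lock(&g->lock);
	ok = (g->disable_count == 0);
	pthread_mutex_unlock(&g->lock);
	return ok;
}

int main(void)
{
	struct stats_gate g = { PTHREAD_MUTEX_INITIALIZER, 1 };

	stats_enable(&g);		/* end of probe */
	stats_disable(&g);		/* e.g. reset in progress */
	stats_disable(&g);		/* e.g. MAC switch in progress */
	stats_enable(&g);
	printf("fetch allowed: %d\n", stats_allowed(&g));	/* 0: still nested */
	stats_enable(&g);
	printf("fetch allowed: %d\n", stats_allowed(&g));	/* 1 */
	return 0;
}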
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 0dd7a532c78a..55d0f131b0e9 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -36,13 +36,16 @@ extern void efx_process_channel_now(struct efx_channel *channel);
36extern void efx_flush_queues(struct efx_nic *efx); 36extern void efx_flush_queues(struct efx_nic *efx);
37 37
38/* Ports */ 38/* Ports */
39extern void efx_stats_disable(struct efx_nic *efx);
40extern void efx_stats_enable(struct efx_nic *efx);
39extern void efx_reconfigure_port(struct efx_nic *efx); 41extern void efx_reconfigure_port(struct efx_nic *efx);
40extern void __efx_reconfigure_port(struct efx_nic *efx); 42extern void __efx_reconfigure_port(struct efx_nic *efx);
41 43
42/* Reset handling */ 44/* Reset handling */
43extern void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd); 45extern void efx_reset_down(struct efx_nic *efx, enum reset_type method,
44extern int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, 46 struct ethtool_cmd *ecmd);
45 bool ok); 47extern int efx_reset_up(struct efx_nic *efx, enum reset_type method,
48 struct ethtool_cmd *ecmd, bool ok);
46 49
47/* Global */ 50/* Global */
48extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); 51extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 53d259e90187..7b5924c039b3 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -219,9 +219,6 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
219 struct efx_nic *efx = netdev_priv(net_dev); 219 struct efx_nic *efx = netdev_priv(net_dev);
220 int rc; 220 int rc;
221 221
222 if (EFX_WORKAROUND_13963(efx) && !ecmd->autoneg)
223 return -EINVAL;
224
225 /* Falcon GMAC does not support 1000Mbps HD */ 222 /* Falcon GMAC does not support 1000Mbps HD */
226 if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { 223 if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
227 EFX_LOG(efx, "rejecting unsupported 1000Mbps HD" 224 EFX_LOG(efx, "rejecting unsupported 1000Mbps HD"
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 5b9f2d9cc4ed..d5378e60fcdd 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -824,10 +824,6 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
824 rx_ev_pause_frm ? " [PAUSE]" : ""); 824 rx_ev_pause_frm ? " [PAUSE]" : "");
825 } 825 }
826#endif 826#endif
827
828 if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) &&
829 efx->phy_type == PHY_TYPE_SFX7101))
830 tenxpress_crc_err(efx);
831} 827}
832 828
833/* Handle receive events that are not in-order. */ 829/* Handle receive events that are not in-order. */
@@ -1887,7 +1883,7 @@ static int falcon_reset_macs(struct efx_nic *efx)
1887 1883
1888 /* MAC stats will fail whilst the TX fifo is draining. Serialise 1884 /* MAC stats will fail whilst the TX fifo is draining. Serialise
1889 * the drain sequence with the statistics fetch */ 1885 * the drain sequence with the statistics fetch */
1890 spin_lock(&efx->stats_lock); 1886 efx_stats_disable(efx);
1891 1887
1892 falcon_read(efx, &reg, MAC0_CTRL_REG_KER); 1888 falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
1893 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1); 1889 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1);
@@ -1917,7 +1913,7 @@ static int falcon_reset_macs(struct efx_nic *efx)
1917 udelay(10); 1913 udelay(10);
1918 } 1914 }
1919 1915
1920 spin_unlock(&efx->stats_lock); 1916 efx_stats_enable(efx);
1921 1917
1922 /* If we've reset the EM block and the link is up, then 1918 /* If we've reset the EM block and the link is up, then
1923 * we'll have to kick the XAUI link so the PHY can recover */ 1919 * we'll have to kick the XAUI link so the PHY can recover */
@@ -2277,6 +2273,10 @@ int falcon_switch_mac(struct efx_nic *efx)
2277 struct efx_mac_operations *old_mac_op = efx->mac_op; 2273 struct efx_mac_operations *old_mac_op = efx->mac_op;
2278 efx_oword_t nic_stat; 2274 efx_oword_t nic_stat;
2279 unsigned strap_val; 2275 unsigned strap_val;
2276 int rc = 0;
2277
2278 /* Don't try to fetch MAC stats while we're switching MACs */
2279 efx_stats_disable(efx);
2280 2280
2281 /* Internal loopbacks override the phy speed setting */ 2281 /* Internal loopbacks override the phy speed setting */
2282 if (efx->loopback_mode == LOOPBACK_GMAC) { 2282 if (efx->loopback_mode == LOOPBACK_GMAC) {
@@ -2287,16 +2287,12 @@ int falcon_switch_mac(struct efx_nic *efx)
2287 efx->link_fd = true; 2287 efx->link_fd = true;
2288 } 2288 }
2289 2289
2290 WARN_ON(!mutex_is_locked(&efx->mac_lock));
2290 efx->mac_op = (EFX_IS10G(efx) ? 2291 efx->mac_op = (EFX_IS10G(efx) ?
2291 &falcon_xmac_operations : &falcon_gmac_operations); 2292 &falcon_xmac_operations : &falcon_gmac_operations);
2292 if (old_mac_op == efx->mac_op)
2293 return 0;
2294
2295 WARN_ON(!mutex_is_locked(&efx->mac_lock));
2296
2297 /* Not all macs support a mac-level link state */
2298 efx->mac_up = true;
2299 2293
2294 /* Always push the NIC_STAT_REG setting even if the mac hasn't
2295 * changed, because this function is run post online reset */
2300 falcon_read(efx, &nic_stat, NIC_STAT_REG); 2296 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2301 strap_val = EFX_IS10G(efx) ? 5 : 3; 2297 strap_val = EFX_IS10G(efx) ? 5 : 3;
2302 if (falcon_rev(efx) >= FALCON_REV_B0) { 2298 if (falcon_rev(efx) >= FALCON_REV_B0) {
@@ -2309,9 +2305,17 @@ int falcon_switch_mac(struct efx_nic *efx)
2309 BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val); 2305 BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val);
2310 } 2306 }
2311 2307
2308 if (old_mac_op == efx->mac_op)
2309 goto out;
2312 2310
2313 EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G'); 2311 EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
2314 return falcon_reset_macs(efx); 2312 /* Not all macs support a mac-level link state */
2313 efx->mac_up = true;
2314
2315 rc = falcon_reset_macs(efx);
2316out:
2317 efx_stats_enable(efx);
2318 return rc;
2315} 2319}
2316 2320
2317/* This call is responsible for hooking in the MAC and PHY operations */ 2321/* This call is responsible for hooking in the MAC and PHY operations */
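falcon_switch_mac() above now brackets the whole MAC switch with efx_stats_disable()/efx_stats_enable() and funnels every exit through a single out: label so the enable is never skipped, even when the MAC operations did not change. A compilable sketch of that control-flow shape follows; switch_mac_demo() and its helpers are stand-ins, not the Falcon code.

/* Sketch of the "single exit with paired enable" shape used in
 * falcon_switch_mac() above.  Only the control flow is meant to match. */
#include <stdbool.h>
#include <stdio.h>

static int hw_reset_macs(void) { return 0; }	/* stand-in for falcon_reset_macs() */
static void stats_disable(void) { puts("stats off"); }
static void stats_enable(void)  { puts("stats on"); }

static int switch_mac_demo(bool mac_changed)
{
	int rc = 0;

	stats_disable();	/* no MAC stats fetches while straps are changing */

	/* ... always push the strap/NIC_STAT settings here ... */

	if (!mac_changed)
		goto out;	/* nothing else to do, but still re-enable stats */

	rc = hw_reset_macs();
out:
	stats_enable();
	return rc;
}

int main(void)
{
	return switch_mac_demo(true);
}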
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index f6a16428113d..f9e2f95c3b48 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -15,6 +15,7 @@
15#include "net_driver.h" 15#include "net_driver.h"
16#include "mdio_10g.h" 16#include "mdio_10g.h"
17#include "boards.h" 17#include "boards.h"
18#include "workarounds.h"
18 19
19int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd, 20int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd,
20 int spins, int spintime) 21 int spins, int spintime)
@@ -179,17 +180,12 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
179 return false; 180 return false;
180 else if (efx_phy_mode_disabled(efx->phy_mode)) 181 else if (efx_phy_mode_disabled(efx->phy_mode))
181 return false; 182 return false;
182 else if (efx->loopback_mode == LOOPBACK_PHYXS) { 183 else if (efx->loopback_mode == LOOPBACK_PHYXS)
183 mmd_mask &= ~(MDIO_MMDREG_DEVS_PHYXS | 184 mmd_mask &= ~(MDIO_MMDREG_DEVS_PHYXS |
184 MDIO_MMDREG_DEVS_PCS | 185 MDIO_MMDREG_DEVS_PCS |
185 MDIO_MMDREG_DEVS_PMAPMD | 186 MDIO_MMDREG_DEVS_PMAPMD |
186 MDIO_MMDREG_DEVS_AN); 187 MDIO_MMDREG_DEVS_AN);
187 if (!mmd_mask) { 188 else if (efx->loopback_mode == LOOPBACK_PCS)
188 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
189 MDIO_PHYXS_STATUS2);
190 return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN));
191 }
192 } else if (efx->loopback_mode == LOOPBACK_PCS)
193 mmd_mask &= ~(MDIO_MMDREG_DEVS_PCS | 189 mmd_mask &= ~(MDIO_MMDREG_DEVS_PCS |
194 MDIO_MMDREG_DEVS_PMAPMD | 190 MDIO_MMDREG_DEVS_PMAPMD |
195 MDIO_MMDREG_DEVS_AN); 191 MDIO_MMDREG_DEVS_AN);
@@ -197,6 +193,13 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
197 mmd_mask &= ~(MDIO_MMDREG_DEVS_PMAPMD | 193 mmd_mask &= ~(MDIO_MMDREG_DEVS_PMAPMD |
198 MDIO_MMDREG_DEVS_AN); 194 MDIO_MMDREG_DEVS_AN);
199 195
196 if (!mmd_mask) {
 196 /* Use presence of XGMII faults in lieu of link state */
198 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
199 MDIO_PHYXS_STATUS2);
200 return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN));
201 }
202
200 while (mmd_mask) { 203 while (mmd_mask) {
201 if (mmd_mask & 1) { 204 if (mmd_mask & 1) {
202 /* Double reads because link state is latched, and a 205 /* Double reads because link state is latched, and a
@@ -263,7 +266,7 @@ void mdio_clause45_set_mmds_lpower(struct efx_nic *efx,
263 } 266 }
264} 267}
265 268
266static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp) 269static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr)
267{ 270{
268 int phy_id = efx->mii.phy_id; 271 int phy_id = efx->mii.phy_id;
269 u32 result = 0; 272 u32 result = 0;
@@ -278,9 +281,6 @@ static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp)
278 result |= ADVERTISED_100baseT_Half; 281 result |= ADVERTISED_100baseT_Half;
279 if (reg & ADVERTISE_100FULL) 282 if (reg & ADVERTISE_100FULL)
280 result |= ADVERTISED_100baseT_Full; 283 result |= ADVERTISED_100baseT_Full;
281 if (reg & LPA_RESV)
282 result |= xnp;
283
284 return result; 284 return result;
285} 285}
286 286
@@ -310,7 +310,7 @@ void mdio_clause45_get_settings(struct efx_nic *efx,
310 */ 310 */
311void mdio_clause45_get_settings_ext(struct efx_nic *efx, 311void mdio_clause45_get_settings_ext(struct efx_nic *efx,
312 struct ethtool_cmd *ecmd, 312 struct ethtool_cmd *ecmd,
313 u32 xnp, u32 xnp_lpa) 313 u32 npage_adv, u32 npage_lpa)
314{ 314{
315 int phy_id = efx->mii.phy_id; 315 int phy_id = efx->mii.phy_id;
316 int reg; 316 int reg;
@@ -361,8 +361,8 @@ void mdio_clause45_get_settings_ext(struct efx_nic *efx,
361 ecmd->autoneg = AUTONEG_ENABLE; 361 ecmd->autoneg = AUTONEG_ENABLE;
362 ecmd->advertising |= 362 ecmd->advertising |=
363 ADVERTISED_Autoneg | 363 ADVERTISED_Autoneg |
364 mdio_clause45_get_an(efx, 364 mdio_clause45_get_an(efx, MDIO_AN_ADVERTISE) |
365 MDIO_AN_ADVERTISE, xnp); 365 npage_adv;
366 } else 366 } else
367 ecmd->autoneg = AUTONEG_DISABLE; 367 ecmd->autoneg = AUTONEG_DISABLE;
368 } else 368 } else
@@ -371,27 +371,30 @@ void mdio_clause45_get_settings_ext(struct efx_nic *efx,
371 if (ecmd->autoneg) { 371 if (ecmd->autoneg) {
372 /* If AN is complete, report best common mode, 372 /* If AN is complete, report best common mode,
373 * otherwise report best advertised mode. */ 373 * otherwise report best advertised mode. */
374 u32 common = ecmd->advertising; 374 u32 modes = 0;
375 if (mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, 375 if (mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
376 MDIO_MMDREG_STAT1) & 376 MDIO_MMDREG_STAT1) &
377 (1 << MDIO_AN_STATUS_AN_DONE_LBN)) { 377 (1 << MDIO_AN_STATUS_AN_DONE_LBN))
378 common &= mdio_clause45_get_an(efx, MDIO_AN_LPA, 378 modes = (ecmd->advertising &
379 xnp_lpa); 379 (mdio_clause45_get_an(efx, MDIO_AN_LPA) |
380 } 380 npage_lpa));
381 if (common & ADVERTISED_10000baseT_Full) { 381 if (modes == 0)
382 modes = ecmd->advertising;
383
384 if (modes & ADVERTISED_10000baseT_Full) {
382 ecmd->speed = SPEED_10000; 385 ecmd->speed = SPEED_10000;
383 ecmd->duplex = DUPLEX_FULL; 386 ecmd->duplex = DUPLEX_FULL;
384 } else if (common & (ADVERTISED_1000baseT_Full | 387 } else if (modes & (ADVERTISED_1000baseT_Full |
385 ADVERTISED_1000baseT_Half)) { 388 ADVERTISED_1000baseT_Half)) {
386 ecmd->speed = SPEED_1000; 389 ecmd->speed = SPEED_1000;
387 ecmd->duplex = !!(common & ADVERTISED_1000baseT_Full); 390 ecmd->duplex = !!(modes & ADVERTISED_1000baseT_Full);
388 } else if (common & (ADVERTISED_100baseT_Full | 391 } else if (modes & (ADVERTISED_100baseT_Full |
389 ADVERTISED_100baseT_Half)) { 392 ADVERTISED_100baseT_Half)) {
390 ecmd->speed = SPEED_100; 393 ecmd->speed = SPEED_100;
391 ecmd->duplex = !!(common & ADVERTISED_100baseT_Full); 394 ecmd->duplex = !!(modes & ADVERTISED_100baseT_Full);
392 } else { 395 } else {
393 ecmd->speed = SPEED_10; 396 ecmd->speed = SPEED_10;
394 ecmd->duplex = !!(common & ADVERTISED_10baseT_Full); 397 ecmd->duplex = !!(modes & ADVERTISED_10baseT_Full);
395 } 398 }
396 } else { 399 } else {
397 /* Report forced settings */ 400 /* Report forced settings */
@@ -415,7 +418,7 @@ int mdio_clause45_set_settings(struct efx_nic *efx,
415 int phy_id = efx->mii.phy_id; 418 int phy_id = efx->mii.phy_id;
416 struct ethtool_cmd prev; 419 struct ethtool_cmd prev;
417 u32 required; 420 u32 required;
418 int ctrl1_bits, reg; 421 int reg;
419 422
420 efx->phy_op->get_settings(efx, &prev); 423 efx->phy_op->get_settings(efx, &prev);
421 424
@@ -430,99 +433,83 @@ int mdio_clause45_set_settings(struct efx_nic *efx,
430 if (prev.port != PORT_TP || ecmd->port != PORT_TP) 433 if (prev.port != PORT_TP || ecmd->port != PORT_TP)
431 return -EINVAL; 434 return -EINVAL;
432 435
433 /* Check that PHY supports these settings and work out the 436 /* Check that PHY supports these settings */
434 * basic control bits */ 437 if (ecmd->autoneg) {
435 if (ecmd->duplex) { 438 required = SUPPORTED_Autoneg;
439 } else if (ecmd->duplex) {
436 switch (ecmd->speed) { 440 switch (ecmd->speed) {
437 case SPEED_10: 441 case SPEED_10: required = SUPPORTED_10baseT_Full; break;
438 ctrl1_bits = BMCR_FULLDPLX; 442 case SPEED_100: required = SUPPORTED_100baseT_Full; break;
439 required = SUPPORTED_10baseT_Full; 443 default: return -EINVAL;
440 break;
441 case SPEED_100:
442 ctrl1_bits = BMCR_SPEED100 | BMCR_FULLDPLX;
443 required = SUPPORTED_100baseT_Full;
444 break;
445 case SPEED_1000:
446 ctrl1_bits = BMCR_SPEED1000 | BMCR_FULLDPLX;
447 required = SUPPORTED_1000baseT_Full;
448 break;
449 case SPEED_10000:
450 ctrl1_bits = (BMCR_SPEED1000 | BMCR_SPEED100 |
451 BMCR_FULLDPLX);
452 required = SUPPORTED_10000baseT_Full;
453 break;
454 default:
455 return -EINVAL;
456 } 444 }
457 } else { 445 } else {
458 switch (ecmd->speed) { 446 switch (ecmd->speed) {
459 case SPEED_10: 447 case SPEED_10: required = SUPPORTED_10baseT_Half; break;
460 ctrl1_bits = 0; 448 case SPEED_100: required = SUPPORTED_100baseT_Half; break;
461 required = SUPPORTED_10baseT_Half; 449 default: return -EINVAL;
462 break;
463 case SPEED_100:
464 ctrl1_bits = BMCR_SPEED100;
465 required = SUPPORTED_100baseT_Half;
466 break;
467 case SPEED_1000:
468 ctrl1_bits = BMCR_SPEED1000;
469 required = SUPPORTED_1000baseT_Half;
470 break;
471 default:
472 return -EINVAL;
473 } 450 }
474 } 451 }
475 if (ecmd->autoneg)
476 required |= SUPPORTED_Autoneg;
477 required |= ecmd->advertising; 452 required |= ecmd->advertising;
478 if (required & ~prev.supported) 453 if (required & ~prev.supported)
479 return -EINVAL; 454 return -EINVAL;
480 455
481 /* Set the basic control bits */ 456 if (ecmd->autoneg) {
482 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, 457 bool xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full
483 MDIO_MMDREG_CTRL1); 458 || EFX_WORKAROUND_13204(efx));
484 reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX | 0x003c); 459
485 reg |= ctrl1_bits; 460 /* Set up the base page */
486 mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, MDIO_MMDREG_CTRL1, 461 reg = ADVERTISE_CSMA;
487 reg); 462 if (ecmd->advertising & ADVERTISED_10baseT_Half)
488 463 reg |= ADVERTISE_10HALF;
489 /* Set the AN registers */ 464 if (ecmd->advertising & ADVERTISED_10baseT_Full)
490 if (ecmd->autoneg != prev.autoneg || 465 reg |= ADVERTISE_10FULL;
491 ecmd->advertising != prev.advertising) { 466 if (ecmd->advertising & ADVERTISED_100baseT_Half)
492 bool xnp = false; 467 reg |= ADVERTISE_100HALF;
493 468 if (ecmd->advertising & ADVERTISED_100baseT_Full)
494 if (efx->phy_op->set_xnp_advertise) 469 reg |= ADVERTISE_100FULL;
495 xnp = efx->phy_op->set_xnp_advertise(efx, 470 if (xnp)
496 ecmd->advertising); 471 reg |= ADVERTISE_RESV;
497 472 else if (ecmd->advertising & (ADVERTISED_1000baseT_Half |
498 if (ecmd->autoneg) { 473 ADVERTISED_1000baseT_Full))
499 reg = 0; 474 reg |= ADVERTISE_NPAGE;
500 if (ecmd->advertising & ADVERTISED_10baseT_Half) 475 reg |= efx_fc_advertise(efx->wanted_fc);
501 reg |= ADVERTISE_10HALF; 476 mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
502 if (ecmd->advertising & ADVERTISED_10baseT_Full) 477 MDIO_AN_ADVERTISE, reg);
503 reg |= ADVERTISE_10FULL; 478
504 if (ecmd->advertising & ADVERTISED_100baseT_Half) 479 /* Set up the (extended) next page if necessary */
505 reg |= ADVERTISE_100HALF; 480 if (efx->phy_op->set_npage_adv)
506 if (ecmd->advertising & ADVERTISED_100baseT_Full) 481 efx->phy_op->set_npage_adv(efx, ecmd->advertising);
507 reg |= ADVERTISE_100FULL;
508 if (xnp)
509 reg |= ADVERTISE_RESV;
510 mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
511 MDIO_AN_ADVERTISE, reg);
512 }
513 482
483 /* Enable and restart AN */
514 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, 484 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
515 MDIO_MMDREG_CTRL1); 485 MDIO_MMDREG_CTRL1);
516 if (ecmd->autoneg) 486 reg |= BMCR_ANENABLE;
517 reg |= BMCR_ANENABLE | BMCR_ANRESTART; 487 if (!(EFX_WORKAROUND_15195(efx) &&
518 else 488 LOOPBACK_MASK(efx) & efx->phy_op->loopbacks))
519 reg &= ~BMCR_ANENABLE; 489 reg |= BMCR_ANRESTART;
520 if (xnp) 490 if (xnp)
521 reg |= 1 << MDIO_AN_CTRL_XNP_LBN; 491 reg |= 1 << MDIO_AN_CTRL_XNP_LBN;
522 else 492 else
523 reg &= ~(1 << MDIO_AN_CTRL_XNP_LBN); 493 reg &= ~(1 << MDIO_AN_CTRL_XNP_LBN);
524 mdio_clause45_write(efx, phy_id, MDIO_MMD_AN, 494 mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
525 MDIO_MMDREG_CTRL1, reg); 495 MDIO_MMDREG_CTRL1, reg);
496 } else {
497 /* Disable AN */
498 mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN,
499 MDIO_MMDREG_CTRL1,
500 __ffs(BMCR_ANENABLE), false);
501
502 /* Set the basic control bits */
503 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
504 MDIO_MMDREG_CTRL1);
505 reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX |
506 0x003c);
507 if (ecmd->speed == SPEED_100)
508 reg |= BMCR_SPEED100;
509 if (ecmd->duplex)
510 reg |= BMCR_FULLDPLX;
511 mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
512 MDIO_MMDREG_CTRL1, reg);
526 } 513 }
527 514
528 return 0; 515 return 0;
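In the autoneg branch of mdio_clause45_set_settings() above, the ethtool advertising mask is translated bit-by-bit into the AN base page, with ADVERTISE_RESV set when extended next pages will carry the >100M modes and ADVERTISE_NPAGE otherwise. The sketch below shows only that translation step; the ADV_*/REG_* constants are local illustrations, not the <linux/mii.h> values.

/* Self-contained sketch of the base-page construction done in
 * mdio_clause45_set_settings() above.  All constants are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define ADV_10_HALF	(1u << 0)
#define ADV_10_FULL	(1u << 1)
#define ADV_100_HALF	(1u << 2)
#define ADV_100_FULL	(1u << 3)
#define ADV_1000_FULL	(1u << 5)
#define ADV_10000_FULL	(1u << 6)

#define REG_CSMA	0x0001
#define REG_10HALF	0x0020
#define REG_10FULL	0x0040
#define REG_100HALF	0x0080
#define REG_100FULL	0x0100
#define REG_RESV	0x1000	/* ">100M modes follow via extended next pages" */
#define REG_NPAGE	0x8000

static uint16_t build_base_page(uint32_t advertising, int use_xnp)
{
	uint16_t reg = REG_CSMA;

	if (advertising & ADV_10_HALF)   reg |= REG_10HALF;
	if (advertising & ADV_10_FULL)   reg |= REG_10FULL;
	if (advertising & ADV_100_HALF)  reg |= REG_100HALF;
	if (advertising & ADV_100_FULL)  reg |= REG_100FULL;

	/* Modes above 100M can only be advertised on (extended) next pages */
	if (use_xnp)
		reg |= REG_RESV;
	else if (advertising & ADV_1000_FULL)
		reg |= REG_NPAGE;

	return reg;
}

int main(void)
{
	printf("0x%04x\n", build_base_page(ADV_100_FULL | ADV_10000_FULL, 1));
	return 0;
}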
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 09bf801d0569..8ba49773ce7e 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -155,7 +155,8 @@
155#define MDIO_AN_XNP 22 155#define MDIO_AN_XNP 22
156#define MDIO_AN_LPA_XNP 25 156#define MDIO_AN_LPA_XNP 25
157 157
158#define MDIO_AN_10GBT_ADVERTISE 32 158#define MDIO_AN_10GBT_CTRL 32
159#define MDIO_AN_10GBT_CTRL_ADV_10G_LBN 12
159#define MDIO_AN_10GBT_STATUS (33) 160#define MDIO_AN_10GBT_STATUS (33)
160#define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */ 161#define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */
161#define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */ 162#define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 5f255f75754e..e019ad1fb9a0 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -566,7 +566,7 @@ struct efx_mac_operations {
566 * @poll: Poll for hardware state. Serialised by the mac_lock. 566 * @poll: Poll for hardware state. Serialised by the mac_lock.
567 * @get_settings: Get ethtool settings. Serialised by the mac_lock. 567 * @get_settings: Get ethtool settings. Serialised by the mac_lock.
568 * @set_settings: Set ethtool settings. Serialised by the mac_lock. 568 * @set_settings: Set ethtool settings. Serialised by the mac_lock.
569 * @set_xnp_advertise: Set abilities advertised in Extended Next Page 569 * @set_npage_adv: Set abilities advertised in (Extended) Next Page
570 * (only needed where AN bit is set in mmds) 570 * (only needed where AN bit is set in mmds)
571 * @num_tests: Number of PHY-specific tests/results 571 * @num_tests: Number of PHY-specific tests/results
572 * @test_names: Names of the tests/results 572 * @test_names: Names of the tests/results
@@ -586,7 +586,7 @@ struct efx_phy_operations {
586 struct ethtool_cmd *ecmd); 586 struct ethtool_cmd *ecmd);
587 int (*set_settings) (struct efx_nic *efx, 587 int (*set_settings) (struct efx_nic *efx,
588 struct ethtool_cmd *ecmd); 588 struct ethtool_cmd *ecmd);
589 bool (*set_xnp_advertise) (struct efx_nic *efx, u32); 589 void (*set_npage_adv) (struct efx_nic *efx, u32);
590 u32 num_tests; 590 u32 num_tests;
591 const char *const *test_names; 591 const char *const *test_names;
592 int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); 592 int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
@@ -754,8 +754,7 @@ union efx_multicast_hash {
754 * &struct net_device_stats. 754 * &struct net_device_stats.
755 * @stats_buffer: DMA buffer for statistics 755 * @stats_buffer: DMA buffer for statistics
756 * @stats_lock: Statistics update lock. Serialises statistics fetches 756 * @stats_lock: Statistics update lock. Serialises statistics fetches
757 * @stats_enabled: Temporarily disable statistics fetches. 757 * @stats_disable_count: Nest count for disabling statistics fetches
758 * Serialised by @stats_lock
759 * @mac_op: MAC interface 758 * @mac_op: MAC interface
760 * @mac_address: Permanent MAC address 759 * @mac_address: Permanent MAC address
761 * @phy_type: PHY type 760 * @phy_type: PHY type
@@ -837,7 +836,7 @@ struct efx_nic {
837 struct efx_mac_stats mac_stats; 836 struct efx_mac_stats mac_stats;
838 struct efx_buffer stats_buffer; 837 struct efx_buffer stats_buffer;
839 spinlock_t stats_lock; 838 spinlock_t stats_lock;
840 bool stats_enabled; 839 unsigned int stats_disable_count;
841 840
842 struct efx_mac_operations *mac_op; 841 struct efx_mac_operations *mac_op;
843 unsigned char mac_address[ETH_ALEN]; 842 unsigned char mac_address[ETH_ALEN];
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index 58c493ef81bb..07e855c148bc 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -17,7 +17,6 @@ extern struct efx_phy_operations falcon_sfx7101_phy_ops;
17extern struct efx_phy_operations falcon_sft9001_phy_ops; 17extern struct efx_phy_operations falcon_sft9001_phy_ops;
18 18
19extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink); 19extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink);
20extern void tenxpress_crc_err(struct efx_nic *efx);
21 20
22/**************************************************************************** 21/****************************************************************************
23 * Exported functions from the driver for XFP optical PHYs 22 * Exported functions from the driver for XFP optical PHYs
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index dba0d64d50cd..0a598084c513 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -665,6 +665,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
665{ 665{
666 enum efx_loopback_mode loopback_mode = efx->loopback_mode; 666 enum efx_loopback_mode loopback_mode = efx->loopback_mode;
667 int phy_mode = efx->phy_mode; 667 int phy_mode = efx->phy_mode;
668 enum reset_type reset_method = RESET_TYPE_INVISIBLE;
668 struct ethtool_cmd ecmd; 669 struct ethtool_cmd ecmd;
669 struct efx_channel *channel; 670 struct efx_channel *channel;
670 int rc_test = 0, rc_reset = 0, rc; 671 int rc_test = 0, rc_reset = 0, rc;
@@ -718,21 +719,21 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
718 mutex_unlock(&efx->mac_lock); 719 mutex_unlock(&efx->mac_lock);
719 720
720 /* free up all consumers of SRAM (including all the queues) */ 721 /* free up all consumers of SRAM (including all the queues) */
721 efx_reset_down(efx, &ecmd); 722 efx_reset_down(efx, reset_method, &ecmd);
722 723
723 rc = efx_test_chip(efx, tests); 724 rc = efx_test_chip(efx, tests);
724 if (rc && !rc_test) 725 if (rc && !rc_test)
725 rc_test = rc; 726 rc_test = rc;
726 727
727 /* reset the chip to recover from the register test */ 728 /* reset the chip to recover from the register test */
728 rc_reset = falcon_reset_hw(efx, RESET_TYPE_ALL); 729 rc_reset = falcon_reset_hw(efx, reset_method);
729 730
730 /* Ensure that the phy is powered and out of loopback 731 /* Ensure that the phy is powered and out of loopback
731 * for the bist and loopback tests */ 732 * for the bist and loopback tests */
732 efx->phy_mode &= ~PHY_MODE_LOW_POWER; 733 efx->phy_mode &= ~PHY_MODE_LOW_POWER;
733 efx->loopback_mode = LOOPBACK_NONE; 734 efx->loopback_mode = LOOPBACK_NONE;
734 735
735 rc = efx_reset_up(efx, &ecmd, rc_reset == 0); 736 rc = efx_reset_up(efx, reset_method, &ecmd, rc_reset == 0);
736 if (rc && !rc_reset) 737 if (rc && !rc_reset)
737 rc_reset = rc; 738 rc_reset = rc;
738 739
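The selftest hunks thread a single reset_method (RESET_TYPE_INVISIBLE) through efx_reset_down(), falcon_reset_hw() and efx_reset_up(), so the PHY is no longer torn down and re-initialised around the register test. A stand-in sketch of that tear-down/test/bring-up shape, with hypothetical helper names:

/* Sketch only: one reset method drives both halves of the sequence. */
#include <stdio.h>

enum reset_type { RESET_INVISIBLE, RESET_ALL };

static void reset_down(enum reset_type m) { printf("down (%d)\n", m); }
static int  chip_test(void)               { return 0; }
static int  reset_hw(enum reset_type m)   { printf("hw reset (%d)\n", m); return 0; }
static void reset_up(enum reset_type m, int ok) { printf("up (%d, ok=%d)\n", m, ok); }

int main(void)
{
	enum reset_type method = RESET_INVISIBLE;	/* PHY state survives */
	int rc_test, rc_hw;

	reset_down(method);		/* frees SRAM consumers, keeps the PHY up */
	rc_test = chip_test();
	rc_hw = reset_hw(method);	/* recover from the register test */
	reset_up(method, rc_hw == 0);	/* restart rx/tx only if the reset worked */
	return rc_test ? rc_test : rc_hw;
}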
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index 16b80acb9992..cb25ae5b257a 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -186,19 +186,22 @@ static int sfn4111t_reset(struct efx_nic *efx)
186{ 186{
187 efx_oword_t reg; 187 efx_oword_t reg;
188 188
189 /* GPIO pins are also used for I2C, so block that temporarily */ 189 /* GPIO 3 and the GPIO register are shared with I2C, so block that */
190 mutex_lock(&efx->i2c_adap.bus_lock); 190 mutex_lock(&efx->i2c_adap.bus_lock);
191 191
192 /* Pull RST_N (GPIO 2) low then let it up again, setting the
193 * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
194 * output enables; the output levels should always be 0 (low)
195 * and we rely on external pull-ups. */
192 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 196 falcon_read(efx, &reg, GPIO_CTL_REG_KER);
193 EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true); 197 EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true);
194 EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, false);
195 falcon_write(efx, &reg, GPIO_CTL_REG_KER); 198 falcon_write(efx, &reg, GPIO_CTL_REG_KER);
196 msleep(1000); 199 msleep(1000);
197 EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, true); 200 EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, false);
198 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, true); 201 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN,
199 EFX_SET_OWORD_FIELD(reg, GPIO3_OUT, 202 !!(efx->phy_mode & PHY_MODE_SPECIAL));
200 !(efx->phy_mode & PHY_MODE_SPECIAL));
201 falcon_write(efx, &reg, GPIO_CTL_REG_KER); 203 falcon_write(efx, &reg, GPIO_CTL_REG_KER);
204 msleep(1);
202 205
203 mutex_unlock(&efx->i2c_adap.bus_lock); 206 mutex_unlock(&efx->i2c_adap.bus_lock);
204 207
@@ -232,12 +235,18 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
232 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { 235 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
233 err = -EBUSY; 236 err = -EBUSY;
234 } else { 237 } else {
238 /* Reset the PHY, reconfigure the MAC and enable/disable
239 * MAC stats accordingly. */
235 efx->phy_mode = new_mode; 240 efx->phy_mode = new_mode;
241 if (new_mode & PHY_MODE_SPECIAL)
242 efx_stats_disable(efx);
236 if (efx->board_info.type == EFX_BOARD_SFE4001) 243 if (efx->board_info.type == EFX_BOARD_SFE4001)
237 err = sfe4001_poweron(efx); 244 err = sfe4001_poweron(efx);
238 else 245 else
239 err = sfn4111t_reset(efx); 246 err = sfn4111t_reset(efx);
240 efx_reconfigure_port(efx); 247 efx_reconfigure_port(efx);
248 if (!(new_mode & PHY_MODE_SPECIAL))
249 efx_stats_enable(efx);
241 } 250 }
242 rtnl_unlock(); 251 rtnl_unlock();
243 252
@@ -326,6 +335,11 @@ int sfe4001_init(struct efx_nic *efx)
326 efx->board_info.monitor = sfe4001_check_hw; 335 efx->board_info.monitor = sfe4001_check_hw;
327 efx->board_info.fini = sfe4001_fini; 336 efx->board_info.fini = sfe4001_fini;
328 337
338 if (efx->phy_mode & PHY_MODE_SPECIAL) {
339 /* PHY won't generate a 156.25 MHz clock and MAC stats fetch
340 * will fail. */
341 efx_stats_disable(efx);
342 }
329 rc = sfe4001_poweron(efx); 343 rc = sfe4001_poweron(efx);
330 if (rc) 344 if (rc)
331 goto fail_ioexp; 345 goto fail_ioexp;
@@ -372,17 +386,25 @@ static void sfn4111t_fini(struct efx_nic *efx)
372 i2c_unregister_device(efx->board_info.hwmon_client); 386 i2c_unregister_device(efx->board_info.hwmon_client);
373} 387}
374 388
375static struct i2c_board_info sfn4111t_hwmon_info = { 389static struct i2c_board_info sfn4111t_a0_hwmon_info = {
376 I2C_BOARD_INFO("max6647", 0x4e), 390 I2C_BOARD_INFO("max6647", 0x4e),
377 .irq = -1, 391 .irq = -1,
378}; 392};
379 393
394static struct i2c_board_info sfn4111t_r5_hwmon_info = {
395 I2C_BOARD_INFO("max6646", 0x4d),
396 .irq = -1,
397};
398
380int sfn4111t_init(struct efx_nic *efx) 399int sfn4111t_init(struct efx_nic *efx)
381{ 400{
382 int rc; 401 int rc;
383 402
384 efx->board_info.hwmon_client = 403 efx->board_info.hwmon_client =
385 i2c_new_device(&efx->i2c_adap, &sfn4111t_hwmon_info); 404 i2c_new_device(&efx->i2c_adap,
405 (efx->board_info.minor < 5) ?
406 &sfn4111t_a0_hwmon_info :
407 &sfn4111t_r5_hwmon_info);
386 if (!efx->board_info.hwmon_client) 408 if (!efx->board_info.hwmon_client)
387 return -EIO; 409 return -EIO;
388 410
@@ -394,8 +416,10 @@ int sfn4111t_init(struct efx_nic *efx)
394 if (rc) 416 if (rc)
395 goto fail_hwmon; 417 goto fail_hwmon;
396 418
397 if (efx->phy_mode & PHY_MODE_SPECIAL) 419 if (efx->phy_mode & PHY_MODE_SPECIAL) {
420 efx_stats_disable(efx);
398 sfn4111t_reset(efx); 421 sfn4111t_reset(efx);
422 }
399 423
400 return 0; 424 return 0;
401 425
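sfn4111t_init() above now registers a different hwmon sensor depending on the board's minor revision: a MAX6647 at 0x4e on pre-rev-5 boards and a MAX6646 at 0x4d from rev 5 on. A tiny sketch of that revision-keyed selection; the struct and helper are illustrative only, not the driver's i2c_board_info plumbing.

/* Sketch: pick board-revision-specific hwmon info, as the hunk above does. */
#include <stdio.h>

struct hwmon_info {
	const char *chip;
	unsigned char addr;
};

static const struct hwmon_info a0_info = { "max6647", 0x4e };
static const struct hwmon_info r5_info = { "max6646", 0x4d };

static const struct hwmon_info *pick_hwmon(int board_minor)
{
	/* boards before revision 5 carry the MAX6647; later ones the MAX6646 */
	return (board_minor < 5) ? &a0_info : &r5_info;
}

int main(void)
{
	const struct hwmon_info *info = pick_hwmon(5);

	printf("%s @ 0x%02x\n", info->chip, info->addr);
	return 0;
}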
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 9ecb77da9545..f0efd246962c 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -67,6 +67,8 @@
67#define PMA_PMD_EXT_CLK312_WIDTH 1 67#define PMA_PMD_EXT_CLK312_WIDTH 1
68#define PMA_PMD_EXT_LPOWER_LBN 12 68#define PMA_PMD_EXT_LPOWER_LBN 12
69#define PMA_PMD_EXT_LPOWER_WIDTH 1 69#define PMA_PMD_EXT_LPOWER_WIDTH 1
70#define PMA_PMD_EXT_ROBUST_LBN 14
71#define PMA_PMD_EXT_ROBUST_WIDTH 1
70#define PMA_PMD_EXT_SSR_LBN 15 72#define PMA_PMD_EXT_SSR_LBN 15
71#define PMA_PMD_EXT_SSR_WIDTH 1 73#define PMA_PMD_EXT_SSR_WIDTH 1
72 74
@@ -177,35 +179,24 @@
177#define C22EXT_STATUS_LINK_LBN 2 179#define C22EXT_STATUS_LINK_LBN 2
178#define C22EXT_STATUS_LINK_WIDTH 1 180#define C22EXT_STATUS_LINK_WIDTH 1
179 181
180#define C22EXT_MSTSLV_REG 49162 182#define C22EXT_MSTSLV_CTRL 49161
181#define C22EXT_MSTSLV_1000_HD_LBN 10 183#define C22EXT_MSTSLV_CTRL_ADV_1000_HD_LBN 8
182#define C22EXT_MSTSLV_1000_HD_WIDTH 1 184#define C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN 9
183#define C22EXT_MSTSLV_1000_FD_LBN 11 185
184#define C22EXT_MSTSLV_1000_FD_WIDTH 1 186#define C22EXT_MSTSLV_STATUS 49162
187#define C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN 10
188#define C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN 11
185 189
186/* Time to wait between powering down the LNPGA and turning off the power 190/* Time to wait between powering down the LNPGA and turning off the power
187 * rails */ 191 * rails */
188#define LNPGA_PDOWN_WAIT (HZ / 5) 192#define LNPGA_PDOWN_WAIT (HZ / 5)
189 193
190static int crc_error_reset_threshold = 100;
191module_param(crc_error_reset_threshold, int, 0644);
192MODULE_PARM_DESC(crc_error_reset_threshold,
193 "Max number of CRC errors before XAUI reset");
194
195struct tenxpress_phy_data { 194struct tenxpress_phy_data {
196 enum efx_loopback_mode loopback_mode; 195 enum efx_loopback_mode loopback_mode;
197 atomic_t bad_crc_count;
198 enum efx_phy_mode phy_mode; 196 enum efx_phy_mode phy_mode;
199 int bad_lp_tries; 197 int bad_lp_tries;
200}; 198};
201 199
202void tenxpress_crc_err(struct efx_nic *efx)
203{
204 struct tenxpress_phy_data *phy_data = efx->phy_data;
205 if (phy_data != NULL)
206 atomic_inc(&phy_data->bad_crc_count);
207}
208
209static ssize_t show_phy_short_reach(struct device *dev, 200static ssize_t show_phy_short_reach(struct device *dev,
210 struct device_attribute *attr, char *buf) 201 struct device_attribute *attr, char *buf)
211{ 202{
@@ -284,7 +275,9 @@ static int tenxpress_init(struct efx_nic *efx)
284 PMA_PMD_XCONTROL_REG); 275 PMA_PMD_XCONTROL_REG);
285 reg |= ((1 << PMA_PMD_EXT_GMII_EN_LBN) | 276 reg |= ((1 << PMA_PMD_EXT_GMII_EN_LBN) |
286 (1 << PMA_PMD_EXT_CLK_OUT_LBN) | 277 (1 << PMA_PMD_EXT_CLK_OUT_LBN) |
287 (1 << PMA_PMD_EXT_CLK312_LBN)); 278 (1 << PMA_PMD_EXT_CLK312_LBN) |
279 (1 << PMA_PMD_EXT_ROBUST_LBN));
280
288 mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, 281 mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
289 PMA_PMD_XCONTROL_REG, reg); 282 PMA_PMD_XCONTROL_REG, reg);
290 mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT, 283 mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
@@ -346,6 +339,7 @@ static int tenxpress_phy_init(struct efx_nic *efx)
346 rc = tenxpress_init(efx); 339 rc = tenxpress_init(efx);
347 if (rc < 0) 340 if (rc < 0)
348 goto fail; 341 goto fail;
342 mdio_clause45_set_pause(efx);
349 343
350 if (efx->phy_type == PHY_TYPE_SFT9001B) { 344 if (efx->phy_type == PHY_TYPE_SFT9001B) {
351 rc = device_create_file(&efx->pci_dev->dev, 345 rc = device_create_file(&efx->pci_dev->dev,
@@ -376,8 +370,8 @@ static int tenxpress_special_reset(struct efx_nic *efx)
376 370
377 /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so 371 /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
378 * a special software reset can glitch the XGMAC sufficiently for stats 372 * a special software reset can glitch the XGMAC sufficiently for stats
379 * requests to fail. Since we don't often special_reset, just lock. */ 373 * requests to fail. */
380 spin_lock(&efx->stats_lock); 374 efx_stats_disable(efx);
381 375
382 /* Initiate reset */ 376 /* Initiate reset */
383 reg = mdio_clause45_read(efx, efx->mii.phy_id, 377 reg = mdio_clause45_read(efx, efx->mii.phy_id,
@@ -392,17 +386,17 @@ static int tenxpress_special_reset(struct efx_nic *efx)
392 rc = mdio_clause45_wait_reset_mmds(efx, 386 rc = mdio_clause45_wait_reset_mmds(efx,
393 TENXPRESS_REQUIRED_DEVS); 387 TENXPRESS_REQUIRED_DEVS);
394 if (rc < 0) 388 if (rc < 0)
395 goto unlock; 389 goto out;
396 390
397 /* Try and reconfigure the device */ 391 /* Try and reconfigure the device */
398 rc = tenxpress_init(efx); 392 rc = tenxpress_init(efx);
399 if (rc < 0) 393 if (rc < 0)
400 goto unlock; 394 goto out;
401 395
402 /* Wait for the XGXS state machine to churn */ 396 /* Wait for the XGXS state machine to churn */
403 mdelay(10); 397 mdelay(10);
404unlock: 398out:
405 spin_unlock(&efx->stats_lock); 399 efx_stats_enable(efx);
406 return rc; 400 return rc;
407} 401}
408 402
@@ -520,7 +514,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
520{ 514{
521 struct tenxpress_phy_data *phy_data = efx->phy_data; 515 struct tenxpress_phy_data *phy_data = efx->phy_data;
522 struct ethtool_cmd ecmd; 516 struct ethtool_cmd ecmd;
523 bool phy_mode_change, loop_reset, loop_toggle, loopback; 517 bool phy_mode_change, loop_reset;
524 518
525 if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) { 519 if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) {
526 phy_data->phy_mode = efx->phy_mode; 520 phy_data->phy_mode = efx->phy_mode;
@@ -531,12 +525,10 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
531 525
532 phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL && 526 phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL &&
533 phy_data->phy_mode != PHY_MODE_NORMAL); 527 phy_data->phy_mode != PHY_MODE_NORMAL);
534 loopback = LOOPBACK_MASK(efx) & efx->phy_op->loopbacks;
535 loop_toggle = LOOPBACK_CHANGED(phy_data, efx, efx->phy_op->loopbacks);
536 loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, efx->phy_op->loopbacks) || 528 loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, efx->phy_op->loopbacks) ||
537 LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY)); 529 LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY));
538 530
539 if (loop_reset || loop_toggle || loopback || phy_mode_change) { 531 if (loop_reset || phy_mode_change) {
540 int rc; 532 int rc;
541 533
542 efx->phy_op->get_settings(efx, &ecmd); 534 efx->phy_op->get_settings(efx, &ecmd);
@@ -551,20 +543,6 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
551 falcon_reset_xaui(efx); 543 falcon_reset_xaui(efx);
552 } 544 }
553 545
554 if (efx->phy_type != PHY_TYPE_SFX7101) {
555 /* Only change autoneg once, on coming out or
556 * going into loopback */
557 if (loop_toggle)
558 ecmd.autoneg = !loopback;
559 if (loopback) {
560 ecmd.duplex = DUPLEX_FULL;
561 if (efx->loopback_mode == LOOPBACK_GPHY)
562 ecmd.speed = SPEED_1000;
563 else
564 ecmd.speed = SPEED_10000;
565 }
566 }
567
568 rc = efx->phy_op->set_settings(efx, &ecmd); 546 rc = efx->phy_op->set_settings(efx, &ecmd);
569 WARN_ON(rc); 547 WARN_ON(rc);
570 } 548 }
@@ -623,13 +601,6 @@ static void tenxpress_phy_poll(struct efx_nic *efx)
623 601
624 if (phy_data->phy_mode != PHY_MODE_NORMAL) 602 if (phy_data->phy_mode != PHY_MODE_NORMAL)
625 return; 603 return;
626
627 if (EFX_WORKAROUND_10750(efx) &&
628 atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) {
629 EFX_ERR(efx, "Resetting XAUI due to too many CRC errors\n");
630 falcon_reset_xaui(efx);
631 atomic_set(&phy_data->bad_crc_count, 0);
632 }
633} 604}
634 605
635static void tenxpress_phy_fini(struct efx_nic *efx) 606static void tenxpress_phy_fini(struct efx_nic *efx)
@@ -772,107 +743,76 @@ reset:
772 return rc; 743 return rc;
773} 744}
774 745
775static u32 tenxpress_get_xnp_lpa(struct efx_nic *efx) 746static void
747tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
776{ 748{
777 int phy = efx->mii.phy_id; 749 int phy_id = efx->mii.phy_id;
778 u32 lpa = 0; 750 u32 adv = 0, lpa = 0;
779 int reg; 751 int reg;
780 752
781 if (efx->phy_type != PHY_TYPE_SFX7101) { 753 if (efx->phy_type != PHY_TYPE_SFX7101) {
782 reg = mdio_clause45_read(efx, phy, MDIO_MMD_C22EXT, 754 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
783 C22EXT_MSTSLV_REG); 755 C22EXT_MSTSLV_CTRL);
784 if (reg & (1 << C22EXT_MSTSLV_1000_HD_LBN)) 756 if (reg & (1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN))
757 adv |= ADVERTISED_1000baseT_Full;
758 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
759 C22EXT_MSTSLV_STATUS);
760 if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN))
785 lpa |= ADVERTISED_1000baseT_Half; 761 lpa |= ADVERTISED_1000baseT_Half;
786 if (reg & (1 << C22EXT_MSTSLV_1000_FD_LBN)) 762 if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN))
787 lpa |= ADVERTISED_1000baseT_Full; 763 lpa |= ADVERTISED_1000baseT_Full;
788 } 764 }
789 reg = mdio_clause45_read(efx, phy, MDIO_MMD_AN, MDIO_AN_10GBT_STATUS); 765 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
766 MDIO_AN_10GBT_CTRL);
767 if (reg & (1 << MDIO_AN_10GBT_CTRL_ADV_10G_LBN))
768 adv |= ADVERTISED_10000baseT_Full;
769 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
770 MDIO_AN_10GBT_STATUS);
790 if (reg & (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN)) 771 if (reg & (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN))
791 lpa |= ADVERTISED_10000baseT_Full; 772 lpa |= ADVERTISED_10000baseT_Full;
792 return lpa;
793}
794 773
795static void sfx7101_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 774 mdio_clause45_get_settings_ext(efx, ecmd, adv, lpa);
796{ 775
797 mdio_clause45_get_settings_ext(efx, ecmd, ADVERTISED_10000baseT_Full, 776 if (efx->phy_type != PHY_TYPE_SFX7101)
798 tenxpress_get_xnp_lpa(efx)); 777 ecmd->supported |= (SUPPORTED_100baseT_Full |
799 ecmd->supported |= SUPPORTED_10000baseT_Full; 778 SUPPORTED_1000baseT_Full);
800 ecmd->advertising |= ADVERTISED_10000baseT_Full; 779
780 /* In loopback, the PHY automatically brings up the correct interface,
781 * but doesn't advertise the correct speed. So override it */
782 if (efx->loopback_mode == LOOPBACK_GPHY)
783 ecmd->speed = SPEED_1000;
784 else if (LOOPBACK_MASK(efx) & efx->phy_op->loopbacks)
785 ecmd->speed = SPEED_10000;
801} 786}
802 787
803static void sft9001_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 788static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
804{ 789{
805 int phy_id = efx->mii.phy_id; 790 if (!ecmd->autoneg)
806 u32 xnp_adv = 0; 791 return -EINVAL;
807 int reg;
808
809 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
810 PMA_PMD_SPEED_ENABLE_REG);
811 if (EFX_WORKAROUND_13204(efx) && (reg & (1 << PMA_PMD_100TX_ADV_LBN)))
812 xnp_adv |= ADVERTISED_100baseT_Full;
813 if (reg & (1 << PMA_PMD_1000T_ADV_LBN))
814 xnp_adv |= ADVERTISED_1000baseT_Full;
815 if (reg & (1 << PMA_PMD_10000T_ADV_LBN))
816 xnp_adv |= ADVERTISED_10000baseT_Full;
817
818 mdio_clause45_get_settings_ext(efx, ecmd, xnp_adv,
819 tenxpress_get_xnp_lpa(efx));
820
821 ecmd->supported |= (SUPPORTED_100baseT_Half |
822 SUPPORTED_100baseT_Full |
823 SUPPORTED_1000baseT_Full);
824 792
825 /* Use the vendor defined C22ext register for duplex settings */ 793 return mdio_clause45_set_settings(efx, ecmd);
826 if (ecmd->speed != SPEED_10000 && !ecmd->autoneg) {
827 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
828 GPHY_XCONTROL_REG);
829 ecmd->duplex = (reg & (1 << GPHY_DUPLEX_LBN) ?
830 DUPLEX_FULL : DUPLEX_HALF);
831 }
832} 794}
833 795
834static int sft9001_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 796static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
835{ 797{
836 int phy_id = efx->mii.phy_id; 798 mdio_clause45_set_flag(efx, efx->mii.phy_id, MDIO_MMD_AN,
837 int rc; 799 MDIO_AN_10GBT_CTRL,
838 800 MDIO_AN_10GBT_CTRL_ADV_10G_LBN,
839 rc = mdio_clause45_set_settings(efx, ecmd); 801 advertising & ADVERTISED_10000baseT_Full);
840 if (rc)
841 return rc;
842
843 if (ecmd->speed != SPEED_10000 && !ecmd->autoneg)
844 mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
845 GPHY_XCONTROL_REG, GPHY_DUPLEX_LBN,
846 ecmd->duplex == DUPLEX_FULL);
847
848 return rc;
849} 802}
850 803
851static bool sft9001_set_xnp_advertise(struct efx_nic *efx, u32 advertising) 804static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
852{ 805{
853 int phy = efx->mii.phy_id; 806 int phy_id = efx->mii.phy_id;
854 int reg = mdio_clause45_read(efx, phy, MDIO_MMD_PMAPMD, 807
855 PMA_PMD_SPEED_ENABLE_REG); 808 mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
856 bool enabled; 809 C22EXT_MSTSLV_CTRL,
857 810 C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN,
858 reg &= ~((1 << 2) | (1 << 3)); 811 advertising & ADVERTISED_1000baseT_Full);
859 if (EFX_WORKAROUND_13204(efx) && 812 mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN,
860 (advertising & ADVERTISED_100baseT_Full)) 813 MDIO_AN_10GBT_CTRL,
861 reg |= 1 << PMA_PMD_100TX_ADV_LBN; 814 MDIO_AN_10GBT_CTRL_ADV_10G_LBN,
862 if (advertising & ADVERTISED_1000baseT_Full) 815 advertising & ADVERTISED_10000baseT_Full);
863 reg |= 1 << PMA_PMD_1000T_ADV_LBN;
864 if (advertising & ADVERTISED_10000baseT_Full)
865 reg |= 1 << PMA_PMD_10000T_ADV_LBN;
866 mdio_clause45_write(efx, phy, MDIO_MMD_PMAPMD,
867 PMA_PMD_SPEED_ENABLE_REG, reg);
868
869 enabled = (advertising &
870 (ADVERTISED_1000baseT_Half |
871 ADVERTISED_1000baseT_Full |
872 ADVERTISED_10000baseT_Full));
873 if (EFX_WORKAROUND_13204(efx))
874 enabled |= (advertising & ADVERTISED_100baseT_Full);
875 return enabled;
876} 816}
877 817
878struct efx_phy_operations falcon_sfx7101_phy_ops = { 818struct efx_phy_operations falcon_sfx7101_phy_ops = {
@@ -882,8 +822,9 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
882 .poll = tenxpress_phy_poll, 822 .poll = tenxpress_phy_poll,
883 .fini = tenxpress_phy_fini, 823 .fini = tenxpress_phy_fini,
884 .clear_interrupt = efx_port_dummy_op_void, 824 .clear_interrupt = efx_port_dummy_op_void,
885 .get_settings = sfx7101_get_settings, 825 .get_settings = tenxpress_get_settings,
886 .set_settings = mdio_clause45_set_settings, 826 .set_settings = tenxpress_set_settings,
827 .set_npage_adv = sfx7101_set_npage_adv,
887 .num_tests = ARRAY_SIZE(sfx7101_test_names), 828 .num_tests = ARRAY_SIZE(sfx7101_test_names),
888 .test_names = sfx7101_test_names, 829 .test_names = sfx7101_test_names,
889 .run_tests = sfx7101_run_tests, 830 .run_tests = sfx7101_run_tests,
@@ -898,9 +839,9 @@ struct efx_phy_operations falcon_sft9001_phy_ops = {
898 .poll = tenxpress_phy_poll, 839 .poll = tenxpress_phy_poll,
899 .fini = tenxpress_phy_fini, 840 .fini = tenxpress_phy_fini,
900 .clear_interrupt = efx_port_dummy_op_void, 841 .clear_interrupt = efx_port_dummy_op_void,
901 .get_settings = sft9001_get_settings, 842 .get_settings = tenxpress_get_settings,
902 .set_settings = sft9001_set_settings, 843 .set_settings = tenxpress_set_settings,
903 .set_xnp_advertise = sft9001_set_xnp_advertise, 844 .set_npage_adv = sft9001_set_npage_adv,
904 .num_tests = ARRAY_SIZE(sft9001_test_names), 845 .num_tests = ARRAY_SIZE(sft9001_test_names),
905 .test_names = sft9001_test_names, 846 .test_names = sft9001_test_names,
906 .run_tests = sft9001_run_tests, 847 .run_tests = sft9001_run_tests,
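The new sfx7101_set_npage_adv()/sft9001_set_npage_adv() hooks above each toggle one advertisement bit per link mode through mdio_clause45_set_flag(). Below is a self-contained read-modify-write sketch of such a flag helper, with a plain array standing in for MDIO register access; it is not the driver's implementation.

/* Sketch of a clause-45 style "set one flag" helper. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t regs[64];	/* pretend MDIO register file */

static void set_flag(unsigned int addr, unsigned int bit, bool value)
{
	uint16_t reg = regs[addr];	/* read */

	if (value)
		reg |= 1u << bit;	/* modify */
	else
		reg &= ~(1u << bit);
	regs[addr] = reg;		/* write back */
}

int main(void)
{
	/* e.g. advertise 10GBASE-T: bit 12 of AN register 32 in the hunk above */
	set_flag(32, 12, true);
	printf("reg32 = 0x%04x\n", regs[32]);
	return 0;
}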
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 82e03e1d7371..78de68f4a95b 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -18,8 +18,8 @@
18#define EFX_WORKAROUND_ALWAYS(efx) 1 18#define EFX_WORKAROUND_ALWAYS(efx) 1
19#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1) 19#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1)
20#define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx) 20#define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx)
21#define EFX_WORKAROUND_SFX7101(efx) ((efx)->phy_type == PHY_TYPE_SFX7101) 21#define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \
22#define EFX_WORKAROUND_SFT9001A(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A) 22 (efx)->phy_type == PHY_TYPE_SFT9001B)
23 23
24/* XAUI resets if link not detected */ 24/* XAUI resets if link not detected */
25#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS 25#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
@@ -29,8 +29,6 @@
29#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G 29#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
30/* TX pkt parser problem with <= 16 byte TXes */ 30/* TX pkt parser problem with <= 16 byte TXes */
31#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS 31#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
32/* Low rate CRC errors require XAUI reset */
33#define EFX_WORKAROUND_10750 EFX_WORKAROUND_SFX7101
34/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor 32/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
35 * or a PCIe error (bug 11028) */ 33 * or a PCIe error (bug 11028) */
36#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS 34#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
@@ -55,8 +53,8 @@
55#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A 53#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
56 54
57/* Need to send XNP pages for 100BaseT */ 55/* Need to send XNP pages for 100BaseT */
58#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001A 56#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001
59/* Need to keep AN enabled */ 57/* Don't restart AN in near-side loopback */
60#define EFX_WORKAROUND_13963 EFX_WORKAROUND_SFT9001A 58#define EFX_WORKAROUND_15195 EFX_WORKAROUND_SFT9001
61 59
62#endif /* EFX_WORKAROUNDS_H */ 60#endif /* EFX_WORKAROUNDS_H */
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 607efeaf0bc5..9a00e5566af7 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -1003,9 +1003,9 @@ static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1003 break; 1003 break;
1004 case SKFP_CLR_STATS: /* Zero out the driver statistics */ 1004 case SKFP_CLR_STATS: /* Zero out the driver statistics */
1005 if (!capable(CAP_NET_ADMIN)) { 1005 if (!capable(CAP_NET_ADMIN)) {
1006 memset(&lp->MacStat, 0, sizeof(lp->MacStat));
1007 } else {
1008 status = -EPERM; 1006 status = -EPERM;
1007 } else {
1008 memset(&lp->MacStat, 0, sizeof(lp->MacStat));
1009 } 1009 }
1010 break; 1010 break;
1011 default: 1011 default:
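The skfddi.c hunk fixes an inverted privilege test: the driver statistics may only be zeroed when the caller actually holds CAP_NET_ADMIN, otherwise -EPERM is returned. Minimal sketch of the corrected shape, with capable_net_admin() standing in for capable():

/* Sketch of the corrected capability check around a privileged ioctl path. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct mac_stats { unsigned long rx_errors, tx_errors; };

static bool capable_net_admin(void) { return true; }	/* stand-in for capable() */

static int clear_stats(struct mac_stats *st)
{
	if (!capable_net_admin())
		return -1;			/* -EPERM in the driver */
	memset(st, 0, sizeof(*st));		/* only the privileged path clears */
	return 0;
}

int main(void)
{
	struct mac_stats st = { 5, 7 };

	printf("%d (rx=%lu)\n", clear_stats(&st), st.rx_errors);
	return 0;
}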
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 3668e81e474d..994703cc0db3 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1403,9 +1403,6 @@ static int sky2_up(struct net_device *dev)
1403 1403
1404 } 1404 }
1405 1405
1406 if (netif_msg_ifup(sky2))
1407 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
1408
1409 netif_carrier_off(dev); 1406 netif_carrier_off(dev);
1410 1407
1411 /* must be power of 2 */ 1408 /* must be power of 2 */
@@ -1484,6 +1481,9 @@ static int sky2_up(struct net_device *dev)
1484 sky2_write32(hw, B0_IMSK, imask); 1481 sky2_write32(hw, B0_IMSK, imask);
1485 1482
1486 sky2_set_multicast(dev); 1483 sky2_set_multicast(dev);
1484
1485 if (netif_msg_ifup(sky2))
1486 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
1487 return 0; 1487 return 0;
1488 1488
1489err_out: 1489err_out:
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index f513bdf1c887..783c1a7b869e 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -953,7 +953,7 @@ smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
953 do { 953 do {
954 udelay(1); 954 udelay(1);
955 val = smsc911x_reg_read(pdata, RX_DP_CTRL); 955 val = smsc911x_reg_read(pdata, RX_DP_CTRL);
956 } while (timeout-- && (val & RX_DP_CTRL_RX_FFWD_)); 956 } while (--timeout && (val & RX_DP_CTRL_RX_FFWD_));
957 957
958 if (unlikely(timeout == 0)) 958 if (unlikely(timeout == 0))
959 SMSC_WARNING(HW, "Timed out waiting for " 959 SMSC_WARNING(HW, "Timed out waiting for "
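The smsc911x.c change switches the busy-wait from post- to pre-decrement so that the loop leaves timeout at exactly 0 when the budget is exhausted, which is what the following "timeout == 0" warning relies on. A self-contained sketch of why the post-decrement form misses the timeout; still_busy() is a stand-in for the real register poll.

/* Sketch: pre-decrement leaves the counter at 0 on expiry. */
#include <stdbool.h>
#include <stdio.h>

static bool still_busy(void) { return true; }	/* device never finishes */

int main(void)
{
	unsigned int timeout = 50;

	do {
		/* udelay(1) in the driver */
	} while (--timeout && still_busy());

	/* With "timeout--" the counter would have wrapped past zero here and
	 * this check would never fire. */
	if (timeout == 0)
		puts("timed out waiting for RX fast-forward");
	return 0;
}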
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index c14a4c6452c7..d801900a5036 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -1378,6 +1378,7 @@ static int smsc9420_open(struct net_device *dev)
1378 1378
1379 /* test the IRQ connection to the ISR */ 1379 /* test the IRQ connection to the ISR */
1380 smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq); 1380 smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq);
1381 pd->software_irq_signal = false;
1381 1382
1382 spin_lock_irqsave(&pd->int_lock, flags); 1383 spin_lock_irqsave(&pd->int_lock, flags);
1383 /* configure interrupt deassertion timer and enable interrupts */ 1384 /* configure interrupt deassertion timer and enable interrupts */
@@ -1393,8 +1394,6 @@ static int smsc9420_open(struct net_device *dev)
1393 smsc9420_pci_flush_write(pd); 1394 smsc9420_pci_flush_write(pd);
1394 1395
1395 timeout = 1000; 1396 timeout = 1000;
1396 pd->software_irq_signal = false;
1397 smp_wmb();
1398 while (timeout--) { 1397 while (timeout--) {
1399 if (pd->software_irq_signal) 1398 if (pd->software_irq_signal)
1400 break; 1399 break;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 5e2dbaee125b..8b3f84685387 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -7535,11 +7535,58 @@ static int tg3_test_msi(struct tg3 *tp)
7535 return err; 7535 return err;
7536} 7536}
7537 7537
7538static int tg3_request_firmware(struct tg3 *tp)
7539{
7540 const __be32 *fw_data;
7541
7542 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
7543 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
7544 tp->dev->name, tp->fw_needed);
7545 return -ENOENT;
7546 }
7547
7548 fw_data = (void *)tp->fw->data;
7549
7550 /* Firmware blob starts with version numbers, followed by
7551 * start address and _full_ length including BSS sections
 7552 * (which must be longer than the actual data, of course)
7553 */
7554
7555 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
7556 if (tp->fw_len < (tp->fw->size - 12)) {
7557 printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
7558 tp->dev->name, tp->fw_len, tp->fw_needed);
7559 release_firmware(tp->fw);
7560 tp->fw = NULL;
7561 return -EINVAL;
7562 }
7563
7564 /* We no longer need firmware; we have it. */
7565 tp->fw_needed = NULL;
7566 return 0;
7567}
7568
7538static int tg3_open(struct net_device *dev) 7569static int tg3_open(struct net_device *dev)
7539{ 7570{
7540 struct tg3 *tp = netdev_priv(dev); 7571 struct tg3 *tp = netdev_priv(dev);
7541 int err; 7572 int err;
7542 7573
7574 if (tp->fw_needed) {
7575 err = tg3_request_firmware(tp);
7576 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7577 if (err)
7578 return err;
7579 } else if (err) {
7580 printk(KERN_WARNING "%s: TSO capability disabled.\n",
7581 tp->dev->name);
7582 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
7583 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7584 printk(KERN_NOTICE "%s: TSO capability restored.\n",
7585 tp->dev->name);
7586 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
7587 }
7588 }
7589
7543 netif_carrier_off(tp->dev); 7590 netif_carrier_off(tp->dev);
7544 7591
7545 err = tg3_set_power_state(tp, PCI_D0); 7592 err = tg3_set_power_state(tp, PCI_D0);
@@ -12934,7 +12981,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
12934 struct net_device *dev; 12981 struct net_device *dev;
12935 struct tg3 *tp; 12982 struct tg3 *tp;
12936 int err, pm_cap; 12983 int err, pm_cap;
12937 const char *fw_name = NULL;
12938 char str[40]; 12984 char str[40];
12939 u64 dma_mask, persist_dma_mask; 12985 u64 dma_mask, persist_dma_mask;
12940 12986
@@ -13091,7 +13137,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13091 tg3_init_bufmgr_config(tp); 13137 tg3_init_bufmgr_config(tp);
13092 13138
13093 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) 13139 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13094 fw_name = FIRMWARE_TG3; 13140 tp->fw_needed = FIRMWARE_TG3;
13095 13141
13096 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 13142 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13097 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 13143 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
@@ -13104,37 +13150,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13104 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 13150 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13105 } else { 13151 } else {
13106 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG; 13152 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13107 }
13108 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
13109 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) 13153 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13110 fw_name = FIRMWARE_TG3TSO5; 13154 tp->fw_needed = FIRMWARE_TG3TSO5;
13111 else 13155 else
13112 fw_name = FIRMWARE_TG3TSO; 13156 tp->fw_needed = FIRMWARE_TG3TSO;
13113 }
13114
13115 if (fw_name) {
13116 const __be32 *fw_data;
13117
13118 err = request_firmware(&tp->fw, fw_name, &tp->pdev->dev);
13119 if (err) {
13120 printk(KERN_ERR "tg3: Failed to load firmware \"%s\"\n",
13121 fw_name);
13122 goto err_out_iounmap;
13123 }
13124
13125 fw_data = (void *)tp->fw->data;
13126
13127 /* Firmware blob starts with version numbers, followed by
13128 start address and _full_ length including BSS sections
13129 (which must be longer than the actual data, of course */
13130
13131 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
13132 if (tp->fw_len < (tp->fw->size - 12)) {
13133 printk(KERN_ERR "tg3: bogus length %d in \"%s\"\n",
13134 tp->fw_len, fw_name);
13135 err = -EINVAL;
13136 goto err_out_fw;
13137 }
13138 } 13157 }
13139 13158
13140 /* TSO is on by default on chips that support hardware TSO. 13159 /* TSO is on by default on chips that support hardware TSO.
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index ae5da603c6af..508def3e077f 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2764,6 +2764,7 @@ struct tg3 {
2764 struct ethtool_coalesce coal; 2764 struct ethtool_coalesce coal;
2765 2765
2766 /* firmware info */ 2766 /* firmware info */
2767 const char *fw_needed;
2767 const struct firmware *fw; 2768 const struct firmware *fw;
2768 u32 fw_len; /* includes BSS */ 2769 u32 fw_len; /* includes BSS */
2769}; 2770};
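tg3_request_firmware() above defers firmware loading to tg3_open() and validates that the full-length word stored in the blob header covers at least the payload actually read from disk. The sketch below reproduces only that sanity check on a raw byte buffer standing in for struct firmware; the helper names are illustrative.

/* Sketch of the firmware length sanity check from tg3_request_firmware(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t be32_at(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* Header: version, start address, full length including BSS (3 x 32-bit BE) */
static int check_fw(const unsigned char *blob, size_t size)
{
	uint32_t fw_len;

	if (size < 12)
		return -1;
	fw_len = be32_at(blob + 8);	/* third word, as in the hunk above */
	if (fw_len < size - 12)		/* must cover the payload we hold */
		return -1;
	return 0;
}

int main(void)
{
	unsigned char blob[20] = { 0 };

	blob[11] = 8;	/* fw_len = 8 == payload size, so the check passes */
	printf("%s\n", check_fw(blob, sizeof(blob)) ? "bogus length" : "ok");
	return 0;
}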
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index 1210fb3748a7..db7d5e11855d 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -9,6 +9,11 @@
9 9
10 Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} 10 Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
11 for more information on this driver. 11 for more information on this driver.
12
13 DC21143 manual "21143 PCI/CardBus 10/100Mb/s Ethernet LAN Controller
 14 Hardware Reference Manual" is currently available at:
15 http://developer.intel.com/design/network/manuals/278074.htm
16
12 Please submit bugs to http://bugzilla.kernel.org/ . 17 Please submit bugs to http://bugzilla.kernel.org/ .
13*/ 18*/
14 19
@@ -32,7 +37,11 @@ void t21142_media_task(struct work_struct *work)
32 int csr12 = ioread32(ioaddr + CSR12); 37 int csr12 = ioread32(ioaddr + CSR12);
33 int next_tick = 60*HZ; 38 int next_tick = 60*HZ;
34 int new_csr6 = 0; 39 int new_csr6 = 0;
40 int csr14 = ioread32(ioaddr + CSR14);
35 41
42 /* CSR12[LS10,LS100] are not reliable during autonegotiation */
43 if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
44 csr12 |= 6;
36 if (tulip_debug > 2) 45 if (tulip_debug > 2)
37 printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n", 46 printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n",
38 dev->name, csr12, medianame[dev->if_port]); 47 dev->name, csr12, medianame[dev->if_port]);
@@ -76,7 +85,7 @@ void t21142_media_task(struct work_struct *work)
76 new_csr6 = 0x83860000; 85 new_csr6 = 0x83860000;
77 dev->if_port = 3; 86 dev->if_port = 3;
78 iowrite32(0, ioaddr + CSR13); 87 iowrite32(0, ioaddr + CSR13);
79 iowrite32(0x0003FF7F, ioaddr + CSR14); 88 iowrite32(0x0003FFFF, ioaddr + CSR14);
80 iowrite16(8, ioaddr + CSR15); 89 iowrite16(8, ioaddr + CSR15);
81 iowrite32(1, ioaddr + CSR13); 90 iowrite32(1, ioaddr + CSR13);
82 } 91 }
@@ -132,10 +141,14 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
132 struct tulip_private *tp = netdev_priv(dev); 141 struct tulip_private *tp = netdev_priv(dev);
133 void __iomem *ioaddr = tp->base_addr; 142 void __iomem *ioaddr = tp->base_addr;
134 int csr12 = ioread32(ioaddr + CSR12); 143 int csr12 = ioread32(ioaddr + CSR12);
144 int csr14 = ioread32(ioaddr + CSR14);
135 145
146 /* CSR12[LS10,LS100] are not reliable during autonegotiation */
147 if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
148 csr12 |= 6;
136 if (tulip_debug > 1) 149 if (tulip_debug > 1)
137 printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, " 150 printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, "
138 "%8.8x.\n", dev->name, csr12, csr5, ioread32(ioaddr + CSR14)); 151 "%8.8x.\n", dev->name, csr12, csr5, csr14);
139 152
140 /* If NWay finished and we have a negotiated partner capability. */ 153 /* If NWay finished and we have a negotiated partner capability. */
141 if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) { 154 if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
@@ -143,7 +156,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
143 int negotiated = tp->sym_advertise & (csr12 >> 16); 156 int negotiated = tp->sym_advertise & (csr12 >> 16);
144 tp->lpar = csr12 >> 16; 157 tp->lpar = csr12 >> 16;
145 tp->nwayset = 1; 158 tp->nwayset = 1;
146 if (negotiated & 0x0100) dev->if_port = 5; 159 /* If partner cannot negotiate, it is 10Mbps Half Duplex */
160 if (!(csr12 & 0x8000)) dev->if_port = 0;
161 else if (negotiated & 0x0100) dev->if_port = 5;
147 else if (negotiated & 0x0080) dev->if_port = 3; 162 else if (negotiated & 0x0080) dev->if_port = 3;
148 else if (negotiated & 0x0040) dev->if_port = 4; 163 else if (negotiated & 0x0040) dev->if_port = 4;
149 else if (negotiated & 0x0020) dev->if_port = 0; 164 else if (negotiated & 0x0020) dev->if_port = 0;
@@ -214,7 +229,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
214 tp->timer.expires = RUN_AT(3*HZ); 229 tp->timer.expires = RUN_AT(3*HZ);
215 add_timer(&tp->timer); 230 add_timer(&tp->timer);
216 } else if (dev->if_port == 5) 231 } else if (dev->if_port == 5)
217 iowrite32(ioread32(ioaddr + CSR14) & ~0x080, ioaddr + CSR14); 232 iowrite32(csr14 & ~0x080, ioaddr + CSR14);
218 } else if (dev->if_port == 0 || dev->if_port == 4) { 233 } else if (dev->if_port == 0 || dev->if_port == 4) {
219 if ((csr12 & 4) == 0) 234 if ((csr12 & 4) == 0)
220 printk(KERN_INFO"%s: 21143 10baseT link beat good.\n", 235 printk(KERN_INFO"%s: 21143 10baseT link beat good.\n",
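Both 21142.c hunks add the same guard before the link-status bits are consulted: while NWay autonegotiation is still in progress, CSR12's LS10/LS100 bits are not meaningful, so they are forced to the "no link" state. A rough sketch of that check, with register semantics as described by the DC21143 manual cited in the header comment (the helper name is made up):

/* If autonegotiation is enabled (CSR14 bit 7) but NWay has not reached the
 * "negotiation complete" state (CSR12[14:12] == 101b), the LS10/LS100 bits
 * in CSR12 cannot be trusted; report "no link" on both until NWay settles. */
static int t21142_csr12_fixup(int csr12, int csr14)
{
	if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
		csr12 |= 6;	/* set bits 1 and 2: both link-status bits read as link fail */
	return csr12;
}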
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 11441225bf41..e87986867ba5 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1536,6 +1536,11 @@ static void adjust_link(struct net_device *dev)
1536static int init_phy(struct net_device *dev) 1536static int init_phy(struct net_device *dev)
1537{ 1537{
1538 struct ucc_geth_private *priv = netdev_priv(dev); 1538 struct ucc_geth_private *priv = netdev_priv(dev);
1539 struct device_node *np = priv->node;
1540 struct device_node *phy, *mdio;
1541 const phandle *ph;
1542 char bus_name[MII_BUS_ID_SIZE];
1543 const unsigned int *id;
1539 struct phy_device *phydev; 1544 struct phy_device *phydev;
1540 char phy_id[BUS_ID_SIZE]; 1545 char phy_id[BUS_ID_SIZE];
1541 1546
@@ -1543,8 +1548,18 @@ static int init_phy(struct net_device *dev)
1543 priv->oldspeed = 0; 1548 priv->oldspeed = 0;
1544 priv->oldduplex = -1; 1549 priv->oldduplex = -1;
1545 1550
1546 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, priv->ug_info->mdio_bus, 1551 ph = of_get_property(np, "phy-handle", NULL);
1547 priv->ug_info->phy_address); 1552 phy = of_find_node_by_phandle(*ph);
1553 mdio = of_get_parent(phy);
1554
1555 id = of_get_property(phy, "reg", NULL);
1556
1557 of_node_put(phy);
1558 of_node_put(mdio);
1559
1560 uec_mdio_bus_name(bus_name, mdio);
1561 snprintf(phy_id, sizeof(phy_id), "%s:%02x",
1562 bus_name, *id);
1548 1563
1549 phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface); 1564 phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface);
1550 1565
@@ -3748,6 +3763,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3748 3763
3749 ugeth->ug_info = ug_info; 3764 ugeth->ug_info = ug_info;
3750 ugeth->dev = dev; 3765 ugeth->dev = dev;
3766 ugeth->node = np;
3751 3767
3752 return 0; 3768 return 0;
3753} 3769}
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 8f699cb773ee..16cbe42ba43c 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -1186,6 +1186,8 @@ struct ucc_geth_private {
1186 int oldspeed; 1186 int oldspeed;
1187 int oldduplex; 1187 int oldduplex;
1188 int oldlink; 1188 int oldlink;
1189
1190 struct device_node *node;
1189}; 1191};
1190 1192
1191void uec_set_ethtool_ops(struct net_device *netdev); 1193void uec_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
index c001d261366b..54635911305c 100644
--- a/drivers/net/ucc_geth_mii.c
+++ b/drivers/net/ucc_geth_mii.c
@@ -156,7 +156,7 @@ static int uec_mdio_probe(struct of_device *ofdev, const struct of_device_id *ma
156 if (err) 156 if (err)
157 goto reg_map_fail; 157 goto reg_map_fail;
158 158
159 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); 159 uec_mdio_bus_name(new_bus->id, np);
160 160
161 new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL); 161 new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL);
162 162
@@ -283,3 +283,13 @@ void uec_mdio_exit(void)
283{ 283{
284 of_unregister_platform_driver(&uec_mdio_driver); 284 of_unregister_platform_driver(&uec_mdio_driver);
285} 285}
286
287void uec_mdio_bus_name(char *name, struct device_node *np)
288{
289 const u32 *reg;
290
291 reg = of_get_property(np, "reg", NULL);
292
293 snprintf(name, MII_BUS_ID_SIZE, "%s@%x", np->name, reg ? *reg : 0);
294}
295
diff --git a/drivers/net/ucc_geth_mii.h b/drivers/net/ucc_geth_mii.h
index 1e45b2028a50..840cf80235b7 100644
--- a/drivers/net/ucc_geth_mii.h
+++ b/drivers/net/ucc_geth_mii.h
@@ -97,4 +97,5 @@ int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
97int uec_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); 97int uec_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
98int __init uec_mdio_init(void); 98int __init uec_mdio_init(void);
99void uec_mdio_exit(void); 99void uec_mdio_exit(void);
100void uec_mdio_bus_name(char *name, struct device_node *np);
100#endif /* __UEC_MII_H */ 101#endif /* __UEC_MII_H */
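Taken together, the ucc_geth changes derive the PHY identifier from the device tree rather than from a platform-supplied mdio_bus/phy_address pair: uec_mdio_bus_name() builds "<node-name>@<reg>" for the MDIO controller, and init_phy() appends the PHY's own "reg" value. A small sketch of how the two pieces compose; the wrapper function and the example address are invented for illustration.

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/phy.h>
#include "ucc_geth_mii.h"	/* for uec_mdio_bus_name(), added above */

/* Hypothetical helper: builds e.g. "mdio@2320:07", matching the id that
 * uec_mdio_probe() now assigns to new_bus->id via uec_mdio_bus_name(). */
static void build_phy_id(char *phy_id, size_t len,
			 struct device_node *mdio_node, u32 phy_addr)
{
	char bus_name[MII_BUS_ID_SIZE];

	uec_mdio_bus_name(bus_name, mdio_node);		/* "<node-name>@<reg>" */
	snprintf(phy_id, len, "%s:%02x", bus_name, phy_addr);
}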
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 5385d66b306e..ced8f36ebd01 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -94,10 +94,18 @@ static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
94{ 94{
95 struct usb_device *xdev = dev->udev; 95 struct usb_device *xdev = dev->udev;
96 int ret; 96 int ret;
97 void *buffer;
98
99 buffer = kmalloc(size, GFP_NOIO);
100 if (buffer == NULL)
101 return -ENOMEM;
97 102
98 ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ, 103 ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ,
99 MCS7830_RD_BMREQ, 0x0000, index, data, 104 MCS7830_RD_BMREQ, 0x0000, index, buffer,
100 size, MCS7830_CTRL_TIMEOUT); 105 size, MCS7830_CTRL_TIMEOUT);
106 memcpy(data, buffer, size);
107 kfree(buffer);
108
101 return ret; 109 return ret;
102} 110}
103 111
@@ -105,10 +113,18 @@ static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, void *data)
105{ 113{
106 struct usb_device *xdev = dev->udev; 114 struct usb_device *xdev = dev->udev;
107 int ret; 115 int ret;
116 void *buffer;
117
118 buffer = kmalloc(size, GFP_NOIO);
119 if (buffer == NULL)
120 return -ENOMEM;
121
122 memcpy(buffer, data, size);
108 123
109 ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ, 124 ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ,
110 MCS7830_WR_BMREQ, 0x0000, index, data, 125 MCS7830_WR_BMREQ, 0x0000, index, buffer,
111 size, MCS7830_CTRL_TIMEOUT); 126 size, MCS7830_CTRL_TIMEOUT);
127 kfree(buffer);
112 return ret; 128 return ret;
113} 129}
114 130
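Both mcs7830 hunks address the same constraint: usb_control_msg() must be given a kmalloc'ed, DMA-able buffer, while callers may hand in stack or otherwise unsuitable memory, so the data is staged through a bounce buffer. A generic sketch of the read side of that pattern (the function name, request constants and 5-second timeout are illustrative, not the driver's):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

static int usb_read_reg_dma_safe(struct usb_device *udev, u8 request,
				 u16 index, void *data, u16 size)
{
	void *buf = kmalloc(size, GFP_NOIO);	/* DMA-able staging buffer */
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0x0000, index, buf, size, 5000);
	if (ret > 0)
		memcpy(data, buf, size);	/* copy back to the caller's buffer */
	kfree(buf);
	return ret;
}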
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index a75f91dc3153..c5691fdb7079 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1302,7 +1302,7 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
1302static int velocity_init_td_ring(struct velocity_info *vptr) 1302static int velocity_init_td_ring(struct velocity_info *vptr)
1303{ 1303{
1304 dma_addr_t curr; 1304 dma_addr_t curr;
1305 unsigned int j; 1305 int j;
1306 1306
1307 /* Init the TD ring entries */ 1307 /* Init the TD ring entries */
1308 for (j = 0; j < vptr->tx.numq; j++) { 1308 for (j = 0; j < vptr->tx.numq; j++) {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 43f6523c40be..c68808336c8c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -24,6 +24,7 @@
24#include <linux/virtio.h> 24#include <linux/virtio.h>
25#include <linux/virtio_net.h> 25#include <linux/virtio_net.h>
26#include <linux/scatterlist.h> 26#include <linux/scatterlist.h>
27#include <linux/if_vlan.h>
27 28
28static int napi_weight = 128; 29static int napi_weight = 128;
29module_param(napi_weight, int, 0444); 30module_param(napi_weight, int, 0444);
@@ -33,7 +34,7 @@ module_param(csum, bool, 0444);
33module_param(gso, bool, 0444); 34module_param(gso, bool, 0444);
34 35
35/* FIXME: MTU in config. */ 36/* FIXME: MTU in config. */
36#define MAX_PACKET_LEN (ETH_HLEN+ETH_DATA_LEN) 37#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
37#define GOOD_COPY_LEN 128 38#define GOOD_COPY_LEN 128
38 39
39struct virtnet_info 40struct virtnet_info
@@ -286,7 +287,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
286 skb_put(skb, MAX_PACKET_LEN); 287 skb_put(skb, MAX_PACKET_LEN);
287 288
288 hdr = skb_vnet_hdr(skb); 289 hdr = skb_vnet_hdr(skb);
289 sg_init_one(sg, hdr, sizeof(*hdr)); 290 sg_set_buf(sg, hdr, sizeof(*hdr));
290 291
291 if (vi->big_packets) { 292 if (vi->big_packets) {
292 for (i = 0; i < MAX_SKB_FRAGS; i++) { 293 for (i = 0; i < MAX_SKB_FRAGS; i++) {
@@ -487,9 +488,9 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
487 488
488 /* Encode metadata header at front. */ 489 /* Encode metadata header at front. */
489 if (vi->mergeable_rx_bufs) 490 if (vi->mergeable_rx_bufs)
490 sg_init_one(sg, mhdr, sizeof(*mhdr)); 491 sg_set_buf(sg, mhdr, sizeof(*mhdr));
491 else 492 else
492 sg_init_one(sg, hdr, sizeof(*hdr)); 493 sg_set_buf(sg, hdr, sizeof(*hdr));
493 494
494 num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; 495 num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
495 496
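The switch from sg_init_one() to sg_set_buf() in these three spots appears to matter because sg_init_one() re-initializes the entry as a one-element table and marks it as the end; with payload entries filled in behind the header, that premature end marker would truncate the chain. A minimal sketch of the intended usage (names are illustrative):

#include <linux/scatterlist.h>

static void start_sg_chain(struct scatterlist *sg, unsigned int nents,
			   void *hdr, unsigned int hdr_len)
{
	sg_init_table(sg, nents);		/* zero all entries, mark the real last one */
	sg_set_buf(&sg[0], hdr, hdr_len);	/* header first, without an end marker */
	/* entries 1..nents-1 are then filled from the skb, e.g. via skb_to_sgvec() */
}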
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index d3d37fed6893..15d9f51b292c 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -609,7 +609,7 @@ void i2400m_msg_to_dev_cancel_wait(struct i2400m *i2400m, int code)
609 spin_lock_irqsave(&i2400m->rx_lock, flags); 609 spin_lock_irqsave(&i2400m->rx_lock, flags);
610 ack_skb = i2400m->ack_skb; 610 ack_skb = i2400m->ack_skb;
611 if (ack_skb && !IS_ERR(ack_skb)) 611 if (ack_skb && !IS_ERR(ack_skb))
612 kfree(ack_skb); 612 kfree_skb(ack_skb);
613 i2400m->ack_skb = ERR_PTR(code); 613 i2400m->ack_skb = ERR_PTR(code);
614 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 614 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
615} 615}
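This one-liner, like the matching change in usb-rx.c below, fixes a mismatch between allocator and deallocator: an sk_buff must be released with kfree_skb(), which drops the reference and frees both the skb header and its data, whereas a plain kfree() on the struct corrupts state and leaks the data area. A trivial illustration:

#include <linux/skbuff.h>

static void skb_free_demo(void)
{
	struct sk_buff *skb = alloc_skb(128, GFP_KERNEL);

	if (skb)
		kfree_skb(skb);	/* correct pairing; kfree(skb) would be a bug */
}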
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c
index 626632985977..9b81af3f80a9 100644
--- a/drivers/net/wimax/i2400m/debugfs.c
+++ b/drivers/net/wimax/i2400m/debugfs.c
@@ -234,20 +234,6 @@ struct dentry *debugfs_create_i2400m_reset(
234 &fops_i2400m_reset); 234 &fops_i2400m_reset);
235} 235}
236 236
237/*
238 * Debug levels control; see debug.h
239 */
240struct d_level D_LEVEL[] = {
241 D_SUBMODULE_DEFINE(control),
242 D_SUBMODULE_DEFINE(driver),
243 D_SUBMODULE_DEFINE(debugfs),
244 D_SUBMODULE_DEFINE(fw),
245 D_SUBMODULE_DEFINE(netdev),
246 D_SUBMODULE_DEFINE(rfkill),
247 D_SUBMODULE_DEFINE(rx),
248 D_SUBMODULE_DEFINE(tx),
249};
250size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
251 237
252#define __debugfs_register(prefix, name, parent) \ 238#define __debugfs_register(prefix, name, parent) \
253do { \ 239do { \
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 5f98047e18cf..e80a0b65a754 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -707,6 +707,22 @@ void i2400m_release(struct i2400m *i2400m)
707EXPORT_SYMBOL_GPL(i2400m_release); 707EXPORT_SYMBOL_GPL(i2400m_release);
708 708
709 709
710/*
711 * Debug levels control; see debug.h
712 */
713struct d_level D_LEVEL[] = {
714 D_SUBMODULE_DEFINE(control),
715 D_SUBMODULE_DEFINE(driver),
716 D_SUBMODULE_DEFINE(debugfs),
717 D_SUBMODULE_DEFINE(fw),
718 D_SUBMODULE_DEFINE(netdev),
719 D_SUBMODULE_DEFINE(rfkill),
720 D_SUBMODULE_DEFINE(rx),
721 D_SUBMODULE_DEFINE(tx),
722};
723size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
724
725
710static 726static
711int __init i2400m_driver_init(void) 727int __init i2400m_driver_init(void)
712{ 728{
diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c
index 074cc1f89853..a314799967cf 100644
--- a/drivers/net/wimax/i2400m/usb-rx.c
+++ b/drivers/net/wimax/i2400m/usb-rx.c
@@ -184,6 +184,8 @@ void i2400mu_rx_size_maybe_shrink(struct i2400mu *i2400mu)
184 * NOTE: this function might realloc the skb (if it is too small), 184 * NOTE: this function might realloc the skb (if it is too small),
185 * so always update with the one returned. 185 * so always update with the one returned.
186 * ERR_PTR() is < 0 on error. 186 * ERR_PTR() is < 0 on error.
187 * Will return NULL if it cannot reallocate -- this can be
188 * considered a transient retryable error.
187 */ 189 */
188static 190static
189struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb) 191struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
@@ -243,8 +245,8 @@ retry:
243 if (printk_ratelimit()) 245 if (printk_ratelimit())
244 dev_err(dev, "RX: Can't reallocate skb to %d; " 246 dev_err(dev, "RX: Can't reallocate skb to %d; "
245 "RX dropped\n", rx_size); 247 "RX dropped\n", rx_size);
246 kfree(rx_skb); 248 kfree_skb(rx_skb);
247 result = 0; 249 rx_skb = NULL;
248 goto out; /* drop it...*/ 250 goto out; /* drop it...*/
249 } 251 }
250 kfree_skb(rx_skb); 252 kfree_skb(rx_skb);
@@ -344,7 +346,8 @@ int i2400mu_rxd(void *_i2400mu)
344 if (IS_ERR(rx_skb)) 346 if (IS_ERR(rx_skb))
345 goto out; 347 goto out;
346 atomic_dec(&i2400mu->rx_pending_count); 348 atomic_dec(&i2400mu->rx_pending_count);
347 if (rx_skb->len == 0) { /* some ignorable condition */ 349 if (rx_skb == NULL || rx_skb->len == 0) {
350 /* some "ignorable" condition */
348 kfree_skb(rx_skb); 351 kfree_skb(rx_skb);
349 continue; 352 continue;
350 } 353 }
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 8ef87356e083..a533ed60bb4d 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -1028,6 +1028,8 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
1028 * it's done by resetting the chip. To accomplish this we must 1028 * it's done by resetting the chip. To accomplish this we must
1029 * first cleanup any pending DMA, then restart stuff after a la 1029 * first cleanup any pending DMA, then restart stuff after a la
1030 * ath5k_init. 1030 * ath5k_init.
1031 *
1032 * Called with sc->lock.
1031 */ 1033 */
1032static int 1034static int
1033ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) 1035ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
@@ -2814,11 +2816,17 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
2814{ 2816{
2815 struct ath5k_softc *sc = hw->priv; 2817 struct ath5k_softc *sc = hw->priv;
2816 struct ieee80211_conf *conf = &hw->conf; 2818 struct ieee80211_conf *conf = &hw->conf;
2819 int ret;
2820
2821 mutex_lock(&sc->lock);
2817 2822
2818 sc->bintval = conf->beacon_int; 2823 sc->bintval = conf->beacon_int;
2819 sc->power_level = conf->power_level; 2824 sc->power_level = conf->power_level;
2820 2825
2821 return ath5k_chan_set(sc, conf->channel); 2826 ret = ath5k_chan_set(sc, conf->channel);
2827
2828 mutex_unlock(&sc->lock);
2829 return ret;
2822} 2830}
2823 2831
2824static int 2832static int
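The ath5k hunks pair a documentation comment ("Called with sc->lock.") with the corresponding locking in the mac80211 config callback, so the channel change can no longer race with other configuration paths. A skeletal version of that convention, reduced to a bare mutex and callback (all names illustrative):

#include <linux/mutex.h>

/* chan_set() is documented as "called with *lock held", so the entry point
 * takes the mutex around it and propagates the return value. */
static int config_with_lock(struct mutex *lock,
			    int (*chan_set)(void *priv), void *priv)
{
	int ret;

	mutex_lock(lock);
	ret = chan_set(priv);
	mutex_unlock(lock);

	return ret;
}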
diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c
index 04ab457a8faa..1b71b934bb5e 100644
--- a/drivers/net/wireless/ath9k/rc.c
+++ b/drivers/net/wireless/ath9k/rc.c
@@ -490,7 +490,7 @@ static inline int ath_rc_get_nextvalid_txrate(struct ath_rate_table *rate_table,
490 490
491static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw) 491static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
492{ 492{
493 if (WLAN_RC_PHY_HT(phy) & !(capflag & WLAN_RC_HT_FLAG)) 493 if (WLAN_RC_PHY_HT(phy) && !(capflag & WLAN_RC_HT_FLAG))
494 return 0; 494 return 0;
495 if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG)) 495 if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG))
496 return 0; 496 return 0;
diff --git a/drivers/net/wireless/ath9k/regd_common.h b/drivers/net/wireless/ath9k/regd_common.h
index 9112c030b1e8..6df1b3b77c25 100644
--- a/drivers/net/wireless/ath9k/regd_common.h
+++ b/drivers/net/wireless/ath9k/regd_common.h
@@ -228,7 +228,7 @@ enum {
228}; 228};
229 229
230#define REG_DOMAIN_2GHZ_MASK (REQ_MASK & \ 230#define REG_DOMAIN_2GHZ_MASK (REQ_MASK & \
231 (!(ADHOC_NO_11A | DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB))) 231 (~(ADHOC_NO_11A | DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB)))
232#define REG_DOMAIN_5GHZ_MASK REQ_MASK 232#define REG_DOMAIN_5GHZ_MASK REQ_MASK
233 233
234static struct reg_dmn_pair_mapping regDomainPairs[] = { 234static struct reg_dmn_pair_mapping regDomainPairs[] = {
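The rc.c and regd_common.h fixes are two faces of the same operator confusion: logical '!' yields only 0 or 1, so it can stand in for neither '&&' (rc.c) nor the bitwise complement '~' (regd_common.h). A short demonstration with made-up values:

static void operator_demo(void)
{
	unsigned int ht = 0x4, cap = 0x0, flag = 0x1;
	unsigned int req = 0xffff, bits = 0x30;

	int wrong_rc = ht & !(cap & flag);	/* 0x4 & 1 == 0: HT check silently skipped */
	int right_rc = ht && !(cap & flag);	/* 1: HT phy without the HT cap is rejected */
	unsigned int wrong_mask = req & !bits;	/* 0xffff & 0 == 0: mask wiped out */
	unsigned int right_mask = req & ~bits;	/* 0xffcf: only the listed bits cleared */

	(void)wrong_rc; (void)right_rc; (void)wrong_mask; (void)right_mask;
}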
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 9b60a0c5de5f..21c841847d88 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -638,12 +638,16 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
638 s8 scale_action = 0; 638 s8 scale_action = 0;
639 unsigned long flags; 639 unsigned long flags;
640 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 640 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
641 u16 fc, rate_mask; 641 u16 fc;
642 u16 rate_mask = 0;
642 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_r; 643 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_r;
643 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 644 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
644 645
645 IWL_DEBUG_RATE("enter\n"); 646 IWL_DEBUG_RATE("enter\n");
646 647
648 if (sta)
649 rate_mask = sta->supp_rates[sband->band];
650
647 /* Send management frames and broadcast/multicast data using lowest 651 /* Send management frames and broadcast/multicast data using lowest
648 * rate. */ 652 * rate. */
649 fc = le16_to_cpu(hdr->frame_control); 653 fc = le16_to_cpu(hdr->frame_control);
@@ -651,11 +655,15 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
651 is_multicast_ether_addr(hdr->addr1) || 655 is_multicast_ether_addr(hdr->addr1) ||
652 !sta || !priv_sta) { 656 !sta || !priv_sta) {
653 IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); 657 IWL_DEBUG_RATE("leave: No STA priv data to update!\n");
654 info->control.rates[0].idx = rate_lowest_index(sband, sta); 658 if (!rate_mask)
659 info->control.rates[0].idx =
660 rate_lowest_index(sband, NULL);
661 else
662 info->control.rates[0].idx =
663 rate_lowest_index(sband, sta);
655 return; 664 return;
656 } 665 }
657 666
658 rate_mask = sta->supp_rates[sband->band];
659 index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1); 667 index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1);
660 668
661 if (sband->band == IEEE80211_BAND_5GHZ) 669 if (sband->band == IEEE80211_BAND_5GHZ)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index f3f17929ca0b..27f50471aed8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -944,7 +944,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
944 } 944 }
945 945
946 /* See if there's a better rate or modulation mode to try. */ 946 /* See if there's a better rate or modulation mode to try. */
947 rs_rate_scale_perform(priv, hdr, sta, lq_sta); 947 if (sta && sta->supp_rates[sband->band])
948 rs_rate_scale_perform(priv, hdr, sta, lq_sta);
948out: 949out:
949 return; 950 return;
950} 951}
@@ -2101,14 +2102,23 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2101 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2102 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2102 struct iwl_lq_sta *lq_sta = priv_sta; 2103 struct iwl_lq_sta *lq_sta = priv_sta;
2103 int rate_idx; 2104 int rate_idx;
2105 u64 mask_bit = 0;
2104 2106
2105 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n"); 2107 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n");
2106 2108
2109 if (sta)
2110 mask_bit = sta->supp_rates[sband->band];
2111
2107 /* Send management frames and broadcast/multicast data using lowest 2112 /* Send management frames and broadcast/multicast data using lowest
2108 * rate. */ 2113 * rate. */
2109 if (!ieee80211_is_data(hdr->frame_control) || 2114 if (!ieee80211_is_data(hdr->frame_control) ||
2110 is_multicast_ether_addr(hdr->addr1) || !sta || !lq_sta) { 2115 is_multicast_ether_addr(hdr->addr1) || !sta || !lq_sta) {
2111 info->control.rates[0].idx = rate_lowest_index(sband, sta); 2116 if (!mask_bit)
2117 info->control.rates[0].idx =
2118 rate_lowest_index(sband, NULL);
2119 else
2120 info->control.rates[0].idx =
2121 rate_lowest_index(sband, sta);
2112 return; 2122 return;
2113 } 2123 }
2114 2124
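Both rate-scaling hunks apply the same defensive pattern: read the station's supported-rate mask once, up front, and fall back to rate_lowest_index(sband, NULL) when there is no station or no usable mask, instead of dereferencing per-station state. A condensed sketch, assuming the mac80211 API of this kernel generation (the helper name is invented):

#include <net/mac80211.h>

static int pick_lowest_rate(struct ieee80211_supported_band *sband,
			    struct ieee80211_sta *sta)
{
	u64 mask = sta ? sta->supp_rates[sband->band] : 0;

	/* No station or an empty mask: let mac80211 pick from the band alone. */
	return rate_lowest_index(sband, mask ? sta : NULL);
}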
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 5da6b35cd26d..b35c8813bef4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1719,6 +1719,10 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1719 priv->ucode_data_backup.len = data_size; 1719 priv->ucode_data_backup.len = data_size;
1720 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); 1720 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1721 1721
1722 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
1723 !priv->ucode_data_backup.v_addr)
1724 goto err_pci_alloc;
1725
1722 /* Initialization instructions and data */ 1726 /* Initialization instructions and data */
1723 if (init_size && init_data_size) { 1727 if (init_size && init_data_size) {
1724 priv->ucode_init.len = init_size; 1728 priv->ucode_init.len = init_size;
@@ -2482,7 +2486,7 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2482 dev_kfree_skb_any(skb); 2486 dev_kfree_skb_any(skb);
2483 2487
2484 IWL_DEBUG_MACDUMP("leave\n"); 2488 IWL_DEBUG_MACDUMP("leave\n");
2485 return 0; 2489 return NETDEV_TX_OK;
2486} 2490}
2487 2491
2488static int iwl_mac_add_interface(struct ieee80211_hw *hw, 2492static int iwl_mac_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 8c71ad4f88c5..4b35b30e493e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -224,7 +224,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
224 IWL_ERROR("Error: Response NULL in '%s'\n", 224 IWL_ERROR("Error: Response NULL in '%s'\n",
225 get_cmd_string(cmd->id)); 225 get_cmd_string(cmd->id));
226 ret = -EIO; 226 ret = -EIO;
227 goto out; 227 goto cancel;
228 } 228 }
229 229
230 ret = 0; 230 ret = 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index d64580805d6e..95d01984c80e 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -745,7 +745,7 @@ static int iwl3945_send_cmd_sync(struct iwl3945_priv *priv, struct iwl3945_host_
745 IWL_ERROR("Error: Response NULL in '%s'\n", 745 IWL_ERROR("Error: Response NULL in '%s'\n",
746 get_cmd_string(cmd->id)); 746 get_cmd_string(cmd->id));
747 ret = -EIO; 747 ret = -EIO;
748 goto out; 748 goto cancel;
749 } 749 }
750 750
751 ret = 0; 751 ret = 0;
@@ -6538,7 +6538,7 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
6538 dev_kfree_skb_any(skb); 6538 dev_kfree_skb_any(skb);
6539 6539
6540 IWL_DEBUG_MAC80211("leave\n"); 6540 IWL_DEBUG_MAC80211("leave\n");
6541 return 0; 6541 return NETDEV_TX_OK;
6542} 6542}
6543 6543
6544static int iwl3945_mac_add_interface(struct ieee80211_hw *hw, 6544static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
index e173b1b46c23..f6a79a653b7b 100644
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ b/drivers/net/wireless/libertas/hostcmd.h
@@ -32,7 +32,7 @@ struct txpd {
32 u8 pktdelay_2ms; 32 u8 pktdelay_2ms;
33 /* reserved */ 33 /* reserved */
34 u8 reserved1; 34 u8 reserved1;
35}; 35} __attribute__ ((packed));
36 36
37/* RxPD Descriptor */ 37/* RxPD Descriptor */
38struct rxpd { 38struct rxpd {
@@ -63,7 +63,7 @@ struct rxpd {
63 /* Pkt Priority */ 63 /* Pkt Priority */
64 u8 priority; 64 u8 priority;
65 u8 reserved[3]; 65 u8 reserved[3];
66}; 66} __attribute__ ((packed));
67 67
68struct cmd_header { 68struct cmd_header {
69 __le16 command; 69 __le16 command;
@@ -97,7 +97,7 @@ struct enc_key {
97struct lbs_offset_value { 97struct lbs_offset_value {
98 u32 offset; 98 u32 offset;
99 u32 value; 99 u32 value;
100}; 100} __attribute__ ((packed));
101 101
102/* Define general data structure */ 102/* Define general data structure */
103/* cmd_DS_GEN */ 103/* cmd_DS_GEN */
@@ -107,7 +107,7 @@ struct cmd_ds_gen {
107 __le16 seqnum; 107 __le16 seqnum;
108 __le16 result; 108 __le16 result;
109 void *cmdresp[0]; 109 void *cmdresp[0];
110}; 110} __attribute__ ((packed));
111 111
112#define S_DS_GEN sizeof(struct cmd_ds_gen) 112#define S_DS_GEN sizeof(struct cmd_ds_gen)
113 113
@@ -163,7 +163,7 @@ struct cmd_ds_802_11_subscribe_event {
163 * bump this up a bit. 163 * bump this up a bit.
164 */ 164 */
165 uint8_t tlv[128]; 165 uint8_t tlv[128];
166}; 166} __attribute__ ((packed));
167 167
168/* 168/*
169 * This scan handles Country Information IEs (802.11d compliant) 169 * This scan handles Country Information IEs (802.11d compliant)
@@ -180,7 +180,7 @@ struct cmd_ds_802_11_scan {
180 mrvlietypes_chanlistparamset_t ChanListParamSet; 180 mrvlietypes_chanlistparamset_t ChanListParamSet;
181 mrvlietypes_ratesparamset_t OpRateSet; 181 mrvlietypes_ratesparamset_t OpRateSet;
182#endif 182#endif
183}; 183} __attribute__ ((packed));
184 184
185struct cmd_ds_802_11_scan_rsp { 185struct cmd_ds_802_11_scan_rsp {
186 struct cmd_header hdr; 186 struct cmd_header hdr;
@@ -188,7 +188,7 @@ struct cmd_ds_802_11_scan_rsp {
188 __le16 bssdescriptsize; 188 __le16 bssdescriptsize;
189 uint8_t nr_sets; 189 uint8_t nr_sets;
190 uint8_t bssdesc_and_tlvbuffer[0]; 190 uint8_t bssdesc_and_tlvbuffer[0];
191}; 191} __attribute__ ((packed));
192 192
193struct cmd_ds_802_11_get_log { 193struct cmd_ds_802_11_get_log {
194 struct cmd_header hdr; 194 struct cmd_header hdr;
@@ -206,33 +206,33 @@ struct cmd_ds_802_11_get_log {
206 __le32 fcserror; 206 __le32 fcserror;
207 __le32 txframe; 207 __le32 txframe;
208 __le32 wepundecryptable; 208 __le32 wepundecryptable;
209}; 209} __attribute__ ((packed));
210 210
211struct cmd_ds_mac_control { 211struct cmd_ds_mac_control {
212 struct cmd_header hdr; 212 struct cmd_header hdr;
213 __le16 action; 213 __le16 action;
214 u16 reserved; 214 u16 reserved;
215}; 215} __attribute__ ((packed));
216 216
217struct cmd_ds_mac_multicast_adr { 217struct cmd_ds_mac_multicast_adr {
218 struct cmd_header hdr; 218 struct cmd_header hdr;
219 __le16 action; 219 __le16 action;
220 __le16 nr_of_adrs; 220 __le16 nr_of_adrs;
221 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE]; 221 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
222}; 222} __attribute__ ((packed));
223 223
224struct cmd_ds_802_11_authenticate { 224struct cmd_ds_802_11_authenticate {
225 u8 macaddr[ETH_ALEN]; 225 u8 macaddr[ETH_ALEN];
226 u8 authtype; 226 u8 authtype;
227 u8 reserved[10]; 227 u8 reserved[10];
228}; 228} __attribute__ ((packed));
229 229
230struct cmd_ds_802_11_deauthenticate { 230struct cmd_ds_802_11_deauthenticate {
231 struct cmd_header hdr; 231 struct cmd_header hdr;
232 232
233 u8 macaddr[ETH_ALEN]; 233 u8 macaddr[ETH_ALEN];
234 __le16 reasoncode; 234 __le16 reasoncode;
235}; 235} __attribute__ ((packed));
236 236
237struct cmd_ds_802_11_associate { 237struct cmd_ds_802_11_associate {
238 u8 peerstaaddr[6]; 238 u8 peerstaaddr[6];
@@ -251,7 +251,7 @@ struct cmd_ds_802_11_associate {
251 251
252struct cmd_ds_802_11_associate_rsp { 252struct cmd_ds_802_11_associate_rsp {
253 struct ieeetypes_assocrsp assocRsp; 253 struct ieeetypes_assocrsp assocRsp;
254}; 254} __attribute__ ((packed));
255 255
256struct cmd_ds_802_11_set_wep { 256struct cmd_ds_802_11_set_wep {
257 struct cmd_header hdr; 257 struct cmd_header hdr;
@@ -265,7 +265,7 @@ struct cmd_ds_802_11_set_wep {
265 /* 40, 128bit or TXWEP */ 265 /* 40, 128bit or TXWEP */
266 uint8_t keytype[4]; 266 uint8_t keytype[4];
267 uint8_t keymaterial[4][16]; 267 uint8_t keymaterial[4][16];
268}; 268} __attribute__ ((packed));
269 269
270struct cmd_ds_802_3_get_stat { 270struct cmd_ds_802_3_get_stat {
271 __le32 xmitok; 271 __le32 xmitok;
@@ -274,7 +274,7 @@ struct cmd_ds_802_3_get_stat {
274 __le32 rcverror; 274 __le32 rcverror;
275 __le32 rcvnobuffer; 275 __le32 rcvnobuffer;
276 __le32 rcvcrcerror; 276 __le32 rcvcrcerror;
277}; 277} __attribute__ ((packed));
278 278
279struct cmd_ds_802_11_get_stat { 279struct cmd_ds_802_11_get_stat {
280 __le32 txfragmentcnt; 280 __le32 txfragmentcnt;
@@ -294,7 +294,7 @@ struct cmd_ds_802_11_get_stat {
294 __le32 txbeacon; 294 __le32 txbeacon;
295 __le32 rxbeacon; 295 __le32 rxbeacon;
296 __le32 wepundecryptable; 296 __le32 wepundecryptable;
297}; 297} __attribute__ ((packed));
298 298
299struct cmd_ds_802_11_snmp_mib { 299struct cmd_ds_802_11_snmp_mib {
300 struct cmd_header hdr; 300 struct cmd_header hdr;
@@ -303,58 +303,58 @@ struct cmd_ds_802_11_snmp_mib {
303 __le16 oid; 303 __le16 oid;
304 __le16 bufsize; 304 __le16 bufsize;
305 u8 value[128]; 305 u8 value[128];
306}; 306} __attribute__ ((packed));
307 307
308struct cmd_ds_mac_reg_map { 308struct cmd_ds_mac_reg_map {
309 __le16 buffersize; 309 __le16 buffersize;
310 u8 regmap[128]; 310 u8 regmap[128];
311 __le16 reserved; 311 __le16 reserved;
312}; 312} __attribute__ ((packed));
313 313
314struct cmd_ds_bbp_reg_map { 314struct cmd_ds_bbp_reg_map {
315 __le16 buffersize; 315 __le16 buffersize;
316 u8 regmap[128]; 316 u8 regmap[128];
317 __le16 reserved; 317 __le16 reserved;
318}; 318} __attribute__ ((packed));
319 319
320struct cmd_ds_rf_reg_map { 320struct cmd_ds_rf_reg_map {
321 __le16 buffersize; 321 __le16 buffersize;
322 u8 regmap[64]; 322 u8 regmap[64];
323 __le16 reserved; 323 __le16 reserved;
324}; 324} __attribute__ ((packed));
325 325
326struct cmd_ds_mac_reg_access { 326struct cmd_ds_mac_reg_access {
327 __le16 action; 327 __le16 action;
328 __le16 offset; 328 __le16 offset;
329 __le32 value; 329 __le32 value;
330}; 330} __attribute__ ((packed));
331 331
332struct cmd_ds_bbp_reg_access { 332struct cmd_ds_bbp_reg_access {
333 __le16 action; 333 __le16 action;
334 __le16 offset; 334 __le16 offset;
335 u8 value; 335 u8 value;
336 u8 reserved[3]; 336 u8 reserved[3];
337}; 337} __attribute__ ((packed));
338 338
339struct cmd_ds_rf_reg_access { 339struct cmd_ds_rf_reg_access {
340 __le16 action; 340 __le16 action;
341 __le16 offset; 341 __le16 offset;
342 u8 value; 342 u8 value;
343 u8 reserved[3]; 343 u8 reserved[3];
344}; 344} __attribute__ ((packed));
345 345
346struct cmd_ds_802_11_radio_control { 346struct cmd_ds_802_11_radio_control {
347 struct cmd_header hdr; 347 struct cmd_header hdr;
348 348
349 __le16 action; 349 __le16 action;
350 __le16 control; 350 __le16 control;
351}; 351} __attribute__ ((packed));
352 352
353struct cmd_ds_802_11_beacon_control { 353struct cmd_ds_802_11_beacon_control {
354 __le16 action; 354 __le16 action;
355 __le16 beacon_enable; 355 __le16 beacon_enable;
356 __le16 beacon_period; 356 __le16 beacon_period;
357}; 357} __attribute__ ((packed));
358 358
359struct cmd_ds_802_11_sleep_params { 359struct cmd_ds_802_11_sleep_params {
360 struct cmd_header hdr; 360 struct cmd_header hdr;
@@ -379,7 +379,7 @@ struct cmd_ds_802_11_sleep_params {
379 379
380 /* reserved field, should be set to zero */ 380 /* reserved field, should be set to zero */
381 __le16 reserved; 381 __le16 reserved;
382}; 382} __attribute__ ((packed));
383 383
384struct cmd_ds_802_11_inactivity_timeout { 384struct cmd_ds_802_11_inactivity_timeout {
385 struct cmd_header hdr; 385 struct cmd_header hdr;
@@ -389,7 +389,7 @@ struct cmd_ds_802_11_inactivity_timeout {
389 389
390 /* Inactivity timeout in msec */ 390 /* Inactivity timeout in msec */
391 __le16 timeout; 391 __le16 timeout;
392}; 392} __attribute__ ((packed));
393 393
394struct cmd_ds_802_11_rf_channel { 394struct cmd_ds_802_11_rf_channel {
395 struct cmd_header hdr; 395 struct cmd_header hdr;
@@ -399,7 +399,7 @@ struct cmd_ds_802_11_rf_channel {
399 __le16 rftype; /* unused */ 399 __le16 rftype; /* unused */
400 __le16 reserved; /* unused */ 400 __le16 reserved; /* unused */
401 u8 channellist[32]; /* unused */ 401 u8 channellist[32]; /* unused */
402}; 402} __attribute__ ((packed));
403 403
404struct cmd_ds_802_11_rssi { 404struct cmd_ds_802_11_rssi {
405 /* weighting factor */ 405 /* weighting factor */
@@ -408,21 +408,21 @@ struct cmd_ds_802_11_rssi {
408 __le16 reserved_0; 408 __le16 reserved_0;
409 __le16 reserved_1; 409 __le16 reserved_1;
410 __le16 reserved_2; 410 __le16 reserved_2;
411}; 411} __attribute__ ((packed));
412 412
413struct cmd_ds_802_11_rssi_rsp { 413struct cmd_ds_802_11_rssi_rsp {
414 __le16 SNR; 414 __le16 SNR;
415 __le16 noisefloor; 415 __le16 noisefloor;
416 __le16 avgSNR; 416 __le16 avgSNR;
417 __le16 avgnoisefloor; 417 __le16 avgnoisefloor;
418}; 418} __attribute__ ((packed));
419 419
420struct cmd_ds_802_11_mac_address { 420struct cmd_ds_802_11_mac_address {
421 struct cmd_header hdr; 421 struct cmd_header hdr;
422 422
423 __le16 action; 423 __le16 action;
424 u8 macadd[ETH_ALEN]; 424 u8 macadd[ETH_ALEN];
425}; 425} __attribute__ ((packed));
426 426
427struct cmd_ds_802_11_rf_tx_power { 427struct cmd_ds_802_11_rf_tx_power {
428 struct cmd_header hdr; 428 struct cmd_header hdr;
@@ -431,7 +431,7 @@ struct cmd_ds_802_11_rf_tx_power {
431 __le16 curlevel; 431 __le16 curlevel;
432 s8 maxlevel; 432 s8 maxlevel;
433 s8 minlevel; 433 s8 minlevel;
434}; 434} __attribute__ ((packed));
435 435
436struct cmd_ds_802_11_rf_antenna { 436struct cmd_ds_802_11_rf_antenna {
437 __le16 action; 437 __le16 action;
@@ -439,33 +439,33 @@ struct cmd_ds_802_11_rf_antenna {
439 /* Number of antennas or 0xffff(diversity) */ 439 /* Number of antennas or 0xffff(diversity) */
440 __le16 antennamode; 440 __le16 antennamode;
441 441
442}; 442} __attribute__ ((packed));
443 443
444struct cmd_ds_802_11_monitor_mode { 444struct cmd_ds_802_11_monitor_mode {
445 __le16 action; 445 __le16 action;
446 __le16 mode; 446 __le16 mode;
447}; 447} __attribute__ ((packed));
448 448
449struct cmd_ds_set_boot2_ver { 449struct cmd_ds_set_boot2_ver {
450 struct cmd_header hdr; 450 struct cmd_header hdr;
451 451
452 __le16 action; 452 __le16 action;
453 __le16 version; 453 __le16 version;
454}; 454} __attribute__ ((packed));
455 455
456struct cmd_ds_802_11_fw_wake_method { 456struct cmd_ds_802_11_fw_wake_method {
457 struct cmd_header hdr; 457 struct cmd_header hdr;
458 458
459 __le16 action; 459 __le16 action;
460 __le16 method; 460 __le16 method;
461}; 461} __attribute__ ((packed));
462 462
463struct cmd_ds_802_11_sleep_period { 463struct cmd_ds_802_11_sleep_period {
464 struct cmd_header hdr; 464 struct cmd_header hdr;
465 465
466 __le16 action; 466 __le16 action;
467 __le16 period; 467 __le16 period;
468}; 468} __attribute__ ((packed));
469 469
470struct cmd_ds_802_11_ps_mode { 470struct cmd_ds_802_11_ps_mode {
471 __le16 action; 471 __le16 action;
@@ -473,7 +473,7 @@ struct cmd_ds_802_11_ps_mode {
473 __le16 multipledtim; 473 __le16 multipledtim;
474 __le16 reserved; 474 __le16 reserved;
475 __le16 locallisteninterval; 475 __le16 locallisteninterval;
476}; 476} __attribute__ ((packed));
477 477
478struct cmd_confirm_sleep { 478struct cmd_confirm_sleep {
479 struct cmd_header hdr; 479 struct cmd_header hdr;
@@ -483,7 +483,7 @@ struct cmd_confirm_sleep {
483 __le16 multipledtim; 483 __le16 multipledtim;
484 __le16 reserved; 484 __le16 reserved;
485 __le16 locallisteninterval; 485 __le16 locallisteninterval;
486}; 486} __attribute__ ((packed));
487 487
488struct cmd_ds_802_11_data_rate { 488struct cmd_ds_802_11_data_rate {
489 struct cmd_header hdr; 489 struct cmd_header hdr;
@@ -491,14 +491,14 @@ struct cmd_ds_802_11_data_rate {
491 __le16 action; 491 __le16 action;
492 __le16 reserved; 492 __le16 reserved;
493 u8 rates[MAX_RATES]; 493 u8 rates[MAX_RATES];
494}; 494} __attribute__ ((packed));
495 495
496struct cmd_ds_802_11_rate_adapt_rateset { 496struct cmd_ds_802_11_rate_adapt_rateset {
497 struct cmd_header hdr; 497 struct cmd_header hdr;
498 __le16 action; 498 __le16 action;
499 __le16 enablehwauto; 499 __le16 enablehwauto;
500 __le16 bitmap; 500 __le16 bitmap;
501}; 501} __attribute__ ((packed));
502 502
503struct cmd_ds_802_11_ad_hoc_start { 503struct cmd_ds_802_11_ad_hoc_start {
504 struct cmd_header hdr; 504 struct cmd_header hdr;
@@ -520,7 +520,7 @@ struct cmd_ds_802_11_ad_hoc_result {
520 520
521 u8 pad[3]; 521 u8 pad[3];
522 u8 bssid[ETH_ALEN]; 522 u8 bssid[ETH_ALEN];
523}; 523} __attribute__ ((packed));
524 524
525struct adhoc_bssdesc { 525struct adhoc_bssdesc {
526 u8 bssid[ETH_ALEN]; 526 u8 bssid[ETH_ALEN];
@@ -578,7 +578,7 @@ struct MrvlIEtype_keyParamSet {
578 578
579 /* key material of size keylen */ 579 /* key material of size keylen */
580 u8 key[32]; 580 u8 key[32];
581}; 581} __attribute__ ((packed));
582 582
583#define MAX_WOL_RULES 16 583#define MAX_WOL_RULES 16
584 584
@@ -590,7 +590,7 @@ struct host_wol_rule {
590 __le16 reserve; 590 __le16 reserve;
591 __be32 sig_mask; 591 __be32 sig_mask;
592 __be32 signature; 592 __be32 signature;
593}; 593} __attribute__ ((packed));
594 594
595struct wol_config { 595struct wol_config {
596 uint8_t action; 596 uint8_t action;
@@ -598,8 +598,7 @@ struct wol_config {
598 uint8_t no_rules_in_cmd; 598 uint8_t no_rules_in_cmd;
599 uint8_t result; 599 uint8_t result;
600 struct host_wol_rule rule[MAX_WOL_RULES]; 600 struct host_wol_rule rule[MAX_WOL_RULES];
601}; 601} __attribute__ ((packed));
602
603 602
604struct cmd_ds_host_sleep { 603struct cmd_ds_host_sleep {
605 struct cmd_header hdr; 604 struct cmd_header hdr;
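Every struct this hostcmd.h hunk touches describes a command that travels to the firmware, which is why each gains __attribute__ ((packed)): without it the compiler may insert padding for alignment, and sizeof() and field offsets stop matching the wire layout. A minimal illustration (layout figures assume a typical ABI):

#include <linux/types.h>

struct demo_plain  { u8 action; __le16 value; };				/* usually sizeof == 4 */
struct demo_packed { u8 action; __le16 value; } __attribute__ ((packed));	/* sizeof == 3, as on the wire */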
diff --git a/drivers/net/wireless/orinoco/orinoco.c b/drivers/net/wireless/orinoco/orinoco.c
index c3bb85e0251e..45a04faa7818 100644
--- a/drivers/net/wireless/orinoco/orinoco.c
+++ b/drivers/net/wireless/orinoco/orinoco.c
@@ -1673,7 +1673,7 @@ static void print_linkstatus(struct net_device *dev, u16 status)
1673 s = "UNKNOWN"; 1673 s = "UNKNOWN";
1674 } 1674 }
1675 1675
1676 printk(KERN_INFO "%s: New link status: %s (%04x)\n", 1676 printk(KERN_DEBUG "%s: New link status: %s (%04x)\n",
1677 dev->name, s, status); 1677 dev->name, s, status);
1678} 1678}
1679 1679
@@ -5068,33 +5068,30 @@ static int orinoco_ioctl_set_genie(struct net_device *dev,
5068 struct orinoco_private *priv = netdev_priv(dev); 5068 struct orinoco_private *priv = netdev_priv(dev);
5069 u8 *buf; 5069 u8 *buf;
5070 unsigned long flags; 5070 unsigned long flags;
5071 int err = 0;
5072 5071
5073 /* cut off at IEEE80211_MAX_DATA_LEN */ 5072 /* cut off at IEEE80211_MAX_DATA_LEN */
5074 if ((wrqu->data.length > IEEE80211_MAX_DATA_LEN) || 5073 if ((wrqu->data.length > IEEE80211_MAX_DATA_LEN) ||
5075 (wrqu->data.length && (extra == NULL))) 5074 (wrqu->data.length && (extra == NULL)))
5076 return -EINVAL; 5075 return -EINVAL;
5077 5076
5078 if (orinoco_lock(priv, &flags) != 0)
5079 return -EBUSY;
5080
5081 if (wrqu->data.length) { 5077 if (wrqu->data.length) {
5082 buf = kmalloc(wrqu->data.length, GFP_KERNEL); 5078 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
5083 if (buf == NULL) { 5079 if (buf == NULL)
5084 err = -ENOMEM; 5080 return -ENOMEM;
5085 goto out;
5086 }
5087 5081
5088 memcpy(buf, extra, wrqu->data.length); 5082 memcpy(buf, extra, wrqu->data.length);
5089 kfree(priv->wpa_ie); 5083 } else
5090 priv->wpa_ie = buf; 5084 buf = NULL;
5091 priv->wpa_ie_len = wrqu->data.length; 5085
5092 } else { 5086 if (orinoco_lock(priv, &flags) != 0) {
5093 kfree(priv->wpa_ie); 5087 kfree(buf);
5094 priv->wpa_ie = NULL; 5088 return -EBUSY;
5095 priv->wpa_ie_len = 0;
5096 } 5089 }
5097 5090
5091 kfree(priv->wpa_ie);
5092 priv->wpa_ie = buf;
5093 priv->wpa_ie_len = wrqu->data.length;
5094
5098 if (priv->wpa_ie) { 5095 if (priv->wpa_ie) {
5099 /* Looks like wl_lkm wants to check the auth alg, and 5096 /* Looks like wl_lkm wants to check the auth alg, and
5100 * somehow pass it to the firmware. 5097 * somehow pass it to the firmware.
@@ -5103,9 +5100,8 @@ static int orinoco_ioctl_set_genie(struct net_device *dev,
5103 */ 5100 */
5104 } 5101 }
5105 5102
5106out:
5107 orinoco_unlock(priv, &flags); 5103 orinoco_unlock(priv, &flags);
5108 return err; 5104 return 0;
5109} 5105}
5110 5106
5111static int orinoco_ioctl_get_genie(struct net_device *dev, 5107static int orinoco_ioctl_get_genie(struct net_device *dev,
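The reshuffle in orinoco_ioctl_set_genie() is about lock context: orinoco_lock() disables interrupts, so the GFP_KERNEL allocation and the copy now happen before the lock is taken, and only the pointer swap happens under it. A generic sketch of that ordering (the function, parameters and use of a bare spinlock are illustrative):

#include <linux/slab.h>
#include <linux/spinlock.h>

static int set_ie(spinlock_t *lock, u8 **ie, size_t *ie_len,
		  const u8 *src, size_t len)
{
	u8 *buf = NULL;
	unsigned long flags;

	if (len) {
		buf = kmemdup(src, len, GFP_KERNEL);	/* may sleep: do it unlocked */
		if (!buf)
			return -ENOMEM;
	}

	spin_lock_irqsave(lock, flags);
	kfree(*ie);		/* drop the old element under the lock */
	*ie = buf;
	*ie_len = len;
	spin_unlock_irqrestore(lock, flags);

	return 0;
}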
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index c6a370fa9bcb..34561e6e816b 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -451,8 +451,8 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
451 } 451 }
452 if (err) 452 if (err)
453 goto err; 453 goto err;
454 454 }
455 } 455 break;
456 case PDR_PRISM_ZIF_TX_IQ_CALIBRATION: 456 case PDR_PRISM_ZIF_TX_IQ_CALIBRATION:
457 priv->iq_autocal = kmalloc(data_len, GFP_KERNEL); 457 priv->iq_autocal = kmalloc(data_len, GFP_KERNEL);
458 if (!priv->iq_autocal) { 458 if (!priv->iq_autocal) {
@@ -745,7 +745,7 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
745 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry); 745 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
746 struct p54_hdr *entry_hdr; 746 struct p54_hdr *entry_hdr;
747 struct p54_tx_data *entry_data; 747 struct p54_tx_data *entry_data;
748 int pad = 0; 748 unsigned int pad = 0, frame_len;
749 749
750 range = (void *)info->rate_driver_data; 750 range = (void *)info->rate_driver_data;
751 if (range->start_addr != addr) { 751 if (range->start_addr != addr) {
@@ -768,6 +768,7 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
768 __skb_unlink(entry, &priv->tx_queue); 768 __skb_unlink(entry, &priv->tx_queue);
769 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); 769 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
770 770
771 frame_len = entry->len;
771 entry_hdr = (struct p54_hdr *) entry->data; 772 entry_hdr = (struct p54_hdr *) entry->data;
772 entry_data = (struct p54_tx_data *) entry_hdr->data; 773 entry_data = (struct p54_tx_data *) entry_hdr->data;
773 priv->tx_stats[entry_data->hw_queue].len--; 774 priv->tx_stats[entry_data->hw_queue].len--;
@@ -814,15 +815,28 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
814 info->status.ack_signal = p54_rssi_to_dbm(dev, 815 info->status.ack_signal = p54_rssi_to_dbm(dev,
815 (int)payload->ack_rssi); 816 (int)payload->ack_rssi);
816 817
817 if (entry_data->key_type == P54_CRYPTO_TKIPMICHAEL) { 818 /* Undo all changes to the frame. */
819 switch (entry_data->key_type) {
820 case P54_CRYPTO_TKIPMICHAEL: {
818 u8 *iv = (u8 *)(entry_data->align + pad + 821 u8 *iv = (u8 *)(entry_data->align + pad +
819 entry_data->crypt_offset); 822 entry_data->crypt_offset);
820 823
821 /* Restore the original TKIP IV. */ 824 /* Restore the original TKIP IV. */
822 iv[2] = iv[0]; 825 iv[2] = iv[0];
823 iv[0] = iv[1]; 826 iv[0] = iv[1];
824 iv[1] = (iv[0] | 0x20) & 0x7f; /* WEPSeed - 8.3.2.2 */ 827 iv[1] = (iv[0] | 0x20) & 0x7f; /* WEPSeed - 8.3.2.2 */
828
829 frame_len -= 12; /* remove TKIP_MMIC + TKIP_ICV */
830 break;
831 }
832 case P54_CRYPTO_AESCCMP:
833 frame_len -= 8; /* remove CCMP_MIC */
834 break;
835 case P54_CRYPTO_WEP:
836 frame_len -= 4; /* remove WEP_ICV */
837 break;
825 } 838 }
839 skb_trim(entry, frame_len);
826 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data)); 840 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data));
827 ieee80211_tx_status_irqsafe(dev, entry); 841 ieee80211_tx_status_irqsafe(dev, entry);
828 goto out; 842 goto out;
@@ -1147,7 +1161,7 @@ static int p54_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta,
1147 1161
1148 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, 1162 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET,
1149 sizeof(struct p54_hdr) + sizeof(*tim), 1163 sizeof(struct p54_hdr) + sizeof(*tim),
1150 P54_CONTROL_TYPE_TIM, GFP_KERNEL); 1164 P54_CONTROL_TYPE_TIM, GFP_ATOMIC);
1151 if (!skb) 1165 if (!skb)
1152 return -ENOMEM; 1166 return -ENOMEM;
1153 1167
@@ -1610,7 +1624,7 @@ static int p54_scan(struct ieee80211_hw *dev, u16 mode, u16 dwell)
1610 1624
1611 err: 1625 err:
1612 printk(KERN_ERR "%s: frequency change failed\n", wiphy_name(dev->wiphy)); 1626 printk(KERN_ERR "%s: frequency change failed\n", wiphy_name(dev->wiphy));
1613 kfree_skb(skb); 1627 p54_free_skb(dev, skb);
1614 return -EINVAL; 1628 return -EINVAL;
1615} 1629}
1616 1630
@@ -2077,7 +2091,7 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
2077 algo = P54_CRYPTO_AESCCMP; 2091 algo = P54_CRYPTO_AESCCMP;
2078 break; 2092 break;
2079 default: 2093 default:
2080 return -EINVAL; 2094 return -EOPNOTSUPP;
2081 } 2095 }
2082 } 2096 }
2083 2097
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 6a6a72f6f82c..5de2ebfb28c7 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -144,11 +144,8 @@ static void p54u_tx_cb(struct urb *urb)
144 struct sk_buff *skb = urb->context; 144 struct sk_buff *skb = urb->context;
145 struct ieee80211_hw *dev = (struct ieee80211_hw *) 145 struct ieee80211_hw *dev = (struct ieee80211_hw *)
146 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); 146 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
147 struct p54u_priv *priv = dev->priv;
148 147
149 skb_pull(skb, priv->common.tx_hdr_len); 148 p54_free_skb(dev, skb);
150 if (FREE_AFTER_TX(skb))
151 p54_free_skb(dev, skb);
152} 149}
153 150
154static void p54u_tx_dummy_cb(struct urb *urb) { } 151static void p54u_tx_dummy_cb(struct urb *urb) { }
@@ -230,7 +227,10 @@ static void p54u_tx_3887(struct ieee80211_hw *dev, struct sk_buff *skb)
230 p54u_tx_dummy_cb, dev); 227 p54u_tx_dummy_cb, dev);
231 usb_fill_bulk_urb(data_urb, priv->udev, 228 usb_fill_bulk_urb(data_urb, priv->udev,
232 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), 229 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
233 skb->data, skb->len, p54u_tx_cb, skb); 230 skb->data, skb->len, FREE_AFTER_TX(skb) ?
231 p54u_tx_cb : p54u_tx_dummy_cb, skb);
232 addr_urb->transfer_flags |= URB_ZERO_PACKET;
233 data_urb->transfer_flags |= URB_ZERO_PACKET;
234 234
235 usb_anchor_urb(addr_urb, &priv->submitted); 235 usb_anchor_urb(addr_urb, &priv->submitted);
236 err = usb_submit_urb(addr_urb, GFP_ATOMIC); 236 err = usb_submit_urb(addr_urb, GFP_ATOMIC);
@@ -239,7 +239,7 @@ static void p54u_tx_3887(struct ieee80211_hw *dev, struct sk_buff *skb)
239 goto out; 239 goto out;
240 } 240 }
241 241
242 usb_anchor_urb(addr_urb, &priv->submitted); 242 usb_anchor_urb(data_urb, &priv->submitted);
243 err = usb_submit_urb(data_urb, GFP_ATOMIC); 243 err = usb_submit_urb(data_urb, GFP_ATOMIC);
244 if (err) 244 if (err)
245 usb_unanchor_urb(data_urb); 245 usb_unanchor_urb(data_urb);
@@ -269,28 +269,24 @@ static void p54u_tx_lm87(struct ieee80211_hw *dev, struct sk_buff *skb)
269{ 269{
270 struct p54u_priv *priv = dev->priv; 270 struct p54u_priv *priv = dev->priv;
271 struct urb *data_urb; 271 struct urb *data_urb;
272 struct lm87_tx_hdr *hdr; 272 struct lm87_tx_hdr *hdr = (void *)skb->data - sizeof(*hdr);
273 __le32 checksum;
274 __le32 addr = ((struct p54_hdr *)skb->data)->req_id;
275 273
276 data_urb = usb_alloc_urb(0, GFP_ATOMIC); 274 data_urb = usb_alloc_urb(0, GFP_ATOMIC);
277 if (!data_urb) 275 if (!data_urb)
278 return; 276 return;
279 277
280 checksum = p54u_lm87_chksum((__le32 *)skb->data, skb->len); 278 hdr->chksum = p54u_lm87_chksum((__le32 *)skb->data, skb->len);
281 hdr = (struct lm87_tx_hdr *)skb_push(skb, sizeof(*hdr)); 279 hdr->device_addr = ((struct p54_hdr *)skb->data)->req_id;
282 hdr->chksum = checksum;
283 hdr->device_addr = addr;
284 280
285 usb_fill_bulk_urb(data_urb, priv->udev, 281 usb_fill_bulk_urb(data_urb, priv->udev,
286 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), 282 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
287 skb->data, skb->len, p54u_tx_cb, skb); 283 hdr, skb->len + sizeof(*hdr), FREE_AFTER_TX(skb) ?
284 p54u_tx_cb : p54u_tx_dummy_cb, skb);
288 data_urb->transfer_flags |= URB_ZERO_PACKET; 285 data_urb->transfer_flags |= URB_ZERO_PACKET;
289 286
290 usb_anchor_urb(data_urb, &priv->submitted); 287 usb_anchor_urb(data_urb, &priv->submitted);
291 if (usb_submit_urb(data_urb, GFP_ATOMIC)) { 288 if (usb_submit_urb(data_urb, GFP_ATOMIC)) {
292 usb_unanchor_urb(data_urb); 289 usb_unanchor_urb(data_urb);
293 skb_pull(skb, sizeof(*hdr));
294 p54_free_skb(dev, skb); 290 p54_free_skb(dev, skb);
295 } 291 }
296 usb_free_urb(data_urb); 292 usb_free_urb(data_urb);
@@ -300,11 +296,9 @@ static void p54u_tx_net2280(struct ieee80211_hw *dev, struct sk_buff *skb)
300{ 296{
301 struct p54u_priv *priv = dev->priv; 297 struct p54u_priv *priv = dev->priv;
302 struct urb *int_urb, *data_urb; 298 struct urb *int_urb, *data_urb;
303 struct net2280_tx_hdr *hdr; 299 struct net2280_tx_hdr *hdr = (void *)skb->data - sizeof(*hdr);
304 struct net2280_reg_write *reg; 300 struct net2280_reg_write *reg;
305 int err = 0; 301 int err = 0;
306 __le32 addr = ((struct p54_hdr *) skb->data)->req_id;
307 __le16 len = cpu_to_le16(skb->len);
308 302
309 reg = kmalloc(sizeof(*reg), GFP_ATOMIC); 303 reg = kmalloc(sizeof(*reg), GFP_ATOMIC);
310 if (!reg) 304 if (!reg)
@@ -327,10 +321,9 @@ static void p54u_tx_net2280(struct ieee80211_hw *dev, struct sk_buff *skb)
327 reg->addr = cpu_to_le32(P54U_DEV_BASE); 321 reg->addr = cpu_to_le32(P54U_DEV_BASE);
328 reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA); 322 reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA);
329 323
330 hdr = (void *)skb_push(skb, sizeof(*hdr));
331 memset(hdr, 0, sizeof(*hdr)); 324 memset(hdr, 0, sizeof(*hdr));
332 hdr->len = len; 325 hdr->len = cpu_to_le16(skb->len);
333 hdr->device_addr = addr; 326 hdr->device_addr = ((struct p54_hdr *) skb->data)->req_id;
334 327
335 usb_fill_bulk_urb(int_urb, priv->udev, 328 usb_fill_bulk_urb(int_urb, priv->udev,
336 usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV), reg, sizeof(*reg), 329 usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV), reg, sizeof(*reg),
@@ -341,11 +334,13 @@ static void p54u_tx_net2280(struct ieee80211_hw *dev, struct sk_buff *skb)
341 * free what's inside the transfer_buffer after the callback routine 334 * free what's inside the transfer_buffer after the callback routine
342 * has completed. 335 * has completed.
343 */ 336 */
344 int_urb->transfer_flags |= URB_FREE_BUFFER; 337 int_urb->transfer_flags |= URB_FREE_BUFFER | URB_ZERO_PACKET;
345 338
346 usb_fill_bulk_urb(data_urb, priv->udev, 339 usb_fill_bulk_urb(data_urb, priv->udev,
347 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), 340 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
348 skb->data, skb->len, p54u_tx_cb, skb); 341 hdr, skb->len + sizeof(*hdr), FREE_AFTER_TX(skb) ?
342 p54u_tx_cb : p54u_tx_dummy_cb, skb);
343 data_urb->transfer_flags |= URB_ZERO_PACKET;
349 344
350 usb_anchor_urb(int_urb, &priv->submitted); 345 usb_anchor_urb(int_urb, &priv->submitted);
351 err = usb_submit_urb(int_urb, GFP_ATOMIC); 346 err = usb_submit_urb(int_urb, GFP_ATOMIC);
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 607ce9f61b54..ed93ac41297f 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1649,9 +1649,7 @@ static char *rndis_translate_scan(struct net_device *dev,
1649 char *end_buf, 1649 char *end_buf,
1650 struct ndis_80211_bssid_ex *bssid) 1650 struct ndis_80211_bssid_ex *bssid)
1651{ 1651{
1652#ifdef DEBUG
1653 struct usbnet *usbdev = netdev_priv(dev); 1652 struct usbnet *usbdev = netdev_priv(dev);
1654#endif
1655 u8 *ie; 1653 u8 *ie;
1656 char *current_val; 1654 char *current_val;
1657 int bssid_len, ie_len, i; 1655 int bssid_len, ie_len, i;
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 746a8f36b931..0709decec9c2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -154,6 +154,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
154 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 154 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
155 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 155 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
156 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 156 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
157 struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
157 struct ieee80211_rate *rate = 158 struct ieee80211_rate *rate =
158 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); 159 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
159 const struct rt2x00_rate *hwrate; 160 const struct rt2x00_rate *hwrate;
@@ -313,7 +314,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
313 * When preamble is enabled we should set the 314 * When preamble is enabled we should set the
314 * preamble bit for the signal. 315 * preamble bit for the signal.
315 */ 316 */
316 if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 317 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
317 txdesc->signal |= 0x08; 318 txdesc->signal |= 0x08;
318 } 319 }
319} 320}
diff --git a/drivers/net/wireless/rt2x00/rt2x00rfkill.c b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
index c3f53a92180a..3298cae1e12d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00rfkill.c
+++ b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
@@ -162,7 +162,7 @@ void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
162 162
163void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev) 163void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
164{ 164{
165 if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->flags)) 165 if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
166 return; 166 return;
167 167
168 cancel_delayed_work_sync(&rt2x00dev->rfkill_work); 168 cancel_delayed_work_sync(&rt2x00dev->rfkill_work);
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 6ad6bac37706..22bc07ef2f37 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -273,6 +273,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
273 273
274 usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, ep), 274 usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, ep),
275 buf, skb->len, rtl8187_tx_cb, skb); 275 buf, skb->len, rtl8187_tx_cb, skb);
276 urb->transfer_flags |= URB_ZERO_PACKET;
276 usb_anchor_urb(urb, &priv->anchored); 277 usb_anchor_urb(urb, &priv->anchored);
277 rc = usb_submit_urb(urb, GFP_ATOMIC); 278 rc = usb_submit_urb(urb, GFP_ATOMIC);
278 if (rc < 0) { 279 if (rc < 0) {
diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
index 4e75e8e7fa90..78df281b297a 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
@@ -285,7 +285,10 @@ static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
285 ofdm_power = priv->channels[channel - 1].hw_value >> 4; 285 ofdm_power = priv->channels[channel - 1].hw_value >> 4;
286 286
287 cck_power = min(cck_power, (u8)11); 287 cck_power = min(cck_power, (u8)11);
288 ofdm_power = min(ofdm_power, (u8)35); 288 if (ofdm_power > (u8)15)
289 ofdm_power = 25;
290 else
291 ofdm_power += 10;
289 292
290 rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, 293 rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
291 rtl8225_tx_gain_cck_ofdm[cck_power / 6] >> 1); 294 rtl8225_tx_gain_cck_ofdm[cck_power / 6] >> 1);
@@ -536,7 +539,10 @@ static void rtl8225z2_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
536 cck_power += priv->txpwr_base & 0xF; 539 cck_power += priv->txpwr_base & 0xF;
537 cck_power = min(cck_power, (u8)35); 540 cck_power = min(cck_power, (u8)35);
538 541
539 ofdm_power = min(ofdm_power, (u8)15); 542 if (ofdm_power > (u8)15)
543 ofdm_power = 25;
544 else
545 ofdm_power += 10;
540 ofdm_power += priv->txpwr_base >> 4; 546 ofdm_power += priv->txpwr_base >> 4;
541 ofdm_power = min(ofdm_power, (u8)35); 547 ofdm_power = min(ofdm_power, (u8)35);
542 548
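Editor's note: the two rtl8187_rtl8225 hunks above replace a plain min() clamp on the OFDM power index (35 in one path, 15 in the z2 path) with a two-way mapping: indices above 15 collapse to 25, everything else gets a +10 offset. A minimal stand-alone C sketch of just that arithmetic, outside the driver, with an illustrative main():

    #include <stdio.h>

    typedef unsigned char u8;

    /* Old behaviour: clamp the OFDM index (to 35 here; the z2 path used 15). */
    static u8 ofdm_power_old(u8 ofdm_power)
    {
        return ofdm_power > 35 ? 35 : ofdm_power;
    }

    /* New behaviour: indices above 15 collapse to 25, others get a +10 offset. */
    static u8 ofdm_power_new(u8 ofdm_power)
    {
        if (ofdm_power > 15)
            return 25;
        return ofdm_power + 10;
    }

    int main(void)
    {
        for (u8 p = 0; p <= 20; p++)
            printf("%2d: old=%2d new=%2d\n", p, ofdm_power_old(p), ofdm_power_new(p));
        return 0;
    }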
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index b5db57d2fcf5..17527f765b39 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -84,6 +84,7 @@ static struct usb_device_id usb_ids[] = {
84 { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B }, 84 { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B },
85 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, 85 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
86 { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B }, 86 { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
87 { USB_DEVICE(0x0df6, 0x0036), .driver_info = DEVICE_ZD1211B },
87 /* "Driverless" devices that need ejecting */ 88 /* "Driverless" devices that need ejecting */
88 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 89 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
89 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, 90 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 2e03b6d796d3..e76d715e4342 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -393,16 +393,21 @@ oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
393 return; 393 return;
394 394
395fail: 395fail:
396 entry->event = NULL;
396 cpu_buf->sample_lost_overflow++; 397 cpu_buf->sample_lost_overflow++;
397} 398}
398 399
399int oprofile_add_data(struct op_entry *entry, unsigned long val) 400int oprofile_add_data(struct op_entry *entry, unsigned long val)
400{ 401{
402 if (!entry->event)
403 return 0;
401 return op_cpu_buffer_add_data(entry, val); 404 return op_cpu_buffer_add_data(entry, val);
402} 405}
403 406
404int oprofile_write_commit(struct op_entry *entry) 407int oprofile_write_commit(struct op_entry *entry)
405{ 408{
409 if (!entry->event)
410 return -EINVAL;
406 return op_cpu_buffer_write_commit(entry); 411 return op_cpu_buffer_write_commit(entry);
407} 412}
408 413
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 63f81c44846a..272995d20293 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -66,6 +66,13 @@ static inline void op_cpu_buffer_reset(int cpu)
66 cpu_buf->last_task = NULL; 66 cpu_buf->last_task = NULL;
67} 67}
68 68
69/*
70 * op_cpu_buffer_add_data() and op_cpu_buffer_write_commit() may be
71 * called only if op_cpu_buffer_write_reserve() did not return NULL or
72 * entry->event != NULL, otherwise entry->size or entry->event will be
73 * used uninitialized.
74 */
75
69struct op_sample 76struct op_sample
70*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size); 77*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
71int op_cpu_buffer_write_commit(struct op_entry *entry); 78int op_cpu_buffer_write_commit(struct op_entry *entry);
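Editor's note: the oprofile change makes oprofile_add_data() and oprofile_write_commit() safe to call after a failed reserve, by clearing entry->event on the failure path and bailing out early. A simplified, runnable sketch of that guard pattern; the struct and function names below are stand-ins, not the kernel API:

    #include <stdio.h>
    #include <stddef.h>

    struct op_entry {
        void *event;            /* NULL means the reserve step failed */
        unsigned long size;
    };

    static int add_data(struct op_entry *entry, unsigned long val)
    {
        if (!entry->event)      /* reserve failed: silently drop the value */
            return 0;
        printf("adding %lu\n", val);
        return 1;
    }

    static int write_commit(struct op_entry *entry)
    {
        if (!entry->event)      /* nothing was reserved, nothing to commit */
            return -1;
        printf("committing\n");
        return 0;
    }

    int main(void)
    {
        struct op_entry failed = { .event = NULL };
        add_data(&failed, 42);          /* no-op */
        return write_commit(&failed);   /* returns -1, mirroring -EINVAL */
    }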
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 3fac8f81d59d..a70cf16ee1ad 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -668,7 +668,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
668 * @dev: instance of PCI owned by the driver that's asking 668 * @dev: instance of PCI owned by the driver that's asking
669 * @mask: number of address bits this PCI device can handle 669 * @mask: number of address bits this PCI device can handle
670 * 670 *
671 * See Documentation/DMA-mapping.txt 671 * See Documentation/PCI/PCI-DMA-mapping.txt
672 */ 672 */
673static int sba_dma_supported( struct device *dev, u64 mask) 673static int sba_dma_supported( struct device *dev, u64 mask)
674{ 674{
@@ -680,8 +680,8 @@ static int sba_dma_supported( struct device *dev, u64 mask)
680 return(0); 680 return(0);
681 } 681 }
682 682
683 /* Documentation/DMA-mapping.txt tells drivers to try 64-bit first, 683 /* Documentation/PCI/PCI-DMA-mapping.txt tells drivers to try 64-bit
684 * then fall back to 32-bit if that fails. 684 * first, then fall back to 32-bit if that fails.
685 * We are just "encouraging" 32-bit DMA masks here since we can 685 * We are just "encouraging" 32-bit DMA masks here since we can
686 * never allow IOMMU bypass unless we add special support for ZX1. 686 * never allow IOMMU bypass unless we add special support for ZX1.
687 */ 687 */
@@ -706,7 +706,7 @@ static int sba_dma_supported( struct device *dev, u64 mask)
706 * @size: number of bytes to map in driver buffer. 706 * @size: number of bytes to map in driver buffer.
707 * @direction: R/W or both. 707 * @direction: R/W or both.
708 * 708 *
709 * See Documentation/DMA-mapping.txt 709 * See Documentation/PCI/PCI-DMA-mapping.txt
710 */ 710 */
711static dma_addr_t 711static dma_addr_t
712sba_map_single(struct device *dev, void *addr, size_t size, 712sba_map_single(struct device *dev, void *addr, size_t size,
@@ -785,7 +785,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
785 * @size: number of bytes mapped in driver buffer. 785 * @size: number of bytes mapped in driver buffer.
786 * @direction: R/W or both. 786 * @direction: R/W or both.
787 * 787 *
788 * See Documentation/DMA-mapping.txt 788 * See Documentation/PCI/PCI-DMA-mapping.txt
789 */ 789 */
790static void 790static void
791sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, 791sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
@@ -861,7 +861,7 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
861 * @size: number of bytes mapped in driver buffer. 861 * @size: number of bytes mapped in driver buffer.
862 * @dma_handle: IOVA of new buffer. 862 * @dma_handle: IOVA of new buffer.
863 * 863 *
864 * See Documentation/DMA-mapping.txt 864 * See Documentation/PCI/PCI-DMA-mapping.txt
865 */ 865 */
866static void *sba_alloc_consistent(struct device *hwdev, size_t size, 866static void *sba_alloc_consistent(struct device *hwdev, size_t size,
867 dma_addr_t *dma_handle, gfp_t gfp) 867 dma_addr_t *dma_handle, gfp_t gfp)
@@ -892,7 +892,7 @@ static void *sba_alloc_consistent(struct device *hwdev, size_t size,
892 * @vaddr: virtual address IOVA of "consistent" buffer. 892 * @vaddr: virtual address IOVA of "consistent" buffer.
893 * @dma_handler: IO virtual address of "consistent" buffer. 893 * @dma_handler: IO virtual address of "consistent" buffer.
894 * 894 *
895 * See Documentation/DMA-mapping.txt 895 * See Documentation/PCI/PCI-DMA-mapping.txt
896 */ 896 */
897static void 897static void
898sba_free_consistent(struct device *hwdev, size_t size, void *vaddr, 898sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
@@ -927,7 +927,7 @@ int dump_run_sg = 0;
927 * @nents: number of entries in list 927 * @nents: number of entries in list
928 * @direction: R/W or both. 928 * @direction: R/W or both.
929 * 929 *
930 * See Documentation/DMA-mapping.txt 930 * See Documentation/PCI/PCI-DMA-mapping.txt
931 */ 931 */
932static int 932static int
933sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, 933sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
@@ -1011,7 +1011,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
1011 * @nents: number of entries in list 1011 * @nents: number of entries in list
1012 * @direction: R/W or both. 1012 * @direction: R/W or both.
1013 * 1013 *
1014 * See Documentation/DMA-mapping.txt 1014 * See Documentation/PCI/PCI-DMA-mapping.txt
1015 */ 1015 */
1016static void 1016static void
1017sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, 1017sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 5482d4ed8256..c2485542f543 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -126,8 +126,10 @@ static int set_lock_status(struct hotplug_slot *hotplug_slot, u8 status)
126 mutex_lock(&slot->ctrl->crit_sect); 126 mutex_lock(&slot->ctrl->crit_sect);
127 127
128 /* has it been >1 sec since our last toggle? */ 128 /* has it been >1 sec since our last toggle? */
129 if ((get_seconds() - slot->last_emi_toggle) < 1) 129 if ((get_seconds() - slot->last_emi_toggle) < 1) {
130 mutex_unlock(&slot->ctrl->crit_sect);
130 return -EINVAL; 131 return -EINVAL;
132 }
131 133
132 /* see what our current state is */ 134 /* see what our current state is */
133 retval = get_lock_status(hotplug_slot, &value); 135 retval = get_lock_status(hotplug_slot, &value);
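Editor's note: the pciehp fix is the classic unlock-on-every-exit-path bug; the early -EINVAL return used to leave crit_sect held. A user-space analogue with a pthread mutex standing in for the kernel mutex and the one-second check mirroring the EMI toggle rate limit:

    #include <pthread.h>
    #include <errno.h>
    #include <time.h>

    static pthread_mutex_t crit_sect = PTHREAD_MUTEX_INITIALIZER;
    static time_t last_toggle;

    static int set_lock_status(int status)
    {
        (void)status;
        pthread_mutex_lock(&crit_sect);

        /* has it been >1 sec since our last toggle? */
        if (time(NULL) - last_toggle < 1) {
            pthread_mutex_unlock(&crit_sect);   /* the unlock the patch adds */
            return -EINVAL;
        }

        last_toggle = time(NULL);
        /* ... toggle the lock here ... */

        pthread_mutex_unlock(&crit_sect);
        return 0;
    }

    int main(void)
    {
        set_lock_status(1);
        return set_lock_status(1) == -EINVAL ? 0 : 1;
    }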
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index b4a90badd0a6..896a15d70f5b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -398,21 +398,19 @@ static int msi_capability_init(struct pci_dev *dev)
398 entry->msi_attrib.masked = 1; 398 entry->msi_attrib.masked = 1;
399 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 399 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
400 entry->msi_attrib.pos = pos; 400 entry->msi_attrib.pos = pos;
401 if (entry->msi_attrib.maskbit) {
402 entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
403 entry->msi_attrib.is_64);
404 }
405 entry->dev = dev; 401 entry->dev = dev;
406 if (entry->msi_attrib.maskbit) { 402 if (entry->msi_attrib.maskbit) {
407 unsigned int maskbits, temp; 403 unsigned int base, maskbits, temp;
404
405 base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
406 entry->mask_base = (void __iomem *)(long)base;
407
408 /* All MSIs are unmasked by default, Mask them all */ 408 /* All MSIs are unmasked by default, Mask them all */
409 pci_read_config_dword(dev, 409 pci_read_config_dword(dev, base, &maskbits);
410 msi_mask_bits_reg(pos, entry->msi_attrib.is_64),
411 &maskbits);
412 temp = (1 << multi_msi_capable(control)); 410 temp = (1 << multi_msi_capable(control));
413 temp = ((temp - 1) & ~temp); 411 temp = ((temp - 1) & ~temp);
414 maskbits |= temp; 412 maskbits |= temp;
415 pci_write_config_dword(dev, entry->msi_attrib.is_64, maskbits); 413 pci_write_config_dword(dev, base, maskbits);
416 entry->msi_attrib.maskbits_mask = temp; 414 entry->msi_attrib.maskbits_mask = temp;
417 } 415 }
418 list_add_tail(&entry->list, &dev->msi_list); 416 list_add_tail(&entry->list, &dev->msi_list);
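Editor's note: the msi_capability_init() rework reads the mask register once through a local base offset and derives the bits to mask from the device's multi-message capability. The bit expression is easy to verify in isolation; the snippet below reproduces only that arithmetic, not the config-space accesses:

    #include <stdio.h>

    int main(void)
    {
        for (unsigned int n = 0; n <= 5; n++) { /* MSI allows 1..32 vectors */
            unsigned int temp = 1u << n;
            temp = (temp - 1) & ~temp;          /* same expression as in msi.c */
            printf("multi_msi_capable=%u -> mask bits 0x%08x\n", n, temp);
        }
        return 0;
    }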
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index c697f2680856..9de07b75b993 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -355,17 +355,27 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
355 int i = 0; 355 int i = 0;
356 356
357 if (drv && drv->suspend) { 357 if (drv && drv->suspend) {
358 pci_dev->state_saved = false;
359
358 i = drv->suspend(pci_dev, state); 360 i = drv->suspend(pci_dev, state);
359 suspend_report_result(drv->suspend, i); 361 suspend_report_result(drv->suspend, i);
360 } else { 362 if (i)
361 pci_save_state(pci_dev); 363 return i;
362 /* 364
363 * This is for compatibility with existing code with legacy PM 365 if (pci_dev->state_saved)
364 * support. 366 goto Fixup;
365 */ 367
366 pci_pm_set_unknown_state(pci_dev); 368 if (WARN_ON_ONCE(pci_dev->current_state != PCI_D0))
369 goto Fixup;
367 } 370 }
368 371
372 pci_save_state(pci_dev);
373 /*
374 * This is for compatibility with existing code with legacy PM support.
375 */
376 pci_pm_set_unknown_state(pci_dev);
377
378 Fixup:
369 pci_fixup_device(pci_fixup_suspend, pci_dev); 379 pci_fixup_device(pci_fixup_suspend, pci_dev);
370 380
371 return i; 381 return i;
@@ -386,81 +396,34 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
386 396
387static int pci_legacy_resume_early(struct device *dev) 397static int pci_legacy_resume_early(struct device *dev)
388{ 398{
389 int error = 0;
390 struct pci_dev * pci_dev = to_pci_dev(dev); 399 struct pci_dev * pci_dev = to_pci_dev(dev);
391 struct pci_driver * drv = pci_dev->driver; 400 struct pci_driver * drv = pci_dev->driver;
392 401
393 pci_fixup_device(pci_fixup_resume_early, pci_dev); 402 return drv && drv->resume_early ?
394 403 drv->resume_early(pci_dev) : 0;
395 if (drv && drv->resume_early)
396 error = drv->resume_early(pci_dev);
397 return error;
398} 404}
399 405
400static int pci_legacy_resume(struct device *dev) 406static int pci_legacy_resume(struct device *dev)
401{ 407{
402 int error;
403 struct pci_dev * pci_dev = to_pci_dev(dev); 408 struct pci_dev * pci_dev = to_pci_dev(dev);
404 struct pci_driver * drv = pci_dev->driver; 409 struct pci_driver * drv = pci_dev->driver;
405 410
406 pci_fixup_device(pci_fixup_resume, pci_dev); 411 pci_fixup_device(pci_fixup_resume, pci_dev);
407 412
408 if (drv && drv->resume) { 413 return drv && drv->resume ?
409 error = drv->resume(pci_dev); 414 drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
410 } else {
411 /* restore the PCI config space */
412 pci_restore_state(pci_dev);
413 error = pci_pm_reenable_device(pci_dev);
414 }
415 return error;
416} 415}
417 416
418/* Auxiliary functions used by the new power management framework */ 417/* Auxiliary functions used by the new power management framework */
419 418
420static int pci_restore_standard_config(struct pci_dev *pci_dev)
421{
422 struct pci_dev *parent = pci_dev->bus->self;
423 int error = 0;
424
425 /* Check if the device's bus is operational */
426 if (!parent || parent->current_state == PCI_D0) {
427 pci_restore_state(pci_dev);
428 pci_update_current_state(pci_dev, PCI_D0);
429 } else {
430 dev_warn(&pci_dev->dev, "unable to restore config, "
431 "bridge %s in low power state D%d\n", pci_name(parent),
432 parent->current_state);
433 pci_dev->current_state = PCI_UNKNOWN;
434 error = -EAGAIN;
435 }
436
437 return error;
438}
439
440static bool pci_is_bridge(struct pci_dev *pci_dev)
441{
442 return !!(pci_dev->subordinate);
443}
444
445static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) 419static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
446{ 420{
447 if (pci_restore_standard_config(pci_dev)) 421 pci_restore_standard_config(pci_dev);
448 pci_fixup_device(pci_fixup_resume_early, pci_dev); 422 pci_fixup_device(pci_fixup_resume_early, pci_dev);
449} 423}
450 424
451static int pci_pm_default_resume(struct pci_dev *pci_dev) 425static int pci_pm_default_resume(struct pci_dev *pci_dev)
452{ 426{
453 /*
454 * pci_restore_standard_config() should have been called once already,
455 * but it would have failed if the device's parent bridge had not been
456 * in power state D0 at that time. Check it and try again if necessary.
457 */
458 if (pci_dev->current_state == PCI_UNKNOWN) {
459 int error = pci_restore_standard_config(pci_dev);
460 if (error)
461 return error;
462 }
463
464 pci_fixup_device(pci_fixup_resume, pci_dev); 427 pci_fixup_device(pci_fixup_resume, pci_dev);
465 428
466 if (!pci_is_bridge(pci_dev)) 429 if (!pci_is_bridge(pci_dev))
@@ -575,11 +538,11 @@ static int pci_pm_resume_noirq(struct device *dev)
575 struct device_driver *drv = dev->driver; 538 struct device_driver *drv = dev->driver;
576 int error = 0; 539 int error = 0;
577 540
541 pci_pm_default_resume_noirq(pci_dev);
542
578 if (pci_has_legacy_pm_support(pci_dev)) 543 if (pci_has_legacy_pm_support(pci_dev))
579 return pci_legacy_resume_early(dev); 544 return pci_legacy_resume_early(dev);
580 545
581 pci_pm_default_resume_noirq(pci_dev);
582
583 if (drv && drv->pm && drv->pm->resume_noirq) 546 if (drv && drv->pm && drv->pm->resume_noirq)
584 error = drv->pm->resume_noirq(dev); 547 error = drv->pm->resume_noirq(dev);
585 548
@@ -730,11 +693,11 @@ static int pci_pm_restore_noirq(struct device *dev)
730 struct device_driver *drv = dev->driver; 693 struct device_driver *drv = dev->driver;
731 int error = 0; 694 int error = 0;
732 695
696 pci_pm_default_resume_noirq(pci_dev);
697
733 if (pci_has_legacy_pm_support(pci_dev)) 698 if (pci_has_legacy_pm_support(pci_dev))
734 return pci_legacy_resume_early(dev); 699 return pci_legacy_resume_early(dev);
735 700
736 pci_pm_default_resume_noirq(pci_dev);
737
738 if (drv && drv->pm && drv->pm->restore_noirq) 701 if (drv && drv->pm && drv->pm->restore_noirq)
739 error = drv->pm->restore_noirq(dev); 702 error = drv->pm->restore_noirq(dev);
740 703
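Editor's note: the pci_legacy_suspend() rework only falls back to pci_save_state() and pci_pm_set_unknown_state() when the driver's ->suspend callback neither failed, nor saved the state itself, nor moved the device out of D0. A compact restatement of that decision flow with stub helpers, not the real PCI API:

    #include <stdbool.h>
    #include <stdio.h>

    enum { PCI_D0 = 0, PCI_D3hot = 3 };

    struct fake_pci_dev {
        bool state_saved;
        int current_state;
    };

    /* Stand-ins for the helpers touched by the patch. */
    static void save_state(struct fake_pci_dev *d)        { d->state_saved = true; puts("state saved"); }
    static void set_unknown_state(struct fake_pci_dev *d) { (void)d; puts("power state unknown"); }
    static void fixup_suspend(struct fake_pci_dev *d)     { (void)d; puts("suspend fixups"); }

    static int legacy_suspend(struct fake_pci_dev *dev, int (*drv_suspend)(struct fake_pci_dev *))
    {
        int error = 0;

        if (drv_suspend) {
            dev->state_saved = false;
            error = drv_suspend(dev);
            if (error)
                return error;               /* driver failure: no fixups */
            if (dev->state_saved)
                goto fixup;                 /* driver already saved config space */
            if (dev->current_state != PCI_D0)
                goto fixup;                 /* driver changed the power state itself */
        }

        save_state(dev);
        set_unknown_state(dev);
    fixup:
        fixup_suspend(dev);
        return error;
    }

    static int driver_cb(struct fake_pci_dev *d) { d->current_state = PCI_D3hot; return 0; }

    int main(void)
    {
        struct fake_pci_dev dev = { .current_state = PCI_D0 };
        return legacy_suspend(&dev, driver_cb);
    }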
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e491fdedf705..17bd9325a245 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -22,7 +22,7 @@
22#include <asm/dma.h> /* isa_dma_bridge_buggy */ 22#include <asm/dma.h> /* isa_dma_bridge_buggy */
23#include "pci.h" 23#include "pci.h"
24 24
25unsigned int pci_pm_d3_delay = 10; 25unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
26 26
27#ifdef CONFIG_PCI_DOMAINS 27#ifdef CONFIG_PCI_DOMAINS
28int pci_domains_supported = 1; 28int pci_domains_supported = 1;
@@ -426,6 +426,7 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
426 * given PCI device 426 * given PCI device
427 * @dev: PCI device to handle. 427 * @dev: PCI device to handle.
428 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. 428 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
429 * @wait: If 'true', wait for the device to change its power state
429 * 430 *
430 * RETURN VALUE: 431 * RETURN VALUE:
431 * -EINVAL if the requested state is invalid. 432 * -EINVAL if the requested state is invalid.
@@ -435,7 +436,7 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
435 * 0 if device's power state has been successfully changed. 436 * 0 if device's power state has been successfully changed.
436 */ 437 */
437static int 438static int
438pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) 439pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
439{ 440{
440 u16 pmcsr; 441 u16 pmcsr;
441 bool need_restore = false; 442 bool need_restore = false;
@@ -480,8 +481,10 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
480 break; 481 break;
481 case PCI_UNKNOWN: /* Boot-up */ 482 case PCI_UNKNOWN: /* Boot-up */
482 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot 483 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
483 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) 484 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) {
484 need_restore = true; 485 need_restore = true;
486 wait = true;
487 }
485 /* Fall-through: force to D0 */ 488 /* Fall-through: force to D0 */
486 default: 489 default:
487 pmcsr = 0; 490 pmcsr = 0;
@@ -491,12 +494,15 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
491 /* enter specified state */ 494 /* enter specified state */
492 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); 495 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
493 496
497 if (!wait)
498 return 0;
499
494 /* Mandatory power management transition delays */ 500 /* Mandatory power management transition delays */
495 /* see PCI PM 1.1 5.6.1 table 18 */ 501 /* see PCI PM 1.1 5.6.1 table 18 */
496 if (state == PCI_D3hot || dev->current_state == PCI_D3hot) 502 if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
497 msleep(pci_pm_d3_delay); 503 msleep(pci_pm_d3_delay);
498 else if (state == PCI_D2 || dev->current_state == PCI_D2) 504 else if (state == PCI_D2 || dev->current_state == PCI_D2)
499 udelay(200); 505 udelay(PCI_PM_D2_DELAY);
500 506
501 dev->current_state = state; 507 dev->current_state = state;
502 508
@@ -515,7 +521,7 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
515 if (need_restore) 521 if (need_restore)
516 pci_restore_bars(dev); 522 pci_restore_bars(dev);
517 523
518 if (dev->bus->self) 524 if (wait && dev->bus->self)
519 pcie_aspm_pm_state_change(dev->bus->self); 525 pcie_aspm_pm_state_change(dev->bus->self);
520 526
521 return 0; 527 return 0;
@@ -585,7 +591,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
585 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) 591 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
586 return 0; 592 return 0;
587 593
588 error = pci_raw_set_power_state(dev, state); 594 error = pci_raw_set_power_state(dev, state, true);
589 595
590 if (state > PCI_D0 && platform_pci_power_manageable(dev)) { 596 if (state > PCI_D0 && platform_pci_power_manageable(dev)) {
591 /* Allow the platform to finalize the transition */ 597 /* Allow the platform to finalize the transition */
@@ -730,6 +736,7 @@ pci_save_state(struct pci_dev *dev)
730 /* XXX: 100% dword access ok here? */ 736 /* XXX: 100% dword access ok here? */
731 for (i = 0; i < 16; i++) 737 for (i = 0; i < 16; i++)
732 pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]); 738 pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
739 dev->state_saved = true;
733 if ((i = pci_save_pcie_state(dev)) != 0) 740 if ((i = pci_save_pcie_state(dev)) != 0)
734 return i; 741 return i;
735 if ((i = pci_save_pcix_state(dev)) != 0) 742 if ((i = pci_save_pcix_state(dev)) != 0)
@@ -1374,6 +1381,50 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1374} 1381}
1375 1382
1376/** 1383/**
1384 * pci_restore_standard_config - restore standard config registers of PCI device
1385 * @dev: PCI device to handle
1386 *
1387 * This function assumes that the device's configuration space is accessible.
1388 * If the device needs to be powered up, the function will wait for it to
1389 * change the state.
1390 */
1391int pci_restore_standard_config(struct pci_dev *dev)
1392{
1393 pci_power_t prev_state;
1394 int error;
1395
1396 pci_restore_state(dev);
1397 pci_update_current_state(dev, PCI_D0);
1398
1399 prev_state = dev->current_state;
1400 if (prev_state == PCI_D0)
1401 return 0;
1402
1403 error = pci_raw_set_power_state(dev, PCI_D0, false);
1404 if (error)
1405 return error;
1406
1407 if (pci_is_bridge(dev)) {
1408 if (prev_state > PCI_D1)
1409 mdelay(PCI_PM_BUS_WAIT);
1410 } else {
1411 switch(prev_state) {
1412 case PCI_D3cold:
1413 case PCI_D3hot:
1414 mdelay(pci_pm_d3_delay);
1415 break;
1416 case PCI_D2:
1417 udelay(PCI_PM_D2_DELAY);
1418 break;
1419 }
1420 }
1421
1422 dev->current_state = PCI_D0;
1423
1424 return 0;
1425}
1426
1427/**
 1377 * pci_enable_ari - enable ARI forwarding if hardware supports it 1428 * pci_enable_ari - enable ARI forwarding if hardware supports it
1378 * @dev: the PCI device 1429 * @dev: the PCI device
1379 */ 1430 */
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 1351bb4addde..26ddf78ac300 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -49,6 +49,12 @@ extern void pci_disable_enabled_device(struct pci_dev *dev);
49extern void pci_pm_init(struct pci_dev *dev); 49extern void pci_pm_init(struct pci_dev *dev);
50extern void platform_pci_wakeup_init(struct pci_dev *dev); 50extern void platform_pci_wakeup_init(struct pci_dev *dev);
51extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); 51extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
52extern int pci_restore_standard_config(struct pci_dev *dev);
53
54static inline bool pci_is_bridge(struct pci_dev *pci_dev)
55{
56 return !!(pci_dev->subordinate);
57}
52 58
53extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); 59extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
54extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); 60extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 7c789f0a94d7..de91ddab0a86 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -382,6 +382,11 @@ static int __init hp_wmi_input_setup(void)
382 case KE_SW: 382 case KE_SW:
383 set_bit(EV_SW, hp_wmi_input_dev->evbit); 383 set_bit(EV_SW, hp_wmi_input_dev->evbit);
384 set_bit(key->keycode, hp_wmi_input_dev->swbit); 384 set_bit(key->keycode, hp_wmi_input_dev->swbit);
385
386 /* Set initial dock state */
387 input_report_switch(hp_wmi_input_dev, key->keycode,
388 hp_wmi_dock_state());
389 input_sync(hp_wmi_input_dev);
385 break; 390 break;
386 } 391 }
387 } 392 }
@@ -441,6 +446,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
441 bluetooth_rfkill->toggle_radio = hp_wmi_bluetooth_set; 446 bluetooth_rfkill->toggle_radio = hp_wmi_bluetooth_set;
442 bluetooth_rfkill->user_claim_unsupported = 1; 447 bluetooth_rfkill->user_claim_unsupported = 1;
443 err = rfkill_register(bluetooth_rfkill); 448 err = rfkill_register(bluetooth_rfkill);
449 if (err)
444 goto register_bluetooth_error; 450 goto register_bluetooth_error;
445 } 451 }
446 452
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index d30bb766fcef..b56a704409d2 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -20,7 +20,7 @@
20 20
21static inline unsigned int get_irq_flags(struct resource *res) 21static inline unsigned int get_irq_flags(struct resource *res)
22{ 22{
23 unsigned int flags = IRQF_DISABLED | IRQF_SHARED; 23 unsigned int flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
24 24
25 flags |= res->flags & IRQF_TRIGGER_MASK; 25 flags |= res->flags & IRQF_TRIGGER_MASK;
26 26
diff --git a/drivers/regulator/bq24022.c b/drivers/regulator/bq24022.c
index 366565aba865..c175e38a4cd5 100644
--- a/drivers/regulator/bq24022.c
+++ b/drivers/regulator/bq24022.c
@@ -152,11 +152,7 @@ static void __exit bq24022_exit(void)
152 platform_driver_unregister(&bq24022_driver); 152 platform_driver_unregister(&bq24022_driver);
153} 153}
154 154
155/* 155module_init(bq24022_init);
156 * make sure this is probed before gpio_vbus and pda_power,
157 * but after asic3 or other GPIO expander drivers.
158 */
159subsys_initcall(bq24022_init);
160module_exit(bq24022_exit); 156module_exit(bq24022_exit);
161 157
162MODULE_AUTHOR("Philipp Zabel"); 158MODULE_AUTHOR("Philipp Zabel");
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 7aa35248181b..5056e23e4414 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1435,7 +1435,7 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
1435 struct platform_device *pdev; 1435 struct platform_device *pdev;
1436 int ret; 1436 int ret;
1437 1437
1438 if (lednum > ARRAY_SIZE(wm8350->pmic.led) || lednum < 0) { 1438 if (lednum >= ARRAY_SIZE(wm8350->pmic.led) || lednum < 0) {
1439 dev_err(wm8350->dev, "Invalid LED index %d\n", lednum); 1439 dev_err(wm8350->dev, "Invalid LED index %d\n", lednum);
1440 return -ENODEV; 1440 return -ENODEV;
1441 } 1441 }
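Editor's note: the wm8350 change is a one-character off-by-one fix; lednum > ARRAY_SIZE(...) let an index equal to the array size slip through. A tiny demonstration:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        int led[3] = { 0 };
        int lednum = 3;                         /* one past the end */

        if (lednum > (int)ARRAY_SIZE(led))      /* old check: 3 > 3 is false, bug */
            puts("old check rejects it");
        if (lednum >= (int)ARRAY_SIZE(led))     /* new check: 3 >= 3 is true, rejected */
            puts("new check rejects it");
        return 0;
    }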
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index acca6678cb2b..49c3bfa1afd7 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -70,7 +70,9 @@ static char debug_buffer[255];
70static void lcs_tasklet(unsigned long); 70static void lcs_tasklet(unsigned long);
71static void lcs_start_kernel_thread(struct work_struct *); 71static void lcs_start_kernel_thread(struct work_struct *);
72static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *); 72static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
73#ifdef CONFIG_IP_MULTICAST
73static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *); 74static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
75#endif /* CONFIG_IP_MULTICAST */
74static int lcs_recovery(void *ptr); 76static int lcs_recovery(void *ptr);
75 77
76/** 78/**
@@ -1285,6 +1287,8 @@ out:
1285 lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD); 1287 lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
1286 return 0; 1288 return 0;
1287} 1289}
1290#endif /* CONFIG_IP_MULTICAST */
1291
1288/** 1292/**
1289 * function called by net device to 1293 * function called by net device to
1290 * handle multicast address relevant things 1294 * handle multicast address relevant things
@@ -1292,6 +1296,7 @@ out:
1292static void 1296static void
1293lcs_set_multicast_list(struct net_device *dev) 1297lcs_set_multicast_list(struct net_device *dev)
1294{ 1298{
1299#ifdef CONFIG_IP_MULTICAST
1295 struct lcs_card *card; 1300 struct lcs_card *card;
1296 1301
1297 LCS_DBF_TEXT(4, trace, "setmulti"); 1302 LCS_DBF_TEXT(4, trace, "setmulti");
@@ -1299,9 +1304,8 @@ lcs_set_multicast_list(struct net_device *dev)
1299 1304
1300 if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) 1305 if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD))
1301 schedule_work(&card->kernel_thread_starter); 1306 schedule_work(&card->kernel_thread_starter);
1302}
1303
1304#endif /* CONFIG_IP_MULTICAST */ 1307#endif /* CONFIG_IP_MULTICAST */
1308}
1305 1309
1306static long 1310static long
1307lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb) 1311lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 91ef669d98f6..a1a511bdec8c 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1322,7 +1322,9 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
1322 &evt->ext_list_token); 1322 &evt->ext_list_token);
1323 1323
1324 if (!evt->ext_list) { 1324 if (!evt->ext_list) {
1325 scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n"); 1325 scsi_dma_unmap(scmd);
1326 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1327 scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
1326 return -ENOMEM; 1328 return -ENOMEM;
1327 } 1329 }
1328 } 1330 }
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 7225b6e2029e..257c24115de9 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1981,6 +1981,7 @@ void iscsi_pool_free(struct iscsi_pool *q)
1981 kfree(q->pool[i]); 1981 kfree(q->pool[i]);
1982 if (q->pool) 1982 if (q->pool)
1983 kfree(q->pool); 1983 kfree(q->pool);
1984 kfree(q->queue);
1984} 1985}
1985EXPORT_SYMBOL_GPL(iscsi_pool_free); 1986EXPORT_SYMBOL_GPL(iscsi_pool_free);
1986 1987
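Editor's note: the libiscsi hunk plugs a leak; iscsi_pool_free() released the item allocations and the pool array but never the kfifo buffer in q->queue. A user-space sketch of freeing all three allocations, with the fields simplified:

    #include <stdlib.h>

    struct fake_pool {
        void **pool;    /* array of item pointers */
        void *queue;    /* backing buffer of the fifo, the part that leaked */
        int max;
    };

    static void pool_free(struct fake_pool *q)
    {
        for (int i = 0; i < q->max; i++)
            free(q->pool[i]);
        free(q->pool);
        free(q->queue); /* the line added by the patch */
    }

    int main(void)
    {
        struct fake_pool q = { .max = 2 };
        q.pool = calloc(q.max, sizeof(*q.pool));
        if (!q.pool)
            return 1;
        for (int i = 0; i < q.max; i++)
            q.pool[i] = malloc(64);
        q.queue = malloc(256);
        pool_free(&q);
        return 0;
    }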
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index c7acef50d5da..33a3c13fd893 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1016,6 +1016,9 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1016 struct Scsi_Host *host = rport_to_shost(rport); 1016 struct Scsi_Host *host = rport_to_shost(rport);
1017 fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 1017 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1018 1018
1019 if (!fcport)
1020 return;
1021
1019 qla2x00_abort_fcport_cmds(fcport); 1022 qla2x00_abort_fcport_cmds(fcport);
1020 1023
1021 /* 1024 /*
@@ -1033,6 +1036,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
1033{ 1036{
1034 fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 1037 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1035 1038
1039 if (!fcport)
1040 return;
1041
1036 /* 1042 /*
1037 * At this point all fcport's software-states are cleared. Perform any 1043 * At this point all fcport's software-states are cleared. Perform any
1038 * final cleanup of firmware resources (PCBs and XCBs). 1044 * final cleanup of firmware resources (PCBs and XCBs).
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index ba4913353752..a336b4bc81a7 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -34,6 +34,7 @@ extern void qla24xx_update_fw_options(scsi_qla_host_t *);
34extern void qla81xx_update_fw_options(scsi_qla_host_t *); 34extern void qla81xx_update_fw_options(scsi_qla_host_t *);
35extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *); 35extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
36extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *); 36extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *);
37extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *);
37 38
38extern int qla2x00_loop_resync(scsi_qla_host_t *); 39extern int qla2x00_loop_resync(scsi_qla_host_t *);
39 40
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 9ad4d0968e5c..f6368a1d3021 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3562,6 +3562,9 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
3562 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE); 3562 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
3563 RD_REG_DWORD(&reg->hccr); 3563 RD_REG_DWORD(&reg->hccr);
3564 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3564 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3565
3566 if (IS_NOPOLLING_TYPE(ha))
3567 ha->isp_ops->enable_intrs(ha);
3565} 3568}
3566 3569
3567/* On sparc systems, obtain port and node WWN from firmware 3570/* On sparc systems, obtain port and node WWN from firmware
@@ -3847,6 +3850,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3847 uint32_t i; 3850 uint32_t i;
3848 struct qla_hw_data *ha = vha->hw; 3851 struct qla_hw_data *ha = vha->hw;
3849 struct req_que *req = ha->req_q_map[0]; 3852 struct req_que *req = ha->req_q_map[0];
3853
3854 qla_printk(KERN_INFO, ha,
3855 "FW: Loading from flash (%x)...\n", ha->flt_region_fw);
3856
3850 rval = QLA_SUCCESS; 3857 rval = QLA_SUCCESS;
3851 3858
3852 segments = FA_RISC_CODE_SEGMENTS; 3859 segments = FA_RISC_CODE_SEGMENTS;
@@ -4022,8 +4029,8 @@ fail_fw_integrity:
4022 return QLA_FUNCTION_FAILED; 4029 return QLA_FUNCTION_FAILED;
4023} 4030}
4024 4031
4025int 4032static int
4026qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 4033qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4027{ 4034{
4028 int rval; 4035 int rval;
4029 int segments, fragment; 4036 int segments, fragment;
@@ -4043,12 +4050,12 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4043 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 4050 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
4044 "from: " QLA_FW_URL ".\n"); 4051 "from: " QLA_FW_URL ".\n");
4045 4052
4046 /* Try to load RISC code from flash. */ 4053 return QLA_FUNCTION_FAILED;
4047 qla_printk(KERN_ERR, ha, "Attempting to load (potentially "
4048 "outdated) firmware from flash.\n");
4049 return qla24xx_load_risc_flash(vha, srisc_addr);
4050 } 4054 }
4051 4055
4056 qla_printk(KERN_INFO, ha,
4057 "FW: Loading via request-firmware...\n");
4058
4052 rval = QLA_SUCCESS; 4059 rval = QLA_SUCCESS;
4053 4060
4054 segments = FA_RISC_CODE_SEGMENTS; 4061 segments = FA_RISC_CODE_SEGMENTS;
@@ -4133,6 +4140,40 @@ fail_fw_integrity:
4133 return QLA_FUNCTION_FAILED; 4140 return QLA_FUNCTION_FAILED;
4134} 4141}
4135 4142
4143int
4144qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4145{
4146 int rval;
4147
4148 /*
4149 * FW Load priority:
4150 * 1) Firmware via request-firmware interface (.bin file).
4151 * 2) Firmware residing in flash.
4152 */
4153 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4154 if (rval == QLA_SUCCESS)
4155 return rval;
4156
4157 return qla24xx_load_risc_flash(vha, srisc_addr);
4158}
4159
4160int
4161qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4162{
4163 int rval;
4164
4165 /*
4166 * FW Load priority:
4167 * 1) Firmware residing in flash.
4168 * 2) Firmware via request-firmware interface (.bin file).
4169 */
4170 rval = qla24xx_load_risc_flash(vha, srisc_addr);
4171 if (rval == QLA_SUCCESS)
4172 return rval;
4173
4174 return qla24xx_load_risc_blob(vha, srisc_addr);
4175}
4176
4136void 4177void
4137qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) 4178qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4138{ 4179{
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 789fc576f222..e28ad81baf1e 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1868,6 +1868,7 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
1868static int 1868static int
1869qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 1869qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1870{ 1870{
1871#define MIN_MSIX_COUNT 2
1871 int i, ret; 1872 int i, ret;
1872 struct msix_entry *entries; 1873 struct msix_entry *entries;
1873 struct qla_msix_entry *qentry; 1874 struct qla_msix_entry *qentry;
@@ -1883,12 +1884,16 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1883 1884
1884 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 1885 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1885 if (ret) { 1886 if (ret) {
1887 if (ret < MIN_MSIX_COUNT)
1888 goto msix_failed;
1889
1886 qla_printk(KERN_WARNING, ha, 1890 qla_printk(KERN_WARNING, ha,
1887 "MSI-X: Failed to enable support -- %d/%d\n" 1891 "MSI-X: Failed to enable support -- %d/%d\n"
1888 " Retry with %d vectors\n", ha->msix_count, ret, ret); 1892 " Retry with %d vectors\n", ha->msix_count, ret, ret);
1889 ha->msix_count = ret; 1893 ha->msix_count = ret;
1890 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 1894 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1891 if (ret) { 1895 if (ret) {
1896msix_failed:
1892 qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable" 1897 qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
1893 " support, giving up -- %d/%d\n", 1898 " support, giving up -- %d/%d\n",
1894 ha->msix_count, ret); 1899 ha->msix_count, ret);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index db4df45234a5..f94ffbb98e95 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -58,14 +58,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
58 * seconds. This is to serialize actual issuing of mailbox cmds during 58 * seconds. This is to serialize actual issuing of mailbox cmds during
59 * non ISP abort time. 59 * non ISP abort time.
60 */ 60 */
61 if (!abort_active) { 61 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
62 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, 62 /* Timeout occurred. Return error. */
63 mcp->tov * HZ)) { 63 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. "
64 /* Timeout occurred. Return error. */ 64 "Exiting.\n", __func__, base_vha->host_no));
65 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. " 65 return QLA_FUNCTION_TIMEOUT;
66 "Exiting.\n", __func__, base_vha->host_no));
67 return QLA_FUNCTION_TIMEOUT;
68 }
69 } 66 }
70 67
71 ha->flags.mbox_busy = 1; 68 ha->flags.mbox_busy = 1;
@@ -265,8 +262,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
265 } 262 }
266 263
267 /* Allow next mbx cmd to come in. */ 264 /* Allow next mbx cmd to come in. */
268 if (!abort_active) 265 complete(&ha->mbx_cmd_comp);
269 complete(&ha->mbx_cmd_comp);
270 266
271 if (rval) { 267 if (rval) {
272 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, " 268 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, "
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index cf32653fe01a..c11f872d3e10 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -65,8 +65,6 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
65 65
66static void qla2x00_free_device(scsi_qla_host_t *); 66static void qla2x00_free_device(scsi_qla_host_t *);
67 67
68static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
69
70int ql2xfdmienable=1; 68int ql2xfdmienable=1;
71module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR); 69module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
72MODULE_PARM_DESC(ql2xfdmienable, 70MODULE_PARM_DESC(ql2xfdmienable,
@@ -800,6 +798,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
800 if (ha->isp_ops->abort_command(vha, sp, req)) { 798 if (ha->isp_ops->abort_command(vha, sp, req)) {
801 DEBUG2(printk("%s(%ld): abort_command " 799 DEBUG2(printk("%s(%ld): abort_command "
802 "mbx failed.\n", __func__, vha->host_no)); 800 "mbx failed.\n", __func__, vha->host_no));
801 ret = FAILED;
803 } else { 802 } else {
804 DEBUG3(printk("%s(%ld): abort_command " 803 DEBUG3(printk("%s(%ld): abort_command "
805 "mbx success.\n", __func__, vha->host_no)); 804 "mbx success.\n", __func__, vha->host_no));
@@ -1241,9 +1240,8 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
1241 * supported addressing method. 1240 * supported addressing method.
1242 */ 1241 */
1243static void 1242static void
1244qla2x00_config_dma_addressing(scsi_qla_host_t *vha) 1243qla2x00_config_dma_addressing(struct qla_hw_data *ha)
1245{ 1244{
1246 struct qla_hw_data *ha = vha->hw;
1247 /* Assume a 32bit DMA mask. */ 1245 /* Assume a 32bit DMA mask. */
1248 ha->flags.enable_64bit_addressing = 0; 1246 ha->flags.enable_64bit_addressing = 0;
1249 1247
@@ -1480,7 +1478,7 @@ static struct isp_operations qla81xx_isp_ops = {
1480 .reset_adapter = qla24xx_reset_adapter, 1478 .reset_adapter = qla24xx_reset_adapter,
1481 .nvram_config = qla81xx_nvram_config, 1479 .nvram_config = qla81xx_nvram_config,
1482 .update_fw_options = qla81xx_update_fw_options, 1480 .update_fw_options = qla81xx_update_fw_options,
1483 .load_risc = qla24xx_load_risc, 1481 .load_risc = qla81xx_load_risc,
1484 .pci_info_str = qla24xx_pci_info_str, 1482 .pci_info_str = qla24xx_pci_info_str,
1485 .fw_version_str = qla24xx_fw_version_str, 1483 .fw_version_str = qla24xx_fw_version_str,
1486 .intr_handler = qla24xx_intr_handler, 1484 .intr_handler = qla24xx_intr_handler,
@@ -1869,6 +1867,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1869 1867
1870 set_bit(0, (unsigned long *) ha->vp_idx_map); 1868 set_bit(0, (unsigned long *) ha->vp_idx_map);
1871 1869
1870 qla2x00_config_dma_addressing(ha);
1872 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); 1871 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
1873 if (!ret) { 1872 if (!ret) {
1874 qla_printk(KERN_WARNING, ha, 1873 qla_printk(KERN_WARNING, ha,
@@ -1888,13 +1887,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1888 "[ERROR] Failed to allocate memory for scsi_host\n"); 1887 "[ERROR] Failed to allocate memory for scsi_host\n");
1889 1888
1890 ret = -ENOMEM; 1889 ret = -ENOMEM;
1890 qla2x00_mem_free(ha);
1891 qla2x00_free_que(ha, req, rsp);
1891 goto probe_hw_failed; 1892 goto probe_hw_failed;
1892 } 1893 }
1893 1894
1894 pci_set_drvdata(pdev, base_vha); 1895 pci_set_drvdata(pdev, base_vha);
1895 1896
1896 qla2x00_config_dma_addressing(base_vha);
1897
1898 host = base_vha->host; 1897 host = base_vha->host;
1899 base_vha->req_ques[0] = req->id; 1898 base_vha->req_ques[0] = req->id;
1900 host->can_queue = req->length + 128; 1899 host->can_queue = req->length + 128;
@@ -1917,14 +1916,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1917 /* Set up the irqs */ 1916 /* Set up the irqs */
1918 ret = qla2x00_request_irqs(ha, rsp); 1917 ret = qla2x00_request_irqs(ha, rsp);
1919 if (ret) 1918 if (ret)
1920 goto probe_failed; 1919 goto probe_init_failed;
1921
1922 /* Alloc arrays of request and response ring ptrs */ 1920 /* Alloc arrays of request and response ring ptrs */
1923 if (!qla2x00_alloc_queues(ha)) { 1921 if (!qla2x00_alloc_queues(ha)) {
1924 qla_printk(KERN_WARNING, ha, 1922 qla_printk(KERN_WARNING, ha,
1925 "[ERROR] Failed to allocate memory for queue" 1923 "[ERROR] Failed to allocate memory for queue"
1926 " pointers\n"); 1924 " pointers\n");
1927 goto probe_failed; 1925 goto probe_init_failed;
1928 } 1926 }
1929 ha->rsp_q_map[0] = rsp; 1927 ha->rsp_q_map[0] = rsp;
1930 ha->req_q_map[0] = req; 1928 ha->req_q_map[0] = req;
@@ -1997,6 +1995,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1997 1995
1998 return 0; 1996 return 0;
1999 1997
1998probe_init_failed:
1999 qla2x00_free_que(ha, req, rsp);
2000 ha->max_queues = 0;
2001
2000probe_failed: 2002probe_failed:
2001 qla2x00_free_device(base_vha); 2003 qla2x00_free_device(base_vha);
2002 2004
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 303f8ee11f25..9c3b694c049d 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -944,9 +944,9 @@ qla24xx_unprotect_flash(struct qla_hw_data *ha)
944 if (!ha->fdt_wrt_disable) 944 if (!ha->fdt_wrt_disable)
945 return; 945 return;
946 946
947 /* Disable flash write-protection. */ 947 /* Disable flash write-protection, first clear SR protection bit */
948 qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0); 948 qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0);
949 /* Some flash parts need an additional zero-write to clear bits.*/ 949 /* Then write zero again to clear remaining SR bits.*/
950 qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0); 950 qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0);
951} 951}
952 952
@@ -980,12 +980,11 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
980 uint32_t dwords) 980 uint32_t dwords)
981{ 981{
982 int ret; 982 int ret;
983 uint32_t liter, miter; 983 uint32_t liter;
984 uint32_t sec_mask, rest_addr; 984 uint32_t sec_mask, rest_addr;
985 uint32_t fdata, findex; 985 uint32_t fdata;
986 dma_addr_t optrom_dma; 986 dma_addr_t optrom_dma;
987 void *optrom = NULL; 987 void *optrom = NULL;
988 uint32_t *s, *d;
989 struct qla_hw_data *ha = vha->hw; 988 struct qla_hw_data *ha = vha->hw;
990 989
991 ret = QLA_SUCCESS; 990 ret = QLA_SUCCESS;
@@ -1003,17 +1002,15 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1003 } 1002 }
1004 1003
1005 rest_addr = (ha->fdt_block_size >> 2) - 1; 1004 rest_addr = (ha->fdt_block_size >> 2) - 1;
1006 sec_mask = (ha->optrom_size >> 2) - (ha->fdt_block_size >> 2); 1005 sec_mask = ~rest_addr;
1007 1006
1008 qla24xx_unprotect_flash(ha); 1007 qla24xx_unprotect_flash(ha);
1009 1008
1010 for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) { 1009 for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) {
1011 1010 fdata = (faddr & sec_mask) << 2;
1012 findex = faddr;
1013 fdata = (findex & sec_mask) << 2;
1014 1011
1015 /* Are we at the beginning of a sector? */ 1012 /* Are we at the beginning of a sector? */
1016 if ((findex & rest_addr) == 0) { 1013 if ((faddr & rest_addr) == 0) {
1017 /* Do sector unprotect. */ 1014 /* Do sector unprotect. */
1018 if (ha->fdt_unprotect_sec_cmd) 1015 if (ha->fdt_unprotect_sec_cmd)
1019 qla24xx_write_flash_dword(ha, 1016 qla24xx_write_flash_dword(ha,
@@ -1024,7 +1021,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1024 (fdata & 0xff00) |((fdata << 16) & 1021 (fdata & 0xff00) |((fdata << 16) &
1025 0xff0000) | ((fdata >> 16) & 0xff)); 1022 0xff0000) | ((fdata >> 16) & 0xff));
1026 if (ret != QLA_SUCCESS) { 1023 if (ret != QLA_SUCCESS) {
1027 DEBUG9(qla_printk("Unable to flash sector: " 1024 DEBUG9(qla_printk("Unable to erase sector: "
1028 "address=%x.\n", faddr)); 1025 "address=%x.\n", faddr));
1029 break; 1026 break;
1030 } 1027 }
@@ -1033,9 +1030,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1033 /* Go with burst-write. */ 1030 /* Go with burst-write. */
1034 if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) { 1031 if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
1035 /* Copy data to DMA'ble buffer. */ 1032 /* Copy data to DMA'ble buffer. */
1036 for (miter = 0, s = optrom, d = dwptr; 1033 memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
1037 miter < OPTROM_BURST_DWORDS; miter++, s++, d++)
1038 *s = cpu_to_le32(*d);
1039 1034
1040 ret = qla2x00_load_ram(vha, optrom_dma, 1035 ret = qla2x00_load_ram(vha, optrom_dma,
1041 flash_data_addr(ha, faddr), 1036 flash_data_addr(ha, faddr),
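Editor's note: in qla_sup.c the sector mask is now derived directly from the block size: with rest_addr = (fdt_block_size >> 2) - 1, sec_mask is simply ~rest_addr and (faddr & rest_addr) == 0 marks a sector boundary. A quick stand-alone check of that arithmetic, using a 64 KiB block size purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t fdt_block_size = 0x10000;              /* 64 KiB sectors, example value */
        uint32_t rest_addr = (fdt_block_size >> 2) - 1; /* low bits: dword offset within a sector */
        uint32_t sec_mask = ~rest_addr;                 /* high bits: the sector number */

        for (uint32_t faddr = 0; faddr < 0x8000; faddr += 0x3000) {
            printf("faddr=0x%05x sector=0x%05x %s\n",
                   (unsigned)faddr, (unsigned)(faddr & sec_mask),
                   (faddr & rest_addr) == 0 ? "(sector start)" : "");
        }
        return 0;
    }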
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 808bab6ef06b..cfa4c11a4797 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.00-k1" 10#define QLA2XXX_VERSION "8.03.00-k2"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index d6be0762eb91..b586f27c3bd4 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -244,6 +244,7 @@ struct ddb_entry {
244 uint8_t ip_addr[ISCSI_IPADDR_SIZE]; 244 uint8_t ip_addr[ISCSI_IPADDR_SIZE];
245 uint8_t iscsi_name[ISCSI_NAME_SIZE]; /* 72 x48 */ 245 uint8_t iscsi_name[ISCSI_NAME_SIZE]; /* 72 x48 */
246 uint8_t iscsi_alias[0x20]; 246 uint8_t iscsi_alias[0x20];
247 uint8_t isid[6];
247}; 248};
248 249
249/* 250/*
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 109c5f5985ec..af8c3233e8ae 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -342,8 +342,12 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
342 DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no, 342 DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no,
343 __func__, fw_ddb_index)); 343 __func__, fw_ddb_index));
344 list_for_each_entry(ddb_entry, &ha->ddb_list, list) { 344 list_for_each_entry(ddb_entry, &ha->ddb_list, list) {
345 if (memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsi_name, 345 if ((memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsi_name,
346 ISCSI_NAME_SIZE) == 0) { 346 ISCSI_NAME_SIZE) == 0) &&
347 (ddb_entry->tpgt ==
348 le32_to_cpu(fw_ddb_entry->tgt_portal_grp)) &&
349 (memcmp(ddb_entry->isid, fw_ddb_entry->isid,
350 sizeof(ddb_entry->isid)) == 0)) {
347 found++; 351 found++;
348 break; 352 break;
349 } 353 }
@@ -430,6 +434,8 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
430 434
431 ddb_entry->port = le16_to_cpu(fw_ddb_entry->port); 435 ddb_entry->port = le16_to_cpu(fw_ddb_entry->port);
432 ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 436 ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
437 memcpy(ddb_entry->isid, fw_ddb_entry->isid, sizeof(ddb_entry->isid));
438
433 memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], 439 memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
434 min(sizeof(ddb_entry->iscsi_name), 440 min(sizeof(ddb_entry->iscsi_name),
435 sizeof(fw_ddb_entry->iscsi_name))); 441 sizeof(fw_ddb_entry->iscsi_name)));
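Editor's note: qla4xxx_get_ddb_entry() now matches a firmware DDB against the driver's list on a composite key: iSCSI name, target portal group tag and ISID, rather than the name alone. Roughly this comparison, with simplified types and approximate sizes:

    #include <string.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ddb_key {
        char     iscsi_name[224];
        uint32_t tpgt;
        uint8_t  isid[6];
    };

    static bool ddb_matches(const struct ddb_key *a, const struct ddb_key *b)
    {
        return memcmp(a->iscsi_name, b->iscsi_name, sizeof(a->iscsi_name)) == 0 &&
               a->tpgt == b->tpgt &&
               memcmp(a->isid, b->isid, sizeof(a->isid)) == 0;
    }

    int main(void)
    {
        struct ddb_key x = { .iscsi_name = "iqn.example:target0", .tpgt = 1,
                             .isid = { 0x80, 0, 0, 0, 0, 1 } };
        struct ddb_key y = x;
        y.isid[5] = 2;                  /* same name and tpgt, different ISID */
        printf("match: %d\n", ddb_matches(&x, &y));     /* prints 0 */
        return 0;
    }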
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 2a3671233b15..536d8e510f66 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -806,6 +806,8 @@ pci_default_setup(struct serial_private *priv,
806#define PCI_SUBDEVICE_ID_OCTPRO422 0x0208 806#define PCI_SUBDEVICE_ID_OCTPRO422 0x0208
807#define PCI_SUBDEVICE_ID_POCTAL232 0x0308 807#define PCI_SUBDEVICE_ID_POCTAL232 0x0308
808#define PCI_SUBDEVICE_ID_POCTAL422 0x0408 808#define PCI_SUBDEVICE_ID_POCTAL422 0x0408
809#define PCI_VENDOR_ID_ADVANTECH 0x13fe
810#define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620
809 811
810/* Unknown vendors/cards - this should not be in linux/pci_ids.h */ 812/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
811#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 813#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
@@ -2152,6 +2154,10 @@ static int pciserial_resume_one(struct pci_dev *dev)
2152#endif 2154#endif
2153 2155
2154static struct pci_device_id serial_pci_tbl[] = { 2156static struct pci_device_id serial_pci_tbl[] = {
 2157 /* Advantech uses PCI_DEVICE_ID_ADVANTECH_PCI3620 (0x3620) as 'PCI_SUBVENDOR_ID' */
2158 { PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI3620,
2159 PCI_DEVICE_ID_ADVANTECH_PCI3620, 0x0001, 0, 0,
2160 pbn_b2_8_921600 },
2155 { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V960, 2161 { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V960,
2156 PCI_SUBVENDOR_ID_CONNECT_TECH, 2162 PCI_SUBVENDOR_ID_CONNECT_TECH,
2157 PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232, 0, 0, 2163 PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232, 0, 0,
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 3e525e38a5d9..7d7f576da202 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -982,7 +982,7 @@ config SERIAL_SH_SCI_CONSOLE
982 982
983config SERIAL_PNX8XXX 983config SERIAL_PNX8XXX
984 bool "Enable PNX8XXX SoCs' UART Support" 984 bool "Enable PNX8XXX SoCs' UART Support"
985 depends on MIPS && SOC_PNX8550 985 depends on MIPS && (SOC_PNX8550 || SOC_PNX833X)
986 select SERIAL_CORE 986 select SERIAL_CORE
987 help 987 help
988 If you have a MIPS-based Philips SoC such as PNX8550 or PNX8330 988 If you have a MIPS-based Philips SoC such as PNX8550 or PNX8330
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c
index 338cf8a08b43..92187e28608a 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/serial/jsm/jsm_driver.c
@@ -180,7 +180,7 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
180 return rc; 180 return rc;
181} 181}
182 182
183static void jsm_remove_one(struct pci_dev *pdev) 183static void __devexit jsm_remove_one(struct pci_dev *pdev)
184{ 184{
185 struct jsm_board *brd = pci_get_drvdata(pdev); 185 struct jsm_board *brd = pci_get_drvdata(pdev);
186 int i = 0; 186 int i = 0;
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index 3547558d2caf..324c74d2f666 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -161,6 +161,11 @@ static void jsm_tty_stop_rx(struct uart_port *port)
161 channel->ch_bd->bd_ops->disable_receiver(channel); 161 channel->ch_bd->bd_ops->disable_receiver(channel);
162} 162}
163 163
164static void jsm_tty_enable_ms(struct uart_port *port)
165{
166 /* Nothing needed */
167}
168
164static void jsm_tty_break(struct uart_port *port, int break_state) 169static void jsm_tty_break(struct uart_port *port, int break_state)
165{ 170{
166 unsigned long lock_flags; 171 unsigned long lock_flags;
@@ -345,6 +350,7 @@ static struct uart_ops jsm_ops = {
345 .start_tx = jsm_tty_start_tx, 350 .start_tx = jsm_tty_start_tx,
346 .send_xchar = jsm_tty_send_xchar, 351 .send_xchar = jsm_tty_send_xchar,
347 .stop_rx = jsm_tty_stop_rx, 352 .stop_rx = jsm_tty_stop_rx,
353 .enable_ms = jsm_tty_enable_ms,
348 .break_ctl = jsm_tty_break, 354 .break_ctl = jsm_tty_break,
349 .startup = jsm_tty_open, 355 .startup = jsm_tty_open,
350 .shutdown = jsm_tty_close, 356 .shutdown = jsm_tty_close,
diff --git a/drivers/serial/mcf.c b/drivers/serial/mcf.c
index b2001c5b145c..56841fe5f483 100644
--- a/drivers/serial/mcf.c
+++ b/drivers/serial/mcf.c
@@ -212,10 +212,18 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
212{ 212{
213 unsigned long flags; 213 unsigned long flags;
214 unsigned int baud, baudclk; 214 unsigned int baud, baudclk;
215#if defined(CONFIG_M5272)
216 unsigned int baudfr;
217#endif
215 unsigned char mr1, mr2; 218 unsigned char mr1, mr2;
216 219
217 baud = uart_get_baud_rate(port, termios, old, 0, 230400); 220 baud = uart_get_baud_rate(port, termios, old, 0, 230400);
221#if defined(CONFIG_M5272)
222 baudclk = (MCF_BUSCLK / baud) / 32;
223 baudfr = (((MCF_BUSCLK / baud) + 1) / 2) % 16;
224#else
218 baudclk = ((MCF_BUSCLK / baud) + 16) / 32; 225 baudclk = ((MCF_BUSCLK / baud) + 16) / 32;
226#endif
219 227
220 mr1 = MCFUART_MR1_RXIRQRDY | MCFUART_MR1_RXERRCHAR; 228 mr1 = MCFUART_MR1_RXIRQRDY | MCFUART_MR1_RXERRCHAR;
221 mr2 = 0; 229 mr2 = 0;
@@ -262,6 +270,9 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
262 writeb(mr2, port->membase + MCFUART_UMR); 270 writeb(mr2, port->membase + MCFUART_UMR);
263 writeb((baudclk & 0xff00) >> 8, port->membase + MCFUART_UBG1); 271 writeb((baudclk & 0xff00) >> 8, port->membase + MCFUART_UBG1);
264 writeb((baudclk & 0xff), port->membase + MCFUART_UBG2); 272 writeb((baudclk & 0xff), port->membase + MCFUART_UBG2);
273#if defined(CONFIG_M5272)
274 writeb((baudfr & 0x0f), port->membase + MCFUART_UFPD);
275#endif
265 writeb(MCFUART_UCSR_RXCLKTIMER | MCFUART_UCSR_TXCLKTIMER, 276 writeb(MCFUART_UCSR_RXCLKTIMER | MCFUART_UCSR_TXCLKTIMER,
266 port->membase + MCFUART_UCSR); 277 port->membase + MCFUART_UCSR);
267 writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE, 278 writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
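
On the M5272 the UART adds a fractional prescaler register (UFPD), so the divisor is split into an integer part and a sixteenth-step fraction. A worked example of the arithmetic above, assuming a hypothetical 66 MHz MCF_BUSCLK and 115200 baud:

/*
 * MCF_BUSCLK / baud              = 66000000 / 115200 = 572  (integer division)
 * baudclk = 572 / 32             = 17                       -> UBG1/UBG2
 * baudfr  = ((572 + 1) / 2) % 16 = 286 % 16 = 14            -> UFPD
 */
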
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index 38c600c0dbbf..3599828b9766 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -32,7 +32,9 @@
32#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \ 32#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \
33 defined(CONFIG_CPU_SUBTYPE_SH7721) 33 defined(CONFIG_CPU_SUBTYPE_SH7721)
34# define SCSCR_INIT(port) 0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */ 34# define SCSCR_INIT(port) 0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */
35#define SCIF_ORER 0x0200 /* overrun error bit */ 35# define PORT_PTCR 0xA405011EUL
36# define PORT_PVCR 0xA4050122UL
37# define SCIF_ORER 0x0200 /* overrun error bit */
36#elif defined(CONFIG_SH_RTS7751R2D) 38#elif defined(CONFIG_SH_RTS7751R2D)
37# define SCSPTR1 0xFFE0001C /* 8 bit SCIF */ 39# define SCSPTR1 0xFFE0001C /* 8 bit SCIF */
38# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */ 40# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
@@ -393,6 +395,7 @@ SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16)
393SCIx_FNS(SCxTDR, 0x20, 8, 0x0c, 8) 395SCIx_FNS(SCxTDR, 0x20, 8, 0x0c, 8)
394SCIx_FNS(SCxSR, 0x14, 16, 0x10, 16) 396SCIx_FNS(SCxSR, 0x14, 16, 0x10, 16)
395SCIx_FNS(SCxRDR, 0x24, 8, 0x14, 8) 397SCIx_FNS(SCxRDR, 0x24, 8, 0x14, 8)
398SCIx_FNS(SCSPTR, 0, 0, 0, 0)
396SCIF_FNS(SCTDSR, 0x0c, 8) 399SCIF_FNS(SCTDSR, 0x0c, 8)
397SCIF_FNS(SCFER, 0x10, 16) 400SCIF_FNS(SCFER, 0x10, 16)
398SCIF_FNS(SCFCR, 0x18, 16) 401SCIF_FNS(SCFCR, 0x18, 16)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 4a6fe01831a8..83a185d52961 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -230,17 +230,6 @@ config SPI_XILINX
230# 230#
231comment "SPI Protocol Masters" 231comment "SPI Protocol Masters"
232 232
233config SPI_AT25
234 tristate "SPI EEPROMs from most vendors"
235 depends on SYSFS
236 help
237 Enable this driver to get read/write support to most SPI EEPROMs,
238 after you configure the board init code to know about each eeprom
239 on your target board.
240
241 This driver can also be built as a module. If so, the module
242 will be called at25.
243
244config SPI_SPIDEV 233config SPI_SPIDEV
245 tristate "User mode SPI device driver support" 234 tristate "User mode SPI device driver support"
246 depends on EXPERIMENTAL 235 depends on EXPERIMENTAL
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 5e9f521b8844..5d0451936d86 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
33# ... add above this line ... 33# ... add above this line ...
34 34
35# SPI protocol drivers (device/link on bus) 35# SPI protocol drivers (device/link on bus)
36obj-$(CONFIG_SPI_AT25) += at25.o
37obj-$(CONFIG_SPI_SPIDEV) += spidev.o 36obj-$(CONFIG_SPI_SPIDEV) += spidev.o
38obj-$(CONFIG_SPI_TLE62X0) += tle62x0.o 37obj-$(CONFIG_SPI_TLE62X0) += tle62x0.o
39# ... add above this line ... 38# ... add above this line ...
diff --git a/drivers/staging/agnx/agnx.h b/drivers/staging/agnx/agnx.h
index a75b0db3726c..20f36da62475 100644
--- a/drivers/staging/agnx/agnx.h
+++ b/drivers/staging/agnx/agnx.h
@@ -1,6 +1,8 @@
1#ifndef AGNX_H_ 1#ifndef AGNX_H_
2#define AGNX_H_ 2#define AGNX_H_
3 3
4#include <linux/io.h>
5
4#include "xmit.h" 6#include "xmit.h"
5 7
6#define PFX KBUILD_MODNAME ": " 8#define PFX KBUILD_MODNAME ": "
diff --git a/drivers/staging/altpciechdma/altpciechdma.c b/drivers/staging/altpciechdma/altpciechdma.c
index 8e2b4ca0651d..f516140ca976 100644
--- a/drivers/staging/altpciechdma/altpciechdma.c
+++ b/drivers/staging/altpciechdma/altpciechdma.c
@@ -531,7 +531,7 @@ static int __devinit dma_test(struct ape_dev *ape, struct pci_dev *dev)
531 goto fail; 531 goto fail;
532 532
533 /* allocate and map coherently-cached memory for a DMA-able buffer */ 533 /* allocate and map coherently-cached memory for a DMA-able buffer */
534 /* @see 2.6.26.2/Documentation/DMA-mapping.txt line 318 */ 534 /* @see Documentation/PCI/PCI-DMA-mapping.txt, near line 318 */
535 buffer_virt = (u8 *)pci_alloc_consistent(dev, PAGE_SIZE * 4, &buffer_bus); 535 buffer_virt = (u8 *)pci_alloc_consistent(dev, PAGE_SIZE * 4, &buffer_bus);
536 if (!buffer_virt) { 536 if (!buffer_virt) {
537 printk(KERN_DEBUG "Could not allocate coherent DMA buffer.\n"); 537 printk(KERN_DEBUG "Could not allocate coherent DMA buffer.\n");
@@ -846,7 +846,7 @@ static int __devinit probe(struct pci_dev *dev, const struct pci_device_id *id)
846 846
847#if 1 // @todo For now, disable 64-bit, because I do not understand the implications (DAC!) 847#if 1 // @todo For now, disable 64-bit, because I do not understand the implications (DAC!)
848 /* query for DMA transfer */ 848 /* query for DMA transfer */
849 /* @see Documentation/DMA-mapping.txt */ 849 /* @see Documentation/PCI/PCI-DMA-mapping.txt */
850 if (!pci_set_dma_mask(dev, DMA_64BIT_MASK)) { 850 if (!pci_set_dma_mask(dev, DMA_64BIT_MASK)) {
851 pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK); 851 pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK);
852 /* use 64-bit DMA */ 852 /* use 64-bit DMA */
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 6a4ceacb33f5..758131cad08a 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -319,6 +319,7 @@ int task_get_unused_fd_flags(struct task_struct *tsk, int flags)
319 int fd, error; 319 int fd, error;
320 struct fdtable *fdt; 320 struct fdtable *fdt;
321 unsigned long rlim_cur; 321 unsigned long rlim_cur;
322 unsigned long irqs;
322 323
323 if (files == NULL) 324 if (files == NULL)
324 return -ESRCH; 325 return -ESRCH;
@@ -335,12 +336,11 @@ repeat:
335 * N.B. For clone tasks sharing a files structure, this test 336 * N.B. For clone tasks sharing a files structure, this test
336 * will limit the total number of files that can be opened. 337 * will limit the total number of files that can be opened.
337 */ 338 */
338 rcu_read_lock(); 339 rlim_cur = 0;
339 if (tsk->signal) 340 if (lock_task_sighand(tsk, &irqs)) {
340 rlim_cur = tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur; 341 rlim_cur = tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
341 else 342 unlock_task_sighand(tsk, &irqs);
342 rlim_cur = 0; 343 }
343 rcu_read_unlock();
344 if (fd >= rlim_cur) 344 if (fd >= rlim_cur)
345 goto out; 345 goto out;
346 346
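
lock_task_sighand() pins the target task's signal state and returns non-NULL only while the task is still alive, so the RLIMIT_NOFILE read above no longer relies on an unlocked tsk->signal check. The general shape of the pattern, as a minimal sketch:

	unsigned long flags;
	unsigned long limit = 0;

	if (lock_task_sighand(tsk, &flags)) {
		/* tsk->signal is guaranteed valid while the sighand lock is held */
		limit = tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
		unlock_task_sighand(tsk, &flags);
	}
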
@@ -2649,14 +2649,14 @@ static void binder_vma_open(struct vm_area_struct *vma)
2649{ 2649{
2650 struct binder_proc *proc = vma->vm_private_data; 2650 struct binder_proc *proc = vma->vm_private_data;
2651 if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) 2651 if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
2652 printk(KERN_INFO "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot.pgprot); 2652 printk(KERN_INFO "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, pgprot_val(vma->vm_page_prot));
2653 dump_stack(); 2653 dump_stack();
2654} 2654}
2655static void binder_vma_close(struct vm_area_struct *vma) 2655static void binder_vma_close(struct vm_area_struct *vma)
2656{ 2656{
2657 struct binder_proc *proc = vma->vm_private_data; 2657 struct binder_proc *proc = vma->vm_private_data;
2658 if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) 2658 if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
2659 printk(KERN_INFO "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot.pgprot); 2659 printk(KERN_INFO "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, pgprot_val(vma->vm_page_prot));
2660 proc->vma = NULL; 2660 proc->vma = NULL;
2661} 2661}
2662 2662
@@ -2677,7 +2677,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
2677 vma->vm_end = vma->vm_start + SZ_4M; 2677 vma->vm_end = vma->vm_start + SZ_4M;
2678 2678
2679 if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) 2679 if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
2680 printk(KERN_INFO "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot.pgprot); 2680 printk(KERN_INFO "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, pgprot_val(vma->vm_page_prot));
2681 2681
2682 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 2682 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
2683 ret = -EPERM; 2683 ret = -EPERM;
diff --git a/drivers/staging/android/lowmemorykiller.txt b/drivers/staging/android/lowmemorykiller.txt
new file mode 100644
index 000000000000..bd5c0c028968
--- /dev/null
+++ b/drivers/staging/android/lowmemorykiller.txt
@@ -0,0 +1,16 @@
1The lowmemorykiller driver lets user-space specify a set of memory thresholds
2where processes with a range of oom_adj values will get killed. Specify the
3minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the
4number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both
5files take a comma separated list of numbers in ascending order.
6
7For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
8"1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill processes
9with an oom_adj value of 8 or higher when the free memory drops below 4096 pages
10and kill processes with an oom_adj value of 0 or higher when the free memory
11drops below 1024 pages.
12
13The driver considers memory used for caches to be free, but if a large
14percentage of the cached memory is locked, this can be very inaccurate
15and processes may not get killed until the normal oom killer is triggered.
16
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index bea68c9fc942..903270cbbe02 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -18,7 +18,7 @@
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/hrtimer.h> 19#include <linux/hrtimer.h>
20#include <linux/err.h> 20#include <linux/err.h>
21#include <asm/arch/gpio.h> 21#include <linux/gpio.h>
22 22
23#include "timed_gpio.h" 23#include "timed_gpio.h"
24 24
@@ -49,7 +49,8 @@ static ssize_t gpio_enable_show(struct device *dev, struct device_attribute *att
49 49
50 if (hrtimer_active(&gpio_data->timer)) { 50 if (hrtimer_active(&gpio_data->timer)) {
51 ktime_t r = hrtimer_get_remaining(&gpio_data->timer); 51 ktime_t r = hrtimer_get_remaining(&gpio_data->timer);
52 remaining = r.tv.sec * 1000 + r.tv.nsec / 1000000; 52 struct timeval t = ktime_to_timeval(r);
53 remaining = t.tv_sec * 1000 + t.tv_usec;
53 } else 54 } else
54 remaining = 0; 55 remaining = 0;
55 56
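
For reference, a milliseconds-remaining computation from a ktime_t via ktime_to_timeval() normally scales the microsecond field down as well; a minimal sketch (variable names hypothetical):

	struct timeval t = ktime_to_timeval(hrtimer_get_remaining(&gpio_data->timer));
	int remaining_ms = t.tv_sec * 1000 + t.tv_usec / 1000;   /* tv_usec is in microseconds */
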
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index b501bfb9c754..b47ca1e7e383 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -1,6 +1,7 @@
1config COMEDI 1config COMEDI
2 tristate "Data Acquision support (comedi)" 2 tristate "Data Acquision support (comedi)"
3 default N 3 default N
4 depends on m
4 ---help--- 5 ---help---
5 Enable support a wide range of data acquision devices 6 Enable support a wide range of data acquision devices
6 for Linux. 7 for Linux.
diff --git a/drivers/staging/meilhaus/Kconfig b/drivers/staging/meilhaus/Kconfig
index 6def83fa2c96..923af22a4686 100644
--- a/drivers/staging/meilhaus/Kconfig
+++ b/drivers/staging/meilhaus/Kconfig
@@ -4,6 +4,7 @@
4 4
5menuconfig MEILHAUS 5menuconfig MEILHAUS
6 tristate "Meilhaus support" 6 tristate "Meilhaus support"
7 depends on m
7 ---help--- 8 ---help---
8 If you have a Meilhaus card, say Y (or M) here. 9 If you have a Meilhaus card, say Y (or M) here.
9 10
@@ -18,7 +19,7 @@ if MEILHAUS
18config ME0600 19config ME0600
19 tristate "Meilhaus ME-600 support" 20 tristate "Meilhaus ME-600 support"
20 default n 21 default n
21 depends on PCI 22 depends on PCI && m
22 help 23 help
23 This driver supports the Meilhaus ME-600 family of boards 24 This driver supports the Meilhaus ME-600 family of boards
24 that do data collection and multipurpose I/O. 25 that do data collection and multipurpose I/O.
@@ -29,7 +30,7 @@ config ME0600
29config ME0900 30config ME0900
30 tristate "Meilhaus ME-900 support" 31 tristate "Meilhaus ME-900 support"
31 default n 32 default n
32 depends on PCI 33 depends on PCI && m
33 help 34 help
34 This driver supports the Meilhaus ME-900 family of boards 35 This driver supports the Meilhaus ME-900 family of boards
35 that do data collection and multipurpose I/O. 36 that do data collection and multipurpose I/O.
@@ -40,7 +41,7 @@ config ME0900
40config ME1000 41config ME1000
41 tristate "Meilhaus ME-1000 support" 42 tristate "Meilhaus ME-1000 support"
42 default n 43 default n
43 depends on PCI 44 depends on PCI && m
44 help 45 help
45 This driver supports the Meilhaus ME-1000 family of boards 46 This driver supports the Meilhaus ME-1000 family of boards
46 that do data collection and multipurpose I/O. 47 that do data collection and multipurpose I/O.
@@ -51,7 +52,7 @@ config ME1000
51config ME1400 52config ME1400
52 tristate "Meilhaus ME-1400 support" 53 tristate "Meilhaus ME-1400 support"
53 default n 54 default n
54 depends on PCI 55 depends on PCI && m
55 help 56 help
56 This driver supports the Meilhaus ME-1400 family of boards 57 This driver supports the Meilhaus ME-1400 family of boards
57 that do data collection and multipurpose I/O. 58 that do data collection and multipurpose I/O.
@@ -62,7 +63,7 @@ config ME1400
62config ME1600 63config ME1600
63 tristate "Meilhaus ME-1600 support" 64 tristate "Meilhaus ME-1600 support"
64 default n 65 default n
65 depends on PCI 66 depends on PCI && m
66 help 67 help
67 This driver supports the Meilhaus ME-1600 family of boards 68 This driver supports the Meilhaus ME-1600 family of boards
68 that do data collection and multipurpose I/O. 69 that do data collection and multipurpose I/O.
@@ -73,7 +74,7 @@ config ME1600
73config ME4600 74config ME4600
74 tristate "Meilhaus ME-4600 support" 75 tristate "Meilhaus ME-4600 support"
75 default n 76 default n
76 depends on PCI 77 depends on PCI && m
77 help 78 help
78 This driver supports the Meilhaus ME-4600 family of boards 79 This driver supports the Meilhaus ME-4600 family of boards
79 that do data collection and multipurpose I/O. 80 that do data collection and multipurpose I/O.
@@ -84,7 +85,7 @@ config ME4600
84config ME6000 85config ME6000
85 tristate "Meilhaus ME-6000 support" 86 tristate "Meilhaus ME-6000 support"
86 default n 87 default n
87 depends on PCI 88 depends on PCI && m
88 help 89 help
89 This driver supports the Meilhaus ME-6000 family of boards 90 This driver supports the Meilhaus ME-6000 family of boards
90 that do data collection and multipurpose I/O. 91 that do data collection and multipurpose I/O.
@@ -95,7 +96,7 @@ config ME6000
95config ME8100 96config ME8100
96 tristate "Meilhaus ME-8100 support" 97 tristate "Meilhaus ME-8100 support"
97 default n 98 default n
98 depends on PCI 99 depends on PCI && m
99 help 100 help
100 This driver supports the Meilhaus ME-8100 family of boards 101 This driver supports the Meilhaus ME-8100 family of boards
101 that do data collection and multipurpose I/O. 102 that do data collection and multipurpose I/O.
@@ -106,7 +107,7 @@ config ME8100
106config ME8200 107config ME8200
107 tristate "Meilhaus ME-8200 support" 108 tristate "Meilhaus ME-8200 support"
108 default n 109 default n
109 depends on PCI 110 depends on PCI && m
110 help 111 help
111 This driver supports the Meilhaus ME-8200 family of boards 112 This driver supports the Meilhaus ME-8200 family of boards
112 that do data collection and multipurpose I/O. 113 that do data collection and multipurpose I/O.
@@ -117,7 +118,7 @@ config ME8200
117config MEDUMMY 118config MEDUMMY
118 tristate "Meilhaus dummy driver" 119 tristate "Meilhaus dummy driver"
119 default n 120 default n
120 depends on PCI 121 depends on PCI && m
121 help 122 help
122 This provides a dummy driver for the Meilhaus driver package 123 This provides a dummy driver for the Meilhaus driver package
123 124
diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
index ec343ef53a85..0d111ddfabb2 100644
--- a/drivers/staging/poch/poch.c
+++ b/drivers/staging/poch/poch.c
@@ -1026,7 +1026,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
1026 } 1026 }
1027 break; 1027 break;
1028 case POCH_IOC_GET_COUNTERS: 1028 case POCH_IOC_GET_COUNTERS:
1029 if (access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters))) 1029 if (!access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters)))
1030 return -EFAULT; 1030 return -EFAULT;
1031 1031
1032 spin_lock_irq(&channel->counters_lock); 1032 spin_lock_irq(&channel->counters_lock);
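
access_ok() returns nonzero when the user-space range may be accessed, so the failure case has to test the negated result, as the fix above does. The canonical copy-out shape under this era's three-argument access_ok() (variable names hypothetical):

	if (!access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters)))
		return -EFAULT;
	if (copy_to_user(argp, &counters, sizeof(counters)))
		return -EFAULT;
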
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index 72e209276ea7..22f93dd0ba03 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -406,8 +406,20 @@ void usbip_start_threads(struct usbip_device *ud)
406 /* 406 /*
407 * threads are invoked per one device (per one connection). 407 * threads are invoked per one device (per one connection).
408 */ 408 */
409 kernel_thread(usbip_thread, (void *)&ud->tcp_rx, 0); 409 int retval;
410 kernel_thread(usbip_thread, (void *)&ud->tcp_tx, 0); 410
411 retval = kernel_thread(usbip_thread, (void *)&ud->tcp_rx, 0);
412 if (retval < 0) {
413 printk(KERN_ERR "Creating tcp_rx thread for ud %p failed.\n",
414 ud);
415 return;
416 }
417 retval = kernel_thread(usbip_thread, (void *)&ud->tcp_tx, 0);
418 if (retval < 0) {
419 printk(KERN_ERR "Creating tcp_tx thread for ud %p failed.\n",
420 ud);
421 return;
422 }
411 423
412 /* confirm threads are starting */ 424 /* confirm threads are starting */
413 wait_for_completion(&ud->tcp_rx.thread_done); 425 wait_for_completion(&ud->tcp_rx.thread_done);
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 8b7c419b876e..8bcde8cde554 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_USB_EHCI_HCD) += host/
13obj-$(CONFIG_USB_ISP116X_HCD) += host/ 13obj-$(CONFIG_USB_ISP116X_HCD) += host/
14obj-$(CONFIG_USB_OHCI_HCD) += host/ 14obj-$(CONFIG_USB_OHCI_HCD) += host/
15obj-$(CONFIG_USB_UHCI_HCD) += host/ 15obj-$(CONFIG_USB_UHCI_HCD) += host/
16obj-$(CONFIG_USB_FHCI_HCD) += host/
16obj-$(CONFIG_USB_SL811_HCD) += host/ 17obj-$(CONFIG_USB_SL811_HCD) += host/
17obj-$(CONFIG_USB_U132_HCD) += host/ 18obj-$(CONFIG_USB_U132_HCD) += host/
18obj-$(CONFIG_USB_R8A66597_HCD) += host/ 19obj-$(CONFIG_USB_R8A66597_HCD) += host/
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 00b47ea24f86..97ba4a985edc 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1349,6 +1349,12 @@ static struct usb_device_id acm_ids[] = {
1349 { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */ 1349 { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
1350 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1350 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1351 }, 1351 },
1352 { USB_DEVICE(0x0e8d, 0x3329), /* i-blue 747, Qstarz BT-Q1000, Holux M-241 */
1353 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1354 },
1355 { USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
1356 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1357 },
1352 { USB_DEVICE(0x0482, 0x0203), /* KYOCERA AH-K3001V */ 1358 { USB_DEVICE(0x0482, 0x0203), /* KYOCERA AH-K3001V */
1353 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1359 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1354 }, 1360 },
@@ -1370,6 +1376,9 @@ static struct usb_device_id acm_ids[] = {
1370 { USB_DEVICE(0x0572, 0x1321), /* Conexant USB MODEM CX93010 */ 1376 { USB_DEVICE(0x0572, 0x1321), /* Conexant USB MODEM CX93010 */
1371 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1377 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1372 }, 1378 },
1379 { USB_DEVICE(0x0572, 0x1324), /* Conexant USB MODEM RD02-D400 */
1380 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1381 },
1373 1382
1374 /* control interfaces with various AT-command sets */ 1383 /* control interfaces with various AT-command sets */
1375 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1384 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index b5775af3ba26..3f3ee1351930 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -226,6 +226,7 @@ static const struct quirk_printer_struct quirk_printers[] = {
226 { 0x0409, 0xf0be, USBLP_QUIRK_BIDIR }, /* NEC Picty920 (HP OEM) */ 226 { 0x0409, 0xf0be, USBLP_QUIRK_BIDIR }, /* NEC Picty920 (HP OEM) */
227 { 0x0409, 0xf1be, USBLP_QUIRK_BIDIR }, /* NEC Picty800 (HP OEM) */ 227 { 0x0409, 0xf1be, USBLP_QUIRK_BIDIR }, /* NEC Picty800 (HP OEM) */
228 { 0x0482, 0x0010, USBLP_QUIRK_BIDIR }, /* Kyocera Mita FS 820, by zut <kernel@zut.de> */ 228 { 0x0482, 0x0010, USBLP_QUIRK_BIDIR }, /* Kyocera Mita FS 820, by zut <kernel@zut.de> */
229 { 0x04f9, 0x000d, USBLP_QUIRK_BIDIR }, /* Brother Industries, Ltd HL-1440 Laser Printer */
229 { 0x04b8, 0x0202, USBLP_QUIRK_BAD_CLASS }, /* Seiko Epson Receipt Printer M129C */ 230 { 0x04b8, 0x0202, USBLP_QUIRK_BAD_CLASS }, /* Seiko Epson Receipt Printer M129C */
230 { 0, 0 } 231 { 0, 0 }
231}; 232};
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 26fece124e0e..7513bb083c15 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1700,7 +1700,7 @@ const struct file_operations usbdev_file_operations = {
1700 .release = usbdev_release, 1700 .release = usbdev_release,
1701}; 1701};
1702 1702
1703void usb_fs_classdev_common_remove(struct usb_device *udev) 1703static void usbdev_remove(struct usb_device *udev)
1704{ 1704{
1705 struct dev_state *ps; 1705 struct dev_state *ps;
1706 struct siginfo sinfo; 1706 struct siginfo sinfo;
@@ -1742,10 +1742,15 @@ static void usb_classdev_remove(struct usb_device *dev)
1742{ 1742{
1743 if (dev->usb_classdev) 1743 if (dev->usb_classdev)
1744 device_unregister(dev->usb_classdev); 1744 device_unregister(dev->usb_classdev);
1745 usb_fs_classdev_common_remove(dev);
1746} 1745}
1747 1746
1748static int usb_classdev_notify(struct notifier_block *self, 1747#else
1748#define usb_classdev_add(dev) 0
1749#define usb_classdev_remove(dev) do {} while (0)
1750
1751#endif
1752
1753static int usbdev_notify(struct notifier_block *self,
1749 unsigned long action, void *dev) 1754 unsigned long action, void *dev)
1750{ 1755{
1751 switch (action) { 1756 switch (action) {
@@ -1755,15 +1760,15 @@ static int usb_classdev_notify(struct notifier_block *self,
1755 break; 1760 break;
1756 case USB_DEVICE_REMOVE: 1761 case USB_DEVICE_REMOVE:
1757 usb_classdev_remove(dev); 1762 usb_classdev_remove(dev);
1763 usbdev_remove(dev);
1758 break; 1764 break;
1759 } 1765 }
1760 return NOTIFY_OK; 1766 return NOTIFY_OK;
1761} 1767}
1762 1768
1763static struct notifier_block usbdev_nb = { 1769static struct notifier_block usbdev_nb = {
1764 .notifier_call = usb_classdev_notify, 1770 .notifier_call = usbdev_notify,
1765}; 1771};
1766#endif
1767 1772
1768static struct cdev usb_device_cdev; 1773static struct cdev usb_device_cdev;
1769 1774
@@ -1798,9 +1803,8 @@ int __init usb_devio_init(void)
1798 * to /sys/dev 1803 * to /sys/dev
1799 */ 1804 */
1800 usb_classdev_class->dev_kobj = NULL; 1805 usb_classdev_class->dev_kobj = NULL;
1801
1802 usb_register_notify(&usbdev_nb);
1803#endif 1806#endif
1807 usb_register_notify(&usbdev_nb);
1804out: 1808out:
1805 return retval; 1809 return retval;
1806 1810
@@ -1811,8 +1815,8 @@ error_cdev:
1811 1815
1812void usb_devio_cleanup(void) 1816void usb_devio_cleanup(void)
1813{ 1817{
1814#ifdef CONFIG_USB_DEVICE_CLASS
1815 usb_unregister_notify(&usbdev_nb); 1818 usb_unregister_notify(&usbdev_nb);
1819#ifdef CONFIG_USB_DEVICE_CLASS
1816 class_destroy(usb_classdev_class); 1820 class_destroy(usb_classdev_class);
1817#endif 1821#endif
1818 cdev_del(&usb_device_cdev); 1822 cdev_del(&usb_device_cdev);
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 98760553bc95..d0a21a5f8201 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -284,7 +284,7 @@ static int usb_unbind_interface(struct device *dev)
284 * supports "soft" unbinding. 284 * supports "soft" unbinding.
285 */ 285 */
286 if (!driver->soft_unbind) 286 if (!driver->soft_unbind)
287 usb_disable_interface(udev, intf); 287 usb_disable_interface(udev, intf, false);
288 288
289 driver->disconnect(intf); 289 driver->disconnect(intf);
290 usb_cancel_queued_reset(intf); 290 usb_cancel_queued_reset(intf);
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 507741ed4482..c54fc40458b1 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -128,7 +128,6 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
128 } 128 }
129 129
130 pci_set_master(dev); 130 pci_set_master(dev);
131 device_set_wakeup_enable(&dev->dev, 1);
132 131
133 retval = usb_add_hcd(hcd, dev->irq, IRQF_DISABLED | IRQF_SHARED); 132 retval = usb_add_hcd(hcd, dev->irq, IRQF_DISABLED | IRQF_SHARED);
134 if (retval != 0) 133 if (retval != 0)
@@ -201,6 +200,7 @@ int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t message)
201 struct usb_hcd *hcd = pci_get_drvdata(dev); 200 struct usb_hcd *hcd = pci_get_drvdata(dev);
202 int retval = 0; 201 int retval = 0;
203 int wake, w; 202 int wake, w;
203 int has_pci_pm;
204 204
205 /* Root hub suspend should have stopped all downstream traffic, 205 /* Root hub suspend should have stopped all downstream traffic,
206 * and all bus master traffic. And done so for both the interface 206 * and all bus master traffic. And done so for both the interface
@@ -230,6 +230,15 @@ int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t message)
230 230
231 synchronize_irq(dev->irq); 231 synchronize_irq(dev->irq);
232 232
233 /* Downstream ports from this root hub should already be quiesced, so
234 * there will be no DMA activity. Now we can shut down the upstream
235 * link (except maybe for PME# resume signaling) and enter some PCI
236 * low power state, if the hardware allows.
237 */
238 pci_disable_device(dev);
239
240 pci_save_state(dev);
241
233 /* Don't fail on error to enable wakeup. We rely on pci code 242 /* Don't fail on error to enable wakeup. We rely on pci code
234 * to reject requests the hardware can't implement, rather 243 * to reject requests the hardware can't implement, rather
235 * than coding the same thing. 244 * than coding the same thing.
@@ -241,35 +250,6 @@ int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t message)
241 wake = w; 250 wake = w;
242 dev_dbg(&dev->dev, "wakeup: %d\n", wake); 251 dev_dbg(&dev->dev, "wakeup: %d\n", wake);
243 252
244 /* Downstream ports from this root hub should already be quiesced, so
245 * there will be no DMA activity. Now we can shut down the upstream
246 * link (except maybe for PME# resume signaling) and enter some PCI
247 * low power state, if the hardware allows.
248 */
249 pci_disable_device(dev);
250 done:
251 return retval;
252}
253EXPORT_SYMBOL_GPL(usb_hcd_pci_suspend);
254
255/**
256 * usb_hcd_pci_suspend_late - suspend a PCI-based HCD after IRQs are disabled
257 * @dev: USB Host Controller being suspended
258 * @message: Power Management message describing this state transition
259 *
260 * Store this function in the HCD's struct pci_driver as .suspend_late.
261 */
262int usb_hcd_pci_suspend_late(struct pci_dev *dev, pm_message_t message)
263{
264 int retval = 0;
265 int has_pci_pm;
266
267 /* We might already be suspended (runtime PM -- not yet written) */
268 if (dev->current_state != PCI_D0)
269 goto done;
270
271 pci_save_state(dev);
272
273 /* Don't change state if we don't need to */ 253 /* Don't change state if we don't need to */
274 if (message.event == PM_EVENT_FREEZE || 254 if (message.event == PM_EVENT_FREEZE ||
275 message.event == PM_EVENT_PRETHAW) { 255 message.event == PM_EVENT_PRETHAW) {
@@ -315,7 +295,7 @@ int usb_hcd_pci_suspend_late(struct pci_dev *dev, pm_message_t message)
315 done: 295 done:
316 return retval; 296 return retval;
317} 297}
318EXPORT_SYMBOL_GPL(usb_hcd_pci_suspend_late); 298EXPORT_SYMBOL_GPL(usb_hcd_pci_suspend);
319 299
320/** 300/**
321 * usb_hcd_pci_resume_early - resume a PCI-based HCD before IRQs are enabled 301 * usb_hcd_pci_resume_early - resume a PCI-based HCD before IRQs are enabled
@@ -325,65 +305,8 @@ EXPORT_SYMBOL_GPL(usb_hcd_pci_suspend_late);
325 */ 305 */
326int usb_hcd_pci_resume_early(struct pci_dev *dev) 306int usb_hcd_pci_resume_early(struct pci_dev *dev)
327{ 307{
328 int retval = 0; 308 pci_restore_state(dev);
329 pci_power_t state = dev->current_state; 309 return 0;
330
331#ifdef CONFIG_PPC_PMAC
332 /* Reenable ASIC clocks for USB */
333 if (machine_is(powermac)) {
334 struct device_node *of_node;
335
336 of_node = pci_device_to_OF_node(dev);
337 if (of_node)
338 pmac_call_feature(PMAC_FTR_USB_ENABLE,
339 of_node, 0, 1);
340 }
341#endif
342
343 /* NOTE: chip docs cover clean "real suspend" cases (what Linux
344 * calls "standby", "suspend to RAM", and so on). There are also
345 * dirty cases when swsusp fakes a suspend in "shutdown" mode.
346 */
347 if (state != PCI_D0) {
348#ifdef DEBUG
349 int pci_pm;
350 u16 pmcr;
351
352 pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);
353 pci_read_config_word(dev, pci_pm + PCI_PM_CTRL, &pmcr);
354 pmcr &= PCI_PM_CTRL_STATE_MASK;
355 if (pmcr) {
356 /* Clean case: power to USB and to HC registers was
357 * maintained; remote wakeup is easy.
358 */
359 dev_dbg(&dev->dev, "resume from PCI D%d\n", pmcr);
360 } else {
361 /* Clean: HC lost Vcc power, D0 uninitialized
362 * + Vaux may have preserved port and transceiver
363 * state ... for remote wakeup from D3cold
364 * + or not; HCD must reinit + re-enumerate
365 *
366 * Dirty: D0 semi-initialized cases with swsusp
367 * + after BIOS init
368 * + after Linux init (HCD statically linked)
369 */
370 dev_dbg(&dev->dev, "resume from previous PCI D%d\n",
371 state);
372 }
373#endif
374
375 retval = pci_set_power_state(dev, PCI_D0);
376 } else {
377 /* Same basic cases: clean (powered/not), dirty */
378 dev_dbg(&dev->dev, "PCI legacy resume\n");
379 }
380
381 if (retval < 0)
382 dev_err(&dev->dev, "can't resume: %d\n", retval);
383 else
384 pci_restore_state(dev);
385
386 return retval;
387} 310}
388EXPORT_SYMBOL_GPL(usb_hcd_pci_resume_early); 311EXPORT_SYMBOL_GPL(usb_hcd_pci_resume_early);
389 312
@@ -398,6 +321,18 @@ int usb_hcd_pci_resume(struct pci_dev *dev)
398 struct usb_hcd *hcd; 321 struct usb_hcd *hcd;
399 int retval; 322 int retval;
400 323
324#ifdef CONFIG_PPC_PMAC
325 /* Reenable ASIC clocks for USB */
326 if (machine_is(powermac)) {
327 struct device_node *of_node;
328
329 of_node = pci_device_to_OF_node(dev);
330 if (of_node)
331 pmac_call_feature(PMAC_FTR_USB_ENABLE,
332 of_node, 0, 1);
333 }
334#endif
335
401 hcd = pci_get_drvdata(dev); 336 hcd = pci_get_drvdata(dev);
402 if (hcd->state != HC_STATE_SUSPENDED) { 337 if (hcd->state != HC_STATE_SUSPENDED) {
403 dev_dbg(hcd->self.controller, 338 dev_dbg(hcd->self.controller,
@@ -405,6 +340,8 @@ int usb_hcd_pci_resume(struct pci_dev *dev)
405 return 0; 340 return 0;
406 } 341 }
407 342
343 pci_enable_wake(dev, PCI_D0, false);
344
408 retval = pci_enable_device(dev); 345 retval = pci_enable_device(dev);
409 if (retval < 0) { 346 if (retval < 0) {
410 dev_err(&dev->dev, "can't re-enable after resume, %d!\n", 347 dev_err(&dev->dev, "can't re-enable after resume, %d!\n",
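
With the separate .suspend_late hook removed, the single suspend callback now performs the whole shutdown sequence itself. A hedged summary of the ordering after this patch, taken only from the hunks above:

	synchronize_irq(dev->irq);   /* root hub traffic is already quiesced        */
	pci_disable_device(dev);     /* shut down the upstream link / bus mastering */
	pci_save_state(dev);         /* save config space while still in D0         */
	/* ... wakeup enabling and the D-state transition follow, as before ...      */
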
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index 572d2cf46e8d..5b94a56bec23 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -257,7 +257,6 @@ extern void usb_hcd_pci_remove(struct pci_dev *dev);
257 257
258#ifdef CONFIG_PM 258#ifdef CONFIG_PM
259extern int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t msg); 259extern int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t msg);
260extern int usb_hcd_pci_suspend_late(struct pci_dev *dev, pm_message_t msg);
261extern int usb_hcd_pci_resume_early(struct pci_dev *dev); 260extern int usb_hcd_pci_resume_early(struct pci_dev *dev);
262extern int usb_hcd_pci_resume(struct pci_dev *dev); 261extern int usb_hcd_pci_resume(struct pci_dev *dev);
263#endif /* CONFIG_PM */ 262#endif /* CONFIG_PM */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 94d5ee263c20..cd50d86029e7 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2382,8 +2382,8 @@ static int hub_port_debounce(struct usb_hub *hub, int port1)
2382 2382
2383void usb_ep0_reinit(struct usb_device *udev) 2383void usb_ep0_reinit(struct usb_device *udev)
2384{ 2384{
2385 usb_disable_endpoint(udev, 0 + USB_DIR_IN); 2385 usb_disable_endpoint(udev, 0 + USB_DIR_IN, true);
2386 usb_disable_endpoint(udev, 0 + USB_DIR_OUT); 2386 usb_disable_endpoint(udev, 0 + USB_DIR_OUT, true);
2387 usb_enable_endpoint(udev, &udev->ep0, true); 2387 usb_enable_endpoint(udev, &udev->ep0, true);
2388} 2388}
2389EXPORT_SYMBOL_GPL(usb_ep0_reinit); 2389EXPORT_SYMBOL_GPL(usb_ep0_reinit);
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 2a129cb7bb56..dff5760a37f6 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -717,7 +717,6 @@ static void usbfs_remove_device(struct usb_device *dev)
717 fs_remove_file (dev->usbfs_dentry); 717 fs_remove_file (dev->usbfs_dentry);
718 dev->usbfs_dentry = NULL; 718 dev->usbfs_dentry = NULL;
719 } 719 }
720 usb_fs_classdev_common_remove(dev);
721} 720}
722 721
723static int usbfs_notify(struct notifier_block *self, unsigned long action, void *dev) 722static int usbfs_notify(struct notifier_block *self, unsigned long action, void *dev)
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index de51667dd64d..31fb204f44c6 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1039,14 +1039,15 @@ static void remove_intf_ep_devs(struct usb_interface *intf)
1039 * @dev: the device whose endpoint is being disabled 1039 * @dev: the device whose endpoint is being disabled
1040 * @epaddr: the endpoint's address. Endpoint number for output, 1040 * @epaddr: the endpoint's address. Endpoint number for output,
1041 * endpoint number + USB_DIR_IN for input 1041 * endpoint number + USB_DIR_IN for input
1042 * @reset_hardware: flag to erase any endpoint state stored in the
1043 * controller hardware
1042 * 1044 *
1043 * Deallocates hcd/hardware state for this endpoint ... and nukes all 1045 * Disables the endpoint for URB submission and nukes all pending URBs.
1044 * pending urbs. 1046 * If @reset_hardware is set then also deallocates hcd/hardware state
1045 * 1047 * for the endpoint.
1046 * If the HCD hasn't registered a disable() function, this sets the
1047 * endpoint's maxpacket size to 0 to prevent further submissions.
1048 */ 1048 */
1049void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr) 1049void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
1050 bool reset_hardware)
1050{ 1051{
1051 unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK; 1052 unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK;
1052 struct usb_host_endpoint *ep; 1053 struct usb_host_endpoint *ep;
@@ -1056,15 +1057,18 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr)
1056 1057
1057 if (usb_endpoint_out(epaddr)) { 1058 if (usb_endpoint_out(epaddr)) {
1058 ep = dev->ep_out[epnum]; 1059 ep = dev->ep_out[epnum];
1059 dev->ep_out[epnum] = NULL; 1060 if (reset_hardware)
1061 dev->ep_out[epnum] = NULL;
1060 } else { 1062 } else {
1061 ep = dev->ep_in[epnum]; 1063 ep = dev->ep_in[epnum];
1062 dev->ep_in[epnum] = NULL; 1064 if (reset_hardware)
1065 dev->ep_in[epnum] = NULL;
1063 } 1066 }
1064 if (ep) { 1067 if (ep) {
1065 ep->enabled = 0; 1068 ep->enabled = 0;
1066 usb_hcd_flush_endpoint(dev, ep); 1069 usb_hcd_flush_endpoint(dev, ep);
1067 usb_hcd_disable_endpoint(dev, ep); 1070 if (reset_hardware)
1071 usb_hcd_disable_endpoint(dev, ep);
1068 } 1072 }
1069} 1073}
1070 1074
@@ -1072,17 +1076,21 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr)
1072 * usb_disable_interface -- Disable all endpoints for an interface 1076 * usb_disable_interface -- Disable all endpoints for an interface
1073 * @dev: the device whose interface is being disabled 1077 * @dev: the device whose interface is being disabled
1074 * @intf: pointer to the interface descriptor 1078 * @intf: pointer to the interface descriptor
1079 * @reset_hardware: flag to erase any endpoint state stored in the
1080 * controller hardware
1075 * 1081 *
1076 * Disables all the endpoints for the interface's current altsetting. 1082 * Disables all the endpoints for the interface's current altsetting.
1077 */ 1083 */
1078void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf) 1084void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
1085 bool reset_hardware)
1079{ 1086{
1080 struct usb_host_interface *alt = intf->cur_altsetting; 1087 struct usb_host_interface *alt = intf->cur_altsetting;
1081 int i; 1088 int i;
1082 1089
1083 for (i = 0; i < alt->desc.bNumEndpoints; ++i) { 1090 for (i = 0; i < alt->desc.bNumEndpoints; ++i) {
1084 usb_disable_endpoint(dev, 1091 usb_disable_endpoint(dev,
1085 alt->endpoint[i].desc.bEndpointAddress); 1092 alt->endpoint[i].desc.bEndpointAddress,
1093 reset_hardware);
1086 } 1094 }
1087} 1095}
1088 1096
@@ -1103,8 +1111,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
1103 dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__, 1111 dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
1104 skip_ep0 ? "non-ep0" : "all"); 1112 skip_ep0 ? "non-ep0" : "all");
1105 for (i = skip_ep0; i < 16; ++i) { 1113 for (i = skip_ep0; i < 16; ++i) {
1106 usb_disable_endpoint(dev, i); 1114 usb_disable_endpoint(dev, i, true);
1107 usb_disable_endpoint(dev, i + USB_DIR_IN); 1115 usb_disable_endpoint(dev, i + USB_DIR_IN, true);
1108 } 1116 }
1109 dev->toggle[0] = dev->toggle[1] = 0; 1117 dev->toggle[0] = dev->toggle[1] = 0;
1110 1118
@@ -1274,7 +1282,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
1274 remove_intf_ep_devs(iface); 1282 remove_intf_ep_devs(iface);
1275 usb_remove_sysfs_intf_files(iface); 1283 usb_remove_sysfs_intf_files(iface);
1276 } 1284 }
1277 usb_disable_interface(dev, iface); 1285 usb_disable_interface(dev, iface, true);
1278 1286
1279 iface->cur_altsetting = alt; 1287 iface->cur_altsetting = alt;
1280 1288
@@ -1353,8 +1361,8 @@ int usb_reset_configuration(struct usb_device *dev)
1353 */ 1361 */
1354 1362
1355 for (i = 1; i < 16; ++i) { 1363 for (i = 1; i < 16; ++i) {
1356 usb_disable_endpoint(dev, i); 1364 usb_disable_endpoint(dev, i, true);
1357 usb_disable_endpoint(dev, i + USB_DIR_IN); 1365 usb_disable_endpoint(dev, i + USB_DIR_IN, true);
1358 } 1366 }
1359 1367
1360 config = dev->actconfig; 1368 config = dev->actconfig;
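
Every caller of usb_disable_endpoint()/usb_disable_interface() now states explicitly whether the HCD's stored endpoint state should also be dropped. A minimal sketch of the two variants of the new signature:

	/* cancel pending URBs but keep the hcd/hardware endpoint state */
	usb_disable_endpoint(udev, epaddr, false);

	/* cancel pending URBs and release the hcd/hardware state as well */
	usb_disable_endpoint(udev, epaddr, true);
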
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 386177867a8a..79d8a9ea559b 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -15,9 +15,10 @@ extern void usb_enable_endpoint(struct usb_device *dev,
15 struct usb_host_endpoint *ep, bool reset_toggle); 15 struct usb_host_endpoint *ep, bool reset_toggle);
16extern void usb_enable_interface(struct usb_device *dev, 16extern void usb_enable_interface(struct usb_device *dev,
17 struct usb_interface *intf, bool reset_toggles); 17 struct usb_interface *intf, bool reset_toggles);
18extern void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr); 18extern void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
19 bool reset_hardware);
19extern void usb_disable_interface(struct usb_device *dev, 20extern void usb_disable_interface(struct usb_device *dev,
20 struct usb_interface *intf); 21 struct usb_interface *intf, bool reset_hardware);
21extern void usb_release_interface_cache(struct kref *ref); 22extern void usb_release_interface_cache(struct kref *ref);
22extern void usb_disable_device(struct usb_device *dev, int skip_ep0); 23extern void usb_disable_device(struct usb_device *dev, int skip_ep0);
23extern int usb_deauthorize_device(struct usb_device *); 24extern int usb_deauthorize_device(struct usb_device *);
@@ -151,7 +152,6 @@ extern struct usb_driver usbfs_driver;
151extern const struct file_operations usbfs_devices_fops; 152extern const struct file_operations usbfs_devices_fops;
152extern const struct file_operations usbdev_file_operations; 153extern const struct file_operations usbdev_file_operations;
153extern void usbfs_conn_disc_event(void); 154extern void usbfs_conn_disc_event(void);
154extern void usb_fs_classdev_common_remove(struct usb_device *udev);
155 155
156extern int usb_devio_init(void); 156extern int usb_devio_init(void);
157extern void usb_devio_cleanup(void); 157extern void usb_devio_cleanup(void);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index f2da0269e1b1..5d11c291f1ad 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -683,6 +683,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
683 struct usb_request *req = cdev->req; 683 struct usb_request *req = cdev->req;
684 int value = -EOPNOTSUPP; 684 int value = -EOPNOTSUPP;
685 u16 w_index = le16_to_cpu(ctrl->wIndex); 685 u16 w_index = le16_to_cpu(ctrl->wIndex);
686 u8 intf = w_index & 0xFF;
686 u16 w_value = le16_to_cpu(ctrl->wValue); 687 u16 w_value = le16_to_cpu(ctrl->wValue);
687 u16 w_length = le16_to_cpu(ctrl->wLength); 688 u16 w_length = le16_to_cpu(ctrl->wLength);
688 struct usb_function *f = NULL; 689 struct usb_function *f = NULL;
@@ -769,10 +770,10 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
769 goto unknown; 770 goto unknown;
770 if (!cdev->config || w_index >= MAX_CONFIG_INTERFACES) 771 if (!cdev->config || w_index >= MAX_CONFIG_INTERFACES)
771 break; 772 break;
772 f = cdev->config->interface[w_index]; 773 f = cdev->config->interface[intf];
773 if (!f) 774 if (!f)
774 break; 775 break;
775 if (w_value && !f->get_alt) 776 if (w_value && !f->set_alt)
776 break; 777 break;
777 value = f->set_alt(f, w_index, w_value); 778 value = f->set_alt(f, w_index, w_value);
778 break; 779 break;
@@ -781,7 +782,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
781 goto unknown; 782 goto unknown;
782 if (!cdev->config || w_index >= MAX_CONFIG_INTERFACES) 783 if (!cdev->config || w_index >= MAX_CONFIG_INTERFACES)
783 break; 784 break;
784 f = cdev->config->interface[w_index]; 785 f = cdev->config->interface[intf];
785 if (!f) 786 if (!f)
786 break; 787 break;
787 /* lots of interfaces only need altsetting zero... */ 788 /* lots of interfaces only need altsetting zero... */
@@ -808,7 +809,7 @@ unknown:
808 */ 809 */
809 if ((ctrl->bRequestType & USB_RECIP_MASK) 810 if ((ctrl->bRequestType & USB_RECIP_MASK)
810 == USB_RECIP_INTERFACE) { 811 == USB_RECIP_INTERFACE) {
811 f = cdev->config->interface[w_index]; 812 f = cdev->config->interface[intf];
812 if (f && f->setup) 813 if (f && f->setup)
813 value = f->setup(f, ctrl); 814 value = f->setup(f, ctrl);
814 else 815 else
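
For interface-recipient control requests the interface number is carried in the low byte of wIndex (for class-specific requests the high byte may carry other data), which is why the lookups above now mask it before indexing; in short:

	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u8  intf    = w_index & 0xFF;              /* interface number only     */
	f = cdev->config->interface[intf];         /* index by interface number */
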
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index cde8fdf15d5b..77c5d0a8a06e 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -297,7 +297,7 @@ void imx_ep_stall(struct imx_ep_struct *imx_ep)
297 297
298 for (i = 0; i < 100; i ++) { 298 for (i = 0; i < 100; i ++) {
299 temp = __raw_readl(imx_usb->base + USB_EP_STAT(EP_NO(imx_ep))); 299 temp = __raw_readl(imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
300 if (!temp & EPSTAT_STALL) 300 if (!(temp & EPSTAT_STALL))
301 break; 301 break;
302 udelay(20); 302 udelay(20);
303 } 303 }
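
The imx_udc change fixes a classic precedence slip: '!' binds tighter than '&', so the old condition evaluated (!temp) & EPSTAT_STALL and never actually tested the STALL bit. The corrected test:

	if (!(temp & EPSTAT_STALL))   /* leave the polling loop once STALL is clear */
		break;
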
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 2b476b6b3d4d..2c63bfb1f8d9 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -140,6 +140,7 @@ config USB_OHCI_HCD
140 tristate "OHCI HCD support" 140 tristate "OHCI HCD support"
141 depends on USB && USB_ARCH_HAS_OHCI 141 depends on USB && USB_ARCH_HAS_OHCI
142 select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3 142 select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
143 select USB_OTG_UTILS if ARCH_OMAP
143 ---help--- 144 ---help---
144 The Open Host Controller Interface (OHCI) is a standard for accessing 145 The Open Host Controller Interface (OHCI) is a standard for accessing
145 USB 1.1 host controller hardware. It does more in hardware than Intel's 146 USB 1.1 host controller hardware. It does more in hardware than Intel's
@@ -238,6 +239,23 @@ config USB_UHCI_HCD
238 To compile this driver as a module, choose M here: the 239 To compile this driver as a module, choose M here: the
239 module will be called uhci-hcd. 240 module will be called uhci-hcd.
240 241
242config USB_FHCI_HCD
243 tristate "Freescale QE USB Host Controller support"
244 depends on USB && OF_GPIO && QE_GPIO && QUICC_ENGINE
245 select FSL_GTM
246 select QE_USB
247 help
 248 This driver enables support for the Freescale QE USB Host Controller
 249 (as found on MPC8360 and MPC8323 processors); the driver supports
 250 full-speed and low-speed USB.
251
252config FHCI_DEBUG
253 bool "Freescale QE USB Host Controller debug support"
254 depends on USB_FHCI_HCD && DEBUG_FS
255 help
256 Say "y" to see some FHCI debug information and statistics
 257 through debugfs.
258
241config USB_U132_HCD 259config USB_U132_HCD
242 tristate "Elan U132 Adapter Host Controller" 260 tristate "Elan U132 Adapter Host Controller"
243 depends on USB && USB_FTDI_ELAN 261 depends on USB && USB_FTDI_ELAN
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index e5f3f20787e4..f163571e33d8 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -7,6 +7,11 @@ ifeq ($(CONFIG_USB_DEBUG),y)
7endif 7endif
8 8
9isp1760-objs := isp1760-hcd.o isp1760-if.o 9isp1760-objs := isp1760-hcd.o isp1760-if.o
10fhci-objs := fhci-hcd.o fhci-hub.o fhci-q.o fhci-mem.o \
11 fhci-tds.o fhci-sched.o
12ifeq ($(CONFIG_FHCI_DEBUG),y)
13fhci-objs += fhci-dbg.o
14endif
10 15
11obj-$(CONFIG_USB_WHCI_HCD) += whci/ 16obj-$(CONFIG_USB_WHCI_HCD) += whci/
12 17
@@ -17,6 +22,7 @@ obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
17obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o 22obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
18obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o 23obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
19obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o 24obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
25obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
20obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o 26obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
21obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o 27obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
22obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o 28obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index bdc6e86e1f8b..bb21fb0a4969 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -230,7 +230,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
230 pci_read_config_word(pdev, 0x62, &port_wake); 230 pci_read_config_word(pdev, 0x62, &port_wake);
231 if (port_wake & 0x0001) { 231 if (port_wake & 0x0001) {
232 dev_warn(&pdev->dev, "Enabling legacy PCI PM\n"); 232 dev_warn(&pdev->dev, "Enabling legacy PCI PM\n");
233 device_init_wakeup(&pdev->dev, 1); 233 device_set_wakeup_capable(&pdev->dev, 1);
234 } 234 }
235 } 235 }
236 236
@@ -432,7 +432,6 @@ static struct pci_driver ehci_pci_driver = {
432 432
433#ifdef CONFIG_PM 433#ifdef CONFIG_PM
434 .suspend = usb_hcd_pci_suspend, 434 .suspend = usb_hcd_pci_suspend,
435 .suspend_late = usb_hcd_pci_suspend_late,
436 .resume_early = usb_hcd_pci_resume_early, 435 .resume_early = usb_hcd_pci_resume_early,
437 .resume = usb_hcd_pci_resume, 436 .resume = usb_hcd_pci_resume,
438#endif 437#endif
diff --git a/drivers/usb/host/fhci-dbg.c b/drivers/usb/host/fhci-dbg.c
new file mode 100644
index 000000000000..34e14edf390b
--- /dev/null
+++ b/drivers/usb/host/fhci-dbg.c
@@ -0,0 +1,139 @@
1/*
2 * Freescale QUICC Engine USB Host Controller Driver
3 *
4 * Copyright (c) Freescale Semicondutor, Inc. 2006.
5 * Shlomi Gridish <gridish@freescale.com>
6 * Jerry Huang <Chang-Ming.Huang@freescale.com>
7 * Copyright (c) Logic Product Development, Inc. 2007
8 * Peter Barada <peterb@logicpd.com>
9 * Copyright (c) MontaVista Software, Inc. 2008.
10 * Anton Vorontsov <avorontsov@ru.mvista.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/kernel.h>
19#include <linux/errno.h>
20#include <linux/debugfs.h>
21#include <linux/seq_file.h>
22#include <linux/usb.h>
23#include "../core/hcd.h"
24#include "fhci.h"
25
26void fhci_dbg_isr(struct fhci_hcd *fhci, int usb_er)
27{
28 int i;
29
30 if (usb_er == -1) {
31 fhci->usb_irq_stat[12]++;
32 return;
33 }
34
35 for (i = 0; i < 12; ++i) {
36 if (usb_er & (1 << i))
37 fhci->usb_irq_stat[i]++;
38 }
39}
40
41static int fhci_dfs_regs_show(struct seq_file *s, void *v)
42{
43 struct fhci_hcd *fhci = s->private;
44 struct fhci_regs __iomem *regs = fhci->regs;
45
46 seq_printf(s,
47 "mode: 0x%x\n" "addr: 0x%x\n"
48 "command: 0x%x\n" "ep0: 0x%x\n"
49 "event: 0x%x\n" "mask: 0x%x\n"
50 "status: 0x%x\n" "SOF timer: %d\n"
51 "frame number: %d\n"
52 "lines status: 0x%x\n",
53 in_8(&regs->usb_mod), in_8(&regs->usb_addr),
54 in_8(&regs->usb_comm), in_be16(&regs->usb_ep[0]),
55 in_be16(&regs->usb_event), in_be16(&regs->usb_mask),
56 in_8(&regs->usb_status), in_be16(&regs->usb_sof_tmr),
57 in_be16(&regs->usb_frame_num),
58 fhci_ioports_check_bus_state(fhci));
59
60 return 0;
61}
62
63static int fhci_dfs_irq_stat_show(struct seq_file *s, void *v)
64{
65 struct fhci_hcd *fhci = s->private;
66 int *usb_irq_stat = fhci->usb_irq_stat;
67
68 seq_printf(s,
69 "RXB: %d\n" "TXB: %d\n" "BSY: %d\n"
70 "SOF: %d\n" "TXE0: %d\n" "TXE1: %d\n"
71 "TXE2: %d\n" "TXE3: %d\n" "IDLE: %d\n"
72 "RESET: %d\n" "SFT: %d\n" "MSF: %d\n"
73 "IDLE_ONLY: %d\n",
74 usb_irq_stat[0], usb_irq_stat[1], usb_irq_stat[2],
75 usb_irq_stat[3], usb_irq_stat[4], usb_irq_stat[5],
76 usb_irq_stat[6], usb_irq_stat[7], usb_irq_stat[8],
77 usb_irq_stat[9], usb_irq_stat[10], usb_irq_stat[11],
78 usb_irq_stat[12]);
79
80 return 0;
81}
82
83static int fhci_dfs_regs_open(struct inode *inode, struct file *file)
84{
85 return single_open(file, fhci_dfs_regs_show, inode->i_private);
86}
87
88static int fhci_dfs_irq_stat_open(struct inode *inode, struct file *file)
89{
90 return single_open(file, fhci_dfs_irq_stat_show, inode->i_private);
91}
92
93static const struct file_operations fhci_dfs_regs_fops = {
94 .open = fhci_dfs_regs_open,
95 .read = seq_read,
96 .llseek = seq_lseek,
97 .release = single_release,
98};
99
100static const struct file_operations fhci_dfs_irq_stat_fops = {
101 .open = fhci_dfs_irq_stat_open,
102 .read = seq_read,
103 .llseek = seq_lseek,
104 .release = single_release,
105};
106
107void fhci_dfs_create(struct fhci_hcd *fhci)
108{
109 struct device *dev = fhci_to_hcd(fhci)->self.controller;
110
111 fhci->dfs_root = debugfs_create_dir(dev->bus_id, NULL);
112 if (!fhci->dfs_root) {
113 WARN_ON(1);
114 return;
115 }
116
117 fhci->dfs_regs = debugfs_create_file("regs", S_IFREG | S_IRUGO,
118 fhci->dfs_root, fhci, &fhci_dfs_regs_fops);
119
120 fhci->dfs_irq_stat = debugfs_create_file("irq_stat",
121 S_IFREG | S_IRUGO, fhci->dfs_root, fhci,
122 &fhci_dfs_irq_stat_fops);
123
124 WARN_ON(!fhci->dfs_regs || !fhci->dfs_irq_stat);
125}
126
127void fhci_dfs_destroy(struct fhci_hcd *fhci)
128{
129 if (!fhci->dfs_root)
130 return;
131
132 if (fhci->dfs_irq_stat)
133 debugfs_remove(fhci->dfs_irq_stat);
134
135 if (fhci->dfs_regs)
136 debugfs_remove(fhci->dfs_regs);
137
138 debugfs_remove(fhci->dfs_root);
139}
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
new file mode 100644
index 000000000000..ba622cc8a9ba
--- /dev/null
+++ b/drivers/usb/host/fhci-hcd.c
@@ -0,0 +1,836 @@
1/*
2 * Freescale QUICC Engine USB Host Controller Driver
3 *
4 * Copyright (c) Freescale Semicondutor, Inc. 2006.
5 * Shlomi Gridish <gridish@freescale.com>
6 * Jerry Huang <Chang-Ming.Huang@freescale.com>
7 * Copyright (c) Logic Product Development, Inc. 2007
8 * Peter Barada <peterb@logicpd.com>
9 * Copyright (c) MontaVista Software, Inc. 2008.
10 * Anton Vorontsov <avorontsov@ru.mvista.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/spinlock.h>
21#include <linux/kernel.h>
22#include <linux/delay.h>
23#include <linux/errno.h>
24#include <linux/list.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27#include <linux/usb.h>
28#include <linux/of_platform.h>
29#include <linux/of_gpio.h>
30#include <asm/qe.h>
31#include <asm/fsl_gtm.h>
32#include "../core/hcd.h"
33#include "fhci.h"
34
35void fhci_start_sof_timer(struct fhci_hcd *fhci)
36{
37 fhci_dbg(fhci, "-> %s\n", __func__);
38
39 /* clear frame_n */
40 out_be16(&fhci->pram->frame_num, 0);
41
42 out_be16(&fhci->regs->usb_sof_tmr, 0);
43 setbits8(&fhci->regs->usb_mod, USB_MODE_SFTE);
44
45 fhci_dbg(fhci, "<- %s\n", __func__);
46}
47
48void fhci_stop_sof_timer(struct fhci_hcd *fhci)
49{
50 fhci_dbg(fhci, "-> %s\n", __func__);
51
52 clrbits8(&fhci->regs->usb_mod, USB_MODE_SFTE);
53 gtm_stop_timer16(fhci->timer);
54
55 fhci_dbg(fhci, "<- %s\n", __func__);
56}
57
58u16 fhci_get_sof_timer_count(struct fhci_usb *usb)
59{
60 return be16_to_cpu(in_be16(&usb->fhci->regs->usb_sof_tmr) / 12);
61}
62
63/* initialize the endpoint zero */
64static u32 endpoint_zero_init(struct fhci_usb *usb,
65 enum fhci_mem_alloc data_mem,
66 u32 ring_len)
67{
68 u32 rc;
69
70 rc = fhci_create_ep(usb, data_mem, ring_len);
71 if (rc)
72 return rc;
73
 74 /* initialize endpoint registers */
75 fhci_init_ep_registers(usb, usb->ep0, data_mem);
76
77 return 0;
78}
79
80/* enable the USB interrupts */
81void fhci_usb_enable_interrupt(struct fhci_usb *usb)
82{
83 struct fhci_hcd *fhci = usb->fhci;
84
85 if (usb->intr_nesting_cnt == 1) {
86 /* initialize the USB interrupt */
87 enable_irq(fhci_to_hcd(fhci)->irq);
88
89 /* initialize the event register and mask register */
90 out_be16(&usb->fhci->regs->usb_event, 0xffff);
91 out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);
92
93 /* enable the timer interrupts */
94 enable_irq(fhci->timer->irq);
95 } else if (usb->intr_nesting_cnt > 1)
96 fhci_info(fhci, "unbalanced USB interrupts nesting\n");
97 usb->intr_nesting_cnt--;
98}
99
100/* disable the USB interrupt */
101void fhci_usb_disable_interrupt(struct fhci_usb *usb)
102{
103 struct fhci_hcd *fhci = usb->fhci;
104
105 if (usb->intr_nesting_cnt == 0) {
 106 /* disable the timer interrupt */
107 disable_irq_nosync(fhci->timer->irq);
108
109 /* disable the usb interrupt */
110 disable_irq_nosync(fhci_to_hcd(fhci)->irq);
111 out_be16(&usb->fhci->regs->usb_mask, 0);
112 }
113 usb->intr_nesting_cnt++;
114}
115
116/* enable the USB controller */
117static u32 fhci_usb_enable(struct fhci_hcd *fhci)
118{
119 struct fhci_usb *usb = fhci->usb_lld;
120
121 out_be16(&usb->fhci->regs->usb_event, 0xffff);
122 out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);
123 setbits8(&usb->fhci->regs->usb_mod, USB_MODE_EN);
124
125 mdelay(100);
126
127 return 0;
128}
129
130/* disable the USB controller */
131static u32 fhci_usb_disable(struct fhci_hcd *fhci)
132{
133 struct fhci_usb *usb = fhci->usb_lld;
134
135 fhci_usb_disable_interrupt(usb);
136 fhci_port_disable(fhci);
137
138 /* disable the usb controller */
139 if (usb->port_status == FHCI_PORT_FULL ||
140 usb->port_status == FHCI_PORT_LOW)
141 fhci_device_disconnected_interrupt(fhci);
142
143 clrbits8(&usb->fhci->regs->usb_mod, USB_MODE_EN);
144
145 return 0;
146}
147
148/* check the bus state by polling the QE bit on the IO ports */
149int fhci_ioports_check_bus_state(struct fhci_hcd *fhci)
150{
151 u8 bits = 0;
152
153 /* check USBOE; if transmitting, exit */
154 if (!gpio_get_value(fhci->gpios[GPIO_USBOE]))
155 return -1;
156
157 /* check USBRP */
158 if (gpio_get_value(fhci->gpios[GPIO_USBRP]))
159 bits |= 0x2;
160
161 /* check USBRN */
162 if (gpio_get_value(fhci->gpios[GPIO_USBRN]))
163 bits |= 0x1;
164
165 return bits;
166}
167
168static void fhci_mem_free(struct fhci_hcd *fhci)
169{
170 struct ed *ed;
171 struct ed *next_ed;
172 struct td *td;
173 struct td *next_td;
174
175 list_for_each_entry_safe(ed, next_ed, &fhci->empty_eds, node) {
176 list_del(&ed->node);
177 kfree(ed);
178 }
179
180 list_for_each_entry_safe(td, next_td, &fhci->empty_tds, node) {
181 list_del(&td->node);
182 kfree(td);
183 }
184
185 kfree(fhci->vroot_hub);
186 fhci->vroot_hub = NULL;
187
188 kfree(fhci->hc_list);
189 fhci->hc_list = NULL;
190}
191
192static int fhci_mem_init(struct fhci_hcd *fhci)
193{
194 int i;
195
196 fhci->hc_list = kzalloc(sizeof(*fhci->hc_list), GFP_KERNEL);
197 if (!fhci->hc_list)
198 goto err;
199
200 INIT_LIST_HEAD(&fhci->hc_list->ctrl_list);
201 INIT_LIST_HEAD(&fhci->hc_list->bulk_list);
202 INIT_LIST_HEAD(&fhci->hc_list->iso_list);
203 INIT_LIST_HEAD(&fhci->hc_list->intr_list);
204 INIT_LIST_HEAD(&fhci->hc_list->done_list);
205
206 fhci->vroot_hub = kzalloc(sizeof(*fhci->vroot_hub), GFP_KERNEL);
207 if (!fhci->vroot_hub)
208 goto err;
209
210 INIT_LIST_HEAD(&fhci->empty_eds);
211 INIT_LIST_HEAD(&fhci->empty_tds);
212
213 /* initialize work queue to handle done list */
214 fhci_tasklet.data = (unsigned long)fhci;
215 fhci->process_done_task = &fhci_tasklet;
216
217 for (i = 0; i < MAX_TDS; i++) {
218 struct td *td;
219
220 td = kmalloc(sizeof(*td), GFP_KERNEL);
221 if (!td)
222 goto err;
223 fhci_recycle_empty_td(fhci, td);
224 }
225 for (i = 0; i < MAX_EDS; i++) {
226 struct ed *ed;
227
228 ed = kmalloc(sizeof(*ed), GFP_KERNEL);
229 if (!ed)
230 goto err;
231 fhci_recycle_empty_ed(fhci, ed);
232 }
233
234 fhci->active_urbs = 0;
235 return 0;
236err:
237 fhci_mem_free(fhci);
238 return -ENOMEM;
239}
240
241/* destroy the fhci_usb structure */
242static void fhci_usb_free(void *lld)
243{
244 struct fhci_usb *usb = lld;
245 struct fhci_hcd *fhci = usb->fhci;
246
247 if (usb) {
248 fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);
249 fhci_ep0_free(usb);
250 kfree(usb->actual_frame);
251 kfree(usb);
252 }
253}
254
255/* initialize the USB */
256static int fhci_usb_init(struct fhci_hcd *fhci)
257{
258 struct fhci_usb *usb = fhci->usb_lld;
259
260 memset_io(usb->fhci->pram, 0, FHCI_PRAM_SIZE);
261
262 usb->port_status = FHCI_PORT_DISABLED;
263 usb->max_frame_usage = FRAME_TIME_USAGE;
264 usb->sw_transaction_time = SW_FIX_TIME_BETWEEN_TRANSACTION;
265
266 usb->actual_frame = kzalloc(sizeof(*usb->actual_frame), GFP_KERNEL);
267 if (!usb->actual_frame) {
268 fhci_usb_free(usb);
269 return -ENOMEM;
270 }
271
272 INIT_LIST_HEAD(&usb->actual_frame->tds_list);
273
274 /* initializing registers on chip, clear frame number */
275 out_be16(&fhci->pram->frame_num, 0);
276
277 /* clear rx state */
278 out_be32(&fhci->pram->rx_state, 0);
279
280 /* set mask register */
281 usb->saved_msk = (USB_E_TXB_MASK |
282 USB_E_TXE1_MASK |
283 USB_E_IDLE_MASK |
284 USB_E_RESET_MASK | USB_E_SFT_MASK | USB_E_MSF_MASK);
285
286 out_8(&usb->fhci->regs->usb_mod, USB_MODE_HOST | USB_MODE_EN);
287
288 /* clearing the mask register */
289 out_be16(&usb->fhci->regs->usb_mask, 0);
290
291 /* initializing the event register */
292 out_be16(&usb->fhci->regs->usb_event, 0xffff);
293
294 if (endpoint_zero_init(usb, DEFAULT_DATA_MEM, DEFAULT_RING_LEN) != 0) {
295 fhci_usb_free(usb);
296 return -EINVAL;
297 }
298
299 return 0;
300}
301
302/* initialize the fhci_usb struct and the corresponding data structures */
303static struct fhci_usb *fhci_create_lld(struct fhci_hcd *fhci)
304{
305 struct fhci_usb *usb;
306
307 /* allocate memory for SCC data structure */
308 usb = kzalloc(sizeof(*usb), GFP_KERNEL);
309 if (!usb) {
310 fhci_err(fhci, "no memory for SCC data struct\n");
311 return NULL;
312 }
313
314 usb->fhci = fhci;
315 usb->hc_list = fhci->hc_list;
316 usb->vroot_hub = fhci->vroot_hub;
317
318 usb->transfer_confirm = fhci_transfer_confirm_callback;
319
320 return usb;
321}
322
323static int fhci_start(struct usb_hcd *hcd)
324{
325 int ret;
326 struct fhci_hcd *fhci = hcd_to_fhci(hcd);
327
328 ret = fhci_mem_init(fhci);
329 if (ret) {
330 fhci_err(fhci, "failed to allocate memory\n");
331 goto err;
332 }
333
334 fhci->usb_lld = fhci_create_lld(fhci);
335 if (!fhci->usb_lld) {
336 fhci_err(fhci, "low level driver config failed\n");
337 ret = -ENOMEM;
338 goto err;
339 }
340
341 ret = fhci_usb_init(fhci);
342 if (ret) {
343 fhci_err(fhci, "low level driver initialize failed\n");
344 goto err;
345 }
346
347 spin_lock_init(&fhci->lock);
348
349 /* connect the virtual root hub */
350 fhci->vroot_hub->dev_num = 1; /* this field may need to be fixed */
351 fhci->vroot_hub->hub.wHubStatus = 0;
352 fhci->vroot_hub->hub.wHubChange = 0;
353 fhci->vroot_hub->port.wPortStatus = 0;
354 fhci->vroot_hub->port.wPortChange = 0;
355
356 hcd->state = HC_STATE_RUNNING;
357
358 /*
359 * From here on, khubd concurrently accesses the root
360 * hub; drivers will be talking to enumerated devices.
361 * (On restart paths, khubd already knows about the root
362 * hub and could find work as soon as we wrote FLAG_CF.)
363 *
364 * Before this point the HC was idle/ready. After, khubd
365 * and device drivers may start it running.
366 */
367 fhci_usb_enable(fhci);
368 return 0;
369err:
370 fhci_mem_free(fhci);
371 return ret;
372}
373
374static void fhci_stop(struct usb_hcd *hcd)
375{
376 struct fhci_hcd *fhci = hcd_to_fhci(hcd);
377
378 fhci_usb_disable_interrupt(fhci->usb_lld);
379 fhci_usb_disable(fhci);
380
381 fhci_usb_free(fhci->usb_lld);
382 fhci->usb_lld = NULL;
383 fhci_mem_free(fhci);
384}
385
386static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
387 gfp_t mem_flags)
388{
389 struct fhci_hcd *fhci = hcd_to_fhci(hcd);
390 u32 pipe = urb->pipe;
391 int ret;
392 int i;
393 int size = 0;
394 struct urb_priv *urb_priv;
395 unsigned long flags;
396
397 switch (usb_pipetype(pipe)) {
398 case PIPE_CONTROL:
399 /* 1 TD for setup, 1 for ack; falls through to the bulk math below */
400 size = 2;
401 case PIPE_BULK:
402 /* one TD for every 4096 bytes (can be up to 8k) */
403 size += urb->transfer_buffer_length / 4096;
404 /* ...add for any remaining bytes... */
405 if ((urb->transfer_buffer_length % 4096) != 0)
406 size++;
407 /* ...and maybe a zero-length packet to wrap it up */
408 if (size == 0)
409 size++;
410 else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
411 && (urb->transfer_buffer_length
412 % usb_maxpacket(urb->dev, pipe,
413 usb_pipeout(pipe))) != 0)
414 size++;
415 break;
416 case PIPE_ISOCHRONOUS:
417 size = urb->number_of_packets;
418 if (size <= 0)
419 return -EINVAL;
420 for (i = 0; i < urb->number_of_packets; i++) {
421 urb->iso_frame_desc[i].actual_length = 0;
422 urb->iso_frame_desc[i].status = (u32) (-EXDEV);
423 }
424 break;
425 case PIPE_INTERRUPT:
426 size = 1;
427 }
428
429 /* allocate the private part of the URB */
430 urb_priv = kzalloc(sizeof(*urb_priv), mem_flags);
431 if (!urb_priv)
432 return -ENOMEM;
433
434 /* allocate the TD array for this URB */
435 urb_priv->tds = kzalloc(size * sizeof(struct td), mem_flags);
436 if (!urb_priv->tds) {
437 kfree(urb_priv);
438 return -ENOMEM;
439 }
440
441 spin_lock_irqsave(&fhci->lock, flags);
442
443 ret = usb_hcd_link_urb_to_ep(hcd, urb);
444 if (ret)
445 goto err;
446
447 /* fill the private part of the URB */
448 urb_priv->num_of_tds = size;
449
450 urb->status = -EINPROGRESS;
451 urb->actual_length = 0;
452 urb->error_count = 0;
453 urb->hcpriv = urb_priv;
454
455 fhci_queue_urb(fhci, urb);
456err:
457 if (ret) {
458 kfree(urb_priv->tds);
459 kfree(urb_priv);
460 }
461 spin_unlock_irqrestore(&fhci->lock, flags);
462 return ret;
463}
464
465/* dequeue FHCI URB */
466static int fhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
467{
468 struct fhci_hcd *fhci = hcd_to_fhci(hcd);
469 struct fhci_usb *usb = fhci->usb_lld;
470 int ret = -EINVAL;
471 unsigned long flags;
472
473 if (!urb || !urb->dev || !urb->dev->bus)
474 goto out;
475
476 spin_lock_irqsave(&fhci->lock, flags);
477
478 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
479 if (ret)
480 goto out2;
481
482 if (usb->port_status != FHCI_PORT_DISABLED) {
483 struct urb_priv *urb_priv;
484
485 /*
486 * flag the urb's data for deletion in some upcoming
487 * SF interrupt's delete list processing
488 */
489 urb_priv = urb->hcpriv;
490
491 if (!urb_priv || (urb_priv->state == URB_DEL))
492 goto out2;
493
494 urb_priv->state = URB_DEL;
495
496 /* already pending? */
497 urb_priv->ed->state = FHCI_ED_URB_DEL;
498 } else {
499 fhci_urb_complete_free(fhci, urb);
500 }
501
502out2:
503 spin_unlock_irqrestore(&fhci->lock, flags);
504out:
505 return ret;
506}
507
508static void fhci_endpoint_disable(struct usb_hcd *hcd,
509 struct usb_host_endpoint *ep)
510{
511 struct fhci_hcd *fhci;
512 struct ed *ed;
513 unsigned long flags;
514
515 fhci = hcd_to_fhci(hcd);
516 spin_lock_irqsave(&fhci->lock, flags);
517 ed = ep->hcpriv;
518 if (ed) {
519 while (ed->td_head != NULL) {
520 struct td *td = fhci_remove_td_from_ed(ed);
521 fhci_urb_complete_free(fhci, td->urb);
522 }
523 fhci_recycle_empty_ed(fhci, ed);
524 ep->hcpriv = NULL;
525 }
526 spin_unlock_irqrestore(&fhci->lock, flags);
527}
528
529static int fhci_get_frame_number(struct usb_hcd *hcd)
530{
531 struct fhci_hcd *fhci = hcd_to_fhci(hcd);
532
533 return get_frame_num(fhci);
534}
535
536static const struct hc_driver fhci_driver = {
537 .description = "fsl,usb-fhci",
538 .product_desc = "FHCI HOST Controller",
539 .hcd_priv_size = sizeof(struct fhci_hcd),
540
541 /* generic hardware linkage */
542 .irq = fhci_irq,
543 .flags = HCD_USB11 | HCD_MEMORY,
544
545 /* basic lifecycle operation */
546 .start = fhci_start,
547 .stop = fhci_stop,
548
549 /* managing i/o requests and associated device resources */
550 .urb_enqueue = fhci_urb_enqueue,
551 .urb_dequeue = fhci_urb_dequeue,
552 .endpoint_disable = fhci_endpoint_disable,
553
554 /* scheduling support */
555 .get_frame_number = fhci_get_frame_number,
556
557 /* root hub support */
558 .hub_status_data = fhci_hub_status_data,
559 .hub_control = fhci_hub_control,
560};
561
562static int __devinit of_fhci_probe(struct of_device *ofdev,
563 const struct of_device_id *ofid)
564{
565 struct device *dev = &ofdev->dev;
566 struct device_node *node = ofdev->node;
567 struct usb_hcd *hcd;
568 struct fhci_hcd *fhci;
569 struct resource usb_regs;
570 unsigned long pram_addr;
571 unsigned int usb_irq;
572 const char *sprop;
573 const u32 *iprop;
574 int size;
575 int ret;
576 int i;
577 int j;
578
579 if (usb_disabled())
580 return -ENODEV;
581
582 sprop = of_get_property(node, "mode", NULL);
583 if (sprop && strcmp(sprop, "host"))
584 return -ENODEV;
585
586 hcd = usb_create_hcd(&fhci_driver, dev, dev->bus_id);
587 if (!hcd) {
588 dev_err(dev, "could not create hcd\n");
589 return -ENOMEM;
590 }
591
592 fhci = hcd_to_fhci(hcd);
593 hcd->self.controller = dev;
594 dev_set_drvdata(dev, hcd);
595
596 iprop = of_get_property(node, "hub-power-budget", &size);
597 if (iprop && size == sizeof(*iprop))
598 hcd->power_budget = *iprop;
599
600 /* FHCI registers. */
601 ret = of_address_to_resource(node, 0, &usb_regs);
602 if (ret) {
603 dev_err(dev, "could not get regs\n");
604 goto err_regs;
605 }
606
607 hcd->regs = ioremap(usb_regs.start, usb_regs.end - usb_regs.start + 1);
608 if (!hcd->regs) {
609 dev_err(dev, "could not ioremap regs\n");
610 ret = -ENOMEM;
611 goto err_regs;
612 }
613 fhci->regs = hcd->regs;
614
615 /* Parameter RAM. */
616 iprop = of_get_property(node, "reg", &size);
617 if (!iprop || size < sizeof(*iprop) * 4) {
618 dev_err(dev, "can't get pram offset\n");
619 ret = -EINVAL;
620 goto err_pram;
621 }
622
623 pram_addr = cpm_muram_alloc_fixed(iprop[2], FHCI_PRAM_SIZE);
624 if (IS_ERR_VALUE(pram_addr)) {
625 dev_err(dev, "failed to allocate usb pram\n");
626 ret = -ENOMEM;
627 goto err_pram;
628 }
629 fhci->pram = cpm_muram_addr(pram_addr);
630
631 /* GPIOs and pins */
632 for (i = 0; i < NUM_GPIOS; i++) {
633 int gpio;
634 enum of_gpio_flags flags;
635
636 gpio = of_get_gpio_flags(node, i, &flags);
637 fhci->gpios[i] = gpio;
638 fhci->alow_gpios[i] = flags & OF_GPIO_ACTIVE_LOW;
639
640 if (!gpio_is_valid(gpio)) {
641 if (i < GPIO_SPEED) {
642 dev_err(dev, "incorrect GPIO%d: %d\n",
643 i, gpio);
644 goto err_gpios;
645 } else {
646 dev_info(dev, "assuming board doesn't have "
647 "%s gpio\n", i == GPIO_SPEED ?
648 "speed" : "power");
649 continue;
650 }
651 }
652
653 ret = gpio_request(gpio, dev->bus_id);
654 if (ret) {
655 dev_err(dev, "failed to request gpio %d", i);
656 goto err_gpios;
657 }
658
659 if (i >= GPIO_SPEED) {
660 ret = gpio_direction_output(gpio, 0);
661 if (ret) {
662 dev_err(dev, "failed to set gpio %d as "
663 "an output\n", i);
664 i++;
665 goto err_gpios;
666 }
667 }
668 }
669
670 for (j = 0; j < NUM_PINS; j++) {
671 fhci->pins[j] = qe_pin_request(ofdev->node, j);
672 if (IS_ERR(fhci->pins[j])) {
673 ret = PTR_ERR(fhci->pins[j]);
674 dev_err(dev, "can't get pin %d: %d\n", j, ret);
675 goto err_pins;
676 }
677 }
678
679 /* Frame limit timer and its interrupt. */
680 fhci->timer = gtm_get_timer16();
681 if (IS_ERR(fhci->timer)) {
682 ret = PTR_ERR(fhci->timer);
683 dev_err(dev, "failed to request qe timer: %i", ret);
684 goto err_get_timer;
685 }
686
687 ret = request_irq(fhci->timer->irq, fhci_frame_limit_timer_irq,
688 IRQF_DISABLED, "qe timer (usb)", hcd);
689 if (ret) {
690 dev_err(dev, "failed to request timer irq");
691 goto err_timer_irq;
692 }
693
694 /* USB Host interrupt. */
695 usb_irq = irq_of_parse_and_map(node, 0);
696 if (usb_irq == NO_IRQ) {
697 dev_err(dev, "could not get usb irq\n");
698 ret = -EINVAL;
699 goto err_usb_irq;
700 }
701
702 /* Clocks. */
703 sprop = of_get_property(node, "fsl,fullspeed-clock", NULL);
704 if (sprop) {
705 fhci->fullspeed_clk = qe_clock_source(sprop);
706 if (fhci->fullspeed_clk == QE_CLK_DUMMY) {
707 dev_err(dev, "wrong fullspeed-clock\n");
708 ret = -EINVAL;
709 goto err_clocks;
710 }
711 }
712
713 sprop = of_get_property(node, "fsl,lowspeed-clock", NULL);
714 if (sprop) {
715 fhci->lowspeed_clk = qe_clock_source(sprop);
716 if (fhci->lowspeed_clk == QE_CLK_DUMMY) {
717 dev_err(dev, "wrong lowspeed-clock\n");
718 ret = -EINVAL;
719 goto err_clocks;
720 }
721 }
722
723 if (fhci->fullspeed_clk == QE_CLK_NONE &&
724 fhci->lowspeed_clk == QE_CLK_NONE) {
725 dev_err(dev, "no clocks specified\n");
726 ret = -EINVAL;
727 goto err_clocks;
728 }
729
730 dev_info(dev, "at 0x%p, irq %d\n", hcd->regs, usb_irq);
731
732 fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);
733
734 /* Start with full-speed, if possible. */
735 if (fhci->fullspeed_clk != QE_CLK_NONE) {
736 fhci_config_transceiver(fhci, FHCI_PORT_FULL);
737 qe_usb_clock_set(fhci->fullspeed_clk, USB_CLOCK);
738 } else {
739 fhci_config_transceiver(fhci, FHCI_PORT_LOW);
740 qe_usb_clock_set(fhci->lowspeed_clk, USB_CLOCK >> 3);
741 }
742
743 /* Clear and disable any pending interrupts. */
744 out_be16(&fhci->regs->usb_event, 0xffff);
745 out_be16(&fhci->regs->usb_mask, 0);
746
747 ret = usb_add_hcd(hcd, usb_irq, IRQF_DISABLED);
748 if (ret < 0)
749 goto err_add_hcd;
750
751 fhci_dfs_create(fhci);
752
753 return 0;
754
755err_add_hcd:
756err_clocks:
757 irq_dispose_mapping(usb_irq);
758err_usb_irq:
759 free_irq(fhci->timer->irq, hcd);
760err_timer_irq:
761 gtm_put_timer16(fhci->timer);
762err_get_timer:
763err_pins:
764 while (--j >= 0)
765 qe_pin_free(fhci->pins[j]);
766err_gpios:
767 while (--i >= 0) {
768 if (gpio_is_valid(fhci->gpios[i]))
769 gpio_free(fhci->gpios[i]);
770 }
771 cpm_muram_free(pram_addr);
772err_pram:
773 iounmap(hcd->regs);
774err_regs:
775 usb_put_hcd(hcd);
776 return ret;
777}
778
779static int __devexit fhci_remove(struct device *dev)
780{
781 struct usb_hcd *hcd = dev_get_drvdata(dev);
782 struct fhci_hcd *fhci = hcd_to_fhci(hcd);
783 int i;
784 int j;
785
786 usb_remove_hcd(hcd);
787 free_irq(fhci->timer->irq, hcd);
788 gtm_put_timer16(fhci->timer);
789 cpm_muram_free(cpm_muram_offset(fhci->pram));
790 for (i = 0; i < NUM_GPIOS; i++) {
791 if (!gpio_is_valid(fhci->gpios[i]))
792 continue;
793 gpio_free(fhci->gpios[i]);
794 }
795 for (j = 0; j < NUM_PINS; j++)
796 qe_pin_free(fhci->pins[j]);
797 fhci_dfs_destroy(fhci);
798 usb_put_hcd(hcd);
799 return 0;
800}
801
802static int __devexit of_fhci_remove(struct of_device *ofdev)
803{
804 return fhci_remove(&ofdev->dev);
805}
806
807static struct of_device_id of_fhci_match[] = {
808 { .compatible = "fsl,mpc8323-qe-usb", },
809 {},
810};
811MODULE_DEVICE_TABLE(of, of_fhci_match);
812
813static struct of_platform_driver of_fhci_driver = {
814 .name = "fsl,usb-fhci",
815 .match_table = of_fhci_match,
816 .probe = of_fhci_probe,
817 .remove = __devexit_p(of_fhci_remove),
818};
819
820static int __init fhci_module_init(void)
821{
822 return of_register_platform_driver(&of_fhci_driver);
823}
824module_init(fhci_module_init);
825
826static void __exit fhci_module_exit(void)
827{
828 of_unregister_platform_driver(&of_fhci_driver);
829}
830module_exit(fhci_module_exit);
831
832MODULE_DESCRIPTION("USB Freescale Host Controller Interface Driver");
833MODULE_AUTHOR("Shlomi Gridish <gridish@freescale.com>, "
834 "Jerry Huang <Chang-Ming.Huang@freescale.com>, "
835 "Anton Vorontsov <avorontsov@ru.mvista.com>");
836MODULE_LICENSE("GPL");
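Note on TD sizing in fhci_urb_enqueue() above: a control transfer starts with two TDs (setup and ack) and then falls through to the bulk arithmetic, which adds one TD per 4096-byte chunk, one for any remaining bytes, and possibly one more for a zero-length packet. The standalone sketch below mirrors that arithmetic for illustration only; the helper name and its parameters are invented here and are not part of the patch.

#include <linux/types.h>

/* Illustrative only: mirrors the TD-count math in fhci_urb_enqueue(). */
static int example_count_tds(int is_control, u32 buf_len, u16 maxpacket,
			     int want_zero_packet)
{
	int size = is_control ? 2 : 0;	/* 1 TD for setup, 1 for ack */

	size += buf_len / 4096;		/* one TD per 4096-byte chunk */
	if (buf_len % 4096)
		size++;			/* ...plus one for the remaining bytes */
	if (size == 0)
		size++;			/* a zero-length transfer still needs a TD */
	else if (want_zero_packet && maxpacket && (buf_len % maxpacket) != 0)
		size++;			/* mirrors the URB_ZERO_PACKET branch above */

	return size;
}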
diff --git a/drivers/usb/host/fhci-hub.c b/drivers/usb/host/fhci-hub.c
new file mode 100644
index 000000000000..0cfaedc3e124
--- /dev/null
+++ b/drivers/usb/host/fhci-hub.c
@@ -0,0 +1,345 @@
1/*
2 * Freescale QUICC Engine USB Host Controller Driver
3 *
4 * Copyright (c) Freescale Semiconductor, Inc. 2006.
5 * Shlomi Gridish <gridish@freescale.com>
6 * Jerry Huang <Chang-Ming.Huang@freescale.com>
7 * Copyright (c) Logic Product Development, Inc. 2007
8 * Peter Barada <peterb@logicpd.com>
9 * Copyright (c) MontaVista Software, Inc. 2008.
10 * Anton Vorontsov <avorontsov@ru.mvista.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/spinlock.h>
21#include <linux/delay.h>
22#include <linux/errno.h>
23#include <linux/io.h>
24#include <linux/usb.h>
25#include <linux/gpio.h>
26#include <asm/qe.h>
27#include "../core/hcd.h"
28#include "fhci.h"
29
30/* virtual root hub specific descriptor */
31static u8 root_hub_des[] = {
32 0x09, /* bLength */
33 0x29, /* bDescriptorType; hub descriptor */
34 0x01, /* bNbrPorts */
35 0x00, /* wHubCharacteristics */
36 0x00,
37 0x01, /* bPwrOn2PwrGood; 2ms */
38 0x00, /* bHubContrCurrent; 0mA */
39 0x00, /* DeviceRemovable */
40 0xff, /* PortPwrCtrlMask */
41};
42
43static void fhci_gpio_set_value(struct fhci_hcd *fhci, int gpio_nr, bool on)
44{
45 int gpio = fhci->gpios[gpio_nr];
46 bool alow = fhci->alow_gpios[gpio_nr];
47
48 if (!gpio_is_valid(gpio))
49 return;
50
51 gpio_set_value(gpio, on ^ alow);
52 mdelay(5);
53}
54
55void fhci_config_transceiver(struct fhci_hcd *fhci,
56 enum fhci_port_status status)
57{
58 fhci_dbg(fhci, "-> %s: %d\n", __func__, status);
59
60 switch (status) {
61 case FHCI_PORT_POWER_OFF:
62 fhci_gpio_set_value(fhci, GPIO_POWER, false);
63 break;
64 case FHCI_PORT_DISABLED:
65 case FHCI_PORT_WAITING:
66 fhci_gpio_set_value(fhci, GPIO_POWER, true);
67 break;
68 case FHCI_PORT_LOW:
69 fhci_gpio_set_value(fhci, GPIO_SPEED, false);
70 break;
71 case FHCI_PORT_FULL:
72 fhci_gpio_set_value(fhci, GPIO_SPEED, true);
73 break;
74 default:
75 WARN_ON(1);
76 break;
77 }
78
79 fhci_dbg(fhci, "<- %s: %d\n", __func__, status);
80}
81
82/* disable the USB port by clearing the EN bit in the USBMOD register */
83void fhci_port_disable(struct fhci_hcd *fhci)
84{
85 struct fhci_usb *usb = (struct fhci_usb *)fhci->usb_lld;
86 enum fhci_port_status port_status;
87
88 fhci_dbg(fhci, "-> %s\n", __func__);
89
90 fhci_stop_sof_timer(fhci);
91
92 fhci_flush_all_transmissions(usb);
93
94 fhci_usb_disable_interrupt((struct fhci_usb *)fhci->usb_lld);
95 port_status = usb->port_status;
96 usb->port_status = FHCI_PORT_DISABLED;
97
98 /* Enable IDLE since we want to know if something comes along */
99 usb->saved_msk |= USB_E_IDLE_MASK;
100 out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);
101
102 /* check if a new device was attached during the disconnection process */
103 if (port_status == FHCI_PORT_WAITING)
104 fhci_device_connected_interrupt(fhci);
105 usb->vroot_hub->port.wPortStatus &= ~USB_PORT_STAT_ENABLE;
106 usb->vroot_hub->port.wPortChange |= USB_PORT_STAT_C_ENABLE;
107 fhci_usb_enable_interrupt((struct fhci_usb *)fhci->usb_lld);
108
109 fhci_dbg(fhci, "<- %s\n", __func__);
110}
111
112/* enable the USB port by setting the EN bit in the USBMOD register */
113void fhci_port_enable(void *lld)
114{
115 struct fhci_usb *usb = (struct fhci_usb *)lld;
116 struct fhci_hcd *fhci = usb->fhci;
117
118 fhci_dbg(fhci, "-> %s\n", __func__);
119
120 fhci_config_transceiver(fhci, usb->port_status);
121
122 if ((usb->port_status != FHCI_PORT_FULL) &&
123 (usb->port_status != FHCI_PORT_LOW))
124 fhci_start_sof_timer(fhci);
125
126 usb->vroot_hub->port.wPortStatus |= USB_PORT_STAT_ENABLE;
127 usb->vroot_hub->port.wPortChange |= USB_PORT_STAT_C_ENABLE;
128
129 fhci_dbg(fhci, "<- %s\n", __func__);
130}
131
132void fhci_io_port_generate_reset(struct fhci_hcd *fhci)
133{
134 fhci_dbg(fhci, "-> %s\n", __func__);
135
136 gpio_direction_output(fhci->gpios[GPIO_USBOE], 0);
137 gpio_direction_output(fhci->gpios[GPIO_USBTP], 0);
138 gpio_direction_output(fhci->gpios[GPIO_USBTN], 0);
139
140 mdelay(5);
141
142 qe_pin_set_dedicated(fhci->pins[PIN_USBOE]);
143 qe_pin_set_dedicated(fhci->pins[PIN_USBTP]);
144 qe_pin_set_dedicated(fhci->pins[PIN_USBTN]);
145
146 fhci_dbg(fhci, "<- %s\n", __func__);
147}
148
149/* generate the RESET condition on the bus */
150void fhci_port_reset(void *lld)
151{
152 struct fhci_usb *usb = (struct fhci_usb *)lld;
153 struct fhci_hcd *fhci = usb->fhci;
154 u8 mode;
155 u16 mask;
156
157 fhci_dbg(fhci, "-> %s\n", __func__);
158
159 fhci_stop_sof_timer(fhci);
160 /* disable the USB controller */
161 mode = in_8(&fhci->regs->usb_mod);
162 out_8(&fhci->regs->usb_mod, mode & (~USB_MODE_EN));
163
164 /* disable idle interrupts */
165 mask = in_be16(&fhci->regs->usb_mask);
166 out_be16(&fhci->regs->usb_mask, mask & (~USB_E_IDLE_MASK));
167
168 fhci_io_port_generate_reset(fhci);
169
170 /* enable interrupt on this endpoint */
171 out_be16(&fhci->regs->usb_mask, mask);
172
173 /* enable the USB controller */
174 mode = in_8(&fhci->regs->usb_mod);
175 out_8(&fhci->regs->usb_mod, mode | USB_MODE_EN);
176 fhci_start_sof_timer(fhci);
177
178 fhci_dbg(fhci, "<- %s\n", __func__);
179}
180
181int fhci_hub_status_data(struct usb_hcd *hcd, char *buf)
182{
183 struct fhci_hcd *fhci = hcd_to_fhci(hcd);
184 int ret = 0;
185 unsigned long flags;
186
187 fhci_dbg(fhci, "-> %s\n", __func__);
188
189 spin_lock_irqsave(&fhci->lock, flags);
190
191 if (fhci->vroot_hub->port.wPortChange & (USB_PORT_STAT_C_CONNECTION |
192 USB_PORT_STAT_C_ENABLE | USB_PORT_STAT_C_SUSPEND |
193 USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_OVERCURRENT)) {
194 *buf = 1 << 1;
195 ret = 1;
196 fhci_dbg(fhci, "-- %s\n", __func__);
197 }
198
199 spin_unlock_irqrestore(&fhci->lock, flags);
200
201 fhci_dbg(fhci, "<- %s\n", __func__);
202
203 return ret;
204}
205
206int fhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
207 u16 wIndex, char *buf, u16 wLength)
208{
209 struct fhci_hcd *fhci = hcd_to_fhci(hcd);
210 int retval = 0;
211 int len = 0;
212 struct usb_hub_status *hub_status;
213 struct usb_port_status *port_status;
214 unsigned long flags;
215
216 spin_lock_irqsave(&fhci->lock, flags);
217
218 fhci_dbg(fhci, "-> %s\n", __func__);
219
220 switch (typeReq) {
221 case ClearHubFeature:
222 switch (wValue) {
223 case C_HUB_LOCAL_POWER:
224 case C_HUB_OVER_CURRENT:
225 break;
226 default:
227 goto error;
228 }
229 break;
230 case ClearPortFeature:
231 fhci->vroot_hub->feature &= (1 << wValue);
232
233 switch (wValue) {
234 case USB_PORT_FEAT_ENABLE:
235 fhci->vroot_hub->port.wPortStatus &=
236 ~USB_PORT_STAT_ENABLE;
237 fhci_port_disable(fhci);
238 break;
239 case USB_PORT_FEAT_C_ENABLE:
240 fhci->vroot_hub->port.wPortChange &=
241 ~USB_PORT_STAT_C_ENABLE;
242 break;
243 case USB_PORT_FEAT_SUSPEND:
244 fhci->vroot_hub->port.wPortStatus &=
245 ~USB_PORT_STAT_SUSPEND;
246 fhci_stop_sof_timer(fhci);
247 break;
248 case USB_PORT_FEAT_C_SUSPEND:
249 fhci->vroot_hub->port.wPortChange &=
250 ~USB_PORT_STAT_C_SUSPEND;
251 break;
252 case USB_PORT_FEAT_POWER:
253 fhci->vroot_hub->port.wPortStatus &=
254 ~USB_PORT_STAT_POWER;
255 fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);
256 break;
257 case USB_PORT_FEAT_C_CONNECTION:
258 fhci->vroot_hub->port.wPortChange &=
259 ~USB_PORT_STAT_C_CONNECTION;
260 break;
261 case USB_PORT_FEAT_C_OVER_CURRENT:
262 fhci->vroot_hub->port.wPortChange &=
263 ~USB_PORT_STAT_C_OVERCURRENT;
264 break;
265 case USB_PORT_FEAT_C_RESET:
266 fhci->vroot_hub->port.wPortChange &=
267 ~USB_PORT_STAT_C_RESET;
268 break;
269 default:
270 goto error;
271 }
272 break;
273 case GetHubDescriptor:
274 memcpy(buf, root_hub_des, sizeof(root_hub_des));
275 buf[3] = 0x11; /* per-port power, no ovrcrnt */
276 len = (buf[0] < wLength) ? buf[0] : wLength;
277 break;
278 case GetHubStatus:
279 hub_status = (struct usb_hub_status *)buf;
280 hub_status->wHubStatus =
281 cpu_to_le16(fhci->vroot_hub->hub.wHubStatus);
282 hub_status->wHubChange =
283 cpu_to_le16(fhci->vroot_hub->hub.wHubChange);
284 len = 4;
285 break;
286 case GetPortStatus:
287 port_status = (struct usb_port_status *)buf;
288 port_status->wPortStatus =
289 cpu_to_le16(fhci->vroot_hub->port.wPortStatus);
290 port_status->wPortChange =
291 cpu_to_le16(fhci->vroot_hub->port.wPortChange);
292 len = 4;
293 break;
294 case SetHubFeature:
295 switch (wValue) {
296 case C_HUB_OVER_CURRENT:
297 case C_HUB_LOCAL_POWER:
298 break;
299 default:
300 goto error;
301 }
302 break;
303 case SetPortFeature:
304 fhci->vroot_hub->feature |= (1 << wValue);
305
306 switch (wValue) {
307 case USB_PORT_FEAT_ENABLE:
308 fhci->vroot_hub->port.wPortStatus |=
309 USB_PORT_STAT_ENABLE;
310 fhci_port_enable(fhci->usb_lld);
311 break;
312 case USB_PORT_FEAT_SUSPEND:
313 fhci->vroot_hub->port.wPortStatus |=
314 USB_PORT_STAT_SUSPEND;
315 fhci_stop_sof_timer(fhci);
316 break;
317 case USB_PORT_FEAT_RESET:
318 fhci->vroot_hub->port.wPortStatus |=
319 USB_PORT_STAT_RESET;
320 fhci_port_reset(fhci->usb_lld);
321 fhci->vroot_hub->port.wPortStatus |=
322 USB_PORT_STAT_ENABLE;
323 fhci->vroot_hub->port.wPortStatus &=
324 ~USB_PORT_STAT_RESET;
325 break;
326 case USB_PORT_FEAT_POWER:
327 fhci->vroot_hub->port.wPortStatus |=
328 USB_PORT_STAT_POWER;
329 fhci_config_transceiver(fhci, FHCI_PORT_WAITING);
330 break;
331 default:
332 goto error;
333 }
334 break;
335 default:
336error:
337 retval = -EPIPE;
338 }
339
340 fhci_dbg(fhci, "<- %s\n", __func__);
341
342 spin_unlock_irqrestore(&fhci->lock, flags);
343
344 return retval;
345}
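Note: fhci_hub_status_data() above reports a pending change by setting bit 1 of the returned bitmap. In the hub class protocol, bit 0 of the status-change bitmap refers to the hub itself and bit N to port N, and the FHCI virtual root hub has a single port. The sketch below isolates that convention for illustration only; the function name is invented and not part of the patch.

#include <linux/types.h>

/* Illustrative only: one-port status-change bitmap, as used above. */
static int example_one_port_change_bitmap(u16 port_change, char *buf)
{
	if (port_change) {
		*buf = 1 << 1;	/* bit 1 = port 1 has a change pending */
		return 1;	/* one byte of bitmap data */
	}
	return 0;		/* nothing changed, nothing to report */
}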
diff --git a/drivers/usb/host/fhci-mem.c b/drivers/usb/host/fhci-mem.c
new file mode 100644
index 000000000000..2c0736c99712
--- /dev/null
+++ b/drivers/usb/host/fhci-mem.c
@@ -0,0 +1,113 @@
1/*
2 * Freescale QUICC Engine USB Host Controller Driver
3 *
4 * Copyright (c) Freescale Semiconductor, Inc. 2006.
5 * Shlomi Gridish <gridish@freescale.com>
6 * Jerry Huang <Chang-Ming.Huang@freescale.com>
7 * Copyright (c) Logic Product Development, Inc. 2007
8 * Peter Barada <peterb@logicpd.com>
9 * Copyright (c) MontaVista Software, Inc. 2008.
10 * Anton Vorontsov <avorontsov@ru.mvista.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/delay.h>
21#include <linux/list.h>
22#include <linux/usb.h>
23#include "../core/hcd.h"
24#include "fhci.h"
25
26static void init_td(struct td *td)
27{
28 memset(td, 0, sizeof(*td));
29 INIT_LIST_HEAD(&td->node);
30 INIT_LIST_HEAD(&td->frame_lh);
31}
32
33static void init_ed(struct ed *ed)
34{
35 memset(ed, 0, sizeof(*ed));
36 INIT_LIST_HEAD(&ed->td_list);
37 INIT_LIST_HEAD(&ed->node);
38}
39
40static struct td *get_empty_td(struct fhci_hcd *fhci)
41{
42 struct td *td;
43
44 if (!list_empty(&fhci->empty_tds)) {
45 td = list_entry(fhci->empty_tds.next, struct td, node);
46 list_del(fhci->empty_tds.next);
47 } else {
48 td = kmalloc(sizeof(*td), GFP_ATOMIC);
49 if (!td)
50 fhci_err(fhci, "No memory to allocate to TD\n");
51 else
52 init_td(td);
53 }
54
55 return td;
56}
57
58void fhci_recycle_empty_td(struct fhci_hcd *fhci, struct td *td)
59{
60 init_td(td);
61 list_add(&td->node, &fhci->empty_tds);
62}
63
64struct ed *fhci_get_empty_ed(struct fhci_hcd *fhci)
65{
66 struct ed *ed;
67
68 if (!list_empty(&fhci->empty_eds)) {
69 ed = list_entry(fhci->empty_eds.next, struct ed, node);
70 list_del(fhci->empty_eds.next);
71 } else {
72 ed = kmalloc(sizeof(*ed), GFP_ATOMIC);
73 if (!ed)
74 fhci_err(fhci, "No memory to allocate to ED\n");
75 else
76 init_ed(ed);
77 }
78
79 return ed;
80}
81
82void fhci_recycle_empty_ed(struct fhci_hcd *fhci, struct ed *ed)
83{
84 init_ed(ed);
85 list_add(&ed->node, &fhci->empty_eds);
86}
87
88struct td *fhci_td_fill(struct fhci_hcd *fhci, struct urb *urb,
89 struct urb_priv *urb_priv, struct ed *ed, u16 index,
90 enum fhci_ta_type type, int toggle, u8 *data, u32 len,
91 u16 interval, u16 start_frame, bool ioc)
92{
93 struct td *td = get_empty_td(fhci);
94
95 if (!td)
96 return NULL;
97
98 td->urb = urb;
99 td->ed = ed;
100 td->type = type;
101 td->toggle = toggle;
102 td->data = data;
103 td->len = len;
104 td->iso_index = index;
105 td->interval = interval;
106 td->start_frame = start_frame;
107 td->ioc = ioc;
108 td->status = USB_TD_OK;
109
110 urb_priv->tds[index] = td;
111
112 return td;
113}
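Note: the TD and ED pools above share one pattern: pop a recycled object from a free list when possible, and fall back to a GFP_ATOMIC allocation because the caller may run in interrupt context. The generic sketch below shows that pattern for illustration only; the example_* names are not part of the patch.

#include <linux/list.h>
#include <linux/slab.h>

struct example_obj {
	struct list_head node;
};

/* Pop a recycled object, or allocate one atomically if the pool is empty. */
static struct example_obj *example_pool_get(struct list_head *pool)
{
	struct example_obj *obj;

	if (!list_empty(pool)) {
		obj = list_entry(pool->next, struct example_obj, node);
		list_del(pool->next);
	} else {
		obj = kmalloc(sizeof(*obj), GFP_ATOMIC);
		if (obj)
			INIT_LIST_HEAD(&obj->node);
	}
	return obj;
}

/* Return an object to the pool for later reuse. */
static void example_pool_put(struct list_head *pool, struct example_obj *obj)
{
	list_add(&obj->node, pool);
}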
diff --git a/drivers/usb/host/fhci-q.c b/drivers/usb/host/fhci-q.c
new file mode 100644
index 000000000000..b0a1446ba292
--- /dev/null
+++ b/drivers/usb/host/fhci-q.c
@@ -0,0 +1,284 @@
1/*
2 * Freescale QUICC Engine USB Host Controller Driver
3 *
4 * Copyright (c) Freescale Semiconductor, Inc. 2006.
5 * Shlomi Gridish <gridish@freescale.com>
6 * Jerry Huang <Chang-Ming.Huang@freescale.com>
7 * Copyright (c) Logic Product Development, Inc. 2007
8 * Peter Barada <peterb@logicpd.com>
9 * Copyright (c) MontaVista Software, Inc. 2008.
10 * Anton Vorontsov <avorontsov@ru.mvista.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/spinlock.h>
21#include <linux/errno.h>
22#include <linux/list.h>
23#include <linux/usb.h>
24#include "../core/hcd.h"
25#include "fhci.h"
26
27/* maps the hardware error code to the USB error code */
28static int status_to_error(u32 status)
29{
30 if (status == USB_TD_OK)
31 return 0;
32 else if (status & USB_TD_RX_ER_CRC)
33 return -EILSEQ;
34 else if (status & USB_TD_RX_ER_NONOCT)
35 return -EPROTO;
36 else if (status & USB_TD_RX_ER_OVERUN)
37 return -ECOMM;
38 else if (status & USB_TD_RX_ER_BITSTUFF)
39 return -EPROTO;
40 else if (status & USB_TD_RX_ER_PID)
41 return -EILSEQ;
42 else if (status & (USB_TD_TX_ER_NAK | USB_TD_TX_ER_TIMEOUT))
43 return -ETIMEDOUT;
44 else if (status & USB_TD_TX_ER_STALL)
45 return -EPIPE;
46 else if (status & USB_TD_TX_ER_UNDERUN)
47 return -ENOSR;
48 else if (status & USB_TD_RX_DATA_UNDERUN)
49 return -EREMOTEIO;
50 else if (status & USB_TD_RX_DATA_OVERUN)
51 return -EOVERFLOW;
52 else
53 return -EINVAL;
54}
55
56void fhci_add_td_to_frame(struct fhci_time_frame *frame, struct td *td)
57{
58 list_add_tail(&td->frame_lh, &frame->tds_list);
59}
60
61void fhci_add_tds_to_ed(struct ed *ed, struct td **td_list, int number)
62{
63 int i;
64
65 for (i = 0; i < number; i++) {
66 struct td *td = td_list[i];
67 list_add_tail(&td->node, &ed->td_list);
68 }
69 if (ed->td_head == NULL)
70 ed->td_head = td_list[0];
71}
72
73static struct td *peek_td_from_ed(struct ed *ed)
74{
75 struct td *td;
76
77 if (!list_empty(&ed->td_list))
78 td = list_entry(ed->td_list.next, struct td, node);
79 else
80 td = NULL;
81
82 return td;
83}
84
85struct td *fhci_remove_td_from_frame(struct fhci_time_frame *frame)
86{
87 struct td *td;
88
89 if (!list_empty(&frame->tds_list)) {
90 td = list_entry(frame->tds_list.next, struct td, frame_lh);
91 list_del_init(frame->tds_list.next);
92 } else
93 td = NULL;
94
95 return td;
96}
97
98struct td *fhci_peek_td_from_frame(struct fhci_time_frame *frame)
99{
100 struct td *td;
101
102 if (!list_empty(&frame->tds_list))
103 td = list_entry(frame->tds_list.next, struct td, frame_lh);
104 else
105 td = NULL;
106
107 return td;
108}
109
110struct td *fhci_remove_td_from_ed(struct ed *ed)
111{
112 struct td *td;
113
114 if (!list_empty(&ed->td_list)) {
115 td = list_entry(ed->td_list.next, struct td, node);
116 list_del_init(ed->td_list.next);
117
118 /* if this TD was the ED's head, find next TD */
119 if (!list_empty(&ed->td_list))
120 ed->td_head = list_entry(ed->td_list.next, struct td,
121 node);
122 else
123 ed->td_head = NULL;
124 } else
125 td = NULL;
126
127 return td;
128}
129
130struct td *fhci_remove_td_from_done_list(struct fhci_controller_list *p_list)
131{
132 struct td *td;
133
134 if (!list_empty(&p_list->done_list)) {
135 td = list_entry(p_list->done_list.next, struct td, node);
136 list_del_init(p_list->done_list.next);
137 } else
138 td = NULL;
139
140 return td;
141}
142
143void fhci_move_td_from_ed_to_done_list(struct fhci_usb *usb, struct ed *ed)
144{
145 struct td *td;
146
147 td = ed->td_head;
148 list_del_init(&td->node);
149
150 /* If this TD was the ED's head, find the next TD */
151 if (!list_empty(&ed->td_list))
152 ed->td_head = list_entry(ed->td_list.next, struct td, node);
153 else {
154 ed->td_head = NULL;
155 ed->state = FHCI_ED_SKIP;
156 }
157 ed->toggle_carry = td->toggle;
158 list_add_tail(&td->node, &usb->hc_list->done_list);
159 if (td->ioc)
160 usb->transfer_confirm(usb->fhci);
161}
162
163/* free done FHCI URB resources such as ED and TD */
164static void free_urb_priv(struct fhci_hcd *fhci, struct urb *urb)
165{
166 int i;
167 struct urb_priv *urb_priv = urb->hcpriv;
168 struct ed *ed = urb_priv->ed;
169
170 for (i = 0; i < urb_priv->num_of_tds; i++) {
171 list_del_init(&urb_priv->tds[i]->node);
172 fhci_recycle_empty_td(fhci, urb_priv->tds[i]);
173 }
174
175 /* if this TD was the ED's head, find the next TD */
176 if (!list_empty(&ed->td_list))
177 ed->td_head = list_entry(ed->td_list.next, struct td, node);
178 else
179 ed->td_head = NULL;
180
181 kfree(urb_priv->tds);
182 kfree(urb_priv);
183 urb->hcpriv = NULL;
184
185 /* if the ED has no more TDs, take it off its list */
186 if (ed->td_head == NULL)
187 list_del_init(&ed->node);
188 fhci->active_urbs--;
189}
190
191/* this routine is called to complete and free a done URB */
192void fhci_urb_complete_free(struct fhci_hcd *fhci, struct urb *urb)
193{
194 free_urb_priv(fhci, urb);
195
196 if (urb->status == -EINPROGRESS) {
197 if (urb->actual_length != urb->transfer_buffer_length &&
198 urb->transfer_flags & URB_SHORT_NOT_OK)
199 urb->status = -EREMOTEIO;
200 else
201 urb->status = 0;
202 }
203
204 usb_hcd_unlink_urb_from_ep(fhci_to_hcd(fhci), urb);
205
206 spin_unlock(&fhci->lock);
207
208 usb_hcd_giveback_urb(fhci_to_hcd(fhci), urb, urb->status);
209
210 spin_lock(&fhci->lock);
211}
212
213/*
214 * calculate transfer length/stats and update the URB
215 * Precondition: irqsafe (only for urb->status locking)
216 */
217void fhci_done_td(struct urb *urb, struct td *td)
218{
219 struct ed *ed = td->ed;
220 u32 cc = td->status;
221
222 /* ISO...drivers see per-TD length/status */
223 if (ed->mode == FHCI_TF_ISO) {
224 u32 len;
225 if (!(urb->transfer_flags & URB_SHORT_NOT_OK &&
226 cc == USB_TD_RX_DATA_UNDERUN))
227 cc = USB_TD_OK;
228
229 if (usb_pipeout(urb->pipe))
230 len = urb->iso_frame_desc[td->iso_index].length;
231 else
232 len = td->actual_len;
233
234 urb->actual_length += len;
235 urb->iso_frame_desc[td->iso_index].actual_length = len;
236 urb->iso_frame_desc[td->iso_index].status =
237 status_to_error(cc);
238 }
239
240 /* BULK,INT,CONTROL... drivers see aggregate length/status,
241 * except that "setup" bytes aren't counted and "short" transfers
242 * might not be reported as errors.
243 */
244 else {
245 if (td->error_cnt >= 3)
246 urb->error_count = 3;
247
248 /* control endpoints only have soft stalls */
249
250 /* update packet status if needed (short may be OK) */
251 if (!(urb->transfer_flags & URB_SHORT_NOT_OK) &&
252 cc == USB_TD_RX_DATA_UNDERUN) {
253 ed->state = FHCI_ED_OPER;
254 cc = USB_TD_OK;
255 }
256 if (cc != USB_TD_OK) {
257 if (urb->status == -EINPROGRESS)
258 urb->status = status_to_error(cc);
259 }
260
261 /* count all non-empty packets except control SETUP packet */
262 if (td->type != FHCI_TA_SETUP || td->iso_index != 0)
263 urb->actual_length += td->actual_len;
264 }
265}
266
267/* there are some pending requests to unlink */
268void fhci_del_ed_list(struct fhci_hcd *fhci, struct ed *ed)
269{
270 struct td *td = peek_td_from_ed(ed);
271 struct urb *urb = td->urb;
272 struct urb_priv *urb_priv = urb->hcpriv;
273
274 if (urb_priv->state == URB_DEL) {
275 td = fhci_remove_td_from_ed(ed);
276 /* HC may have partly processed this TD */
277 if (td->status != USB_TD_INPROGRESS)
278 fhci_done_td(urb, td);
279
280 /* URB is done;clean up */
281 if (++(urb_priv->tds_cnt) == urb_priv->num_of_tds)
282 fhci_urb_complete_free(fhci, urb);
283 }
284}
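Note: fhci_urb_complete_free() above drops fhci->lock around usb_hcd_giveback_urb() because the completion callback may resubmit the URB and re-enter the HCD. The sketch below isolates that lock-drop pattern for illustration only; it assumes the caller already holds fhci->lock, and the function name is invented.

/* Illustrative only: give a finished URB back without holding the HCD lock. */
static void example_giveback_urb(struct fhci_hcd *fhci, struct urb *urb,
				 int status)
{
	/* caller holds fhci->lock */
	usb_hcd_unlink_urb_from_ep(fhci_to_hcd(fhci), urb);

	spin_unlock(&fhci->lock);
	usb_hcd_giveback_urb(fhci_to_hcd(fhci), urb, status);
	spin_lock(&fhci->lock);
}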
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
new file mode 100644
index 000000000000..bb63b68ddb77
--- /dev/null
+++ b/drivers/usb/host/fhci-sched.c
@@ -0,0 +1,888 @@
1/*
2 * Freescale QUICC Engine USB Host Controller Driver
3 *
4 * Copyright (c) Freescale Semiconductor, Inc. 2006.
5 * Shlomi Gridish <gridish@freescale.com>
6 * Jerry Huang <Chang-Ming.Huang@freescale.com>
7 * Copyright (c) Logic Product Development, Inc. 2007
8 * Peter Barada <peterb@logicpd.com>
9 * Copyright (c) MontaVista Software, Inc. 2008.
10 * Anton Vorontsov <avorontsov@ru.mvista.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/spinlock.h>
21#include <linux/delay.h>
22#include <linux/errno.h>
23#include <linux/list.h>
24#include <linux/interrupt.h>
25#include <linux/io.h>
26#include <linux/usb.h>
27#include <asm/qe.h>
28#include <asm/fsl_gtm.h>
29#include "../core/hcd.h"
30#include "fhci.h"
31
32static void recycle_frame(struct fhci_usb *usb, struct packet *pkt)
33{
34 pkt->data = NULL;
35 pkt->len = 0;
36 pkt->status = USB_TD_OK;
37 pkt->info = 0;
38 pkt->priv_data = NULL;
39
40 cq_put(usb->ep0->empty_frame_Q, pkt);
41}
42
43/* confirm submitted packet */
44void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt)
45{
46 struct td *td;
47 struct packet *td_pkt;
48 struct ed *ed;
49 u32 trans_len;
50 bool td_done = false;
51
52 td = fhci_remove_td_from_frame(usb->actual_frame);
53 td_pkt = td->pkt;
54 trans_len = pkt->len;
55 td->status = pkt->status;
56 if (td->type == FHCI_TA_IN && td_pkt->info & PKT_DUMMY_PACKET) {
57 if ((td->data + td->actual_len) && trans_len)
58 memcpy(td->data + td->actual_len, pkt->data,
59 trans_len);
60 cq_put(usb->ep0->dummy_packets_Q, pkt->data);
61 }
62
63 recycle_frame(usb, pkt);
64
65 ed = td->ed;
66 if (ed->mode == FHCI_TF_ISO) {
67 if (ed->td_list.next->next != &ed->td_list) {
68 struct td *td_next =
69 list_entry(ed->td_list.next->next, struct td,
70 node);
71
72 td_next->start_frame = usb->actual_frame->frame_num;
73 }
74 td->actual_len = trans_len;
75 td_done = true;
76 } else if ((td->status & USB_TD_ERROR) &&
77 !(td->status & USB_TD_TX_ER_NAK)) {
78 /*
79 * There was an error on the transaction (but not NAK).
80 * If it is fatal error (data underrun, stall, bad pid or 3
81 * errors exceeded), mark this TD as done.
82 */
83 if ((td->status & USB_TD_RX_DATA_UNDERUN) ||
84 (td->status & USB_TD_TX_ER_STALL) ||
85 (td->status & USB_TD_RX_ER_PID) ||
86 (++td->error_cnt >= 3)) {
87 ed->state = FHCI_ED_HALTED;
88 td_done = true;
89
90 if (td->status & USB_TD_RX_DATA_UNDERUN) {
91 fhci_dbg(usb->fhci, "td err fu\n");
92 td->toggle = !td->toggle;
93 td->actual_len += trans_len;
94 } else {
95 fhci_dbg(usb->fhci, "td err f!u\n");
96 }
97 } else {
98 fhci_dbg(usb->fhci, "td err !f\n");
99 /* it is not a fatal error - retry this transaction */
100 td->nak_cnt = 0;
101 td->error_cnt++;
102 td->status = USB_TD_OK;
103 }
104 } else if (td->status & USB_TD_TX_ER_NAK) {
105 /* there was a NAK response */
106 fhci_vdbg(usb->fhci, "td nack\n");
107 td->nak_cnt++;
108 td->error_cnt = 0;
109 td->status = USB_TD_OK;
110 } else {
111 /* there was no error on transaction */
112 td->error_cnt = 0;
113 td->nak_cnt = 0;
114 td->toggle = !td->toggle;
115 td->actual_len += trans_len;
116
117 if (td->len == td->actual_len)
118 td_done = true;
119 }
120
121 if (td_done)
122 fhci_move_td_from_ed_to_done_list(usb, ed);
123}
124
125/*
126 * Flush all transmitted packets from BDs
127 * This routine is called when disabling the USB port to flush all
128 * transmissions that are already scheduled in the BDs
129 */
130void fhci_flush_all_transmissions(struct fhci_usb *usb)
131{
132 u8 mode;
133 struct td *td;
134
135 mode = in_8(&usb->fhci->regs->usb_mod);
136 clrbits8(&usb->fhci->regs->usb_mod, USB_MODE_EN);
137
138 fhci_flush_bds(usb);
139
140 while ((td = fhci_peek_td_from_frame(usb->actual_frame)) != NULL) {
141 struct packet *pkt = td->pkt;
142
143 pkt->status = USB_TD_TX_ER_TIMEOUT;
144 fhci_transaction_confirm(usb, pkt);
145 }
146
147 usb->actual_frame->frame_status = FRAME_END_TRANSMISSION;
148
149 /* reset the event register */
150 out_be16(&usb->fhci->regs->usb_event, 0xffff);
151 /* enable the USB controller */
152 out_8(&usb->fhci->regs->usb_mod, mode | USB_MODE_EN);
153}
154
155/*
156 * This function forms and transmits the packet. It handles all
157 * endpoint types: ISO, interrupt, control and bulk.
158 */
159static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
160{
161 u32 fw_transaction_time, len = 0;
162 struct packet *pkt;
163 u8 *data = NULL;
164
165 /* calculate data address, len and toggle, and then add the transaction */
166 if (td->toggle == USB_TD_TOGGLE_CARRY)
167 td->toggle = ed->toggle_carry;
168
169 switch (ed->mode) {
170 case FHCI_TF_ISO:
171 len = td->len;
172 if (td->type != FHCI_TA_IN)
173 data = td->data;
174 break;
175 case FHCI_TF_CTRL:
176 case FHCI_TF_BULK:
177 len = min(td->len - td->actual_len, ed->max_pkt_size);
178 if (!((td->type == FHCI_TA_IN) &&
179 ((len + td->actual_len) == td->len)))
180 data = td->data + td->actual_len;
181 break;
182 case FHCI_TF_INTR:
183 len = min(td->len, ed->max_pkt_size);
184 if (!((td->type == FHCI_TA_IN) &&
185 ((td->len + CRC_SIZE) >= ed->max_pkt_size)))
186 data = td->data;
187 break;
188 default:
189 break;
190 }
191
192 if (usb->port_status == FHCI_PORT_FULL)
193 fw_transaction_time = (((len + PROTOCOL_OVERHEAD) * 11) >> 4);
194 else
195 fw_transaction_time = ((len + PROTOCOL_OVERHEAD) * 6);
196
197 /* check if there's enough space in this frame to submit this TD */
198 if (usb->actual_frame->total_bytes + len + PROTOCOL_OVERHEAD >=
199 usb->max_bytes_per_frame) {
200 fhci_vdbg(usb->fhci, "not enough space in this frame: "
201 "%d %d %d\n", usb->actual_frame->total_bytes, len,
202 usb->max_bytes_per_frame);
203 return -1;
204 }
205
206 /* check if there's enough time in this frame to submit this TD */
207 if (usb->actual_frame->frame_status != FRAME_IS_PREPARED &&
208 (usb->actual_frame->frame_status & FRAME_END_TRANSMISSION ||
209 (fw_transaction_time + usb->sw_transaction_time >=
210 1000 - fhci_get_sof_timer_count(usb)))) {
211 fhci_dbg(usb->fhci, "not enough time in this frame\n");
212 return -1;
213 }
214
215 /* update frame object fields before transmitting */
216 pkt = cq_get(usb->ep0->empty_frame_Q);
217 if (!pkt) {
218 fhci_dbg(usb->fhci, "there is no empty frame\n");
219 return -1;
220 }
221 td->pkt = pkt;
222
223 pkt->info = 0;
224 if (data == NULL) {
225 data = cq_get(usb->ep0->dummy_packets_Q);
226 BUG_ON(!data);
227 pkt->info = PKT_DUMMY_PACKET;
228 }
229 pkt->data = data;
230 pkt->len = len;
231 pkt->status = USB_TD_OK;
232 /* update TD status field before transmitting */
233 td->status = USB_TD_INPROGRESS;
234 /* update actual frame time object with the actual transmission */
235 usb->actual_frame->total_bytes += (len + PROTOCOL_OVERHEAD);
236 fhci_add_td_to_frame(usb->actual_frame, td);
237
238 if (usb->port_status != FHCI_PORT_FULL &&
239 usb->port_status != FHCI_PORT_LOW) {
240 pkt->status = USB_TD_TX_ER_TIMEOUT;
241 pkt->len = 0;
242 fhci_transaction_confirm(usb, pkt);
243 } else if (fhci_host_transaction(usb, pkt, td->type, ed->dev_addr,
244 ed->ep_addr, ed->mode, ed->speed, td->toggle)) {
245 /* remove TD from actual frame */
246 list_del_init(&td->frame_lh);
247 td->status = USB_TD_OK;
248 if (pkt->info & PKT_DUMMY_PACKET)
249 cq_put(usb->ep0->dummy_packets_Q, pkt->data);
250 recycle_frame(usb, pkt);
251 usb->actual_frame->total_bytes -= (len + PROTOCOL_OVERHEAD);
252 fhci_err(usb->fhci, "host transaction failed\n");
253 return -1;
254 }
255
256 return len;
257}
258
259static void move_head_to_tail(struct list_head *list)
260{
261 struct list_head *node = list->next;
262
263 if (!list_empty(list)) {
264 list_del(node);
265 list_add_tail(node, list);
266 }
267}
268
269/*
270 * This function goes through the endpoint list and schedules the
271 * transactions within this list
272 */
273static int scan_ed_list(struct fhci_usb *usb,
274 struct list_head *list, enum fhci_tf_mode list_type)
275{
276 static const int frame_part[4] = {
277 [FHCI_TF_CTRL] = MAX_BYTES_PER_FRAME,
278 [FHCI_TF_ISO] = (MAX_BYTES_PER_FRAME *
279 MAX_PERIODIC_FRAME_USAGE) / 100,
280 [FHCI_TF_BULK] = MAX_BYTES_PER_FRAME,
281 [FHCI_TF_INTR] = (MAX_BYTES_PER_FRAME *
282 MAX_PERIODIC_FRAME_USAGE) / 100
283 };
284 struct ed *ed;
285 struct td *td;
286 int ans = 1;
287 u32 save_transaction_time = usb->sw_transaction_time;
288
289 list_for_each_entry(ed, list, node) {
290 td = ed->td_head;
291
292 if (!td || (td && td->status == USB_TD_INPROGRESS))
293 continue;
294
295 if (ed->state != FHCI_ED_OPER) {
296 if (ed->state == FHCI_ED_URB_DEL) {
297 td->status = USB_TD_OK;
298 fhci_move_td_from_ed_to_done_list(usb, ed);
299 ed->state = FHCI_ED_SKIP;
300 }
301 continue;
302 }
303
304 /*
305 * for interrupt and ISO pipes, skip this ED if its polling
306 * interval has not yet elapsed
307 */
308 if ((list_type == FHCI_TF_INTR || list_type == FHCI_TF_ISO) &&
309 (((usb->actual_frame->frame_num -
310 td->start_frame) & 0x7ff) < td->interval))
311 continue;
312
313 if (add_packet(usb, ed, td) < 0)
314 continue;
315
316 /* update time stamps in the TD */
317 td->start_frame = usb->actual_frame->frame_num;
318 usb->sw_transaction_time += save_transaction_time;
319
320 if (usb->actual_frame->total_bytes >=
321 usb->max_bytes_per_frame) {
322 usb->actual_frame->frame_status =
323 FRAME_DATA_END_TRANSMISSION;
324 fhci_push_dummy_bd(usb->ep0);
325 ans = 0;
326 break;
327 }
328
329 if (usb->actual_frame->total_bytes >= frame_part[list_type])
330 break;
331 }
332
333 /* be fair to each ED (move the list head around) */
334 move_head_to_tail(list);
335 usb->sw_transaction_time = save_transaction_time;
336
337 return ans;
338}
339
340static u32 rotate_frames(struct fhci_usb *usb)
341{
342 struct fhci_hcd *fhci = usb->fhci;
343
344 if (!list_empty(&usb->actual_frame->tds_list)) {
345 if ((((in_be16(&fhci->pram->frame_num) & 0x07ff) -
346 usb->actual_frame->frame_num) & 0x7ff) > 5)
347 fhci_flush_actual_frame(usb);
348 else
349 return -EINVAL;
350 }
351
352 usb->actual_frame->frame_status = FRAME_IS_PREPARED;
353 usb->actual_frame->frame_num = in_be16(&fhci->pram->frame_num) & 0x7ff;
354 usb->actual_frame->total_bytes = 0;
355
356 return 0;
357}
358
359/*
360 * This function schedules the USB transactions and processes the
361 * endpoints in the following order: ISO, interrupt, control and bulk.
362 */
363void fhci_schedule_transactions(struct fhci_usb *usb)
364{
365 int left = 1;
366
367 if (usb->actual_frame->frame_status & FRAME_END_TRANSMISSION)
368 if (rotate_frames(usb) != 0)
369 return;
370
371 if (usb->actual_frame->frame_status & FRAME_END_TRANSMISSION)
372 return;
373
374 if (usb->actual_frame->total_bytes == 0) {
375 /*
376 * schedule the next available ISO transfer
377 * or the next stage of the ISO transfer
378 */
379 scan_ed_list(usb, &usb->hc_list->iso_list, FHCI_TF_ISO);
380
381 /*
382 * schedule the next available interrupt transfer or
383 * the next stage of the interrupt transfer
384 */
385 scan_ed_list(usb, &usb->hc_list->intr_list, FHCI_TF_INTR);
386
387 /*
388 * schedule the next available control transfer
389 * or the next stage of the control transfer
390 */
391 left = scan_ed_list(usb, &usb->hc_list->ctrl_list,
392 FHCI_TF_CTRL);
393 }
394
395 /*
396 * schedule the next available bulk transfer or the next stage of the
397 * bulk transfer
398 */
399 if (left > 0)
400 scan_ed_list(usb, &usb->hc_list->bulk_list, FHCI_TF_BULK);
401}
402
403/* Handles SOF interrupt */
404static void sof_interrupt(struct fhci_hcd *fhci)
405{
406 struct fhci_usb *usb = fhci->usb_lld;
407
408 if ((usb->port_status == FHCI_PORT_DISABLED) &&
409 (usb->vroot_hub->port.wPortStatus & USB_PORT_STAT_CONNECTION) &&
410 !(usb->vroot_hub->port.wPortChange & USB_PORT_STAT_C_CONNECTION)) {
411 if (usb->vroot_hub->port.wPortStatus & USB_PORT_STAT_LOW_SPEED)
412 usb->port_status = FHCI_PORT_LOW;
413 else
414 usb->port_status = FHCI_PORT_FULL;
415 /* Disable IDLE */
416 usb->saved_msk &= ~USB_E_IDLE_MASK;
417 out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);
418 }
419
420 gtm_set_exact_timer16(fhci->timer, usb->max_frame_usage, false);
421
422 fhci_host_transmit_actual_frame(usb);
423 usb->actual_frame->frame_status = FRAME_IS_TRANSMITTED;
424
425 fhci_schedule_transactions(usb);
426}
427
428/* Handles device disconnected interrupt on port */
429void fhci_device_disconnected_interrupt(struct fhci_hcd *fhci)
430{
431 struct fhci_usb *usb = fhci->usb_lld;
432
433 fhci_dbg(fhci, "-> %s\n", __func__);
434
435 fhci_usb_disable_interrupt(usb);
436 clrbits8(&usb->fhci->regs->usb_mod, USB_MODE_LSS);
437 usb->port_status = FHCI_PORT_DISABLED;
438
439 fhci_stop_sof_timer(fhci);
440
441 /* Enable IDLE since we want to know if something comes along */
442 usb->saved_msk |= USB_E_IDLE_MASK;
443 out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);
444
445 usb->vroot_hub->port.wPortStatus &= ~USB_PORT_STAT_CONNECTION;
446 usb->vroot_hub->port.wPortChange |= USB_PORT_STAT_C_CONNECTION;
447 usb->max_bytes_per_frame = 0;
448 fhci_usb_enable_interrupt(usb);
449
450 fhci_dbg(fhci, "<- %s\n", __func__);
451}
452
453/* detect a new device connected on the USB port */
454void fhci_device_connected_interrupt(struct fhci_hcd *fhci)
455{
456
457 struct fhci_usb *usb = fhci->usb_lld;
458 int state;
459 int ret;
460
461 fhci_dbg(fhci, "-> %s\n", __func__);
462
463 fhci_usb_disable_interrupt(usb);
464 state = fhci_ioports_check_bus_state(fhci);
465
466 /* low-speed device was connected to the USB port */
467 if (state == 1) {
468 ret = qe_usb_clock_set(fhci->lowspeed_clk, USB_CLOCK >> 3);
469 if (ret) {
470 fhci_warn(fhci, "Low-Speed device is not supported, "
471 "try use BRGx\n");
472 goto out;
473 }
474
475 usb->port_status = FHCI_PORT_LOW;
476 setbits8(&usb->fhci->regs->usb_mod, USB_MODE_LSS);
477 usb->vroot_hub->port.wPortStatus |=
478 (USB_PORT_STAT_LOW_SPEED |
479 USB_PORT_STAT_CONNECTION);
480 usb->vroot_hub->port.wPortChange |=
481 USB_PORT_STAT_C_CONNECTION;
482 usb->max_bytes_per_frame =
483 (MAX_BYTES_PER_FRAME >> 3) - 7;
484 fhci_port_enable(usb);
485 } else if (state == 2) {
486 ret = qe_usb_clock_set(fhci->fullspeed_clk, USB_CLOCK);
487 if (ret) {
488 fhci_warn(fhci, "Full-Speed device is not supported, "
489 "try use CLKx\n");
490 goto out;
491 }
492
493 usb->port_status = FHCI_PORT_FULL;
494 clrbits8(&usb->fhci->regs->usb_mod, USB_MODE_LSS);
495 usb->vroot_hub->port.wPortStatus &=
496 ~USB_PORT_STAT_LOW_SPEED;
497 usb->vroot_hub->port.wPortStatus |=
498 USB_PORT_STAT_CONNECTION;
499 usb->vroot_hub->port.wPortChange |=
500 USB_PORT_STAT_C_CONNECTION;
501 usb->max_bytes_per_frame = (MAX_BYTES_PER_FRAME - 15);
502 fhci_port_enable(usb);
503 }
504out:
505 fhci_usb_enable_interrupt(usb);
506 fhci_dbg(fhci, "<- %s\n", __func__);
507}
508
509irqreturn_t fhci_frame_limit_timer_irq(int irq, void *_hcd)
510{
511 struct usb_hcd *hcd = _hcd;
512 struct fhci_hcd *fhci = hcd_to_fhci(hcd);
513 struct fhci_usb *usb = fhci->usb_lld;
514
515 spin_lock(&fhci->lock);
516
517 gtm_set_exact_timer16(fhci->timer, 1000, false);
518
519 if (usb->actual_frame->frame_status == FRAME_IS_TRANSMITTED) {
520 usb->actual_frame->frame_status = FRAME_TIMER_END_TRANSMISSION;
521 fhci_push_dummy_bd(usb->ep0);
522 }
523
524 fhci_schedule_transactions(usb);
525
526 spin_unlock(&fhci->lock);
527
528 return IRQ_HANDLED;
529}
530
531/* Cancel transmission on the USB endpoint */
532static void abort_transmission(struct fhci_usb *usb)
533{
534 fhci_dbg(usb->fhci, "-> %s\n", __func__);
535 /* issue stop Tx command */
536 qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB, EP_ZERO, 0);
537 /* flush Tx FIFOs */
538 out_8(&usb->fhci->regs->usb_comm, USB_CMD_FLUSH_FIFO | EP_ZERO);
539 udelay(1000);
540 /* reset Tx BDs */
541 fhci_flush_bds(usb);
542 /* issue restart Tx command */
543 qe_issue_cmd(QE_USB_RESTART_TX, QE_CR_SUBBLOCK_USB, EP_ZERO, 0);
544 fhci_dbg(usb->fhci, "<- %s\n", __func__);
545}
546
547irqreturn_t fhci_irq(struct usb_hcd *hcd)
548{
549 struct fhci_hcd *fhci = hcd_to_fhci(hcd);
550 struct fhci_usb *usb;
551 u16 usb_er = 0;
552 unsigned long flags;
553
554 spin_lock_irqsave(&fhci->lock, flags);
555
556 usb = fhci->usb_lld;
557
558 usb_er |= in_be16(&usb->fhci->regs->usb_event) &
559 in_be16(&usb->fhci->regs->usb_mask);
560
561 /* clear event bits for next time */
562 out_be16(&usb->fhci->regs->usb_event, usb_er);
563
564 fhci_dbg_isr(fhci, usb_er);
565
566 if (usb_er & USB_E_RESET_MASK) {
567 if ((usb->port_status == FHCI_PORT_FULL) ||
568 (usb->port_status == FHCI_PORT_LOW)) {
569 fhci_device_disconnected_interrupt(fhci);
570 usb_er &= ~USB_E_IDLE_MASK;
571 } else if (usb->port_status == FHCI_PORT_WAITING) {
572 usb->port_status = FHCI_PORT_DISCONNECTING;
573
574 /* Turn on IDLE since we want to disconnect */
575 usb->saved_msk |= USB_E_IDLE_MASK;
576 out_be16(&usb->fhci->regs->usb_event,
577 usb->saved_msk);
578 } else if (usb->port_status == FHCI_PORT_DISABLED) {
579 if (fhci_ioports_check_bus_state(fhci) == 1 &&
580 usb->port_status != FHCI_PORT_LOW &&
581 usb->port_status != FHCI_PORT_FULL)
582 fhci_device_connected_interrupt(fhci);
583 }
584 usb_er &= ~USB_E_RESET_MASK;
585 }
586
587 if (usb_er & USB_E_MSF_MASK) {
588 abort_transmission(fhci->usb_lld);
589 usb_er &= ~USB_E_MSF_MASK;
590 }
591
592 if (usb_er & (USB_E_SOF_MASK | USB_E_SFT_MASK)) {
593 sof_interrupt(fhci);
594 usb_er &= ~(USB_E_SOF_MASK | USB_E_SFT_MASK);
595 }
596
597 if (usb_er & USB_E_TXB_MASK) {
598 fhci_tx_conf_interrupt(fhci->usb_lld);
599 usb_er &= ~USB_E_TXB_MASK;
600 }
601
602 if (usb_er & USB_E_TXE1_MASK) {
603 fhci_tx_conf_interrupt(fhci->usb_lld);
604 usb_er &= ~USB_E_TXE1_MASK;
605 }
606
607 if (usb_er & USB_E_IDLE_MASK) {
608 if (usb->port_status == FHCI_PORT_DISABLED &&
609 usb->port_status != FHCI_PORT_LOW &&
610 usb->port_status != FHCI_PORT_FULL) {
611 usb_er &= ~USB_E_RESET_MASK;
612 fhci_device_connected_interrupt(fhci);
613 } else if (usb->port_status ==
614 FHCI_PORT_DISCONNECTING) {
615 /* XXX usb->port_status = FHCI_PORT_WAITING; */
616 /* Disable IDLE */
617 usb->saved_msk &= ~USB_E_IDLE_MASK;
618 out_be16(&usb->fhci->regs->usb_mask,
619 usb->saved_msk);
620 } else {
621 fhci_dbg_isr(fhci, -1);
622 }
623
624 usb_er &= ~USB_E_IDLE_MASK;
625 }
626
627 spin_unlock_irqrestore(&fhci->lock, flags);
628
629 return IRQ_HANDLED;
630}
631
632
633/*
634 * Process normal completions (error or success) and clean the schedule.
635 *
636 * This is the main path for handing URBs back to drivers. The only other
637 * path is process_del_list(), which unlinks URBs by scanning EDs, instead
638 * of scanning the (re-reversed) done list as this does.
639 */
640static void process_done_list(unsigned long data)
641{
642 struct urb *urb;
643 struct ed *ed;
644 struct td *td;
645 struct urb_priv *urb_priv;
646 struct fhci_hcd *fhci = (struct fhci_hcd *)data;
647
648 disable_irq(fhci->timer->irq);
649 disable_irq(fhci_to_hcd(fhci)->irq);
650 spin_lock(&fhci->lock);
651
652 td = fhci_remove_td_from_done_list(fhci->hc_list);
653 while (td != NULL) {
654 urb = td->urb;
655 urb_priv = urb->hcpriv;
656 ed = td->ed;
657
658 /* update URB's length and status from TD */
659 fhci_done_td(urb, td);
660 urb_priv->tds_cnt++;
661
662 /*
663 * if all this urb's TDs are done, call complete()
664		 * Interrupt transfers are the only special case:
665		 * they are reissued, until "deleted" by usb_unlink_urb
666 * (real work done in a SOF intr, by process_del_list)
667 */
668 if (urb_priv->tds_cnt == urb_priv->num_of_tds) {
669 fhci_urb_complete_free(fhci, urb);
670 } else if (urb_priv->state == URB_DEL &&
671 ed->state == FHCI_ED_SKIP) {
672 fhci_del_ed_list(fhci, ed);
673 ed->state = FHCI_ED_OPER;
674 } else if (ed->state == FHCI_ED_HALTED) {
675 urb_priv->state = URB_DEL;
676 ed->state = FHCI_ED_URB_DEL;
677 fhci_del_ed_list(fhci, ed);
678 ed->state = FHCI_ED_OPER;
679 }
680
681 td = fhci_remove_td_from_done_list(fhci->hc_list);
682 }
683
684 spin_unlock(&fhci->lock);
685 enable_irq(fhci->timer->irq);
686 enable_irq(fhci_to_hcd(fhci)->irq);
687}
688
689DECLARE_TASKLET(fhci_tasklet, process_done_list, 0);
690
691/* transfer completed callback */
692u32 fhci_transfer_confirm_callback(struct fhci_hcd *fhci)
693{
694 if (!fhci->process_done_task->state)
695 tasklet_schedule(fhci->process_done_task);
696 return 0;
697}
698
699/*
700 * adds the urb to the endpoint descriptor list
701 * arguments:
702 *	fhci		data structure for the low-level host controller
703 *	urb		USB request block data structure (the target
704 *			endpoint is taken from urb->ep)
705 */
706void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
707{
708 struct ed *ed = urb->ep->hcpriv;
709 struct urb_priv *urb_priv = urb->hcpriv;
710 u32 data_len = urb->transfer_buffer_length;
711 int urb_state = 0;
712 int toggle = 0;
713 struct td *td;
714 u8 *data;
715 u16 cnt = 0;
716
717 if (ed == NULL) {
718 ed = fhci_get_empty_ed(fhci);
719 ed->dev_addr = usb_pipedevice(urb->pipe);
720 ed->ep_addr = usb_pipeendpoint(urb->pipe);
721 switch (usb_pipetype(urb->pipe)) {
722 case PIPE_CONTROL:
723 ed->mode = FHCI_TF_CTRL;
724 break;
725 case PIPE_BULK:
726 ed->mode = FHCI_TF_BULK;
727 break;
728 case PIPE_INTERRUPT:
729 ed->mode = FHCI_TF_INTR;
730 break;
731 case PIPE_ISOCHRONOUS:
732 ed->mode = FHCI_TF_ISO;
733 break;
734 default:
735 break;
736 }
737 ed->speed = (urb->dev->speed == USB_SPEED_LOW) ?
738 FHCI_LOW_SPEED : FHCI_FULL_SPEED;
739 ed->max_pkt_size = usb_maxpacket(urb->dev,
740 urb->pipe, usb_pipeout(urb->pipe));
741 urb->ep->hcpriv = ed;
742 fhci_dbg(fhci, "new ep speed=%d max_pkt_size=%d\n",
743 ed->speed, ed->max_pkt_size);
744 }
745
746 /* for ISO transfer calculate start frame index */
747 if (ed->mode == FHCI_TF_ISO && urb->transfer_flags & URB_ISO_ASAP)
748 urb->start_frame = ed->td_head ? ed->last_iso + 1 :
749 get_frame_num(fhci);
750
751 /*
752	 * OHCI handles the DATA toggle itself; we just use the USB
753	 * toggle bits.
754 */
755 if (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
756 usb_pipeout(urb->pipe)))
757 toggle = USB_TD_TOGGLE_CARRY;
758 else {
759 toggle = USB_TD_TOGGLE_DATA0;
760 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
761 usb_pipeout(urb->pipe), 1);
762 }
763
764 urb_priv->tds_cnt = 0;
765 urb_priv->ed = ed;
766 if (data_len > 0)
767 data = urb->transfer_buffer;
768 else
769 data = NULL;
770
771 switch (ed->mode) {
772 case FHCI_TF_BULK:
773 if (urb->transfer_flags & URB_ZERO_PACKET &&
774 urb->transfer_buffer_length > 0 &&
775 ((urb->transfer_buffer_length %
776 usb_maxpacket(urb->dev, urb->pipe,
777 usb_pipeout(urb->pipe))) == 0))
778 urb_state = US_BULK0;
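		/*
		 * Split large bulk transfers into 4096-byte TDs; the data
		 * toggle carries across the chunks (USB_TD_TOGGLE_CARRY).
		 */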
779 while (data_len > 4096) {
780 td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt,
781 usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
782 FHCI_TA_IN,
783 cnt ? USB_TD_TOGGLE_CARRY :
784 toggle,
785 data, 4096, 0, 0, true);
786 data += 4096;
787 data_len -= 4096;
788 cnt++;
789 }
790
791 td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt,
792 usb_pipeout(urb->pipe) ? FHCI_TA_OUT : FHCI_TA_IN,
793 cnt ? USB_TD_TOGGLE_CARRY : toggle,
794 data, data_len, 0, 0, true);
795 cnt++;
796
797 if (urb->transfer_flags & URB_ZERO_PACKET &&
798 cnt < urb_priv->num_of_tds) {
799 td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt,
800 usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
801 FHCI_TA_IN,
802 USB_TD_TOGGLE_CARRY, NULL, 0, 0, 0, true);
803 cnt++;
804 }
805 break;
806 case FHCI_TF_INTR:
807 urb->start_frame = get_frame_num(fhci) + 1;
808 td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
809 usb_pipeout(urb->pipe) ? FHCI_TA_OUT : FHCI_TA_IN,
810 USB_TD_TOGGLE_DATA0, data, data_len,
811 urb->interval, urb->start_frame, true);
812 break;
813 case FHCI_TF_CTRL:
814 ed->dev_addr = usb_pipedevice(urb->pipe);
815 ed->max_pkt_size = usb_maxpacket(urb->dev, urb->pipe,
816 usb_pipeout(urb->pipe));
817 td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++, FHCI_TA_SETUP,
818 USB_TD_TOGGLE_DATA0, urb->setup_packet, 8, 0, 0, true);
819
820 if (data_len > 0) {
821 td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
822 usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
823 FHCI_TA_IN,
824 USB_TD_TOGGLE_DATA1, data, data_len, 0, 0,
825 true);
826 }
827 td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
828 usb_pipeout(urb->pipe) ? FHCI_TA_IN : FHCI_TA_OUT,
829 USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
830 urb_state = US_CTRL_SETUP;
831 break;
832 case FHCI_TF_ISO:
833 for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
834 u16 frame = urb->start_frame;
835
836 /*
837 * FIXME scheduling should handle frame counter
838 * roll-around ... exotic case (and OHCI has
839 * a 2^16 iso range, vs other HCs max of 2^10)
840 */
841 frame += cnt * urb->interval;
842 frame &= 0x07ff;
843 td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt,
844 usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
845 FHCI_TA_IN,
846 USB_TD_TOGGLE_DATA0,
847 data + urb->iso_frame_desc[cnt].offset,
848 urb->iso_frame_desc[cnt].length,
849 urb->interval, frame, true);
850 }
851 break;
852 default:
853 break;
854 }
855
856 /*
857	 * set the state of the URB
858	 * control pipe: 3 states -- setup, data, status
859	 * interrupt and bulk pipes: 1 state -- data
860 */
861 urb->pipe &= ~0x1f;
862 urb->pipe |= urb_state & 0x1f;
863
864 urb_priv->state = URB_INPROGRESS;
865
866 if (!ed->td_head) {
867 ed->state = FHCI_ED_OPER;
868 switch (ed->mode) {
869 case FHCI_TF_CTRL:
870 list_add(&ed->node, &fhci->hc_list->ctrl_list);
871 break;
872 case FHCI_TF_BULK:
873 list_add(&ed->node, &fhci->hc_list->bulk_list);
874 break;
875 case FHCI_TF_INTR:
876 list_add(&ed->node, &fhci->hc_list->intr_list);
877 break;
878 case FHCI_TF_ISO:
879 list_add(&ed->node, &fhci->hc_list->iso_list);
880 break;
881 default:
882 break;
883 }
884 }
885
886 fhci_add_tds_to_ed(ed, urb_priv->tds, urb_priv->num_of_tds);
887 fhci->active_urbs++;
888}
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
new file mode 100644
index 000000000000..b40332290319
--- /dev/null
+++ b/drivers/usb/host/fhci-tds.c
@@ -0,0 +1,626 @@
1/*
2 * Freescale QUICC Engine USB Host Controller Driver
3 *
4 * Copyright (c) Freescale Semicondutor, Inc. 2006.
5 * Shlomi Gridish <gridish@freescale.com>
6 * Jerry Huang <Chang-Ming.Huang@freescale.com>
7 * Copyright (c) Logic Product Development, Inc. 2007
8 * Peter Barada <peterb@logicpd.com>
9 * Copyright (c) MontaVista Software, Inc. 2008.
10 * Anton Vorontsov <avorontsov@ru.mvista.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/errno.h>
21#include <linux/list.h>
22#include <linux/io.h>
23#include <linux/usb.h>
24#include "../core/hcd.h"
25#include "fhci.h"
26
27#define DUMMY_BD_BUFFER 0xdeadbeef
28#define DUMMY2_BD_BUFFER 0xbaadf00d
29
30/* Transaction Descriptors bits */
31#define TD_R 0x8000 /* ready bit */
32#define TD_W 0x2000 /* wrap bit */
33#define TD_I 0x1000 /* interrupt on completion */
34#define TD_L 0x0800 /* last */
35#define TD_TC 0x0400 /* transmit CRC */
36#define TD_CNF 0x0200 /* CNF - Must be always 1 */
37#define TD_LSP 0x0100 /* Low-speed transaction */
38#define TD_PID 0x00c0 /* packet id */
39#define TD_RXER 0x0020 /* Rx error or not */
40
41#define TD_NAK 0x0010 /* No ack. */
42#define TD_STAL		0x0008 /* Stall received */
43#define TD_TO 0x0004 /* time out */
44#define TD_UN 0x0002 /* underrun */
45#define TD_NO 0x0010 /* Rx Non Octet Aligned Packet */
46#define TD_AB 0x0008 /* Frame Aborted */
47#define TD_CR 0x0004 /* CRC Error */
48#define TD_OV 0x0002 /* Overrun */
49#define TD_BOV 0x0001 /* Buffer Overrun */
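/*
 * Note: the Tx error bits (TD_NAK, TD_STAL, TD_TO, TD_UN) and the Rx error
 * bits (TD_NO, TD_AB, TD_CR, TD_OV) share the same bit positions; which set
 * applies depends on the transfer direction, as indicated by TD_RXER.
 */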
50
51#define TD_ERRORS (TD_NAK | TD_STAL | TD_TO | TD_UN | \
52 TD_NO | TD_AB | TD_CR | TD_OV | TD_BOV)
53
54#define TD_PID_DATA0 0x0080 /* Data 0 toggle */
55#define TD_PID_DATA1 0x00c0 /* Data 1 toggle */
56#define TD_PID_TOGGLE 0x00c0 /* Data 0/1 toggle mask */
57
58#define TD_TOK_SETUP 0x0000
59#define TD_TOK_OUT 0x4000
60#define TD_TOK_IN 0x8000
61#define TD_ISO 0x1000
62#define TD_ENDP 0x0780
63#define TD_ADDR 0x007f
64
65#define TD_ENDP_SHIFT 7
66
67struct usb_td {
68 __be16 status;
69 __be16 length;
70 __be32 buf_ptr;
71 __be16 extra;
72 __be16 reserved;
73};
74
75static struct usb_td __iomem *next_bd(struct usb_td __iomem *base,
76 struct usb_td __iomem *td,
77 u16 status)
78{
79 if (status & TD_W)
80 return base;
81 else
82 return ++td;
83}
84
85void fhci_push_dummy_bd(struct endpoint *ep)
86{
87 if (ep->already_pushed_dummy_bd == false) {
88 u16 td_status = in_be16(&ep->empty_td->status);
89
90 out_be32(&ep->empty_td->buf_ptr, DUMMY_BD_BUFFER);
91 /* get the next TD in the ring */
92 ep->empty_td = next_bd(ep->td_base, ep->empty_td, td_status);
93 ep->already_pushed_dummy_bd = true;
94 }
95}
96
97/* destroy an USB endpoint */
98void fhci_ep0_free(struct fhci_usb *usb)
99{
100 struct endpoint *ep;
101 int size;
102
103 ep = usb->ep0;
104 if (ep) {
105 if (ep->td_base)
106 cpm_muram_free(cpm_muram_offset(ep->td_base));
107
108 if (ep->conf_frame_Q) {
109 size = cq_howmany(ep->conf_frame_Q);
110 for (; size; size--) {
111 struct packet *pkt = cq_get(ep->conf_frame_Q);
112
113 kfree(pkt);
114 }
115 cq_delete(ep->conf_frame_Q);
116 }
117
118 if (ep->empty_frame_Q) {
119 size = cq_howmany(ep->empty_frame_Q);
120 for (; size; size--) {
121 struct packet *pkt = cq_get(ep->empty_frame_Q);
122
123 kfree(pkt);
124 }
125 cq_delete(ep->empty_frame_Q);
126 }
127
128 if (ep->dummy_packets_Q) {
129 size = cq_howmany(ep->dummy_packets_Q);
130 for (; size; size--) {
131 u8 *buff = cq_get(ep->dummy_packets_Q);
132
133 kfree(buff);
134 }
135 cq_delete(ep->dummy_packets_Q);
136 }
137
138 kfree(ep);
139 usb->ep0 = NULL;
140 }
141}
142
143/*
144 * create the endpoint structure
145 *
146 * arguments:
147 * usb A pointer to the data structure of the USB
148 * data_mem The data memory partition(BUS)
149 * ring_len TD ring length
150 */
151u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
152 u32 ring_len)
153{
154 struct endpoint *ep;
155 struct usb_td __iomem *td;
156 unsigned long ep_offset;
157	char *err_for = "endpoint PRAM";
158 int ep_mem_size;
159 u32 i;
160
161 /* we need at least 3 TDs in the ring */
162 if (!(ring_len > 2)) {
163 fhci_err(usb->fhci, "illegal TD ring length parameters\n");
164 return -EINVAL;
165 }
166
167 ep = kzalloc(sizeof(*ep), GFP_KERNEL);
168 if (!ep)
169 return -ENOMEM;
170
171 ep_mem_size = ring_len * sizeof(*td) + sizeof(struct fhci_ep_pram);
172 ep_offset = cpm_muram_alloc(ep_mem_size, 32);
173 if (IS_ERR_VALUE(ep_offset))
174 goto err;
175 ep->td_base = cpm_muram_addr(ep_offset);
176
177	/* allocate the frame and packet queues */
178 ep->conf_frame_Q = cq_new(ring_len + 2);
179 ep->empty_frame_Q = cq_new(ring_len + 2);
180 ep->dummy_packets_Q = cq_new(ring_len + 2);
181 if (!ep->conf_frame_Q || !ep->empty_frame_Q || !ep->dummy_packets_Q) {
182 err_for = "frame_queues";
183 goto err;
184 }
185
186 for (i = 0; i < (ring_len + 1); i++) {
187 struct packet *pkt;
188 u8 *buff;
189
190 pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
191 if (!pkt) {
192 err_for = "frame";
193 goto err;
194 }
195
196 buff = kmalloc(1028 * sizeof(*buff), GFP_KERNEL);
197 if (!buff) {
198 kfree(pkt);
199 err_for = "buffer";
200 goto err;
201 }
202 cq_put(ep->empty_frame_Q, pkt);
203 cq_put(ep->dummy_packets_Q, buff);
204 }
205
206 /* we put the endpoint parameter RAM right behind the TD ring */
207 ep->ep_pram_ptr = (void __iomem *)ep->td_base + sizeof(*td) * ring_len;
208
209 ep->conf_td = ep->td_base;
210 ep->empty_td = ep->td_base;
211
212 ep->already_pushed_dummy_bd = false;
213
214 /* initialize tds */
215 td = ep->td_base;
216 for (i = 0; i < ring_len; i++) {
217 out_be32(&td->buf_ptr, 0);
218 out_be16(&td->status, 0);
219 out_be16(&td->length, 0);
220 out_be16(&td->extra, 0);
221 td++;
222 }
223 td--;
224 out_be16(&td->status, TD_W); /* for last TD set Wrap bit */
225 out_be16(&td->length, 0);
226
227 /* endpoint structure has been created */
228 usb->ep0 = ep;
229
230 return 0;
231err:
232	usb->ep0 = ep;
233	fhci_ep0_free(usb);
234 fhci_err(usb->fhci, "no memory for the %s\n", err_for);
235 return -ENOMEM;
236}
237
238/*
239 * initialize the endpoint registers according to the given parameters
240 *
241 * arguments:
242 *	usb		A pointer to the data structure of the USB
243 *	ep		A pointer to the endpoint structure
244 *	data_mem	The data memory partition (BUS)
245 */
246void fhci_init_ep_registers(struct fhci_usb *usb, struct endpoint *ep,
247 enum fhci_mem_alloc data_mem)
248{
249 u8 rt;
250
251 /* set the endpoint registers according to the endpoint */
252 out_be16(&usb->fhci->regs->usb_ep[0],
253 USB_TRANS_CTR | USB_EP_MF | USB_EP_RTE);
254 out_be16(&usb->fhci->pram->ep_ptr[0],
255 cpm_muram_offset(ep->ep_pram_ptr));
256
257 rt = (BUS_MODE_BO_BE | BUS_MODE_GBL);
258#ifdef MULTI_DATA_BUS
259 if (data_mem == MEM_SECONDARY)
260 rt |= BUS_MODE_DTB;
261#endif
262 out_8(&ep->ep_pram_ptr->rx_func_code, rt);
263 out_8(&ep->ep_pram_ptr->tx_func_code, rt);
264 out_be16(&ep->ep_pram_ptr->rx_buff_len, 1028);
265 out_be16(&ep->ep_pram_ptr->rx_base, 0);
266 out_be16(&ep->ep_pram_ptr->tx_base, cpm_muram_offset(ep->td_base));
267 out_be16(&ep->ep_pram_ptr->rx_bd_ptr, 0);
268 out_be16(&ep->ep_pram_ptr->tx_bd_ptr, cpm_muram_offset(ep->td_base));
269 out_be32(&ep->ep_pram_ptr->tx_state, 0);
270}
271
272/*
273 * Collect the submitted frames and inform the application about them.
274 * It also prepares the TDs for new frames. If the Tx interrupts
275 * are disabled, the application should call this routine to get
276 * confirmation about the submitted frames. Otherwise, the routine is
277 * called from the interrupt service routine during the Tx interrupt.
278 * In that case the application is informed by calling the application
279 * specific 'fhci_transaction_confirm' routine.
280 */
281static void fhci_td_transaction_confirm(struct fhci_usb *usb)
282{
283 struct endpoint *ep = usb->ep0;
284 struct packet *pkt;
285 struct usb_td __iomem *td;
286 u16 extra_data;
287 u16 td_status;
288 u16 td_length;
289 u32 buf;
290
291 /*
292	 * collect transmitted BDs from the chip. The routine clears all BDs
293	 * with R bit = 0 and a non-NULL data buffer pointer, that is,
294	 * BDs which point to an already transmitted data buffer
295 */
296 while (1) {
297 td = ep->conf_td;
298 td_status = in_be16(&td->status);
299 td_length = in_be16(&td->length);
300 buf = in_be32(&td->buf_ptr);
301 extra_data = in_be16(&td->extra);
302
303		/* stop if the TD is still in use by the controller or is empty */
304 if (!(!(td_status & TD_R) && ((td_status & ~TD_W) || buf)))
305 break;
306 /* check if it is a dummy buffer */
307 else if ((buf == DUMMY_BD_BUFFER) && !(td_status & ~TD_W))
308 break;
309
310 /* mark TD as empty */
311 clrbits16(&td->status, ~TD_W);
312 out_be16(&td->length, 0);
313 out_be32(&td->buf_ptr, 0);
314 out_be16(&td->extra, 0);
315 /* advance the TD pointer */
316 ep->conf_td = next_bd(ep->td_base, ep->conf_td, td_status);
317
318		/* check if it is a dummy buffer (type 2) */
319 if ((buf == DUMMY2_BD_BUFFER) && !(td_status & ~TD_W))
320 continue;
321
322 pkt = cq_get(ep->conf_frame_Q);
323 if (!pkt)
324 fhci_err(usb->fhci, "no frame to confirm\n");
325
326 if (td_status & TD_ERRORS) {
327 if (td_status & TD_RXER) {
328 if (td_status & TD_CR)
329 pkt->status = USB_TD_RX_ER_CRC;
330 else if (td_status & TD_AB)
331 pkt->status = USB_TD_RX_ER_BITSTUFF;
332 else if (td_status & TD_OV)
333 pkt->status = USB_TD_RX_ER_OVERUN;
334 else if (td_status & TD_BOV)
335 pkt->status = USB_TD_RX_DATA_OVERUN;
336 else if (td_status & TD_NO)
337 pkt->status = USB_TD_RX_ER_NONOCT;
338 else
339 fhci_err(usb->fhci, "illegal error "
340							"occurred\n");
341 } else if (td_status & TD_NAK)
342 pkt->status = USB_TD_TX_ER_NAK;
343 else if (td_status & TD_TO)
344 pkt->status = USB_TD_TX_ER_TIMEOUT;
345 else if (td_status & TD_UN)
346 pkt->status = USB_TD_TX_ER_UNDERUN;
347 else if (td_status & TD_STAL)
348 pkt->status = USB_TD_TX_ER_STALL;
349 else
350				fhci_err(usb->fhci, "illegal error occurred\n");
351 } else if ((extra_data & TD_TOK_IN) &&
352 pkt->len > td_length - CRC_SIZE) {
353 pkt->status = USB_TD_RX_DATA_UNDERUN;
354 }
355
356 if (extra_data & TD_TOK_IN)
357 pkt->len = td_length - CRC_SIZE;
358 else if (pkt->info & PKT_ZLP)
359 pkt->len = 0;
360 else
361 pkt->len = td_length;
362
363 fhci_transaction_confirm(usb, pkt);
364 }
365}
366
367/*
368 * Submit a data frame to a specified endpoint of a USB device.
369 * The frame is put in the driver's transmit queue for this endpoint.
370 *
371 * Arguments:
372 *	usb		A pointer to the USB structure
373 *	pkt		A pointer to the user frame structure
374 *	trans_type	Transaction type - IN, OUT or SETUP
375 *	dest_addr	Device address - 0~127
376 *	dest_ep		Endpoint number of the device - 0~15
377 *	trans_mode	Pipe type - ISO, interrupt, bulk or control
378 *	dest_speed	USB speed - low speed or full speed
379 *	data_toggle	Data sequence toggle - 0 or 1
380 */
381u32 fhci_host_transaction(struct fhci_usb *usb,
382 struct packet *pkt,
383 enum fhci_ta_type trans_type,
384 u8 dest_addr,
385 u8 dest_ep,
386 enum fhci_tf_mode trans_mode,
387 enum fhci_speed dest_speed, u8 data_toggle)
388{
389 struct endpoint *ep = usb->ep0;
390 struct usb_td __iomem *td;
391 u16 extra_data;
392 u16 td_status;
393
394 fhci_usb_disable_interrupt(usb);
395 /* start from the next BD that should be filled */
396 td = ep->empty_td;
397 td_status = in_be16(&td->status);
398
399 if (td_status & TD_R && in_be16(&td->length)) {
400 /* if the TD is not free */
401 fhci_usb_enable_interrupt(usb);
402 return -1;
403 }
404
405 /* get the next TD in the ring */
406 ep->empty_td = next_bd(ep->td_base, ep->empty_td, td_status);
407 fhci_usb_enable_interrupt(usb);
408 pkt->priv_data = td;
409 out_be32(&td->buf_ptr, virt_to_phys(pkt->data));
410	/* sets up transaction parameters - addr, endp, dir, and type */
411 extra_data = (dest_ep << TD_ENDP_SHIFT) | dest_addr;
412 switch (trans_type) {
413 case FHCI_TA_IN:
414 extra_data |= TD_TOK_IN;
415 break;
416 case FHCI_TA_OUT:
417 extra_data |= TD_TOK_OUT;
418 break;
419 case FHCI_TA_SETUP:
420 extra_data |= TD_TOK_SETUP;
421 break;
422 }
423 if (trans_mode == FHCI_TF_ISO)
424 extra_data |= TD_ISO;
425 out_be16(&td->extra, extra_data);
426
427 /* sets up the buffer descriptor */
428 td_status = ((td_status & TD_W) | TD_R | TD_L | TD_I | TD_CNF);
429 if (!(pkt->info & PKT_NO_CRC))
430 td_status |= TD_TC;
431
432 switch (trans_type) {
433 case FHCI_TA_IN:
434 if (data_toggle)
435 pkt->info |= PKT_PID_DATA1;
436 else
437 pkt->info |= PKT_PID_DATA0;
438 break;
439 default:
440 if (data_toggle) {
441 td_status |= TD_PID_DATA1;
442 pkt->info |= PKT_PID_DATA1;
443 } else {
444 td_status |= TD_PID_DATA0;
445 pkt->info |= PKT_PID_DATA0;
446 }
447 break;
448 }
449
450 if ((dest_speed == FHCI_LOW_SPEED) &&
451 (usb->port_status == FHCI_PORT_FULL))
452 td_status |= TD_LSP;
453
454 out_be16(&td->status, td_status);
455
456 /* set up buffer length */
457 if (trans_type == FHCI_TA_IN)
458 out_be16(&td->length, pkt->len + CRC_SIZE);
459 else
460 out_be16(&td->length, pkt->len);
461
462 /* put the frame to the confirmation queue */
463 cq_put(ep->conf_frame_Q, pkt);
464
465 if (cq_howmany(ep->conf_frame_Q) == 1)
466 out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO);
467
468 return 0;
469}
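/*
 * An illustrative call of fhci_host_transaction() above (the parameter
 * values here are hypothetical):
 *
 *	fhci_host_transaction(usb, pkt, FHCI_TA_IN, dev_addr, ep_num,
 *			      FHCI_TF_BULK, FHCI_FULL_SPEED, toggle);
 *
 * A non-zero return value means the next TD in the ring is still in use,
 * so the transaction was not queued.
 */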
470
471/* Reset the Tx BD ring */
472void fhci_flush_bds(struct fhci_usb *usb)
473{
474 u16 extra_data;
475 u16 td_status;
476 u32 buf;
477 struct usb_td __iomem *td;
478 struct endpoint *ep = usb->ep0;
479
480 td = ep->td_base;
481 while (1) {
482 td_status = in_be16(&td->status);
483 buf = in_be32(&td->buf_ptr);
484 extra_data = in_be16(&td->extra);
485
486 /* if the TD is not empty - we'll confirm it as Timeout */
487 if (td_status & TD_R)
488 out_be16(&td->status, (td_status & ~TD_R) | TD_TO);
489 /* if this TD is dummy - let's skip this TD */
490 else if (in_be32(&td->buf_ptr) == DUMMY_BD_BUFFER)
491 out_be32(&td->buf_ptr, DUMMY2_BD_BUFFER);
492 /* if this is the last TD - break */
493 if (td_status & TD_W)
494 break;
495
496 td++;
497 }
498
499 fhci_td_transaction_confirm(usb);
500
501 td = ep->td_base;
502 do {
503 out_be16(&td->status, 0);
504 out_be16(&td->length, 0);
505 out_be32(&td->buf_ptr, 0);
506 out_be16(&td->extra, 0);
507 td++;
508 } while (!(in_be16(&td->status) & TD_W));
509 out_be16(&td->status, TD_W); /* for last TD set Wrap bit */
510 out_be16(&td->length, 0);
511 out_be32(&td->buf_ptr, 0);
512 out_be16(&td->extra, 0);
513
514 out_be16(&ep->ep_pram_ptr->tx_bd_ptr,
515 in_be16(&ep->ep_pram_ptr->tx_base));
516 out_be32(&ep->ep_pram_ptr->tx_state, 0);
517 out_be16(&ep->ep_pram_ptr->tx_cnt, 0);
518 ep->empty_td = ep->td_base;
519 ep->conf_td = ep->td_base;
520}
521
522/*
523 * Flush all transmitted packets from TDs in the actual frame.
524 * This routine is called when something goes wrong with the controller and
525 * we want to get rid of the actual frame and start again on the next frame.
526 */
527void fhci_flush_actual_frame(struct fhci_usb *usb)
528{
529 u8 mode;
530 u16 tb_ptr;
531 u16 extra_data;
532 u16 td_status;
533 u32 buf_ptr;
534 struct usb_td __iomem *td;
535 struct endpoint *ep = usb->ep0;
536
537 /* disable the USB controller */
538 mode = in_8(&usb->fhci->regs->usb_mod);
539 out_8(&usb->fhci->regs->usb_mod, mode & ~USB_MODE_EN);
540
541 tb_ptr = in_be16(&ep->ep_pram_ptr->tx_bd_ptr);
542 td = cpm_muram_addr(tb_ptr);
543 td_status = in_be16(&td->status);
544 buf_ptr = in_be32(&td->buf_ptr);
545 extra_data = in_be16(&td->extra);
546 do {
547 if (td_status & TD_R) {
548 out_be16(&td->status, (td_status & ~TD_R) | TD_TO);
549 } else {
550 out_be32(&td->buf_ptr, 0);
551 ep->already_pushed_dummy_bd = false;
552 break;
553 }
554
555 /* advance the TD pointer */
556 td = next_bd(ep->td_base, td, td_status);
557 td_status = in_be16(&td->status);
558 buf_ptr = in_be32(&td->buf_ptr);
559 extra_data = in_be16(&td->extra);
560 } while ((td_status & TD_R) || buf_ptr);
561
562 fhci_td_transaction_confirm(usb);
563
564 out_be16(&ep->ep_pram_ptr->tx_bd_ptr,
565 in_be16(&ep->ep_pram_ptr->tx_base));
566 out_be32(&ep->ep_pram_ptr->tx_state, 0);
567 out_be16(&ep->ep_pram_ptr->tx_cnt, 0);
568 ep->empty_td = ep->td_base;
569 ep->conf_td = ep->td_base;
570
571 usb->actual_frame->frame_status = FRAME_TIMER_END_TRANSMISSION;
572
573 /* reset the event register */
574 out_be16(&usb->fhci->regs->usb_event, 0xffff);
575 /* enable the USB controller */
576 out_8(&usb->fhci->regs->usb_mod, mode | USB_MODE_EN);
577}
578
579/* handles Tx confirm and Tx error interrupt */
580void fhci_tx_conf_interrupt(struct fhci_usb *usb)
581{
582 fhci_td_transaction_confirm(usb);
583
584 /*
585 * Schedule another transaction to this frame only if we have
586	 * already confirmed all transactions in the frame.
587 */
588 if (((fhci_get_sof_timer_count(usb) < usb->max_frame_usage) ||
589 (usb->actual_frame->frame_status & FRAME_END_TRANSMISSION)) &&
590 (list_empty(&usb->actual_frame->tds_list)))
591 fhci_schedule_transactions(usb);
592}
593
594void fhci_host_transmit_actual_frame(struct fhci_usb *usb)
595{
596 u16 tb_ptr;
597 u16 td_status;
598 struct usb_td __iomem *td;
599 struct endpoint *ep = usb->ep0;
600
601 tb_ptr = in_be16(&ep->ep_pram_ptr->tx_bd_ptr);
602 td = cpm_muram_addr(tb_ptr);
603
604 if (in_be32(&td->buf_ptr) == DUMMY_BD_BUFFER) {
605 struct usb_td __iomem *old_td = td;
606
607 ep->already_pushed_dummy_bd = false;
608 td_status = in_be16(&td->status);
609 /* gets the next TD in the ring */
610 td = next_bd(ep->td_base, td, td_status);
611 tb_ptr = cpm_muram_offset(td);
612 out_be16(&ep->ep_pram_ptr->tx_bd_ptr, tb_ptr);
613
614 /* start transmit only if we have something in the TDs */
615 if (in_be16(&td->status) & TD_R)
616 out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO);
617
618 if (in_be32(&ep->conf_td->buf_ptr) == DUMMY_BD_BUFFER) {
619 out_be32(&old_td->buf_ptr, 0);
620 ep->conf_td = next_bd(ep->td_base, ep->conf_td,
621 td_status);
622 } else {
623 out_be32(&old_td->buf_ptr, DUMMY2_BD_BUFFER);
624 }
625 }
626}
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
new file mode 100644
index 000000000000..7116284ed21a
--- /dev/null
+++ b/drivers/usb/host/fhci.h
@@ -0,0 +1,607 @@
1/*
2 * Freescale QUICC Engine USB Host Controller Driver
3 *
4 * Copyright (c) Freescale Semicondutor, Inc. 2006.
5 * Shlomi Gridish <gridish@freescale.com>
6 * Jerry Huang <Chang-Ming.Huang@freescale.com>
7 * Copyright (c) Logic Product Development, Inc. 2007
8 * Peter Barada <peterb@logicpd.com>
9 * Copyright (c) MontaVista Software, Inc. 2008.
10 * Anton Vorontsov <avorontsov@ru.mvista.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#ifndef __FHCI_H
19#define __FHCI_H
20
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/spinlock.h>
24#include <linux/interrupt.h>
25#include <linux/kfifo.h>
26#include <linux/io.h>
27#include <linux/usb.h>
28#include <asm/qe.h>
29#include "../core/hcd.h"
30
31#define USB_CLOCK 48000000
32
33#define FHCI_PRAM_SIZE 0x100
34
35#define MAX_EDS 32
36#define MAX_TDS 32
37
38
39/* CRC16 field size */
40#define CRC_SIZE 2
41
42/* USB protocol overhead for each frame transmitted from the host */
43#define PROTOCOL_OVERHEAD 7
44
45/* Packet structure, info field */
46#define PKT_PID_DATA0 0x80000000 /* PID - Data toggle zero */
47#define PKT_PID_DATA1 0x40000000 /* PID - Data toggle one */
48#define PKT_PID_SETUP 0x20000000 /* PID - Setup bit */
49#define PKT_SETUP_STATUS 0x10000000 /* Setup status bit */
50#define PKT_SETADDR_STATUS 0x08000000 /* Set address status bit */
51#define PKT_SET_HOST_LAST 0x04000000 /* Last data packet */
52#define PKT_HOST_DATA 0x02000000 /* Data packet */
53#define PKT_FIRST_IN_FRAME 0x01000000 /* First packet in the frame */
54#define PKT_TOKEN_FRAME 0x00800000 /* Token packet */
55#define PKT_ZLP 0x00400000 /* Zero length packet */
56#define PKT_IN_TOKEN_FRAME 0x00200000 /* IN token packet */
57#define PKT_OUT_TOKEN_FRAME 0x00100000 /* OUT token packet */
58#define PKT_SETUP_TOKEN_FRAME 0x00080000 /* SETUP token packet */
59#define PKT_STALL_FRAME 0x00040000 /* STALL packet */
60#define PKT_NACK_FRAME 0x00020000 /* NACK packet */
61#define PKT_NO_PID 0x00010000 /* No PID */
62#define PKT_NO_CRC 0x00008000 /* don't append CRC */
63#define PKT_HOST_COMMAND 0x00004000 /* Host command packet */
64#define PKT_DUMMY_PACKET 0x00002000 /* Dummy packet, used for mmm */
65#define PKT_LOW_SPEED_PACKET 0x00001000 /* Low-Speed packet */
66
67#define TRANS_OK (0)
68#define TRANS_INPROGRESS (-1)
69#define TRANS_DISCARD (-2)
70#define TRANS_FAIL (-3)
71
72#define PS_INT 0
73#define PS_DISCONNECTED 1
74#define PS_CONNECTED 2
75#define PS_READY 3
76#define PS_MISSING 4
77
78/* Transfer Descriptor status field */
79#define USB_TD_OK		0x00000000 /* TD transmitted or received ok */
80#define USB_TD_INPROGRESS 0x80000000 /* TD is being transmitted */
81#define USB_TD_RX_ER_NONOCT	0x40000000 /* Rx Non Octet Aligned Packet */
82#define USB_TD_RX_ER_BITSTUFF 0x20000000 /* Frame Aborted-Received pkt */
83#define USB_TD_RX_ER_CRC 0x10000000 /* CRC error */
84#define USB_TD_RX_ER_OVERUN	0x08000000 /* overrun occurred */
85#define USB_TD_RX_ER_PID 0x04000000 /* wrong PID received */
86#define USB_TD_RX_DATA_UNDERUN 0x02000000 /* shorter than expected */
87#define USB_TD_RX_DATA_OVERUN 0x01000000 /* longer than expected */
88#define USB_TD_TX_ER_NAK 0x00800000 /* NAK handshake */
89#define USB_TD_TX_ER_STALL 0x00400000 /* STALL handshake */
90#define USB_TD_TX_ER_TIMEOUT 0x00200000 /* transmit time out */
91#define USB_TD_TX_ER_UNDERUN 0x00100000 /* transmit underrun */
92
93#define USB_TD_ERROR (USB_TD_RX_ER_NONOCT | USB_TD_RX_ER_BITSTUFF | \
94 USB_TD_RX_ER_CRC | USB_TD_RX_ER_OVERUN | USB_TD_RX_ER_PID | \
95 USB_TD_RX_DATA_UNDERUN | USB_TD_RX_DATA_OVERUN | \
96 USB_TD_TX_ER_NAK | USB_TD_TX_ER_STALL | \
97 USB_TD_TX_ER_TIMEOUT | USB_TD_TX_ER_UNDERUN)
98
99/* Transfer Descriptor toggle field */
100#define USB_TD_TOGGLE_DATA0 0
101#define USB_TD_TOGGLE_DATA1 1
102#define USB_TD_TOGGLE_CARRY 2
103
104/* #define MULTI_DATA_BUS */
105
106/* Bus mode register RBMR/TBMR */
107#define BUS_MODE_GBL 0x20 /* Global snooping */
108#define BUS_MODE_BO 0x18 /* Byte ordering */
109#define BUS_MODE_BO_BE 0x10 /* Byte ordering - Big-endian */
110#define BUS_MODE_DTB 0x02 /* Data bus */
111
112/* FHCI QE USB Register Description */
113
114/* USB Mode Register bit define */
115#define USB_MODE_EN 0x01
116#define USB_MODE_HOST 0x02
117#define USB_MODE_TEST 0x04
118#define USB_MODE_SFTE 0x08
119#define USB_MODE_RESUME 0x40
120#define USB_MODE_LSS 0x80
121
122/* USB Slave Address Register Mask */
123#define USB_SLVADDR_MASK 0x7F
124
125/* USB Endpoint register define */
126#define USB_EPNUM_MASK 0xF000
127#define USB_EPNUM_SHIFT 12
128
129#define USB_TRANS_MODE_SHIFT 8
130#define USB_TRANS_CTR 0x0000
131#define USB_TRANS_INT 0x0100
132#define USB_TRANS_BULK 0x0200
133#define USB_TRANS_ISO 0x0300
134
135#define USB_EP_MF 0x0020
136#define USB_EP_RTE 0x0010
137
138#define USB_THS_SHIFT 2
139#define USB_THS_MASK 0x000c
140#define USB_THS_NORMAL 0x0
141#define USB_THS_IGNORE_IN 0x0004
142#define USB_THS_NACK 0x0008
143#define USB_THS_STALL 0x000c
144
145#define USB_RHS_SHIFT 0
146#define USB_RHS_MASK 0x0003
147#define USB_RHS_NORMAL 0x0
148#define USB_RHS_IGNORE_OUT 0x0001
149#define USB_RHS_NACK 0x0002
150#define USB_RHS_STALL 0x0003
151
152#define USB_RTHS_MASK 0x000f
153
154/* USB Command Register define */
155#define USB_CMD_STR_FIFO 0x80
156#define USB_CMD_FLUSH_FIFO 0x40
157#define USB_CMD_ISFT 0x20
158#define USB_CMD_DSFT 0x10
159#define USB_CMD_EP_MASK 0x03
160
161/* USB Event and Mask Register define */
162#define USB_E_MSF_MASK 0x0800
163#define USB_E_SFT_MASK 0x0400
164#define USB_E_RESET_MASK 0x0200
165#define USB_E_IDLE_MASK 0x0100
166#define USB_E_TXE4_MASK 0x0080
167#define USB_E_TXE3_MASK 0x0040
168#define USB_E_TXE2_MASK 0x0020
169#define USB_E_TXE1_MASK 0x0010
170#define USB_E_SOF_MASK 0x0008
171#define USB_E_BSY_MASK 0x0004
172#define USB_E_TXB_MASK 0x0002
173#define USB_E_RXB_MASK 0x0001
174
175/* Freescale USB Host controller registers */
176struct fhci_regs {
177 u8 usb_mod; /* mode register */
178 u8 usb_addr; /* address register */
179 u8 usb_comm; /* command register */
180 u8 reserved1[1];
181 __be16 usb_ep[4]; /* endpoint register */
182 u8 reserved2[4];
183 __be16 usb_event; /* event register */
184 u8 reserved3[2];
185 __be16 usb_mask; /* mask register */
186 u8 reserved4[1];
187 u8 usb_status; /* status register */
188 __be16 usb_sof_tmr; /* Start Of Frame timer */
189 u8 reserved5[2];
190 __be16 usb_frame_num; /* frame number register */
191 u8 reserved6[1];
192};
193
194/* Freescale USB HOST */
195struct fhci_pram {
196	__be16 ep_ptr[4];	/* Endpoint pointer reg */
197 __be32 rx_state; /* Rx internal state */
198 __be32 rx_ptr; /* Rx internal data pointer */
199 __be16 frame_num; /* Frame number */
200 __be16 rx_cnt; /* Rx byte count */
201 __be32 rx_temp; /* Rx temp */
202 __be32 rx_data_temp; /* Rx data temp */
203 __be16 rx_u_ptr; /* Rx microcode return address temp */
204 u8 reserved1[2]; /* reserved area */
205 __be32 sof_tbl; /* SOF lookup table pointer */
206	u8 sof_u_crc_temp;	/* SOF microcode CRC5 temp reg */
207 u8 reserved2[0xdb];
208};
209
210/* Freescale USB Endpoint*/
211struct fhci_ep_pram {
212 __be16 rx_base; /* Rx BD base address */
213 __be16 tx_base; /* Tx BD base address */
214 u8 rx_func_code; /* Rx function code */
215 u8 tx_func_code; /* Tx function code */
216 __be16 rx_buff_len; /* Rx buffer length */
217 __be16 rx_bd_ptr; /* Rx BD pointer */
218 __be16 tx_bd_ptr; /* Tx BD pointer */
219 __be32 tx_state; /* Tx internal state */
220 __be32 tx_ptr; /* Tx internal data pointer */
221 __be16 tx_crc; /* temp transmit CRC */
222 __be16 tx_cnt; /* Tx byte count */
223 __be32 tx_temp; /* Tx temp */
224 __be16 tx_u_ptr; /* Tx microcode return address temp */
225 __be16 reserved;
226};
227
228struct fhci_controller_list {
229 struct list_head ctrl_list; /* control endpoints */
230 struct list_head bulk_list; /* bulk endpoints */
231 struct list_head iso_list; /* isochronous endpoints */
232	struct list_head intr_list;	/* interrupt endpoints */
233 struct list_head done_list; /* done transfers */
234};
235
236struct virtual_root_hub {
237 int dev_num; /* USB address of the root hub */
238 u32 feature; /* indicates what feature has been set */
239 struct usb_hub_status hub;
240 struct usb_port_status port;
241};
242
243enum fhci_gpios {
244 GPIO_USBOE = 0,
245 GPIO_USBTP,
246 GPIO_USBTN,
247 GPIO_USBRP,
248 GPIO_USBRN,
249 /* these are optional */
250 GPIO_SPEED,
251 GPIO_POWER,
252 NUM_GPIOS,
253};
254
255enum fhci_pins {
256 PIN_USBOE = 0,
257 PIN_USBTP,
258 PIN_USBTN,
259 NUM_PINS,
260};
261
262struct fhci_hcd {
263 enum qe_clock fullspeed_clk;
264 enum qe_clock lowspeed_clk;
265 struct qe_pin *pins[NUM_PINS];
266 int gpios[NUM_GPIOS];
267 bool alow_gpios[NUM_GPIOS];
268
269 struct fhci_regs __iomem *regs; /* I/O memory used to communicate */
270 struct fhci_pram __iomem *pram; /* Parameter RAM */
271 struct gtm_timer *timer;
272
273 spinlock_t lock;
274 struct fhci_usb *usb_lld; /* Low-level driver */
275 struct virtual_root_hub *vroot_hub; /* the virtual root hub */
276 int active_urbs;
277 struct fhci_controller_list *hc_list;
278 struct tasklet_struct *process_done_task; /* tasklet for done list */
279
280 struct list_head empty_eds;
281 struct list_head empty_tds;
282
283#ifdef CONFIG_FHCI_DEBUG
284 int usb_irq_stat[13];
285 struct dentry *dfs_root;
286 struct dentry *dfs_regs;
287 struct dentry *dfs_irq_stat;
288#endif
289};
290
291#define USB_FRAME_USAGE 90
292#define FRAME_TIME_USAGE (USB_FRAME_USAGE*10) /* frame time usage */
293#define SW_FIX_TIME_BETWEEN_TRANSACTION 150 /* SW */
294#define MAX_BYTES_PER_FRAME (USB_FRAME_USAGE*15)
295#define MAX_PERIODIC_FRAME_USAGE 90
296
297/* transaction type */
298enum fhci_ta_type {
299 FHCI_TA_IN = 0, /* input transaction */
300 FHCI_TA_OUT, /* output transaction */
301 FHCI_TA_SETUP, /* setup transaction */
302};
303
304/* transfer mode */
305enum fhci_tf_mode {
306 FHCI_TF_CTRL = 0,
307 FHCI_TF_ISO,
308 FHCI_TF_BULK,
309 FHCI_TF_INTR,
310};
311
312enum fhci_speed {
313 FHCI_FULL_SPEED,
314 FHCI_LOW_SPEED,
315};
316
317/* endpoint state */
318enum fhci_ed_state {
319 FHCI_ED_NEW = 0, /* pipe is new */
320 FHCI_ED_OPER, /* pipe is operating */
321 FHCI_ED_URB_DEL, /* pipe is in hold because urb is being deleted */
322 FHCI_ED_SKIP, /* skip this pipe */
323 FHCI_ED_HALTED, /* pipe is halted */
324};
325
326enum fhci_port_status {
327 FHCI_PORT_POWER_OFF = 0,
328 FHCI_PORT_DISABLED,
329 FHCI_PORT_DISCONNECTING,
330 FHCI_PORT_WAITING, /* waiting for connection */
331 FHCI_PORT_FULL, /* full speed connected */
332 FHCI_PORT_LOW, /* low speed connected */
333};
334
335enum fhci_mem_alloc {
336	MEM_CACHABLE_SYS = 0x00000001, /* primary DDR, cacheable */
337	MEM_NOCACHE_SYS = 0x00000004, /* primary DDR, non-cacheable */
338 MEM_SECONDARY = 0x00000002, /* either secondary DDR or SDRAM */
339 MEM_PRAM = 0x00000008, /* multi-user RAM identifier */
340};
341
342/* USB default parameters*/
343#define DEFAULT_RING_LEN 8
344#define DEFAULT_DATA_MEM MEM_CACHABLE_SYS
345
346struct ed {
347 u8 dev_addr; /* device address */
348 u8 ep_addr; /* endpoint address */
349 enum fhci_tf_mode mode; /* USB transfer mode */
350 enum fhci_speed speed;
351 unsigned int max_pkt_size;
352 enum fhci_ed_state state;
353 struct list_head td_list; /* a list of all queued TD to this pipe */
354 struct list_head node;
355
356 /* read only parameters, should be cleared upon initialization */
357 u8 toggle_carry; /* toggle carry from the last TD submitted */
358 u32 last_iso; /* time stamp of last queued ISO transfer */
359 struct td *td_head; /* a pointer to the current TD handled */
360};
361
362struct td {
363 void *data; /* a pointer to the data buffer */
364 unsigned int len; /* length of the data to be submitted */
365	unsigned int actual_len; /* actual bytes transferred on this td */
366 enum fhci_ta_type type; /* transaction type */
367 u8 toggle; /* toggle for next trans. within this TD */
368 u16 iso_index; /* ISO transaction index */
369 u16 start_frame; /* start frame time stamp */
370 u16 interval; /* interval between trans. (for ISO/Intr) */
371 u32 status; /* status of the TD */
372 struct ed *ed; /* a handle to the corresponding ED */
373 struct urb *urb; /* a handle to the corresponding URB */
374 bool ioc; /* Inform On Completion */
375 struct list_head node;
376
377 /* read only parameters should be cleared upon initialization */
378 struct packet *pkt;
379 int nak_cnt;
380 int error_cnt;
381 struct list_head frame_lh;
382};
383
384struct packet {
385 u8 *data; /* packet data */
386 u32 len; /* packet length */
387 u32 status; /* status of the packet - equivalent to the status
388 * field for the corresponding structure td */
389 u32 info; /* packet information */
390 void __iomem *priv_data; /* private data of the driver (TDs or BDs) */
391};
392
393/* struct for each URB */
394#define URB_INPROGRESS 0
395#define URB_DEL 1
396
397/* URB states (state field) */
398#define US_BULK 0
399#define US_BULK0 1
400
401/* three setup states */
402#define US_CTRL_SETUP 2
403#define US_CTRL_DATA 1
404#define US_CTRL_ACK 0
405
406#define EP_ZERO 0
407
408struct urb_priv {
409 int num_of_tds;
410 int tds_cnt;
411 int state;
412
413 struct td **tds;
414 struct ed *ed;
415 struct timer_list time_out;
416};
417
418struct endpoint {
419 /* Pointer to ep parameter RAM */
420 struct fhci_ep_pram __iomem *ep_pram_ptr;
421
422 /* Host transactions */
423 struct usb_td __iomem *td_base; /* first TD in the ring */
424 struct usb_td __iomem *conf_td; /* next TD for confirm after transac */
425 struct usb_td __iomem *empty_td;/* next TD for new transaction req. */
426 struct kfifo *empty_frame_Q; /* Empty frames list to use */
427	struct kfifo *conf_frame_Q; /* frames passed to TDs, waiting for tx */
428	struct kfifo *dummy_packets_Q; /* dummy packets for the CRC overrun */
429
430 bool already_pushed_dummy_bd;
431};
432
433/* struct for each 1mSec frame time */
434#define FRAME_IS_TRANSMITTED 0x00
435#define FRAME_TIMER_END_TRANSMISSION 0x01
436#define FRAME_DATA_END_TRANSMISSION 0x02
437#define FRAME_END_TRANSMISSION 0x03
438#define FRAME_IS_PREPARED 0x04
439
440struct fhci_time_frame {
441 u16 frame_num; /* frame number */
442 u16 total_bytes; /* total bytes submitted within this frame */
443	u8 frame_status; /* flag that indicates to stop filling this frame */
444 struct list_head tds_list; /* all tds of this frame */
445};
446
447/* internal driver structure*/
448struct fhci_usb {
449 u16 saved_msk; /* saving of the USB mask register */
450 struct endpoint *ep0; /* pointer for endpoint0 structure */
451 int intr_nesting_cnt; /* interrupt nesting counter */
452	u16 max_frame_usage; /* max frame time usage, in micro-sec */
453	u16 max_bytes_per_frame; /* max bytes that can be tx'd in one frame */
454	u32 sw_transaction_time; /* sw complete trans time, in micro-sec */
455 struct fhci_time_frame *actual_frame;
456 struct fhci_controller_list *hc_list; /* main structure for hc */
457 struct virtual_root_hub *vroot_hub;
458 enum fhci_port_status port_status; /* v_rh port status */
459
460 u32 (*transfer_confirm)(struct fhci_hcd *fhci);
461
462 struct fhci_hcd *fhci;
463};
464
465/*
466 * Various helpers and prototypes below.
467 */
468
469static inline u16 get_frame_num(struct fhci_hcd *fhci)
470{
471 return in_be16(&fhci->pram->frame_num) & 0x07ff;
472}
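/* USB frame numbers are 11 bits wide, hence the 0x07ff mask above. */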
473
474#define fhci_dbg(fhci, fmt, args...) \
475 dev_dbg(fhci_to_hcd(fhci)->self.controller, fmt, ##args)
476#define fhci_vdbg(fhci, fmt, args...) \
477 dev_vdbg(fhci_to_hcd(fhci)->self.controller, fmt, ##args)
478#define fhci_err(fhci, fmt, args...) \
479 dev_err(fhci_to_hcd(fhci)->self.controller, fmt, ##args)
480#define fhci_info(fhci, fmt, args...) \
481 dev_info(fhci_to_hcd(fhci)->self.controller, fmt, ##args)
482#define fhci_warn(fhci, fmt, args...) \
483 dev_warn(fhci_to_hcd(fhci)->self.controller, fmt, ##args)
484
485static inline struct fhci_hcd *hcd_to_fhci(struct usb_hcd *hcd)
486{
487 return (struct fhci_hcd *)hcd->hcd_priv;
488}
489
490static inline struct usb_hcd *fhci_to_hcd(struct fhci_hcd *fhci)
491{
492 return container_of((void *)fhci, struct usb_hcd, hcd_priv);
493}
494
495/* fifo of pointers */
496static inline struct kfifo *cq_new(int size)
497{
498 return kfifo_alloc(size * sizeof(void *), GFP_KERNEL, NULL);
499}
500
501static inline void cq_delete(struct kfifo *kfifo)
502{
503 kfifo_free(kfifo);
504}
505
506static inline unsigned int cq_howmany(struct kfifo *kfifo)
507{
508 return __kfifo_len(kfifo) / sizeof(void *);
509}
510
511static inline int cq_put(struct kfifo *kfifo, void *p)
512{
513 return __kfifo_put(kfifo, (void *)&p, sizeof(p));
514}
515
516static inline void *cq_get(struct kfifo *kfifo)
517{
518 void *p = NULL;
519
520 __kfifo_get(kfifo, (void *)&p, sizeof(p));
521 return p;
522}
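/*
 * Minimal usage sketch of the cq_* pointer-FIFO helpers above (illustrative
 * only; error handling omitted):
 *
 *	struct kfifo *q = cq_new(8);	- room for 8 pointers
 *	cq_put(q, pkt);			- enqueue a pointer
 *	pkt = cq_get(q);		- dequeue it again
 *	cq_delete(q);
 */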
523
524/* fhci-hcd.c */
525void fhci_start_sof_timer(struct fhci_hcd *fhci);
526void fhci_stop_sof_timer(struct fhci_hcd *fhci);
527u16 fhci_get_sof_timer_count(struct fhci_usb *usb);
528void fhci_usb_enable_interrupt(struct fhci_usb *usb);
529void fhci_usb_disable_interrupt(struct fhci_usb *usb);
530int fhci_ioports_check_bus_state(struct fhci_hcd *fhci);
531
532/* fhci-mem.c */
533void fhci_recycle_empty_td(struct fhci_hcd *fhci, struct td *td);
534void fhci_recycle_empty_ed(struct fhci_hcd *fhci, struct ed *ed);
535struct ed *fhci_get_empty_ed(struct fhci_hcd *fhci);
536struct td *fhci_td_fill(struct fhci_hcd *fhci, struct urb *urb,
537 struct urb_priv *urb_priv, struct ed *ed, u16 index,
538 enum fhci_ta_type type, int toggle, u8 *data, u32 len,
539 u16 interval, u16 start_frame, bool ioc);
540void fhci_add_tds_to_ed(struct ed *ed, struct td **td_list, int number);
541
542/* fhci-hub.c */
543void fhci_config_transceiver(struct fhci_hcd *fhci,
544 enum fhci_port_status status);
545void fhci_port_disable(struct fhci_hcd *fhci);
546void fhci_port_enable(void *lld);
547void fhci_io_port_generate_reset(struct fhci_hcd *fhci);
548void fhci_port_reset(void *lld);
549int fhci_hub_status_data(struct usb_hcd *hcd, char *buf);
550int fhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
551 u16 wIndex, char *buf, u16 wLength);
552
553/* fhci-tds.c */
554void fhci_flush_bds(struct fhci_usb *usb);
555void fhci_flush_actual_frame(struct fhci_usb *usb);
556u32 fhci_host_transaction(struct fhci_usb *usb, struct packet *pkt,
557 enum fhci_ta_type trans_type, u8 dest_addr,
558 u8 dest_ep, enum fhci_tf_mode trans_mode,
559 enum fhci_speed dest_speed, u8 data_toggle);
560void fhci_host_transmit_actual_frame(struct fhci_usb *usb);
561void fhci_tx_conf_interrupt(struct fhci_usb *usb);
562void fhci_push_dummy_bd(struct endpoint *ep);
563u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
564 u32 ring_len);
565void fhci_init_ep_registers(struct fhci_usb *usb,
566 struct endpoint *ep,
567 enum fhci_mem_alloc data_mem);
568void fhci_ep0_free(struct fhci_usb *usb);
569
570/* fhci-sched.c */
571extern struct tasklet_struct fhci_tasklet;
572void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt);
573void fhci_flush_all_transmissions(struct fhci_usb *usb);
574void fhci_schedule_transactions(struct fhci_usb *usb);
575void fhci_device_connected_interrupt(struct fhci_hcd *fhci);
576void fhci_device_disconnected_interrupt(struct fhci_hcd *fhci);
577void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb);
578u32 fhci_transfer_confirm_callback(struct fhci_hcd *fhci);
579irqreturn_t fhci_irq(struct usb_hcd *hcd);
580irqreturn_t fhci_frame_limit_timer_irq(int irq, void *_hcd);
581
582/* fhci-q.h */
583void fhci_urb_complete_free(struct fhci_hcd *fhci, struct urb *urb);
584struct td *fhci_remove_td_from_ed(struct ed *ed);
585struct td *fhci_remove_td_from_frame(struct fhci_time_frame *frame);
586void fhci_move_td_from_ed_to_done_list(struct fhci_usb *usb, struct ed *ed);
587struct td *fhci_peek_td_from_frame(struct fhci_time_frame *frame);
588void fhci_add_td_to_frame(struct fhci_time_frame *frame, struct td *td);
589struct td *fhci_remove_td_from_done_list(struct fhci_controller_list *p_list);
590void fhci_done_td(struct urb *urb, struct td *td);
591void fhci_del_ed_list(struct fhci_hcd *fhci, struct ed *ed);
592
593#ifdef CONFIG_FHCI_DEBUG
594
595void fhci_dbg_isr(struct fhci_hcd *fhci, int usb_er);
596void fhci_dfs_destroy(struct fhci_hcd *fhci);
597void fhci_dfs_create(struct fhci_hcd *fhci);
598
599#else
600
601static inline void fhci_dbg_isr(struct fhci_hcd *fhci, int usb_er) {}
602static inline void fhci_dfs_destroy(struct fhci_hcd *fhci) {}
603static inline void fhci_dfs_create(struct fhci_hcd *fhci) {}
604
605#endif /* CONFIG_FHCI_DEBUG */
606
607#endif /* __FHCI_H */
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 65a9609f4ad6..5cf5f1eca4f4 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -593,12 +593,10 @@ static int ohci_run (struct ohci_hcd *ohci)
593 * to be checked in case boot firmware (BIOS/SMM/...) has set up 593 * to be checked in case boot firmware (BIOS/SMM/...) has set up
594 * wakeup in a way the bus isn't aware of (e.g., legacy PCI PM). 594 * wakeup in a way the bus isn't aware of (e.g., legacy PCI PM).
595 * If the bus glue detected wakeup capability then it should 595 * If the bus glue detected wakeup capability then it should
596 * already be enabled. Either way, if wakeup should be enabled 596 * already be enabled; if so we'll just enable it again.
597 * but isn't, we'll enable it now.
598 */ 597 */
599 if ((ohci->hc_control & OHCI_CTRL_RWC) != 0 598 if ((ohci->hc_control & OHCI_CTRL_RWC) != 0)
600 && !device_can_wakeup(hcd->self.controller)) 599 device_set_wakeup_capable(hcd->self.controller, 1);
601 device_init_wakeup(hcd->self.controller, 1);
602 600
603 switch (ohci->hc_control & OHCI_CTRL_HCFS) { 601 switch (ohci->hc_control & OHCI_CTRL_HCFS) {
604 case OHCI_USB_OPER: 602 case OHCI_USB_OPER:
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 4bbddb73abd9..f3aaba35e912 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -315,14 +315,14 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver,
315 return -ENODEV; 315 return -ENODEV;
316 } 316 }
317 317
318 usb_host_ck = clk_get(0, "usb_hhc_ck"); 318 usb_host_ck = clk_get(&pdev->dev, "usb_hhc_ck");
319 if (IS_ERR(usb_host_ck)) 319 if (IS_ERR(usb_host_ck))
320 return PTR_ERR(usb_host_ck); 320 return PTR_ERR(usb_host_ck);
321 321
322 if (!cpu_is_omap15xx()) 322 if (!cpu_is_omap15xx())
323 usb_dc_ck = clk_get(0, "usb_dc_ck"); 323 usb_dc_ck = clk_get(&pdev->dev, "usb_dc_ck");
324 else 324 else
325 usb_dc_ck = clk_get(0, "lb_ck"); 325 usb_dc_ck = clk_get(&pdev->dev, "lb_ck");
326 326
327 if (IS_ERR(usb_dc_ck)) { 327 if (IS_ERR(usb_dc_ck)) {
328 clk_put(usb_host_ck); 328 clk_put(usb_host_ck);
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 8b28ae7865ba..5d625c3fd423 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -487,7 +487,6 @@ static struct pci_driver ohci_pci_driver = {
487 487
488#ifdef CONFIG_PM 488#ifdef CONFIG_PM
489 .suspend = usb_hcd_pci_suspend, 489 .suspend = usb_hcd_pci_suspend,
490 .suspend_late = usb_hcd_pci_suspend_late,
491 .resume_early = usb_hcd_pci_resume_early, 490 .resume_early = usb_hcd_pci_resume_early,
492 .resume = usb_hcd_pci_resume, 491 .resume = usb_hcd_pci_resume,
493#endif 492#endif
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 4e221060f58c..944f7e0ca4df 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -942,7 +942,6 @@ static struct pci_driver uhci_pci_driver = {
942 942
943#ifdef CONFIG_PM 943#ifdef CONFIG_PM
944 .suspend = usb_hcd_pci_suspend, 944 .suspend = usb_hcd_pci_suspend,
945 .suspend_late = usb_hcd_pci_suspend_late,
946 .resume_early = usb_hcd_pci_resume_early, 945 .resume_early = usb_hcd_pci_resume_early,
947 .resume = usb_hcd_pci_resume, 946 .resume = usb_hcd_pci_resume,
948#endif /* PM */ 947#endif /* PM */
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 189a9db03509..ad4fb15b5dcb 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -57,7 +57,6 @@
57#define USB_DEVICE_ID_LD_MACHINETEST 0x2040 /* USB Product ID of Machine Test System */ 57#define USB_DEVICE_ID_LD_MACHINETEST 0x2040 /* USB Product ID of Machine Test System */
58 58
59#define USB_VENDOR_ID_VERNIER 0x08f7 59#define USB_VENDOR_ID_VERNIER 0x08f7
60#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001
61#define USB_DEVICE_ID_VERNIER_GOTEMP 0x0002 60#define USB_DEVICE_ID_VERNIER_GOTEMP 0x0002
62#define USB_DEVICE_ID_VERNIER_SKIP 0x0003 61#define USB_DEVICE_ID_VERNIER_SKIP 0x0003
63#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004 62#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004
@@ -85,7 +84,6 @@ static struct usb_device_id ld_usb_table [] = {
85 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER) }, 84 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER) },
86 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL) }, 85 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL) },
87 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST) }, 86 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST) },
88 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) },
89 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) }, 87 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) },
90 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) }, 88 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) },
91 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) }, 89 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) },
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index e06810aef2df..4cf27c72423e 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -37,6 +37,7 @@
37#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get) 37#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
38#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch) 38#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
39#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8) 39#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
40
40#ifdef CONFIG_COMPAT 41#ifdef CONFIG_COMPAT
41#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32) 42#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
42#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32) 43#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
@@ -921,21 +922,6 @@ static int mon_bin_ioctl(struct inode *inode, struct file *file,
921 } 922 }
922 break; 923 break;
923 924
924#ifdef CONFIG_COMPAT
925 case MON_IOCX_GET32: {
926 struct mon_bin_get32 getb;
927
928 if (copy_from_user(&getb, (void __user *)arg,
929 sizeof(struct mon_bin_get32)))
930 return -EFAULT;
931
932 ret = mon_bin_get_event(file, rp,
933 compat_ptr(getb.hdr32), compat_ptr(getb.data32),
934 getb.alloc32);
935 }
936 break;
937#endif
938
939 case MON_IOCX_MFETCH: 925 case MON_IOCX_MFETCH:
940 { 926 {
941 struct mon_bin_mfetch mfetch; 927 struct mon_bin_mfetch mfetch;
@@ -962,7 +948,57 @@ static int mon_bin_ioctl(struct inode *inode, struct file *file,
962 } 948 }
963 break; 949 break;
964 950
951 case MON_IOCG_STATS: {
952 struct mon_bin_stats __user *sp;
953 unsigned int nevents;
954 unsigned int ndropped;
955
956 spin_lock_irqsave(&rp->b_lock, flags);
957 ndropped = rp->cnt_lost;
958 rp->cnt_lost = 0;
959 spin_unlock_irqrestore(&rp->b_lock, flags);
960 nevents = mon_bin_queued(rp);
961
962 sp = (struct mon_bin_stats __user *)arg;
963 if (put_user(rp->cnt_lost, &sp->dropped))
964 return -EFAULT;
965 if (put_user(nevents, &sp->queued))
966 return -EFAULT;
967
968 }
969 break;
970
971 default:
972 return -ENOTTY;
973 }
974
975 return ret;
976}
977
965#ifdef CONFIG_COMPAT 978#ifdef CONFIG_COMPAT
979static long mon_bin_compat_ioctl(struct file *file,
980 unsigned int cmd, unsigned long arg)
981{
982 struct mon_reader_bin *rp = file->private_data;
983 int ret;
984
985 switch (cmd) {
986
987 case MON_IOCX_GET32: {
988 struct mon_bin_get32 getb;
989
990 if (copy_from_user(&getb, (void __user *)arg,
991 sizeof(struct mon_bin_get32)))
992 return -EFAULT;
993
994 ret = mon_bin_get_event(file, rp,
995 compat_ptr(getb.hdr32), compat_ptr(getb.data32),
996 getb.alloc32);
997 if (ret < 0)
998 return ret;
999 }
1000 return 0;
1001
966 case MON_IOCX_MFETCH32: 1002 case MON_IOCX_MFETCH32:
967 { 1003 {
968 struct mon_bin_mfetch32 mfetch; 1004 struct mon_bin_mfetch32 mfetch;
@@ -986,37 +1022,25 @@ static int mon_bin_ioctl(struct inode *inode, struct file *file,
986 return ret; 1022 return ret;
987 if (put_user(ret, &uptr->nfetch32)) 1023 if (put_user(ret, &uptr->nfetch32))
988 return -EFAULT; 1024 return -EFAULT;
989 ret = 0;
990 } 1025 }
991 break; 1026 return 0;
992#endif
993
994 case MON_IOCG_STATS: {
995 struct mon_bin_stats __user *sp;
996 unsigned int nevents;
997 unsigned int ndropped;
998
999 spin_lock_irqsave(&rp->b_lock, flags);
1000 ndropped = rp->cnt_lost;
1001 rp->cnt_lost = 0;
1002 spin_unlock_irqrestore(&rp->b_lock, flags);
1003 nevents = mon_bin_queued(rp);
1004 1027
1005 sp = (struct mon_bin_stats __user *)arg; 1028 case MON_IOCG_STATS:
1006 if (put_user(rp->cnt_lost, &sp->dropped)) 1029 return mon_bin_ioctl(NULL, file, cmd,
1007 return -EFAULT; 1030 (unsigned long) compat_ptr(arg));
1008 if (put_user(nevents, &sp->queued))
1009 return -EFAULT;
1010 1031
1011 } 1032 case MON_IOCQ_URB_LEN:
1012 break; 1033 case MON_IOCQ_RING_SIZE:
1034 case MON_IOCT_RING_SIZE:
1035 case MON_IOCH_MFLUSH:
1036 return mon_bin_ioctl(NULL, file, cmd, arg);
1013 1037
1014 default: 1038 default:
1015 return -ENOTTY; 1039 ;
1016 } 1040 }
1017 1041 return -ENOTTY;
1018 return ret;
1019} 1042}
1043#endif /* CONFIG_COMPAT */
1020 1044
1021static unsigned int 1045static unsigned int
1022mon_bin_poll(struct file *file, struct poll_table_struct *wait) 1046mon_bin_poll(struct file *file, struct poll_table_struct *wait)
@@ -1094,6 +1118,9 @@ static const struct file_operations mon_fops_binary = {
1094 /* .write = mon_text_write, */ 1118 /* .write = mon_text_write, */
1095 .poll = mon_bin_poll, 1119 .poll = mon_bin_poll,
1096 .ioctl = mon_bin_ioctl, 1120 .ioctl = mon_bin_ioctl,
1121#ifdef CONFIG_COMPAT
1122 .compat_ioctl = mon_bin_compat_ioctl,
1123#endif
1097 .release = mon_bin_release, 1124 .release = mon_bin_release,
1098 .mmap = mon_bin_mmap, 1125 .mmap = mon_bin_mmap,
1099}; 1126};
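
The usbmon change above moves the 32-bit handling out of mon_bin_ioctl() into a dedicated mon_bin_compat_ioctl(): commands whose structures have a different layout for 32-bit userspace (MON_IOCX_GET32, MON_IOCX_MFETCH32) are translated with compat_ptr(), while layout-identical commands are simply forwarded to the native handler. A minimal sketch of that pattern, using hypothetical example_* names rather than the usbmon ones:

/* Illustrative sketch only -- the example_* names are not from usbmon. */
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_get32 {
	compat_uptr_t data32;	/* 32-bit user pointer */
	u32 alloc32;
};

#define EXAMPLE_IOC_MAGIC	0x92
#define EXAMPLE_IOCX_GET32	_IOW(EXAMPLE_IOC_MAGIC, 6, struct example_get32)

/* Native handler and worker, assumed to exist elsewhere in the driver. */
long example_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int example_get_event(struct file *file, void __user *data, unsigned int nbytes);

#ifdef CONFIG_COMPAT
static long example_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case EXAMPLE_IOCX_GET32: {
		struct example_get32 getb;

		/* Copy in the 32-bit layout, then widen the user pointer. */
		if (copy_from_user(&getb, compat_ptr(arg), sizeof(getb)))
			return -EFAULT;
		return example_get_event(file, compat_ptr(getb.data32),
					 getb.alloc32);
	}
	default:
		/* Layout-identical commands: reuse the native handler. */
		return example_ioctl(file, cmd, arg);
	}
}
#endif
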
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 5af7379cd9a3..9985db08e7db 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -11,6 +11,7 @@ config USB_MUSB_HDRC
11 depends on (USB || USB_GADGET) && HAVE_CLK 11 depends on (USB || USB_GADGET) && HAVE_CLK
12 depends on !SUPERH 12 depends on !SUPERH
13 select TWL4030_USB if MACH_OMAP_3430SDP 13 select TWL4030_USB if MACH_OMAP_3430SDP
14 select USB_OTG_UTILS
14 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' 15 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
15 help 16 help
16 Say Y here if your system has a dual role high speed USB 17 Say Y here if your system has a dual role high speed USB
@@ -49,7 +50,7 @@ comment "OMAP 343x high speed USB support"
49 depends on USB_MUSB_HDRC && ARCH_OMAP34XX 50 depends on USB_MUSB_HDRC && ARCH_OMAP34XX
50 51
51comment "Blackfin high speed USB Support" 52comment "Blackfin high speed USB Support"
52 depends on USB_MUSB_HDRC && (BF54x && !BF544) || (BF52x && !BF522 && !BF523) 53 depends on USB_MUSB_HDRC && ((BF54x && !BF544) || (BF52x && !BF522 && !BF523))
53 54
54config USB_TUSB6010 55config USB_TUSB6010
55 boolean "TUSB 6010 support" 56 boolean "TUSB 6010 support"
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 5ad6d0893cbe..569ef0fed0f6 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -9,6 +9,7 @@
9#include <linux/usb.h> 9#include <linux/usb.h>
10 10
11#include "musb_core.h" 11#include "musb_core.h"
12#include "musb_debug.h"
12#include "cppi_dma.h" 13#include "cppi_dma.h"
13 14
14 15
@@ -423,6 +424,7 @@ cppi_rndis_update(struct cppi_channel *c, int is_rx,
423 } 424 }
424} 425}
425 426
427#ifdef CONFIG_USB_MUSB_DEBUG
426static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd) 428static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
427{ 429{
428 pr_debug("RXBD/%s %08x: " 430 pr_debug("RXBD/%s %08x: "
@@ -431,10 +433,11 @@ static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
431 bd->hw_next, bd->hw_bufp, bd->hw_off_len, 433 bd->hw_next, bd->hw_bufp, bd->hw_off_len,
432 bd->hw_options); 434 bd->hw_options);
433} 435}
436#endif
434 437
435static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx) 438static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
436{ 439{
437#if MUSB_DEBUG > 0 440#ifdef CONFIG_USB_MUSB_DEBUG
438 struct cppi_descriptor *bd; 441 struct cppi_descriptor *bd;
439 442
440 if (!_dbg_level(level)) 443 if (!_dbg_level(level))
@@ -881,12 +884,14 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
881 bd->hw_options |= CPPI_SOP_SET; 884 bd->hw_options |= CPPI_SOP_SET;
882 tail->hw_options |= CPPI_EOP_SET; 885 tail->hw_options |= CPPI_EOP_SET;
883 886
884 if (debug >= 5) { 887#ifdef CONFIG_USB_MUSB_DEBUG
888 if (_dbg_level(5)) {
885 struct cppi_descriptor *d; 889 struct cppi_descriptor *d;
886 890
887 for (d = rx->head; d; d = d->next) 891 for (d = rx->head; d; d = d->next)
888 cppi_dump_rxbd("S", d); 892 cppi_dump_rxbd("S", d);
889 } 893 }
894#endif
890 895
891 /* in case the preceding transfer left some state... */ 896 /* in case the preceding transfer left some state... */
892 tail = rx->last_processed; 897 tail = rx->last_processed;
@@ -990,6 +995,7 @@ static int cppi_channel_program(struct dma_channel *ch,
990 cppi_ch->offset = 0; 995 cppi_ch->offset = 0;
991 cppi_ch->maxpacket = maxpacket; 996 cppi_ch->maxpacket = maxpacket;
992 cppi_ch->buf_len = len; 997 cppi_ch->buf_len = len;
998 cppi_ch->channel.actual_len = 0;
993 999
994 /* TX channel? or RX? */ 1000 /* TX channel? or RX? */
995 if (cppi_ch->transmit) 1001 if (cppi_ch->transmit)
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 0d566dc5ce06..5a8fd5d57a11 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -32,9 +32,10 @@
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/gpio.h> 33#include <linux/gpio.h>
34 34
35#include <mach/arch/hardware.h> 35#include <mach/hardware.h>
36#include <mach/arch/memory.h> 36#include <mach/memory.h>
37#include <mach/arch/gpio.h> 37#include <mach/gpio.h>
38
38#include <asm/mach-types.h> 39#include <asm/mach-types.h>
39 40
40#include "musb_core.h" 41#include "musb_core.h"
@@ -370,12 +371,6 @@ int musb_platform_set_mode(struct musb *musb, u8 mode)
370 return -EIO; 371 return -EIO;
371} 372}
372 373
373int musb_platform_set_mode(struct musb *musb, u8 mode)
374{
375 /* EVM can't do this (right?) */
376 return -EIO;
377}
378
379int __init musb_platform_init(struct musb *musb) 374int __init musb_platform_init(struct musb *musb)
380{ 375{
381 void __iomem *tibase = musb->ctrl_base; 376 void __iomem *tibase = musb->ctrl_base;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 6c7faacfb535..2cc34fa05b73 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1824,8 +1824,9 @@ static void musb_free(struct musb *musb)
1824 musb_gadget_cleanup(musb); 1824 musb_gadget_cleanup(musb);
1825#endif 1825#endif
1826 1826
1827 if (musb->nIrq >= 0 && musb->irq_wake) { 1827 if (musb->nIrq >= 0) {
1828 disable_irq_wake(musb->nIrq); 1828 if (musb->irq_wake)
1829 disable_irq_wake(musb->nIrq);
1829 free_irq(musb->nIrq, musb); 1830 free_irq(musb->nIrq, musb);
1830 } 1831 }
1831 if (is_dma_capable() && musb->dma_controller) { 1832 if (is_dma_capable() && musb->dma_controller) {
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 6197daeab8f9..4ea305387981 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -874,10 +874,10 @@ static int musb_gadget_enable(struct usb_ep *ep,
874 status = -EBUSY; 874 status = -EBUSY;
875 goto fail; 875 goto fail;
876 } 876 }
877 musb_ep->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; 877 musb_ep->type = usb_endpoint_type(desc);
878 878
879 /* check direction and (later) maxpacket size against endpoint */ 879 /* check direction and (later) maxpacket size against endpoint */
880 if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != epnum) 880 if (usb_endpoint_num(desc) != epnum)
881 goto fail; 881 goto fail;
882 882
883 /* REVISIT this rules out high bandwidth periodic transfers */ 883 /* REVISIT this rules out high bandwidth periodic transfers */
@@ -890,7 +890,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
890 * packet size (or fail), set the mode, clear the fifo 890 * packet size (or fail), set the mode, clear the fifo
891 */ 891 */
892 musb_ep_select(mbase, epnum); 892 musb_ep_select(mbase, epnum);
893 if (desc->bEndpointAddress & USB_DIR_IN) { 893 if (usb_endpoint_dir_in(desc)) {
894 u16 int_txe = musb_readw(mbase, MUSB_INTRTXE); 894 u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
895 895
896 if (hw_ep->is_shared_fifo) 896 if (hw_ep->is_shared_fifo)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 99fa61234876..a035ceccf950 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1847,8 +1847,8 @@ static int musb_urb_enqueue(
1847 goto done; 1847 goto done;
1848 } 1848 }
1849 1849
1850 qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 1850 qh->epnum = usb_endpoint_num(epd);
1851 qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; 1851 qh->type = usb_endpoint_type(epd);
1852 1852
1853 /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ 1853 /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
1854 qh->addr_reg = (u8) usb_pipedevice(urb->pipe); 1854 qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index 52f7f29cebda..7e073a0d7ac9 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -15,8 +15,8 @@
15#include <linux/usb.h> 15#include <linux/usb.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
18#include <asm/arch/dma.h> 18#include <mach/dma.h>
19#include <asm/arch/mux.h> 19#include <mach/mux.h>
20 20
21#include "musb_core.h" 21#include "musb_core.h"
22 22
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 8e8dbdb9b39b..ee55b449ffde 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -6,14 +6,14 @@
6 6
7comment "OTG and related infrastructure" 7comment "OTG and related infrastructure"
8 8
9if USB || USB_GADGET
10
11config USB_OTG_UTILS 9config USB_OTG_UTILS
12 bool 10 bool
13 help 11 help
14 Select this to make sure the build includes objects from 12 Select this to make sure the build includes objects from
15 the OTG infrastructure directory. 13 the OTG infrastructure directory.
16 14
15if USB || USB_GADGET
16
17# 17#
18# USB Transceiver Drivers 18# USB Transceiver Drivers
19# 19#
diff --git a/drivers/usb/serial/cp2101.c b/drivers/usb/serial/cp2101.c
index cfaf1f085535..027f4b7dde86 100644
--- a/drivers/usb/serial/cp2101.c
+++ b/drivers/usb/serial/cp2101.c
@@ -85,6 +85,8 @@ static struct usb_device_id id_table [] = {
85 { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */ 85 { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
86 { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */ 86 { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
87 { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */ 87 { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
 88 { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Commander 2 */
 89 { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
88 { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ 90 { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
89 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ 91 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
90 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ 92 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index c70a8f667d85..75597337583e 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -660,6 +660,8 @@ static struct usb_device_id id_table_combined [] = {
660 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, 660 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
661 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, 661 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
662 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, 662 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
663 { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
664 { USB_DEVICE(FTDI_VID, DIEBOLD_BCS_SE923_PID) },
663 { }, /* Optional parameter entry */ 665 { }, /* Optional parameter entry */
664 { } /* Terminating entry */ 666 { } /* Terminating entry */
665}; 667};
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 373ee09975bb..1b62eff475d2 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -854,6 +854,10 @@
854#define FTDI_DOMINTELL_DGQG_PID 0xEF50 /* Master */ 854#define FTDI_DOMINTELL_DGQG_PID 0xEF50 /* Master */
855#define FTDI_DOMINTELL_DUSB_PID 0xEF51 /* DUSB01 module */ 855#define FTDI_DOMINTELL_DUSB_PID 0xEF51 /* DUSB01 module */
856 856
857/* Alti-2 products http://www.alti-2.com */
858#define ALTI2_VID 0x1BC9
859#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
860
857/* Commands */ 861/* Commands */
858#define FTDI_SIO_RESET 0 /* Reset the port */ 862#define FTDI_SIO_RESET 0 /* Reset the port */
859#define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */ 863#define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */
@@ -881,6 +885,11 @@
881#define RATOC_PRODUCT_ID_USB60F 0xb020 885#define RATOC_PRODUCT_ID_USB60F 0xb020
882 886
883/* 887/*
888 * DIEBOLD BCS SE923
889 */
890#define DIEBOLD_BCS_SE923_PID 0xfb99
891
892/*
884 * BmRequestType: 1100 0000b 893 * BmRequestType: 1100 0000b
885 * bRequest: FTDI_E2_READ 894 * bRequest: FTDI_E2_READ
886 * wValue: 0 895 * wValue: 0
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5ed183477aaf..6c89da9c6fea 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -158,6 +158,13 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
158#define HUAWEI_PRODUCT_E143E 0x143E 158#define HUAWEI_PRODUCT_E143E 0x143E
159#define HUAWEI_PRODUCT_E143F 0x143F 159#define HUAWEI_PRODUCT_E143F 0x143F
160 160
161#define QUANTA_VENDOR_ID 0x0408
162#define QUANTA_PRODUCT_Q101 0xEA02
163#define QUANTA_PRODUCT_Q111 0xEA03
164#define QUANTA_PRODUCT_GLX 0xEA04
165#define QUANTA_PRODUCT_GKE 0xEA05
166#define QUANTA_PRODUCT_GLE 0xEA06
167
161#define NOVATELWIRELESS_VENDOR_ID 0x1410 168#define NOVATELWIRELESS_VENDOR_ID 0x1410
162 169
163/* YISO PRODUCTS */ 170/* YISO PRODUCTS */
@@ -224,7 +231,7 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
224#define ONDA_VENDOR_ID 0x19d2 231#define ONDA_VENDOR_ID 0x19d2
225#define ONDA_PRODUCT_MSA501HS 0x0001 232#define ONDA_PRODUCT_MSA501HS 0x0001
226#define ONDA_PRODUCT_ET502HS 0x0002 233#define ONDA_PRODUCT_ET502HS 0x0002
227#define ONDA_PRODUCT_MT503HS 0x0200 234#define ONDA_PRODUCT_MT503HS 0x2000
228 235
229#define BANDRICH_VENDOR_ID 0x1A8D 236#define BANDRICH_VENDOR_ID 0x1A8D
230#define BANDRICH_PRODUCT_C100_1 0x1002 237#define BANDRICH_PRODUCT_C100_1 0x1002
@@ -298,6 +305,11 @@ static struct usb_device_id option_ids[] = {
298 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_GT) }, 305 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_GT) },
299 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_EX) }, 306 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_EX) },
300 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_KOI_MODEM) }, 307 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_KOI_MODEM) },
308 { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q101) },
309 { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q111) },
310 { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
311 { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
312 { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
301 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) }, 313 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) },
302 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) }, 314 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) },
303 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) }, 315 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 3cf41df302d7..baf591137b80 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -184,6 +184,7 @@ static struct usb_device_id ti_id_table_3410[7+TI_EXTRA_VID_PID_COUNT+1] = {
184 { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, 184 { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) },
185 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, 185 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) },
186 { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, 186 { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) },
187 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
187}; 188};
188 189
189static struct usb_device_id ti_id_table_5052[4+TI_EXTRA_VID_PID_COUNT+1] = { 190static struct usb_device_id ti_id_table_5052[4+TI_EXTRA_VID_PID_COUNT+1] = {
@@ -191,6 +192,7 @@ static struct usb_device_id ti_id_table_5052[4+TI_EXTRA_VID_PID_COUNT+1] = {
191 { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, 192 { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
192 { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, 193 { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
193 { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, 194 { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
195 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
194}; 196};
195 197
196static struct usb_device_id ti_id_table_combined[6+2*TI_EXTRA_VID_PID_COUNT+1] = { 198static struct usb_device_id ti_id_table_combined[6+2*TI_EXTRA_VID_PID_COUNT+1] = {
@@ -205,6 +207,7 @@ static struct usb_device_id ti_id_table_combined[6+2*TI_EXTRA_VID_PID_COUNT+1] =
205 { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, 207 { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
206 { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, 208 { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
207 { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, 209 { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
210 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
208 { } 211 { }
209}; 212};
210 213
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index 7e4752fbf232..b7ea5dbadee5 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -27,7 +27,9 @@
27 27
28/* Vendor and product ids */ 28/* Vendor and product ids */
29#define TI_VENDOR_ID 0x0451 29#define TI_VENDOR_ID 0x0451
30#define IBM_VENDOR_ID 0x04b3
30#define TI_3410_PRODUCT_ID 0x3410 31#define TI_3410_PRODUCT_ID 0x3410
32#define IBM_4543_PRODUCT_ID 0x4543
31#define TI_3410_EZ430_ID 0xF430 /* TI ez430 development tool */ 33#define TI_3410_EZ430_ID 0xF430 /* TI ez430 development tool */
32#define TI_5052_BOOT_PRODUCT_ID 0x5052 /* no EEPROM, no firmware */ 34#define TI_5052_BOOT_PRODUCT_ID 0x5052 /* no EEPROM, no firmware */
33#define TI_5152_BOOT_PRODUCT_ID 0x5152 /* no EEPROM, no firmware */ 35#define TI_5152_BOOT_PRODUCT_ID 0x5152 /* no EEPROM, no firmware */
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index a7f9513fa19d..69269f739563 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -995,6 +995,16 @@ UNUSUAL_DEV( 0x071b, 0x3203, 0x0000, 0x0000,
995 US_SC_DEVICE, US_PR_DEVICE, NULL, 995 US_SC_DEVICE, US_PR_DEVICE, NULL,
996 US_FL_NO_WP_DETECT | US_FL_MAX_SECTORS_64), 996 US_FL_NO_WP_DETECT | US_FL_MAX_SECTORS_64),
997 997
998/* Reported by Jean-Baptiste Onofre <jb@nanthrax.net>
999 * Support the following product :
1000 * "Dane-Elec MediaTouch"
1001 */
1002UNUSUAL_DEV( 0x071b, 0x32bb, 0x0000, 0x0000,
1003 "RockChip",
1004 "MTP",
1005 US_SC_DEVICE, US_PR_DEVICE, NULL,
1006 US_FL_NO_WP_DETECT | US_FL_MAX_SECTORS_64),
1007
998/* Reported by Massimiliano Ghilardi <massimiliano.ghilardi@gmail.com> 1008/* Reported by Massimiliano Ghilardi <massimiliano.ghilardi@gmail.com>
999 * This USB MP3/AVI player device fails and disconnects if more than 128 1009 * This USB MP3/AVI player device fails and disconnects if more than 128
1000 * sectors (64kB) are read/written in a single command, and may be present 1010 * sectors (64kB) are read/written in a single command, and may be present
@@ -1251,6 +1261,13 @@ UNUSUAL_DEV( 0x0840, 0x0084, 0x0001, 0x0001,
1251 US_SC_DEVICE, US_PR_DEVICE, NULL, 1261 US_SC_DEVICE, US_PR_DEVICE, NULL,
1252 US_FL_FIX_CAPACITY), 1262 US_FL_FIX_CAPACITY),
1253 1263
1264/* Reported by Martijn Hijdra <martijn.hijdra@gmail.com> */
1265UNUSUAL_DEV( 0x0840, 0x0085, 0x0001, 0x0001,
1266 "Argosy",
1267 "Storage",
1268 US_SC_DEVICE, US_PR_DEVICE, NULL,
1269 US_FL_FIX_CAPACITY),
1270
1254/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>. 1271/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
1255 * Flag will support Bulk devices which use a standards-violating 32-byte 1272 * Flag will support Bulk devices which use a standards-violating 32-byte
1256 * Command Block Wrapper. Here, the "DC2MEGA" cameras (several brands) with 1273 * Command Block Wrapper. Here, the "DC2MEGA" cameras (several brands) with
@@ -1589,6 +1606,13 @@ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
1589 US_SC_DEVICE, US_PR_DEVICE, NULL, 1606 US_SC_DEVICE, US_PR_DEVICE, NULL,
1590 US_FL_NO_WP_DETECT ), 1607 US_FL_NO_WP_DETECT ),
1591 1608
1609/* Reported by The Solutor <thesolutor@gmail.com> */
1610UNUSUAL_DEV( 0x0fce, 0xd0e1, 0x0000, 0x0000,
1611 "Sony Ericsson",
1612 "MD400",
1613 US_SC_DEVICE, US_PR_DEVICE, NULL,
1614 US_FL_IGNORE_DEVICE),
1615
1592/* Reported by Jan Mate <mate@fiit.stuba.sk> 1616/* Reported by Jan Mate <mate@fiit.stuba.sk>
1593 * and by Soeren Sonnenburg <kernel@nn7.de> */ 1617 * and by Soeren Sonnenburg <kernel@nn7.de> */
1594UNUSUAL_DEV( 0x0fce, 0xe030, 0x0000, 0x0000, 1618UNUSUAL_DEV( 0x0fce, 0xe030, 0x0000, 0x0000,
@@ -2031,15 +2055,11 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
2031 US_SC_DEVICE, US_PR_DEVICE, NULL, 2055 US_SC_DEVICE, US_PR_DEVICE, NULL,
2032 US_FL_IGNORE_RESIDUE ), 2056 US_FL_IGNORE_RESIDUE ),
2033 2057
2034/* Reported by Mauro Andreolini <andreoli@weblab.ing.unimo.it> 2058UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
2035 * This entry is needed to bypass the ZeroCD mechanism 2059 "ST",
2036 * and to properly load as a modem device. 2060 "2A",
2037 */
2038UNUSUAL_DEV( 0x19d2, 0x2000, 0x0000, 0x0000,
2039 "Onda ET502HS",
2040 "USB MMC Storage",
2041 US_SC_DEVICE, US_PR_DEVICE, NULL, 2061 US_SC_DEVICE, US_PR_DEVICE, NULL,
2042 US_FL_IGNORE_DEVICE), 2062 US_FL_FIX_CAPACITY),
2043 2063
2044/* patch submitted by Davide Perini <perini.davide@dpsoftware.org> 2064/* patch submitted by Davide Perini <perini.davide@dpsoftware.org>
2045 * and Renato Perini <rperini@email.it> 2065 * and Renato Perini <rperini@email.it>
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 6372f8b17b45..f0267706cb45 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -41,7 +41,7 @@ menuconfig FB
41 You need an utility program called fbset to make full use of frame 41 You need an utility program called fbset to make full use of frame
42 buffer devices. Please read <file:Documentation/fb/framebuffer.txt> 42 buffer devices. Please read <file:Documentation/fb/framebuffer.txt>
43 and the Framebuffer-HOWTO at 43 and the Framebuffer-HOWTO at
44 <http://www.tahallah.demon.co.uk/programming/prog.html> for more 44 <http://www.munted.org.uk/programming/Framebuffer-HOWTO-1.2.html> for more
45 information. 45 information.
46 46
47 Say Y here and to the driver for your graphics board below if you 47 Say Y here and to the driver for your graphics board below if you
@@ -2123,6 +2123,18 @@ config FB_PRE_INIT_FB
2123 Select this option if display contents should be inherited as set by 2123 Select this option if display contents should be inherited as set by
2124 the bootloader. 2124 the bootloader.
2125 2125
2126config FB_MX3
2127 tristate "MX3 Framebuffer support"
2128 depends on FB && MX3_IPU
2129 select FB_CFB_FILLRECT
2130 select FB_CFB_COPYAREA
2131 select FB_CFB_IMAGEBLIT
2132 default y
2133 help
2134 This is a framebuffer device for the i.MX31 LCD Controller. So
2135 far only synchronous displays are supported. If you plan to use
2136 an LCD display with your i.MX31 system, say Y here.
2137
2126source "drivers/video/omap/Kconfig" 2138source "drivers/video/omap/Kconfig"
2127 2139
2128source "drivers/video/backlight/Kconfig" 2140source "drivers/video/backlight/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index be2b657546ef..2a998ca6181d 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -132,6 +132,7 @@ obj-$(CONFIG_FB_VGA16) += vga16fb.o
132obj-$(CONFIG_FB_OF) += offb.o 132obj-$(CONFIG_FB_OF) += offb.o
133obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o 133obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o
134obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o 134obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o
135obj-$(CONFIG_FB_MX3) += mx3fb.o
135 136
136# the test framebuffer is last 137# the test framebuffer is last
137obj-$(CONFIG_FB_VIRTUAL) += vfb.o 138obj-$(CONFIG_FB_VIRTUAL) += vfb.o
diff --git a/drivers/video/aty/radeon_i2c.c b/drivers/video/aty/radeon_i2c.c
index 2c5567175dca..359fc64e761a 100644
--- a/drivers/video/aty/radeon_i2c.c
+++ b/drivers/video/aty/radeon_i2c.c
@@ -72,7 +72,6 @@ static int radeon_setup_i2c_bus(struct radeon_i2c_chan *chan, const char *name)
72 snprintf(chan->adapter.name, sizeof(chan->adapter.name), 72 snprintf(chan->adapter.name, sizeof(chan->adapter.name),
73 "radeonfb %s", name); 73 "radeonfb %s", name);
74 chan->adapter.owner = THIS_MODULE; 74 chan->adapter.owner = THIS_MODULE;
75 chan->adapter.id = I2C_HW_B_RADEON;
76 chan->adapter.algo_data = &chan->algo; 75 chan->adapter.algo_data = &chan->algo;
77 chan->adapter.dev.parent = &chan->rinfo->pdev->dev; 76 chan->adapter.dev.parent = &chan->rinfo->pdev->dev;
78 chan->algo.setsda = radeon_gpio_setsda; 77 chan->algo.setsda = radeon_gpio_setsda;
diff --git a/drivers/video/i810/i810-i2c.c b/drivers/video/i810/i810-i2c.c
index 7787c3322ffb..9dd55e5324a1 100644
--- a/drivers/video/i810/i810-i2c.c
+++ b/drivers/video/i810/i810-i2c.c
@@ -90,7 +90,6 @@ static int i810_setup_i2c_bus(struct i810fb_i2c_chan *chan, const char *name)
90 chan->adapter.owner = THIS_MODULE; 90 chan->adapter.owner = THIS_MODULE;
91 chan->adapter.algo_data = &chan->algo; 91 chan->adapter.algo_data = &chan->algo;
92 chan->adapter.dev.parent = &chan->par->dev->dev; 92 chan->adapter.dev.parent = &chan->par->dev->dev;
93 chan->adapter.id = I2C_HW_B_I810;
94 chan->algo.setsda = i810i2c_setsda; 93 chan->algo.setsda = i810i2c_setsda;
95 chan->algo.setscl = i810i2c_setscl; 94 chan->algo.setscl = i810i2c_setscl;
96 chan->algo.getsda = i810i2c_getsda; 95 chan->algo.getsda = i810i2c_getsda;
diff --git a/drivers/video/intelfb/intelfb_i2c.c b/drivers/video/intelfb/intelfb_i2c.c
index 5d896b81f4e0..b3065492bb20 100644
--- a/drivers/video/intelfb/intelfb_i2c.c
+++ b/drivers/video/intelfb/intelfb_i2c.c
@@ -111,7 +111,6 @@ static int intelfb_setup_i2c_bus(struct intelfb_info *dinfo,
111 "intelfb %s", name); 111 "intelfb %s", name);
112 chan->adapter.class = class; 112 chan->adapter.class = class;
113 chan->adapter.owner = THIS_MODULE; 113 chan->adapter.owner = THIS_MODULE;
114 chan->adapter.id = I2C_HW_B_INTELFB;
115 chan->adapter.algo_data = &chan->algo; 114 chan->adapter.algo_data = &chan->algo;
116 chan->adapter.dev.parent = &chan->dinfo->pdev->dev; 115 chan->adapter.dev.parent = &chan->dinfo->pdev->dev;
117 chan->algo.setsda = intelfb_gpio_setsda; 116 chan->algo.setsda = intelfb_gpio_setsda;
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
new file mode 100644
index 000000000000..8a75d05f4334
--- /dev/null
+++ b/drivers/video/mx3fb.c
@@ -0,0 +1,1555 @@
1/*
2 * Copyright (C) 2008
3 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
4 *
5 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/platform_device.h>
15#include <linux/sched.h>
16#include <linux/errno.h>
17#include <linux/string.h>
18#include <linux/interrupt.h>
19#include <linux/slab.h>
20#include <linux/fb.h>
21#include <linux/delay.h>
22#include <linux/init.h>
23#include <linux/ioport.h>
24#include <linux/dma-mapping.h>
25#include <linux/dmaengine.h>
26#include <linux/console.h>
27#include <linux/clk.h>
28#include <linux/mutex.h>
29
30#include <mach/hardware.h>
31#include <mach/ipu.h>
32#include <mach/mx3fb.h>
33
34#include <asm/io.h>
35#include <asm/uaccess.h>
36
37#define MX3FB_NAME "mx3_sdc_fb"
38
39#define MX3FB_REG_OFFSET 0xB4
40
41/* SDC Registers */
42#define SDC_COM_CONF (0xB4 - MX3FB_REG_OFFSET)
43#define SDC_GW_CTRL (0xB8 - MX3FB_REG_OFFSET)
44#define SDC_FG_POS (0xBC - MX3FB_REG_OFFSET)
45#define SDC_BG_POS (0xC0 - MX3FB_REG_OFFSET)
46#define SDC_CUR_POS (0xC4 - MX3FB_REG_OFFSET)
47#define SDC_PWM_CTRL (0xC8 - MX3FB_REG_OFFSET)
48#define SDC_CUR_MAP (0xCC - MX3FB_REG_OFFSET)
49#define SDC_HOR_CONF (0xD0 - MX3FB_REG_OFFSET)
50#define SDC_VER_CONF (0xD4 - MX3FB_REG_OFFSET)
51#define SDC_SHARP_CONF_1 (0xD8 - MX3FB_REG_OFFSET)
52#define SDC_SHARP_CONF_2 (0xDC - MX3FB_REG_OFFSET)
53
54/* Register bits */
55#define SDC_COM_TFT_COLOR 0x00000001UL
56#define SDC_COM_FG_EN 0x00000010UL
57#define SDC_COM_GWSEL 0x00000020UL
58#define SDC_COM_GLB_A 0x00000040UL
59#define SDC_COM_KEY_COLOR_G 0x00000080UL
60#define SDC_COM_BG_EN 0x00000200UL
61#define SDC_COM_SHARP 0x00001000UL
62
63#define SDC_V_SYNC_WIDTH_L 0x00000001UL
64
65/* Display Interface registers */
66#define DI_DISP_IF_CONF (0x0124 - MX3FB_REG_OFFSET)
67#define DI_DISP_SIG_POL (0x0128 - MX3FB_REG_OFFSET)
68#define DI_SER_DISP1_CONF (0x012C - MX3FB_REG_OFFSET)
69#define DI_SER_DISP2_CONF (0x0130 - MX3FB_REG_OFFSET)
70#define DI_HSP_CLK_PER (0x0134 - MX3FB_REG_OFFSET)
71#define DI_DISP0_TIME_CONF_1 (0x0138 - MX3FB_REG_OFFSET)
72#define DI_DISP0_TIME_CONF_2 (0x013C - MX3FB_REG_OFFSET)
73#define DI_DISP0_TIME_CONF_3 (0x0140 - MX3FB_REG_OFFSET)
74#define DI_DISP1_TIME_CONF_1 (0x0144 - MX3FB_REG_OFFSET)
75#define DI_DISP1_TIME_CONF_2 (0x0148 - MX3FB_REG_OFFSET)
76#define DI_DISP1_TIME_CONF_3 (0x014C - MX3FB_REG_OFFSET)
77#define DI_DISP2_TIME_CONF_1 (0x0150 - MX3FB_REG_OFFSET)
78#define DI_DISP2_TIME_CONF_2 (0x0154 - MX3FB_REG_OFFSET)
79#define DI_DISP2_TIME_CONF_3 (0x0158 - MX3FB_REG_OFFSET)
80#define DI_DISP3_TIME_CONF (0x015C - MX3FB_REG_OFFSET)
81#define DI_DISP0_DB0_MAP (0x0160 - MX3FB_REG_OFFSET)
82#define DI_DISP0_DB1_MAP (0x0164 - MX3FB_REG_OFFSET)
83#define DI_DISP0_DB2_MAP (0x0168 - MX3FB_REG_OFFSET)
84#define DI_DISP0_CB0_MAP (0x016C - MX3FB_REG_OFFSET)
85#define DI_DISP0_CB1_MAP (0x0170 - MX3FB_REG_OFFSET)
86#define DI_DISP0_CB2_MAP (0x0174 - MX3FB_REG_OFFSET)
87#define DI_DISP1_DB0_MAP (0x0178 - MX3FB_REG_OFFSET)
88#define DI_DISP1_DB1_MAP (0x017C - MX3FB_REG_OFFSET)
89#define DI_DISP1_DB2_MAP (0x0180 - MX3FB_REG_OFFSET)
90#define DI_DISP1_CB0_MAP (0x0184 - MX3FB_REG_OFFSET)
91#define DI_DISP1_CB1_MAP (0x0188 - MX3FB_REG_OFFSET)
92#define DI_DISP1_CB2_MAP (0x018C - MX3FB_REG_OFFSET)
93#define DI_DISP2_DB0_MAP (0x0190 - MX3FB_REG_OFFSET)
94#define DI_DISP2_DB1_MAP (0x0194 - MX3FB_REG_OFFSET)
95#define DI_DISP2_DB2_MAP (0x0198 - MX3FB_REG_OFFSET)
96#define DI_DISP2_CB0_MAP (0x019C - MX3FB_REG_OFFSET)
97#define DI_DISP2_CB1_MAP (0x01A0 - MX3FB_REG_OFFSET)
98#define DI_DISP2_CB2_MAP (0x01A4 - MX3FB_REG_OFFSET)
99#define DI_DISP3_B0_MAP (0x01A8 - MX3FB_REG_OFFSET)
100#define DI_DISP3_B1_MAP (0x01AC - MX3FB_REG_OFFSET)
101#define DI_DISP3_B2_MAP (0x01B0 - MX3FB_REG_OFFSET)
102#define DI_DISP_ACC_CC (0x01B4 - MX3FB_REG_OFFSET)
103#define DI_DISP_LLA_CONF (0x01B8 - MX3FB_REG_OFFSET)
104#define DI_DISP_LLA_DATA (0x01BC - MX3FB_REG_OFFSET)
105
106/* DI_DISP_SIG_POL bits */
107#define DI_D3_VSYNC_POL_SHIFT 28
108#define DI_D3_HSYNC_POL_SHIFT 27
109#define DI_D3_DRDY_SHARP_POL_SHIFT 26
110#define DI_D3_CLK_POL_SHIFT 25
111#define DI_D3_DATA_POL_SHIFT 24
112
113/* DI_DISP_IF_CONF bits */
114#define DI_D3_CLK_IDLE_SHIFT 26
115#define DI_D3_CLK_SEL_SHIFT 25
116#define DI_D3_DATAMSK_SHIFT 24
117
118enum ipu_panel {
119 IPU_PANEL_SHARP_TFT,
120 IPU_PANEL_TFT,
121};
122
123struct ipu_di_signal_cfg {
124 unsigned datamask_en:1;
125 unsigned clksel_en:1;
126 unsigned clkidle_en:1;
127 unsigned data_pol:1; /* true = inverted */
128 unsigned clk_pol:1; /* true = rising edge */
129 unsigned enable_pol:1;
130 unsigned Hsync_pol:1; /* true = active high */
131 unsigned Vsync_pol:1;
132};
133
134static const struct fb_videomode mx3fb_modedb[] = {
135 {
136 /* 240x320 @ 60 Hz */
137 .name = "Sharp-QVGA",
138 .refresh = 60,
139 .xres = 240,
140 .yres = 320,
141 .pixclock = 185925,
142 .left_margin = 9,
143 .right_margin = 16,
144 .upper_margin = 7,
145 .lower_margin = 9,
146 .hsync_len = 1,
147 .vsync_len = 1,
148 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_SHARP_MODE |
149 FB_SYNC_CLK_INVERT | FB_SYNC_DATA_INVERT |
150 FB_SYNC_CLK_IDLE_EN,
151 .vmode = FB_VMODE_NONINTERLACED,
152 .flag = 0,
153 }, {
154 /* 240x33 @ 60 Hz */
155 .name = "Sharp-CLI",
156 .refresh = 60,
157 .xres = 240,
158 .yres = 33,
159 .pixclock = 185925,
160 .left_margin = 9,
161 .right_margin = 16,
162 .upper_margin = 7,
163 .lower_margin = 9 + 287,
164 .hsync_len = 1,
165 .vsync_len = 1,
166 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_SHARP_MODE |
167 FB_SYNC_CLK_INVERT | FB_SYNC_DATA_INVERT |
168 FB_SYNC_CLK_IDLE_EN,
169 .vmode = FB_VMODE_NONINTERLACED,
170 .flag = 0,
171 }, {
172 /* 640x480 @ 60 Hz */
173 .name = "NEC-VGA",
174 .refresh = 60,
175 .xres = 640,
176 .yres = 480,
177 .pixclock = 38255,
178 .left_margin = 144,
179 .right_margin = 0,
180 .upper_margin = 34,
181 .lower_margin = 40,
182 .hsync_len = 1,
183 .vsync_len = 1,
184 .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_OE_ACT_HIGH,
185 .vmode = FB_VMODE_NONINTERLACED,
186 .flag = 0,
187 }, {
188 /* NTSC TV output */
189 .name = "TV-NTSC",
190 .refresh = 60,
191 .xres = 640,
192 .yres = 480,
193 .pixclock = 37538,
194 .left_margin = 38,
195 .right_margin = 858 - 640 - 38 - 3,
196 .upper_margin = 36,
197 .lower_margin = 518 - 480 - 36 - 1,
198 .hsync_len = 3,
199 .vsync_len = 1,
200 .sync = 0,
201 .vmode = FB_VMODE_NONINTERLACED,
202 .flag = 0,
203 }, {
204 /* PAL TV output */
205 .name = "TV-PAL",
206 .refresh = 50,
207 .xres = 640,
208 .yres = 480,
209 .pixclock = 37538,
210 .left_margin = 38,
211 .right_margin = 960 - 640 - 38 - 32,
212 .upper_margin = 32,
213 .lower_margin = 555 - 480 - 32 - 3,
214 .hsync_len = 32,
215 .vsync_len = 3,
216 .sync = 0,
217 .vmode = FB_VMODE_NONINTERLACED,
218 .flag = 0,
219 }, {
220 /* TV output VGA mode, 640x480 @ 65 Hz */
221 .name = "TV-VGA",
222 .refresh = 60,
223 .xres = 640,
224 .yres = 480,
225 .pixclock = 40574,
226 .left_margin = 35,
227 .right_margin = 45,
228 .upper_margin = 9,
229 .lower_margin = 1,
230 .hsync_len = 46,
231 .vsync_len = 5,
232 .sync = 0,
233 .vmode = FB_VMODE_NONINTERLACED,
234 .flag = 0,
235 },
236};
237
238struct mx3fb_data {
239 struct fb_info *fbi;
240 int backlight_level;
241 void __iomem *reg_base;
242 spinlock_t lock;
243 struct device *dev;
244
245 uint32_t h_start_width;
246 uint32_t v_start_width;
247};
248
249struct dma_chan_request {
250 struct mx3fb_data *mx3fb;
251 enum ipu_channel id;
252};
253
254/* MX3 specific framebuffer information. */
255struct mx3fb_info {
256 int blank;
257 enum ipu_channel ipu_ch;
258 uint32_t cur_ipu_buf;
259
260 u32 pseudo_palette[16];
261
262 struct completion flip_cmpl;
263 struct mutex mutex; /* Protects fb-ops */
264 struct mx3fb_data *mx3fb;
265 struct idmac_channel *idmac_channel;
266 struct dma_async_tx_descriptor *txd;
267 dma_cookie_t cookie;
268 struct scatterlist sg[2];
269
270 u32 sync; /* preserve var->sync flags */
271};
272
273static void mx3fb_dma_done(void *);
274
275/* Used fb-mode and bpp. Can be set on kernel command line, therefore file-static. */
276static const char *fb_mode;
277static unsigned long default_bpp = 16;
278
279static u32 mx3fb_read_reg(struct mx3fb_data *mx3fb, unsigned long reg)
280{
281 return __raw_readl(mx3fb->reg_base + reg);
282}
283
284static void mx3fb_write_reg(struct mx3fb_data *mx3fb, u32 value, unsigned long reg)
285{
286 __raw_writel(value, mx3fb->reg_base + reg);
287}
288
289static const uint32_t di_mappings[] = {
290 0x1600AAAA, 0x00E05555, 0x00070000, 3, /* RGB888 */
291 0x0005000F, 0x000B000F, 0x0011000F, 1, /* RGB666 */
292 0x0011000F, 0x000B000F, 0x0005000F, 1, /* BGR666 */
293 0x0004003F, 0x000A000F, 0x000F003F, 1 /* RGB565 */
294};
295
296static void sdc_fb_init(struct mx3fb_info *fbi)
297{
298 struct mx3fb_data *mx3fb = fbi->mx3fb;
299 uint32_t reg;
300
301 reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
302
303 mx3fb_write_reg(mx3fb, reg | SDC_COM_BG_EN, SDC_COM_CONF);
304}
305
306/* Returns enabled flag before uninit */
307static uint32_t sdc_fb_uninit(struct mx3fb_info *fbi)
308{
309 struct mx3fb_data *mx3fb = fbi->mx3fb;
310 uint32_t reg;
311
312 reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
313
314 mx3fb_write_reg(mx3fb, reg & ~SDC_COM_BG_EN, SDC_COM_CONF);
315
316 return reg & SDC_COM_BG_EN;
317}
318
319static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
320{
321 struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
322 struct idmac_channel *ichan = mx3_fbi->idmac_channel;
323 struct dma_chan *dma_chan = &ichan->dma_chan;
324 unsigned long flags;
325 dma_cookie_t cookie;
326
327 dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
328 to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
329
330 /* This enables the channel */
331 if (mx3_fbi->cookie < 0) {
332 mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan,
333 &mx3_fbi->sg[0], 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
334 if (!mx3_fbi->txd) {
335 dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n",
336 dma_chan->chan_id);
337 return;
338 }
339
340 mx3_fbi->txd->callback_param = mx3_fbi->txd;
341 mx3_fbi->txd->callback = mx3fb_dma_done;
342
343 cookie = mx3_fbi->txd->tx_submit(mx3_fbi->txd);
344 dev_dbg(mx3fb->dev, "%d: Submit %p #%d [%c]\n", __LINE__,
345 mx3_fbi->txd, cookie, list_empty(&ichan->queue) ? '-' : '+');
346 } else {
347 if (!mx3_fbi->txd || !mx3_fbi->txd->tx_submit) {
348 dev_err(mx3fb->dev, "Cannot enable channel %d\n",
349 dma_chan->chan_id);
350 return;
351 }
352
353 /* Just re-activate the same buffer */
354 dma_async_issue_pending(dma_chan);
355 cookie = mx3_fbi->cookie;
356 dev_dbg(mx3fb->dev, "%d: Re-submit %p #%d [%c]\n", __LINE__,
357 mx3_fbi->txd, cookie, list_empty(&ichan->queue) ? '-' : '+');
358 }
359
360 if (cookie >= 0) {
361 spin_lock_irqsave(&mx3fb->lock, flags);
362 sdc_fb_init(mx3_fbi);
363 mx3_fbi->cookie = cookie;
364 spin_unlock_irqrestore(&mx3fb->lock, flags);
365 }
366
367 /*
368 * Attention! Without this msleep the channel keeps generating
369 * interrupts. Next sdc_set_brightness() is going to be called
370 * from mx3fb_blank().
371 */
372 msleep(2);
373}
374
375static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
376{
377 struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
378 uint32_t enabled;
379 unsigned long flags;
380
381 spin_lock_irqsave(&mx3fb->lock, flags);
382
383 enabled = sdc_fb_uninit(mx3_fbi);
384
385 spin_unlock_irqrestore(&mx3fb->lock, flags);
386
387 mx3_fbi->txd->chan->device->device_terminate_all(mx3_fbi->txd->chan);
388 mx3_fbi->txd = NULL;
389 mx3_fbi->cookie = -EINVAL;
390}
391
392/**
393 * sdc_set_window_pos() - set window position of the respective plane.
394 * @mx3fb: mx3fb context.
395 * @channel: IPU DMAC channel ID.
396 * @x_pos: X coordinate relative to the top left corner to place window at.
397 * @y_pos: Y coordinate relative to the top left corner to place window at.
398 * @return: 0 on success or negative error code on failure.
399 */
400static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel,
401 int16_t x_pos, int16_t y_pos)
402{
403 x_pos += mx3fb->h_start_width;
404 y_pos += mx3fb->v_start_width;
405
406 if (channel != IDMAC_SDC_0)
407 return -EINVAL;
408
409 mx3fb_write_reg(mx3fb, (x_pos << 16) | y_pos, SDC_BG_POS);
410 return 0;
411}
412
413/**
414 * sdc_init_panel() - initialize a synchronous LCD panel.
415 * @mx3fb: mx3fb context.
416 * @panel: panel type.
417 * @pixel_clk: desired pixel clock frequency in Hz.
418 * @width: width of panel in pixels.
419 * @height: height of panel in pixels.
420 * @pixel_fmt: pixel format of buffer as FOURCC ASCII code.
421 * @h_start_width: number of pixel clocks between the HSYNC signal pulse
422 * and the start of valid data.
423 * @h_sync_width: width of the HSYNC signal in units of pixel clocks.
424 * @h_end_width: number of pixel clocks between the end of valid data
425 * and the HSYNC signal for next line.
426 * @v_start_width: number of lines between the VSYNC signal pulse and the
427 * start of valid data.
428 * @v_sync_width: width of the VSYNC signal in units of lines
429 * @v_end_width: number of lines between the end of valid data and the
430 * VSYNC signal for next frame.
431 * @sig: bitfield of signal polarities for LCD interface.
432 * @return: 0 on success or negative error code on failure.
433 */
434static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
435 uint32_t pixel_clk,
436 uint16_t width, uint16_t height,
437 enum pixel_fmt pixel_fmt,
438 uint16_t h_start_width, uint16_t h_sync_width,
439 uint16_t h_end_width, uint16_t v_start_width,
440 uint16_t v_sync_width, uint16_t v_end_width,
441 struct ipu_di_signal_cfg sig)
442{
443 unsigned long lock_flags;
444 uint32_t reg;
445 uint32_t old_conf;
446 uint32_t div;
447 struct clk *ipu_clk;
448
449 dev_dbg(mx3fb->dev, "panel size = %d x %d", width, height);
450
451 if (v_sync_width == 0 || h_sync_width == 0)
452 return -EINVAL;
453
454 /* Init panel size and blanking periods */
455 reg = ((uint32_t) (h_sync_width - 1) << 26) |
456 ((uint32_t) (width + h_start_width + h_end_width - 1) << 16);
457 mx3fb_write_reg(mx3fb, reg, SDC_HOR_CONF);
458
459#ifdef DEBUG
460 printk(KERN_CONT " hor_conf %x,", reg);
461#endif
462
463 reg = ((uint32_t) (v_sync_width - 1) << 26) | SDC_V_SYNC_WIDTH_L |
464 ((uint32_t) (height + v_start_width + v_end_width - 1) << 16);
465 mx3fb_write_reg(mx3fb, reg, SDC_VER_CONF);
466
467#ifdef DEBUG
468 printk(KERN_CONT " ver_conf %x\n", reg);
469#endif
470
471 mx3fb->h_start_width = h_start_width;
472 mx3fb->v_start_width = v_start_width;
473
474 switch (panel) {
475 case IPU_PANEL_SHARP_TFT:
476 mx3fb_write_reg(mx3fb, 0x00FD0102L, SDC_SHARP_CONF_1);
477 mx3fb_write_reg(mx3fb, 0x00F500F4L, SDC_SHARP_CONF_2);
478 mx3fb_write_reg(mx3fb, SDC_COM_SHARP | SDC_COM_TFT_COLOR, SDC_COM_CONF);
479 break;
480 case IPU_PANEL_TFT:
481 mx3fb_write_reg(mx3fb, SDC_COM_TFT_COLOR, SDC_COM_CONF);
482 break;
483 default:
484 return -EINVAL;
485 }
486
487 /* Init clocking */
488
489 /*
 490 * Calculate divider: fractional part is 4 bits so simply multiply by
 491 * 2^4 to get fractional part, as long as we stay under ~250MHz and on
492 * i.MX31 it (HSP_CLK) is <= 178MHz. Currently 128.267MHz
493 */
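	/*
	 * Worked example (illustrative figures, not from the original source):
	 * with HSP_CLK at the 128.267 MHz mentioned above and the Sharp-QVGA
	 * pixel clock of ~5.38 MHz (185925 ps/pixel from the modedb), the
	 * computation below gives div = 128267000 * 16 / 5378000 ~= 381,
	 * i.e. an integer divider of about 381 / 16 ~= 23.8.
	 */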
494 dev_dbg(mx3fb->dev, "pixel clk = %d\n", pixel_clk);
495
496 ipu_clk = clk_get(mx3fb->dev, "ipu_clk");
497 div = clk_get_rate(ipu_clk) * 16 / pixel_clk;
498 clk_put(ipu_clk);
499
500 if (div < 0x40) { /* Divider less than 4 */
501 dev_dbg(mx3fb->dev,
502 "InitPanel() - Pixel clock divider less than 4\n");
503 div = 0x40;
504 }
505
506 spin_lock_irqsave(&mx3fb->lock, lock_flags);
507
508 /*
509 * DISP3_IF_CLK_DOWN_WR is half the divider value and 2 fraction bits
510 * fewer. Subtract 1 extra from DISP3_IF_CLK_DOWN_WR based on timing
511 * debug. DISP3_IF_CLK_UP_WR is 0
512 */
513 mx3fb_write_reg(mx3fb, (((div / 8) - 1) << 22) | div, DI_DISP3_TIME_CONF);
514
515 /* DI settings */
516 old_conf = mx3fb_read_reg(mx3fb, DI_DISP_IF_CONF) & 0x78FFFFFF;
517 old_conf |= sig.datamask_en << DI_D3_DATAMSK_SHIFT |
518 sig.clksel_en << DI_D3_CLK_SEL_SHIFT |
519 sig.clkidle_en << DI_D3_CLK_IDLE_SHIFT;
520 mx3fb_write_reg(mx3fb, old_conf, DI_DISP_IF_CONF);
521
522 old_conf = mx3fb_read_reg(mx3fb, DI_DISP_SIG_POL) & 0xE0FFFFFF;
523 old_conf |= sig.data_pol << DI_D3_DATA_POL_SHIFT |
524 sig.clk_pol << DI_D3_CLK_POL_SHIFT |
525 sig.enable_pol << DI_D3_DRDY_SHARP_POL_SHIFT |
526 sig.Hsync_pol << DI_D3_HSYNC_POL_SHIFT |
527 sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
528 mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL);
529
530 switch (pixel_fmt) {
531 case IPU_PIX_FMT_RGB24:
532 mx3fb_write_reg(mx3fb, di_mappings[0], DI_DISP3_B0_MAP);
533 mx3fb_write_reg(mx3fb, di_mappings[1], DI_DISP3_B1_MAP);
534 mx3fb_write_reg(mx3fb, di_mappings[2], DI_DISP3_B2_MAP);
535 mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
536 ((di_mappings[3] - 1) << 12), DI_DISP_ACC_CC);
537 break;
538 case IPU_PIX_FMT_RGB666:
539 mx3fb_write_reg(mx3fb, di_mappings[4], DI_DISP3_B0_MAP);
540 mx3fb_write_reg(mx3fb, di_mappings[5], DI_DISP3_B1_MAP);
541 mx3fb_write_reg(mx3fb, di_mappings[6], DI_DISP3_B2_MAP);
542 mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
543 ((di_mappings[7] - 1) << 12), DI_DISP_ACC_CC);
544 break;
545 case IPU_PIX_FMT_BGR666:
546 mx3fb_write_reg(mx3fb, di_mappings[8], DI_DISP3_B0_MAP);
547 mx3fb_write_reg(mx3fb, di_mappings[9], DI_DISP3_B1_MAP);
548 mx3fb_write_reg(mx3fb, di_mappings[10], DI_DISP3_B2_MAP);
549 mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
550 ((di_mappings[11] - 1) << 12), DI_DISP_ACC_CC);
551 break;
552 default:
553 mx3fb_write_reg(mx3fb, di_mappings[12], DI_DISP3_B0_MAP);
554 mx3fb_write_reg(mx3fb, di_mappings[13], DI_DISP3_B1_MAP);
555 mx3fb_write_reg(mx3fb, di_mappings[14], DI_DISP3_B2_MAP);
556 mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
557 ((di_mappings[15] - 1) << 12), DI_DISP_ACC_CC);
558 break;
559 }
560
561 spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
562
563 dev_dbg(mx3fb->dev, "DI_DISP_IF_CONF = 0x%08X\n",
564 mx3fb_read_reg(mx3fb, DI_DISP_IF_CONF));
565 dev_dbg(mx3fb->dev, "DI_DISP_SIG_POL = 0x%08X\n",
566 mx3fb_read_reg(mx3fb, DI_DISP_SIG_POL));
567 dev_dbg(mx3fb->dev, "DI_DISP3_TIME_CONF = 0x%08X\n",
568 mx3fb_read_reg(mx3fb, DI_DISP3_TIME_CONF));
569
570 return 0;
571}
572
573/**
574 * sdc_set_color_key() - set the transparent color key for SDC graphic plane.
575 * @mx3fb: mx3fb context.
576 * @channel: IPU DMAC channel ID.
 577 * @enable: boolean to enable or disable color key.
578 * @color_key: 24-bit RGB color to use as transparent color key.
579 * @return: 0 on success or negative error code on failure.
580 */
581static int sdc_set_color_key(struct mx3fb_data *mx3fb, enum ipu_channel channel,
582 bool enable, uint32_t color_key)
583{
584 uint32_t reg, sdc_conf;
585 unsigned long lock_flags;
586
587 spin_lock_irqsave(&mx3fb->lock, lock_flags);
588
589 sdc_conf = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
590 if (channel == IDMAC_SDC_0)
591 sdc_conf &= ~SDC_COM_GWSEL;
592 else
593 sdc_conf |= SDC_COM_GWSEL;
594
595 if (enable) {
596 reg = mx3fb_read_reg(mx3fb, SDC_GW_CTRL) & 0xFF000000L;
597 mx3fb_write_reg(mx3fb, reg | (color_key & 0x00FFFFFFL),
598 SDC_GW_CTRL);
599
600 sdc_conf |= SDC_COM_KEY_COLOR_G;
601 } else {
602 sdc_conf &= ~SDC_COM_KEY_COLOR_G;
603 }
604 mx3fb_write_reg(mx3fb, sdc_conf, SDC_COM_CONF);
605
606 spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
607
608 return 0;
609}
610
611/**
612 * sdc_set_global_alpha() - set global alpha blending modes.
613 * @mx3fb: mx3fb context.
614 * @enable: boolean to enable or disable global alpha blending. If disabled,
615 * per pixel blending is used.
616 * @alpha: global alpha value.
617 * @return: 0 on success or negative error code on failure.
618 */
619static int sdc_set_global_alpha(struct mx3fb_data *mx3fb, bool enable, uint8_t alpha)
620{
621 uint32_t reg;
622 unsigned long lock_flags;
623
624 spin_lock_irqsave(&mx3fb->lock, lock_flags);
625
626 if (enable) {
627 reg = mx3fb_read_reg(mx3fb, SDC_GW_CTRL) & 0x00FFFFFFL;
628 mx3fb_write_reg(mx3fb, reg | ((uint32_t) alpha << 24), SDC_GW_CTRL);
629
630 reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
631 mx3fb_write_reg(mx3fb, reg | SDC_COM_GLB_A, SDC_COM_CONF);
632 } else {
633 reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
634 mx3fb_write_reg(mx3fb, reg & ~SDC_COM_GLB_A, SDC_COM_CONF);
635 }
636
637 spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
638
639 return 0;
640}
641
642static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value)
643{
644 /* This might be board-specific */
645 mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL);
646 return;
647}
648
649static uint32_t bpp_to_pixfmt(int bpp)
650{
651 uint32_t pixfmt = 0;
652 switch (bpp) {
653 case 24:
654 pixfmt = IPU_PIX_FMT_BGR24;
655 break;
656 case 32:
657 pixfmt = IPU_PIX_FMT_BGR32;
658 break;
659 case 16:
660 pixfmt = IPU_PIX_FMT_RGB565;
661 break;
662 }
663 return pixfmt;
664}
665
666static int mx3fb_blank(int blank, struct fb_info *fbi);
667static int mx3fb_map_video_memory(struct fb_info *fbi);
668static int mx3fb_unmap_video_memory(struct fb_info *fbi);
669
670/**
671 * mx3fb_set_fix() - set fixed framebuffer parameters from variable settings.
672 * @info: framebuffer information pointer
673 * @return: 0 on success or negative error code on failure.
674 */
675static int mx3fb_set_fix(struct fb_info *fbi)
676{
677 struct fb_fix_screeninfo *fix = &fbi->fix;
678 struct fb_var_screeninfo *var = &fbi->var;
679
680 strncpy(fix->id, "DISP3 BG", 8);
681
682 fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
683
684 fix->type = FB_TYPE_PACKED_PIXELS;
685 fix->accel = FB_ACCEL_NONE;
686 fix->visual = FB_VISUAL_TRUECOLOR;
687 fix->xpanstep = 1;
688 fix->ypanstep = 1;
689
690 return 0;
691}
692
693static void mx3fb_dma_done(void *arg)
694{
695 struct idmac_tx_desc *tx_desc = to_tx_desc(arg);
696 struct dma_chan *chan = tx_desc->txd.chan;
697 struct idmac_channel *ichannel = to_idmac_chan(chan);
698 struct mx3fb_data *mx3fb = ichannel->client;
699 struct mx3fb_info *mx3_fbi = mx3fb->fbi->par;
700
701 dev_dbg(mx3fb->dev, "irq %d callback\n", ichannel->eof_irq);
702
703 /* We only need one interrupt, it will be re-enabled as needed */
704 disable_irq(ichannel->eof_irq);
705
706 complete(&mx3_fbi->flip_cmpl);
707}
708
709/**
710 * mx3fb_set_par() - set framebuffer parameters and change the operating mode.
711 * @fbi: framebuffer information pointer.
712 * @return: 0 on success or negative error code on failure.
713 */
714static int mx3fb_set_par(struct fb_info *fbi)
715{
716 u32 mem_len;
717 struct ipu_di_signal_cfg sig_cfg;
718 enum ipu_panel mode = IPU_PANEL_TFT;
719 struct mx3fb_info *mx3_fbi = fbi->par;
720 struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
721 struct idmac_channel *ichan = mx3_fbi->idmac_channel;
722 struct idmac_video_param *video = &ichan->params.video;
723 struct scatterlist *sg = mx3_fbi->sg;
724 size_t screen_size;
725
726 dev_dbg(mx3fb->dev, "%s [%c]\n", __func__, list_empty(&ichan->queue) ? '-' : '+');
727
728 mutex_lock(&mx3_fbi->mutex);
729
730 /* Total cleanup */
731 if (mx3_fbi->txd)
732 sdc_disable_channel(mx3_fbi);
733
734 mx3fb_set_fix(fbi);
735
736 mem_len = fbi->var.yres_virtual * fbi->fix.line_length;
737 if (mem_len > fbi->fix.smem_len) {
738 if (fbi->fix.smem_start)
739 mx3fb_unmap_video_memory(fbi);
740
741 fbi->fix.smem_len = mem_len;
742 if (mx3fb_map_video_memory(fbi) < 0) {
743 mutex_unlock(&mx3_fbi->mutex);
744 return -ENOMEM;
745 }
746 }
747
748 screen_size = fbi->fix.line_length * fbi->var.yres;
749
750 sg_init_table(&sg[0], 1);
751 sg_init_table(&sg[1], 1);
752
753 sg_dma_address(&sg[0]) = fbi->fix.smem_start;
754 sg_set_page(&sg[0], virt_to_page(fbi->screen_base),
755 fbi->fix.smem_len,
756 offset_in_page(fbi->screen_base));
757
758 if (mx3_fbi->ipu_ch == IDMAC_SDC_0) {
759 memset(&sig_cfg, 0, sizeof(sig_cfg));
760 if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT)
761 sig_cfg.Hsync_pol = true;
762 if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT)
763 sig_cfg.Vsync_pol = true;
764 if (fbi->var.sync & FB_SYNC_CLK_INVERT)
765 sig_cfg.clk_pol = true;
766 if (fbi->var.sync & FB_SYNC_DATA_INVERT)
767 sig_cfg.data_pol = true;
768 if (fbi->var.sync & FB_SYNC_OE_ACT_HIGH)
769 sig_cfg.enable_pol = true;
770 if (fbi->var.sync & FB_SYNC_CLK_IDLE_EN)
771 sig_cfg.clkidle_en = true;
772 if (fbi->var.sync & FB_SYNC_CLK_SEL_EN)
773 sig_cfg.clksel_en = true;
774 if (fbi->var.sync & FB_SYNC_SHARP_MODE)
775 mode = IPU_PANEL_SHARP_TFT;
776
777 dev_dbg(fbi->device, "pixclock = %ul Hz\n",
778 (u32) (PICOS2KHZ(fbi->var.pixclock) * 1000UL));
779
780 if (sdc_init_panel(mx3fb, mode,
781 (PICOS2KHZ(fbi->var.pixclock)) * 1000UL,
782 fbi->var.xres, fbi->var.yres,
783 (fbi->var.sync & FB_SYNC_SWAP_RGB) ?
784 IPU_PIX_FMT_BGR666 : IPU_PIX_FMT_RGB666,
785 fbi->var.left_margin,
786 fbi->var.hsync_len,
787 fbi->var.right_margin +
788 fbi->var.hsync_len,
789 fbi->var.upper_margin,
790 fbi->var.vsync_len,
791 fbi->var.lower_margin +
792 fbi->var.vsync_len, sig_cfg) != 0) {
793 mutex_unlock(&mx3_fbi->mutex);
794 dev_err(fbi->device,
795 "mx3fb: Error initializing panel.\n");
796 return -EINVAL;
797 }
798 }
799
800 sdc_set_window_pos(mx3fb, mx3_fbi->ipu_ch, 0, 0);
801
802 mx3_fbi->cur_ipu_buf = 0;
803
804 video->out_pixel_fmt = bpp_to_pixfmt(fbi->var.bits_per_pixel);
805 video->out_width = fbi->var.xres;
806 video->out_height = fbi->var.yres;
807 video->out_stride = fbi->var.xres_virtual;
808
809 if (mx3_fbi->blank == FB_BLANK_UNBLANK)
810 sdc_enable_channel(mx3_fbi);
811
812 mutex_unlock(&mx3_fbi->mutex);
813
814 return 0;
815}
816
817/**
818 * mx3fb_check_var() - check and adjust framebuffer variable parameters.
819 * @var: framebuffer variable parameters
820 * @fbi: framebuffer information pointer
821 */
822static int mx3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
823{
824 struct mx3fb_info *mx3_fbi = fbi->par;
825 u32 vtotal;
826 u32 htotal;
827
828 dev_dbg(fbi->device, "%s\n", __func__);
829
830 if (var->xres_virtual < var->xres)
831 var->xres_virtual = var->xres;
832 if (var->yres_virtual < var->yres)
833 var->yres_virtual = var->yres;
834
835 if ((var->bits_per_pixel != 32) && (var->bits_per_pixel != 24) &&
836 (var->bits_per_pixel != 16))
837 var->bits_per_pixel = default_bpp;
838
839 switch (var->bits_per_pixel) {
840 case 16:
841 var->red.length = 5;
842 var->red.offset = 11;
843 var->red.msb_right = 0;
844
845 var->green.length = 6;
846 var->green.offset = 5;
847 var->green.msb_right = 0;
848
849 var->blue.length = 5;
850 var->blue.offset = 0;
851 var->blue.msb_right = 0;
852
853 var->transp.length = 0;
854 var->transp.offset = 0;
855 var->transp.msb_right = 0;
856 break;
857 case 24:
858 var->red.length = 8;
859 var->red.offset = 16;
860 var->red.msb_right = 0;
861
862 var->green.length = 8;
863 var->green.offset = 8;
864 var->green.msb_right = 0;
865
866 var->blue.length = 8;
867 var->blue.offset = 0;
868 var->blue.msb_right = 0;
869
870 var->transp.length = 0;
871 var->transp.offset = 0;
872 var->transp.msb_right = 0;
873 break;
874 case 32:
875 var->red.length = 8;
876 var->red.offset = 16;
877 var->red.msb_right = 0;
878
879 var->green.length = 8;
880 var->green.offset = 8;
881 var->green.msb_right = 0;
882
883 var->blue.length = 8;
884 var->blue.offset = 0;
885 var->blue.msb_right = 0;
886
887 var->transp.length = 8;
888 var->transp.offset = 24;
889 var->transp.msb_right = 0;
890 break;
891 }
892
893 if (var->pixclock < 1000) {
894 htotal = var->xres + var->right_margin + var->hsync_len +
895 var->left_margin;
896 vtotal = var->yres + var->lower_margin + var->vsync_len +
897 var->upper_margin;
898 var->pixclock = (vtotal * htotal * 6UL) / 100UL;
899 var->pixclock = KHZ2PICOS(var->pixclock);
900 dev_dbg(fbi->device, "pixclock set for 60Hz refresh = %u ps\n",
901 var->pixclock);
902 }
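	/*
	 * Worked example (illustrative numbers, not from the driver): for a
	 * 640x480 mode with htotal = 800 and vtotal = 525, the computation
	 * above yields 800 * 525 * 6 / 100 = 25200 kHz, and KHZ2PICOS(25200)
	 * stores roughly 39682 ps per pixel, i.e. a ~60 Hz refresh.
	 */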
903
904 var->height = -1;
905 var->width = -1;
906 var->grayscale = 0;
907
908 /* Preserve sync flags */
909 var->sync |= mx3_fbi->sync;
910 mx3_fbi->sync |= var->sync;
911
912 return 0;
913}
914
915static u32 chan_to_field(unsigned int chan, struct fb_bitfield *bf)
916{
917 chan &= 0xffff;
918 chan >>= 16 - bf->length;
919 return chan << bf->offset;
920}
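/*
 * Worked example (illustrative): with the 16 bpp layout set up in
 * mx3fb_check_var() (red.length = 5, red.offset = 11), a full-scale red of
 * 0xffff becomes 0xffff >> (16 - 5) = 0x1f, shifted left by 11 -> 0xf800,
 * the familiar RGB565 red mask.
 */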
921
922static int mx3fb_setcolreg(unsigned int regno, unsigned int red,
923 unsigned int green, unsigned int blue,
924 unsigned int trans, struct fb_info *fbi)
925{
926 struct mx3fb_info *mx3_fbi = fbi->par;
927 u32 val;
928 int ret = 1;
929
930 dev_dbg(fbi->device, "%s\n", __func__);
931
932 mutex_lock(&mx3_fbi->mutex);
933 /*
934 * If greyscale is true, then we convert the RGB value
935 * to greyscale no matter what visual we are using.
936 */
937 if (fbi->var.grayscale)
938 red = green = blue = (19595 * red + 38470 * green +
939 7471 * blue) >> 16;
940 switch (fbi->fix.visual) {
941 case FB_VISUAL_TRUECOLOR:
942 /*
943 * 16-bit True Colour. We encode the RGB value
944 * according to the RGB bitfield information.
945 */
946 if (regno < 16) {
947 u32 *pal = fbi->pseudo_palette;
948
949 val = chan_to_field(red, &fbi->var.red);
950 val |= chan_to_field(green, &fbi->var.green);
951 val |= chan_to_field(blue, &fbi->var.blue);
952
953 pal[regno] = val;
954
955 ret = 0;
956 }
957 break;
958
959 case FB_VISUAL_STATIC_PSEUDOCOLOR:
960 case FB_VISUAL_PSEUDOCOLOR:
961 break;
962 }
963 mutex_unlock(&mx3_fbi->mutex);
964
965 return ret;
966}
967
968/**
 969 * mx3fb_blank() - blank the display.
 * @blank:	blanking level (one of the FB_BLANK_* constants)
 * @fbi:	framebuffer information pointer
970 */
971static int mx3fb_blank(int blank, struct fb_info *fbi)
972{
973 struct mx3fb_info *mx3_fbi = fbi->par;
974 struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
975
976 dev_dbg(fbi->device, "%s\n", __func__);
977
978 dev_dbg(fbi->device, "blank = %d\n", blank);
979
980 if (mx3_fbi->blank == blank)
981 return 0;
982
983 mutex_lock(&mx3_fbi->mutex);
984 mx3_fbi->blank = blank;
985
986 switch (blank) {
987 case FB_BLANK_POWERDOWN:
988 case FB_BLANK_VSYNC_SUSPEND:
989 case FB_BLANK_HSYNC_SUSPEND:
990 case FB_BLANK_NORMAL:
991 sdc_disable_channel(mx3_fbi);
992 sdc_set_brightness(mx3fb, 0);
993 break;
994 case FB_BLANK_UNBLANK:
995 sdc_enable_channel(mx3_fbi);
996 sdc_set_brightness(mx3fb, mx3fb->backlight_level);
997 break;
998 }
999 mutex_unlock(&mx3_fbi->mutex);
1000
1001 return 0;
1002}
1003
1004/**
1005 * mx3fb_pan_display() - pan or wrap the display
1006 * @var: variable screen buffer information.
1007 * @fbi: framebuffer information pointer.
1008 *
1009 * We look only at xoffset, yoffset and the FB_VMODE_YWRAP flag.
1010 */
1011static int mx3fb_pan_display(struct fb_var_screeninfo *var,
1012 struct fb_info *fbi)
1013{
1014 struct mx3fb_info *mx3_fbi = fbi->par;
1015 u32 y_bottom;
1016 unsigned long base;
1017 off_t offset;
1018 dma_cookie_t cookie;
1019 struct scatterlist *sg = mx3_fbi->sg;
1020 struct dma_chan *dma_chan = &mx3_fbi->idmac_channel->dma_chan;
1021 struct dma_async_tx_descriptor *txd;
1022 int ret;
1023
1024 dev_dbg(fbi->device, "%s [%c]\n", __func__,
1025 list_empty(&mx3_fbi->idmac_channel->queue) ? '-' : '+');
1026
1027 if (var->xoffset > 0) {
1028 dev_dbg(fbi->device, "x panning not supported\n");
1029 return -EINVAL;
1030 }
1031
1032 if (fbi->var.xoffset == var->xoffset &&
1033 fbi->var.yoffset == var->yoffset)
1034 return 0; /* No change, do nothing */
1035
1036 y_bottom = var->yoffset;
1037
1038 if (!(var->vmode & FB_VMODE_YWRAP))
1039 y_bottom += var->yres;
1040
1041 if (y_bottom > fbi->var.yres_virtual)
1042 return -EINVAL;
1043
1044 mutex_lock(&mx3_fbi->mutex);
1045
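	/* Byte offset of the new top-left pixel within the virtual framebuffer */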
1046 offset = (var->yoffset * var->xres_virtual + var->xoffset) *
1047 (var->bits_per_pixel / 8);
1048 base = fbi->fix.smem_start + offset;
1049
1050 dev_dbg(fbi->device, "Updating SDC BG buf %d address=0x%08lX\n",
1051 mx3_fbi->cur_ipu_buf, base);
1052
1053 /*
1054 * We enable the End of Frame interrupt; its handler frees a tx descriptor,
1055 * which we will need for the next device_prep_slave_sg() call. The handler
1056 * then disables the IRQ again.
1057 */
1058 init_completion(&mx3_fbi->flip_cmpl);
1059 enable_irq(mx3_fbi->idmac_channel->eof_irq);
1060
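	/* Give the end-of-frame interrupt up to HZ/10 jiffies (100 ms) to arrive */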
1061 ret = wait_for_completion_timeout(&mx3_fbi->flip_cmpl, HZ / 10);
1062 if (ret <= 0) {
1063 mutex_unlock(&mx3_fbi->mutex);
1064 dev_info(fbi->device, "Panning failed due to %s\n", ret < 0 ?
1065 "user interrupt" : "timeout");
1066 return ret ? : -ETIMEDOUT;
1067 }
1068
1069 mx3_fbi->cur_ipu_buf = !mx3_fbi->cur_ipu_buf;
1070
1071 sg_dma_address(&sg[mx3_fbi->cur_ipu_buf]) = base;
1072 sg_set_page(&sg[mx3_fbi->cur_ipu_buf],
1073 virt_to_page(fbi->screen_base + offset), fbi->fix.smem_len,
1074 offset_in_page(fbi->screen_base + offset));
1075
1076 txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg +
1077 mx3_fbi->cur_ipu_buf, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
1078 if (!txd) {
1079 dev_err(fbi->device,
1080 "Error preparing a DMA transaction descriptor.\n");
1081 mutex_unlock(&mx3_fbi->mutex);
1082 return -EIO;
1083 }
1084
1085 txd->callback_param = txd;
1086 txd->callback = mx3fb_dma_done;
1087
1088 /*
1089 * Emulate original mx3fb behaviour: each new call to idmac_tx_submit()
1090 * should switch to another buffer
1091 */
1092 cookie = txd->tx_submit(txd);
1093 dev_dbg(fbi->device, "%d: Submit %p #%d\n", __LINE__, txd, cookie);
1094 if (cookie < 0) {
1095 dev_err(fbi->device,
1096 "Error updating SDC buf %d to address=0x%08lX\n",
1097 mx3_fbi->cur_ipu_buf, base);
1098 mutex_unlock(&mx3_fbi->mutex);
1099 return -EIO;
1100 }
1101
1102 if (mx3_fbi->txd)
1103 async_tx_ack(mx3_fbi->txd);
1104 mx3_fbi->txd = txd;
1105
1106 fbi->var.xoffset = var->xoffset;
1107 fbi->var.yoffset = var->yoffset;
1108
1109 if (var->vmode & FB_VMODE_YWRAP)
1110 fbi->var.vmode |= FB_VMODE_YWRAP;
1111 else
1112 fbi->var.vmode &= ~FB_VMODE_YWRAP;
1113
1114 mutex_unlock(&mx3_fbi->mutex);
1115
1116 dev_dbg(fbi->device, "Update complete\n");
1117
1118 return 0;
1119}
1120
1121/*
1122 * This structure contains the pointers to the control functions that are
1123 * invoked by the core framebuffer driver to perform operations like
1124 * blitting, rectangle filling, region copying and cursor definition.
1125 */
1126static struct fb_ops mx3fb_ops = {
1127 .owner = THIS_MODULE,
1128 .fb_set_par = mx3fb_set_par,
1129 .fb_check_var = mx3fb_check_var,
1130 .fb_setcolreg = mx3fb_setcolreg,
1131 .fb_pan_display = mx3fb_pan_display,
1132 .fb_fillrect = cfb_fillrect,
1133 .fb_copyarea = cfb_copyarea,
1134 .fb_imageblit = cfb_imageblit,
1135 .fb_blank = mx3fb_blank,
1136};
1137
1138#ifdef CONFIG_PM
1139/*
1140 * Power management hooks. Note that we won't be called from IRQ context,
1141 * unlike the blank functions above, so we may sleep.
1142 */
1143
1144/*
1145 * Suspends the framebuffer and blanks the screen (power management support).
1146 */
1147static int mx3fb_suspend(struct platform_device *pdev, pm_message_t state)
1148{
1149 struct mx3fb_data *drv_data = platform_get_drvdata(pdev);
1150 struct mx3fb_info *mx3_fbi = drv_data->fbi->par;
1151
1152 acquire_console_sem();
1153 fb_set_suspend(drv_data->fbi, 1);
1154 release_console_sem();
1155
1156 if (mx3_fbi->blank == FB_BLANK_UNBLANK) {
1157 sdc_disable_channel(mx3_fbi);
1158 sdc_set_brightness(drv_data, 0);
1159
1160 }
1161 return 0;
1162}
1163
1164/*
1165 * Resumes the framebuffer and unblanks the screen (power management support).
1166 */
1167static int mx3fb_resume(struct platform_device *pdev)
1168{
1169 struct mx3fb_data *drv_data = platform_get_drvdata(pdev);
1170 struct mx3fb_info *mx3_fbi = drv_data->fbi->par;
1171
1172 if (mx3_fbi->blank == FB_BLANK_UNBLANK) {
1173 sdc_enable_channel(mx3_fbi);
1174 sdc_set_brightness(drv_data, drv_data->backlight_level);
1175 }
1176
1177 acquire_console_sem();
1178 fb_set_suspend(drv_data->fbi, 0);
1179 release_console_sem();
1180
1181 return 0;
1182}
1183#else
1184#define mx3fb_suspend NULL
1185#define mx3fb_resume NULL
1186#endif
1187
1188/*
1189 * Main framebuffer functions
1190 */
1191
1192/**
1193 * mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer.
1194 * @fbi: framebuffer information pointer
1195 * @return: Error code indicating success or failure
1196 *
1197 * This buffer is remapped into a non-cached, non-buffered memory region so
1198 * that palette and pixel writes can occur without flushing the cache. Once the
1199 * area is remapped, all virtual memory accesses to the video memory should go
1200 * through the new region.
1201 */
1202static int mx3fb_map_video_memory(struct fb_info *fbi)
1203{
1204 int retval = 0;
1205 dma_addr_t addr;
1206
1207 fbi->screen_base = dma_alloc_writecombine(fbi->device,
1208 fbi->fix.smem_len,
1209 &addr, GFP_DMA);
1210
1211 if (!fbi->screen_base) {
1212 dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n",
1213 fbi->fix.smem_len);
1214 retval = -EBUSY;
1215 goto err0;
1216 }
1217
1218 fbi->fix.smem_start = addr;
1219
1220 dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n",
1221 (uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len);
1222
1223 fbi->screen_size = fbi->fix.smem_len;
1224
1225 /* Clear the screen */
1226 memset((char *)fbi->screen_base, 0, fbi->fix.smem_len);
1227
1228 return 0;
1229
1230err0:
1231 fbi->fix.smem_len = 0;
1232 fbi->fix.smem_start = 0;
1233 fbi->screen_base = NULL;
1234 return retval;
1235}
1236
1237/**
1238 * mx3fb_unmap_video_memory() - de-allocate frame buffer memory.
1239 * @fbi: framebuffer information pointer
1240 * @return: error code indicating success or failure
1241 */
1242static int mx3fb_unmap_video_memory(struct fb_info *fbi)
1243{
1244 dma_free_writecombine(fbi->device, fbi->fix.smem_len,
1245 fbi->screen_base, fbi->fix.smem_start);
1246
1247 fbi->screen_base = NULL;
1248 fbi->fix.smem_start = 0;
1249 fbi->fix.smem_len = 0;
1250 return 0;
1251}
1252
1253/**
1254 * mx3fb_init_fbinfo() - initialize framebuffer information object.
 * @dev:	device to allocate the framebuffer for
 * @ops:	framebuffer operations to install
1255 * @return: initialized framebuffer structure.
1256 */
1257static struct fb_info *mx3fb_init_fbinfo(struct device *dev, struct fb_ops *ops)
1258{
1259 struct fb_info *fbi;
1260 struct mx3fb_info *mx3fbi;
1261 int ret;
1262
1263 /* Allocate sufficient memory for the fb structure */
1264 fbi = framebuffer_alloc(sizeof(struct mx3fb_info), dev);
1265 if (!fbi)
1266 return NULL;
1267
1268 mx3fbi = fbi->par;
1269 mx3fbi->cookie = -EINVAL;
1270 mx3fbi->cur_ipu_buf = 0;
1271
1272 fbi->var.activate = FB_ACTIVATE_NOW;
1273
1274 fbi->fbops = ops;
1275 fbi->flags = FBINFO_FLAG_DEFAULT;
1276 fbi->pseudo_palette = mx3fbi->pseudo_palette;
1277
1278 mutex_init(&mx3fbi->mutex);
1279
1280 /* Allocate colormap */
1281 ret = fb_alloc_cmap(&fbi->cmap, 16, 0);
1282 if (ret < 0) {
1283 framebuffer_release(fbi);
1284 return NULL;
1285 }
1286
1287 return fbi;
1288}
1289
1290static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
1291{
1292 struct device *dev = mx3fb->dev;
1293 struct mx3fb_platform_data *mx3fb_pdata = dev->platform_data;
1294 const char *name = mx3fb_pdata->name;
1295 unsigned int irq;
1296 struct fb_info *fbi;
1297 struct mx3fb_info *mx3fbi;
1298 const struct fb_videomode *mode;
1299 int ret, num_modes;
1300
1301 ichan->client = mx3fb;
1302 irq = ichan->eof_irq;
1303
1304 if (ichan->dma_chan.chan_id != IDMAC_SDC_0)
1305 return -EINVAL;
1306
1307 fbi = mx3fb_init_fbinfo(dev, &mx3fb_ops);
1308 if (!fbi)
1309 return -ENOMEM;
1310
1311 if (!fb_mode)
1312 fb_mode = name;
1313
1314 if (!fb_mode) {
1315 ret = -EINVAL;
1316 goto emode;
1317 }
1318
1319 if (mx3fb_pdata->mode && mx3fb_pdata->num_modes) {
1320 mode = mx3fb_pdata->mode;
1321 num_modes = mx3fb_pdata->num_modes;
1322 } else {
1323 mode = mx3fb_modedb;
1324 num_modes = ARRAY_SIZE(mx3fb_modedb);
1325 }
1326
1327 if (!fb_find_mode(&fbi->var, fbi, fb_mode, mode,
1328 num_modes, NULL, default_bpp)) {
1329 ret = -EBUSY;
1330 goto emode;
1331 }
1332
1333 fb_videomode_to_modelist(mode, num_modes, &fbi->modelist);
1334
1335 /* Default Y virtual size is 2x panel size */
1336 fbi->var.yres_virtual = fbi->var.yres * 2;
1337
1338 mx3fb->fbi = fbi;
1339
1340 /* set Display Interface clock period */
1341 mx3fb_write_reg(mx3fb, 0x00100010L, DI_HSP_CLK_PER);
1342 /* Might need to trigger HSP clock change - see 44.3.3.8.5 */
1343
1344 sdc_set_brightness(mx3fb, 255);
1345 sdc_set_global_alpha(mx3fb, true, 0xFF);
1346 sdc_set_color_key(mx3fb, IDMAC_SDC_0, false, 0);
1347
1348 mx3fbi = fbi->par;
1349 mx3fbi->idmac_channel = ichan;
1350 mx3fbi->ipu_ch = ichan->dma_chan.chan_id;
1351 mx3fbi->mx3fb = mx3fb;
1352 mx3fbi->blank = FB_BLANK_NORMAL;
1353
1354 init_completion(&mx3fbi->flip_cmpl);
1355 disable_irq(ichan->eof_irq);
1356 dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq);
1357 ret = mx3fb_set_par(fbi);
1358 if (ret < 0)
1359 goto esetpar;
1360
1361 mx3fb_blank(FB_BLANK_UNBLANK, fbi);
1362
1363 dev_info(dev, "mx3fb: fb registered, using mode %s\n", fb_mode);
1364
1365 ret = register_framebuffer(fbi);
1366 if (ret < 0)
1367 goto erfb;
1368
1369 return 0;
1370
1371erfb:
1372esetpar:
1373emode:
1374 fb_dealloc_cmap(&fbi->cmap);
1375 framebuffer_release(fbi);
1376
1377 return ret;
1378}
1379
1380static bool chan_filter(struct dma_chan *chan, void *arg)
1381{
1382 struct dma_chan_request *rq = arg;
1383 struct device *dev;
1384 struct mx3fb_platform_data *mx3fb_pdata;
1385
1386 if (!rq)
1387 return false;
1388
1389 dev = rq->mx3fb->dev;
1390 mx3fb_pdata = dev->platform_data;
1391
1392 return rq->id == chan->chan_id &&
1393 mx3fb_pdata->dma_dev == chan->device->dev;
1394}
1395
1396static void release_fbi(struct fb_info *fbi)
1397{
1398 mx3fb_unmap_video_memory(fbi);
1399
1400 fb_dealloc_cmap(&fbi->cmap);
1401
1402 unregister_framebuffer(fbi);
1403 framebuffer_release(fbi);
1404}
1405
1406static int mx3fb_probe(struct platform_device *pdev)
1407{
1408 struct device *dev = &pdev->dev;
1409 int ret;
1410 struct resource *sdc_reg;
1411 struct mx3fb_data *mx3fb;
1412 dma_cap_mask_t mask;
1413 struct dma_chan *chan;
1414 struct dma_chan_request rq;
1415
1416 /*
1417 * Display Interface (DI) and Synchronous Display Controller (SDC)
1418 * registers
1419 */
1420 sdc_reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1421 if (!sdc_reg)
1422 return -EINVAL;
1423
1424 mx3fb = kzalloc(sizeof(*mx3fb), GFP_KERNEL);
1425 if (!mx3fb)
1426 return -ENOMEM;
1427
1428 spin_lock_init(&mx3fb->lock);
1429
1430 mx3fb->reg_base = ioremap(sdc_reg->start, resource_size(sdc_reg));
1431 if (!mx3fb->reg_base) {
1432 ret = -ENOMEM;
1433 goto eremap;
1434 }
1435
1436 pr_debug("Remapped %x to %x at %p\n", sdc_reg->start, sdc_reg->end,
1437 mx3fb->reg_base);
1438
1439 /* IDMAC interface */
1440 dmaengine_get();
1441
1442 mx3fb->dev = dev;
1443 platform_set_drvdata(pdev, mx3fb);
1444
1445 rq.mx3fb = mx3fb;
1446
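	/*
	 * Request the dedicated IDMAC_SDC_0 slave channel from the dmaengine
	 * core; chan_filter() accepts only the channel whose id and DMA device
	 * match the request built from the platform data.
	 */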
1447 dma_cap_zero(mask);
1448 dma_cap_set(DMA_SLAVE, mask);
1449 dma_cap_set(DMA_PRIVATE, mask);
1450 rq.id = IDMAC_SDC_0;
1451 chan = dma_request_channel(mask, chan_filter, &rq);
1452 if (!chan) {
1453 ret = -EBUSY;
1454 goto ersdc0;
1455 }
1456
1457 ret = init_fb_chan(mx3fb, to_idmac_chan(chan));
1458 if (ret < 0)
1459 goto eisdc0;
1460
1461 mx3fb->backlight_level = 255;
1462
1463 return 0;
1464
1465eisdc0:
1466 dma_release_channel(chan);
1467ersdc0:
1468 dmaengine_put();
1469 iounmap(mx3fb->reg_base);
1470eremap:
1471 kfree(mx3fb);
1472 dev_err(dev, "mx3fb: failed to register fb\n");
1473 return ret;
1474}
1475
1476static int mx3fb_remove(struct platform_device *dev)
1477{
1478 struct mx3fb_data *mx3fb = platform_get_drvdata(dev);
1479 struct fb_info *fbi = mx3fb->fbi;
1480 struct mx3fb_info *mx3_fbi = fbi->par;
1481 struct dma_chan *chan;
1482
1483 chan = &mx3_fbi->idmac_channel->dma_chan;
1484 release_fbi(fbi);
1485
1486 dma_release_channel(chan);
1487 dmaengine_put();
1488
1489 iounmap(mx3fb->reg_base);
1490 kfree(mx3fb);
1491 return 0;
1492}
1493
1494static struct platform_driver mx3fb_driver = {
1495 .driver = {
1496 .name = MX3FB_NAME,
1497 },
1498 .probe = mx3fb_probe,
1499 .remove = mx3fb_remove,
1500 .suspend = mx3fb_suspend,
1501 .resume = mx3fb_resume,
1502};
1503
1504/*
1505 * Parse user specified options (`video=mx3fb:')
1506 * example:
1507 * video=mx3fb:bpp=16
1508 */
1509static int mx3fb_setup(void)
1510{
1511#ifndef MODULE
1512 char *opt, *options = NULL;
1513
1514 if (fb_get_options("mx3fb", &options))
1515 return -ENODEV;
1516
1517 if (!options || !*options)
1518 return 0;
1519
1520 while ((opt = strsep(&options, ",")) != NULL) {
1521 if (!*opt)
1522 continue;
1523 if (!strncmp(opt, "bpp=", 4))
1524 default_bpp = simple_strtoul(opt + 4, NULL, 0);
1525 else
1526 fb_mode = opt;
1527 }
1528#endif
1529
1530 return 0;
1531}
1532
1533static int __init mx3fb_init(void)
1534{
1535 int ret = mx3fb_setup();
1536
1537 if (ret < 0)
1538 return ret;
1539
1540 ret = platform_driver_register(&mx3fb_driver);
1541 return ret;
1542}
1543
1544static void __exit mx3fb_exit(void)
1545{
1546 platform_driver_unregister(&mx3fb_driver);
1547}
1548
1549module_init(mx3fb_init);
1550module_exit(mx3fb_exit);
1551
1552MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1553MODULE_DESCRIPTION("MX3 framebuffer driver");
1554MODULE_ALIAS("platform:" MX3FB_NAME);
1555MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/nvidia/nv_i2c.c b/drivers/video/nvidia/nv_i2c.c
index 6fd7cb8f9b8e..6aaddb4f6788 100644
--- a/drivers/video/nvidia/nv_i2c.c
+++ b/drivers/video/nvidia/nv_i2c.c
@@ -87,7 +87,6 @@ static int nvidia_setup_i2c_bus(struct nvidia_i2c_chan *chan, const char *name,
 
 	strcpy(chan->adapter.name, name);
 	chan->adapter.owner = THIS_MODULE;
-	chan->adapter.id = I2C_HW_B_NVIDIA;
 	chan->adapter.class = i2c_class;
 	chan->adapter.algo_data = &chan->algo;
 	chan->adapter.dev.parent = &chan->par->pci_dev->dev;
diff --git a/drivers/video/omap/lcdc.c b/drivers/video/omap/lcdc.c
index 6e2ea7518761..ab3949256677 100644
--- a/drivers/video/omap/lcdc.c
+++ b/drivers/video/omap/lcdc.c
@@ -800,14 +800,14 @@ static int omap_lcdc_init(struct omapfb_device *fbdev, int ext_mode,
 	/* FIXME:
 	 * According to errata some platforms have a clock rate limitiation
 	 */
-	lcdc.lcd_ck = clk_get(NULL, "lcd_ck");
+	lcdc.lcd_ck = clk_get(fbdev->dev, "lcd_ck");
 	if (IS_ERR(lcdc.lcd_ck)) {
 		dev_err(fbdev->dev, "unable to access LCD clock\n");
 		r = PTR_ERR(lcdc.lcd_ck);
 		goto fail0;
 	}
 
-	tc_ck = clk_get(NULL, "tc_ck");
+	tc_ck = clk_get(fbdev->dev, "tc_ck");
 	if (IS_ERR(tc_ck)) {
 		dev_err(fbdev->dev, "unable to access TC clock\n");
 		r = PTR_ERR(tc_ck);
diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/savage/savagefb-i2c.c
index 783d4adffb93..574b29e9f8f2 100644
--- a/drivers/video/savage/savagefb-i2c.c
+++ b/drivers/video/savage/savagefb-i2c.c
@@ -137,7 +137,6 @@ static int savage_setup_i2c_bus(struct savagefb_i2c_chan *chan,
 	if (chan->par) {
 		strcpy(chan->adapter.name, name);
 		chan->adapter.owner = THIS_MODULE;
-		chan->adapter.id = I2C_HW_B_SAVAGE;
 		chan->adapter.algo_data = &chan->algo;
 		chan->adapter.dev.parent = &chan->par->pcidev->dev;
 		chan->algo.udelay = 10;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 3efa12f9ee50..09a3d5522b43 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -187,10 +187,10 @@ config EP93XX_WATCHDOG
 
 config OMAP_WATCHDOG
 	tristate "OMAP Watchdog"
-	depends on ARCH_OMAP16XX || ARCH_OMAP24XX
+	depends on ARCH_OMAP16XX || ARCH_OMAP24XX || ARCH_OMAP34XX
 	help
-	  Support for TI OMAP1610/OMAP1710/OMAP2420 watchdog. Say 'Y' here to
-	  enable the OMAP1610/OMAP1710 watchdog timer.
+	  Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog. Say 'Y'
+	  here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog timer.
 
 config PNX4008_WATCHDOG
 	tristate "PNX4008 Watchdog"
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 993e5f52afef..5531691f46ea 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -13,6 +13,7 @@
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/init.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 8dc7109d61b7..efa4b363ce72 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -298,6 +298,14 @@ static int decrease_reservation(unsigned long nr_pages)
 		frame_list[i] = pfn_to_mfn(pfn);
 
 		scrub_page(page);
+
+		if (!PageHighMem(page)) {
+			ret = HYPERVISOR_update_va_mapping(
+				(unsigned long)__va(pfn << PAGE_SHIFT),
+				__pte_ma(0), 0);
+			BUG_ON(ret);
+		}
+
 	}
 
 	/* Ensure that ballooned highmem pages don't have kmaps. */
@@ -490,7 +498,7 @@ static ssize_t store_target_kb(struct sys_device *dev,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	target_bytes = memparse(buf, &endchar);
+	target_bytes = simple_strtoull(buf, &endchar, 0) * 1024;
 
 	balloon_set_new_target(target_bytes >> PAGE_SHIFT);
 
@@ -500,8 +508,39 @@ static ssize_t store_target_kb(struct sys_device *dev,
 static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
 		   show_target_kb, store_target_kb);
 
+
+static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr,
+			   char *buf)
+{
+	return sprintf(buf, "%llu\n",
+		       (u64)balloon_stats.target_pages << PAGE_SHIFT);
+}
+
+static ssize_t store_target(struct sys_device *dev,
+			    struct sysdev_attribute *attr,
+			    const char *buf,
+			    size_t count)
+{
+	char *endchar;
+	unsigned long long target_bytes;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	target_bytes = memparse(buf, &endchar);
+
+	balloon_set_new_target(target_bytes >> PAGE_SHIFT);
+
+	return count;
+}
+
+static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR,
+		   show_target, store_target);
+
+
 static struct sysdev_attribute *balloon_attrs[] = {
 	&attr_target_kb,
+	&attr_target,
 };
 
 static struct attribute *balloon_info_attrs[] = {
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
index 875a4c59c594..a9592d981b10 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenfs/xenbus.c
@@ -291,7 +291,7 @@ static void watch_fired(struct xenbus_watch *watch,
 static int xenbus_write_transaction(unsigned msg_type,
 				    struct xenbus_file_priv *u)
 {
-	int rc, ret;
+	int rc;
 	void *reply;
 	struct xenbus_transaction_holder *trans = NULL;
 	LIST_HEAD(staging_q);
@@ -326,15 +326,14 @@ static int xenbus_write_transaction(unsigned msg_type,
 	}
 
 	mutex_lock(&u->reply_mutex);
-	ret = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
-	if (!ret)
-		ret = queue_reply(&staging_q, reply, u->u.msg.len);
-	if (!ret) {
+	rc = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
+	if (!rc)
+		rc = queue_reply(&staging_q, reply, u->u.msg.len);
+	if (!rc) {
 		list_splice_tail(&staging_q, &u->read_buffers);
 		wake_up(&u->read_waitq);
 	} else {
 		queue_cleanup(&staging_q);
-		rc = ret;
 	}
 	mutex_unlock(&u->reply_mutex);
 