aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/ata/Kconfig8
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/libata-core.c25
-rw-r--r--drivers/ata/pata_at91.c361
-rw-r--r--drivers/ata/sata_fsl.c35
-rw-r--r--drivers/char/bfin_jtag_comm.c30
-rw-r--r--drivers/char/moxa.c7
-rw-r--r--drivers/char/n_hdlc.c46
-rw-r--r--drivers/char/n_r3964.c26
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c3
-rw-r--r--drivers/char/vt_ioctl.c3
-rw-r--r--drivers/crypto/padlock-aes.c138
-rw-r--r--drivers/firewire/Kconfig60
-rw-r--r--drivers/firewire/Makefile4
-rw-r--r--drivers/firewire/core-card.c20
-rw-r--r--drivers/firewire/core-iso.c11
-rw-r--r--drivers/firewire/core.h87
-rw-r--r--drivers/firewire/net.c1655
-rw-r--r--drivers/i2c/busses/i2c-cpm.c6
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/ide/cmd64x.c3
-rw-r--r--drivers/ieee1394/Kconfig19
-rw-r--r--drivers/media/common/ir-keymaps.c23
-rw-r--r--drivers/media/dvb/frontends/stv0900.h7
-rw-r--r--drivers/media/dvb/frontends/stv0900_core.c100
-rw-r--r--drivers/media/dvb/frontends/stv0900_priv.h2
-rw-r--r--drivers/media/dvb/frontends/stv090x.c11
-rw-r--r--drivers/media/dvb/frontends/tda10048.c1
-rw-r--r--drivers/media/dvb/siano/smscoreapi.c4
-rw-r--r--drivers/media/radio/radio-tea5764.c4
-rw-r--r--drivers/media/video/Kconfig6
-rw-r--r--drivers/media/video/cx18/cx18-controls.c2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-avcore.c19
-rw-r--r--drivers/media/video/cx231xx/cx231xx-video.c26
-rw-r--r--drivers/media/video/cx231xx/cx231xx.h3
-rw-r--r--drivers/media/video/cx2341x.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c33
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c11
-rw-r--r--drivers/media/video/cx88/cx88-cards.c94
-rw-r--r--drivers/media/video/cx88/cx88-video.c11
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c56
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c1
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c38
-rw-r--r--drivers/media/video/em28xx/em28xx.h1
-rw-r--r--drivers/media/video/gspca/gspca.c8
-rw-r--r--drivers/media/video/gspca/ov519.c981
-rw-r--r--drivers/media/video/gspca/sonixj.c181
-rw-r--r--drivers/media/video/gspca/stv06xx/Makefile3
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.c53
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.h11
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c10
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_sensor.h3
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_st6422.c453
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_st6422.h59
-rw-r--r--drivers/media/video/ivtv/ivtv-controls.c2
-rw-r--r--drivers/media/video/mt9m001.c12
-rw-r--r--drivers/media/video/mt9t031.c14
-rw-r--r--drivers/media/video/mt9v022.c12
-rw-r--r--drivers/media/video/ov511.c2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-audio.c14
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c24
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c37
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c60
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-video-v4l.c35
-rw-r--r--drivers/media/video/pxa_camera.c34
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c11
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c12
-rw-r--r--drivers/media/video/tcm825x.c4
-rw-r--r--drivers/media/video/usbvideo/Kconfig5
-rw-r--r--drivers/media/video/v4l2-common.c181
-rw-r--r--drivers/media/video/vivi.c11
-rw-r--r--drivers/media/video/w9968cf.c35
-rw-r--r--drivers/media/video/zoran/zoran_driver.c14
-rw-r--r--drivers/mmc/host/Kconfig36
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mmc/host/sdhci-of.c3
-rw-r--r--drivers/mmc/host/sdhci-pci.c20
-rw-r--r--drivers/mmc/host/sdhci-s3c.c428
-rw-r--r--drivers/mmc/host/sdhci.c52
-rw-r--r--drivers/mmc/host/sdhci.h6
-rw-r--r--drivers/mmc/host/via-sdmmc.c1362
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c26
-rw-r--r--drivers/mtd/chips/jedec_probe.c13
-rw-r--r--drivers/mtd/devices/m25p80.c4
-rw-r--r--drivers/mtd/maps/Kconfig13
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c5
-rw-r--r--drivers/mtd/maps/integrator-flash.c226
-rw-r--r--drivers/mtd/maps/physmap.c40
-rw-r--r--drivers/mtd/maps/physmap_of.c199
-rw-r--r--drivers/mtd/maps/pmcmsp-ramroot.c104
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c22
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c23
-rw-r--r--drivers/mtd/maps/sa1100-flash.c23
-rw-r--r--drivers/mtd/maps/uclinux.c16
-rw-r--r--drivers/mtd/mtd_blkdevs.c2
-rw-r--r--drivers/mtd/mtdchar.c303
-rw-r--r--drivers/mtd/mtdcore.c47
-rw-r--r--drivers/mtd/mtdpart.c20
-rw-r--r--drivers/mtd/nand/Kconfig24
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/atmel_nand.c11
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c17
-rw-r--r--drivers/mtd/nand/davinci_nand.c342
-rw-r--r--drivers/mtd/nand/mxc_nand.c66
-rw-r--r--drivers/mtd/nand/nand_base.c3
-rw-r--r--drivers/mtd/nand/nand_ecc.c4
-rw-r--r--drivers/mtd/nand/omap2.c776
-rw-r--r--drivers/mtd/nand/orion_nand.c23
-rw-r--r--drivers/mtd/nand/plat_nand.c19
-rw-r--r--drivers/mtd/nand/s3c2410.c268
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c16
-rw-r--r--drivers/mtd/onenand/omap2.c4
-rw-r--r--drivers/mtd/onenand/onenand_base.c862
-rw-r--r--drivers/mtd/onenand/onenand_bbt.c14
-rw-r--r--drivers/mtd/onenand/onenand_sim.c81
-rw-r--r--drivers/net/Kconfig11
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/benet/be.h102
-rw-r--r--drivers/net/benet/be_cmds.c368
-rw-r--r--drivers/net/benet/be_cmds.h80
-rw-r--r--drivers/net/benet/be_hw.h8
-rw-r--r--drivers/net/benet/be_main.c299
-rw-r--r--drivers/net/e1000e/netdev.c2
-rw-r--r--drivers/net/mlx4/en_netdev.c8
-rw-r--r--drivers/net/mlx4/en_rx.c96
-rw-r--r--drivers/net/mlx4/en_tx.c29
-rw-r--r--drivers/net/mlx4/mlx4_en.h5
-rw-r--r--drivers/net/mv643xx_eth.c7
-rw-r--r--drivers/net/ppp_async.c1
-rw-r--r--drivers/net/ppp_synctty.c1
-rw-r--r--drivers/net/qla3xxx.c2
-rw-r--r--drivers/net/r8169.c3
-rw-r--r--drivers/net/s6gmac.c1073
-rw-r--r--drivers/net/usb/Kconfig4
-rw-r--r--drivers/net/usb/cdc_ether.c25
-rw-r--r--drivers/net/usb/cdc_subset.c7
-rw-r--r--drivers/net/usb/pegasus.c29
-rw-r--r--drivers/net/via-velocity.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c7
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h4
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c64
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c49
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c11
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c1
-rw-r--r--drivers/parport/parport_pc.c34
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/access.c19
-rw-r--r--drivers/pci/bus.c18
-rw-r--r--drivers/pci/dmar.c235
-rw-r--r--drivers/pci/hotplug/Kconfig4
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c1
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c1
-rw-r--r--drivers/pci/hotplug/cpqphp.h167
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c1100
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c371
-rw-r--r--drivers/pci/hotplug/cpqphp_nvram.c97
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c599
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c2
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c155
-rw-r--r--drivers/pci/hotplug/pciehp.h3
-rw-r--r--drivers/pci/hotplug/pciehp_core.c112
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c31
-rw-r--r--drivers/pci/hotplug/pcihp_skeleton.c1
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c1
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c1
-rw-r--r--drivers/pci/hotplug/shpchp_core.c1
-rw-r--r--drivers/pci/intel-iommu.c449
-rw-r--r--drivers/pci/intr_remapping.c8
-rw-r--r--drivers/pci/iov.c161
-rw-r--r--drivers/pci/msi.c100
-rw-r--r--drivers/pci/msi.h14
-rw-r--r--drivers/pci/pci.c246
-rw-r--r--drivers/pci/pci.h39
-rw-r--r--drivers/pci/pcie/aer/Kconfig15
-rw-r--r--drivers/pci/pcie/aer/Kconfig.debug18
-rw-r--r--drivers/pci/pcie/aer/Makefile3
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c473
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c3
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h6
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c278
-rw-r--r--drivers/pci/pcie/aer/ecrc.c131
-rw-r--r--drivers/pci/pcie/aspm.c787
-rw-r--r--drivers/pci/probe.c11
-rw-r--r--drivers/pci/quirks.c24
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pci/search.c32
-rw-r--r--drivers/pci/setup-bus.c53
-rw-r--r--drivers/pci/setup-res.c49
-rw-r--r--drivers/pci/slot.c39
-rw-r--r--drivers/platform/x86/acer-wmi.c4
-rw-r--r--drivers/platform/x86/eeepc-laptop.c50
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c14
-rw-r--r--drivers/s390/block/dasd.c20
-rw-r--r--drivers/s390/block/dasd_eckd.c10
-rw-r--r--drivers/s390/char/con3215.c22
-rw-r--r--drivers/s390/char/con3270.c13
-rw-r--r--drivers/s390/char/monreader.c6
-rw-r--r--drivers/s390/char/raw3270.c36
-rw-r--r--drivers/s390/char/sclp_con.c7
-rw-r--r--drivers/s390/char/sclp_vt220.c18
-rw-r--r--drivers/s390/char/tape_core.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c4
-rw-r--r--drivers/s390/char/vmur.c2
-rw-r--r--drivers/s390/cio/qdio.h11
-rw-r--r--drivers/s390/cio/qdio_debug.c3
-rw-r--r--drivers/s390/cio/qdio_main.c144
-rw-r--r--drivers/s390/cio/qdio_thinint.c114
-rw-r--r--drivers/s390/crypto/ap_bus.c85
-rw-r--r--drivers/s390/net/netiucv.c4
-rw-r--r--drivers/serial/Kconfig10
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/bfin_5xx.c5
-rw-r--r--drivers/serial/msm_serial.c772
-rw-r--r--drivers/serial/msm_serial.h117
-rw-r--r--drivers/serial/s3c2400.c2
-rw-r--r--drivers/serial/s3c2410.c2
-rw-r--r--drivers/serial/s3c2412.c2
-rw-r--r--drivers/serial/s3c2440.c2
-rw-r--r--drivers/serial/s3c24a0.c2
-rw-r--r--drivers/serial/s3c6400.c2
-rw-r--r--drivers/serial/samsung.c2
-rw-r--r--drivers/serial/samsung.h2
-rw-r--r--drivers/serial/sb1250-duart.c6
-rw-r--r--drivers/serial/sunhv.c2
-rw-r--r--drivers/serial/timbuart.c50
-rw-r--r--drivers/serial/zs.c6
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c29
233 files changed, 16075 insertions, 4551 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 431f8b439553..7ec7d88c5999 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -266,6 +266,7 @@ config ACPI_DEBUG_FUNC_TRACE
266 266
267config ACPI_PCI_SLOT 267config ACPI_PCI_SLOT
268 tristate "PCI slot detection driver" 268 tristate "PCI slot detection driver"
269 depends on SYSFS
269 default n 270 default n
270 help 271 help
271 This driver creates entries in /sys/bus/pci/slots/ for all PCI 272 This driver creates entries in /sys/bus/pci/slots/ for all PCI
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 2aa1908e5ce0..b17c57f85032 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -679,6 +679,14 @@ config PATA_PLATFORM
679 679
680 If unsure, say N. 680 If unsure, say N.
681 681
682config PATA_AT91
683 tristate "PATA support for AT91SAM9260"
684 depends on ARM && ARCH_AT91
685 help
686 This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
687
688 If unsure, say N.
689
682config PATA_OF_PLATFORM 690config PATA_OF_PLATFORM
683 tristate "OpenFirmware platform device PATA support" 691 tristate "OpenFirmware platform device PATA support"
684 depends on PATA_PLATFORM && PPC_OF 692 depends on PATA_PLATFORM && PPC_OF
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 1558059874f0..38906f9bbb4e 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -72,6 +72,7 @@ obj-$(CONFIG_PATA_SCH) += pata_sch.o
72obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o 72obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
73obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o 73obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
74obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o 74obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
75obj-$(CONFIG_PATA_AT91) += pata_at91.o
75obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o 76obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
76obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o 77obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
77# Should be last but two libata driver 78# Should be last but two libata driver
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index ca4d208ddf3b..045a486a09ea 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -125,19 +125,19 @@ MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link spe
125 125
126static int atapi_enabled = 1; 126static int atapi_enabled = 1;
127module_param(atapi_enabled, int, 0444); 127module_param(atapi_enabled, int, 0444);
128MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); 128MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
129 129
130static int atapi_dmadir = 0; 130static int atapi_dmadir = 0;
131module_param(atapi_dmadir, int, 0444); 131module_param(atapi_dmadir, int, 0444);
132MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); 132MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
133 133
134int atapi_passthru16 = 1; 134int atapi_passthru16 = 1;
135module_param(atapi_passthru16, int, 0444); 135module_param(atapi_passthru16, int, 0444);
136MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)"); 136MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
137 137
138int libata_fua = 0; 138int libata_fua = 0;
139module_param_named(fua, libata_fua, int, 0444); 139module_param_named(fua, libata_fua, int, 0444);
140MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); 140MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
141 141
142static int ata_ignore_hpa; 142static int ata_ignore_hpa;
143module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); 143module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
@@ -153,11 +153,11 @@ MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
153 153
154int libata_noacpi = 0; 154int libata_noacpi = 0;
155module_param_named(noacpi, libata_noacpi, int, 0444); 155module_param_named(noacpi, libata_noacpi, int, 0444);
156MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set"); 156MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
157 157
158int libata_allow_tpm = 0; 158int libata_allow_tpm = 0;
159module_param_named(allow_tpm, libata_allow_tpm, int, 0444); 159module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
160MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands"); 160MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
161 161
162MODULE_AUTHOR("Jeff Garzik"); 162MODULE_AUTHOR("Jeff Garzik");
163MODULE_DESCRIPTION("Library module for ATA devices"); 163MODULE_DESCRIPTION("Library module for ATA devices");
@@ -1993,11 +1993,17 @@ unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1993 * Check if the current speed of the device requires IORDY. Used 1993 * Check if the current speed of the device requires IORDY. Used
1994 * by various controllers for chip configuration. 1994 * by various controllers for chip configuration.
1995 */ 1995 */
1996
1997unsigned int ata_pio_need_iordy(const struct ata_device *adev) 1996unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1998{ 1997{
1999 /* Controller doesn't support IORDY. Probably a pointless check 1998 /* Don't set IORDY if we're preparing for reset. IORDY may
2000 as the caller should know this */ 1999 * lead to controller lock up on certain controllers if the
2000 * port is not occupied. See bko#11703 for details.
2001 */
2002 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
2003 return 0;
2004 /* Controller doesn't support IORDY. Probably a pointless
2005 * check as the caller should know this.
2006 */
2001 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) 2007 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
2002 return 0; 2008 return 0;
2003 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */ 2009 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
@@ -2020,7 +2026,6 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
2020 * Compute the highest mode possible if we are not using iordy. Return 2026 * Compute the highest mode possible if we are not using iordy. Return
2021 * -1 if no iordy mode is available. 2027 * -1 if no iordy mode is available.
2022 */ 2028 */
2023
2024static u32 ata_pio_mask_no_iordy(const struct ata_device *adev) 2029static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
2025{ 2030{
2026 /* If we have no drive specific rule, then PIO 2 is non IORDY */ 2031 /* If we have no drive specific rule, then PIO 2 is non IORDY */
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
new file mode 100644
index 000000000000..4b27617be26d
--- /dev/null
+++ b/drivers/ata/pata_at91.c
@@ -0,0 +1,361 @@
1/*
2 * PATA driver for AT91SAM9260 Static Memory Controller
3 * with CompactFlash interface in True IDE mode
4 *
5 * Copyright (C) 2009 Matyukevich Sergey
6 *
7 * Based on:
8 * * generic platform driver by Paul Mundt: drivers/ata/pata_platform.c
9 * * pata_at32 driver by Kristoffer Nyborg Gregertsen
10 * * at91_ide driver by Stanislaw Gruszka
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License version 2
14 * as published by the Free Software Foundation.
15 *
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/blkdev.h>
22#include <scsi/scsi_host.h>
23#include <linux/ata.h>
24#include <linux/clk.h>
25#include <linux/libata.h>
26#include <linux/platform_device.h>
27#include <linux/ata_platform.h>
28
29#include <mach/at91sam9260_matrix.h>
30#include <mach/at91sam9_smc.h>
31#include <mach/at91sam9260.h>
32#include <mach/board.h>
33#include <mach/gpio.h>
34
35
36#define DRV_NAME "pata_at91"
37#define DRV_VERSION "0.1"
38
39#define CF_IDE_OFFSET 0x00c00000
40#define CF_ALT_IDE_OFFSET 0x00e00000
41#define CF_IDE_RES_SIZE 0x08
42
43struct at91_ide_info {
44 unsigned long mode;
45 unsigned int cs;
46
47 void __iomem *ide_addr;
48 void __iomem *alt_addr;
49};
50
51const struct ata_timing initial_timing =
52 {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
53
54static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz)
55{
56 unsigned long mul;
57
58 /*
59 * cycles = x [nsec] * f [Hz] / 10^9 [ns in sec] =
60 * x * (f / 1_000_000_000) =
61 * x * ((f * 65536) / 1_000_000_000) / 65536 =
62 * x * (((f / 10_000) * 65536) / 100_000) / 65536 =
63 */
64
65 mul = (mck_hz / 10000) << 16;
66 mul /= 100000;
67
68 return (ns * mul + 65536) >> 16; /* rounding */
69}
70
71static void set_smc_mode(struct at91_ide_info *info)
72{
73 at91_sys_write(AT91_SMC_MODE(info->cs), info->mode);
74 return;
75}
76
77static void set_smc_timing(struct device *dev,
78 struct at91_ide_info *info, const struct ata_timing *ata)
79{
80 int read_cycle, write_cycle, active, recover;
81 int nrd_setup, nrd_pulse, nrd_recover;
82 int nwe_setup, nwe_pulse;
83
84 int ncs_write_setup, ncs_write_pulse;
85 int ncs_read_setup, ncs_read_pulse;
86
87 unsigned int mck_hz;
88 struct clk *mck;
89
90 read_cycle = ata->cyc8b;
91 nrd_setup = ata->setup;
92 nrd_pulse = ata->act8b;
93 nrd_recover = ata->rec8b;
94
95 mck = clk_get(NULL, "mck");
96 BUG_ON(IS_ERR(mck));
97 mck_hz = clk_get_rate(mck);
98
99 read_cycle = calc_mck_cycles(read_cycle, mck_hz);
100 nrd_setup = calc_mck_cycles(nrd_setup, mck_hz);
101 nrd_pulse = calc_mck_cycles(nrd_pulse, mck_hz);
102 nrd_recover = calc_mck_cycles(nrd_recover, mck_hz);
103
104 clk_put(mck);
105
106 active = nrd_setup + nrd_pulse;
107 recover = read_cycle - active;
108
109 /* Need at least two cycles recovery */
110 if (recover < 2)
111 read_cycle = active + 2;
112
113 /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */
114 ncs_read_setup = 1;
115 ncs_read_pulse = read_cycle - 2;
116
117 /* Write timings same as read timings */
118 write_cycle = read_cycle;
119 nwe_setup = nrd_setup;
120 nwe_pulse = nrd_pulse;
121 ncs_write_setup = ncs_read_setup;
122 ncs_write_pulse = ncs_read_pulse;
123
124 dev_dbg(dev, "ATA timings: nrd_setup = %d nrd_pulse = %d nrd_cycle = %d\n",
125 nrd_setup, nrd_pulse, read_cycle);
126 dev_dbg(dev, "ATA timings: nwe_setup = %d nwe_pulse = %d nwe_cycle = %d\n",
127 nwe_setup, nwe_pulse, write_cycle);
128 dev_dbg(dev, "ATA timings: ncs_read_setup = %d ncs_read_pulse = %d\n",
129 ncs_read_setup, ncs_read_pulse);
130 dev_dbg(dev, "ATA timings: ncs_write_setup = %d ncs_write_pulse = %d\n",
131 ncs_write_setup, ncs_write_pulse);
132
133 at91_sys_write(AT91_SMC_SETUP(info->cs),
134 AT91_SMC_NWESETUP_(nwe_setup) |
135 AT91_SMC_NRDSETUP_(nrd_setup) |
136 AT91_SMC_NCS_WRSETUP_(ncs_write_setup) |
137 AT91_SMC_NCS_RDSETUP_(ncs_read_setup));
138
139 at91_sys_write(AT91_SMC_PULSE(info->cs),
140 AT91_SMC_NWEPULSE_(nwe_pulse) |
141 AT91_SMC_NRDPULSE_(nrd_pulse) |
142 AT91_SMC_NCS_WRPULSE_(ncs_write_pulse) |
143 AT91_SMC_NCS_RDPULSE_(ncs_read_pulse));
144
145 at91_sys_write(AT91_SMC_CYCLE(info->cs),
146 AT91_SMC_NWECYCLE_(write_cycle) |
147 AT91_SMC_NRDCYCLE_(read_cycle));
148
149 return;
150}
151
152static void pata_at91_set_piomode(struct ata_port *ap, struct ata_device *adev)
153{
154 struct at91_ide_info *info = ap->host->private_data;
155 struct ata_timing timing;
156 int ret;
157
158 /* Compute ATA timing and set it to SMC */
159 ret = ata_timing_compute(adev, adev->pio_mode, &timing, 1000, 0);
160 if (ret) {
161 dev_warn(ap->dev, "Failed to compute ATA timing %d, \
162 set PIO_0 timing\n", ret);
163 set_smc_timing(ap->dev, info, &initial_timing);
164 } else {
165 set_smc_timing(ap->dev, info, &timing);
166 }
167
168 /* Setup SMC mode */
169 set_smc_mode(info);
170
171 return;
172}
173
174static unsigned int pata_at91_data_xfer_noirq(struct ata_device *dev,
175 unsigned char *buf, unsigned int buflen, int rw)
176{
177 struct at91_ide_info *info = dev->link->ap->host->private_data;
178 unsigned int consumed;
179 unsigned long flags;
180 unsigned int mode;
181
182 local_irq_save(flags);
183 mode = at91_sys_read(AT91_SMC_MODE(info->cs));
184
185 /* set 16bit mode before writing data */
186 at91_sys_write(AT91_SMC_MODE(info->cs),
187 (mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_16);
188
189 consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
190
191 /* restore 8bit mode after data is written */
192 at91_sys_write(AT91_SMC_MODE(info->cs),
193 (mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_8);
194
195 local_irq_restore(flags);
196 return consumed;
197}
198
199static struct scsi_host_template pata_at91_sht = {
200 ATA_PIO_SHT(DRV_NAME),
201};
202
203static struct ata_port_operations pata_at91_port_ops = {
204 .inherits = &ata_sff_port_ops,
205
206 .sff_data_xfer = pata_at91_data_xfer_noirq,
207 .set_piomode = pata_at91_set_piomode,
208 .cable_detect = ata_cable_40wire,
209 .port_start = ATA_OP_NULL,
210};
211
212static int __devinit pata_at91_probe(struct platform_device *pdev)
213{
214 struct at91_cf_data *board = pdev->dev.platform_data;
215 struct device *dev = &pdev->dev;
216 struct at91_ide_info *info;
217 struct resource *mem_res;
218 struct ata_host *host;
219 struct ata_port *ap;
220 int irq_flags = 0;
221 int irq = 0;
222 int ret;
223
224 /* get platform resources: IO/CTL memories and irq/rst pins */
225
226 if (pdev->num_resources != 1) {
227 dev_err(&pdev->dev, "invalid number of resources\n");
228 return -EINVAL;
229 }
230
231 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
232
233 if (!mem_res) {
234 dev_err(dev, "failed to get mem resource\n");
235 return -EINVAL;
236 }
237
238 irq = board->irq_pin;
239
240 /* init ata host */
241
242 host = ata_host_alloc(dev, 1);
243
244 if (!host)
245 return -ENOMEM;
246
247 ap = host->ports[0];
248 ap->ops = &pata_at91_port_ops;
249 ap->flags |= ATA_FLAG_SLAVE_POSS;
250 ap->pio_mask = ATA_PIO4;
251
252 if (!irq) {
253 ap->flags |= ATA_FLAG_PIO_POLLING;
254 ata_port_desc(ap, "no IRQ, using PIO polling");
255 }
256
257 info = kzalloc(sizeof(*info), GFP_KERNEL);
258
259 if (!info) {
260 dev_err(dev, "failed to allocate memory for private data\n");
261 return -ENOMEM;
262 }
263
264 info->cs = board->chipselect;
265 info->mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE |
266 AT91_SMC_EXNWMODE_READY | AT91_SMC_BAT_SELECT |
267 AT91_SMC_DBW_8 | AT91_SMC_TDF_(0);
268
269 info->ide_addr = devm_ioremap(dev,
270 mem_res->start + CF_IDE_OFFSET, CF_IDE_RES_SIZE);
271
272 if (!info->ide_addr) {
273 dev_err(dev, "failed to map IO base\n");
274 ret = -ENOMEM;
275 goto err_ide_ioremap;
276 }
277
278 info->alt_addr = devm_ioremap(dev,
279 mem_res->start + CF_ALT_IDE_OFFSET, CF_IDE_RES_SIZE);
280
281 if (!info->alt_addr) {
282 dev_err(dev, "failed to map CTL base\n");
283 ret = -ENOMEM;
284 goto err_alt_ioremap;
285 }
286
287 ap->ioaddr.cmd_addr = info->ide_addr;
288 ap->ioaddr.ctl_addr = info->alt_addr + 0x06;
289 ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
290
291 ata_sff_std_ports(&ap->ioaddr);
292
293 ata_port_desc(ap, "mmio cmd 0x%llx ctl 0x%llx",
294 (unsigned long long)mem_res->start + CF_IDE_OFFSET,
295 (unsigned long long)mem_res->start + CF_ALT_IDE_OFFSET);
296
297 host->private_data = info;
298
299 return ata_host_activate(host, irq ? gpio_to_irq(irq) : 0,
300 irq ? ata_sff_interrupt : NULL,
301 irq_flags, &pata_at91_sht);
302
303err_alt_ioremap:
304 devm_iounmap(dev, info->ide_addr);
305
306err_ide_ioremap:
307 kfree(info);
308
309 return ret;
310}
311
312static int __devexit pata_at91_remove(struct platform_device *pdev)
313{
314 struct ata_host *host = dev_get_drvdata(&pdev->dev);
315 struct at91_ide_info *info = host->private_data;
316 struct device *dev = &pdev->dev;
317
318 if (!host)
319 return 0;
320
321 ata_host_detach(host);
322
323 if (!info)
324 return 0;
325
326 devm_iounmap(dev, info->ide_addr);
327 devm_iounmap(dev, info->alt_addr);
328
329 kfree(info);
330 return 0;
331}
332
333static struct platform_driver pata_at91_driver = {
334 .probe = pata_at91_probe,
335 .remove = __devexit_p(pata_at91_remove),
336 .driver = {
337 .name = DRV_NAME,
338 .owner = THIS_MODULE,
339 },
340};
341
342static int __init pata_at91_init(void)
343{
344 return platform_driver_register(&pata_at91_driver);
345}
346
347static void __exit pata_at91_exit(void)
348{
349 platform_driver_unregister(&pata_at91_driver);
350}
351
352
353module_init(pata_at91_init);
354module_exit(pata_at91_exit);
355
356
357MODULE_LICENSE("GPL");
358MODULE_DESCRIPTION("Driver for CF in True IDE mode on AT91SAM9260 SoC");
359MODULE_AUTHOR("Matyukevich Sergey");
360MODULE_VERSION(DRV_VERSION);
361
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 36b8629203be..94eaa432c40a 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1378,6 +1378,37 @@ static int sata_fsl_remove(struct of_device *ofdev)
1378 return 0; 1378 return 0;
1379} 1379}
1380 1380
1381#ifdef CONFIG_PM
1382static int sata_fsl_suspend(struct of_device *op, pm_message_t state)
1383{
1384 struct ata_host *host = dev_get_drvdata(&op->dev);
1385 return ata_host_suspend(host, state);
1386}
1387
1388static int sata_fsl_resume(struct of_device *op)
1389{
1390 struct ata_host *host = dev_get_drvdata(&op->dev);
1391 struct sata_fsl_host_priv *host_priv = host->private_data;
1392 int ret;
1393 void __iomem *hcr_base = host_priv->hcr_base;
1394 struct ata_port *ap = host->ports[0];
1395 struct sata_fsl_port_priv *pp = ap->private_data;
1396
1397 ret = sata_fsl_init_controller(host);
1398 if (ret) {
1399 dev_printk(KERN_ERR, &op->dev,
1400 "Error initialize hardware\n");
1401 return ret;
1402 }
1403
1404 /* Recovery the CHBA register in host controller cmd register set */
1405 iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
1406
1407 ata_host_resume(host);
1408 return 0;
1409}
1410#endif
1411
1381static struct of_device_id fsl_sata_match[] = { 1412static struct of_device_id fsl_sata_match[] = {
1382 { 1413 {
1383 .compatible = "fsl,pq-sata", 1414 .compatible = "fsl,pq-sata",
@@ -1392,6 +1423,10 @@ static struct of_platform_driver fsl_sata_driver = {
1392 .match_table = fsl_sata_match, 1423 .match_table = fsl_sata_match,
1393 .probe = sata_fsl_probe, 1424 .probe = sata_fsl_probe,
1394 .remove = sata_fsl_remove, 1425 .remove = sata_fsl_remove,
1426#ifdef CONFIG_PM
1427 .suspend = sata_fsl_suspend,
1428 .resume = sata_fsl_resume,
1429#endif
1395}; 1430};
1396 1431
1397static int __init sata_fsl_init(void) 1432static int __init sata_fsl_init(void)
diff --git a/drivers/char/bfin_jtag_comm.c b/drivers/char/bfin_jtag_comm.c
index 44c113d56045..1d7c34c73b20 100644
--- a/drivers/char/bfin_jtag_comm.c
+++ b/drivers/char/bfin_jtag_comm.c
@@ -8,6 +8,10 @@
8 * Licensed under the GPL-2 or later. 8 * Licensed under the GPL-2 or later.
9 */ 9 */
10 10
11#define DRV_NAME "bfin-jtag-comm"
12#define DEV_NAME "ttyBFJC"
13#define pr_fmt(fmt) DRV_NAME ": " fmt
14
11#include <linux/circ_buf.h> 15#include <linux/circ_buf.h>
12#include <linux/console.h> 16#include <linux/console.h>
13#include <linux/delay.h> 17#include <linux/delay.h>
@@ -22,18 +26,14 @@
22#include <linux/tty_flip.h> 26#include <linux/tty_flip.h>
23#include <asm/atomic.h> 27#include <asm/atomic.h>
24 28
29#define pr_init(fmt, args...) ({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); })
30
25/* See the Debug/Emulation chapter in the HRM */ 31/* See the Debug/Emulation chapter in the HRM */
26#define EMUDOF 0x00000001 /* EMUDAT_OUT full & valid */ 32#define EMUDOF 0x00000001 /* EMUDAT_OUT full & valid */
27#define EMUDIF 0x00000002 /* EMUDAT_IN full & valid */ 33#define EMUDIF 0x00000002 /* EMUDAT_IN full & valid */
28#define EMUDOOVF 0x00000004 /* EMUDAT_OUT overflow */ 34#define EMUDOOVF 0x00000004 /* EMUDAT_OUT overflow */
29#define EMUDIOVF 0x00000008 /* EMUDAT_IN overflow */ 35#define EMUDIOVF 0x00000008 /* EMUDAT_IN overflow */
30 36
31#define DRV_NAME "bfin-jtag-comm"
32#define DEV_NAME "ttyBFJC"
33
34#define pr_init(fmt, args...) ({ static const __initdata char __fmt[] = fmt; printk(__fmt, ## args); })
35#define debug(fmt, args...) pr_debug(DRV_NAME ": " fmt, ## args)
36
37static inline uint32_t bfin_write_emudat(uint32_t emudat) 37static inline uint32_t bfin_write_emudat(uint32_t emudat)
38{ 38{
39 __asm__ __volatile__("emudat = %0;" : : "d"(emudat)); 39 __asm__ __volatile__("emudat = %0;" : : "d"(emudat));
@@ -74,7 +74,7 @@ bfin_jc_emudat_manager(void *arg)
74 while (!kthread_should_stop()) { 74 while (!kthread_should_stop()) {
75 /* no one left to give data to, so sleep */ 75 /* no one left to give data to, so sleep */
76 if (bfin_jc_tty == NULL && circ_empty(&bfin_jc_write_buf)) { 76 if (bfin_jc_tty == NULL && circ_empty(&bfin_jc_write_buf)) {
77 debug("waiting for readers\n"); 77 pr_debug("waiting for readers\n");
78 __set_current_state(TASK_UNINTERRUPTIBLE); 78 __set_current_state(TASK_UNINTERRUPTIBLE);
79 schedule(); 79 schedule();
80 __set_current_state(TASK_RUNNING); 80 __set_current_state(TASK_RUNNING);
@@ -82,7 +82,7 @@ bfin_jc_emudat_manager(void *arg)
82 82
83 /* no data available, so just chill */ 83 /* no data available, so just chill */
84 if (!(bfin_read_DBGSTAT() & EMUDIF) && circ_empty(&bfin_jc_write_buf)) { 84 if (!(bfin_read_DBGSTAT() & EMUDIF) && circ_empty(&bfin_jc_write_buf)) {
85 debug("waiting for data (in_len = %i) (circ: %i %i)\n", 85 pr_debug("waiting for data (in_len = %i) (circ: %i %i)\n",
86 inbound_len, bfin_jc_write_buf.tail, bfin_jc_write_buf.head); 86 inbound_len, bfin_jc_write_buf.tail, bfin_jc_write_buf.head);
87 if (inbound_len) 87 if (inbound_len)
88 schedule(); 88 schedule();
@@ -99,11 +99,11 @@ bfin_jc_emudat_manager(void *arg)
99 if (tty != NULL) { 99 if (tty != NULL) {
100 uint32_t emudat = bfin_read_emudat(); 100 uint32_t emudat = bfin_read_emudat();
101 if (inbound_len == 0) { 101 if (inbound_len == 0) {
102 debug("incoming length: 0x%08x\n", emudat); 102 pr_debug("incoming length: 0x%08x\n", emudat);
103 inbound_len = emudat; 103 inbound_len = emudat;
104 } else { 104 } else {
105 size_t num_chars = (4 <= inbound_len ? 4 : inbound_len); 105 size_t num_chars = (4 <= inbound_len ? 4 : inbound_len);
106 debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars); 106 pr_debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
107 inbound_len -= num_chars; 107 inbound_len -= num_chars;
108 tty_insert_flip_string(tty, (unsigned char *)&emudat, num_chars); 108 tty_insert_flip_string(tty, (unsigned char *)&emudat, num_chars);
109 tty_flip_buffer_push(tty); 109 tty_flip_buffer_push(tty);
@@ -117,7 +117,7 @@ bfin_jc_emudat_manager(void *arg)
117 if (outbound_len == 0) { 117 if (outbound_len == 0) {
118 outbound_len = circ_cnt(&bfin_jc_write_buf); 118 outbound_len = circ_cnt(&bfin_jc_write_buf);
119 bfin_write_emudat(outbound_len); 119 bfin_write_emudat(outbound_len);
120 debug("outgoing length: 0x%08x\n", outbound_len); 120 pr_debug("outgoing length: 0x%08x\n", outbound_len);
121 } else { 121 } else {
122 struct tty_struct *tty; 122 struct tty_struct *tty;
123 int tail = bfin_jc_write_buf.tail; 123 int tail = bfin_jc_write_buf.tail;
@@ -136,7 +136,7 @@ bfin_jc_emudat_manager(void *arg)
136 if (tty) 136 if (tty)
137 tty_wakeup(tty); 137 tty_wakeup(tty);
138 mutex_unlock(&bfin_jc_tty_mutex); 138 mutex_unlock(&bfin_jc_tty_mutex);
139 debug(" outgoing data: 0x%08x (pushing %zu)\n", emudat, ate); 139 pr_debug(" outgoing data: 0x%08x (pushing %zu)\n", emudat, ate);
140 } 140 }
141 } 141 }
142 } 142 }
@@ -149,7 +149,7 @@ static int
149bfin_jc_open(struct tty_struct *tty, struct file *filp) 149bfin_jc_open(struct tty_struct *tty, struct file *filp)
150{ 150{
151 mutex_lock(&bfin_jc_tty_mutex); 151 mutex_lock(&bfin_jc_tty_mutex);
152 debug("open %lu\n", bfin_jc_count); 152 pr_debug("open %lu\n", bfin_jc_count);
153 ++bfin_jc_count; 153 ++bfin_jc_count;
154 bfin_jc_tty = tty; 154 bfin_jc_tty = tty;
155 wake_up_process(bfin_jc_kthread); 155 wake_up_process(bfin_jc_kthread);
@@ -161,7 +161,7 @@ static void
161bfin_jc_close(struct tty_struct *tty, struct file *filp) 161bfin_jc_close(struct tty_struct *tty, struct file *filp)
162{ 162{
163 mutex_lock(&bfin_jc_tty_mutex); 163 mutex_lock(&bfin_jc_tty_mutex);
164 debug("close %lu\n", bfin_jc_count); 164 pr_debug("close %lu\n", bfin_jc_count);
165 if (--bfin_jc_count == 0) 165 if (--bfin_jc_count == 0)
166 bfin_jc_tty = NULL; 166 bfin_jc_tty = NULL;
167 wake_up_process(bfin_jc_kthread); 167 wake_up_process(bfin_jc_kthread);
@@ -174,7 +174,7 @@ bfin_jc_circ_write(const unsigned char *buf, int count)
174{ 174{
175 int i; 175 int i;
176 count = min(count, circ_free(&bfin_jc_write_buf)); 176 count = min(count, circ_free(&bfin_jc_write_buf));
177 debug("going to write chunk of %i bytes\n", count); 177 pr_debug("going to write chunk of %i bytes\n", count);
178 for (i = 0; i < count; ++i) 178 for (i = 0; i < count; ++i)
179 circ_byte(&bfin_jc_write_buf, bfin_jc_write_buf.head + i) = buf[i]; 179 circ_byte(&bfin_jc_write_buf, bfin_jc_write_buf.head + i) = buf[i];
180 bfin_jc_write_buf.head += i; 180 bfin_jc_write_buf.head += i;
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 6799588b0099..65b6ff2442c6 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -1189,11 +1189,6 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
1189 return -ENODEV; 1189 return -ENODEV;
1190 } 1190 }
1191 1191
1192 if (port % MAX_PORTS_PER_BOARD >= brd->numPorts) {
1193 retval = -ENODEV;
1194 goto out_unlock;
1195 }
1196
1197 ch = &brd->ports[port % MAX_PORTS_PER_BOARD]; 1192 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
1198 ch->port.count++; 1193 ch->port.count++;
1199 tty->driver_data = ch; 1194 tty->driver_data = ch;
@@ -1218,8 +1213,8 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
1218 moxa_close_port(tty); 1213 moxa_close_port(tty);
1219 } else 1214 } else
1220 ch->port.flags |= ASYNC_NORMAL_ACTIVE; 1215 ch->port.flags |= ASYNC_NORMAL_ACTIVE;
1221out_unlock:
1222 mutex_unlock(&moxa_openlock); 1216 mutex_unlock(&moxa_openlock);
1217
1223 return retval; 1218 return retval;
1224} 1219}
1225 1220
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c
index 461ece591a5b..1c43c8cdee25 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/char/n_hdlc.c
@@ -10,7 +10,6 @@
10 * Paul Mackerras <Paul.Mackerras@cs.anu.edu.au> 10 * Paul Mackerras <Paul.Mackerras@cs.anu.edu.au>
11 * 11 *
12 * Original release 01/11/99 12 * Original release 01/11/99
13 * $Id: n_hdlc.c,v 4.8 2003/05/06 21:18:51 paulkf Exp $
14 * 13 *
15 * This code is released under the GNU General Public License (GPL) 14 * This code is released under the GNU General Public License (GPL)
16 * 15 *
@@ -79,7 +78,6 @@
79 */ 78 */
80 79
81#define HDLC_MAGIC 0x239e 80#define HDLC_MAGIC 0x239e
82#define HDLC_VERSION "$Revision: 4.8 $"
83 81
84#include <linux/module.h> 82#include <linux/module.h>
85#include <linux/init.h> 83#include <linux/init.h>
@@ -114,7 +112,7 @@
114#define MAX_HDLC_FRAME_SIZE 65535 112#define MAX_HDLC_FRAME_SIZE 65535
115#define DEFAULT_RX_BUF_COUNT 10 113#define DEFAULT_RX_BUF_COUNT 10
116#define MAX_RX_BUF_COUNT 60 114#define MAX_RX_BUF_COUNT 60
117#define DEFAULT_TX_BUF_COUNT 1 115#define DEFAULT_TX_BUF_COUNT 3
118 116
119struct n_hdlc_buf { 117struct n_hdlc_buf {
120 struct n_hdlc_buf *link; 118 struct n_hdlc_buf *link;
@@ -199,6 +197,31 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty);
199#define tty2n_hdlc(tty) ((struct n_hdlc *) ((tty)->disc_data)) 197#define tty2n_hdlc(tty) ((struct n_hdlc *) ((tty)->disc_data))
200#define n_hdlc2tty(n_hdlc) ((n_hdlc)->tty) 198#define n_hdlc2tty(n_hdlc) ((n_hdlc)->tty)
201 199
200static void flush_rx_queue(struct tty_struct *tty)
201{
202 struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
203 struct n_hdlc_buf *buf;
204
205 while ((buf = n_hdlc_buf_get(&n_hdlc->rx_buf_list)))
206 n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, buf);
207}
208
209static void flush_tx_queue(struct tty_struct *tty)
210{
211 struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
212 struct n_hdlc_buf *buf;
213 unsigned long flags;
214
215 while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
216 n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
217 spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
218 if (n_hdlc->tbuf) {
219 n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
220 n_hdlc->tbuf = NULL;
221 }
222 spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
223}
224
202static struct tty_ldisc_ops n_hdlc_ldisc = { 225static struct tty_ldisc_ops n_hdlc_ldisc = {
203 .owner = THIS_MODULE, 226 .owner = THIS_MODULE,
204 .magic = TTY_LDISC_MAGIC, 227 .magic = TTY_LDISC_MAGIC,
@@ -211,6 +234,7 @@ static struct tty_ldisc_ops n_hdlc_ldisc = {
211 .poll = n_hdlc_tty_poll, 234 .poll = n_hdlc_tty_poll,
212 .receive_buf = n_hdlc_tty_receive, 235 .receive_buf = n_hdlc_tty_receive,
213 .write_wakeup = n_hdlc_tty_wakeup, 236 .write_wakeup = n_hdlc_tty_wakeup,
237 .flush_buffer = flush_rx_queue,
214}; 238};
215 239
216/** 240/**
@@ -341,10 +365,7 @@ static int n_hdlc_tty_open (struct tty_struct *tty)
341 set_bit(TTY_NO_WRITE_SPLIT,&tty->flags); 365 set_bit(TTY_NO_WRITE_SPLIT,&tty->flags);
342#endif 366#endif
343 367
344 /* Flush any pending characters in the driver and discipline. */ 368 /* flush receive data from driver */
345 if (tty->ldisc->ops->flush_buffer)
346 tty->ldisc->ops->flush_buffer(tty);
347
348 tty_driver_flush_buffer(tty); 369 tty_driver_flush_buffer(tty);
349 370
350 if (debuglevel >= DEBUG_LEVEL_INFO) 371 if (debuglevel >= DEBUG_LEVEL_INFO)
@@ -763,6 +784,14 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
763 error = put_user(count, (int __user *)arg); 784 error = put_user(count, (int __user *)arg);
764 break; 785 break;
765 786
787 case TCFLSH:
788 switch (arg) {
789 case TCIOFLUSH:
790 case TCOFLUSH:
791 flush_tx_queue(tty);
792 }
793 /* fall through to default */
794
766 default: 795 default:
767 error = n_tty_ioctl_helper(tty, file, cmd, arg); 796 error = n_tty_ioctl_helper(tty, file, cmd, arg);
768 break; 797 break;
@@ -919,8 +948,7 @@ static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list)
919} /* end of n_hdlc_buf_get() */ 948} /* end of n_hdlc_buf_get() */
920 949
921static char hdlc_banner[] __initdata = 950static char hdlc_banner[] __initdata =
922 KERN_INFO "HDLC line discipline: version " HDLC_VERSION 951 KERN_INFO "HDLC line discipline maxframe=%u\n";
923 ", maxframe=%u\n";
924static char hdlc_register_ok[] __initdata = 952static char hdlc_register_ok[] __initdata =
925 KERN_INFO "N_HDLC line discipline registered.\n"; 953 KERN_INFO "N_HDLC line discipline registered.\n";
926static char hdlc_register_fail[] __initdata = 954static char hdlc_register_fail[] __initdata =
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index d2e93e343226..2e99158ebb8a 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -1062,7 +1062,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
1062 struct r3964_client_info *pClient; 1062 struct r3964_client_info *pClient;
1063 struct r3964_message *pMsg; 1063 struct r3964_message *pMsg;
1064 struct r3964_client_message theMsg; 1064 struct r3964_client_message theMsg;
1065 int count; 1065 int ret;
1066 1066
1067 TRACE_L("read()"); 1067 TRACE_L("read()");
1068 1068
@@ -1074,8 +1074,8 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
1074 if (pMsg == NULL) { 1074 if (pMsg == NULL) {
1075 /* no messages available. */ 1075 /* no messages available. */
1076 if (file->f_flags & O_NONBLOCK) { 1076 if (file->f_flags & O_NONBLOCK) {
1077 unlock_kernel(); 1077 ret = -EAGAIN;
1078 return -EAGAIN; 1078 goto unlock;
1079 } 1079 }
1080 /* block until there is a message: */ 1080 /* block until there is a message: */
1081 wait_event_interruptible(pInfo->read_wait, 1081 wait_event_interruptible(pInfo->read_wait,
@@ -1085,29 +1085,31 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
1085 /* If we still haven't got a message, we must have been signalled */ 1085 /* If we still haven't got a message, we must have been signalled */
1086 1086
1087 if (!pMsg) { 1087 if (!pMsg) {
1088 unlock_kernel(); 1088 ret = -EINTR;
1089 return -EINTR; 1089 goto unlock;
1090 } 1090 }
1091 1091
1092 /* deliver msg to client process: */ 1092 /* deliver msg to client process: */
1093 theMsg.msg_id = pMsg->msg_id; 1093 theMsg.msg_id = pMsg->msg_id;
1094 theMsg.arg = pMsg->arg; 1094 theMsg.arg = pMsg->arg;
1095 theMsg.error_code = pMsg->error_code; 1095 theMsg.error_code = pMsg->error_code;
1096 count = sizeof(struct r3964_client_message); 1096 ret = sizeof(struct r3964_client_message);
1097 1097
1098 kfree(pMsg); 1098 kfree(pMsg);
1099 TRACE_M("r3964_read - msg kfree %p", pMsg); 1099 TRACE_M("r3964_read - msg kfree %p", pMsg);
1100 1100
1101 if (copy_to_user(buf, &theMsg, count)) { 1101 if (copy_to_user(buf, &theMsg, ret)) {
1102 unlock_kernel(); 1102 ret = -EFAULT;
1103 return -EFAULT; 1103 goto unlock;
1104 } 1104 }
1105 1105
1106 TRACE_PS("read - return %d", count); 1106 TRACE_PS("read - return %d", ret);
1107 return count; 1107 goto unlock;
1108 } 1108 }
1109 ret = -EPERM;
1110unlock:
1109 unlock_kernel(); 1111 unlock_kernel();
1110 return -EPERM; 1112 return ret;
1111} 1113}
1112 1114
1113static ssize_t r3964_write(struct tty_struct *tty, struct file *file, 1115static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index dbb912574569..881934c068c8 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1575,7 +1575,8 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1575 clear_bit(LOCK_IO, &dev->flags); 1575 clear_bit(LOCK_IO, &dev->flags);
1576 wake_up_interruptible(&dev->ioq); 1576 wake_up_interruptible(&dev->ioq);
1577 1577
1578 return 0; 1578 rc = 0;
1579 break;
1579 case CM_IOCSPTS: 1580 case CM_IOCSPTS:
1580 { 1581 {
1581 struct ptsreq krnptsreq; 1582 struct ptsreq krnptsreq;
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index e6ce632a393e..7539bed0f7e0 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -396,7 +396,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
396 kbd = kbd_table + console; 396 kbd = kbd_table + console;
397 switch (cmd) { 397 switch (cmd) {
398 case TIOCLINUX: 398 case TIOCLINUX:
399 return tioclinux(tty, arg); 399 ret = tioclinux(tty, arg);
400 break;
400 case KIOCSOUND: 401 case KIOCSOUND:
401 if (!perm) 402 if (!perm)
402 goto eperm; 403 goto eperm;
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 87f92c39b5f0..a9952b1236b0 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -18,9 +18,22 @@
18#include <linux/percpu.h> 18#include <linux/percpu.h>
19#include <linux/smp.h> 19#include <linux/smp.h>
20#include <asm/byteorder.h> 20#include <asm/byteorder.h>
21#include <asm/processor.h>
21#include <asm/i387.h> 22#include <asm/i387.h>
22#include "padlock.h" 23#include "padlock.h"
23 24
25/*
26 * Number of data blocks actually fetched for each xcrypt insn.
27 * Processors with prefetch errata will fetch extra blocks.
28 */
29static unsigned int ecb_fetch_blocks = 2;
30#define MAX_ECB_FETCH_BLOCKS (8)
31#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
32
33static unsigned int cbc_fetch_blocks = 1;
34#define MAX_CBC_FETCH_BLOCKS (4)
35#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
36
24/* Control word. */ 37/* Control word. */
25struct cword { 38struct cword {
26 unsigned int __attribute__ ((__packed__)) 39 unsigned int __attribute__ ((__packed__))
@@ -172,73 +185,111 @@ static inline void padlock_store_cword(struct cword *cword)
172 * should be used only inside the irq_ts_save/restore() context 185 * should be used only inside the irq_ts_save/restore() context
173 */ 186 */
174 187
175static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, 188static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
176 struct cword *control_word) 189 struct cword *control_word, int count)
177{ 190{
178 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ 191 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
179 : "+S"(input), "+D"(output) 192 : "+S"(input), "+D"(output)
180 : "d"(control_word), "b"(key), "c"(1)); 193 : "d"(control_word), "b"(key), "c"(count));
194}
195
196static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
197 u8 *iv, struct cword *control_word, int count)
198{
199 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
200 : "+S" (input), "+D" (output), "+a" (iv)
201 : "d" (control_word), "b" (key), "c" (count));
202 return iv;
181} 203}
182 204
183static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword) 205static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
206 struct cword *cword, int count)
184{ 207{
185 u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1]; 208 /*
209 * Padlock prefetches extra data so we must provide mapped input buffers.
210 * Assume there are at least 16 bytes of stack already in use.
211 */
212 u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
213 u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
214
215 memcpy(tmp, in, count * AES_BLOCK_SIZE);
216 rep_xcrypt_ecb(tmp, out, key, cword, count);
217}
218
219static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
220 u8 *iv, struct cword *cword, int count)
221{
222 /*
223 * Padlock prefetches extra data so we must provide mapped input buffers.
224 * Assume there are at least 16 bytes of stack already in use.
225 */
226 u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
186 u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); 227 u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
187 228
188 memcpy(tmp, in, AES_BLOCK_SIZE); 229 memcpy(tmp, in, count * AES_BLOCK_SIZE);
189 padlock_xcrypt(tmp, out, key, cword); 230 return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
190} 231}
191 232
192static inline void aes_crypt(const u8 *in, u8 *out, u32 *key, 233static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
193 struct cword *cword) 234 struct cword *cword, int count)
194{ 235{
195 /* padlock_xcrypt requires at least two blocks of data. */ 236 /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
196 if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) & 237 * We could avoid some copying here but it's probably not worth it.
197 (PAGE_SIZE - 1)))) { 238 */
198 aes_crypt_copy(in, out, key, cword); 239 if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes > PAGE_SIZE)) {
240 ecb_crypt_copy(in, out, key, cword, count);
199 return; 241 return;
200 } 242 }
201 243
202 padlock_xcrypt(in, out, key, cword); 244 rep_xcrypt_ecb(in, out, key, cword, count);
245}
246
247static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
248 u8 *iv, struct cword *cword, int count)
249{
250 /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
251 if (unlikely(((unsigned long)in & PAGE_SIZE) + cbc_fetch_bytes > PAGE_SIZE))
252 return cbc_crypt_copy(in, out, key, iv, cword, count);
253
254 return rep_xcrypt_cbc(in, out, key, iv, cword, count);
203} 255}
204 256
205static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, 257static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
206 void *control_word, u32 count) 258 void *control_word, u32 count)
207{ 259{
208 if (count == 1) { 260 u32 initial = count & (ecb_fetch_blocks - 1);
209 aes_crypt(input, output, key, control_word); 261
262 if (count < ecb_fetch_blocks) {
263 ecb_crypt(input, output, key, control_word, count);
210 return; 264 return;
211 } 265 }
212 266
213 asm volatile ("test $1, %%cl;" 267 if (initial)
214 "je 1f;" 268 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
215#ifndef CONFIG_X86_64 269 : "+S"(input), "+D"(output)
216 "lea -1(%%ecx), %%eax;" 270 : "d"(control_word), "b"(key), "c"(initial));
217 "mov $1, %%ecx;" 271
218#else 272 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
219 "lea -1(%%rcx), %%rax;"
220 "mov $1, %%rcx;"
221#endif
222 ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */
223#ifndef CONFIG_X86_64
224 "mov %%eax, %%ecx;"
225#else
226 "mov %%rax, %%rcx;"
227#endif
228 "1:"
229 ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
230 : "+S"(input), "+D"(output) 273 : "+S"(input), "+D"(output)
231 : "d"(control_word), "b"(key), "c"(count) 274 : "d"(control_word), "b"(key), "c"(count - initial));
232 : "ax");
233} 275}
234 276
235static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, 277static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
236 u8 *iv, void *control_word, u32 count) 278 u8 *iv, void *control_word, u32 count)
237{ 279{
238 /* rep xcryptcbc */ 280 u32 initial = count & (cbc_fetch_blocks - 1);
239 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" 281
282 if (count < cbc_fetch_blocks)
283 return cbc_crypt(input, output, key, iv, control_word, count);
284
285 if (initial)
286 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
287 : "+S" (input), "+D" (output), "+a" (iv)
288 : "d" (control_word), "b" (key), "c" (count));
289
290 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
240 : "+S" (input), "+D" (output), "+a" (iv) 291 : "+S" (input), "+D" (output), "+a" (iv)
241 : "d" (control_word), "b" (key), "c" (count)); 292 : "d" (control_word), "b" (key), "c" (count-initial));
242 return iv; 293 return iv;
243} 294}
244 295
@@ -249,7 +300,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
249 300
250 padlock_reset_key(&ctx->cword.encrypt); 301 padlock_reset_key(&ctx->cword.encrypt);
251 ts_state = irq_ts_save(); 302 ts_state = irq_ts_save();
252 aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); 303 ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
253 irq_ts_restore(ts_state); 304 irq_ts_restore(ts_state);
254 padlock_store_cword(&ctx->cword.encrypt); 305 padlock_store_cword(&ctx->cword.encrypt);
255} 306}
@@ -261,7 +312,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
261 312
262 padlock_reset_key(&ctx->cword.encrypt); 313 padlock_reset_key(&ctx->cword.encrypt);
263 ts_state = irq_ts_save(); 314 ts_state = irq_ts_save();
264 aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); 315 ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
265 irq_ts_restore(ts_state); 316 irq_ts_restore(ts_state);
266 padlock_store_cword(&ctx->cword.encrypt); 317 padlock_store_cword(&ctx->cword.encrypt);
267} 318}
@@ -454,6 +505,7 @@ static struct crypto_alg cbc_aes_alg = {
454static int __init padlock_init(void) 505static int __init padlock_init(void)
455{ 506{
456 int ret; 507 int ret;
508 struct cpuinfo_x86 *c = &cpu_data(0);
457 509
458 if (!cpu_has_xcrypt) { 510 if (!cpu_has_xcrypt) {
459 printk(KERN_NOTICE PFX "VIA PadLock not detected.\n"); 511 printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
@@ -476,6 +528,12 @@ static int __init padlock_init(void)
476 528
477 printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); 529 printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
478 530
531 if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
532 ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
533 cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
534 printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
535 }
536
479out: 537out:
480 return ret; 538 return ret;
481 539
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 450902438208..13efcd362072 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -1,28 +1,29 @@
1comment "A new alternative FireWire stack is available with EXPERIMENTAL=y" 1comment "You can enable one or both FireWire driver stacks."
2 depends on EXPERIMENTAL=n 2comment "See the help texts for more information."
3
4comment "Enable only one of the two stacks, unless you know what you are doing"
5 depends on EXPERIMENTAL
6 3
7config FIREWIRE 4config FIREWIRE
8 tristate "New FireWire stack, EXPERIMENTAL" 5 tristate "FireWire driver stack"
9 depends on EXPERIMENTAL
10 select CRC_ITU_T 6 select CRC_ITU_T
11 help 7 help
12 This is the "Juju" FireWire stack, a new alternative implementation 8 This is the new-generation IEEE 1394 (FireWire) driver stack
13 designed for robustness and simplicity. You can build either this 9 a.k.a. Juju, a new implementation designed for robustness and
14 stack, or the old stack (the ieee1394 driver, ohci1394 etc.) or both. 10 simplicity.
15 Please read http://ieee1394.wiki.kernel.org/index.php/Juju_Migration 11 See http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
16 before you enable the new stack. 12 for information about migration from the older Linux 1394 stack
13 to the new driver stack.
17 14
18 To compile this driver as a module, say M here: the module will be 15 To compile this driver as a module, say M here: the module will be
19 called firewire-core. 16 called firewire-core.
20 17
21 This module functionally replaces ieee1394, raw1394, and video1394. 18 This module functionally replaces ieee1394, raw1394, and video1394.
22 To access it from application programs, you generally need at least 19 To access it from application programs, you generally need at least
23 libraw1394 version 2. IIDC/DCAM applications also need libdc1394 20 libraw1394 v2. IIDC/DCAM applications need libdc1394 v2.
24 version 2. No libraries are required to access storage devices 21 No libraries are required to access storage devices through the
25 through the firewire-sbp2 driver. 22 firewire-sbp2 driver.
23
24 NOTE:
25 FireWire audio devices currently require the old drivers (ieee1394,
26 ohci1394, raw1394).
26 27
27config FIREWIRE_OHCI 28config FIREWIRE_OHCI
28 tristate "OHCI-1394 controllers" 29 tristate "OHCI-1394 controllers"
@@ -37,11 +38,9 @@ config FIREWIRE_OHCI
37 stack. 38 stack.
38 39
39 NOTE: 40 NOTE:
40 41 If you want to install firewire-ohci and ohci1394 together, you
41 You should only build either firewire-ohci or the old ohci1394 driver, 42 should configure them only as modules and blacklist the driver(s)
42 but not both. If you nevertheless want to install both, you should 43 which you don't want to have auto-loaded. Add either
43 configure them only as modules and blacklist the driver(s) which you
44 don't want to have auto-loaded. Add either
45 44
46 blacklist firewire-ohci 45 blacklist firewire-ohci
47 or 46 or
@@ -50,12 +49,7 @@ config FIREWIRE_OHCI
50 blacklist dv1394 49 blacklist dv1394
51 50
52 to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf 51 to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
53 depending on your distribution. The latter two modules should be 52 depending on your distribution.
54 blacklisted together with ohci1394 because they depend on ohci1394.
55
56 If you have an old modprobe which doesn't implement the blacklist
57 directive, use "install modulename /bin/true" for the modules to be
58 blacklisted.
59 53
60config FIREWIRE_OHCI_DEBUG 54config FIREWIRE_OHCI_DEBUG
61 bool 55 bool
@@ -77,3 +71,17 @@ config FIREWIRE_SBP2
77 71
78 You should also enable support for disks, CD-ROMs, etc. in the SCSI 72 You should also enable support for disks, CD-ROMs, etc. in the SCSI
79 configuration section. 73 configuration section.
74
75config FIREWIRE_NET
76 tristate "IP networking over 1394 (EXPERIMENTAL)"
77 depends on FIREWIRE && INET && EXPERIMENTAL
78 help
79 This enables IPv4 over IEEE 1394, providing IP connectivity with
80 other implementations of RFC 2734 as found on several operating
81 systems. Multicast support is currently limited.
82
83 NOTE, this driver is not stable yet!
84
85 To compile this driver as a module, say M here: The module will be
86 called firewire-net. It replaces eth1394 of the classic IEEE 1394
87 stack.
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index bc3b9bf822bf..a8f9bb6d9fdf 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -6,7 +6,9 @@ firewire-core-y += core-card.o core-cdev.o core-device.o \
6 core-iso.o core-topology.o core-transaction.o 6 core-iso.o core-topology.o core-transaction.o
7firewire-ohci-y += ohci.o 7firewire-ohci-y += ohci.o
8firewire-sbp2-y += sbp2.o 8firewire-sbp2-y += sbp2.o
9firewire-net-y += net.o
9 10
10obj-$(CONFIG_FIREWIRE) += firewire-core.o 11obj-$(CONFIG_FIREWIRE) += firewire-core.o
11obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o 12obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
12obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o 13obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o
14obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 4c1be64fdddd..543fccac81bb 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -176,6 +176,7 @@ int fw_core_add_descriptor(struct fw_descriptor *desc)
176 176
177 return 0; 177 return 0;
178} 178}
179EXPORT_SYMBOL(fw_core_add_descriptor);
179 180
180void fw_core_remove_descriptor(struct fw_descriptor *desc) 181void fw_core_remove_descriptor(struct fw_descriptor *desc)
181{ 182{
@@ -189,6 +190,7 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
189 190
190 mutex_unlock(&card_mutex); 191 mutex_unlock(&card_mutex);
191} 192}
193EXPORT_SYMBOL(fw_core_remove_descriptor);
192 194
193static void allocate_broadcast_channel(struct fw_card *card, int generation) 195static void allocate_broadcast_channel(struct fw_card *card, int generation)
194{ 196{
@@ -459,11 +461,11 @@ EXPORT_SYMBOL(fw_card_add);
459 461
460 462
461/* 463/*
462 * The next few functions implements a dummy driver that use once a 464 * The next few functions implement a dummy driver that is used once a card
463 * card driver shuts down an fw_card. This allows the driver to 465 * driver shuts down an fw_card. This allows the driver to cleanly unload,
464 * cleanly unload, as all IO to the card will be handled by the dummy 466 * as all IO to the card will be handled (and failed) by the dummy driver
465 * driver instead of calling into the (possibly) unloaded module. The 467 * instead of calling into the module. Only functions for iso context
466 * dummy driver just fails all IO. 468 * shutdown still need to be provided by the card driver.
467 */ 469 */
468 470
469static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length) 471static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
@@ -510,7 +512,7 @@ static int dummy_enable_phys_dma(struct fw_card *card,
510 return -ENODEV; 512 return -ENODEV;
511} 513}
512 514
513static struct fw_card_driver dummy_driver = { 515static const struct fw_card_driver dummy_driver_template = {
514 .enable = dummy_enable, 516 .enable = dummy_enable,
515 .update_phy_reg = dummy_update_phy_reg, 517 .update_phy_reg = dummy_update_phy_reg,
516 .set_config_rom = dummy_set_config_rom, 518 .set_config_rom = dummy_set_config_rom,
@@ -529,6 +531,8 @@ void fw_card_release(struct kref *kref)
529 531
530void fw_core_remove_card(struct fw_card *card) 532void fw_core_remove_card(struct fw_card *card)
531{ 533{
534 struct fw_card_driver dummy_driver = dummy_driver_template;
535
532 card->driver->update_phy_reg(card, 4, 536 card->driver->update_phy_reg(card, 4,
533 PHY_LINK_ACTIVE | PHY_CONTENDER, 0); 537 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
534 fw_core_initiate_bus_reset(card, 1); 538 fw_core_initiate_bus_reset(card, 1);
@@ -537,7 +541,9 @@ void fw_core_remove_card(struct fw_card *card)
537 list_del_init(&card->link); 541 list_del_init(&card->link);
538 mutex_unlock(&card_mutex); 542 mutex_unlock(&card_mutex);
539 543
540 /* Set up the dummy driver. */ 544 /* Switch off most of the card driver interface. */
545 dummy_driver.free_iso_context = card->driver->free_iso_context;
546 dummy_driver.stop_iso = card->driver->stop_iso;
541 card->driver = &dummy_driver; 547 card->driver = &dummy_driver;
542 548
543 fw_destroy_nodes(card); 549 fw_destroy_nodes(card);
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 28076c892d7e..166f19c6d38d 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -71,7 +71,7 @@ int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
71 for (j = 0; j < i; j++) { 71 for (j = 0; j < i; j++) {
72 address = page_private(buffer->pages[j]); 72 address = page_private(buffer->pages[j]);
73 dma_unmap_page(card->device, address, 73 dma_unmap_page(card->device, address,
74 PAGE_SIZE, DMA_TO_DEVICE); 74 PAGE_SIZE, direction);
75 __free_page(buffer->pages[j]); 75 __free_page(buffer->pages[j]);
76 } 76 }
77 kfree(buffer->pages); 77 kfree(buffer->pages);
@@ -80,6 +80,7 @@ int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
80 80
81 return -ENOMEM; 81 return -ENOMEM;
82} 82}
83EXPORT_SYMBOL(fw_iso_buffer_init);
83 84
84int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma) 85int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
85{ 86{
@@ -107,13 +108,14 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
107 for (i = 0; i < buffer->page_count; i++) { 108 for (i = 0; i < buffer->page_count; i++) {
108 address = page_private(buffer->pages[i]); 109 address = page_private(buffer->pages[i]);
109 dma_unmap_page(card->device, address, 110 dma_unmap_page(card->device, address,
110 PAGE_SIZE, DMA_TO_DEVICE); 111 PAGE_SIZE, buffer->direction);
111 __free_page(buffer->pages[i]); 112 __free_page(buffer->pages[i]);
112 } 113 }
113 114
114 kfree(buffer->pages); 115 kfree(buffer->pages);
115 buffer->pages = NULL; 116 buffer->pages = NULL;
116} 117}
118EXPORT_SYMBOL(fw_iso_buffer_destroy);
117 119
118struct fw_iso_context *fw_iso_context_create(struct fw_card *card, 120struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
119 int type, int channel, int speed, size_t header_size, 121 int type, int channel, int speed, size_t header_size,
@@ -136,6 +138,7 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
136 138
137 return ctx; 139 return ctx;
138} 140}
141EXPORT_SYMBOL(fw_iso_context_create);
139 142
140void fw_iso_context_destroy(struct fw_iso_context *ctx) 143void fw_iso_context_destroy(struct fw_iso_context *ctx)
141{ 144{
@@ -143,12 +146,14 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx)
143 146
144 card->driver->free_iso_context(ctx); 147 card->driver->free_iso_context(ctx);
145} 148}
149EXPORT_SYMBOL(fw_iso_context_destroy);
146 150
147int fw_iso_context_start(struct fw_iso_context *ctx, 151int fw_iso_context_start(struct fw_iso_context *ctx,
148 int cycle, int sync, int tags) 152 int cycle, int sync, int tags)
149{ 153{
150 return ctx->card->driver->start_iso(ctx, cycle, sync, tags); 154 return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
151} 155}
156EXPORT_SYMBOL(fw_iso_context_start);
152 157
153int fw_iso_context_queue(struct fw_iso_context *ctx, 158int fw_iso_context_queue(struct fw_iso_context *ctx,
154 struct fw_iso_packet *packet, 159 struct fw_iso_packet *packet,
@@ -159,11 +164,13 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
159 164
160 return card->driver->queue_iso(ctx, packet, buffer, payload); 165 return card->driver->queue_iso(ctx, packet, buffer, payload);
161} 166}
167EXPORT_SYMBOL(fw_iso_context_queue);
162 168
163int fw_iso_context_stop(struct fw_iso_context *ctx) 169int fw_iso_context_stop(struct fw_iso_context *ctx)
164{ 170{
165 return ctx->card->driver->stop_iso(ctx); 171 return ctx->card->driver->stop_iso(ctx);
166} 172}
173EXPORT_SYMBOL(fw_iso_context_stop);
167 174
168/* 175/*
169 * Isochronous bus resource management (channels, bandwidth), client side 176 * Isochronous bus resource management (channels, bandwidth), client side
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0a25a7b38a80..c3cfc647e5e3 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -1,7 +1,6 @@
1#ifndef _FIREWIRE_CORE_H 1#ifndef _FIREWIRE_CORE_H
2#define _FIREWIRE_CORE_H 2#define _FIREWIRE_CORE_H
3 3
4#include <linux/dma-mapping.h>
5#include <linux/fs.h> 4#include <linux/fs.h>
6#include <linux/list.h> 5#include <linux/list.h>
7#include <linux/idr.h> 6#include <linux/idr.h>
@@ -97,17 +96,6 @@ int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
97int fw_compute_block_crc(u32 *block); 96int fw_compute_block_crc(u32 *block);
98void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); 97void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
99 98
100struct fw_descriptor {
101 struct list_head link;
102 size_t length;
103 u32 immediate;
104 u32 key;
105 const u32 *data;
106};
107
108int fw_core_add_descriptor(struct fw_descriptor *desc);
109void fw_core_remove_descriptor(struct fw_descriptor *desc);
110
111 99
112/* -cdev */ 100/* -cdev */
113 101
@@ -130,77 +118,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
130 118
131/* -iso */ 119/* -iso */
132 120
133/*
134 * The iso packet format allows for an immediate header/payload part
135 * stored in 'header' immediately after the packet info plus an
136 * indirect payload part that is pointer to by the 'payload' field.
137 * Applications can use one or the other or both to implement simple
138 * low-bandwidth streaming (e.g. audio) or more advanced
139 * scatter-gather streaming (e.g. assembling video frame automatically).
140 */
141struct fw_iso_packet {
142 u16 payload_length; /* Length of indirect payload. */
143 u32 interrupt:1; /* Generate interrupt on this packet */
144 u32 skip:1; /* Set to not send packet at all. */
145 u32 tag:2;
146 u32 sy:4;
147 u32 header_length:8; /* Length of immediate header. */
148 u32 header[0];
149};
150
151#define FW_ISO_CONTEXT_TRANSMIT 0
152#define FW_ISO_CONTEXT_RECEIVE 1
153
154#define FW_ISO_CONTEXT_MATCH_TAG0 1
155#define FW_ISO_CONTEXT_MATCH_TAG1 2
156#define FW_ISO_CONTEXT_MATCH_TAG2 4
157#define FW_ISO_CONTEXT_MATCH_TAG3 8
158#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
159
160/*
161 * An iso buffer is just a set of pages mapped for DMA in the
162 * specified direction. Since the pages are to be used for DMA, they
163 * are not mapped into the kernel virtual address space. We store the
164 * DMA address in the page private. The helper function
165 * fw_iso_buffer_map() will map the pages into a given vma.
166 */
167struct fw_iso_buffer {
168 enum dma_data_direction direction;
169 struct page **pages;
170 int page_count;
171};
172
173typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
174 u32 cycle, size_t header_length,
175 void *header, void *data);
176
177struct fw_iso_context {
178 struct fw_card *card;
179 int type;
180 int channel;
181 int speed;
182 size_t header_size;
183 fw_iso_callback_t callback;
184 void *callback_data;
185};
186
187int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
188 int page_count, enum dma_data_direction direction);
189int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); 121int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
190void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
191
192struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
193 int type, int channel, int speed, size_t header_size,
194 fw_iso_callback_t callback, void *callback_data);
195int fw_iso_context_queue(struct fw_iso_context *ctx,
196 struct fw_iso_packet *packet,
197 struct fw_iso_buffer *buffer,
198 unsigned long payload);
199int fw_iso_context_start(struct fw_iso_context *ctx,
200 int cycle, int sync, int tags);
201int fw_iso_context_stop(struct fw_iso_context *ctx);
202void fw_iso_context_destroy(struct fw_iso_context *ctx);
203
204void fw_iso_resource_manage(struct fw_card *card, int generation, 122void fw_iso_resource_manage(struct fw_card *card, int generation,
205 u64 channels_mask, int *channel, int *bandwidth, bool allocate); 123 u64 channels_mask, int *channel, int *bandwidth, bool allocate);
206 124
@@ -285,9 +203,4 @@ void fw_flush_transactions(struct fw_card *card);
285void fw_send_phy_config(struct fw_card *card, 203void fw_send_phy_config(struct fw_card *card,
286 int node_id, int generation, int gap_count); 204 int node_id, int generation, int gap_count);
287 205
288static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
289{
290 return tag << 14 | channel << 8 | sy;
291}
292
293#endif /* _FIREWIRE_CORE_H */ 206#endif /* _FIREWIRE_CORE_H */
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
new file mode 100644
index 000000000000..a42209a73aed
--- /dev/null
+++ b/drivers/firewire/net.c
@@ -0,0 +1,1655 @@
1/*
2 * IPv4 over IEEE 1394, per RFC 2734
3 *
4 * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com>
5 *
6 * based on eth1394 by Ben Collins et al
7 */
8
9#include <linux/bug.h>
10#include <linux/device.h>
11#include <linux/ethtool.h>
12#include <linux/firewire.h>
13#include <linux/firewire-constants.h>
14#include <linux/highmem.h>
15#include <linux/in.h>
16#include <linux/ip.h>
17#include <linux/jiffies.h>
18#include <linux/mod_devicetable.h>
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/mutex.h>
22#include <linux/netdevice.h>
23#include <linux/skbuff.h>
24#include <linux/spinlock.h>
25
26#include <asm/unaligned.h>
27#include <net/arp.h>
28
29#define FWNET_MAX_FRAGMENTS 25 /* arbitrary limit */
30#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16 * 1024 ? 4 : 2)
31
32#define IEEE1394_BROADCAST_CHANNEL 31
33#define IEEE1394_ALL_NODES (0xffc0 | 0x003f)
34#define IEEE1394_MAX_PAYLOAD_S100 512
35#define FWNET_NO_FIFO_ADDR (~0ULL)
36
37#define IANA_SPECIFIER_ID 0x00005eU
38#define RFC2734_SW_VERSION 0x000001U
39
40#define IEEE1394_GASP_HDR_SIZE 8
41
42#define RFC2374_UNFRAG_HDR_SIZE 4
43#define RFC2374_FRAG_HDR_SIZE 8
44#define RFC2374_FRAG_OVERHEAD 4
45
46#define RFC2374_HDR_UNFRAG 0 /* unfragmented */
47#define RFC2374_HDR_FIRSTFRAG 1 /* first fragment */
48#define RFC2374_HDR_LASTFRAG 2 /* last fragment */
49#define RFC2374_HDR_INTFRAG 3 /* interior fragment */
50
51#define RFC2734_HW_ADDR_LEN 16
52
53struct rfc2734_arp {
54 __be16 hw_type; /* 0x0018 */
55 __be16 proto_type; /* 0x0806 */
56 u8 hw_addr_len; /* 16 */
57 u8 ip_addr_len; /* 4 */
58 __be16 opcode; /* ARP Opcode */
59 /* Above is exactly the same format as struct arphdr */
60
61 __be64 s_uniq_id; /* Sender's 64bit EUI */
62 u8 max_rec; /* Sender's max packet size */
63 u8 sspd; /* Sender's max speed */
64 __be16 fifo_hi; /* hi 16bits of sender's FIFO addr */
65 __be32 fifo_lo; /* lo 32bits of sender's FIFO addr */
66 __be32 sip; /* Sender's IP Address */
67 __be32 tip; /* IP Address of requested hw addr */
68} __attribute__((packed));
69
70/* This header format is specific to this driver implementation. */
71#define FWNET_ALEN 8
72#define FWNET_HLEN 10
73struct fwnet_header {
74 u8 h_dest[FWNET_ALEN]; /* destination address */
75 __be16 h_proto; /* packet type ID field */
76} __attribute__((packed));
77
78/* IPv4 and IPv6 encapsulation header */
79struct rfc2734_header {
80 u32 w0;
81 u32 w1;
82};
83
84#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
85#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
86#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
87#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
88#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
89
90#define fwnet_set_hdr_lf(lf) ((lf) << 30)
91#define fwnet_set_hdr_ether_type(et) (et)
92#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
93#define fwnet_set_hdr_fg_off(fgo) (fgo)
94
95#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
96
97static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr,
98 unsigned ether_type)
99{
100 hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG)
101 | fwnet_set_hdr_ether_type(ether_type);
102}
103
104static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr,
105 unsigned ether_type, unsigned dg_size, unsigned dgl)
106{
107 hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG)
108 | fwnet_set_hdr_dg_size(dg_size)
109 | fwnet_set_hdr_ether_type(ether_type);
110 hdr->w1 = fwnet_set_hdr_dgl(dgl);
111}
112
113static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr,
114 unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl)
115{
116 hdr->w0 = fwnet_set_hdr_lf(lf)
117 | fwnet_set_hdr_dg_size(dg_size)
118 | fwnet_set_hdr_fg_off(fg_off);
119 hdr->w1 = fwnet_set_hdr_dgl(dgl);
120}
121
122/* This list keeps track of what parts of the datagram have been filled in */
123struct fwnet_fragment_info {
124 struct list_head fi_link;
125 u16 offset;
126 u16 len;
127};
128
129struct fwnet_partial_datagram {
130 struct list_head pd_link;
131 struct list_head fi_list;
132 struct sk_buff *skb;
133 /* FIXME Why not use skb->data? */
134 char *pbuf;
135 u16 datagram_label;
136 u16 ether_type;
137 u16 datagram_size;
138};
139
140static DEFINE_MUTEX(fwnet_device_mutex);
141static LIST_HEAD(fwnet_device_list);
142
143struct fwnet_device {
144 struct list_head dev_link;
145 spinlock_t lock;
146 enum {
147 FWNET_BROADCAST_ERROR,
148 FWNET_BROADCAST_RUNNING,
149 FWNET_BROADCAST_STOPPED,
150 } broadcast_state;
151 struct fw_iso_context *broadcast_rcv_context;
152 struct fw_iso_buffer broadcast_rcv_buffer;
153 void **broadcast_rcv_buffer_ptrs;
154 unsigned broadcast_rcv_next_ptr;
155 unsigned num_broadcast_rcv_ptrs;
156 unsigned rcv_buffer_size;
157 /*
158 * This value is the maximum unfragmented datagram size that can be
159 * sent by the hardware. It already has the GASP overhead and the
160 * unfragmented datagram header overhead calculated into it.
161 */
162 unsigned broadcast_xmt_max_payload;
163 u16 broadcast_xmt_datagramlabel;
164
165 /*
166 * The CSR address that remote nodes must send datagrams to for us to
167 * receive them.
168 */
169 struct fw_address_handler handler;
170 u64 local_fifo;
171
172 /* List of packets to be sent */
173 struct list_head packet_list;
174 /*
175 * List of packets that were broadcasted. When we get an ISO interrupt
176 * one of them has been sent
177 */
178 struct list_head broadcasted_list;
179 /* List of packets that have been sent but not yet acked */
180 struct list_head sent_list;
181
182 struct list_head peer_list;
183 struct fw_card *card;
184 struct net_device *netdev;
185};
186
187struct fwnet_peer {
188 struct list_head peer_link;
189 struct fwnet_device *dev;
190 u64 guid;
191 u64 fifo;
192
193 /* guarded by dev->lock */
194 struct list_head pd_list; /* received partial datagrams */
195 unsigned pdg_size; /* pd_list size */
196
197 u16 datagram_label; /* outgoing datagram label */
198 unsigned max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */
199 int node_id;
200 int generation;
201 unsigned speed;
202};
203
204/* This is our task struct. It's used for the packet complete callback. */
205struct fwnet_packet_task {
206 /*
207 * ptask can actually be on dev->packet_list, dev->broadcasted_list,
208 * or dev->sent_list depending on its current state.
209 */
210 struct list_head pt_link;
211 struct fw_transaction transaction;
212 struct rfc2734_header hdr;
213 struct sk_buff *skb;
214 struct fwnet_device *dev;
215
216 int outstanding_pkts;
217 unsigned max_payload;
218 u64 fifo_addr;
219 u16 dest_node;
220 u8 generation;
221 u8 speed;
222};
223
224/*
225 * saddr == NULL means use device source address.
226 * daddr == NULL means leave destination address (eg unresolved arp).
227 */
228static int fwnet_header_create(struct sk_buff *skb, struct net_device *net,
229 unsigned short type, const void *daddr,
230 const void *saddr, unsigned len)
231{
232 struct fwnet_header *h;
233
234 h = (struct fwnet_header *)skb_push(skb, sizeof(*h));
235 put_unaligned_be16(type, &h->h_proto);
236
237 if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) {
238 memset(h->h_dest, 0, net->addr_len);
239
240 return net->hard_header_len;
241 }
242
243 if (daddr) {
244 memcpy(h->h_dest, daddr, net->addr_len);
245
246 return net->hard_header_len;
247 }
248
249 return -net->hard_header_len;
250}
251
252static int fwnet_header_rebuild(struct sk_buff *skb)
253{
254 struct fwnet_header *h = (struct fwnet_header *)skb->data;
255
256 if (get_unaligned_be16(&h->h_proto) == ETH_P_IP)
257 return arp_find((unsigned char *)&h->h_dest, skb);
258
259 fw_notify("%s: unable to resolve type %04x addresses\n",
260 skb->dev->name, be16_to_cpu(h->h_proto));
261 return 0;
262}
263
264static int fwnet_header_cache(const struct neighbour *neigh,
265 struct hh_cache *hh)
266{
267 struct net_device *net;
268 struct fwnet_header *h;
269
270 if (hh->hh_type == cpu_to_be16(ETH_P_802_3))
271 return -1;
272 net = neigh->dev;
273 h = (struct fwnet_header *)((u8 *)hh->hh_data + 16 - sizeof(*h));
274 h->h_proto = hh->hh_type;
275 memcpy(h->h_dest, neigh->ha, net->addr_len);
276 hh->hh_len = FWNET_HLEN;
277
278 return 0;
279}
280
281/* Called by Address Resolution module to notify changes in address. */
282static void fwnet_header_cache_update(struct hh_cache *hh,
283 const struct net_device *net, const unsigned char *haddr)
284{
285 memcpy((u8 *)hh->hh_data + 16 - FWNET_HLEN, haddr, net->addr_len);
286}
287
288static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
289{
290 memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN);
291
292 return FWNET_ALEN;
293}
294
295static const struct header_ops fwnet_header_ops = {
296 .create = fwnet_header_create,
297 .rebuild = fwnet_header_rebuild,
298 .cache = fwnet_header_cache,
299 .cache_update = fwnet_header_cache_update,
300 .parse = fwnet_header_parse,
301};
302
303/* FIXME: is this correct for all cases? */
304static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd,
305 unsigned offset, unsigned len)
306{
307 struct fwnet_fragment_info *fi;
308 unsigned end = offset + len;
309
310 list_for_each_entry(fi, &pd->fi_list, fi_link)
311 if (offset < fi->offset + fi->len && end > fi->offset)
312 return true;
313
314 return false;
315}
316
317/* Assumes that new fragment does not overlap any existing fragments */
318static struct fwnet_fragment_info *fwnet_frag_new(
319 struct fwnet_partial_datagram *pd, unsigned offset, unsigned len)
320{
321 struct fwnet_fragment_info *fi, *fi2, *new;
322 struct list_head *list;
323
324 list = &pd->fi_list;
325 list_for_each_entry(fi, &pd->fi_list, fi_link) {
326 if (fi->offset + fi->len == offset) {
327 /* The new fragment can be tacked on to the end */
328 /* Did the new fragment plug a hole? */
329 fi2 = list_entry(fi->fi_link.next,
330 struct fwnet_fragment_info, fi_link);
331 if (fi->offset + fi->len == fi2->offset) {
332 /* glue fragments together */
333 fi->len += len + fi2->len;
334 list_del(&fi2->fi_link);
335 kfree(fi2);
336 } else {
337 fi->len += len;
338 }
339
340 return fi;
341 }
342 if (offset + len == fi->offset) {
343 /* The new fragment can be tacked on to the beginning */
344 /* Did the new fragment plug a hole? */
345 fi2 = list_entry(fi->fi_link.prev,
346 struct fwnet_fragment_info, fi_link);
347 if (fi2->offset + fi2->len == fi->offset) {
348 /* glue fragments together */
349 fi2->len += fi->len + len;
350 list_del(&fi->fi_link);
351 kfree(fi);
352
353 return fi2;
354 }
355 fi->offset = offset;
356 fi->len += len;
357
358 return fi;
359 }
360 if (offset > fi->offset + fi->len) {
361 list = &fi->fi_link;
362 break;
363 }
364 if (offset + len < fi->offset) {
365 list = fi->fi_link.prev;
366 break;
367 }
368 }
369
370 new = kmalloc(sizeof(*new), GFP_ATOMIC);
371 if (!new) {
372 fw_error("out of memory\n");
373 return NULL;
374 }
375
376 new->offset = offset;
377 new->len = len;
378 list_add(&new->fi_link, list);
379
380 return new;
381}
382
383static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net,
384 struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size,
385 void *frag_buf, unsigned frag_off, unsigned frag_len)
386{
387 struct fwnet_partial_datagram *new;
388 struct fwnet_fragment_info *fi;
389
390 new = kmalloc(sizeof(*new), GFP_ATOMIC);
391 if (!new)
392 goto fail;
393
394 INIT_LIST_HEAD(&new->fi_list);
395 fi = fwnet_frag_new(new, frag_off, frag_len);
396 if (fi == NULL)
397 goto fail_w_new;
398
399 new->datagram_label = datagram_label;
400 new->datagram_size = dg_size;
401 new->skb = dev_alloc_skb(dg_size + net->hard_header_len + 15);
402 if (new->skb == NULL)
403 goto fail_w_fi;
404
405 skb_reserve(new->skb, (net->hard_header_len + 15) & ~15);
406 new->pbuf = skb_put(new->skb, dg_size);
407 memcpy(new->pbuf + frag_off, frag_buf, frag_len);
408 list_add_tail(&new->pd_link, &peer->pd_list);
409
410 return new;
411
412fail_w_fi:
413 kfree(fi);
414fail_w_new:
415 kfree(new);
416fail:
417 fw_error("out of memory\n");
418
419 return NULL;
420}
421
422static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer,
423 u16 datagram_label)
424{
425 struct fwnet_partial_datagram *pd;
426
427 list_for_each_entry(pd, &peer->pd_list, pd_link)
428 if (pd->datagram_label == datagram_label)
429 return pd;
430
431 return NULL;
432}
433
434
435static void fwnet_pd_delete(struct fwnet_partial_datagram *old)
436{
437 struct fwnet_fragment_info *fi, *n;
438
439 list_for_each_entry_safe(fi, n, &old->fi_list, fi_link)
440 kfree(fi);
441
442 list_del(&old->pd_link);
443 dev_kfree_skb_any(old->skb);
444 kfree(old);
445}
446
447static bool fwnet_pd_update(struct fwnet_peer *peer,
448 struct fwnet_partial_datagram *pd, void *frag_buf,
449 unsigned frag_off, unsigned frag_len)
450{
451 if (fwnet_frag_new(pd, frag_off, frag_len) == NULL)
452 return false;
453
454 memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
455
456 /*
457 * Move list entry to beginnig of list so that oldest partial
458 * datagrams percolate to the end of the list
459 */
460 list_move_tail(&pd->pd_link, &peer->pd_list);
461
462 return true;
463}
464
465static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd)
466{
467 struct fwnet_fragment_info *fi;
468
469 fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link);
470
471 return fi->len == pd->datagram_size;
472}
473
474/* caller must hold dev->lock */
475static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev,
476 u64 guid)
477{
478 struct fwnet_peer *peer;
479
480 list_for_each_entry(peer, &dev->peer_list, peer_link)
481 if (peer->guid == guid)
482 return peer;
483
484 return NULL;
485}
486
487/* caller must hold dev->lock */
488static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev,
489 int node_id, int generation)
490{
491 struct fwnet_peer *peer;
492
493 list_for_each_entry(peer, &dev->peer_list, peer_link)
494 if (peer->node_id == node_id &&
495 peer->generation == generation)
496 return peer;
497
498 return NULL;
499}
500
501/* See IEEE 1394-2008 table 6-4, table 8-8, table 16-18. */
502static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed)
503{
504 max_rec = min(max_rec, speed + 8);
505 max_rec = min(max_rec, 0xbU); /* <= 4096 */
506 if (max_rec < 8) {
507 fw_notify("max_rec %x out of range\n", max_rec);
508 max_rec = 8;
509 }
510
511 return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE;
512}
513
514
515static int fwnet_finish_incoming_packet(struct net_device *net,
516 struct sk_buff *skb, u16 source_node_id,
517 bool is_broadcast, u16 ether_type)
518{
519 struct fwnet_device *dev;
520 static const __be64 broadcast_hw = cpu_to_be64(~0ULL);
521 int status;
522 __be64 guid;
523
524 dev = netdev_priv(net);
525 /* Write metadata, and then pass to the receive level */
526 skb->dev = net;
527 skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
528
529 /*
530 * Parse the encapsulation header. This actually does the job of
531 * converting to an ethernet frame header, as well as arp
532 * conversion if needed. ARP conversion is easier in this
533 * direction, since we are using ethernet as our backend.
534 */
535 /*
536 * If this is an ARP packet, convert it. First, we want to make
537 * use of some of the fields, since they tell us a little bit
538 * about the sending machine.
539 */
540 if (ether_type == ETH_P_ARP) {
541 struct rfc2734_arp *arp1394;
542 struct arphdr *arp;
543 unsigned char *arp_ptr;
544 u64 fifo_addr;
545 u64 peer_guid;
546 unsigned sspd;
547 u16 max_payload;
548 struct fwnet_peer *peer;
549 unsigned long flags;
550
551 arp1394 = (struct rfc2734_arp *)skb->data;
552 arp = (struct arphdr *)skb->data;
553 arp_ptr = (unsigned char *)(arp + 1);
554 peer_guid = get_unaligned_be64(&arp1394->s_uniq_id);
555 fifo_addr = (u64)get_unaligned_be16(&arp1394->fifo_hi) << 32
556 | get_unaligned_be32(&arp1394->fifo_lo);
557
558 sspd = arp1394->sspd;
559 /* Sanity check. OS X 10.3 PPC reportedly sends 131. */
560 if (sspd > SCODE_3200) {
561 fw_notify("sspd %x out of range\n", sspd);
562 sspd = SCODE_3200;
563 }
564 max_payload = fwnet_max_payload(arp1394->max_rec, sspd);
565
566 spin_lock_irqsave(&dev->lock, flags);
567 peer = fwnet_peer_find_by_guid(dev, peer_guid);
568 if (peer) {
569 peer->fifo = fifo_addr;
570
571 if (peer->speed > sspd)
572 peer->speed = sspd;
573 if (peer->max_payload > max_payload)
574 peer->max_payload = max_payload;
575 }
576 spin_unlock_irqrestore(&dev->lock, flags);
577
578 if (!peer) {
579 fw_notify("No peer for ARP packet from %016llx\n",
580 (unsigned long long)peer_guid);
581 goto failed_proto;
582 }
583
584 /*
585 * Now that we're done with the 1394 specific stuff, we'll
586 * need to alter some of the data. Believe it or not, all
587 * that needs to be done is sender_IP_address needs to be
588 * moved, the destination hardware address get stuffed
589 * in and the hardware address length set to 8.
590 *
591 * IMPORTANT: The code below overwrites 1394 specific data
592 * needed above so keep the munging of the data for the
593 * higher level IP stack last.
594 */
595
596 arp->ar_hln = 8;
597 /* skip over sender unique id */
598 arp_ptr += arp->ar_hln;
599 /* move sender IP addr */
600 put_unaligned(arp1394->sip, (u32 *)arp_ptr);
601 /* skip over sender IP addr */
602 arp_ptr += arp->ar_pln;
603
604 if (arp->ar_op == htons(ARPOP_REQUEST))
605 memset(arp_ptr, 0, sizeof(u64));
606 else
607 memcpy(arp_ptr, net->dev_addr, sizeof(u64));
608 }
609
610 /* Now add the ethernet header. */
611 guid = cpu_to_be64(dev->card->guid);
612 if (dev_hard_header(skb, net, ether_type,
613 is_broadcast ? &broadcast_hw : &guid,
614 NULL, skb->len) >= 0) {
615 struct fwnet_header *eth;
616 u16 *rawp;
617 __be16 protocol;
618
619 skb_reset_mac_header(skb);
620 skb_pull(skb, sizeof(*eth));
621 eth = (struct fwnet_header *)skb_mac_header(skb);
622 if (*eth->h_dest & 1) {
623 if (memcmp(eth->h_dest, net->broadcast,
624 net->addr_len) == 0)
625 skb->pkt_type = PACKET_BROADCAST;
626#if 0
627 else
628 skb->pkt_type = PACKET_MULTICAST;
629#endif
630 } else {
631 if (memcmp(eth->h_dest, net->dev_addr, net->addr_len))
632 skb->pkt_type = PACKET_OTHERHOST;
633 }
634 if (ntohs(eth->h_proto) >= 1536) {
635 protocol = eth->h_proto;
636 } else {
637 rawp = (u16 *)skb->data;
638 if (*rawp == 0xffff)
639 protocol = htons(ETH_P_802_3);
640 else
641 protocol = htons(ETH_P_802_2);
642 }
643 skb->protocol = protocol;
644 }
645 status = netif_rx(skb);
646 if (status == NET_RX_DROP) {
647 net->stats.rx_errors++;
648 net->stats.rx_dropped++;
649 } else {
650 net->stats.rx_packets++;
651 net->stats.rx_bytes += skb->len;
652 }
653 if (netif_queue_stopped(net))
654 netif_wake_queue(net);
655
656 return 0;
657
658 failed_proto:
659 net->stats.rx_errors++;
660 net->stats.rx_dropped++;
661
662 dev_kfree_skb_any(skb);
663 if (netif_queue_stopped(net))
664 netif_wake_queue(net);
665
666 net->last_rx = jiffies;
667
668 return 0;
669}
670
671static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
672 int source_node_id, int generation,
673 bool is_broadcast)
674{
675 struct sk_buff *skb;
676 struct net_device *net = dev->netdev;
677 struct rfc2734_header hdr;
678 unsigned lf;
679 unsigned long flags;
680 struct fwnet_peer *peer;
681 struct fwnet_partial_datagram *pd;
682 int fg_off;
683 int dg_size;
684 u16 datagram_label;
685 int retval;
686 u16 ether_type;
687
688 hdr.w0 = be32_to_cpu(buf[0]);
689 lf = fwnet_get_hdr_lf(&hdr);
690 if (lf == RFC2374_HDR_UNFRAG) {
691 /*
692 * An unfragmented datagram has been received by the ieee1394
693 * bus. Build an skbuff around it so we can pass it to the
694 * high level network layer.
695 */
696 ether_type = fwnet_get_hdr_ether_type(&hdr);
697 buf++;
698 len -= RFC2374_UNFRAG_HDR_SIZE;
699
700 skb = dev_alloc_skb(len + net->hard_header_len + 15);
701 if (unlikely(!skb)) {
702 fw_error("out of memory\n");
703 net->stats.rx_dropped++;
704
705 return -1;
706 }
707 skb_reserve(skb, (net->hard_header_len + 15) & ~15);
708 memcpy(skb_put(skb, len), buf, len);
709
710 return fwnet_finish_incoming_packet(net, skb, source_node_id,
711 is_broadcast, ether_type);
712 }
713 /* A datagram fragment has been received, now the fun begins. */
714 hdr.w1 = ntohl(buf[1]);
715 buf += 2;
716 len -= RFC2374_FRAG_HDR_SIZE;
717 if (lf == RFC2374_HDR_FIRSTFRAG) {
718 ether_type = fwnet_get_hdr_ether_type(&hdr);
719 fg_off = 0;
720 } else {
721 ether_type = 0;
722 fg_off = fwnet_get_hdr_fg_off(&hdr);
723 }
724 datagram_label = fwnet_get_hdr_dgl(&hdr);
725 dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
726
727 spin_lock_irqsave(&dev->lock, flags);
728
729 peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
730 if (!peer)
731 goto bad_proto;
732
733 pd = fwnet_pd_find(peer, datagram_label);
734 if (pd == NULL) {
735 while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) {
736 /* remove the oldest */
737 fwnet_pd_delete(list_first_entry(&peer->pd_list,
738 struct fwnet_partial_datagram, pd_link));
739 peer->pdg_size--;
740 }
741 pd = fwnet_pd_new(net, peer, datagram_label,
742 dg_size, buf, fg_off, len);
743 if (pd == NULL) {
744 retval = -ENOMEM;
745 goto bad_proto;
746 }
747 peer->pdg_size++;
748 } else {
749 if (fwnet_frag_overlap(pd, fg_off, len) ||
750 pd->datagram_size != dg_size) {
751 /*
752 * Differing datagram sizes or overlapping fragments,
753 * discard old datagram and start a new one.
754 */
755 fwnet_pd_delete(pd);
756 pd = fwnet_pd_new(net, peer, datagram_label,
757 dg_size, buf, fg_off, len);
758 if (pd == NULL) {
759 retval = -ENOMEM;
760 peer->pdg_size--;
761 goto bad_proto;
762 }
763 } else {
764 if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
765 /*
766 * Couldn't save off fragment anyway
767 * so might as well obliterate the
768 * datagram now.
769 */
770 fwnet_pd_delete(pd);
771 peer->pdg_size--;
772 goto bad_proto;
773 }
774 }
775 } /* new datagram or add to existing one */
776
777 if (lf == RFC2374_HDR_FIRSTFRAG)
778 pd->ether_type = ether_type;
779
780 if (fwnet_pd_is_complete(pd)) {
781 ether_type = pd->ether_type;
782 peer->pdg_size--;
783 skb = skb_get(pd->skb);
784 fwnet_pd_delete(pd);
785
786 spin_unlock_irqrestore(&dev->lock, flags);
787
788 return fwnet_finish_incoming_packet(net, skb, source_node_id,
789 false, ether_type);
790 }
791 /*
792 * Datagram is not complete, we're done for the
793 * moment.
794 */
795 spin_unlock_irqrestore(&dev->lock, flags);
796
797 return 0;
798
799 bad_proto:
800 spin_unlock_irqrestore(&dev->lock, flags);
801
802 if (netif_queue_stopped(net))
803 netif_wake_queue(net);
804
805 return 0;
806}
807
808static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
809 int tcode, int destination, int source, int generation,
810 int speed, unsigned long long offset, void *payload,
811 size_t length, void *callback_data)
812{
813 struct fwnet_device *dev = callback_data;
814 int rcode;
815
816 if (destination == IEEE1394_ALL_NODES) {
817 kfree(r);
818
819 return;
820 }
821
822 if (offset != dev->handler.offset)
823 rcode = RCODE_ADDRESS_ERROR;
824 else if (tcode != TCODE_WRITE_BLOCK_REQUEST)
825 rcode = RCODE_TYPE_ERROR;
826 else if (fwnet_incoming_packet(dev, payload, length,
827 source, generation, false) != 0) {
828 fw_error("Incoming packet failure\n");
829 rcode = RCODE_CONFLICT_ERROR;
830 } else
831 rcode = RCODE_COMPLETE;
832
833 fw_send_response(card, r, rcode);
834}
835
/*
 * Isochronous receive callback for the GASP broadcast channel.
 * Decodes the GASP header of the just-filled buffer chunk, hands any
 * RFC 2734 encapsulated datagram to fwnet_incoming_packet(), and
 * requeues the chunk so the context keeps receiving into it.
 */
static void fwnet_receive_broadcast(struct fw_iso_context *context,
		u32 cycle, size_t header_length, void *header, void *data)
{
	struct fwnet_device *dev;
	struct fw_iso_packet packet;
	struct fw_card *card;
	__be16 *hdr_ptr;
	__be32 *buf_ptr;
	int retval;
	u32 length;
	u16 source_node_id;
	u32 specifier_id;
	u32 ver;
	unsigned long offset;
	unsigned long flags;

	dev = data;
	card = dev->card;
	/* The iso header word holds the big-endian payload length. */
	hdr_ptr = header;
	length = be16_to_cpup(hdr_ptr);

	/* Take the next chunk off the receive ring under the lock. */
	spin_lock_irqsave(&dev->lock, flags);

	offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr;
	buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++];
	if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs)
		dev->broadcast_rcv_next_ptr = 0;

	spin_unlock_irqrestore(&dev->lock, flags);

	/* GASP header: source node ID, specifier ID, software version. */
	specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
			| (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
	ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
	source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;

	/* Only RFC 2734 (IPv4 over 1394) datagrams go up the stack. */
	if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
		buf_ptr += 2;
		length -= IEEE1394_GASP_HDR_SIZE;
		/* generation -1: presumably "no generation check" for
		 * GASP reception — confirm against fwnet_incoming_packet */
		fwnet_incoming_packet(dev, buf_ptr, length,
				source_node_id, -1, true);
	}

	/* Requeue this chunk for further reception. */
	packet.payload_length = dev->rcv_buffer_size;
	packet.interrupt = 1;
	packet.skip = 0;
	packet.tag = 3;
	packet.sy = 0;
	packet.header_length = IEEE1394_GASP_HDR_SIZE;

	spin_lock_irqsave(&dev->lock, flags);

	retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet,
			&dev->broadcast_rcv_buffer, offset);

	spin_unlock_irqrestore(&dev->lock, flags);

	if (retval < 0)
		fw_error("requeue failed\n");
}
895
896static struct kmem_cache *fwnet_packet_task_cache;
897
898static int fwnet_send_packet(struct fwnet_packet_task *ptask);
899
900static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
901{
902 struct fwnet_device *dev;
903 unsigned long flags;
904
905 dev = ptask->dev;
906
907 spin_lock_irqsave(&dev->lock, flags);
908 list_del(&ptask->pt_link);
909 spin_unlock_irqrestore(&dev->lock, flags);
910
911 ptask->outstanding_pkts--; /* FIXME access inside lock */
912
913 if (ptask->outstanding_pkts > 0) {
914 u16 dg_size;
915 u16 fg_off;
916 u16 datagram_label;
917 u16 lf;
918 struct sk_buff *skb;
919
920 /* Update the ptask to point to the next fragment and send it */
921 lf = fwnet_get_hdr_lf(&ptask->hdr);
922 switch (lf) {
923 case RFC2374_HDR_LASTFRAG:
924 case RFC2374_HDR_UNFRAG:
925 default:
926 fw_error("Outstanding packet %x lf %x, header %x,%x\n",
927 ptask->outstanding_pkts, lf, ptask->hdr.w0,
928 ptask->hdr.w1);
929 BUG();
930
931 case RFC2374_HDR_FIRSTFRAG:
932 /* Set frag type here for future interior fragments */
933 dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
934 fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
935 datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
936 break;
937
938 case RFC2374_HDR_INTFRAG:
939 dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
940 fg_off = fwnet_get_hdr_fg_off(&ptask->hdr)
941 + ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
942 datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
943 break;
944 }
945 skb = ptask->skb;
946 skb_pull(skb, ptask->max_payload);
947 if (ptask->outstanding_pkts > 1) {
948 fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
949 dg_size, fg_off, datagram_label);
950 } else {
951 fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG,
952 dg_size, fg_off, datagram_label);
953 ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
954 }
955 fwnet_send_packet(ptask);
956 } else {
957 dev_kfree_skb_any(ptask->skb);
958 kmem_cache_free(fwnet_packet_task_cache, ptask);
959 }
960}
961
962static void fwnet_write_complete(struct fw_card *card, int rcode,
963 void *payload, size_t length, void *data)
964{
965 struct fwnet_packet_task *ptask;
966
967 ptask = data;
968
969 if (rcode == RCODE_COMPLETE)
970 fwnet_transmit_packet_done(ptask);
971 else
972 fw_error("fwnet_write_complete: failed: %x\n", rcode);
973 /* ??? error recovery */
974}
975
/*
 * Push the RFC 2374 encapsulation header in front of the current
 * (fragment of the) datagram in ptask->skb and start an asynchronous
 * write transaction: a GASP stream packet on the broadcast channel
 * if the task is addressed to all nodes, otherwise a block write to
 * the peer's FIFO address.  Completion is reported asynchronously to
 * fwnet_write_complete().  Always returns 0.
 */
static int fwnet_send_packet(struct fwnet_packet_task *ptask)
{
	struct fwnet_device *dev;
	unsigned tx_len;
	struct rfc2734_header *bufhdr;
	unsigned long flags;

	dev = ptask->dev;
	tx_len = ptask->max_payload;
	/* Prepend the 4-byte (unfragmented) or 8-byte (fragment)
	 * encapsulation header, big-endian on the wire. */
	switch (fwnet_get_hdr_lf(&ptask->hdr)) {
	case RFC2374_HDR_UNFRAG:
		bufhdr = (struct rfc2734_header *)
				skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE);
		put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
		break;

	case RFC2374_HDR_FIRSTFRAG:
	case RFC2374_HDR_INTFRAG:
	case RFC2374_HDR_LASTFRAG:
		bufhdr = (struct rfc2734_header *)
				skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE);
		put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
		put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1);
		break;

	default:
		BUG();
	}
	if (ptask->dest_node == IEEE1394_ALL_NODES) {
		u8 *p;
		int generation;
		int node_id;

		/* ptask->generation may not have been set yet */
		generation = dev->card->generation;
		smp_rmb();
		node_id = dev->card->node_id;

		/* Prepend the 8-byte GASP header: source node ID,
		 * specifier ID, software version. */
		p = skb_push(ptask->skb, 8);
		put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
		put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
				| RFC2734_SW_VERSION, &p[4]);

		/* We should not transmit if broadcast_channel.valid == 0. */
		fw_send_request(dev->card, &ptask->transaction,
				TCODE_STREAM_DATA,
				fw_stream_packet_destination_id(3,
				IEEE1394_BROADCAST_CHANNEL, 0),
				generation, SCODE_100, 0ULL, ptask->skb->data,
				tx_len + 8, fwnet_write_complete, ptask);

		/* FIXME race? */
		spin_lock_irqsave(&dev->lock, flags);
		list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
		spin_unlock_irqrestore(&dev->lock, flags);

		return 0;
	}

	fw_send_request(dev->card, &ptask->transaction,
			TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node,
			ptask->generation, ptask->speed, ptask->fifo_addr,
			ptask->skb->data, tx_len, fwnet_write_complete, ptask);

	/* FIXME race? */
	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&ptask->pt_link, &dev->sent_list);
	spin_unlock_irqrestore(&dev->lock, flags);

	dev->netdev->trans_start = jiffies;

	return 0;
}
1049
/*
 * Set up everything needed for broadcast reception on the card:
 * a FIFO address handler for incoming unicast writes (allocated only
 * once per device), an isochronous receive context listening on the
 * broadcast channel with FWNET_ISO_PAGE_COUNT pages of buffers, then
 * queue all buffer chunks and start the context.  Also derives the
 * maximum broadcast transmit payload.  Returns 0 or negative errno;
 * on error, everything set up so far is unwound.
 */
static int fwnet_broadcast_start(struct fwnet_device *dev)
{
	struct fw_iso_context *context;
	int retval;
	unsigned num_packets;
	unsigned max_receive;
	struct fw_iso_packet packet;
	unsigned long offset;
	unsigned u;

	if (dev->local_fifo == FWNET_NO_FIFO_ADDR) {
		/* outside OHCI posted write area? */
		static const struct fw_address_region region = {
			.start = 0xffff00000000ULL,
			.end = CSR_REGISTER_BASE,
		};

		dev->handler.length = 4096;
		dev->handler.address_callback = fwnet_receive_packet;
		dev->handler.callback_data = dev;

		retval = fw_core_add_address_handler(&dev->handler, &region);
		if (retval < 0)
			goto failed_initial;

		dev->local_fifo = dev->handler.offset;
	}

	/* Chunk size = largest async payload the card can receive. */
	max_receive = 1U << (dev->card->max_receive + 1);
	num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive;

	if (!dev->broadcast_rcv_context) {
		void **ptrptr;

		context = fw_iso_context_create(dev->card,
		    FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL,
		    dev->card->link_speed, 8, fwnet_receive_broadcast, dev);
		if (IS_ERR(context)) {
			retval = PTR_ERR(context);
			goto failed_context_create;
		}

		retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer,
		    dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
		if (retval < 0)
			goto failed_buffer_init;

		ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL);
		if (!ptrptr) {
			retval = -ENOMEM;
			goto failed_ptrs_alloc;
		}

		/* Build a flat table of kernel-virtual chunk addresses,
		 * one entry per max_receive-sized chunk of each page. */
		dev->broadcast_rcv_buffer_ptrs = ptrptr;
		for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
			void *ptr;
			unsigned v;

			ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
			for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
				*ptrptr++ = (void *)
						((char *)ptr + v * max_receive);
		}
		dev->broadcast_rcv_context = context;
	} else {
		context = dev->broadcast_rcv_context;
	}

	packet.payload_length = max_receive;
	packet.interrupt = 1;
	packet.skip = 0;
	packet.tag = 3;
	packet.sy = 0;
	packet.header_length = IEEE1394_GASP_HDR_SIZE;
	offset = 0;

	/* Queue every chunk; fwnet_receive_broadcast() requeues them. */
	for (u = 0; u < num_packets; u++) {
		retval = fw_iso_context_queue(context, &packet,
				&dev->broadcast_rcv_buffer, offset);
		if (retval < 0)
			goto failed_rcv_queue;

		offset += max_receive;
	}
	dev->num_broadcast_rcv_ptrs = num_packets;
	dev->rcv_buffer_size = max_receive;
	dev->broadcast_rcv_next_ptr = 0U;
	retval = fw_iso_context_start(context, -1, 0,
			FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */
	if (retval < 0)
		goto failed_rcv_queue;

	/* FIXME: adjust it according to the min. speed of all known peers? */
	dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100
			- IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE;
	dev->broadcast_state = FWNET_BROADCAST_RUNNING;

	return 0;

 failed_rcv_queue:
	kfree(dev->broadcast_rcv_buffer_ptrs);
	dev->broadcast_rcv_buffer_ptrs = NULL;
 failed_ptrs_alloc:
	fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
 failed_buffer_init:
	fw_iso_context_destroy(context);
	dev->broadcast_rcv_context = NULL;
 failed_context_create:
	fw_core_remove_address_handler(&dev->handler);
 failed_initial:
	dev->local_fifo = FWNET_NO_FIFO_ADDR;

	return retval;
}
1164
1165/* ifup */
1166static int fwnet_open(struct net_device *net)
1167{
1168 struct fwnet_device *dev = netdev_priv(net);
1169 int ret;
1170
1171 if (dev->broadcast_state == FWNET_BROADCAST_ERROR) {
1172 ret = fwnet_broadcast_start(dev);
1173 if (ret)
1174 return ret;
1175 }
1176 netif_start_queue(net);
1177
1178 return 0;
1179}
1180
/* ifdown: stop handing datagrams to fwnet_tx(). */
static int fwnet_stop(struct net_device *net)
{
	netif_stop_queue(net);

	/* Deallocate iso context for use by other applications? */

	return 0;
}
1190
/*
 * ndo_start_xmit: encapsulate one outgoing datagram per RFC 2374 and
 * hand it to fwnet_send_packet().  Broadcast/multicast and ARP go out
 * as GASP on the broadcast channel; everything else as a block write
 * to the destination peer's FIFO.  ARP payloads are rewritten in
 * place into the RFC 2734 ARP format.  Always returns NETDEV_TX_OK.
 */
static int fwnet_tx(struct sk_buff *skb, struct net_device *net)
{
	struct fwnet_header hdr_buf;
	struct fwnet_device *dev = netdev_priv(net);
	__be16 proto;
	u16 dest_node;
	unsigned max_payload;
	u16 dg_size;
	u16 *datagram_label_ptr;
	struct fwnet_packet_task *ptask;
	struct fwnet_peer *peer;
	unsigned long flags;

	ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC);
	if (ptask == NULL)
		goto fail;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto fail;

	/*
	 * Make a copy of the driver-specific header.
	 * We might need to rebuild the header on tx failure.
	 */
	memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
	skb_pull(skb, sizeof(hdr_buf));

	proto = hdr_buf.h_proto;
	dg_size = skb->len;

	/* serialize access to peer, including peer->datagram_label */
	spin_lock_irqsave(&dev->lock, flags);

	/*
	 * Set the transmission type for the packet.  ARP packets and IP
	 * broadcast packets are sent via GASP.
	 */
	if (memcmp(hdr_buf.h_dest, net->broadcast, FWNET_ALEN) == 0
			|| proto == htons(ETH_P_ARP)
			|| (proto == htons(ETH_P_IP)
			&& IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) {
		max_payload = dev->broadcast_xmt_max_payload;
		datagram_label_ptr = &dev->broadcast_xmt_datagramlabel;

		ptask->fifo_addr = FWNET_NO_FIFO_ADDR;
		ptask->generation = 0;
		ptask->dest_node = IEEE1394_ALL_NODES;
		ptask->speed = SCODE_100;
	} else {
		/* Destination hardware address is the peer's 64-bit GUID. */
		__be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest);
		u8 generation;

		peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
		if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR)
			goto fail_unlock;

		generation = peer->generation;
		dest_node = peer->node_id;
		max_payload = peer->max_payload;
		datagram_label_ptr = &peer->datagram_label;

		ptask->fifo_addr = peer->fifo;
		ptask->generation = generation;
		ptask->dest_node = dest_node;
		ptask->speed = peer->speed;
	}

	/* If this is an ARP packet, convert it */
	if (proto == htons(ETH_P_ARP)) {
		struct arphdr *arp = (struct arphdr *)skb->data;
		unsigned char *arp_ptr = (unsigned char *)(arp + 1);
		struct rfc2734_arp *arp1394 = (struct rfc2734_arp *)skb->data;
		__be32 ipaddr;

		ipaddr = get_unaligned((__be32 *)(arp_ptr + FWNET_ALEN));

		/* Advertise our own FIFO address and capabilities in the
		 * RFC 2734 ARP fields, overwriting the generic ARP body. */
		arp1394->hw_addr_len = RFC2734_HW_ADDR_LEN;
		arp1394->max_rec = dev->card->max_receive;
		arp1394->sspd = dev->card->link_speed;

		put_unaligned_be16(dev->local_fifo >> 32,
				&arp1394->fifo_hi);
		put_unaligned_be32(dev->local_fifo & 0xffffffff,
				&arp1394->fifo_lo);
		put_unaligned(ipaddr, &arp1394->sip);
	}

	ptask->hdr.w0 = 0;
	ptask->hdr.w1 = 0;
	ptask->skb = skb;
	ptask->dev = dev;

	/* Does it all fit in one packet? */
	if (dg_size <= max_payload) {
		fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto));
		ptask->outstanding_pkts = 1;
		max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE;
	} else {
		u16 datagram_label;

		max_payload -= RFC2374_FRAG_OVERHEAD;
		datagram_label = (*datagram_label_ptr)++;
		fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size,
				datagram_label);
		ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload);
		max_payload += RFC2374_FRAG_HDR_SIZE;
	}

	spin_unlock_irqrestore(&dev->lock, flags);

	ptask->max_payload = max_payload;
	fwnet_send_packet(ptask);

	return NETDEV_TX_OK;

 fail_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
 fail:
	if (ptask)
		kmem_cache_free(fwnet_packet_task_cache, ptask);

	if (skb != NULL)
		dev_kfree_skb(skb);

	net->stats.tx_dropped++;
	net->stats.tx_errors++;

	/*
	 * FIXME: According to a patch from 2003-02-26, "returning non-zero
	 * causes serious problems" here, allegedly.  Before that patch,
	 * -ERRNO was returned which is not appropriate under Linux 2.6.
	 * Perhaps more needs to be done?  Stop the queue in serious
	 * conditions and restart it elsewhere?
	 */
	return NETDEV_TX_OK;
}
1328
1329static int fwnet_change_mtu(struct net_device *net, int new_mtu)
1330{
1331 if (new_mtu < 68)
1332 return -EINVAL;
1333
1334 net->mtu = new_mtu;
1335 return 0;
1336}
1337
1338static void fwnet_get_drvinfo(struct net_device *net,
1339 struct ethtool_drvinfo *info)
1340{
1341 strcpy(info->driver, KBUILD_MODNAME);
1342 strcpy(info->bus_info, "ieee1394");
1343}
1344
/* ethtool operations: only drvinfo is implemented. */
static struct ethtool_ops fwnet_ethtool_ops = {
	.get_drvinfo = fwnet_get_drvinfo,
};

/* net_device operations for the firewire%d interface. */
static const struct net_device_ops fwnet_netdev_ops = {
	.ndo_open       = fwnet_open,
	.ndo_stop	= fwnet_stop,
	.ndo_start_xmit = fwnet_tx,
	.ndo_change_mtu = fwnet_change_mtu,
};
1355
1356static void fwnet_init_dev(struct net_device *net)
1357{
1358 net->header_ops = &fwnet_header_ops;
1359 net->netdev_ops = &fwnet_netdev_ops;
1360 net->watchdog_timeo = 2 * HZ;
1361 net->flags = IFF_BROADCAST | IFF_MULTICAST;
1362 net->features = NETIF_F_HIGHDMA;
1363 net->addr_len = FWNET_ALEN;
1364 net->hard_header_len = FWNET_HLEN;
1365 net->type = ARPHRD_IEEE1394;
1366 net->tx_queue_len = 10;
1367 SET_ETHTOOL_OPS(net, &fwnet_ethtool_ops);
1368}
1369
1370/* caller must hold fwnet_device_mutex */
1371static struct fwnet_device *fwnet_dev_find(struct fw_card *card)
1372{
1373 struct fwnet_device *dev;
1374
1375 list_for_each_entry(dev, &fwnet_device_list, dev_link)
1376 if (dev->card == card)
1377 return dev;
1378
1379 return NULL;
1380}
1381
1382static int fwnet_add_peer(struct fwnet_device *dev,
1383 struct fw_unit *unit, struct fw_device *device)
1384{
1385 struct fwnet_peer *peer;
1386
1387 peer = kmalloc(sizeof(*peer), GFP_KERNEL);
1388 if (!peer)
1389 return -ENOMEM;
1390
1391 dev_set_drvdata(&unit->device, peer);
1392
1393 peer->dev = dev;
1394 peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
1395 peer->fifo = FWNET_NO_FIFO_ADDR;
1396 INIT_LIST_HEAD(&peer->pd_list);
1397 peer->pdg_size = 0;
1398 peer->datagram_label = 0;
1399 peer->speed = device->max_speed;
1400 peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed);
1401
1402 peer->generation = device->generation;
1403 smp_rmb();
1404 peer->node_id = device->node_id;
1405
1406 spin_lock_irq(&dev->lock);
1407 list_add_tail(&peer->peer_link, &dev->peer_list);
1408 spin_unlock_irq(&dev->lock);
1409
1410 return 0;
1411}
1412
/*
 * Bus driver probe for one RFC 2734 unit.  A single net_device is
 * shared by all units (peers) on the same card: it is allocated and
 * registered when the first unit appears, and subsequent units are
 * only added as peers.  Returns 0 or a negative errno.
 */
static int fwnet_probe(struct device *_dev)
{
	struct fw_unit *unit = fw_unit(_dev);
	struct fw_device *device = fw_parent_device(unit);
	struct fw_card *card = device->card;
	struct net_device *net;
	bool allocated_netdev = false;
	struct fwnet_device *dev;
	unsigned max_mtu;
	int ret;

	mutex_lock(&fwnet_device_mutex);

	/* Reuse the existing net_device of this card, if any. */
	dev = fwnet_dev_find(card);
	if (dev) {
		net = dev->netdev;
		goto have_dev;
	}

	net = alloc_netdev(sizeof(*dev), "firewire%d", fwnet_init_dev);
	if (net == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	allocated_netdev = true;
	SET_NETDEV_DEV(net, card->device);
	dev = netdev_priv(net);

	/* Broadcast reception is set up lazily in fwnet_open(). */
	spin_lock_init(&dev->lock);
	dev->broadcast_state = FWNET_BROADCAST_ERROR;
	dev->broadcast_rcv_context = NULL;
	dev->broadcast_xmt_max_payload = 0;
	dev->broadcast_xmt_datagramlabel = 0;

	dev->local_fifo = FWNET_NO_FIFO_ADDR;

	INIT_LIST_HEAD(&dev->packet_list);
	INIT_LIST_HEAD(&dev->broadcasted_list);
	INIT_LIST_HEAD(&dev->sent_list);
	INIT_LIST_HEAD(&dev->peer_list);

	dev->card = card;
	dev->netdev = net;

	/*
	 * Use the RFC 2734 default 1500 octets or the maximum payload
	 * as initial MTU
	 */
	max_mtu = (1 << (card->max_receive + 1))
		  - sizeof(struct rfc2734_header) - IEEE1394_GASP_HDR_SIZE;
	net->mtu = min(1500U, max_mtu);

	/* Set our hardware address while we're at it */
	put_unaligned_be64(card->guid, net->dev_addr);
	put_unaligned_be64(~0ULL, net->broadcast);
	ret = register_netdev(net);
	if (ret) {
		fw_error("Cannot register the driver\n");
		goto out;
	}

	list_add_tail(&dev->dev_link, &fwnet_device_list);
	fw_notify("%s: IPv4 over FireWire on device %016llx\n",
		  net->name, (unsigned long long)card->guid);
 have_dev:
	ret = fwnet_add_peer(dev, unit, device);
	/* Unwind a netdev we created in this call if peer setup failed;
	 * a pre-existing netdev (have_dev path) is left alone. */
	if (ret && allocated_netdev) {
		unregister_netdev(net);
		list_del(&dev->dev_link);
	}
 out:
	if (ret && allocated_netdev)
		free_netdev(net);

	mutex_unlock(&fwnet_device_mutex);

	return ret;
}
1492
/*
 * Unlink @peer from its device and free it, including any partially
 * reassembled datagrams still pending for that peer.
 */
static void fwnet_remove_peer(struct fwnet_peer *peer)
{
	struct fwnet_partial_datagram *pd, *pd_next;

	spin_lock_irq(&peer->dev->lock);
	list_del(&peer->peer_link);
	spin_unlock_irq(&peer->dev->lock);

	list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link)
		fwnet_pd_delete(pd);

	kfree(peer);
}
1506
/*
 * Bus driver remove for one unit: drop its peer.  When the last peer
 * of a card disappears, also tear down the shared net_device, the
 * FIFO address handler, the broadcast iso context/buffer, and every
 * packet task still queued or in flight.
 */
static int fwnet_remove(struct device *_dev)
{
	struct fwnet_peer *peer = dev_get_drvdata(_dev);
	struct fwnet_device *dev = peer->dev;
	struct net_device *net;
	struct fwnet_packet_task *ptask, *pt_next;

	mutex_lock(&fwnet_device_mutex);

	fwnet_remove_peer(peer);

	if (list_empty(&dev->peer_list)) {
		net = dev->netdev;
		unregister_netdev(net);

		if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
			fw_core_remove_address_handler(&dev->handler);
		if (dev->broadcast_rcv_context) {
			fw_iso_context_stop(dev->broadcast_rcv_context);
			fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer,
					dev->card);
			fw_iso_context_destroy(dev->broadcast_rcv_context);
		}
		/* Release all packet tasks on the three task lists. */
		list_for_each_entry_safe(ptask, pt_next,
					 &dev->packet_list, pt_link) {
			dev_kfree_skb_any(ptask->skb);
			kmem_cache_free(fwnet_packet_task_cache, ptask);
		}
		list_for_each_entry_safe(ptask, pt_next,
					 &dev->broadcasted_list, pt_link) {
			dev_kfree_skb_any(ptask->skb);
			kmem_cache_free(fwnet_packet_task_cache, ptask);
		}
		list_for_each_entry_safe(ptask, pt_next,
					 &dev->sent_list, pt_link) {
			dev_kfree_skb_any(ptask->skb);
			kmem_cache_free(fwnet_packet_task_cache, ptask);
		}
		list_del(&dev->dev_link);

		free_netdev(net);
	}

	mutex_unlock(&fwnet_device_mutex);

	return 0;
}
1554
1555/*
1556 * FIXME abort partially sent fragmented datagrams,
1557 * discard partially received fragmented datagrams
1558 */
1559static void fwnet_update(struct fw_unit *unit)
1560{
1561 struct fw_device *device = fw_parent_device(unit);
1562 struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
1563 int generation;
1564
1565 generation = device->generation;
1566
1567 spin_lock_irq(&peer->dev->lock);
1568 peer->node_id = device->node_id;
1569 peer->generation = generation;
1570 spin_unlock_irq(&peer->dev->lock);
1571}
1572
/* Match any unit advertising the IANA specifier ID with the RFC 2734
 * software version in its unit directory. */
static const struct ieee1394_device_id fwnet_id_table[] = {
	{
		.match_flags  = IEEE1394_MATCH_SPECIFIER_ID |
				IEEE1394_MATCH_VERSION,
		.specifier_id = IANA_SPECIFIER_ID,
		.version      = RFC2734_SW_VERSION,
	},
	{ }
};
1582
/* Firewire bus driver glue: probe/remove per unit, update on bus reset. */
static struct fw_driver fwnet_driver = {
	.driver = {
		.owner  = THIS_MODULE,
		.name   = "net",
		.bus    = &fw_bus_type,
		.probe  = fwnet_probe,
		.remove = fwnet_remove,
	},
	.update   = fwnet_update,
	.id_table = fwnet_id_table,
};
1594
/* Config ROM unit directory advertising IPv4-over-1394 capability
 * (IANA specifier, RFC 2734 software version) with textual leaves. */
static const u32 rfc2374_unit_directory_data[] = {
	0x00040000,	/* directory_length		*/
	0x1200005e,	/* unit_specifier_id: IANA	*/
	0x81000003,	/* textual descriptor offset	*/
	0x13000001,	/* unit_sw_version: RFC 2734	*/
	0x81000005,	/* textual descriptor offset	*/
	0x00030000,	/* descriptor_length		*/
	0x00000000,	/* text				*/
	0x00000000,	/* minimal ASCII, en		*/
	0x49414e41,	/* I A N A			*/
	0x00030000,	/* descriptor_length		*/
	0x00000000,	/* text				*/
	0x00000000,	/* minimal ASCII, en		*/
	0x49507634,	/* I P v 4			*/
};
1610
/* Descriptor wrapper so the core publishes the unit directory above
 * in the local node's config ROM. */
static struct fw_descriptor rfc2374_unit_directory = {
	.length = ARRAY_SIZE(rfc2374_unit_directory_data),
	.key    = (CSR_DIRECTORY | CSR_UNIT) << 24,
	.data   = rfc2374_unit_directory_data
};
1616
/*
 * Module init: publish the RFC 2734 unit directory in our config ROM,
 * create the packet-task slab cache, and register the bus driver.
 * Each step is unwound in reverse order on failure.
 */
static int __init fwnet_init(void)
{
	int err;

	err = fw_core_add_descriptor(&rfc2374_unit_directory);
	if (err)
		return err;

	fwnet_packet_task_cache = kmem_cache_create("packet_task",
			sizeof(struct fwnet_packet_task), 0, 0, NULL);
	if (!fwnet_packet_task_cache) {
		err = -ENOMEM;
		goto out;
	}

	err = driver_register(&fwnet_driver.driver);
	if (!err)
		return 0;

	kmem_cache_destroy(fwnet_packet_task_cache);
out:
	fw_core_remove_descriptor(&rfc2374_unit_directory);

	return err;
}
1642module_init(fwnet_init);
1643
/* Module exit: tear down in the reverse order of fwnet_init(). */
static void __exit fwnet_cleanup(void)
{
	driver_unregister(&fwnet_driver.driver);
	kmem_cache_destroy(fwnet_packet_task_cache);
	fw_core_remove_descriptor(&rfc2374_unit_directory);
}
1651
1652MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>");
1653MODULE_DESCRIPTION("IPv4 over IEEE1394 as per RFC 2734");
1654MODULE_LICENSE("GPL");
1655MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index b5db8b883615..9c2e10082b79 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -140,7 +140,7 @@ static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
140 140
141 dev_dbg(&adap->dev, "Interrupt: %x\n", i); 141 dev_dbg(&adap->dev, "Interrupt: %x\n", i);
142 142
143 wake_up_interruptible(&cpm->i2c_wait); 143 wake_up(&cpm->i2c_wait);
144 144
145 return i ? IRQ_HANDLED : IRQ_NONE; 145 return i ? IRQ_HANDLED : IRQ_NONE;
146} 146}
@@ -364,12 +364,12 @@ static int cpm_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
364 dev_dbg(&adap->dev, "test ready.\n"); 364 dev_dbg(&adap->dev, "test ready.\n");
365 pmsg = &msgs[tptr]; 365 pmsg = &msgs[tptr];
366 if (pmsg->flags & I2C_M_RD) 366 if (pmsg->flags & I2C_M_RD)
367 ret = wait_event_interruptible_timeout(cpm->i2c_wait, 367 ret = wait_event_timeout(cpm->i2c_wait,
368 (in_be16(&tbdf[tptr].cbd_sc) & BD_SC_NAK) || 368 (in_be16(&tbdf[tptr].cbd_sc) & BD_SC_NAK) ||
369 !(in_be16(&rbdf[rptr].cbd_sc) & BD_SC_EMPTY), 369 !(in_be16(&rbdf[rptr].cbd_sc) & BD_SC_EMPTY),
370 1 * HZ); 370 1 * HZ);
371 else 371 else
372 ret = wait_event_interruptible_timeout(cpm->i2c_wait, 372 ret = wait_event_timeout(cpm->i2c_wait,
373 !(in_be16(&tbdf[tptr].cbd_sc) & BD_SC_READY), 373 !(in_be16(&tbdf[tptr].cbd_sc) & BD_SC_READY),
374 1 * HZ); 374 1 * HZ);
375 if (ret == 0) { 375 if (ret == 0) {
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index b606db85525d..ad8d2010c921 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -339,7 +339,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
339 * to get longer filter period for better noise suppression. 339 * to get longer filter period for better noise suppression.
340 * The filter is iclk (fclk for HS) period. 340 * The filter is iclk (fclk for HS) period.
341 */ 341 */
342 if (dev->speed > 400 || cpu_is_omap_2430()) 342 if (dev->speed > 400 || cpu_is_omap2430())
343 internal_clk = 19200; 343 internal_clk = 19200;
344 else if (dev->speed > 100) 344 else if (dev->speed > 100)
345 internal_clk = 9600; 345 internal_clk = 9600;
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index 03c86209446f..680e5975217f 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -389,8 +389,7 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
389 .init_chipset = init_chipset_cmd64x, 389 .init_chipset = init_chipset_cmd64x,
390 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, 390 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
391 .port_ops = &cmd648_port_ops, 391 .port_ops = &cmd648_port_ops,
392 .host_flags = IDE_HFLAG_SERIALIZE | 392 .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
393 IDE_HFLAG_ABUSE_PREFETCH,
394 .pio_mask = ATA_PIO5, 393 .pio_mask = ATA_PIO5,
395 .mwdma_mask = ATA_MWDMA2, 394 .mwdma_mask = ATA_MWDMA2,
396 .udma_mask = ATA_UDMA2, 395 .udma_mask = ATA_UDMA2,
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 95f45f9b8e5e..f102fcc7e52a 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -4,7 +4,7 @@ menu "IEEE 1394 (FireWire) support"
4source "drivers/firewire/Kconfig" 4source "drivers/firewire/Kconfig"
5 5
6config IEEE1394 6config IEEE1394
7 tristate "Stable FireWire stack" 7 tristate "Legacy alternative FireWire driver stack"
8 depends on PCI || BROKEN 8 depends on PCI || BROKEN
9 help 9 help
10 IEEE 1394 describes a high performance serial bus, which is also 10 IEEE 1394 describes a high performance serial bus, which is also
@@ -33,11 +33,9 @@ config IEEE1394_OHCI1394
33 module will be called ohci1394. 33 module will be called ohci1394.
34 34
35 NOTE: 35 NOTE:
36 36 If you want to install firewire-ohci and ohci1394 together, you
37 You should only build either ohci1394 or the new firewire-ohci driver, 37 should configure them only as modules and blacklist the driver(s)
38 but not both. If you nevertheless want to install both, you should 38 which you don't want to have auto-loaded. Add either
39 configure them only as modules and blacklist the driver(s) which you
40 don't want to have auto-loaded. Add either
41 39
42 blacklist firewire-ohci 40 blacklist firewire-ohci
43 or 41 or
@@ -46,12 +44,7 @@ config IEEE1394_OHCI1394
46 blacklist dv1394 44 blacklist dv1394
47 45
48 to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf 46 to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
49 depending on your distribution. The latter two modules should be 47 depending on your distribution.
50 blacklisted together with ohci1394 because they depend on ohci1394.
51
52 If you have an old modprobe which doesn't implement the blacklist
53 directive, use "install modulename /bin/true" for the modules to be
54 blacklisted.
55 48
56comment "PCILynx controller requires I2C" 49comment "PCILynx controller requires I2C"
57 depends on IEEE1394 && I2C=n 50 depends on IEEE1394 && I2C=n
@@ -105,7 +98,7 @@ config IEEE1394_ETH1394_ROM_ENTRY
105 default n 98 default n
106 99
107config IEEE1394_ETH1394 100config IEEE1394_ETH1394
108 tristate "IP over 1394" 101 tristate "IP networking over 1394 (experimental)"
109 depends on IEEE1394 && EXPERIMENTAL && INET 102 depends on IEEE1394 && EXPERIMENTAL && INET
110 select IEEE1394_ETH1394_ROM_ENTRY 103 select IEEE1394_ETH1394_ROM_ENTRY
111 help 104 help
diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/common/ir-keymaps.c
index 3fe158ac7bbf..4216328552f6 100644
--- a/drivers/media/common/ir-keymaps.c
+++ b/drivers/media/common/ir-keymaps.c
@@ -2750,3 +2750,26 @@ IR_KEYTAB_TYPE ir_codes_dm1105_nec[IR_KEYTAB_SIZE] = {
2750 [0x1b] = KEY_B, /*recall*/ 2750 [0x1b] = KEY_B, /*recall*/
2751}; 2751};
2752EXPORT_SYMBOL_GPL(ir_codes_dm1105_nec); 2752EXPORT_SYMBOL_GPL(ir_codes_dm1105_nec);
2753
2754/* EVGA inDtube
2755 Devin Heitmueller <devin.heitmueller@gmail.com>
2756 */
2757IR_KEYTAB_TYPE ir_codes_evga_indtube[IR_KEYTAB_SIZE] = {
2758 [0x12] = KEY_POWER,
2759 [0x02] = KEY_MODE, /* TV */
2760 [0x14] = KEY_MUTE,
2761 [0x1a] = KEY_CHANNELUP,
2762 [0x16] = KEY_TV2, /* PIP */
2763 [0x1d] = KEY_VOLUMEUP,
2764 [0x05] = KEY_CHANNELDOWN,
2765 [0x0f] = KEY_PLAYPAUSE,
2766 [0x19] = KEY_VOLUMEDOWN,
2767 [0x1c] = KEY_REWIND,
2768 [0x0d] = KEY_RECORD,
2769 [0x18] = KEY_FORWARD,
2770 [0x1e] = KEY_PREVIOUS,
2771 [0x1b] = KEY_STOP,
2772 [0x1f] = KEY_NEXT,
2773 [0x13] = KEY_CAMERA,
2774};
2775EXPORT_SYMBOL_GPL(ir_codes_evga_indtube);
diff --git a/drivers/media/dvb/frontends/stv0900.h b/drivers/media/dvb/frontends/stv0900.h
index 8a1332c2031d..bf4e9b633044 100644
--- a/drivers/media/dvb/frontends/stv0900.h
+++ b/drivers/media/dvb/frontends/stv0900.h
@@ -29,6 +29,11 @@
29#include <linux/dvb/frontend.h> 29#include <linux/dvb/frontend.h>
30#include "dvb_frontend.h" 30#include "dvb_frontend.h"
31 31
32struct stv0900_reg {
33 u16 addr;
34 u8 val;
35};
36
32struct stv0900_config { 37struct stv0900_config {
33 u8 demod_address; 38 u8 demod_address;
34 u32 xtal; 39 u32 xtal;
@@ -38,7 +43,7 @@ struct stv0900_config {
38 43
39 u8 path1_mode; 44 u8 path1_mode;
40 u8 path2_mode; 45 u8 path2_mode;
41 46 struct stv0900_reg *ts_config_regs;
42 u8 tun1_maddress;/* 0, 1, 2, 3 for 0xc0, 0xc2, 0xc4, 0xc6 */ 47 u8 tun1_maddress;/* 0, 1, 2, 3 for 0xc0, 0xc2, 0xc4, 0xc6 */
43 u8 tun2_maddress; 48 u8 tun2_maddress;
44 u8 tun1_adc;/* 1 for stv6110, 2 for stb6100 */ 49 u8 tun1_adc;/* 1 for stv6110, 2 for stb6100 */
diff --git a/drivers/media/dvb/frontends/stv0900_core.c b/drivers/media/dvb/frontends/stv0900_core.c
index 8499bcf7f251..1da045fbb4ef 100644
--- a/drivers/media/dvb/frontends/stv0900_core.c
+++ b/drivers/media/dvb/frontends/stv0900_core.c
@@ -149,31 +149,31 @@ void stv0900_write_reg(struct stv0900_internal *i_params, u16 reg_addr,
149 dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret); 149 dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret);
150} 150}
151 151
152u8 stv0900_read_reg(struct stv0900_internal *i_params, u16 reg_addr) 152u8 stv0900_read_reg(struct stv0900_internal *i_params, u16 reg)
153{ 153{
154 u8 data[2];
155 int ret; 154 int ret;
156 struct i2c_msg i2cmsg = { 155 u8 b0[] = { MSB(reg), LSB(reg) };
157 .addr = i_params->i2c_addr, 156 u8 buf = 0;
158 .flags = 0, 157 struct i2c_msg msg[] = {
159 .len = 2, 158 {
160 .buf = data, 159 .addr = i_params->i2c_addr,
160 .flags = 0,
161 .buf = b0,
162 .len = 2,
163 }, {
164 .addr = i_params->i2c_addr,
165 .flags = I2C_M_RD,
166 .buf = &buf,
167 .len = 1,
168 },
161 }; 169 };
162 170
163 data[0] = MSB(reg_addr); 171 ret = i2c_transfer(i_params->i2c_adap, msg, 2);
164 data[1] = LSB(reg_addr); 172 if (ret != 2)
165 173 dprintk(KERN_ERR "%s: i2c error %d, reg[0x%02x]\n",
166 ret = i2c_transfer(i_params->i2c_adap, &i2cmsg, 1); 174 __func__, ret, reg);
167 if (ret != 1)
168 dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret);
169
170 i2cmsg.flags = I2C_M_RD;
171 i2cmsg.len = 1;
172 ret = i2c_transfer(i_params->i2c_adap, &i2cmsg, 1);
173 if (ret != 1)
174 dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret);
175 175
176 return data[0]; 176 return buf;
177} 177}
178 178
179void extract_mask_pos(u32 label, u8 *mask, u8 *pos) 179void extract_mask_pos(u32 label, u8 *mask, u8 *pos)
@@ -712,6 +712,44 @@ static s32 stv0900_carr_get_quality(struct dvb_frontend *fe,
712 return c_n; 712 return c_n;
713} 713}
714 714
715static int stv0900_read_ucblocks(struct dvb_frontend *fe, u32 * ucblocks)
716{
717 struct stv0900_state *state = fe->demodulator_priv;
718 struct stv0900_internal *i_params = state->internal;
719 enum fe_stv0900_demod_num demod = state->demod;
720 u8 err_val1, err_val0;
721 s32 err_field1, err_field0;
722 u32 header_err_val = 0;
723
724 *ucblocks = 0x0;
725 if (stv0900_get_standard(fe, demod) == STV0900_DVBS2_STANDARD) {
726 /* DVB-S2 delineator errors count */
727
728 /* retreiving number for errnous headers */
729 dmd_reg(err_field0, R0900_P1_BBFCRCKO0,
730 R0900_P2_BBFCRCKO0);
731 dmd_reg(err_field1, R0900_P1_BBFCRCKO1,
732 R0900_P2_BBFCRCKO1);
733
734 err_val1 = stv0900_read_reg(i_params, err_field1);
735 err_val0 = stv0900_read_reg(i_params, err_field0);
736 header_err_val = (err_val1<<8) | err_val0;
737
738 /* retreiving number for errnous packets */
739 dmd_reg(err_field0, R0900_P1_UPCRCKO0,
740 R0900_P2_UPCRCKO0);
741 dmd_reg(err_field1, R0900_P1_UPCRCKO1,
742 R0900_P2_UPCRCKO1);
743
744 err_val1 = stv0900_read_reg(i_params, err_field1);
745 err_val0 = stv0900_read_reg(i_params, err_field0);
746 *ucblocks = (err_val1<<8) | err_val0;
747 *ucblocks += header_err_val;
748 }
749
750 return 0;
751}
752
715static int stv0900_read_snr(struct dvb_frontend *fe, u16 *snr) 753static int stv0900_read_snr(struct dvb_frontend *fe, u16 *snr)
716{ 754{
717 *snr = stv0900_carr_get_quality(fe, 755 *snr = stv0900_carr_get_quality(fe,
@@ -1355,7 +1393,7 @@ static enum fe_stv0900_error stv0900_init_internal(struct dvb_frontend *fe,
1355 struct stv0900_state *state = fe->demodulator_priv; 1393 struct stv0900_state *state = fe->demodulator_priv;
1356 enum fe_stv0900_error error = STV0900_NO_ERROR; 1394 enum fe_stv0900_error error = STV0900_NO_ERROR;
1357 enum fe_stv0900_error demodError = STV0900_NO_ERROR; 1395 enum fe_stv0900_error demodError = STV0900_NO_ERROR;
1358 int selosci; 1396 int selosci, i;
1359 1397
1360 struct stv0900_inode *temp_int = find_inode(state->i2c_adap, 1398 struct stv0900_inode *temp_int = find_inode(state->i2c_adap,
1361 state->config->demod_address); 1399 state->config->demod_address);
@@ -1402,7 +1440,23 @@ static enum fe_stv0900_error stv0900_init_internal(struct dvb_frontend *fe,
1402 stv0900_write_bits(state->internal, F0900_P1_ROLLOFF_CONTROL, p_init->rolloff); 1440 stv0900_write_bits(state->internal, F0900_P1_ROLLOFF_CONTROL, p_init->rolloff);
1403 stv0900_write_bits(state->internal, F0900_P2_ROLLOFF_CONTROL, p_init->rolloff); 1441 stv0900_write_bits(state->internal, F0900_P2_ROLLOFF_CONTROL, p_init->rolloff);
1404 1442
1405 stv0900_set_ts_parallel_serial(state->internal, p_init->path1_ts_clock, p_init->path2_ts_clock); 1443 state->internal->ts_config = p_init->ts_config;
1444 if (state->internal->ts_config == NULL)
1445 stv0900_set_ts_parallel_serial(state->internal,
1446 p_init->path1_ts_clock,
1447 p_init->path2_ts_clock);
1448 else {
1449 for (i = 0; state->internal->ts_config[i].addr != 0xffff; i++)
1450 stv0900_write_reg(state->internal,
1451 state->internal->ts_config[i].addr,
1452 state->internal->ts_config[i].val);
1453
1454 stv0900_write_bits(state->internal, F0900_P2_RST_HWARE, 1);
1455 stv0900_write_bits(state->internal, F0900_P2_RST_HWARE, 0);
1456 stv0900_write_bits(state->internal, F0900_P1_RST_HWARE, 1);
1457 stv0900_write_bits(state->internal, F0900_P1_RST_HWARE, 0);
1458 }
1459
1406 stv0900_write_bits(state->internal, F0900_P1_TUN_MADDRESS, p_init->tun1_maddress); 1460 stv0900_write_bits(state->internal, F0900_P1_TUN_MADDRESS, p_init->tun1_maddress);
1407 switch (p_init->tuner1_adc) { 1461 switch (p_init->tuner1_adc) {
1408 case 1: 1462 case 1:
@@ -1882,6 +1936,7 @@ static struct dvb_frontend_ops stv0900_ops = {
1882 .read_ber = stv0900_read_ber, 1936 .read_ber = stv0900_read_ber,
1883 .read_signal_strength = stv0900_read_signal_strength, 1937 .read_signal_strength = stv0900_read_signal_strength,
1884 .read_snr = stv0900_read_snr, 1938 .read_snr = stv0900_read_snr,
1939 .read_ucblocks = stv0900_read_ucblocks,
1885}; 1940};
1886 1941
1887struct dvb_frontend *stv0900_attach(const struct stv0900_config *config, 1942struct dvb_frontend *stv0900_attach(const struct stv0900_config *config,
@@ -1915,6 +1970,7 @@ struct dvb_frontend *stv0900_attach(const struct stv0900_config *config,
1915 init_params.tun1_iq_inversion = STV0900_IQ_NORMAL; 1970 init_params.tun1_iq_inversion = STV0900_IQ_NORMAL;
1916 init_params.tuner1_adc = config->tun1_adc; 1971 init_params.tuner1_adc = config->tun1_adc;
1917 init_params.path2_ts_clock = config->path2_mode; 1972 init_params.path2_ts_clock = config->path2_mode;
1973 init_params.ts_config = config->ts_config_regs;
1918 init_params.tun2_maddress = config->tun2_maddress; 1974 init_params.tun2_maddress = config->tun2_maddress;
1919 init_params.tuner2_adc = config->tun2_adc; 1975 init_params.tuner2_adc = config->tun2_adc;
1920 init_params.tun2_iq_inversion = STV0900_IQ_SWAPPED; 1976 init_params.tun2_iq_inversion = STV0900_IQ_SWAPPED;
diff --git a/drivers/media/dvb/frontends/stv0900_priv.h b/drivers/media/dvb/frontends/stv0900_priv.h
index 67dc8ec634e2..5ed7a145c7d3 100644
--- a/drivers/media/dvb/frontends/stv0900_priv.h
+++ b/drivers/media/dvb/frontends/stv0900_priv.h
@@ -271,6 +271,7 @@ struct stv0900_init_params{
271 271
272 /* IQ from the tuner2 to the demod */ 272 /* IQ from the tuner2 to the demod */
273 enum stv0900_iq_inversion tun2_iq_inversion; 273 enum stv0900_iq_inversion tun2_iq_inversion;
274 struct stv0900_reg *ts_config;
274}; 275};
275 276
276struct stv0900_search_params { 277struct stv0900_search_params {
@@ -363,6 +364,7 @@ struct stv0900_internal{
363 u8 i2c_addr; 364 u8 i2c_addr;
364 u8 clkmode;/* 0 for CLKI, 2 for XTALI */ 365 u8 clkmode;/* 0 for CLKI, 2 for XTALI */
365 u8 chip_id; 366 u8 chip_id;
367 struct stv0900_reg *ts_config;
366 enum fe_stv0900_error errs; 368 enum fe_stv0900_error errs;
367 int dmds_used; 369 int dmds_used;
368}; 370};
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
index 96ef745a2e4e..488bdfb34fb3 100644
--- a/drivers/media/dvb/frontends/stv090x.c
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -2674,7 +2674,7 @@ static u8 stv090x_optimize_carloop(struct stv090x_state *state, enum stv090x_mod
2674 2674
2675static u8 stv090x_optimize_carloop_short(struct stv090x_state *state) 2675static u8 stv090x_optimize_carloop_short(struct stv090x_state *state)
2676{ 2676{
2677 struct stv090x_short_frame_crloop *short_crl; 2677 struct stv090x_short_frame_crloop *short_crl = NULL;
2678 s32 index = 0; 2678 s32 index = 0;
2679 u8 aclc = 0x0b; 2679 u8 aclc = 0x0b;
2680 2680
@@ -2694,10 +2694,13 @@ static u8 stv090x_optimize_carloop_short(struct stv090x_state *state)
2694 break; 2694 break;
2695 } 2695 }
2696 2696
2697 if (state->dev_ver >= 0x30) 2697 if (state->dev_ver >= 0x30) {
2698 short_crl = stv090x_s2_short_crl_cut20; 2698 /* Cut 3.0 and up */
2699 else if (state->dev_ver >= 0x20)
2700 short_crl = stv090x_s2_short_crl_cut30; 2699 short_crl = stv090x_s2_short_crl_cut30;
2700 } else {
2701 /* Cut 2.0 and up: we don't support cuts older than 2.0 */
2702 short_crl = stv090x_s2_short_crl_cut20;
2703 }
2701 2704
2702 if (state->srate <= 3000000) 2705 if (state->srate <= 3000000)
2703 aclc = short_crl[index].crl_2; 2706 aclc = short_crl[index].crl_2;
diff --git a/drivers/media/dvb/frontends/tda10048.c b/drivers/media/dvb/frontends/tda10048.c
index 4302c563a6b8..cc8862ce4aae 100644
--- a/drivers/media/dvb/frontends/tda10048.c
+++ b/drivers/media/dvb/frontends/tda10048.c
@@ -210,6 +210,7 @@ static struct pll_tab {
210 { TDA10048_CLK_4000, TDA10048_IF_36130, 10, 0, 0 }, 210 { TDA10048_CLK_4000, TDA10048_IF_36130, 10, 0, 0 },
211 { TDA10048_CLK_16000, TDA10048_IF_3300, 10, 3, 0 }, 211 { TDA10048_CLK_16000, TDA10048_IF_3300, 10, 3, 0 },
212 { TDA10048_CLK_16000, TDA10048_IF_3500, 10, 3, 0 }, 212 { TDA10048_CLK_16000, TDA10048_IF_3500, 10, 3, 0 },
213 { TDA10048_CLK_16000, TDA10048_IF_3800, 10, 3, 0 },
213 { TDA10048_CLK_16000, TDA10048_IF_4000, 10, 3, 0 }, 214 { TDA10048_CLK_16000, TDA10048_IF_4000, 10, 3, 0 },
214 { TDA10048_CLK_16000, TDA10048_IF_4300, 10, 3, 0 }, 215 { TDA10048_CLK_16000, TDA10048_IF_4300, 10, 3, 0 },
215 { TDA10048_CLK_16000, TDA10048_IF_36130, 10, 3, 0 }, 216 { TDA10048_CLK_16000, TDA10048_IF_36130, 10, 3, 0 },
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index 32be382f0e97..a246903c3341 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -1422,8 +1422,8 @@ int smscore_gpio_configure(struct smscore_device_t *coredev, u8 PinNum,
1422 struct smscore_gpio_config *pGpioConfig) { 1422 struct smscore_gpio_config *pGpioConfig) {
1423 1423
1424 u32 totalLen; 1424 u32 totalLen;
1425 u32 TranslatedPinNum; 1425 u32 TranslatedPinNum = 0;
1426 u32 GroupNum; 1426 u32 GroupNum = 0;
1427 u32 ElectricChar; 1427 u32 ElectricChar;
1428 u32 groupCfg; 1428 u32 groupCfg;
1429 void *buffer; 1429 void *buffer;
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 393623818ade..3cd76dddb6aa 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -322,7 +322,9 @@ static int vidioc_g_tuner(struct file *file, void *priv,
322 v->rangehigh = FREQ_MAX * FREQ_MUL; 322 v->rangehigh = FREQ_MAX * FREQ_MUL;
323 v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO; 323 v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
324 if (r->tunchk & TEA5764_TUNCHK_STEREO) 324 if (r->tunchk & TEA5764_TUNCHK_STEREO)
325 v->rxsubchans = V4L2_TUNER_SUB_STEREO; 325 v->rxsubchans = V4L2_TUNER_SUB_STEREO;
326 else
327 v->rxsubchans = V4L2_TUNER_SUB_MONO;
326 v->audmode = tea5764_get_audout_mode(radio); 328 v->audmode = tea5764_get_audout_mode(radio);
327 v->signal = TEA5764_TUNCHK_LEVEL(r->tunchk) * 0xffff / 0xf; 329 v->signal = TEA5764_TUNCHK_LEVEL(r->tunchk) * 0xffff / 0xf;
328 v->afc = TEA5764_TUNCHK_IFCNT(r->tunchk); 330 v->afc = TEA5764_TUNCHK_IFCNT(r->tunchk);
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 94f440535c64..061e147f6f26 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -866,9 +866,13 @@ config USB_W9968CF
866 module will be called w9968cf. 866 module will be called w9968cf.
867 867
868config USB_OV511 868config USB_OV511
869 tristate "USB OV511 Camera support" 869 tristate "USB OV511 Camera support (DEPRECATED)"
870 depends on VIDEO_V4L1 870 depends on VIDEO_V4L1
871 ---help--- 871 ---help---
872 This driver is DEPRECATED please use the gspca ov519 module
873 instead. Note that for the ov511 / ov518 support of the gspca module
874 you need atleast version 0.6.0 of libv4l.
875
872 Say Y here if you want to connect this type of camera to your 876 Say Y here if you want to connect this type of camera to your
873 computer's USB port. See <file:Documentation/video4linux/ov511.txt> 877 computer's USB port. See <file:Documentation/video4linux/ov511.txt>
874 for more information and for a list of supported cameras. 878 for more information and for a list of supported cameras.
diff --git a/drivers/media/video/cx18/cx18-controls.c b/drivers/media/video/cx18/cx18-controls.c
index 8e35c3aed544..5136df198338 100644
--- a/drivers/media/video/cx18/cx18-controls.c
+++ b/drivers/media/video/cx18/cx18-controls.c
@@ -61,6 +61,8 @@ int cx18_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qctrl)
61 61
62 switch (qctrl->id) { 62 switch (qctrl->id) {
63 /* Standard V4L2 controls */ 63 /* Standard V4L2 controls */
64 case V4L2_CID_USER_CLASS:
65 return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0);
64 case V4L2_CID_BRIGHTNESS: 66 case V4L2_CID_BRIGHTNESS:
65 case V4L2_CID_HUE: 67 case V4L2_CID_HUE:
66 case V4L2_CID_SATURATION: 68 case V4L2_CID_SATURATION:
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index 6a9464079b4c..28f48f41f218 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -1052,22 +1052,13 @@ int cx231xx_set_audio_decoder_input(struct cx231xx *dev,
1052/* Set resolution of the video */ 1052/* Set resolution of the video */
1053int cx231xx_resolution_set(struct cx231xx *dev) 1053int cx231xx_resolution_set(struct cx231xx *dev)
1054{ 1054{
1055 int width, height;
1056 u32 hscale, vscale;
1057 int status = 0;
1058
1059 width = dev->width;
1060 height = dev->height;
1061
1062 get_scale(dev, width, height, &hscale, &vscale);
1063
1064 /* set horzontal scale */ 1055 /* set horzontal scale */
1065 status = vid_blk_write_word(dev, HSCALE_CTRL, hscale); 1056 int status = vid_blk_write_word(dev, HSCALE_CTRL, dev->hscale);
1057 if (status)
1058 return status;
1066 1059
1067 /* set vertical scale */ 1060 /* set vertical scale */
1068 status = vid_blk_write_word(dev, VSCALE_CTRL, vscale); 1061 return vid_blk_write_word(dev, VSCALE_CTRL, dev->vscale);
1069
1070 return status;
1071} 1062}
1072 1063
1073/****************************************************************************** 1064/******************************************************************************
@@ -2055,7 +2046,7 @@ int cx231xx_initialize_stream_xfer(struct cx231xx *dev, u32 media_type)
2055 2046
2056int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type) 2047int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type)
2057{ 2048{
2058 int rc; 2049 int rc = -1;
2059 u32 ep_mask = -1; 2050 u32 ep_mask = -1;
2060 struct pcb_config *pcb_config; 2051 struct pcb_config *pcb_config;
2061 2052
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index a23ae73fe634..609bae6098d3 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -893,9 +893,9 @@ static int check_dev(struct cx231xx *dev)
893 return 0; 893 return 0;
894} 894}
895 895
896void get_scale(struct cx231xx *dev, 896static void get_scale(struct cx231xx *dev,
897 unsigned int width, unsigned int height, 897 unsigned int width, unsigned int height,
898 unsigned int *hscale, unsigned int *vscale) 898 unsigned int *hscale, unsigned int *vscale)
899{ 899{
900 unsigned int maxw = norm_maxw(dev); 900 unsigned int maxw = norm_maxw(dev);
901 unsigned int maxh = norm_maxh(dev); 901 unsigned int maxh = norm_maxh(dev);
@@ -907,10 +907,6 @@ void get_scale(struct cx231xx *dev,
907 *vscale = (((unsigned long)maxh) << 12) / height - 4096L; 907 *vscale = (((unsigned long)maxh) << 12) / height - 4096L;
908 if (*vscale >= 0x4000) 908 if (*vscale >= 0x4000)
909 *vscale = 0x3fff; 909 *vscale = 0x3fff;
910
911 dev->hscale = *hscale;
912 dev->vscale = *vscale;
913
914} 910}
915 911
916/* ------------------------------------------------------------------ 912/* ------------------------------------------------------------------
@@ -955,8 +951,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
955{ 951{
956 struct cx231xx_fh *fh = priv; 952 struct cx231xx_fh *fh = priv;
957 struct cx231xx *dev = fh->dev; 953 struct cx231xx *dev = fh->dev;
958 int width = f->fmt.pix.width; 954 unsigned int width = f->fmt.pix.width;
959 int height = f->fmt.pix.height; 955 unsigned int height = f->fmt.pix.height;
960 unsigned int maxw = norm_maxw(dev); 956 unsigned int maxw = norm_maxw(dev);
961 unsigned int maxh = norm_maxh(dev); 957 unsigned int maxh = norm_maxh(dev);
962 unsigned int hscale, vscale; 958 unsigned int hscale, vscale;
@@ -971,17 +967,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
971 967
972 /* width must even because of the YUYV format 968 /* width must even because of the YUYV format
973 height must be even because of interlacing */ 969 height must be even because of interlacing */
974 height &= 0xfffe; 970 v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh, 1, 0);
975 width &= 0xfffe;
976
977 if (unlikely(height < 32))
978 height = 32;
979 if (unlikely(height > maxh))
980 height = maxh;
981 if (unlikely(width < 48))
982 width = 48;
983 if (unlikely(width > maxw))
984 width = maxw;
985 971
986 get_scale(dev, width, height, &hscale, &vscale); 972 get_scale(dev, width, height, &hscale, &vscale);
987 973
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index e38eb2d425f7..a0f823ac6b8d 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -722,9 +722,6 @@ int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input);
722int cx231xx_set_decoder_video_input(struct cx231xx *dev, u8 pin_type, u8 input); 722int cx231xx_set_decoder_video_input(struct cx231xx *dev, u8 pin_type, u8 input);
723int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev); 723int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev);
724int cx231xx_set_audio_input(struct cx231xx *dev, u8 input); 724int cx231xx_set_audio_input(struct cx231xx *dev, u8 input);
725void get_scale(struct cx231xx *dev,
726 unsigned int width, unsigned int height,
727 unsigned int *hscale, unsigned int *vscale);
728 725
729/* Provided by cx231xx-video.c */ 726/* Provided by cx231xx-video.c */
730int cx231xx_register_extension(struct cx231xx_ops *dev); 727int cx231xx_register_extension(struct cx231xx_ops *dev);
diff --git a/drivers/media/video/cx2341x.c b/drivers/media/video/cx2341x.c
index 8ded52946334..4c8e95853fa3 100644
--- a/drivers/media/video/cx2341x.c
+++ b/drivers/media/video/cx2341x.c
@@ -500,6 +500,8 @@ int cx2341x_ctrl_query(const struct cx2341x_mpeg_params *params,
500 int err; 500 int err;
501 501
502 switch (qctrl->id) { 502 switch (qctrl->id) {
503 case V4L2_CID_MPEG_CLASS:
504 return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0);
503 case V4L2_CID_MPEG_STREAM_TYPE: 505 case V4L2_CID_MPEG_STREAM_TYPE:
504 return v4l2_ctrl_query_fill(qctrl, 506 return v4l2_ctrl_query_fill(qctrl,
505 V4L2_MPEG_STREAM_TYPE_MPEG2_PS, 507 V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index e236df23370e..48a975134ac5 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -45,6 +45,7 @@
45#include "dibx000_common.h" 45#include "dibx000_common.h"
46#include "zl10353.h" 46#include "zl10353.h"
47#include "stv0900.h" 47#include "stv0900.h"
48#include "stv0900_reg.h"
48#include "stv6110.h" 49#include "stv6110.h"
49#include "lnbh24.h" 50#include "lnbh24.h"
50#include "cx24116.h" 51#include "cx24116.h"
@@ -242,12 +243,22 @@ static struct tda18271_std_map hauppauge_tda18271_std_map = {
242 .if_lvl = 6, .rfagc_top = 0x37 }, 243 .if_lvl = 6, .rfagc_top = 0x37 },
243}; 244};
244 245
246static struct tda18271_std_map hauppauge_hvr1200_tda18271_std_map = {
247 .dvbt_6 = { .if_freq = 3300, .agc_mode = 3, .std = 4,
248 .if_lvl = 1, .rfagc_top = 0x37, },
249 .dvbt_7 = { .if_freq = 3800, .agc_mode = 3, .std = 5,
250 .if_lvl = 1, .rfagc_top = 0x37, },
251 .dvbt_8 = { .if_freq = 4300, .agc_mode = 3, .std = 6,
252 .if_lvl = 1, .rfagc_top = 0x37, },
253};
254
245static struct tda18271_config hauppauge_tda18271_config = { 255static struct tda18271_config hauppauge_tda18271_config = {
246 .std_map = &hauppauge_tda18271_std_map, 256 .std_map = &hauppauge_tda18271_std_map,
247 .gate = TDA18271_GATE_ANALOG, 257 .gate = TDA18271_GATE_ANALOG,
248}; 258};
249 259
250static struct tda18271_config hauppauge_hvr1200_tuner_config = { 260static struct tda18271_config hauppauge_hvr1200_tuner_config = {
261 .std_map = &hauppauge_hvr1200_tda18271_std_map,
251 .gate = TDA18271_GATE_ANALOG, 262 .gate = TDA18271_GATE_ANALOG,
252}; 263};
253 264
@@ -370,13 +381,25 @@ static struct zl10353_config dvico_fusionhdtv_xc3028 = {
370 .disable_i2c_gate_ctrl = 1, 381 .disable_i2c_gate_ctrl = 1,
371}; 382};
372 383
384static struct stv0900_reg stv0900_ts_regs[] = {
385 { R0900_TSGENERAL, 0x00 },
386 { R0900_P1_TSSPEED, 0x40 },
387 { R0900_P2_TSSPEED, 0x40 },
388 { R0900_P1_TSCFGM, 0xc0 },
389 { R0900_P2_TSCFGM, 0xc0 },
390 { R0900_P1_TSCFGH, 0xe0 },
391 { R0900_P2_TSCFGH, 0xe0 },
392 { R0900_P1_TSCFGL, 0x20 },
393 { R0900_P2_TSCFGL, 0x20 },
394 { 0xffff, 0xff }, /* terminate */
395};
396
373static struct stv0900_config netup_stv0900_config = { 397static struct stv0900_config netup_stv0900_config = {
374 .demod_address = 0x68, 398 .demod_address = 0x68,
375 .xtal = 27000000, 399 .xtal = 27000000,
376 .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */ 400 .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */
377 .diseqc_mode = 2,/* 2/3 PWM */ 401 .diseqc_mode = 2,/* 2/3 PWM */
378 .path1_mode = 2,/*Serial continues clock */ 402 .ts_config_regs = stv0900_ts_regs,
379 .path2_mode = 2,/*Serial continues clock */
380 .tun1_maddress = 0,/* 0x60 */ 403 .tun1_maddress = 0,/* 0x60 */
381 .tun2_maddress = 3,/* 0x63 */ 404 .tun2_maddress = 3,/* 0x63 */
382 .tun1_adc = 1,/* 1 Vpp */ 405 .tun1_adc = 1,/* 1 Vpp */
@@ -736,7 +759,8 @@ static int dvb_register(struct cx23885_tsport *port)
736 if (!dvb_attach(lnbh24_attach, 759 if (!dvb_attach(lnbh24_attach,
737 fe0->dvb.frontend, 760 fe0->dvb.frontend,
738 &i2c_bus->i2c_adap, 761 &i2c_bus->i2c_adap,
739 LNBH24_PCL, 0, 0x09)) 762 LNBH24_PCL,
763 LNBH24_TTX, 0x09))
740 printk(KERN_ERR 764 printk(KERN_ERR
741 "No LNBH24 found!\n"); 765 "No LNBH24 found!\n");
742 766
@@ -756,7 +780,8 @@ static int dvb_register(struct cx23885_tsport *port)
756 if (!dvb_attach(lnbh24_attach, 780 if (!dvb_attach(lnbh24_attach,
757 fe0->dvb.frontend, 781 fe0->dvb.frontend,
758 &i2c_bus->i2c_adap, 782 &i2c_bus->i2c_adap,
759 LNBH24_PCL, 0, 0x0a)) 783 LNBH24_PCL,
784 LNBH24_TTX, 0x0a))
760 printk(KERN_ERR 785 printk(KERN_ERR
761 "No LNBH24 found!\n"); 786 "No LNBH24 found!\n");
762 787
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 66bbd2e71105..70836af3ab48 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -963,15 +963,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
963 } 963 }
964 964
965 f->fmt.pix.field = field; 965 f->fmt.pix.field = field;
966 if (f->fmt.pix.height < 32) 966 v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
967 f->fmt.pix.height = 32; 967 &f->fmt.pix.height, 32, maxh, 0, 0);
968 if (f->fmt.pix.height > maxh)
969 f->fmt.pix.height = maxh;
970 if (f->fmt.pix.width < 48)
971 f->fmt.pix.width = 48;
972 if (f->fmt.pix.width > maxw)
973 f->fmt.pix.width = maxw;
974 f->fmt.pix.width &= ~0x03;
975 f->fmt.pix.bytesperline = 968 f->fmt.pix.bytesperline =
976 (f->fmt.pix.width * fmt->depth) >> 3; 969 (f->fmt.pix.width * fmt->depth) >> 3;
977 f->fmt.pix.sizeimage = 970 f->fmt.pix.sizeimage =
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 94b7a52629d0..a5cc1c1fc2d6 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1524,33 +1524,45 @@ static const struct cx88_board cx88_boards[] = {
1524 }, 1524 },
1525 .mpeg = CX88_MPEG_DVB, 1525 .mpeg = CX88_MPEG_DVB,
1526 }, 1526 },
1527 /* Terry Wu <terrywu2009@gmail.com> */
1528 /* TV Audio : set GPIO 2, 18, 19 value to 0, 1, 0 */
1529 /* FM Audio : set GPIO 2, 18, 19 value to 0, 0, 0 */
1530 /* Line-in Audio : set GPIO 2, 18, 19 value to 0, 1, 1 */
1531 /* Mute Audio : set GPIO 2 value to 1 */
1527 [CX88_BOARD_WINFAST_TV2000_XP_GLOBAL] = { 1532 [CX88_BOARD_WINFAST_TV2000_XP_GLOBAL] = {
1528 .name = "Winfast TV2000 XP Global", 1533 .name = "Leadtek TV2000 XP Global",
1529 .tuner_type = TUNER_XC2028, 1534 .tuner_type = TUNER_XC2028,
1530 .tuner_addr = 0x61, 1535 .tuner_addr = 0x61,
1536 .radio_type = TUNER_XC2028,
1537 .radio_addr = 0x61,
1531 .input = { { 1538 .input = { {
1532 .type = CX88_VMUX_TELEVISION, 1539 .type = CX88_VMUX_TELEVISION,
1533 .vmux = 0, 1540 .vmux = 0,
1534 .gpio0 = 0x0400, /* pin 2:mute = 0 (off?) */ 1541 .gpio0 = 0x0400, /* pin 2 = 0 */
1535 .gpio1 = 0x0000, 1542 .gpio1 = 0x0000,
1536 .gpio2 = 0x0800, /* pin 19:audio = 0 (tv) */ 1543 .gpio2 = 0x0C04, /* pin 18 = 1, pin 19 = 0 */
1537 1544 .gpio3 = 0x0000,
1538 }, { 1545 }, {
1539 .type = CX88_VMUX_COMPOSITE1, 1546 .type = CX88_VMUX_COMPOSITE1,
1540 .vmux = 1, 1547 .vmux = 1,
1541 .gpio0 = 0x0400, /* probably? or 0x0404 to turn mute on */ 1548 .gpio0 = 0x0400, /* pin 2 = 0 */
1542 .gpio1 = 0x0000, 1549 .gpio1 = 0x0000,
1543 .gpio2 = 0x0808, /* pin 19:audio = 1 (line) */ 1550 .gpio2 = 0x0C0C, /* pin 18 = 1, pin 19 = 1 */
1544 1551 .gpio3 = 0x0000,
1545 }, { 1552 }, {
1546 .type = CX88_VMUX_SVIDEO, 1553 .type = CX88_VMUX_SVIDEO,
1547 .vmux = 2, 1554 .vmux = 2,
1555 .gpio0 = 0x0400, /* pin 2 = 0 */
1556 .gpio1 = 0x0000,
1557 .gpio2 = 0x0C0C, /* pin 18 = 1, pin 19 = 1 */
1558 .gpio3 = 0x0000,
1548 } }, 1559 } },
1549 .radio = { 1560 .radio = {
1550 .type = CX88_RADIO, 1561 .type = CX88_RADIO,
1551 .gpio0 = 0x004ff, 1562 .gpio0 = 0x0400, /* pin 2 = 0 */
1552 .gpio1 = 0x010ff, 1563 .gpio1 = 0x0000,
1553 .gpio2 = 0x0ff, 1564 .gpio2 = 0x0C00, /* pin 18 = 0, pin 19 = 0 */
1565 .gpio3 = 0x0000,
1554 }, 1566 },
1555 }, 1567 },
1556 [CX88_BOARD_POWERCOLOR_REAL_ANGEL] = { 1568 [CX88_BOARD_POWERCOLOR_REAL_ANGEL] = {
@@ -2438,6 +2450,41 @@ static const struct cx88_subid cx88_subids[] = {
2438 .subvendor = 0x107d, 2450 .subvendor = 0x107d,
2439 .subdevice = 0x6654, 2451 .subdevice = 0x6654,
2440 .card = CX88_BOARD_WINFAST_DTV1800H, 2452 .card = CX88_BOARD_WINFAST_DTV1800H,
2453 }, {
2454 /* PVR2000 PAL Model [107d:6630] */
2455 .subvendor = 0x107d,
2456 .subdevice = 0x6630,
2457 .card = CX88_BOARD_LEADTEK_PVR2000,
2458 }, {
2459 /* PVR2000 PAL Model [107d:6638] */
2460 .subvendor = 0x107d,
2461 .subdevice = 0x6638,
2462 .card = CX88_BOARD_LEADTEK_PVR2000,
2463 }, {
2464 /* PVR2000 NTSC Model [107d:6631] */
2465 .subvendor = 0x107d,
2466 .subdevice = 0x6631,
2467 .card = CX88_BOARD_LEADTEK_PVR2000,
2468 }, {
2469 /* PVR2000 NTSC Model [107d:6637] */
2470 .subvendor = 0x107d,
2471 .subdevice = 0x6637,
2472 .card = CX88_BOARD_LEADTEK_PVR2000,
2473 }, {
2474 /* PVR2000 NTSC Model [107d:663d] */
2475 .subvendor = 0x107d,
2476 .subdevice = 0x663d,
2477 .card = CX88_BOARD_LEADTEK_PVR2000,
2478 }, {
2479 /* DV2000 NTSC Model [107d:6621] */
2480 .subvendor = 0x107d,
2481 .subdevice = 0x6621,
2482 .card = CX88_BOARD_WINFAST_DV2000,
2483 }, {
2484 /* TV2000 XP Global [107d:6618] */
2485 .subvendor = 0x107d,
2486 .subdevice = 0x6618,
2487 .card = CX88_BOARD_WINFAST_TV2000_XP_GLOBAL,
2441 }, 2488 },
2442}; 2489};
2443 2490
@@ -2446,12 +2493,6 @@ static const struct cx88_subid cx88_subids[] = {
2446 2493
2447static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data) 2494static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data)
2448{ 2495{
2449 /* This is just for the "Winfast 2000XP Expert" board ATM; I don't have data on
2450 * any others.
2451 *
2452 * Byte 0 is 1 on the NTSC board.
2453 */
2454
2455 if (eeprom_data[4] != 0x7d || 2496 if (eeprom_data[4] != 0x7d ||
2456 eeprom_data[5] != 0x10 || 2497 eeprom_data[5] != 0x10 ||
2457 eeprom_data[7] != 0x66) { 2498 eeprom_data[7] != 0x66) {
@@ -2459,8 +2500,19 @@ static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data)
2459 return; 2500 return;
2460 } 2501 }
2461 2502
2462 core->board.tuner_type = (eeprom_data[6] == 0x13) ? 2503 /* Terry Wu <terrywu2009@gmail.com> */
2463 TUNER_PHILIPS_FM1236_MK3 : TUNER_PHILIPS_FM1216ME_MK3; 2504 switch (eeprom_data[6]) {
2505 case 0x13: /* SSID 6613 for TV2000 XP Expert NTSC Model */
2506 case 0x21: /* SSID 6621 for DV2000 NTSC Model */
2507 case 0x31: /* SSID 6631 for PVR2000 NTSC Model */
2508 case 0x37: /* SSID 6637 for PVR2000 NTSC Model */
2509 case 0x3d: /* SSID 6637 for PVR2000 NTSC Model */
2510 core->board.tuner_type = TUNER_PHILIPS_FM1236_MK3;
2511 break;
2512 default:
2513 core->board.tuner_type = TUNER_PHILIPS_FM1216ME_MK3;
2514 break;
2515 }
2464 2516
2465 info_printk(core, "Leadtek Winfast 2000XP Expert config: " 2517 info_printk(core, "Leadtek Winfast 2000XP Expert config: "
2466 "tuner=%d, eeprom[0]=0x%02x\n", 2518 "tuner=%d, eeprom[0]=0x%02x\n",
@@ -2713,7 +2765,6 @@ static int cx88_xc2028_tuner_callback(struct cx88_core *core,
2713{ 2765{
2714 /* Board-specific callbacks */ 2766 /* Board-specific callbacks */
2715 switch (core->boardnr) { 2767 switch (core->boardnr) {
2716 case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
2717 case CX88_BOARD_POWERCOLOR_REAL_ANGEL: 2768 case CX88_BOARD_POWERCOLOR_REAL_ANGEL:
2718 case CX88_BOARD_GENIATECH_X8000_MT: 2769 case CX88_BOARD_GENIATECH_X8000_MT:
2719 case CX88_BOARD_KWORLD_ATSC_120: 2770 case CX88_BOARD_KWORLD_ATSC_120:
@@ -2725,6 +2776,7 @@ static int cx88_xc2028_tuner_callback(struct cx88_core *core,
2725 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO: 2776 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
2726 case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO: 2777 case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
2727 return cx88_dvico_xc2028_callback(core, command, arg); 2778 return cx88_dvico_xc2028_callback(core, command, arg);
2779 case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
2728 case CX88_BOARD_WINFAST_DTV1800H: 2780 case CX88_BOARD_WINFAST_DTV1800H:
2729 return cx88_xc3028_winfast1800h_callback(core, command, arg); 2781 return cx88_xc3028_winfast1800h_callback(core, command, arg);
2730 } 2782 }
@@ -2914,6 +2966,7 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
2914 udelay(1000); 2966 udelay(1000);
2915 break; 2967 break;
2916 2968
2969 case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
2917 case CX88_BOARD_WINFAST_DTV1800H: 2970 case CX88_BOARD_WINFAST_DTV1800H:
2918 /* GPIO 12 (xc3028 tuner reset) */ 2971 /* GPIO 12 (xc3028 tuner reset) */
2919 cx_set(MO_GP1_IO, 0x1010); 2972 cx_set(MO_GP1_IO, 0x1010);
@@ -2950,6 +3003,7 @@ void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl)
2950 case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO: 3003 case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
2951 ctl->demod = XC3028_FE_OREN538; 3004 ctl->demod = XC3028_FE_OREN538;
2952 break; 3005 break;
3006 case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
2953 case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME: 3007 case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
2954 case CX88_BOARD_PROLINK_PV_8000GT: 3008 case CX88_BOARD_PROLINK_PV_8000GT:
2955 /* 3009 /*
@@ -2993,6 +3047,8 @@ static void cx88_card_setup(struct cx88_core *core)
2993 if (0 == core->i2c_rc) 3047 if (0 == core->i2c_rc)
2994 gdi_eeprom(core, eeprom); 3048 gdi_eeprom(core, eeprom);
2995 break; 3049 break;
3050 case CX88_BOARD_LEADTEK_PVR2000:
3051 case CX88_BOARD_WINFAST_DV2000:
2996 case CX88_BOARD_WINFAST2000XP_EXPERT: 3052 case CX88_BOARD_WINFAST2000XP_EXPERT:
2997 if (0 == core->i2c_rc) 3053 if (0 == core->i2c_rc)
2998 leadtek_eeprom(core, eeprom); 3054 leadtek_eeprom(core, eeprom);
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 0ccac702bea4..b12770848c00 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -1111,15 +1111,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
1111 } 1111 }
1112 1112
1113 f->fmt.pix.field = field; 1113 f->fmt.pix.field = field;
1114 if (f->fmt.pix.height < 32) 1114 v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
1115 f->fmt.pix.height = 32; 1115 &f->fmt.pix.height, 32, maxh, 0, 0);
1116 if (f->fmt.pix.height > maxh)
1117 f->fmt.pix.height = maxh;
1118 if (f->fmt.pix.width < 48)
1119 f->fmt.pix.width = 48;
1120 if (f->fmt.pix.width > maxw)
1121 f->fmt.pix.width = maxw;
1122 f->fmt.pix.width &= ~0x03;
1123 f->fmt.pix.bytesperline = 1116 f->fmt.pix.bytesperline =
1124 (f->fmt.pix.width * fmt->depth) >> 3; 1117 (f->fmt.pix.width * fmt->depth) >> 3;
1125 f->fmt.pix.sizeimage = 1118 f->fmt.pix.sizeimage =
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 00cc791a9e44..c43fdb9bc888 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -139,6 +139,24 @@ static struct em28xx_reg_seq kworld_330u_digital[] = {
139 { -1, -1, -1, -1}, 139 { -1, -1, -1, -1},
140}; 140};
141 141
142/* Evga inDtube
143 GPIO0 - Enable digital power (s5h1409) - low to enable
144 GPIO1 - Enable analog power (tvp5150/emp202) - low to enable
145 GPIO4 - xc3028 reset
146 GOP3 - s5h1409 reset
147 */
148static struct em28xx_reg_seq evga_indtube_analog[] = {
149 {EM28XX_R08_GPIO, 0x79, 0xff, 60},
150 { -1, -1, -1, -1},
151};
152
153static struct em28xx_reg_seq evga_indtube_digital[] = {
154 {EM28XX_R08_GPIO, 0x7a, 0xff, 1},
155 {EM2880_R04_GPO, 0x04, 0xff, 10},
156 {EM2880_R04_GPO, 0x0c, 0xff, 1},
157 { -1, -1, -1, -1},
158};
159
142/* Callback for the most boards */ 160/* Callback for the most boards */
143static struct em28xx_reg_seq default_tuner_gpio[] = { 161static struct em28xx_reg_seq default_tuner_gpio[] = {
144 {EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10}, 162 {EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10},
@@ -1449,6 +1467,33 @@ struct em28xx_board em28xx_boards[] = {
1449 .gpio = terratec_av350_unmute_gpio, 1467 .gpio = terratec_av350_unmute_gpio,
1450 } }, 1468 } },
1451 }, 1469 },
1470 [EM2882_BOARD_EVGA_INDTUBE] = {
1471 .name = "Evga inDtube",
1472 .tuner_type = TUNER_XC2028,
1473 .tuner_gpio = default_tuner_gpio,
1474 .decoder = EM28XX_TVP5150,
1475 .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */
1476 .mts_firmware = 1,
1477 .has_dvb = 1,
1478 .dvb_gpio = evga_indtube_digital,
1479 .ir_codes = ir_codes_evga_indtube,
1480 .input = { {
1481 .type = EM28XX_VMUX_TELEVISION,
1482 .vmux = TVP5150_COMPOSITE0,
1483 .amux = EM28XX_AMUX_VIDEO,
1484 .gpio = evga_indtube_analog,
1485 }, {
1486 .type = EM28XX_VMUX_COMPOSITE1,
1487 .vmux = TVP5150_COMPOSITE1,
1488 .amux = EM28XX_AMUX_LINE_IN,
1489 .gpio = evga_indtube_analog,
1490 }, {
1491 .type = EM28XX_VMUX_SVIDEO,
1492 .vmux = TVP5150_SVIDEO,
1493 .amux = EM28XX_AMUX_LINE_IN,
1494 .gpio = evga_indtube_analog,
1495 } },
1496 },
1452}; 1497};
1453const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards); 1498const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
1454 1499
@@ -1571,6 +1616,7 @@ static struct em28xx_hash_table em28xx_eeprom_hash[] = {
1571 {0x72cc5a8b, EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2, TUNER_YMEC_TVF_5533MF}, 1616 {0x72cc5a8b, EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2, TUNER_YMEC_TVF_5533MF},
1572 {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028}, 1617 {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028},
1573 {0x9567eb1a, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028}, 1618 {0x9567eb1a, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028},
1619 {0xcee44a99, EM2882_BOARD_EVGA_INDTUBE, TUNER_XC2028},
1574}; 1620};
1575 1621
1576/* I2C devicelist hash table for devices with generic USB IDs */ 1622/* I2C devicelist hash table for devices with generic USB IDs */
@@ -1834,6 +1880,10 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
1834 ctl->demod = XC3028_FE_CHINA; 1880 ctl->demod = XC3028_FE_CHINA;
1835 ctl->fname = XC2028_DEFAULT_FIRMWARE; 1881 ctl->fname = XC2028_DEFAULT_FIRMWARE;
1836 break; 1882 break;
1883 case EM2882_BOARD_EVGA_INDTUBE:
1884 ctl->demod = XC3028_FE_CHINA;
1885 ctl->fname = XC3028L_DEFAULT_FIRMWARE;
1886 break;
1837 default: 1887 default:
1838 ctl->demod = XC3028_FE_OREN538; 1888 ctl->demod = XC3028_FE_OREN538;
1839 } 1889 }
@@ -2101,6 +2151,12 @@ void em28xx_card_setup(struct em28xx *dev)
2101 case EM2880_BOARD_MSI_DIGIVOX_AD: 2151 case EM2880_BOARD_MSI_DIGIVOX_AD:
2102 if (!em28xx_hint_board(dev)) 2152 if (!em28xx_hint_board(dev))
2103 em28xx_set_model(dev); 2153 em28xx_set_model(dev);
2154
2155 /* In cases where we had to use a board hint, the call to
2156 em28xx_set_mode() in em28xx_pre_card_setup() was a no-op,
2157 so make the call now so the analog GPIOs are set properly
2158 before probing the i2c bus. */
2159 em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
2104 break; 2160 break;
2105 } 2161 }
2106 2162
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index 563dd2b1c8e9..e7b47c8da8f3 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -445,6 +445,7 @@ static int dvb_init(struct em28xx *dev)
445 } 445 }
446 break; 446 break;
447 case EM2883_BOARD_KWORLD_HYBRID_330U: 447 case EM2883_BOARD_KWORLD_HYBRID_330U:
448 case EM2882_BOARD_EVGA_INDTUBE:
448 dvb->frontend = dvb_attach(s5h1409_attach, 449 dvb->frontend = dvb_attach(s5h1409_attach,
449 &em28xx_s5h1409_with_xc3028, 450 &em28xx_s5h1409_with_xc3028,
450 &dev->i2c_adap); 451 &dev->i2c_adap);
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 882796e84dbc..8fe1beecfffa 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -687,8 +687,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
687{ 687{
688 struct em28xx_fh *fh = priv; 688 struct em28xx_fh *fh = priv;
689 struct em28xx *dev = fh->dev; 689 struct em28xx *dev = fh->dev;
690 int width = f->fmt.pix.width; 690 unsigned int width = f->fmt.pix.width;
691 int height = f->fmt.pix.height; 691 unsigned int height = f->fmt.pix.height;
692 unsigned int maxw = norm_maxw(dev); 692 unsigned int maxw = norm_maxw(dev);
693 unsigned int maxh = norm_maxh(dev); 693 unsigned int maxh = norm_maxh(dev);
694 unsigned int hscale, vscale; 694 unsigned int hscale, vscale;
@@ -701,34 +701,20 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
701 return -EINVAL; 701 return -EINVAL;
702 } 702 }
703 703
704 /* width must even because of the YUYV format
705 height must be even because of interlacing */
706 height &= 0xfffe;
707 width &= 0xfffe;
708
709 if (unlikely(height < 32))
710 height = 32;
711 if (unlikely(height > maxh))
712 height = maxh;
713 if (unlikely(width < 48))
714 width = 48;
715 if (unlikely(width > maxw))
716 width = maxw;
717
718 if (dev->board.is_em2800) { 704 if (dev->board.is_em2800) {
719 /* the em2800 can only scale down to 50% */ 705 /* the em2800 can only scale down to 50% */
720 if (height % (maxh / 2)) 706 height = height > (3 * maxh / 4) ? maxh : maxh / 2;
721 height = maxh; 707 width = width > (3 * maxw / 4) ? maxw : maxw / 2;
722 if (width % (maxw / 2)) 708 /* According to empiatech support the MaxPacketSize is too small
723 width = maxw; 709 * to support framesizes larger than 640x480 @ 30 fps or 640x576
724 /* according to empiatech support */ 710 * @ 25 fps. As this would cut of a part of the image we prefer
725 /* the MaxPacketSize is to small to support */ 711 * 360x576 or 360x480 for now */
726 /* framesizes larger than 640x480 @ 30 fps */
727 /* or 640x576 @ 25 fps. As this would cut */
728 /* of a part of the image we prefer */
729 /* 360x576 or 360x480 for now */
730 if (width == maxw && height == maxh) 712 if (width == maxw && height == maxh)
731 width /= 2; 713 width /= 2;
714 } else {
715 /* width must even because of the YUYV format
716 height must be even because of interlacing */
717 v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh, 1, 0);
732 } 718 }
733 719
734 get_scale(dev, width, height, &hscale, &vscale); 720 get_scale(dev, width, height, &hscale, &vscale);
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 8bf81be1da61..813ce45c2f99 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -106,6 +106,7 @@
106#define EM2860_BOARD_TERRATEC_GRABBY 67 106#define EM2860_BOARD_TERRATEC_GRABBY 67
107#define EM2860_BOARD_TERRATEC_AV350 68 107#define EM2860_BOARD_TERRATEC_AV350 68
108#define EM2882_BOARD_KWORLD_ATSC_315U 69 108#define EM2882_BOARD_KWORLD_ATSC_315U 69
109#define EM2882_BOARD_EVGA_INDTUBE 70
109 110
110/* Limits minimum and default number of buffers */ 111/* Limits minimum and default number of buffers */
111#define EM28XX_MIN_BUF 4 112#define EM28XX_MIN_BUF 4
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index f7e0355ad644..1e89600986c8 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -1042,13 +1042,11 @@ static int vidioc_queryctrl(struct file *file, void *priv,
1042 for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) { 1042 for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
1043 if (gspca_dev->ctrl_dis & (1 << i)) 1043 if (gspca_dev->ctrl_dis & (1 << i))
1044 continue; 1044 continue;
1045 if (ctrls->qctrl.id < id) 1045 if (gspca_dev->sd_desc->ctrls[i].qctrl.id < id)
1046 continue; 1046 continue;
1047 if (ctrls != NULL) { 1047 if (ctrls && gspca_dev->sd_desc->ctrls[i].qctrl.id
1048 if (gspca_dev->sd_desc->ctrls[i].qctrl.id
1049 > ctrls->qctrl.id) 1048 > ctrls->qctrl.id)
1050 continue; 1049 continue;
1051 }
1052 ctrls = &gspca_dev->sd_desc->ctrls[i]; 1050 ctrls = &gspca_dev->sd_desc->ctrls[i];
1053 } 1051 }
1054 } else { 1052 } else {
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 188866ac6cef..2f6e135d94bc 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -50,12 +50,18 @@ static int i2c_detect_tries = 10;
50struct sd { 50struct sd {
51 struct gspca_dev gspca_dev; /* !! must be the first item */ 51 struct gspca_dev gspca_dev; /* !! must be the first item */
52 52
53 __u8 packet_nr;
54
53 char bridge; 55 char bridge;
54#define BRIDGE_OV511 0 56#define BRIDGE_OV511 0
55#define BRIDGE_OV511PLUS 1 57#define BRIDGE_OV511PLUS 1
56#define BRIDGE_OV518 2 58#define BRIDGE_OV518 2
57#define BRIDGE_OV518PLUS 3 59#define BRIDGE_OV518PLUS 3
58#define BRIDGE_OV519 4 60#define BRIDGE_OV519 4
61#define BRIDGE_MASK 7
62
63 char invert_led;
64#define BRIDGE_INVERT_LED 8
59 65
60 /* Determined by sensor type */ 66 /* Determined by sensor type */
61 __u8 sif; 67 __u8 sif;
@@ -65,22 +71,25 @@ struct sd {
65 __u8 colors; 71 __u8 colors;
66 __u8 hflip; 72 __u8 hflip;
67 __u8 vflip; 73 __u8 vflip;
74 __u8 autobrightness;
75 __u8 freq;
68 76
69 __u8 stopped; /* Streaming is temporarily paused */ 77 __u8 stopped; /* Streaming is temporarily paused */
70 78
71 __u8 frame_rate; /* current Framerate (OV519 only) */ 79 __u8 frame_rate; /* current Framerate */
72 __u8 clockdiv; /* clockdiv override for OV519 only */ 80 __u8 clockdiv; /* clockdiv override */
73 81
74 char sensor; /* Type of image sensor chip (SEN_*) */ 82 char sensor; /* Type of image sensor chip (SEN_*) */
75#define SEN_UNKNOWN 0 83#define SEN_UNKNOWN 0
76#define SEN_OV6620 1 84#define SEN_OV6620 1
77#define SEN_OV6630 2 85#define SEN_OV6630 2
78#define SEN_OV7610 3 86#define SEN_OV66308AF 3
79#define SEN_OV7620 4 87#define SEN_OV7610 4
80#define SEN_OV7640 5 88#define SEN_OV7620 5
81#define SEN_OV7670 6 89#define SEN_OV7640 6
82#define SEN_OV76BE 7 90#define SEN_OV7670 7
83#define SEN_OV8610 8 91#define SEN_OV76BE 8
92#define SEN_OV8610 9
84}; 93};
85 94
86/* V4L2 controls supported by the driver */ 95/* V4L2 controls supported by the driver */
@@ -94,11 +103,17 @@ static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
94static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val); 103static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
95static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val); 104static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
96static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val); 105static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
106static int sd_setautobrightness(struct gspca_dev *gspca_dev, __s32 val);
107static int sd_getautobrightness(struct gspca_dev *gspca_dev, __s32 *val);
108static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
109static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
97static void setbrightness(struct gspca_dev *gspca_dev); 110static void setbrightness(struct gspca_dev *gspca_dev);
98static void setcontrast(struct gspca_dev *gspca_dev); 111static void setcontrast(struct gspca_dev *gspca_dev);
99static void setcolors(struct gspca_dev *gspca_dev); 112static void setcolors(struct gspca_dev *gspca_dev);
113static void setautobrightness(struct sd *sd);
114static void setfreq(struct sd *sd);
100 115
101static struct ctrl sd_ctrls[] = { 116static const struct ctrl sd_ctrls[] = {
102 { 117 {
103 { 118 {
104 .id = V4L2_CID_BRIGHTNESS, 119 .id = V4L2_CID_BRIGHTNESS,
@@ -141,7 +156,7 @@ static struct ctrl sd_ctrls[] = {
141 .set = sd_setcolors, 156 .set = sd_setcolors,
142 .get = sd_getcolors, 157 .get = sd_getcolors,
143 }, 158 },
144/* next controls work with ov7670 only */ 159/* The flip controls work with ov7670 only */
145#define HFLIP_IDX 3 160#define HFLIP_IDX 3
146 { 161 {
147 { 162 {
@@ -172,6 +187,51 @@ static struct ctrl sd_ctrls[] = {
172 .set = sd_setvflip, 187 .set = sd_setvflip,
173 .get = sd_getvflip, 188 .get = sd_getvflip,
174 }, 189 },
190#define AUTOBRIGHT_IDX 5
191 {
192 {
193 .id = V4L2_CID_AUTOBRIGHTNESS,
194 .type = V4L2_CTRL_TYPE_BOOLEAN,
195 .name = "Auto Brightness",
196 .minimum = 0,
197 .maximum = 1,
198 .step = 1,
199#define AUTOBRIGHT_DEF 1
200 .default_value = AUTOBRIGHT_DEF,
201 },
202 .set = sd_setautobrightness,
203 .get = sd_getautobrightness,
204 },
205#define FREQ_IDX 6
206 {
207 {
208 .id = V4L2_CID_POWER_LINE_FREQUENCY,
209 .type = V4L2_CTRL_TYPE_MENU,
210 .name = "Light frequency filter",
211 .minimum = 0,
212 .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
213 .step = 1,
214#define FREQ_DEF 0
215 .default_value = FREQ_DEF,
216 },
217 .set = sd_setfreq,
218 .get = sd_getfreq,
219 },
220#define OV7670_FREQ_IDX 7
221 {
222 {
223 .id = V4L2_CID_POWER_LINE_FREQUENCY,
224 .type = V4L2_CTRL_TYPE_MENU,
225 .name = "Light frequency filter",
226 .minimum = 0,
227 .maximum = 3, /* 0: 0, 1: 50Hz, 2:60Hz 3: Auto Hz */
228 .step = 1,
229#define OV7670_FREQ_DEF 3
230 .default_value = OV7670_FREQ_DEF,
231 },
232 .set = sd_setfreq,
233 .get = sd_getfreq,
234 },
175}; 235};
176 236
177static const struct v4l2_pix_format ov519_vga_mode[] = { 237static const struct v4l2_pix_format ov519_vga_mode[] = {
@@ -187,11 +247,21 @@ static const struct v4l2_pix_format ov519_vga_mode[] = {
187 .priv = 0}, 247 .priv = 0},
188}; 248};
189static const struct v4l2_pix_format ov519_sif_mode[] = { 249static const struct v4l2_pix_format ov519_sif_mode[] = {
250 {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
251 .bytesperline = 160,
252 .sizeimage = 160 * 120 * 3 / 8 + 590,
253 .colorspace = V4L2_COLORSPACE_JPEG,
254 .priv = 3},
190 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 255 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
191 .bytesperline = 176, 256 .bytesperline = 176,
192 .sizeimage = 176 * 144 * 3 / 8 + 590, 257 .sizeimage = 176 * 144 * 3 / 8 + 590,
193 .colorspace = V4L2_COLORSPACE_JPEG, 258 .colorspace = V4L2_COLORSPACE_JPEG,
194 .priv = 1}, 259 .priv = 1},
260 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
261 .bytesperline = 320,
262 .sizeimage = 320 * 240 * 3 / 8 + 590,
263 .colorspace = V4L2_COLORSPACE_JPEG,
264 .priv = 2},
195 {352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 265 {352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
196 .bytesperline = 352, 266 .bytesperline = 352,
197 .sizeimage = 352 * 288 * 3 / 8 + 590, 267 .sizeimage = 352 * 288 * 3 / 8 + 590,
@@ -199,42 +269,118 @@ static const struct v4l2_pix_format ov519_sif_mode[] = {
199 .priv = 0}, 269 .priv = 0},
200}; 270};
201 271
272/* Note some of the sizeimage values for the ov511 / ov518 may seem
273 larger then necessary, however they need to be this big as the ov511 /
274 ov518 always fills the entire isoc frame, using 0 padding bytes when
275 it doesn't have any data. So with low framerates the amount of data
276 transfered can become quite large (libv4l will remove all the 0 padding
277 in userspace). */
202static const struct v4l2_pix_format ov518_vga_mode[] = { 278static const struct v4l2_pix_format ov518_vga_mode[] = {
203 {320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE, 279 {320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
204 .bytesperline = 320, 280 .bytesperline = 320,
205 .sizeimage = 320 * 240 * 3 / 8 + 590, 281 .sizeimage = 320 * 240 * 3,
206 .colorspace = V4L2_COLORSPACE_JPEG, 282 .colorspace = V4L2_COLORSPACE_JPEG,
207 .priv = 1}, 283 .priv = 1},
208 {640, 480, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE, 284 {640, 480, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
209 .bytesperline = 640, 285 .bytesperline = 640,
210 .sizeimage = 640 * 480 * 3 / 8 + 590, 286 .sizeimage = 640 * 480 * 2,
211 .colorspace = V4L2_COLORSPACE_JPEG, 287 .colorspace = V4L2_COLORSPACE_JPEG,
212 .priv = 0}, 288 .priv = 0},
213}; 289};
214static const struct v4l2_pix_format ov518_sif_mode[] = { 290static const struct v4l2_pix_format ov518_sif_mode[] = {
291 {160, 120, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
292 .bytesperline = 160,
293 .sizeimage = 70000,
294 .colorspace = V4L2_COLORSPACE_JPEG,
295 .priv = 3},
215 {176, 144, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE, 296 {176, 144, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
216 .bytesperline = 176, 297 .bytesperline = 176,
217 .sizeimage = 40000, 298 .sizeimage = 70000,
218 .colorspace = V4L2_COLORSPACE_JPEG, 299 .colorspace = V4L2_COLORSPACE_JPEG,
219 .priv = 1}, 300 .priv = 1},
301 {320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
302 .bytesperline = 320,
303 .sizeimage = 320 * 240 * 3,
304 .colorspace = V4L2_COLORSPACE_JPEG,
305 .priv = 2},
220 {352, 288, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE, 306 {352, 288, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
221 .bytesperline = 352, 307 .bytesperline = 352,
222 .sizeimage = 352 * 288 * 3 / 8 + 590, 308 .sizeimage = 352 * 288 * 3,
223 .colorspace = V4L2_COLORSPACE_JPEG, 309 .colorspace = V4L2_COLORSPACE_JPEG,
224 .priv = 0}, 310 .priv = 0},
225}; 311};
226 312
313static const struct v4l2_pix_format ov511_vga_mode[] = {
314 {320, 240, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
315 .bytesperline = 320,
316 .sizeimage = 320 * 240 * 3,
317 .colorspace = V4L2_COLORSPACE_JPEG,
318 .priv = 1},
319 {640, 480, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
320 .bytesperline = 640,
321 .sizeimage = 640 * 480 * 2,
322 .colorspace = V4L2_COLORSPACE_JPEG,
323 .priv = 0},
324};
325static const struct v4l2_pix_format ov511_sif_mode[] = {
326 {160, 120, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
327 .bytesperline = 160,
328 .sizeimage = 70000,
329 .colorspace = V4L2_COLORSPACE_JPEG,
330 .priv = 3},
331 {176, 144, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
332 .bytesperline = 176,
333 .sizeimage = 70000,
334 .colorspace = V4L2_COLORSPACE_JPEG,
335 .priv = 1},
336 {320, 240, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
337 .bytesperline = 320,
338 .sizeimage = 320 * 240 * 3,
339 .colorspace = V4L2_COLORSPACE_JPEG,
340 .priv = 2},
341 {352, 288, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
342 .bytesperline = 352,
343 .sizeimage = 352 * 288 * 3,
344 .colorspace = V4L2_COLORSPACE_JPEG,
345 .priv = 0},
346};
227 347
228/* Registers common to OV511 / OV518 */ 348/* Registers common to OV511 / OV518 */
349#define R51x_FIFO_PSIZE 0x30 /* 2 bytes wide w/ OV518(+) */
229#define R51x_SYS_RESET 0x50 350#define R51x_SYS_RESET 0x50
351 /* Reset type flags */
352 #define OV511_RESET_OMNICE 0x08
230#define R51x_SYS_INIT 0x53 353#define R51x_SYS_INIT 0x53
231#define R51x_SYS_SNAP 0x52 354#define R51x_SYS_SNAP 0x52
232#define R51x_SYS_CUST_ID 0x5F 355#define R51x_SYS_CUST_ID 0x5F
233#define R51x_COMP_LUT_BEGIN 0x80 356#define R51x_COMP_LUT_BEGIN 0x80
234 357
235/* OV511 Camera interface register numbers */ 358/* OV511 Camera interface register numbers */
359#define R511_CAM_DELAY 0x10
360#define R511_CAM_EDGE 0x11
361#define R511_CAM_PXCNT 0x12
362#define R511_CAM_LNCNT 0x13
363#define R511_CAM_PXDIV 0x14
364#define R511_CAM_LNDIV 0x15
365#define R511_CAM_UV_EN 0x16
366#define R511_CAM_LINE_MODE 0x17
367#define R511_CAM_OPTS 0x18
368
369#define R511_SNAP_FRAME 0x19
370#define R511_SNAP_PXCNT 0x1A
371#define R511_SNAP_LNCNT 0x1B
372#define R511_SNAP_PXDIV 0x1C
373#define R511_SNAP_LNDIV 0x1D
374#define R511_SNAP_UV_EN 0x1E
375#define R511_SNAP_UV_EN 0x1E
376#define R511_SNAP_OPTS 0x1F
377
378#define R511_DRAM_FLOW_CTL 0x20
379#define R511_FIFO_OPTS 0x31
380#define R511_I2C_CTL 0x40
236#define R511_SYS_LED_CTL 0x55 /* OV511+ only */ 381#define R511_SYS_LED_CTL 0x55 /* OV511+ only */
237#define OV511_RESET_NOREGS 0x3F /* All but OV511 & regs */ 382#define R511_COMP_EN 0x78
383#define R511_COMP_LUT_EN 0x79
238 384
239/* OV518 Camera interface register numbers */ 385/* OV518 Camera interface register numbers */
240#define R518_GPIO_OUT 0x56 /* OV518(+) only */ 386#define R518_GPIO_OUT 0x56 /* OV518(+) only */
@@ -383,7 +529,7 @@ static const struct ov_i2c_regvals norm_6x20[] = {
383 { 0x28, 0x05 }, 529 { 0x28, 0x05 },
384 { 0x2a, 0x04 }, /* Disable framerate adjust */ 530 { 0x2a, 0x04 }, /* Disable framerate adjust */
385/* { 0x2b, 0xac }, * Framerate; Set 2a[7] first */ 531/* { 0x2b, 0xac }, * Framerate; Set 2a[7] first */
386 { 0x2d, 0x99 }, 532 { 0x2d, 0x85 },
387 { 0x33, 0xa0 }, /* Color Processing Parameter */ 533 { 0x33, 0xa0 }, /* Color Processing Parameter */
388 { 0x34, 0xd2 }, /* Max A/D range */ 534 { 0x34, 0xd2 }, /* Max A/D range */
389 { 0x38, 0x8b }, 535 { 0x38, 0x8b },
@@ -416,7 +562,7 @@ static const struct ov_i2c_regvals norm_6x30[] = {
416 { 0x07, 0x2d }, /* Sharpness */ 562 { 0x07, 0x2d }, /* Sharpness */
417 { 0x0c, 0x20 }, 563 { 0x0c, 0x20 },
418 { 0x0d, 0x20 }, 564 { 0x0d, 0x20 },
419 { 0x0e, 0x20 }, 565 { 0x0e, 0xa0 }, /* Was 0x20, bit7 enables a 2x gain which we need */
420 { 0x0f, 0x05 }, 566 { 0x0f, 0x05 },
421 { 0x10, 0x9a }, 567 { 0x10, 0x9a },
422 { 0x11, 0x00 }, /* Pixel clock = fastest */ 568 { 0x11, 0x00 }, /* Pixel clock = fastest */
@@ -558,7 +704,7 @@ static const struct ov_i2c_regvals norm_7620[] = {
558 { 0x23, 0x00 }, 704 { 0x23, 0x00 },
559 { 0x26, 0xa2 }, 705 { 0x26, 0xa2 },
560 { 0x27, 0xea }, 706 { 0x27, 0xea },
561 { 0x28, 0x20 }, 707 { 0x28, 0x22 }, /* Was 0x20, bit1 enables a 2x gain which we need */
562 { 0x29, 0x00 }, 708 { 0x29, 0x00 },
563 { 0x2a, 0x10 }, 709 { 0x2a, 0x10 },
564 { 0x2b, 0x00 }, 710 { 0x2b, 0x00 },
@@ -999,13 +1145,128 @@ static int ov518_reg_w32(struct sd *sd, __u16 index, u32 value, int n)
999 return ret; 1145 return ret;
1000} 1146}
1001 1147
1148static int ov511_i2c_w(struct sd *sd, __u8 reg, __u8 value)
1149{
1150 int rc, retries;
1151
1152 PDEBUG(D_USBO, "i2c 0x%02x -> [0x%02x]", value, reg);
1153
1154 /* Three byte write cycle */
1155 for (retries = 6; ; ) {
1156 /* Select camera register */
1157 rc = reg_w(sd, R51x_I2C_SADDR_3, reg);
1158 if (rc < 0)
1159 return rc;
1160
1161 /* Write "value" to I2C data port of OV511 */
1162 rc = reg_w(sd, R51x_I2C_DATA, value);
1163 if (rc < 0)
1164 return rc;
1165
1166 /* Initiate 3-byte write cycle */
1167 rc = reg_w(sd, R511_I2C_CTL, 0x01);
1168 if (rc < 0)
1169 return rc;
1170
1171 do
1172 rc = reg_r(sd, R511_I2C_CTL);
1173 while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
1174
1175 if (rc < 0)
1176 return rc;
1177
1178 if ((rc & 2) == 0) /* Ack? */
1179 break;
1180 if (--retries < 0) {
1181 PDEBUG(D_USBO, "i2c write retries exhausted");
1182 return -1;
1183 }
1184 }
1185
1186 return 0;
1187}
1188
1189static int ov511_i2c_r(struct sd *sd, __u8 reg)
1190{
1191 int rc, value, retries;
1192
1193 /* Two byte write cycle */
1194 for (retries = 6; ; ) {
1195 /* Select camera register */
1196 rc = reg_w(sd, R51x_I2C_SADDR_2, reg);
1197 if (rc < 0)
1198 return rc;
1199
1200 /* Initiate 2-byte write cycle */
1201 rc = reg_w(sd, R511_I2C_CTL, 0x03);
1202 if (rc < 0)
1203 return rc;
1204
1205 do
1206 rc = reg_r(sd, R511_I2C_CTL);
1207 while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
1208
1209 if (rc < 0)
1210 return rc;
1211
1212 if ((rc & 2) == 0) /* Ack? */
1213 break;
1214
1215 /* I2C abort */
1216 reg_w(sd, R511_I2C_CTL, 0x10);
1217
1218 if (--retries < 0) {
1219 PDEBUG(D_USBI, "i2c write retries exhausted");
1220 return -1;
1221 }
1222 }
1223
1224 /* Two byte read cycle */
1225 for (retries = 6; ; ) {
1226 /* Initiate 2-byte read cycle */
1227 rc = reg_w(sd, R511_I2C_CTL, 0x05);
1228 if (rc < 0)
1229 return rc;
1230
1231 do
1232 rc = reg_r(sd, R511_I2C_CTL);
1233 while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
1234
1235 if (rc < 0)
1236 return rc;
1237
1238 if ((rc & 2) == 0) /* Ack? */
1239 break;
1240
1241 /* I2C abort */
1242 rc = reg_w(sd, R511_I2C_CTL, 0x10);
1243 if (rc < 0)
1244 return rc;
1245
1246 if (--retries < 0) {
1247 PDEBUG(D_USBI, "i2c read retries exhausted");
1248 return -1;
1249 }
1250 }
1251
1252 value = reg_r(sd, R51x_I2C_DATA);
1253
1254 PDEBUG(D_USBI, "i2c [0x%02X] -> 0x%02X", reg, value);
1255
1256 /* This is needed to make i2c_w() work */
1257 rc = reg_w(sd, R511_I2C_CTL, 0x05);
1258 if (rc < 0)
1259 return rc;
1260
1261 return value;
1262}
1002 1263
1003/* 1264/*
1004 * The OV518 I2C I/O procedure is different, hence, this function. 1265 * The OV518 I2C I/O procedure is different, hence, this function.
1005 * This is normally only called from i2c_w(). Note that this function 1266 * This is normally only called from i2c_w(). Note that this function
1006 * always succeeds regardless of whether the sensor is present and working. 1267 * always succeeds regardless of whether the sensor is present and working.
1007 */ 1268 */
1008static int i2c_w(struct sd *sd, 1269static int ov518_i2c_w(struct sd *sd,
1009 __u8 reg, 1270 __u8 reg,
1010 __u8 value) 1271 __u8 value)
1011{ 1272{
@@ -1040,7 +1301,7 @@ static int i2c_w(struct sd *sd,
1040 * This is normally only called from i2c_r(). Note that this function 1301 * This is normally only called from i2c_r(). Note that this function
1041 * always succeeds regardless of whether the sensor is present and working. 1302 * always succeeds regardless of whether the sensor is present and working.
1042 */ 1303 */
1043static int i2c_r(struct sd *sd, __u8 reg) 1304static int ov518_i2c_r(struct sd *sd, __u8 reg)
1044{ 1305{
1045 int rc, value; 1306 int rc, value;
1046 1307
@@ -1063,6 +1324,34 @@ static int i2c_r(struct sd *sd, __u8 reg)
1063 return value; 1324 return value;
1064} 1325}
1065 1326
1327static int i2c_w(struct sd *sd, __u8 reg, __u8 value)
1328{
1329 switch (sd->bridge) {
1330 case BRIDGE_OV511:
1331 case BRIDGE_OV511PLUS:
1332 return ov511_i2c_w(sd, reg, value);
1333 case BRIDGE_OV518:
1334 case BRIDGE_OV518PLUS:
1335 case BRIDGE_OV519:
1336 return ov518_i2c_w(sd, reg, value);
1337 }
1338 return -1; /* Should never happen */
1339}
1340
1341static int i2c_r(struct sd *sd, __u8 reg)
1342{
1343 switch (sd->bridge) {
1344 case BRIDGE_OV511:
1345 case BRIDGE_OV511PLUS:
1346 return ov511_i2c_r(sd, reg);
1347 case BRIDGE_OV518:
1348 case BRIDGE_OV518PLUS:
1349 case BRIDGE_OV519:
1350 return ov518_i2c_r(sd, reg);
1351 }
1352 return -1; /* Should never happen */
1353}
1354
1066/* Writes bits at positions specified by mask to an I2C reg. Bits that are in 1355/* Writes bits at positions specified by mask to an I2C reg. Bits that are in
1067 * the same position as 1's in "mask" are cleared and set to "value". Bits 1356 * the same position as 1's in "mask" are cleared and set to "value". Bits
1068 * that are in the same position as 0's in "mask" are preserved, regardless 1357 * that are in the same position as 0's in "mask" are preserved, regardless
@@ -1242,7 +1531,6 @@ static int ov8xx0_configure(struct sd *sd)
1242 } 1531 }
1243 1532
1244 /* Set sensor-specific vars */ 1533 /* Set sensor-specific vars */
1245/* sd->sif = 0; already done */
1246 return 0; 1534 return 0;
1247} 1535}
1248 1536
@@ -1279,15 +1567,13 @@ static int ov7xx0_configure(struct sd *sd)
1279 } 1567 }
1280 } else if ((rc & 3) == 1) { 1568 } else if ((rc & 3) == 1) {
1281 /* I don't know what's different about the 76BE yet. */ 1569 /* I don't know what's different about the 76BE yet. */
1282 if (i2c_r(sd, 0x15) & 1) 1570 if (i2c_r(sd, 0x15) & 1) {
1283 PDEBUG(D_PROBE, "Sensor is an OV7620AE"); 1571 PDEBUG(D_PROBE, "Sensor is an OV7620AE");
1284 else 1572 sd->sensor = SEN_OV7620;
1573 } else {
1285 PDEBUG(D_PROBE, "Sensor is an OV76BE"); 1574 PDEBUG(D_PROBE, "Sensor is an OV76BE");
1286 1575 sd->sensor = SEN_OV76BE;
1287 /* OV511+ will return all zero isoc data unless we 1576 }
1288 * configure the sensor as a 7620. Someone needs to
1289 * find the exact reg. setting that causes this. */
1290 sd->sensor = SEN_OV76BE;
1291 } else if ((rc & 3) == 0) { 1577 } else if ((rc & 3) == 0) {
1292 /* try to read product id registers */ 1578 /* try to read product id registers */
1293 high = i2c_r(sd, 0x0a); 1579 high = i2c_r(sd, 0x0a);
@@ -1333,7 +1619,6 @@ static int ov7xx0_configure(struct sd *sd)
1333 } 1619 }
1334 1620
1335 /* Set sensor-specific vars */ 1621 /* Set sensor-specific vars */
1336/* sd->sif = 0; already done */
1337 return 0; 1622 return 0;
1338} 1623}
1339 1624
@@ -1362,13 +1647,14 @@ static int ov6xx0_configure(struct sd *sd)
1362 break; 1647 break;
1363 case 0x01: 1648 case 0x01:
1364 sd->sensor = SEN_OV6620; 1649 sd->sensor = SEN_OV6620;
1650 PDEBUG(D_PROBE, "Sensor is an OV6620");
1365 break; 1651 break;
1366 case 0x02: 1652 case 0x02:
1367 sd->sensor = SEN_OV6630; 1653 sd->sensor = SEN_OV6630;
1368 PDEBUG(D_PROBE, "Sensor is an OV66308AE"); 1654 PDEBUG(D_PROBE, "Sensor is an OV66308AE");
1369 break; 1655 break;
1370 case 0x03: 1656 case 0x03:
1371 sd->sensor = SEN_OV6630; 1657 sd->sensor = SEN_OV66308AF;
1372 PDEBUG(D_PROBE, "Sensor is an OV66308AF"); 1658 PDEBUG(D_PROBE, "Sensor is an OV66308AF");
1373 break; 1659 break;
1374 case 0x90: 1660 case 0x90:
@@ -1391,6 +1677,9 @@ static int ov6xx0_configure(struct sd *sd)
1391/* Turns on or off the LED. Only has an effect with OV511+/OV518(+)/OV519 */ 1677/* Turns on or off the LED. Only has an effect with OV511+/OV518(+)/OV519 */
1392static void ov51x_led_control(struct sd *sd, int on) 1678static void ov51x_led_control(struct sd *sd, int on)
1393{ 1679{
1680 if (sd->invert_led)
1681 on = !on;
1682
1394 switch (sd->bridge) { 1683 switch (sd->bridge) {
1395 /* OV511 has no LED control */ 1684 /* OV511 has no LED control */
1396 case BRIDGE_OV511PLUS: 1685 case BRIDGE_OV511PLUS:
@@ -1406,9 +1695,31 @@ static void ov51x_led_control(struct sd *sd, int on)
1406 } 1695 }
1407} 1696}
1408 1697
1409/* OV518 quantization tables are 8x4 (instead of 8x8) */ 1698static int ov51x_upload_quan_tables(struct sd *sd)
1410static int ov518_upload_quan_tables(struct sd *sd)
1411{ 1699{
1700 const unsigned char yQuanTable511[] = {
1701 0, 1, 1, 2, 2, 3, 3, 4,
1702 1, 1, 1, 2, 2, 3, 4, 4,
1703 1, 1, 2, 2, 3, 4, 4, 4,
1704 2, 2, 2, 3, 4, 4, 4, 4,
1705 2, 2, 3, 4, 4, 5, 5, 5,
1706 3, 3, 4, 4, 5, 5, 5, 5,
1707 3, 4, 4, 4, 5, 5, 5, 5,
1708 4, 4, 4, 4, 5, 5, 5, 5
1709 };
1710
1711 const unsigned char uvQuanTable511[] = {
1712 0, 2, 2, 3, 4, 4, 4, 4,
1713 2, 2, 2, 4, 4, 4, 4, 4,
1714 2, 2, 3, 4, 4, 4, 4, 4,
1715 3, 4, 4, 4, 4, 4, 4, 4,
1716 4, 4, 4, 4, 4, 4, 4, 4,
1717 4, 4, 4, 4, 4, 4, 4, 4,
1718 4, 4, 4, 4, 4, 4, 4, 4,
1719 4, 4, 4, 4, 4, 4, 4, 4
1720 };
1721
1722 /* OV518 quantization tables are 8x4 (instead of 8x8) */
1412 const unsigned char yQuanTable518[] = { 1723 const unsigned char yQuanTable518[] = {
1413 5, 4, 5, 6, 6, 7, 7, 7, 1724 5, 4, 5, 6, 6, 7, 7, 7,
1414 5, 5, 5, 5, 6, 7, 7, 7, 1725 5, 5, 5, 5, 6, 7, 7, 7,
@@ -1423,14 +1734,23 @@ static int ov518_upload_quan_tables(struct sd *sd)
1423 7, 7, 7, 7, 7, 7, 8, 8 1734 7, 7, 7, 7, 7, 7, 8, 8
1424 }; 1735 };
1425 1736
1426 const unsigned char *pYTable = yQuanTable518; 1737 const unsigned char *pYTable, *pUVTable;
1427 const unsigned char *pUVTable = uvQuanTable518;
1428 unsigned char val0, val1; 1738 unsigned char val0, val1;
1429 int i, rc, reg = R51x_COMP_LUT_BEGIN; 1739 int i, size, rc, reg = R51x_COMP_LUT_BEGIN;
1430 1740
1431 PDEBUG(D_PROBE, "Uploading quantization tables"); 1741 PDEBUG(D_PROBE, "Uploading quantization tables");
1432 1742
1433 for (i = 0; i < 16; i++) { 1743 if (sd->bridge == BRIDGE_OV511 || sd->bridge == BRIDGE_OV511PLUS) {
1744 pYTable = yQuanTable511;
1745 pUVTable = uvQuanTable511;
1746 size = 32;
1747 } else {
1748 pYTable = yQuanTable518;
1749 pUVTable = uvQuanTable518;
1750 size = 16;
1751 }
1752
1753 for (i = 0; i < size; i++) {
1434 val0 = *pYTable++; 1754 val0 = *pYTable++;
1435 val1 = *pYTable++; 1755 val1 = *pYTable++;
1436 val0 &= 0x0f; 1756 val0 &= 0x0f;
@@ -1445,7 +1765,7 @@ static int ov518_upload_quan_tables(struct sd *sd)
1445 val0 &= 0x0f; 1765 val0 &= 0x0f;
1446 val1 &= 0x0f; 1766 val1 &= 0x0f;
1447 val0 |= val1 << 4; 1767 val0 |= val1 << 4;
1448 rc = reg_w(sd, reg + 16, val0); 1768 rc = reg_w(sd, reg + size, val0);
1449 if (rc < 0) 1769 if (rc < 0)
1450 return rc; 1770 return rc;
1451 1771
@@ -1455,6 +1775,87 @@ static int ov518_upload_quan_tables(struct sd *sd)
1455 return 0; 1775 return 0;
1456} 1776}
1457 1777
1778/* This initializes the OV511/OV511+ and the sensor */
1779static int ov511_configure(struct gspca_dev *gspca_dev)
1780{
1781 struct sd *sd = (struct sd *) gspca_dev;
1782 int rc;
1783
1784 /* For 511 and 511+ */
1785 const struct ov_regvals init_511[] = {
1786 { R51x_SYS_RESET, 0x7f },
1787 { R51x_SYS_INIT, 0x01 },
1788 { R51x_SYS_RESET, 0x7f },
1789 { R51x_SYS_INIT, 0x01 },
1790 { R51x_SYS_RESET, 0x3f },
1791 { R51x_SYS_INIT, 0x01 },
1792 { R51x_SYS_RESET, 0x3d },
1793 };
1794
1795 const struct ov_regvals norm_511[] = {
1796 { R511_DRAM_FLOW_CTL, 0x01 },
1797 { R51x_SYS_SNAP, 0x00 },
1798 { R51x_SYS_SNAP, 0x02 },
1799 { R51x_SYS_SNAP, 0x00 },
1800 { R511_FIFO_OPTS, 0x1f },
1801 { R511_COMP_EN, 0x00 },
1802 { R511_COMP_LUT_EN, 0x03 },
1803 };
1804
1805 const struct ov_regvals norm_511_p[] = {
1806 { R511_DRAM_FLOW_CTL, 0xff },
1807 { R51x_SYS_SNAP, 0x00 },
1808 { R51x_SYS_SNAP, 0x02 },
1809 { R51x_SYS_SNAP, 0x00 },
1810 { R511_FIFO_OPTS, 0xff },
1811 { R511_COMP_EN, 0x00 },
1812 { R511_COMP_LUT_EN, 0x03 },
1813 };
1814
1815 const struct ov_regvals compress_511[] = {
1816 { 0x70, 0x1f },
1817 { 0x71, 0x05 },
1818 { 0x72, 0x06 },
1819 { 0x73, 0x06 },
1820 { 0x74, 0x14 },
1821 { 0x75, 0x03 },
1822 { 0x76, 0x04 },
1823 { 0x77, 0x04 },
1824 };
1825
1826 PDEBUG(D_PROBE, "Device custom id %x", reg_r(sd, R51x_SYS_CUST_ID));
1827
1828 rc = write_regvals(sd, init_511, ARRAY_SIZE(init_511));
1829 if (rc < 0)
1830 return rc;
1831
1832 switch (sd->bridge) {
1833 case BRIDGE_OV511:
1834 rc = write_regvals(sd, norm_511, ARRAY_SIZE(norm_511));
1835 if (rc < 0)
1836 return rc;
1837 break;
1838 case BRIDGE_OV511PLUS:
1839 rc = write_regvals(sd, norm_511_p, ARRAY_SIZE(norm_511_p));
1840 if (rc < 0)
1841 return rc;
1842 break;
1843 }
1844
1845 /* Init compression */
1846 rc = write_regvals(sd, compress_511, ARRAY_SIZE(compress_511));
1847 if (rc < 0)
1848 return rc;
1849
1850 rc = ov51x_upload_quan_tables(sd);
1851 if (rc < 0) {
1852 PDEBUG(D_ERR, "Error uploading quantization tables");
1853 return rc;
1854 }
1855
1856 return 0;
1857}
1858
1458/* This initializes the OV518/OV518+ and the sensor */ 1859/* This initializes the OV518/OV518+ and the sensor */
1459static int ov518_configure(struct gspca_dev *gspca_dev) 1860static int ov518_configure(struct gspca_dev *gspca_dev)
1460{ 1861{
@@ -1462,7 +1863,7 @@ static int ov518_configure(struct gspca_dev *gspca_dev)
1462 int rc; 1863 int rc;
1463 1864
1464 /* For 518 and 518+ */ 1865 /* For 518 and 518+ */
1465 static struct ov_regvals init_518[] = { 1866 const struct ov_regvals init_518[] = {
1466 { R51x_SYS_RESET, 0x40 }, 1867 { R51x_SYS_RESET, 0x40 },
1467 { R51x_SYS_INIT, 0xe1 }, 1868 { R51x_SYS_INIT, 0xe1 },
1468 { R51x_SYS_RESET, 0x3e }, 1869 { R51x_SYS_RESET, 0x3e },
@@ -1473,7 +1874,7 @@ static int ov518_configure(struct gspca_dev *gspca_dev)
1473 { 0x5d, 0x03 }, 1874 { 0x5d, 0x03 },
1474 }; 1875 };
1475 1876
1476 static struct ov_regvals norm_518[] = { 1877 const struct ov_regvals norm_518[] = {
1477 { R51x_SYS_SNAP, 0x02 }, /* Reset */ 1878 { R51x_SYS_SNAP, 0x02 }, /* Reset */
1478 { R51x_SYS_SNAP, 0x01 }, /* Enable */ 1879 { R51x_SYS_SNAP, 0x01 }, /* Enable */
1479 { 0x31, 0x0f }, 1880 { 0x31, 0x0f },
@@ -1486,7 +1887,7 @@ static int ov518_configure(struct gspca_dev *gspca_dev)
1486 { 0x2f, 0x80 }, 1887 { 0x2f, 0x80 },
1487 }; 1888 };
1488 1889
1489 static struct ov_regvals norm_518_p[] = { 1890 const struct ov_regvals norm_518_p[] = {
1490 { R51x_SYS_SNAP, 0x02 }, /* Reset */ 1891 { R51x_SYS_SNAP, 0x02 }, /* Reset */
1491 { R51x_SYS_SNAP, 0x01 }, /* Enable */ 1892 { R51x_SYS_SNAP, 0x01 }, /* Enable */
1492 { 0x31, 0x0f }, 1893 { 0x31, 0x0f },
@@ -1531,7 +1932,7 @@ static int ov518_configure(struct gspca_dev *gspca_dev)
1531 break; 1932 break;
1532 } 1933 }
1533 1934
1534 rc = ov518_upload_quan_tables(sd); 1935 rc = ov51x_upload_quan_tables(sd);
1535 if (rc < 0) { 1936 if (rc < 0) {
1536 PDEBUG(D_ERR, "Error uploading quantization tables"); 1937 PDEBUG(D_ERR, "Error uploading quantization tables");
1537 return rc; 1938 return rc;
@@ -1573,9 +1974,14 @@ static int sd_config(struct gspca_dev *gspca_dev,
1573 struct cam *cam; 1974 struct cam *cam;
1574 int ret = 0; 1975 int ret = 0;
1575 1976
1576 sd->bridge = id->driver_info; 1977 sd->bridge = id->driver_info & BRIDGE_MASK;
1978 sd->invert_led = id->driver_info & BRIDGE_INVERT_LED;
1577 1979
1578 switch (sd->bridge) { 1980 switch (sd->bridge) {
1981 case BRIDGE_OV511:
1982 case BRIDGE_OV511PLUS:
1983 ret = ov511_configure(gspca_dev);
1984 break;
1579 case BRIDGE_OV518: 1985 case BRIDGE_OV518:
1580 case BRIDGE_OV518PLUS: 1986 case BRIDGE_OV518PLUS:
1581 ret = ov518_configure(gspca_dev); 1987 ret = ov518_configure(gspca_dev);
@@ -1634,6 +2040,16 @@ static int sd_config(struct gspca_dev *gspca_dev,
1634 2040
1635 cam = &gspca_dev->cam; 2041 cam = &gspca_dev->cam;
1636 switch (sd->bridge) { 2042 switch (sd->bridge) {
2043 case BRIDGE_OV511:
2044 case BRIDGE_OV511PLUS:
2045 if (!sd->sif) {
2046 cam->cam_mode = ov511_vga_mode;
2047 cam->nmodes = ARRAY_SIZE(ov511_vga_mode);
2048 } else {
2049 cam->cam_mode = ov511_sif_mode;
2050 cam->nmodes = ARRAY_SIZE(ov511_sif_mode);
2051 }
2052 break;
1637 case BRIDGE_OV518: 2053 case BRIDGE_OV518:
1638 case BRIDGE_OV518PLUS: 2054 case BRIDGE_OV518PLUS:
1639 if (!sd->sif) { 2055 if (!sd->sif) {
@@ -1655,13 +2071,28 @@ static int sd_config(struct gspca_dev *gspca_dev,
1655 break; 2071 break;
1656 } 2072 }
1657 sd->brightness = BRIGHTNESS_DEF; 2073 sd->brightness = BRIGHTNESS_DEF;
1658 sd->contrast = CONTRAST_DEF; 2074 if (sd->sensor == SEN_OV6630 || sd->sensor == SEN_OV66308AF)
2075 sd->contrast = 200; /* The default is too low for the ov6630 */
2076 else
2077 sd->contrast = CONTRAST_DEF;
1659 sd->colors = COLOR_DEF; 2078 sd->colors = COLOR_DEF;
1660 sd->hflip = HFLIP_DEF; 2079 sd->hflip = HFLIP_DEF;
1661 sd->vflip = VFLIP_DEF; 2080 sd->vflip = VFLIP_DEF;
1662 if (sd->sensor != SEN_OV7670) 2081 sd->autobrightness = AUTOBRIGHT_DEF;
1663 gspca_dev->ctrl_dis = (1 << HFLIP_IDX) 2082 if (sd->sensor == SEN_OV7670) {
1664 | (1 << VFLIP_IDX); 2083 sd->freq = OV7670_FREQ_DEF;
2084 gspca_dev->ctrl_dis = 1 << FREQ_IDX;
2085 } else {
2086 sd->freq = FREQ_DEF;
2087 gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX) |
2088 (1 << OV7670_FREQ_IDX);
2089 }
2090 if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7670)
2091 gspca_dev->ctrl_dis |= 1 << AUTOBRIGHT_IDX;
2092 /* OV8610 Frequency filter control should work but needs testing */
2093 if (sd->sensor == SEN_OV8610)
2094 gspca_dev->ctrl_dis |= 1 << FREQ_IDX;
2095
1665 return 0; 2096 return 0;
1666error: 2097error:
1667 PDEBUG(D_ERR, "OV519 Config failed"); 2098 PDEBUG(D_ERR, "OV519 Config failed");
@@ -1680,6 +2111,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
1680 return -EIO; 2111 return -EIO;
1681 break; 2112 break;
1682 case SEN_OV6630: 2113 case SEN_OV6630:
2114 case SEN_OV66308AF:
1683 if (write_i2c_regvals(sd, norm_6x30, ARRAY_SIZE(norm_6x30))) 2115 if (write_i2c_regvals(sd, norm_6x30, ARRAY_SIZE(norm_6x30)))
1684 return -EIO; 2116 return -EIO;
1685 break; 2117 break;
@@ -1688,6 +2120,8 @@ static int sd_init(struct gspca_dev *gspca_dev)
1688/* case SEN_OV76BE: */ 2120/* case SEN_OV76BE: */
1689 if (write_i2c_regvals(sd, norm_7610, ARRAY_SIZE(norm_7610))) 2121 if (write_i2c_regvals(sd, norm_7610, ARRAY_SIZE(norm_7610)))
1690 return -EIO; 2122 return -EIO;
2123 if (i2c_w_mask(sd, 0x0e, 0x00, 0x40))
2124 return -EIO;
1691 break; 2125 break;
1692 case SEN_OV7620: 2126 case SEN_OV7620:
1693 if (write_i2c_regvals(sd, norm_7620, ARRAY_SIZE(norm_7620))) 2127 if (write_i2c_regvals(sd, norm_7620, ARRAY_SIZE(norm_7620)))
@@ -1709,6 +2143,126 @@ static int sd_init(struct gspca_dev *gspca_dev)
1709 return 0; 2143 return 0;
1710} 2144}
1711 2145
2146/* Set up the OV511/OV511+ with the given image parameters.
2147 *
2148 * Do not put any sensor-specific code in here (including I2C I/O functions)
2149 */
2150static int ov511_mode_init_regs(struct sd *sd)
2151{
2152 int hsegs, vsegs, packet_size, fps, needed;
2153 int interlaced = 0;
2154 struct usb_host_interface *alt;
2155 struct usb_interface *intf;
2156
2157 intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
2158 alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
2159 if (!alt) {
2160 PDEBUG(D_ERR, "Couldn't get altsetting");
2161 return -EIO;
2162 }
2163
2164 packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
2165 reg_w(sd, R51x_FIFO_PSIZE, packet_size >> 5);
2166
2167 reg_w(sd, R511_CAM_UV_EN, 0x01);
2168 reg_w(sd, R511_SNAP_UV_EN, 0x01);
2169 reg_w(sd, R511_SNAP_OPTS, 0x03);
2170
2171 /* Here I'm assuming that snapshot size == image size.
2172 * I hope that's always true. --claudio
2173 */
2174 hsegs = (sd->gspca_dev.width >> 3) - 1;
2175 vsegs = (sd->gspca_dev.height >> 3) - 1;
2176
2177 reg_w(sd, R511_CAM_PXCNT, hsegs);
2178 reg_w(sd, R511_CAM_LNCNT, vsegs);
2179 reg_w(sd, R511_CAM_PXDIV, 0x00);
2180 reg_w(sd, R511_CAM_LNDIV, 0x00);
2181
2182 /* YUV420, low pass filter on */
2183 reg_w(sd, R511_CAM_OPTS, 0x03);
2184
2185 /* Snapshot additions */
2186 reg_w(sd, R511_SNAP_PXCNT, hsegs);
2187 reg_w(sd, R511_SNAP_LNCNT, vsegs);
2188 reg_w(sd, R511_SNAP_PXDIV, 0x00);
2189 reg_w(sd, R511_SNAP_LNDIV, 0x00);
2190
2191 /******** Set the framerate ********/
2192 if (frame_rate > 0)
2193 sd->frame_rate = frame_rate;
2194
2195 switch (sd->sensor) {
2196 case SEN_OV6620:
2197 /* No framerate control, doesn't like higher rates yet */
2198 sd->clockdiv = 3;
2199 break;
2200
2201 /* Note once the FIXME's in mode_init_ov_sensor_regs() are fixed
2202 for more sensors we need to do this for them too */
2203 case SEN_OV7620:
2204 case SEN_OV7640:
2205 case SEN_OV76BE:
2206 if (sd->gspca_dev.width == 320)
2207 interlaced = 1;
2208 /* Fall through */
2209 case SEN_OV6630:
2210 case SEN_OV7610:
2211 case SEN_OV7670:
2212 switch (sd->frame_rate) {
2213 case 30:
2214 case 25:
2215 /* Not enough bandwidth to do 640x480 @ 30 fps */
2216 if (sd->gspca_dev.width != 640) {
2217 sd->clockdiv = 0;
2218 break;
2219 }
2220 /* Fall through for 640x480 case */
2221 default:
2222/* case 20: */
2223/* case 15: */
2224 sd->clockdiv = 1;
2225 break;
2226 case 10:
2227 sd->clockdiv = 2;
2228 break;
2229 case 5:
2230 sd->clockdiv = 5;
2231 break;
2232 }
2233 if (interlaced) {
2234 sd->clockdiv = (sd->clockdiv + 1) * 2 - 1;
2235 /* Higher then 10 does not work */
2236 if (sd->clockdiv > 10)
2237 sd->clockdiv = 10;
2238 }
2239 break;
2240
2241 case SEN_OV8610:
2242 /* No framerate control ?? */
2243 sd->clockdiv = 0;
2244 break;
2245 }
2246
2247 /* Check if we have enough bandwidth to disable compression */
2248 fps = (interlaced ? 60 : 30) / (sd->clockdiv + 1) + 1;
2249 needed = fps * sd->gspca_dev.width * sd->gspca_dev.height * 3 / 2;
2250 /* 1400 is a conservative estimate of the max nr of isoc packets/sec */
2251 if (needed > 1400 * packet_size) {
2252 /* Enable Y and UV quantization and compression */
2253 reg_w(sd, R511_COMP_EN, 0x07);
2254 reg_w(sd, R511_COMP_LUT_EN, 0x03);
2255 } else {
2256 reg_w(sd, R511_COMP_EN, 0x06);
2257 reg_w(sd, R511_COMP_LUT_EN, 0x00);
2258 }
2259
2260 reg_w(sd, R51x_SYS_RESET, OV511_RESET_OMNICE);
2261 reg_w(sd, R51x_SYS_RESET, 0);
2262
2263 return 0;
2264}
2265
1712/* Sets up the OV518/OV518+ with the given image parameters 2266/* Sets up the OV518/OV518+ with the given image parameters
1713 * 2267 *
1714 * OV518 needs a completely different approach, until we can figure out what 2268 * OV518 needs a completely different approach, until we can figure out what
@@ -1718,7 +2272,19 @@ static int sd_init(struct gspca_dev *gspca_dev)
1718 */ 2272 */
1719static int ov518_mode_init_regs(struct sd *sd) 2273static int ov518_mode_init_regs(struct sd *sd)
1720{ 2274{
1721 int hsegs, vsegs; 2275 int hsegs, vsegs, packet_size;
2276 struct usb_host_interface *alt;
2277 struct usb_interface *intf;
2278
2279 intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
2280 alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
2281 if (!alt) {
2282 PDEBUG(D_ERR, "Couldn't get altsetting");
2283 return -EIO;
2284 }
2285
2286 packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
2287 ov518_reg_w32(sd, R51x_FIFO_PSIZE, packet_size & ~7, 2);
1722 2288
1723 /******** Set the mode ********/ 2289 /******** Set the mode ********/
1724 2290
@@ -1755,20 +2321,30 @@ static int ov518_mode_init_regs(struct sd *sd)
1755 /* Windows driver does this here; who knows why */ 2321 /* Windows driver does this here; who knows why */
1756 reg_w(sd, 0x2f, 0x80); 2322 reg_w(sd, 0x2f, 0x80);
1757 2323
1758 /******** Set the framerate (to 30 FPS) ********/ 2324 /******** Set the framerate ********/
1759 if (sd->bridge == BRIDGE_OV518PLUS) 2325 sd->clockdiv = 1;
1760 sd->clockdiv = 1;
1761 else
1762 sd->clockdiv = 0;
1763 2326
1764 /* Mode independent, but framerate dependent, regs */ 2327 /* Mode independent, but framerate dependent, regs */
1765 reg_w(sd, 0x51, 0x04); /* Clock divider; lower==faster */ 2328 /* 0x51: Clock divider; Only works on some cams which use 2 crystals */
2329 reg_w(sd, 0x51, 0x04);
1766 reg_w(sd, 0x22, 0x18); 2330 reg_w(sd, 0x22, 0x18);
1767 reg_w(sd, 0x23, 0xff); 2331 reg_w(sd, 0x23, 0xff);
1768 2332
1769 if (sd->bridge == BRIDGE_OV518PLUS) 2333 if (sd->bridge == BRIDGE_OV518PLUS) {
1770 reg_w(sd, 0x21, 0x19); 2334 switch (sd->sensor) {
1771 else 2335 case SEN_OV7620:
2336 if (sd->gspca_dev.width == 320) {
2337 reg_w(sd, 0x20, 0x00);
2338 reg_w(sd, 0x21, 0x19);
2339 } else {
2340 reg_w(sd, 0x20, 0x60);
2341 reg_w(sd, 0x21, 0x1f);
2342 }
2343 break;
2344 default:
2345 reg_w(sd, 0x21, 0x19);
2346 }
2347 } else
1772 reg_w(sd, 0x71, 0x17); /* Compression-related? */ 2348 reg_w(sd, 0x71, 0x17); /* Compression-related? */
1773 2349
1774 /* FIXME: Sensor-specific */ 2350 /* FIXME: Sensor-specific */
@@ -1879,7 +2455,11 @@ static int ov519_mode_init_regs(struct sd *sd)
1879 2455
1880 reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.width >> 4); 2456 reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.width >> 4);
1881 reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.height >> 3); 2457 reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.height >> 3);
1882 reg_w(sd, OV519_R12_X_OFFSETL, 0x00); 2458 if (sd->sensor == SEN_OV7670 &&
2459 sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv)
2460 reg_w(sd, OV519_R12_X_OFFSETL, 0x04);
2461 else
2462 reg_w(sd, OV519_R12_X_OFFSETL, 0x00);
1883 reg_w(sd, OV519_R13_X_OFFSETH, 0x00); 2463 reg_w(sd, OV519_R13_X_OFFSETH, 0x00);
1884 reg_w(sd, OV519_R14_Y_OFFSETL, 0x00); 2464 reg_w(sd, OV519_R14_Y_OFFSETL, 0x00);
1885 reg_w(sd, OV519_R15_Y_OFFSETH, 0x00); 2465 reg_w(sd, OV519_R15_Y_OFFSETH, 0x00);
@@ -1971,7 +2551,7 @@ static int mode_init_ov_sensor_regs(struct sd *sd)
1971 int qvga; 2551 int qvga;
1972 2552
1973 gspca_dev = &sd->gspca_dev; 2553 gspca_dev = &sd->gspca_dev;
1974 qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; 2554 qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv & 1;
1975 2555
1976 /******** Mode (VGA/QVGA) and sensor specific regs ********/ 2556 /******** Mode (VGA/QVGA) and sensor specific regs ********/
1977 switch (sd->sensor) { 2557 switch (sd->sensor) {
@@ -1983,21 +2563,16 @@ static int mode_init_ov_sensor_regs(struct sd *sd)
1983 i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); 2563 i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
1984 break; 2564 break;
1985 case SEN_OV7620: 2565 case SEN_OV7620:
1986/* i2c_w(sd, 0x2b, 0x00); */ 2566 case SEN_OV76BE:
1987 i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); 2567 i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
1988 i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20); 2568 i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20);
1989 i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a); 2569 i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a);
1990 i2c_w(sd, 0x25, qvga ? 0x30 : 0x60); 2570 i2c_w(sd, 0x25, qvga ? 0x30 : 0x60);
1991 i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40); 2571 i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40);
1992 i2c_w_mask(sd, 0x67, qvga ? 0xf0 : 0x90, 0xf0); 2572 i2c_w_mask(sd, 0x67, qvga ? 0xb0 : 0x90, 0xf0);
1993 i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20); 2573 i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20);
1994 break; 2574 break;
1995 case SEN_OV76BE:
1996/* i2c_w(sd, 0x2b, 0x00); */
1997 i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
1998 break;
1999 case SEN_OV7640: 2575 case SEN_OV7640:
2000/* i2c_w(sd, 0x2b, 0x00); */
2001 i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); 2576 i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
2002 i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20); 2577 i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20);
2003/* i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a); */ 2578/* i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a); */
@@ -2016,6 +2591,7 @@ static int mode_init_ov_sensor_regs(struct sd *sd)
2016 break; 2591 break;
2017 case SEN_OV6620: 2592 case SEN_OV6620:
2018 case SEN_OV6630: 2593 case SEN_OV6630:
2594 case SEN_OV66308AF:
2019 i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); 2595 i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
2020 break; 2596 break;
2021 default: 2597 default:
@@ -2023,10 +2599,6 @@ static int mode_init_ov_sensor_regs(struct sd *sd)
2023 } 2599 }
2024 2600
2025 /******** Palette-specific regs ********/ 2601 /******** Palette-specific regs ********/
2026 if (sd->sensor == SEN_OV7610 || sd->sensor == SEN_OV76BE) {
2027 /* not valid on the OV6620/OV7620/6630? */
2028 i2c_w_mask(sd, 0x0e, 0x00, 0x40);
2029 }
2030 2602
2031 /* The OV518 needs special treatment. Although both the OV518 2603 /* The OV518 needs special treatment. Although both the OV518
2032 * and the OV6630 support a 16-bit video bus, only the 8 bit Y 2604 * and the OV6630 support a 16-bit video bus, only the 8 bit Y
@@ -2036,25 +2608,12 @@ static int mode_init_ov_sensor_regs(struct sd *sd)
2036 2608
2037 /* OV7640 is 8-bit only */ 2609 /* OV7640 is 8-bit only */
2038 2610
2039 if (sd->sensor != SEN_OV6630 && sd->sensor != SEN_OV7640) 2611 if (sd->sensor != SEN_OV6630 && sd->sensor != SEN_OV66308AF &&
2612 sd->sensor != SEN_OV7640)
2040 i2c_w_mask(sd, 0x13, 0x00, 0x20); 2613 i2c_w_mask(sd, 0x13, 0x00, 0x20);
2041 2614
2042 /******** Clock programming ********/ 2615 /******** Clock programming ********/
2043 /* The OV6620 needs special handling. This prevents the 2616 i2c_w(sd, 0x11, sd->clockdiv);
2044 * severe banding that normally occurs */
2045 if (sd->sensor == SEN_OV6620) {
2046
2047 /* Clock down */
2048 i2c_w(sd, 0x2a, 0x04);
2049 i2c_w(sd, 0x11, sd->clockdiv);
2050 i2c_w(sd, 0x2a, 0x84);
2051 /* This next setting is critical. It seems to improve
2052 * the gain or the contrast. The "reserved" bits seem
2053 * to have some effect in this case. */
2054 i2c_w(sd, 0x2d, 0x85);
2055 } else {
2056 i2c_w(sd, 0x11, sd->clockdiv);
2057 }
2058 2617
2059 /******** Special Features ********/ 2618 /******** Special Features ********/
2060/* no evidence this is possible with OV7670, either */ 2619/* no evidence this is possible with OV7670, either */
@@ -2098,13 +2657,14 @@ static void sethvflip(struct sd *sd)
2098static int set_ov_sensor_window(struct sd *sd) 2657static int set_ov_sensor_window(struct sd *sd)
2099{ 2658{
2100 struct gspca_dev *gspca_dev; 2659 struct gspca_dev *gspca_dev;
2101 int qvga; 2660 int qvga, crop;
2102 int hwsbase, hwebase, vwsbase, vwebase, hwscale, vwscale; 2661 int hwsbase, hwebase, vwsbase, vwebase, hwscale, vwscale;
2103 int ret, hstart, hstop, vstop, vstart; 2662 int ret, hstart, hstop, vstop, vstart;
2104 __u8 v; 2663 __u8 v;
2105 2664
2106 gspca_dev = &sd->gspca_dev; 2665 gspca_dev = &sd->gspca_dev;
2107 qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; 2666 qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv & 1;
2667 crop = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv & 2;
2108 2668
2109 /* The different sensor ICs handle setting up of window differently. 2669 /* The different sensor ICs handle setting up of window differently.
2110 * IF YOU SET IT WRONG, YOU WILL GET ALL ZERO ISOC DATA FROM OV51x!! */ 2670 * IF YOU SET IT WRONG, YOU WILL GET ALL ZERO ISOC DATA FROM OV51x!! */
@@ -2123,14 +2683,19 @@ static int set_ov_sensor_window(struct sd *sd)
2123 break; 2683 break;
2124 case SEN_OV6620: 2684 case SEN_OV6620:
2125 case SEN_OV6630: 2685 case SEN_OV6630:
2686 case SEN_OV66308AF:
2126 hwsbase = 0x38; 2687 hwsbase = 0x38;
2127 hwebase = 0x3a; 2688 hwebase = 0x3a;
2128 vwsbase = 0x05; 2689 vwsbase = 0x05;
2129 vwebase = 0x06; 2690 vwebase = 0x06;
2130 if (qvga) { 2691 if (sd->sensor == SEN_OV66308AF && qvga)
2131 /* HDG: this fixes U and V getting swapped */ 2692 /* HDG: this fixes U and V getting swapped */
2132 hwsbase--; 2693 hwsbase++;
2133 vwsbase--; 2694 if (crop) {
2695 hwsbase += 8;
2696 hwebase += 8;
2697 vwsbase += 11;
2698 vwebase += 11;
2134 } 2699 }
2135 break; 2700 break;
2136 case SEN_OV7620: 2701 case SEN_OV7620:
@@ -2155,6 +2720,7 @@ static int set_ov_sensor_window(struct sd *sd)
2155 switch (sd->sensor) { 2720 switch (sd->sensor) {
2156 case SEN_OV6620: 2721 case SEN_OV6620:
2157 case SEN_OV6630: 2722 case SEN_OV6630:
2723 case SEN_OV66308AF:
2158 if (qvga) { /* QCIF */ 2724 if (qvga) { /* QCIF */
2159 hwscale = 0; 2725 hwscale = 0;
2160 vwscale = 0; 2726 vwscale = 0;
@@ -2207,7 +2773,7 @@ static int set_ov_sensor_window(struct sd *sd)
2207 if (qvga) { /* QVGA from ov7670.c by 2773 if (qvga) { /* QVGA from ov7670.c by
2208 * Jonathan Corbet */ 2774 * Jonathan Corbet */
2209 hstart = 164; 2775 hstart = 164;
2210 hstop = 20; 2776 hstop = 28;
2211 vstart = 14; 2777 vstart = 14;
2212 vstop = 494; 2778 vstop = 494;
2213 } else { /* VGA */ 2779 } else { /* VGA */
@@ -2233,7 +2799,6 @@ static int set_ov_sensor_window(struct sd *sd)
2233 msleep(10); /* need to sleep between read and write to 2799 msleep(10); /* need to sleep between read and write to
2234 * same reg! */ 2800 * same reg! */
2235 i2c_w(sd, OV7670_REG_VREF, v); 2801 i2c_w(sd, OV7670_REG_VREF, v);
2236 sethvflip(sd);
2237 } else { 2802 } else {
2238 i2c_w(sd, 0x17, hwsbase); 2803 i2c_w(sd, 0x17, hwsbase);
2239 i2c_w(sd, 0x18, hwebase + (sd->gspca_dev.width >> hwscale)); 2804 i2c_w(sd, 0x18, hwebase + (sd->gspca_dev.width >> hwscale));
@@ -2250,6 +2815,10 @@ static int sd_start(struct gspca_dev *gspca_dev)
2250 int ret = 0; 2815 int ret = 0;
2251 2816
2252 switch (sd->bridge) { 2817 switch (sd->bridge) {
2818 case BRIDGE_OV511:
2819 case BRIDGE_OV511PLUS:
2820 ret = ov511_mode_init_regs(sd);
2821 break;
2253 case BRIDGE_OV518: 2822 case BRIDGE_OV518:
2254 case BRIDGE_OV518PLUS: 2823 case BRIDGE_OV518PLUS:
2255 ret = ov518_mode_init_regs(sd); 2824 ret = ov518_mode_init_regs(sd);
@@ -2268,6 +2837,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
2268 setcontrast(gspca_dev); 2837 setcontrast(gspca_dev);
2269 setbrightness(gspca_dev); 2838 setbrightness(gspca_dev);
2270 setcolors(gspca_dev); 2839 setcolors(gspca_dev);
2840 sethvflip(sd);
2841 setautobrightness(sd);
2842 setfreq(sd);
2271 2843
2272 ret = ov51x_restart(sd); 2844 ret = ov51x_restart(sd);
2273 if (ret < 0) 2845 if (ret < 0)
@@ -2287,23 +2859,88 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
2287 ov51x_led_control(sd, 0); 2859 ov51x_led_control(sd, 0);
2288} 2860}
2289 2861
2290static void ov518_pkt_scan(struct gspca_dev *gspca_dev, 2862static void ov511_pkt_scan(struct gspca_dev *gspca_dev,
2291 struct gspca_frame *frame, /* target */ 2863 struct gspca_frame *frame, /* target */
2292 __u8 *data, /* isoc packet */ 2864 __u8 *in, /* isoc packet */
2293 int len) /* iso packet length */ 2865 int len) /* iso packet length */
2294{ 2866{
2295 PDEBUG(D_STREAM, "ov518_pkt_scan: %d bytes", len); 2867 struct sd *sd = (struct sd *) gspca_dev;
2296 2868
2297 if (len & 7) { 2869 /* SOF/EOF packets have 1st to 8th bytes zeroed and the 9th
2298 len--; 2870 * byte non-zero. The EOF packet has image width/height in the
2299 PDEBUG(D_STREAM, "packet number: %d\n", (int)data[len]); 2871 * 10th and 11th bytes. The 9th byte is given as follows:
2872 *
2873 * bit 7: EOF
2874 * 6: compression enabled
2875 * 5: 422/420/400 modes
2876 * 4: 422/420/400 modes
2877 * 3: 1
2878 * 2: snapshot button on
2879 * 1: snapshot frame
2880 * 0: even/odd field
2881 */
2882 if (!(in[0] | in[1] | in[2] | in[3] | in[4] | in[5] | in[6] | in[7]) &&
2883 (in[8] & 0x08)) {
2884 if (in[8] & 0x80) {
2885 /* Frame end */
2886 if ((in[9] + 1) * 8 != gspca_dev->width ||
2887 (in[10] + 1) * 8 != gspca_dev->height) {
2888 PDEBUG(D_ERR, "Invalid frame size, got: %dx%d,"
2889 " requested: %dx%d\n",
2890 (in[9] + 1) * 8, (in[10] + 1) * 8,
2891 gspca_dev->width, gspca_dev->height);
2892 gspca_dev->last_packet_type = DISCARD_PACKET;
2893 return;
2894 }
2895 /* Add 11 byte footer to frame, might be usefull */
2896 gspca_frame_add(gspca_dev, LAST_PACKET, frame, in, 11);
2897 return;
2898 } else {
2899 /* Frame start */
2900 gspca_frame_add(gspca_dev, FIRST_PACKET, frame, in, 0);
2901 sd->packet_nr = 0;
2902 }
2300 } 2903 }
2301 2904
2905 /* Ignore the packet number */
2906 len--;
2907
2908 /* intermediate packet */
2909 gspca_frame_add(gspca_dev, INTER_PACKET, frame, in, len);
2910}
2911
2912static void ov518_pkt_scan(struct gspca_dev *gspca_dev,
2913 struct gspca_frame *frame, /* target */
2914 __u8 *data, /* isoc packet */
2915 int len) /* iso packet length */
2916{
2917 struct sd *sd = (struct sd *) gspca_dev;
2918
2302 /* A false positive here is likely, until OVT gives me 2919 /* A false positive here is likely, until OVT gives me
2303 * the definitive SOF/EOF format */ 2920 * the definitive SOF/EOF format */
2304 if ((!(data[0] | data[1] | data[2] | data[3] | data[5])) && data[6]) { 2921 if ((!(data[0] | data[1] | data[2] | data[3] | data[5])) && data[6]) {
2305 gspca_frame_add(gspca_dev, LAST_PACKET, frame, data, 0); 2922 gspca_frame_add(gspca_dev, LAST_PACKET, frame, data, 0);
2306 gspca_frame_add(gspca_dev, FIRST_PACKET, frame, data, 0); 2923 gspca_frame_add(gspca_dev, FIRST_PACKET, frame, data, 0);
2924 sd->packet_nr = 0;
2925 }
2926
2927 if (gspca_dev->last_packet_type == DISCARD_PACKET)
2928 return;
2929
2930 /* Does this device use packet numbers ? */
2931 if (len & 7) {
2932 len--;
2933 if (sd->packet_nr == data[len])
2934 sd->packet_nr++;
2935 /* The last few packets of the frame (which are all 0's
2936 except that they may contain part of the footer), are
2937 numbered 0 */
2938 else if (sd->packet_nr == 0 || data[len]) {
2939 PDEBUG(D_ERR, "Invalid packet nr: %d (expect: %d)",
2940 (int)data[len], (int)sd->packet_nr);
2941 gspca_dev->last_packet_type = DISCARD_PACKET;
2942 return;
2943 }
2307 } 2944 }
2308 2945
2309 /* intermediate packet */ 2946 /* intermediate packet */
@@ -2364,6 +3001,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
2364 switch (sd->bridge) { 3001 switch (sd->bridge) {
2365 case BRIDGE_OV511: 3002 case BRIDGE_OV511:
2366 case BRIDGE_OV511PLUS: 3003 case BRIDGE_OV511PLUS:
3004 ov511_pkt_scan(gspca_dev, frame, data, len);
2367 break; 3005 break;
2368 case BRIDGE_OV518: 3006 case BRIDGE_OV518:
2369 case BRIDGE_OV518PLUS: 3007 case BRIDGE_OV518PLUS:
@@ -2389,13 +3027,13 @@ static void setbrightness(struct gspca_dev *gspca_dev)
2389 case SEN_OV76BE: 3027 case SEN_OV76BE:
2390 case SEN_OV6620: 3028 case SEN_OV6620:
2391 case SEN_OV6630: 3029 case SEN_OV6630:
3030 case SEN_OV66308AF:
2392 case SEN_OV7640: 3031 case SEN_OV7640:
2393 i2c_w(sd, OV7610_REG_BRT, val); 3032 i2c_w(sd, OV7610_REG_BRT, val);
2394 break; 3033 break;
2395 case SEN_OV7620: 3034 case SEN_OV7620:
2396 /* 7620 doesn't like manual changes when in auto mode */ 3035 /* 7620 doesn't like manual changes when in auto mode */
2397/*fixme 3036 if (!sd->autobrightness)
2398 * if (!sd->auto_brt) */
2399 i2c_w(sd, OV7610_REG_BRT, val); 3037 i2c_w(sd, OV7610_REG_BRT, val);
2400 break; 3038 break;
2401 case SEN_OV7670: 3039 case SEN_OV7670:
@@ -2418,6 +3056,7 @@ static void setcontrast(struct gspca_dev *gspca_dev)
2418 i2c_w(sd, OV7610_REG_CNT, val); 3056 i2c_w(sd, OV7610_REG_CNT, val);
2419 break; 3057 break;
2420 case SEN_OV6630: 3058 case SEN_OV6630:
3059 case SEN_OV66308AF:
2421 i2c_w_mask(sd, OV7610_REG_CNT, val >> 4, 0x0f); 3060 i2c_w_mask(sd, OV7610_REG_CNT, val >> 4, 0x0f);
2422 break; 3061 break;
2423 case SEN_OV8610: { 3062 case SEN_OV8610: {
@@ -2462,6 +3101,7 @@ static void setcolors(struct gspca_dev *gspca_dev)
2462 case SEN_OV76BE: 3101 case SEN_OV76BE:
2463 case SEN_OV6620: 3102 case SEN_OV6620:
2464 case SEN_OV6630: 3103 case SEN_OV6630:
3104 case SEN_OV66308AF:
2465 i2c_w(sd, OV7610_REG_SAT, val); 3105 i2c_w(sd, OV7610_REG_SAT, val);
2466 break; 3106 break;
2467 case SEN_OV7620: 3107 case SEN_OV7620:
@@ -2482,6 +3122,72 @@ static void setcolors(struct gspca_dev *gspca_dev)
2482 } 3122 }
2483} 3123}
2484 3124
3125static void setautobrightness(struct sd *sd)
3126{
3127 if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7670)
3128 return;
3129
3130 i2c_w_mask(sd, 0x2d, sd->autobrightness ? 0x10 : 0x00, 0x10);
3131}
3132
3133static void setfreq(struct sd *sd)
3134{
3135 if (sd->sensor == SEN_OV7670) {
3136 switch (sd->freq) {
3137 case 0: /* Banding filter disabled */
3138 i2c_w_mask(sd, OV7670_REG_COM8, 0, OV7670_COM8_BFILT);
3139 break;
3140 case 1: /* 50 hz */
3141 i2c_w_mask(sd, OV7670_REG_COM8, OV7670_COM8_BFILT,
3142 OV7670_COM8_BFILT);
3143 i2c_w_mask(sd, OV7670_REG_COM11, 0x08, 0x18);
3144 break;
3145 case 2: /* 60 hz */
3146 i2c_w_mask(sd, OV7670_REG_COM8, OV7670_COM8_BFILT,
3147 OV7670_COM8_BFILT);
3148 i2c_w_mask(sd, OV7670_REG_COM11, 0x00, 0x18);
3149 break;
3150 case 3: /* Auto hz */
3151 i2c_w_mask(sd, OV7670_REG_COM8, OV7670_COM8_BFILT,
3152 OV7670_COM8_BFILT);
3153 i2c_w_mask(sd, OV7670_REG_COM11, OV7670_COM11_HZAUTO,
3154 0x18);
3155 break;
3156 }
3157 } else {
3158 switch (sd->freq) {
3159 case 0: /* Banding filter disabled */
3160 i2c_w_mask(sd, 0x2d, 0x00, 0x04);
3161 i2c_w_mask(sd, 0x2a, 0x00, 0x80);
3162 break;
3163 case 1: /* 50 hz (filter on and framerate adj) */
3164 i2c_w_mask(sd, 0x2d, 0x04, 0x04);
3165 i2c_w_mask(sd, 0x2a, 0x80, 0x80);
3166 /* 20 fps -> 16.667 fps */
3167 if (sd->sensor == SEN_OV6620 ||
3168 sd->sensor == SEN_OV6630 ||
3169 sd->sensor == SEN_OV66308AF)
3170 i2c_w(sd, 0x2b, 0x5e);
3171 else
3172 i2c_w(sd, 0x2b, 0xac);
3173 break;
3174 case 2: /* 60 hz (filter on, ...) */
3175 i2c_w_mask(sd, 0x2d, 0x04, 0x04);
3176 if (sd->sensor == SEN_OV6620 ||
3177 sd->sensor == SEN_OV6630 ||
3178 sd->sensor == SEN_OV66308AF) {
3179 /* 20 fps -> 15 fps */
3180 i2c_w_mask(sd, 0x2a, 0x80, 0x80);
3181 i2c_w(sd, 0x2b, 0xa8);
3182 } else {
3183 /* no framerate adj. */
3184 i2c_w_mask(sd, 0x2a, 0x00, 0x80);
3185 }
3186 break;
3187 }
3188 }
3189}
3190
2485static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val) 3191static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
2486{ 3192{
2487 struct sd *sd = (struct sd *) gspca_dev; 3193 struct sd *sd = (struct sd *) gspca_dev;
@@ -2572,6 +3278,71 @@ static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
2572 return 0; 3278 return 0;
2573} 3279}
2574 3280
3281static int sd_setautobrightness(struct gspca_dev *gspca_dev, __s32 val)
3282{
3283 struct sd *sd = (struct sd *) gspca_dev;
3284
3285 sd->autobrightness = val;
3286 if (gspca_dev->streaming)
3287 setautobrightness(sd);
3288 return 0;
3289}
3290
3291static int sd_getautobrightness(struct gspca_dev *gspca_dev, __s32 *val)
3292{
3293 struct sd *sd = (struct sd *) gspca_dev;
3294
3295 *val = sd->autobrightness;
3296 return 0;
3297}
3298
3299static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
3300{
3301 struct sd *sd = (struct sd *) gspca_dev;
3302
3303 sd->freq = val;
3304 if (gspca_dev->streaming)
3305 setfreq(sd);
3306 return 0;
3307}
3308
3309static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
3310{
3311 struct sd *sd = (struct sd *) gspca_dev;
3312
3313 *val = sd->freq;
3314 return 0;
3315}
3316
3317static int sd_querymenu(struct gspca_dev *gspca_dev,
3318 struct v4l2_querymenu *menu)
3319{
3320 struct sd *sd = (struct sd *) gspca_dev;
3321
3322 switch (menu->id) {
3323 case V4L2_CID_POWER_LINE_FREQUENCY:
3324 switch (menu->index) {
3325 case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
3326 strcpy((char *) menu->name, "NoFliker");
3327 return 0;
3328 case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
3329 strcpy((char *) menu->name, "50 Hz");
3330 return 0;
3331 case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */
3332 strcpy((char *) menu->name, "60 Hz");
3333 return 0;
3334 case 3:
3335 if (sd->sensor != SEN_OV7670)
3336 return -EINVAL;
3337
3338 strcpy((char *) menu->name, "Automatic");
3339 return 0;
3340 }
3341 break;
3342 }
3343 return -EINVAL;
3344}
3345
2575/* sub-driver description */ 3346/* sub-driver description */
2576static const struct sd_desc sd_desc = { 3347static const struct sd_desc sd_desc = {
2577 .name = MODULE_NAME, 3348 .name = MODULE_NAME,
@@ -2582,6 +3353,7 @@ static const struct sd_desc sd_desc = {
2582 .start = sd_start, 3353 .start = sd_start,
2583 .stopN = sd_stopN, 3354 .stopN = sd_stopN,
2584 .pkt_scan = sd_pkt_scan, 3355 .pkt_scan = sd_pkt_scan,
3356 .querymenu = sd_querymenu,
2585}; 3357};
2586 3358
2587/* -- module initialisation -- */ 3359/* -- module initialisation -- */
@@ -2590,17 +3362,22 @@ static const __devinitdata struct usb_device_id device_table[] = {
2590 {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 }, 3362 {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
2591 {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 }, 3363 {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },
2592 {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 }, 3364 {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
2593 {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 }, 3365 {USB_DEVICE(0x041e, 0x4064),
2594 {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 }, 3366 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
3367 {USB_DEVICE(0x041e, 0x4068),
3368 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
2595 {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 }, 3369 {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },
2596 {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 }, 3370 {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 },
2597 {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 }, 3371 {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 },
3372 {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 },
2598 {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 }, 3373 {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 },
2599 {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 }, 3374 {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 },
2600 {USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 }, 3375 {USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 },
2601 {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 }, 3376 {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 },
2602 {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 }, 3377 {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 },
3378 {USB_DEVICE(0x05a9, 0xa511), .driver_info = BRIDGE_OV511PLUS },
2603 {USB_DEVICE(0x05a9, 0xa518), .driver_info = BRIDGE_OV518PLUS }, 3379 {USB_DEVICE(0x05a9, 0xa518), .driver_info = BRIDGE_OV518PLUS },
3380 {USB_DEVICE(0x0813, 0x0002), .driver_info = BRIDGE_OV511PLUS },
2604 {} 3381 {}
2605}; 3382};
2606 3383
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index dc6a6f11354a..0d02f41fa7d0 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -46,6 +46,7 @@ struct sd {
46 u8 gamma; 46 u8 gamma;
47 u8 vflip; /* ov7630/ov7648 only */ 47 u8 vflip; /* ov7630/ov7648 only */
48 u8 infrared; /* mt9v111 only */ 48 u8 infrared; /* mt9v111 only */
49 u8 freq; /* ov76xx only */
49 u8 quality; /* image quality */ 50 u8 quality; /* image quality */
50#define QUALITY_MIN 60 51#define QUALITY_MIN 60
51#define QUALITY_MAX 95 52#define QUALITY_MAX 95
@@ -96,8 +97,11 @@ static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
96static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val); 97static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
97static int sd_setinfrared(struct gspca_dev *gspca_dev, __s32 val); 98static int sd_setinfrared(struct gspca_dev *gspca_dev, __s32 val);
98static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val); 99static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val);
100static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
101static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
99 102
100static struct ctrl sd_ctrls[] = { 103static struct ctrl sd_ctrls[] = {
104#define BRIGHTNESS_IDX 0
101 { 105 {
102 { 106 {
103 .id = V4L2_CID_BRIGHTNESS, 107 .id = V4L2_CID_BRIGHTNESS,
@@ -113,6 +117,7 @@ static struct ctrl sd_ctrls[] = {
113 .set = sd_setbrightness, 117 .set = sd_setbrightness,
114 .get = sd_getbrightness, 118 .get = sd_getbrightness,
115 }, 119 },
120#define CONTRAST_IDX 1
116 { 121 {
117 { 122 {
118 .id = V4L2_CID_CONTRAST, 123 .id = V4L2_CID_CONTRAST,
@@ -128,20 +133,22 @@ static struct ctrl sd_ctrls[] = {
128 .set = sd_setcontrast, 133 .set = sd_setcontrast,
129 .get = sd_getcontrast, 134 .get = sd_getcontrast,
130 }, 135 },
136#define COLOR_IDX 2
131 { 137 {
132 { 138 {
133 .id = V4L2_CID_SATURATION, 139 .id = V4L2_CID_SATURATION,
134 .type = V4L2_CTRL_TYPE_INTEGER, 140 .type = V4L2_CTRL_TYPE_INTEGER,
135 .name = "Color", 141 .name = "Saturation",
136 .minimum = 0, 142 .minimum = 0,
137 .maximum = 40, 143 .maximum = 40,
138 .step = 1, 144 .step = 1,
139#define COLOR_DEF 32 145#define COLOR_DEF 25
140 .default_value = COLOR_DEF, 146 .default_value = COLOR_DEF,
141 }, 147 },
142 .set = sd_setcolors, 148 .set = sd_setcolors,
143 .get = sd_getcolors, 149 .get = sd_getcolors,
144 }, 150 },
151#define BLUE_BALANCE_IDX 3
145 { 152 {
146 { 153 {
147 .id = V4L2_CID_BLUE_BALANCE, 154 .id = V4L2_CID_BLUE_BALANCE,
@@ -156,6 +163,7 @@ static struct ctrl sd_ctrls[] = {
156 .set = sd_setblue_balance, 163 .set = sd_setblue_balance,
157 .get = sd_getblue_balance, 164 .get = sd_getblue_balance,
158 }, 165 },
166#define RED_BALANCE_IDX 4
159 { 167 {
160 { 168 {
161 .id = V4L2_CID_RED_BALANCE, 169 .id = V4L2_CID_RED_BALANCE,
@@ -170,6 +178,7 @@ static struct ctrl sd_ctrls[] = {
170 .set = sd_setred_balance, 178 .set = sd_setred_balance,
171 .get = sd_getred_balance, 179 .get = sd_getred_balance,
172 }, 180 },
181#define GAMMA_IDX 5
173 { 182 {
174 { 183 {
175 .id = V4L2_CID_GAMMA, 184 .id = V4L2_CID_GAMMA,
@@ -184,7 +193,7 @@ static struct ctrl sd_ctrls[] = {
184 .set = sd_setgamma, 193 .set = sd_setgamma,
185 .get = sd_getgamma, 194 .get = sd_getgamma,
186 }, 195 },
187#define AUTOGAIN_IDX 5 196#define AUTOGAIN_IDX 6
188 { 197 {
189 { 198 {
190 .id = V4L2_CID_AUTOGAIN, 199 .id = V4L2_CID_AUTOGAIN,
@@ -200,7 +209,7 @@ static struct ctrl sd_ctrls[] = {
200 .get = sd_getautogain, 209 .get = sd_getautogain,
201 }, 210 },
202/* ov7630/ov7648 only */ 211/* ov7630/ov7648 only */
203#define VFLIP_IDX 6 212#define VFLIP_IDX 7
204 { 213 {
205 { 214 {
206 .id = V4L2_CID_VFLIP, 215 .id = V4L2_CID_VFLIP,
@@ -209,14 +218,14 @@ static struct ctrl sd_ctrls[] = {
209 .minimum = 0, 218 .minimum = 0,
210 .maximum = 1, 219 .maximum = 1,
211 .step = 1, 220 .step = 1,
212#define VFLIP_DEF 0 /* vflip def = 1 for ov7630 */ 221#define VFLIP_DEF 0
213 .default_value = VFLIP_DEF, 222 .default_value = VFLIP_DEF,
214 }, 223 },
215 .set = sd_setvflip, 224 .set = sd_setvflip,
216 .get = sd_getvflip, 225 .get = sd_getvflip,
217 }, 226 },
218/* mt9v111 only */ 227/* mt9v111 only */
219#define INFRARED_IDX 7 228#define INFRARED_IDX 8
220 { 229 {
221 { 230 {
222 .id = V4L2_CID_INFRARED, 231 .id = V4L2_CID_INFRARED,
@@ -231,28 +240,44 @@ static struct ctrl sd_ctrls[] = {
231 .set = sd_setinfrared, 240 .set = sd_setinfrared,
232 .get = sd_getinfrared, 241 .get = sd_getinfrared,
233 }, 242 },
243/* ov7630/ov7648/ov7660 only */
244#define FREQ_IDX 9
245 {
246 {
247 .id = V4L2_CID_POWER_LINE_FREQUENCY,
248 .type = V4L2_CTRL_TYPE_MENU,
249 .name = "Light frequency filter",
250 .minimum = 0,
251 .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
252 .step = 1,
253#define FREQ_DEF 2
254 .default_value = FREQ_DEF,
255 },
256 .set = sd_setfreq,
257 .get = sd_getfreq,
258 },
234}; 259};
235 260
236/* table of the disabled controls */ 261/* table of the disabled controls */
237static __u32 ctrl_dis[] = { 262static __u32 ctrl_dis[] = {
238 (1 << INFRARED_IDX) | (1 << VFLIP_IDX), 263 (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
239 /* SENSOR_HV7131R 0 */ 264 /* SENSOR_HV7131R 0 */
240 (1 << INFRARED_IDX) | (1 << VFLIP_IDX), 265 (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
241 /* SENSOR_MI0360 1 */ 266 /* SENSOR_MI0360 1 */
242 (1 << INFRARED_IDX) | (1 << VFLIP_IDX), 267 (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
243 /* SENSOR_MO4000 2 */ 268 /* SENSOR_MO4000 2 */
244 (1 << VFLIP_IDX), 269 (1 << VFLIP_IDX) | (1 << FREQ_IDX),
245 /* SENSOR_MT9V111 3 */ 270 /* SENSOR_MT9V111 3 */
246 (1 << INFRARED_IDX) | (1 << VFLIP_IDX), 271 (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
247 /* SENSOR_OM6802 4 */ 272 /* SENSOR_OM6802 4 */
248 (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX), 273 (1 << INFRARED_IDX),
249 /* SENSOR_OV7630 5 */ 274 /* SENSOR_OV7630 5 */
250 (1 << INFRARED_IDX), 275 (1 << INFRARED_IDX),
251 /* SENSOR_OV7648 6 */ 276 /* SENSOR_OV7648 6 */
252 (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX), 277 (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
253 /* SENSOR_OV7660 7 */ 278 /* SENSOR_OV7660 7 */
254 (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX), 279 (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) |
255 /* SENSOR_SP80708 8 */ 280 (1 << FREQ_IDX), /* SENSOR_SP80708 8 */
256}; 281};
257 282
258static const struct v4l2_pix_format vga_mode[] = { 283static const struct v4l2_pix_format vga_mode[] = {
@@ -268,7 +293,8 @@ static const struct v4l2_pix_format vga_mode[] = {
268 .priv = 1}, 293 .priv = 1},
269 {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 294 {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
270 .bytesperline = 640, 295 .bytesperline = 640,
271 .sizeimage = 640 * 480 * 3 / 8 + 590, 296 /* Note 3 / 8 is not large enough, not even 5 / 8 is ?! */
297 .sizeimage = 640 * 480 * 3 / 4 + 590,
272 .colorspace = V4L2_COLORSPACE_JPEG, 298 .colorspace = V4L2_COLORSPACE_JPEG,
273 .priv = 0}, 299 .priv = 0},
274}; 300};
@@ -604,7 +630,9 @@ static const u8 ov7630_sensor_init[][8] = {
604/* win: i2c_r from 00 to 80 */ 630/* win: i2c_r from 00 to 80 */
605 {0xd1, 0x21, 0x03, 0x80, 0x10, 0x20, 0x80, 0x10}, 631 {0xd1, 0x21, 0x03, 0x80, 0x10, 0x20, 0x80, 0x10},
606 {0xb1, 0x21, 0x0c, 0x20, 0x20, 0x00, 0x00, 0x10}, 632 {0xb1, 0x21, 0x0c, 0x20, 0x20, 0x00, 0x00, 0x10},
607 {0xd1, 0x21, 0x11, 0x00, 0x48, 0xc0, 0x00, 0x10}, 633/* HDG: 0x11 was 0x00 change to 0x01 for better exposure (15 fps instead of 30)
634 0x13 was 0xc0 change to 0xc3 for auto gain and exposure */
635 {0xd1, 0x21, 0x11, 0x01, 0x48, 0xc3, 0x00, 0x10},
608 {0xb1, 0x21, 0x15, 0x80, 0x03, 0x00, 0x00, 0x10}, 636 {0xb1, 0x21, 0x15, 0x80, 0x03, 0x00, 0x00, 0x10},
609 {0xd1, 0x21, 0x17, 0x1b, 0xbd, 0x05, 0xf6, 0x10}, 637 {0xd1, 0x21, 0x17, 0x1b, 0xbd, 0x05, 0xf6, 0x10},
610 {0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10}, 638 {0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10},
@@ -638,9 +666,8 @@ static const u8 ov7630_sensor_init[][8] = {
638 {0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}, 666 {0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10},
639 {0xb1, 0x21, 0x01, 0x80, 0x80, 0x00, 0x00, 0x10}, 667 {0xb1, 0x21, 0x01, 0x80, 0x80, 0x00, 0x00, 0x10},
640/* */ 668/* */
641 {0xa1, 0x21, 0x11, 0x00, 0x00, 0x00, 0x00, 0x10}, 669/* {0xa1, 0x21, 0x2a, 0x88, 0x00, 0x00, 0x00, 0x10}, * set by setfreq */
642 {0xa1, 0x21, 0x2a, 0x88, 0x00, 0x00, 0x00, 0x10}, 670/* {0xa1, 0x21, 0x2b, 0x34, 0x00, 0x00, 0x00, 0x10}, * set by setfreq */
643 {0xa1, 0x21, 0x2b, 0x34, 0x00, 0x00, 0x00, 0x10},
644/* */ 671/* */
645 {0xa1, 0x21, 0x10, 0x83, 0x00, 0x00, 0x00, 0x10}, 672 {0xa1, 0x21, 0x10, 0x83, 0x00, 0x00, 0x00, 0x10},
646/* {0xb1, 0x21, 0x01, 0x88, 0x70, 0x00, 0x00, 0x10}, */ 673/* {0xb1, 0x21, 0x01, 0x88, 0x70, 0x00, 0x00, 0x10}, */
@@ -673,7 +700,7 @@ static const u8 ov7648_sensor_init[][8] = {
673 {0xd1, 0x21, 0x21, 0x86, 0x00, 0xde, 0xa0, 0x10}, 700 {0xd1, 0x21, 0x21, 0x86, 0x00, 0xde, 0xa0, 0x10},
674/* {0xd1, 0x21, 0x25, 0x80, 0x32, 0xfe, 0xa0, 0x10}, jfm done */ 701/* {0xd1, 0x21, 0x25, 0x80, 0x32, 0xfe, 0xa0, 0x10}, jfm done */
675/* {0xd1, 0x21, 0x29, 0x00, 0x91, 0x00, 0x88, 0x10}, jfm done */ 702/* {0xd1, 0x21, 0x29, 0x00, 0x91, 0x00, 0x88, 0x10}, jfm done */
676 {0xb1, 0x21, 0x2d, 0x85, 0x00, 0x00, 0x00, 0x10}, 703/* {0xb1, 0x21, 0x2d, 0x85, 0x00, 0x00, 0x00, 0x10}, set by setfreq */
677/*...*/ 704/*...*/
678/* {0xa1, 0x21, 0x12, 0x08, 0x00, 0x00, 0x00, 0x10}, jfm done */ 705/* {0xa1, 0x21, 0x12, 0x08, 0x00, 0x00, 0x00, 0x10}, jfm done */
679/* {0xa1, 0x21, 0x75, 0x06, 0x00, 0x00, 0x00, 0x10}, * COMN 706/* {0xa1, 0x21, 0x75, 0x06, 0x00, 0x00, 0x00, 0x10}, * COMN
@@ -1294,11 +1321,9 @@ static int sd_config(struct gspca_dev *gspca_dev,
1294 sd->gamma = GAMMA_DEF; 1321 sd->gamma = GAMMA_DEF;
1295 sd->autogain = AUTOGAIN_DEF; 1322 sd->autogain = AUTOGAIN_DEF;
1296 sd->ag_cnt = -1; 1323 sd->ag_cnt = -1;
1297 if (sd->sensor != SENSOR_OV7630) 1324 sd->vflip = VFLIP_DEF;
1298 sd->vflip = 0;
1299 else
1300 sd->vflip = 1;
1301 sd->infrared = INFRARED_DEF; 1325 sd->infrared = INFRARED_DEF;
1326 sd->freq = FREQ_DEF;
1302 sd->quality = QUALITY_DEF; 1327 sd->quality = QUALITY_DEF;
1303 sd->jpegqual = 80; 1328 sd->jpegqual = 80;
1304 1329
@@ -1569,7 +1594,7 @@ static void setautogain(struct gspca_dev *gspca_dev)
1569 else 1594 else
1570 comb = 0xa0; 1595 comb = 0xa0;
1571 if (sd->autogain) 1596 if (sd->autogain)
1572 comb |= 0x02; 1597 comb |= 0x03;
1573 i2c_w1(&sd->gspca_dev, 0x13, comb); 1598 i2c_w1(&sd->gspca_dev, 0x13, comb);
1574 return; 1599 return;
1575 } 1600 }
@@ -1585,12 +1610,15 @@ static void setvflip(struct sd *sd)
1585{ 1610{
1586 u8 comn; 1611 u8 comn;
1587 1612
1588 if (sd->sensor == SENSOR_OV7630) 1613 if (sd->sensor == SENSOR_OV7630) {
1589 comn = 0x02; 1614 comn = 0x02;
1590 else 1615 if (!sd->vflip)
1616 comn |= 0x80;
1617 } else {
1591 comn = 0x06; 1618 comn = 0x06;
1592 if (sd->vflip) 1619 if (sd->vflip)
1593 comn |= 0x80; 1620 comn |= 0x80;
1621 }
1594 i2c_w1(&sd->gspca_dev, 0x75, comn); 1622 i2c_w1(&sd->gspca_dev, 0x75, comn);
1595} 1623}
1596 1624
@@ -1602,6 +1630,58 @@ static void setinfrared(struct sd *sd)
1602 sd->infrared ? 0x66 : 0x64); 1630 sd->infrared ? 0x66 : 0x64);
1603} 1631}
1604 1632
1633static void setfreq(struct gspca_dev *gspca_dev)
1634{
1635 struct sd *sd = (struct sd *) gspca_dev;
1636
1637 if (sd->sensor == SENSOR_OV7660) {
1638 switch (sd->freq) {
1639 case 0: /* Banding filter disabled */
1640 i2c_w1(gspca_dev, 0x13, 0xdf);
1641 break;
1642 case 1: /* 50 hz */
1643 i2c_w1(gspca_dev, 0x13, 0xff);
1644 i2c_w1(gspca_dev, 0x3b, 0x0a);
1645 break;
1646 case 2: /* 60 hz */
1647 i2c_w1(gspca_dev, 0x13, 0xff);
1648 i2c_w1(gspca_dev, 0x3b, 0x02);
1649 break;
1650 }
1651 } else {
1652 u8 reg2a = 0, reg2b = 0, reg2d = 0;
1653
1654 /* Get reg2a / reg2d base values */
1655 switch (sd->sensor) {
1656 case SENSOR_OV7630:
1657 reg2a = 0x08;
1658 reg2d = 0x01;
1659 break;
1660 case SENSOR_OV7648:
1661 reg2a = 0x11;
1662 reg2d = 0x81;
1663 break;
1664 }
1665
1666 switch (sd->freq) {
1667 case 0: /* Banding filter disabled */
1668 break;
1669 case 1: /* 50 hz (filter on and framerate adj) */
1670 reg2a |= 0x80;
1671 reg2b = 0xac;
1672 reg2d |= 0x04;
1673 break;
1674 case 2: /* 60 hz (filter on, no framerate adj) */
1675 reg2a |= 0x80;
1676 reg2d |= 0x04;
1677 break;
1678 }
1679 i2c_w1(gspca_dev, 0x2a, reg2a);
1680 i2c_w1(gspca_dev, 0x2b, reg2b);
1681 i2c_w1(gspca_dev, 0x2d, reg2d);
1682 }
1683}
1684
1605static void setjpegqual(struct gspca_dev *gspca_dev) 1685static void setjpegqual(struct gspca_dev *gspca_dev)
1606{ 1686{
1607 struct sd *sd = (struct sd *) gspca_dev; 1687 struct sd *sd = (struct sd *) gspca_dev;
@@ -1828,6 +1908,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
1828 setbrightness(gspca_dev); 1908 setbrightness(gspca_dev);
1829 setcontrast(gspca_dev); 1909 setcontrast(gspca_dev);
1830 setautogain(gspca_dev); 1910 setautogain(gspca_dev);
1911 setfreq(gspca_dev);
1831 return 0; 1912 return 0;
1832} 1913}
1833 1914
@@ -2131,6 +2212,24 @@ static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val)
2131 return 0; 2212 return 0;
2132} 2213}
2133 2214
2215static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
2216{
2217 struct sd *sd = (struct sd *) gspca_dev;
2218
2219 sd->freq = val;
2220 if (gspca_dev->streaming)
2221 setfreq(gspca_dev);
2222 return 0;
2223}
2224
2225static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
2226{
2227 struct sd *sd = (struct sd *) gspca_dev;
2228
2229 *val = sd->freq;
2230 return 0;
2231}
2232
2134static int sd_set_jcomp(struct gspca_dev *gspca_dev, 2233static int sd_set_jcomp(struct gspca_dev *gspca_dev,
2135 struct v4l2_jpegcompression *jcomp) 2234 struct v4l2_jpegcompression *jcomp)
2136{ 2235{
@@ -2159,6 +2258,27 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
2159 return 0; 2258 return 0;
2160} 2259}
2161 2260
2261static int sd_querymenu(struct gspca_dev *gspca_dev,
2262 struct v4l2_querymenu *menu)
2263{
2264 switch (menu->id) {
2265 case V4L2_CID_POWER_LINE_FREQUENCY:
2266 switch (menu->index) {
2267 case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
2268 strcpy((char *) menu->name, "NoFliker");
2269 return 0;
2270 case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
2271 strcpy((char *) menu->name, "50 Hz");
2272 return 0;
2273 case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */
2274 strcpy((char *) menu->name, "60 Hz");
2275 return 0;
2276 }
2277 break;
2278 }
2279 return -EINVAL;
2280}
2281
2162/* sub-driver description */ 2282/* sub-driver description */
2163static const struct sd_desc sd_desc = { 2283static const struct sd_desc sd_desc = {
2164 .name = MODULE_NAME, 2284 .name = MODULE_NAME,
@@ -2173,6 +2293,7 @@ static const struct sd_desc sd_desc = {
2173 .dq_callback = do_autogain, 2293 .dq_callback = do_autogain,
2174 .get_jcomp = sd_get_jcomp, 2294 .get_jcomp = sd_get_jcomp,
2175 .set_jcomp = sd_set_jcomp, 2295 .set_jcomp = sd_set_jcomp,
2296 .querymenu = sd_querymenu,
2176}; 2297};
2177 2298
2178/* -- module initialisation -- */ 2299/* -- module initialisation -- */
@@ -2233,7 +2354,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
2233 {USB_DEVICE(0x0c45, 0x613b), BSI(SN9C120, OV7660, 0x21)}, 2354 {USB_DEVICE(0x0c45, 0x613b), BSI(SN9C120, OV7660, 0x21)},
2234#endif 2355#endif
2235 {USB_DEVICE(0x0c45, 0x613c), BSI(SN9C120, HV7131R, 0x11)}, 2356 {USB_DEVICE(0x0c45, 0x613c), BSI(SN9C120, HV7131R, 0x11)},
2236/* {USB_DEVICE(0x0c45, 0x613e), BSI(SN9C120, OV7630, 0x??)}, */ 2357 {USB_DEVICE(0x0c45, 0x613e), BSI(SN9C120, OV7630, 0x21)},
2237 {USB_DEVICE(0x0c45, 0x6143), BSI(SN9C120, SP80708, 0x18)}, 2358 {USB_DEVICE(0x0c45, 0x6143), BSI(SN9C120, SP80708, 0x18)},
2238 {} 2359 {}
2239}; 2360};
diff --git a/drivers/media/video/gspca/stv06xx/Makefile b/drivers/media/video/gspca/stv06xx/Makefile
index feeaa94ab588..2f3c3a606ce4 100644
--- a/drivers/media/video/gspca/stv06xx/Makefile
+++ b/drivers/media/video/gspca/stv06xx/Makefile
@@ -3,7 +3,8 @@ obj-$(CONFIG_USB_STV06XX) += gspca_stv06xx.o
3gspca_stv06xx-objs := stv06xx.o \ 3gspca_stv06xx-objs := stv06xx.o \
4 stv06xx_vv6410.o \ 4 stv06xx_vv6410.o \
5 stv06xx_hdcs.o \ 5 stv06xx_hdcs.o \
6 stv06xx_pb0100.o 6 stv06xx_pb0100.o \
7 stv06xx_st6422.o
7 8
8EXTRA_CFLAGS += -Idrivers/media/video/gspca 9EXTRA_CFLAGS += -Idrivers/media/video/gspca
9 10
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index e573c3406324..0da8e0de0456 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -92,11 +92,10 @@ static int stv06xx_write_sensor_finish(struct sd *sd)
92{ 92{
93 int err = 0; 93 int err = 0;
94 94
95 if (IS_850(sd)) { 95 if (sd->bridge == BRIDGE_STV610) {
96 struct usb_device *udev = sd->gspca_dev.dev; 96 struct usb_device *udev = sd->gspca_dev.dev;
97 __u8 *buf = sd->gspca_dev.usb_buf; 97 __u8 *buf = sd->gspca_dev.usb_buf;
98 98
99 /* Quickam Web needs an extra packet */
100 buf[0] = 0; 99 buf[0] = 0;
101 err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 100 err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
102 0x04, 0x40, 0x1704, 0, buf, 1, 101 0x04, 0x40, 0x1704, 0, buf, 1,
@@ -253,7 +252,7 @@ static int stv06xx_init(struct gspca_dev *gspca_dev)
253 252
254 err = sd->sensor->init(sd); 253 err = sd->sensor->init(sd);
255 254
256 if (dump_sensor) 255 if (dump_sensor && sd->sensor->dump)
257 sd->sensor->dump(sd); 256 sd->sensor->dump(sd);
258 257
259 return (err < 0) ? err : 0; 258 return (err < 0) ? err : 0;
@@ -318,6 +317,8 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev,
318 __u8 *data, /* isoc packet */ 317 __u8 *data, /* isoc packet */
319 int len) /* iso packet length */ 318 int len) /* iso packet length */
320{ 319{
320 struct sd *sd = (struct sd *) gspca_dev;
321
321 PDEBUG(D_PACK, "Packet of length %d arrived", len); 322 PDEBUG(D_PACK, "Packet of length %d arrived", len);
322 323
323 /* A packet may contain several frames 324 /* A packet may contain several frames
@@ -343,14 +344,29 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev,
343 if (len < chunk_len) { 344 if (len < chunk_len) {
344 PDEBUG(D_ERR, "URB packet length is smaller" 345 PDEBUG(D_ERR, "URB packet length is smaller"
345 " than the specified chunk length"); 346 " than the specified chunk length");
347 gspca_dev->last_packet_type = DISCARD_PACKET;
346 return; 348 return;
347 } 349 }
348 350
351 /* First byte seem to be 02=data 2nd byte is unknown??? */
352 if (sd->bridge == BRIDGE_ST6422 && (id & 0xFF00) == 0x0200)
353 goto frame_data;
354
349 switch (id) { 355 switch (id) {
350 case 0x0200: 356 case 0x0200:
351 case 0x4200: 357 case 0x4200:
358frame_data:
352 PDEBUG(D_PACK, "Frame data packet detected"); 359 PDEBUG(D_PACK, "Frame data packet detected");
353 360
361 if (sd->to_skip) {
362 int skip = (sd->to_skip < chunk_len) ?
363 sd->to_skip : chunk_len;
364 data += skip;
365 len -= skip;
366 chunk_len -= skip;
367 sd->to_skip -= skip;
368 }
369
354 gspca_frame_add(gspca_dev, INTER_PACKET, frame, 370 gspca_frame_add(gspca_dev, INTER_PACKET, frame,
355 data, chunk_len); 371 data, chunk_len);
356 break; 372 break;
@@ -365,6 +381,9 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev,
365 gspca_frame_add(gspca_dev, FIRST_PACKET, 381 gspca_frame_add(gspca_dev, FIRST_PACKET,
366 frame, data, 0); 382 frame, data, 0);
367 383
384 if (sd->bridge == BRIDGE_ST6422)
385 sd->to_skip = gspca_dev->width * 4;
386
368 if (chunk_len) 387 if (chunk_len)
369 PDEBUG(D_ERR, "Chunk length is " 388 PDEBUG(D_ERR, "Chunk length is "
370 "non-zero on a SOF"); 389 "non-zero on a SOF");
@@ -395,8 +414,12 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev,
395 /* Unknown chunk with 2 bytes of data, 414 /* Unknown chunk with 2 bytes of data,
396 occurs 2-3 times per USB interrupt */ 415 occurs 2-3 times per USB interrupt */
397 break; 416 break;
417 case 0x42ff:
418 PDEBUG(D_PACK, "Chunk 0x42ff detected");
419 /* Special chunk seen sometimes on the ST6422 */
420 break;
398 default: 421 default:
399 PDEBUG(D_PACK, "Unknown chunk %d detected", id); 422 PDEBUG(D_PACK, "Unknown chunk 0x%04x detected", id);
400 /* Unknown chunk */ 423 /* Unknown chunk */
401 } 424 }
402 data += chunk_len; 425 data += chunk_len;
@@ -428,11 +451,16 @@ static int stv06xx_config(struct gspca_dev *gspca_dev,
428 451
429 cam = &gspca_dev->cam; 452 cam = &gspca_dev->cam;
430 sd->desc = sd_desc; 453 sd->desc = sd_desc;
454 sd->bridge = id->driver_info;
431 gspca_dev->sd_desc = &sd->desc; 455 gspca_dev->sd_desc = &sd->desc;
432 456
433 if (dump_bridge) 457 if (dump_bridge)
434 stv06xx_dump_bridge(sd); 458 stv06xx_dump_bridge(sd);
435 459
460 sd->sensor = &stv06xx_sensor_st6422;
461 if (!sd->sensor->probe(sd))
462 return 0;
463
436 sd->sensor = &stv06xx_sensor_vv6410; 464 sd->sensor = &stv06xx_sensor_vv6410;
437 if (!sd->sensor->probe(sd)) 465 if (!sd->sensor->probe(sd))
438 return 0; 466 return 0;
@@ -457,9 +485,20 @@ static int stv06xx_config(struct gspca_dev *gspca_dev,
457 485
458/* -- module initialisation -- */ 486/* -- module initialisation -- */
459static const __devinitdata struct usb_device_id device_table[] = { 487static const __devinitdata struct usb_device_id device_table[] = {
460 {USB_DEVICE(0x046d, 0x0840)}, /* QuickCam Express */ 488 /* QuickCam Express */
461 {USB_DEVICE(0x046d, 0x0850)}, /* LEGO cam / QuickCam Web */ 489 {USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 },
462 {USB_DEVICE(0x046d, 0x0870)}, /* Dexxa WebCam USB */ 490 /* LEGO cam / QuickCam Web */
491 {USB_DEVICE(0x046d, 0x0850), .driver_info = BRIDGE_STV610 },
492 /* Dexxa WebCam USB */
493 {USB_DEVICE(0x046d, 0x0870), .driver_info = BRIDGE_STV602 },
494 /* QuickCam Messenger */
495 {USB_DEVICE(0x046D, 0x08F0), .driver_info = BRIDGE_ST6422 },
496 /* QuickCam Communicate */
497 {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 },
498 /* QuickCam Messenger (new) */
499 {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 },
500 /* QuickCam Messenger (new) */
501 {USB_DEVICE(0x046D, 0x08DA), .driver_info = BRIDGE_ST6422 },
463 {} 502 {}
464}; 503};
465MODULE_DEVICE_TABLE(usb, device_table); 504MODULE_DEVICE_TABLE(usb, device_table);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.h b/drivers/media/video/gspca/stv06xx/stv06xx.h
index 1207e7d17f14..9df7137fe67e 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.h
@@ -93,6 +93,17 @@ struct sd {
93 93
94 /* Sensor private data */ 94 /* Sensor private data */
95 void *sensor_priv; 95 void *sensor_priv;
96
97 /* The first 4 lines produced by the stv6422 are no good, this keeps
98 track of how many bytes we still need to skip during a frame */
99 int to_skip;
100
101 /* Bridge / Camera type */
102 u8 bridge;
103 #define BRIDGE_STV600 0
104 #define BRIDGE_STV602 1
105 #define BRIDGE_STV610 2
106 #define BRIDGE_ST6422 3 /* With integrated sensor */
96}; 107};
97 108
98int stv06xx_write_bridge(struct sd *sd, u16 address, u16 i2c_data); 109int stv06xx_write_bridge(struct sd *sd, u16 address, u16 i2c_data);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
index b16903814203..3039ec208f3a 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
@@ -434,7 +434,7 @@ static int hdcs_probe_1x00(struct sd *sd)
434 hdcs->exp.er = 100; 434 hdcs->exp.er = 100;
435 435
436 /* 436 /*
437 * Frame rate on HDCS-1000 0x46D:0x840 depends on PSMP: 437 * Frame rate on HDCS-1000 with STV600 depends on PSMP:
438 * 4 = doesn't work at all 438 * 4 = doesn't work at all
439 * 5 = 7.8 fps, 439 * 5 = 7.8 fps,
440 * 6 = 6.9 fps, 440 * 6 = 6.9 fps,
@@ -443,7 +443,7 @@ static int hdcs_probe_1x00(struct sd *sd)
443 * 15 = 4.4 fps, 443 * 15 = 4.4 fps,
444 * 31 = 2.8 fps 444 * 31 = 2.8 fps
445 * 445 *
446 * Frame rate on HDCS-1000 0x46D:0x870 depends on PSMP: 446 * Frame rate on HDCS-1000 with STV602 depends on PSMP:
447 * 15 = doesn't work at all 447 * 15 = doesn't work at all
448 * 18 = doesn't work at all 448 * 18 = doesn't work at all
449 * 19 = 7.3 fps 449 * 19 = 7.3 fps
@@ -453,7 +453,7 @@ static int hdcs_probe_1x00(struct sd *sd)
453 * 24 = 6.3 fps 453 * 24 = 6.3 fps
454 * 30 = 5.4 fps 454 * 30 = 5.4 fps
455 */ 455 */
456 hdcs->psmp = IS_870(sd) ? 20 : 5; 456 hdcs->psmp = (sd->bridge == BRIDGE_STV602) ? 20 : 5;
457 457
458 sd->sensor_priv = hdcs; 458 sd->sensor_priv = hdcs;
459 459
@@ -530,7 +530,7 @@ static int hdcs_init(struct sd *sd)
530 int i, err = 0; 530 int i, err = 0;
531 531
532 /* Set the STV0602AA in STV0600 emulation mode */ 532 /* Set the STV0602AA in STV0600 emulation mode */
533 if (IS_870(sd)) 533 if (sd->bridge == BRIDGE_STV602)
534 stv06xx_write_bridge(sd, STV_STV0600_EMULATION, 1); 534 stv06xx_write_bridge(sd, STV_STV0600_EMULATION, 1);
535 535
536 /* Execute the bridge init */ 536 /* Execute the bridge init */
@@ -558,7 +558,7 @@ static int hdcs_init(struct sd *sd)
558 return err; 558 return err;
559 559
560 /* Set PGA sample duration 560 /* Set PGA sample duration
561 (was 0x7E for IS_870, but caused slow framerate with HDCS-1020) */ 561 (was 0x7E for the STV602, but caused slow framerate with HDCS-1020) */
562 if (IS_1020(sd)) 562 if (IS_1020(sd))
563 err = stv06xx_write_sensor(sd, HDCS_TCTRL, 563 err = stv06xx_write_sensor(sd, HDCS_TCTRL,
564 (HDCS_ADC_START_SIG_DUR << 6) | hdcs->psmp); 564 (HDCS_ADC_START_SIG_DUR << 6) | hdcs->psmp);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h b/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
index e88c42f7d2f8..934b9cebc1ab 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
@@ -32,14 +32,13 @@
32 32
33#include "stv06xx.h" 33#include "stv06xx.h"
34 34
35#define IS_850(sd) ((sd)->gspca_dev.dev->descriptor.idProduct == 0x850)
36#define IS_870(sd) ((sd)->gspca_dev.dev->descriptor.idProduct == 0x870)
37#define IS_1020(sd) ((sd)->sensor == &stv06xx_sensor_hdcs1020) 35#define IS_1020(sd) ((sd)->sensor == &stv06xx_sensor_hdcs1020)
38 36
39extern const struct stv06xx_sensor stv06xx_sensor_vv6410; 37extern const struct stv06xx_sensor stv06xx_sensor_vv6410;
40extern const struct stv06xx_sensor stv06xx_sensor_hdcs1x00; 38extern const struct stv06xx_sensor stv06xx_sensor_hdcs1x00;
41extern const struct stv06xx_sensor stv06xx_sensor_hdcs1020; 39extern const struct stv06xx_sensor stv06xx_sensor_hdcs1020;
42extern const struct stv06xx_sensor stv06xx_sensor_pb0100; 40extern const struct stv06xx_sensor stv06xx_sensor_pb0100;
41extern const struct stv06xx_sensor stv06xx_sensor_st6422;
43 42
44struct stv06xx_sensor { 43struct stv06xx_sensor {
45 /* Defines the name of a sensor */ 44 /* Defines the name of a sensor */
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
new file mode 100644
index 000000000000..87cb5b9ddfa7
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
@@ -0,0 +1,453 @@
1/*
2 * Support for the sensor part which is integrated (I think) into the
3 * st6422 stv06xx alike bridge, as its integrated there are no i2c writes
4 * but instead direct bridge writes.
5 *
6 * Copyright (c) 2009 Hans de Goede <hdegoede@redhat.com>
7 *
8 * Strongly based on qc-usb-messenger, which is:
9 * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
10 * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
11 * Copyright (c) 2002, 2003 Tuukka Toivonen
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 *
27 */
28
29#include "stv06xx_st6422.h"
30
31static struct v4l2_pix_format st6422_mode[] = {
32 /* Note we actually get 124 lines of data, of which we skip the 4st
33 4 as they are garbage */
34 {
35 162,
36 120,
37 V4L2_PIX_FMT_SGRBG8,
38 V4L2_FIELD_NONE,
39 .sizeimage = 162 * 120,
40 .bytesperline = 162,
41 .colorspace = V4L2_COLORSPACE_SRGB,
42 .priv = 1
43 },
44 /* Note we actually get 248 lines of data, of which we skip the 4st
45 4 as they are garbage, and we tell the app it only gets the
46 first 240 of the 244 lines it actually gets, so that it ignores
47 the last 4. */
48 {
49 324,
50 240,
51 V4L2_PIX_FMT_SGRBG8,
52 V4L2_FIELD_NONE,
53 .sizeimage = 324 * 244,
54 .bytesperline = 324,
55 .colorspace = V4L2_COLORSPACE_SRGB,
56 .priv = 0
57 },
58};
59
60static const struct ctrl st6422_ctrl[] = {
61#define BRIGHTNESS_IDX 0
62 {
63 {
64 .id = V4L2_CID_BRIGHTNESS,
65 .type = V4L2_CTRL_TYPE_INTEGER,
66 .name = "Brightness",
67 .minimum = 0,
68 .maximum = 31,
69 .step = 1,
70 .default_value = 3
71 },
72 .set = st6422_set_brightness,
73 .get = st6422_get_brightness
74 },
75#define CONTRAST_IDX 1
76 {
77 {
78 .id = V4L2_CID_CONTRAST,
79 .type = V4L2_CTRL_TYPE_INTEGER,
80 .name = "Contrast",
81 .minimum = 0,
82 .maximum = 15,
83 .step = 1,
84 .default_value = 11
85 },
86 .set = st6422_set_contrast,
87 .get = st6422_get_contrast
88 },
89#define GAIN_IDX 2
90 {
91 {
92 .id = V4L2_CID_GAIN,
93 .type = V4L2_CTRL_TYPE_INTEGER,
94 .name = "Gain",
95 .minimum = 0,
96 .maximum = 255,
97 .step = 1,
98 .default_value = 64
99 },
100 .set = st6422_set_gain,
101 .get = st6422_get_gain
102 },
103#define EXPOSURE_IDX 3
104 {
105 {
106 .id = V4L2_CID_EXPOSURE,
107 .type = V4L2_CTRL_TYPE_INTEGER,
108 .name = "Exposure",
109 .minimum = 0,
110 .maximum = 1023,
111 .step = 1,
112 .default_value = 256
113 },
114 .set = st6422_set_exposure,
115 .get = st6422_get_exposure
116 },
117};
118
119static int st6422_probe(struct sd *sd)
120{
121 int i;
122 s32 *sensor_settings;
123
124 if (sd->bridge != BRIDGE_ST6422)
125 return -ENODEV;
126
127 info("st6422 sensor detected");
128
129 sensor_settings = kmalloc(ARRAY_SIZE(st6422_ctrl) * sizeof(s32),
130 GFP_KERNEL);
131 if (!sensor_settings)
132 return -ENOMEM;
133
134 sd->gspca_dev.cam.cam_mode = st6422_mode;
135 sd->gspca_dev.cam.nmodes = ARRAY_SIZE(st6422_mode);
136 sd->desc.ctrls = st6422_ctrl;
137 sd->desc.nctrls = ARRAY_SIZE(st6422_ctrl);
138 sd->sensor_priv = sensor_settings;
139
140 for (i = 0; i < sd->desc.nctrls; i++)
141 sensor_settings[i] = st6422_ctrl[i].qctrl.default_value;
142
143 return 0;
144}
145
146static int st6422_init(struct sd *sd)
147{
148 int err = 0, i;
149
150 const u16 st6422_bridge_init[][2] = {
151 { STV_ISO_ENABLE, 0x00 }, /* disable capture */
152 { 0x1436, 0x00 },
153 { 0x1432, 0x03 }, /* 0x00-0x1F brightness */
154 { 0x143a, 0xF9 }, /* 0x00-0x0F contrast */
155 { 0x0509, 0x38 }, /* R */
156 { 0x050a, 0x38 }, /* G */
157 { 0x050b, 0x38 }, /* B */
158 { 0x050c, 0x2A },
159 { 0x050d, 0x01 },
160
161
162 { 0x1431, 0x00 }, /* 0x00-0x07 ??? */
163 { 0x1433, 0x34 }, /* 160x120, 0x00-0x01 night filter */
164 { 0x1438, 0x18 }, /* 640x480 */
165/* 18 bayes */
166/* 10 compressed? */
167
168 { 0x1439, 0x00 },
169/* antiflimmer?? 0xa2 ger perfekt bild mot monitor */
170
171 { 0x143b, 0x05 },
172 { 0x143c, 0x00 }, /* 0x00-0x01 - ??? */
173
174
175/* shutter time 0x0000-0x03FF */
176/* low value give good picures on moving objects (but requires much light) */
177/* high value gives good picures in darkness (but tends to be overexposed) */
178 { 0x143e, 0x01 },
179 { 0x143d, 0x00 },
180
181 { 0x1442, 0xe2 },
182/* write: 1x1x xxxx */
183/* read: 1x1x xxxx */
184/* bit 5 == button pressed and hold if 0 */
185/* write 0xe2,0xea */
186
187/* 0x144a */
188/* 0x00 init */
189/* bit 7 == button has been pressed, but not handled */
190
191/* interrupt */
192/* if(urb->iso_frame_desc[i].status == 0x80) { */
193/* if(urb->iso_frame_desc[i].status == 0x88) { */
194
195 { 0x1500, 0xd0 },
196 { 0x1500, 0xd0 },
197 { 0x1500, 0x50 }, /* 0x00 - 0xFF 0x80 == compr ? */
198
199 { 0x1501, 0xaf },
200/* high val-> ljus area blir morkare. */
201/* low val -> ljus area blir ljusare. */
202 { 0x1502, 0xc2 },
203/* high val-> ljus area blir morkare. */
204/* low val -> ljus area blir ljusare. */
205 { 0x1503, 0x45 },
206/* high val-> ljus area blir morkare. */
207/* low val -> ljus area blir ljusare. */
208
209 { 0x1505, 0x02 },
210/* 2 : 324x248 80352 bytes */
211/* 7 : 248x162 40176 bytes */
212/* c+f: 162*124 20088 bytes */
213
214 { 0x150e, 0x8e },
215 { 0x150f, 0x37 },
216 { 0x15c0, 0x00 },
217 { 0x15c1, 1023 }, /* 160x120, ISOC_PACKET_SIZE */
218 { 0x15c3, 0x08 }, /* 0x04/0x14 ... test pictures ??? */
219
220
221 { 0x143f, 0x01 }, /* commit settings */
222
223 };
224
225 for (i = 0; i < ARRAY_SIZE(st6422_bridge_init) && !err; i++) {
226 err = stv06xx_write_bridge(sd, st6422_bridge_init[i][0],
227 st6422_bridge_init[i][1]);
228 }
229
230 return err;
231}
232
233static void st6422_disconnect(struct sd *sd)
234{
235 sd->sensor = NULL;
236 kfree(sd->sensor_priv);
237}
238
239static int st6422_start(struct sd *sd)
240{
241 int err, packet_size;
242 struct cam *cam = &sd->gspca_dev.cam;
243 s32 *sensor_settings = sd->sensor_priv;
244 struct usb_host_interface *alt;
245 struct usb_interface *intf;
246
247 intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
248 alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
249 if (!alt) {
250 PDEBUG(D_ERR, "Couldn't get altsetting");
251 return -EIO;
252 }
253
254 packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
255 err = stv06xx_write_bridge(sd, 0x15c1, packet_size);
256 if (err < 0)
257 return err;
258
259 if (cam->cam_mode[sd->gspca_dev.curr_mode].priv)
260 err = stv06xx_write_bridge(sd, 0x1505, 0x0f);
261 else
262 err = stv06xx_write_bridge(sd, 0x1505, 0x02);
263 if (err < 0)
264 return err;
265
266 err = st6422_set_brightness(&sd->gspca_dev,
267 sensor_settings[BRIGHTNESS_IDX]);
268 if (err < 0)
269 return err;
270
271 err = st6422_set_contrast(&sd->gspca_dev,
272 sensor_settings[CONTRAST_IDX]);
273 if (err < 0)
274 return err;
275
276 err = st6422_set_exposure(&sd->gspca_dev,
277 sensor_settings[EXPOSURE_IDX]);
278 if (err < 0)
279 return err;
280
281 err = st6422_set_gain(&sd->gspca_dev,
282 sensor_settings[GAIN_IDX]);
283 if (err < 0)
284 return err;
285
286 PDEBUG(D_STREAM, "Starting stream");
287
288 return 0;
289}
290
291static int st6422_stop(struct sd *sd)
292{
293 PDEBUG(D_STREAM, "Halting stream");
294
295 return 0;
296}
297
298static int st6422_get_brightness(struct gspca_dev *gspca_dev, __s32 *val)
299{
300 struct sd *sd = (struct sd *) gspca_dev;
301 s32 *sensor_settings = sd->sensor_priv;
302
303 *val = sensor_settings[BRIGHTNESS_IDX];
304
305 PDEBUG(D_V4L2, "Read brightness %d", *val);
306
307 return 0;
308}
309
310static int st6422_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
311{
312 int err;
313 struct sd *sd = (struct sd *) gspca_dev;
314 s32 *sensor_settings = sd->sensor_priv;
315
316 sensor_settings[BRIGHTNESS_IDX] = val;
317
318 if (!gspca_dev->streaming)
319 return 0;
320
321 /* val goes from 0 -> 31 */
322 PDEBUG(D_V4L2, "Set brightness to %d", val);
323 err = stv06xx_write_bridge(sd, 0x1432, val);
324 if (err < 0)
325 return err;
326
327 /* commit settings */
328 err = stv06xx_write_bridge(sd, 0x143f, 0x01);
329 return (err < 0) ? err : 0;
330}
331
332static int st6422_get_contrast(struct gspca_dev *gspca_dev, __s32 *val)
333{
334 struct sd *sd = (struct sd *) gspca_dev;
335 s32 *sensor_settings = sd->sensor_priv;
336
337 *val = sensor_settings[CONTRAST_IDX];
338
339 PDEBUG(D_V4L2, "Read contrast %d", *val);
340
341 return 0;
342}
343
344static int st6422_set_contrast(struct gspca_dev *gspca_dev, __s32 val)
345{
346 int err;
347 struct sd *sd = (struct sd *) gspca_dev;
348 s32 *sensor_settings = sd->sensor_priv;
349
350 sensor_settings[CONTRAST_IDX] = val;
351
352 if (!gspca_dev->streaming)
353 return 0;
354
355 /* Val goes from 0 -> 15 */
356 PDEBUG(D_V4L2, "Set contrast to %d\n", val);
357 err = stv06xx_write_bridge(sd, 0x143a, 0xf0 | val);
358 if (err < 0)
359 return err;
360
361 /* commit settings */
362 err = stv06xx_write_bridge(sd, 0x143f, 0x01);
363 return (err < 0) ? err : 0;
364}
365
366static int st6422_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
367{
368 struct sd *sd = (struct sd *) gspca_dev;
369 s32 *sensor_settings = sd->sensor_priv;
370
371 *val = sensor_settings[GAIN_IDX];
372
373 PDEBUG(D_V4L2, "Read gain %d", *val);
374
375 return 0;
376}
377
378static int st6422_set_gain(struct gspca_dev *gspca_dev, __s32 val)
379{
380 int err;
381 struct sd *sd = (struct sd *) gspca_dev;
382 s32 *sensor_settings = sd->sensor_priv;
383
384 sensor_settings[GAIN_IDX] = val;
385
386 if (!gspca_dev->streaming)
387 return 0;
388
389 PDEBUG(D_V4L2, "Set gain to %d", val);
390
391 /* Set red, green, blue, gain */
392 err = stv06xx_write_bridge(sd, 0x0509, val);
393 if (err < 0)
394 return err;
395
396 err = stv06xx_write_bridge(sd, 0x050a, val);
397 if (err < 0)
398 return err;
399
400 err = stv06xx_write_bridge(sd, 0x050b, val);
401 if (err < 0)
402 return err;
403
404 /* 2 mystery writes */
405 err = stv06xx_write_bridge(sd, 0x050c, 0x2a);
406 if (err < 0)
407 return err;
408
409 err = stv06xx_write_bridge(sd, 0x050d, 0x01);
410 if (err < 0)
411 return err;
412
413 /* commit settings */
414 err = stv06xx_write_bridge(sd, 0x143f, 0x01);
415 return (err < 0) ? err : 0;
416}
417
418static int st6422_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
419{
420 struct sd *sd = (struct sd *) gspca_dev;
421 s32 *sensor_settings = sd->sensor_priv;
422
423 *val = sensor_settings[EXPOSURE_IDX];
424
425 PDEBUG(D_V4L2, "Read exposure %d", *val);
426
427 return 0;
428}
429
430static int st6422_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
431{
432 int err;
433 struct sd *sd = (struct sd *) gspca_dev;
434 s32 *sensor_settings = sd->sensor_priv;
435
436 sensor_settings[EXPOSURE_IDX] = val;
437
438 if (!gspca_dev->streaming)
439 return 0;
440
441 PDEBUG(D_V4L2, "Set exposure to %d\n", val);
442 err = stv06xx_write_bridge(sd, 0x143d, val & 0xff);
443 if (err < 0)
444 return err;
445
446 err = stv06xx_write_bridge(sd, 0x143e, val >> 8);
447 if (err < 0)
448 return err;
449
450 /* commit settings */
451 err = stv06xx_write_bridge(sd, 0x143f, 0x01);
452 return (err < 0) ? err : 0;
453}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h
new file mode 100644
index 000000000000..b2d45fe50522
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h
@@ -0,0 +1,59 @@
1/*
2 * Support for the sensor part which is integrated (I think) into the
3 * st6422 stv06xx alike bridge, as its integrated there are no i2c writes
4 * but instead direct bridge writes.
5 *
6 * Copyright (c) 2009 Hans de Goede <hdegoede@redhat.com>
7 *
8 * Strongly based on qc-usb-messenger, which is:
9 * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
10 * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
11 * Copyright (c) 2002, 2003 Tuukka Toivonen
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 *
27 */
28
29#ifndef STV06XX_ST6422_H_
30#define STV06XX_ST6422_H_
31
32#include "stv06xx_sensor.h"
33
34static int st6422_probe(struct sd *sd);
35static int st6422_start(struct sd *sd);
36static int st6422_init(struct sd *sd);
37static int st6422_stop(struct sd *sd);
38static void st6422_disconnect(struct sd *sd);
39
40/* V4L2 controls supported by the driver */
41static int st6422_get_brightness(struct gspca_dev *gspca_dev, __s32 *val);
42static int st6422_set_brightness(struct gspca_dev *gspca_dev, __s32 val);
43static int st6422_get_contrast(struct gspca_dev *gspca_dev, __s32 *val);
44static int st6422_set_contrast(struct gspca_dev *gspca_dev, __s32 val);
45static int st6422_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
46static int st6422_set_gain(struct gspca_dev *gspca_dev, __s32 val);
47static int st6422_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
48static int st6422_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
49
50const struct stv06xx_sensor stv06xx_sensor_st6422 = {
51 .name = "ST6422",
52 .init = st6422_init,
53 .probe = st6422_probe,
54 .start = st6422_start,
55 .stop = st6422_stop,
56 .disconnect = st6422_disconnect,
57};
58
59#endif
diff --git a/drivers/media/video/ivtv/ivtv-controls.c b/drivers/media/video/ivtv/ivtv-controls.c
index 84995bcf4a75..a3b77ed3f089 100644
--- a/drivers/media/video/ivtv/ivtv-controls.c
+++ b/drivers/media/video/ivtv/ivtv-controls.c
@@ -60,6 +60,8 @@ int ivtv_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qctrl)
60 60
61 switch (qctrl->id) { 61 switch (qctrl->id) {
62 /* Standard V4L2 controls */ 62 /* Standard V4L2 controls */
63 case V4L2_CID_USER_CLASS:
64 return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0);
63 case V4L2_CID_BRIGHTNESS: 65 case V4L2_CID_BRIGHTNESS:
64 case V4L2_CID_HUE: 66 case V4L2_CID_HUE:
65 case V4L2_CID_SATURATION: 67 case V4L2_CID_SATURATION:
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index 459c04cbf69d..4d794b42d6cd 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -280,15 +280,9 @@ static int mt9m001_try_fmt(struct soc_camera_device *icd,
280{ 280{
281 struct v4l2_pix_format *pix = &f->fmt.pix; 281 struct v4l2_pix_format *pix = &f->fmt.pix;
282 282
283 if (pix->height < 32 + icd->y_skip_top) 283 v4l_bound_align_image(&pix->width, 48, 1280, 1,
284 pix->height = 32 + icd->y_skip_top; 284 &pix->height, 32 + icd->y_skip_top,
285 if (pix->height > 1024 + icd->y_skip_top) 285 1024 + icd->y_skip_top, 0, 0);
286 pix->height = 1024 + icd->y_skip_top;
287 if (pix->width < 48)
288 pix->width = 48;
289 if (pix->width > 1280)
290 pix->width = 1280;
291 pix->width &= ~0x01; /* has to be even, unsure why was ~3 */
292 286
293 return 0; 287 return 0;
294} 288}
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index f72aeb7c4deb..4207fb342670 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -385,17 +385,9 @@ static int mt9t031_try_fmt(struct soc_camera_device *icd,
385{ 385{
386 struct v4l2_pix_format *pix = &f->fmt.pix; 386 struct v4l2_pix_format *pix = &f->fmt.pix;
387 387
388 if (pix->height < MT9T031_MIN_HEIGHT) 388 v4l_bound_align_image(
389 pix->height = MT9T031_MIN_HEIGHT; 389 &pix->width, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH, 1,
390 if (pix->height > MT9T031_MAX_HEIGHT) 390 &pix->height, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT, 1, 0);
391 pix->height = MT9T031_MAX_HEIGHT;
392 if (pix->width < MT9T031_MIN_WIDTH)
393 pix->width = MT9T031_MIN_WIDTH;
394 if (pix->width > MT9T031_MAX_WIDTH)
395 pix->width = MT9T031_MAX_WIDTH;
396
397 pix->width &= ~0x01; /* has to be even */
398 pix->height &= ~0x01; /* has to be even */
399 391
400 return 0; 392 return 0;
401} 393}
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index be20d312b1dc..dbdcc86ae50d 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -364,15 +364,9 @@ static int mt9v022_try_fmt(struct soc_camera_device *icd,
364{ 364{
365 struct v4l2_pix_format *pix = &f->fmt.pix; 365 struct v4l2_pix_format *pix = &f->fmt.pix;
366 366
367 if (pix->height < 32 + icd->y_skip_top) 367 v4l_bound_align_image(&pix->width, 48, 752, 2 /* ? */,
368 pix->height = 32 + icd->y_skip_top; 368 &pix->height, 32 + icd->y_skip_top,
369 if (pix->height > 480 + icd->y_skip_top) 369 480 + icd->y_skip_top, 0, 0);
370 pix->height = 480 + icd->y_skip_top;
371 if (pix->width < 48)
372 pix->width = 48;
373 if (pix->width > 752)
374 pix->width = 752;
375 pix->width &= ~0x03; /* ? */
376 370
377 return 0; 371 return 0;
378} 372}
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index 08cfd3e4ae8a..0bc2cf573c76 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -211,8 +211,6 @@ static const int i2c_detect_tries = 5;
211static struct usb_device_id device_table [] = { 211static struct usb_device_id device_table [] = {
212 { USB_DEVICE(VEND_OMNIVISION, PROD_OV511) }, 212 { USB_DEVICE(VEND_OMNIVISION, PROD_OV511) },
213 { USB_DEVICE(VEND_OMNIVISION, PROD_OV511PLUS) }, 213 { USB_DEVICE(VEND_OMNIVISION, PROD_OV511PLUS) },
214 { USB_DEVICE(VEND_OMNIVISION, PROD_OV518) },
215 { USB_DEVICE(VEND_OMNIVISION, PROD_OV518PLUS) },
216 { USB_DEVICE(VEND_MATTEL, PROD_ME2CAM) }, 214 { USB_DEVICE(VEND_MATTEL, PROD_ME2CAM) },
217 { } /* Terminating entry */ 215 { } /* Terminating entry */
218}; 216};
diff --git a/drivers/media/video/pvrusb2/pvrusb2-audio.c b/drivers/media/video/pvrusb2/pvrusb2-audio.c
index 10ef1a2c13ea..416933ca607d 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-audio.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-audio.c
@@ -48,11 +48,13 @@ static const int routing_scheme0[] = {
48 MSP_DSP_IN_SCART), 48 MSP_DSP_IN_SCART),
49}; 49};
50 50
51static const struct routing_scheme routing_schemes[] = { 51static const struct routing_scheme routing_def0 = {
52 [PVR2_ROUTING_SCHEME_HAUPPAUGE] = { 52 .def = routing_scheme0,
53 .def = routing_scheme0, 53 .cnt = ARRAY_SIZE(routing_scheme0),
54 .cnt = ARRAY_SIZE(routing_scheme0), 54};
55 }, 55
56static const struct routing_scheme *routing_schemes[] = {
57 [PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0,
56}; 58};
57 59
58void pvr2_msp3400_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) 60void pvr2_msp3400_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
@@ -65,7 +67,7 @@ void pvr2_msp3400_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
65 pvr2_trace(PVR2_TRACE_CHIPS, "subdev msp3400 v4l2 set_stereo"); 67 pvr2_trace(PVR2_TRACE_CHIPS, "subdev msp3400 v4l2 set_stereo");
66 68
67 if ((sid < ARRAY_SIZE(routing_schemes)) && 69 if ((sid < ARRAY_SIZE(routing_schemes)) &&
68 ((sp = routing_schemes + sid) != NULL) && 70 ((sp = routing_schemes[sid]) != NULL) &&
69 (hdw->input_val >= 0) && 71 (hdw->input_val >= 0) &&
70 (hdw->input_val < sp->cnt)) { 72 (hdw->input_val < sp->cnt)) {
71 input = sp->def[hdw->input_val]; 73 input = sp->def[hdw->input_val];
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c b/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c
index 9023adf3fdcc..68980e19409f 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c
@@ -49,11 +49,13 @@ static const int routing_scheme1[] = {
49 [PVR2_CVAL_INPUT_SVIDEO] = 0, 49 [PVR2_CVAL_INPUT_SVIDEO] = 0,
50}; 50};
51 51
52static const struct routing_scheme routing_schemes[] = { 52static const struct routing_scheme routing_def1 = {
53 [PVR2_ROUTING_SCHEME_ONAIR] = { 53 .def = routing_scheme1,
54 .def = routing_scheme1, 54 .cnt = ARRAY_SIZE(routing_scheme1),
55 .cnt = ARRAY_SIZE(routing_scheme1), 55};
56 }, 56
57static const struct routing_scheme *routing_schemes[] = {
58 [PVR2_ROUTING_SCHEME_ONAIR] = &routing_def1,
57}; 59};
58 60
59 61
@@ -65,12 +67,11 @@ void pvr2_cs53l32a_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
65 u32 input; 67 u32 input;
66 pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)", 68 pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)",
67 hdw->input_val); 69 hdw->input_val);
68 if ((sid < ARRAY_SIZE(routing_schemes)) && 70 sp = (sid < ARRAY_SIZE(routing_schemes)) ?
69 ((sp = routing_schemes + sid) != NULL) && 71 routing_schemes[sid] : NULL;
70 (hdw->input_val >= 0) && 72 if ((sp == NULL) ||
71 (hdw->input_val < sp->cnt)) { 73 (hdw->input_val < 0) ||
72 input = sp->def[hdw->input_val]; 74 (hdw->input_val >= sp->cnt)) {
73 } else {
74 pvr2_trace(PVR2_TRACE_ERROR_LEGS, 75 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
75 "*** WARNING *** subdev v4l2 set_input:" 76 "*** WARNING *** subdev v4l2 set_input:"
76 " Invalid routing scheme (%u)" 77 " Invalid routing scheme (%u)"
@@ -78,6 +79,7 @@ void pvr2_cs53l32a_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
78 sid, hdw->input_val); 79 sid, hdw->input_val);
79 return; 80 return;
80 } 81 }
82 input = sp->def[hdw->input_val];
81 sd->ops->audio->s_routing(sd, input, 0, 0); 83 sd->ops->audio->s_routing(sd, input, 0, 0);
82 } 84 }
83} 85}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
index 05e52358ae49..82c135835753 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
@@ -68,6 +68,11 @@ static const struct routing_scheme_item routing_scheme0[] = {
68 }, 68 },
69}; 69};
70 70
71static const struct routing_scheme routing_def0 = {
72 .def = routing_scheme0,
73 .cnt = ARRAY_SIZE(routing_scheme0),
74};
75
71/* Specific to gotview device */ 76/* Specific to gotview device */
72static const struct routing_scheme_item routing_schemegv[] = { 77static const struct routing_scheme_item routing_schemegv[] = {
73 [PVR2_CVAL_INPUT_TV] = { 78 [PVR2_CVAL_INPUT_TV] = {
@@ -90,15 +95,14 @@ static const struct routing_scheme_item routing_schemegv[] = {
90 }, 95 },
91}; 96};
92 97
93static const struct routing_scheme routing_schemes[] = { 98static const struct routing_scheme routing_defgv = {
94 [PVR2_ROUTING_SCHEME_HAUPPAUGE] = { 99 .def = routing_schemegv,
95 .def = routing_scheme0, 100 .cnt = ARRAY_SIZE(routing_schemegv),
96 .cnt = ARRAY_SIZE(routing_scheme0), 101};
97 }, 102
98 [PVR2_ROUTING_SCHEME_GOTVIEW] = { 103static const struct routing_scheme *routing_schemes[] = {
99 .def = routing_schemegv, 104 [PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0,
100 .cnt = ARRAY_SIZE(routing_schemegv), 105 [PVR2_ROUTING_SCHEME_GOTVIEW] = &routing_defgv,
101 },
102}; 106};
103 107
104void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) 108void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
@@ -110,13 +114,11 @@ void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
110 const struct routing_scheme *sp; 114 const struct routing_scheme *sp;
111 unsigned int sid = hdw->hdw_desc->signal_routing_scheme; 115 unsigned int sid = hdw->hdw_desc->signal_routing_scheme;
112 116
113 if ((sid < ARRAY_SIZE(routing_schemes)) && 117 sp = (sid < ARRAY_SIZE(routing_schemes)) ?
114 ((sp = routing_schemes + sid) != NULL) && 118 routing_schemes[sid] : NULL;
115 (hdw->input_val >= 0) && 119 if ((sp == NULL) ||
116 (hdw->input_val < sp->cnt)) { 120 (hdw->input_val < 0) ||
117 vid_input = sp->def[hdw->input_val].vid; 121 (hdw->input_val >= sp->cnt)) {
118 aud_input = sp->def[hdw->input_val].aud;
119 } else {
120 pvr2_trace(PVR2_TRACE_ERROR_LEGS, 122 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
121 "*** WARNING *** subdev cx2584x set_input:" 123 "*** WARNING *** subdev cx2584x set_input:"
122 " Invalid routing scheme (%u)" 124 " Invalid routing scheme (%u)"
@@ -124,7 +126,8 @@ void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
124 sid, hdw->input_val); 126 sid, hdw->input_val);
125 return; 127 return;
126 } 128 }
127 129 vid_input = sp->def[hdw->input_val].vid;
130 aud_input = sp->def[hdw->input_val].aud;
128 pvr2_trace(PVR2_TRACE_CHIPS, 131 pvr2_trace(PVR2_TRACE_CHIPS,
129 "subdev cx2584x set_input vid=0x%x aud=0x%x", 132 "subdev cx2584x set_input vid=0x%x aud=0x%x",
130 vid_input, aud_input); 133 vid_input, aud_input);
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 0c745b142fb7..cbc388729d77 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -85,8 +85,8 @@ MODULE_PARM_DESC(video_std,"specify initial video standard");
85module_param_array(tolerance, int, NULL, 0444); 85module_param_array(tolerance, int, NULL, 0444);
86MODULE_PARM_DESC(tolerance,"specify stream error tolerance"); 86MODULE_PARM_DESC(tolerance,"specify stream error tolerance");
87 87
88/* US Broadcast channel 7 (175.25 MHz) */ 88/* US Broadcast channel 3 (61.25 MHz), to help with testing */
89static int default_tv_freq = 175250000L; 89static int default_tv_freq = 61250000L;
90/* 104.3 MHz, a usable FM station for my area */ 90/* 104.3 MHz, a usable FM station for my area */
91static int default_radio_freq = 104300000L; 91static int default_radio_freq = 104300000L;
92 92
@@ -1987,6 +1987,34 @@ static unsigned int pvr2_copy_i2c_addr_list(
1987} 1987}
1988 1988
1989 1989
1990static void pvr2_hdw_cx25840_vbi_hack(struct pvr2_hdw *hdw)
1991{
1992 /*
1993 Mike Isely <isely@pobox.com> 19-Nov-2006 - This bit of nuttiness
1994 for cx25840 causes that module to correctly set up its video
1995 scaling. This is really a problem in the cx25840 module itself,
1996 but we work around it here. The problem has not been seen in
1997 ivtv because there VBI is supported and set up. We don't do VBI
1998 here (at least not yet) and thus we never attempted to even set
1999 it up.
2000 */
2001 struct v4l2_format fmt;
2002 if (hdw->decoder_client_id != PVR2_CLIENT_ID_CX25840) {
2003 /* We're not using a cx25840 so don't enable the hack */
2004 return;
2005 }
2006
2007 pvr2_trace(PVR2_TRACE_INIT,
2008 "Module ID %u:"
2009 " Executing cx25840 VBI hack",
2010 hdw->decoder_client_id);
2011 memset(&fmt, 0, sizeof(fmt));
2012 fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
2013 v4l2_device_call_all(&hdw->v4l2_dev, hdw->decoder_client_id,
2014 video, s_fmt, &fmt);
2015}
2016
2017
1990static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw, 2018static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
1991 const struct pvr2_device_client_desc *cd) 2019 const struct pvr2_device_client_desc *cd)
1992{ 2020{
@@ -2078,30 +2106,6 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
2078 /* client-specific setup... */ 2106 /* client-specific setup... */
2079 switch (mid) { 2107 switch (mid) {
2080 case PVR2_CLIENT_ID_CX25840: 2108 case PVR2_CLIENT_ID_CX25840:
2081 hdw->decoder_client_id = mid;
2082 {
2083 /*
2084 Mike Isely <isely@pobox.com> 19-Nov-2006 - This
2085 bit of nuttiness for cx25840 causes that module
2086 to correctly set up its video scaling. This is
2087 really a problem in the cx25840 module itself,
2088 but we work around it here. The problem has not
2089 been seen in ivtv because there VBI is supported
2090 and set up. We don't do VBI here (at least not
2091 yet) and thus we never attempted to even set it
2092 up.
2093 */
2094 struct v4l2_format fmt;
2095 pvr2_trace(PVR2_TRACE_INIT,
2096 "Module ID %u:"
2097 " Executing cx25840 VBI hack",
2098 mid);
2099 memset(&fmt, 0, sizeof(fmt));
2100 fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
2101 v4l2_device_call_all(&hdw->v4l2_dev, mid,
2102 video, s_fmt, &fmt);
2103 }
2104 break;
2105 case PVR2_CLIENT_ID_SAA7115: 2109 case PVR2_CLIENT_ID_SAA7115:
2106 hdw->decoder_client_id = mid; 2110 hdw->decoder_client_id = mid;
2107 break; 2111 break;
@@ -2202,6 +2206,8 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
2202 cptr->info->set_value(cptr,~0,cptr->info->default_value); 2206 cptr->info->set_value(cptr,~0,cptr->info->default_value);
2203 } 2207 }
2204 2208
2209 pvr2_hdw_cx25840_vbi_hack(hdw);
2210
2205 /* Set up special default values for the television and radio 2211 /* Set up special default values for the television and radio
2206 frequencies here. It's not really important what these defaults 2212 frequencies here. It's not really important what these defaults
2207 are, but I set them to something usable in the Chicago area just 2213 are, but I set them to something usable in the Chicago area just
@@ -2954,6 +2960,7 @@ static void pvr2_subdev_update(struct pvr2_hdw *hdw)
2954 vs = hdw->std_mask_cur; 2960 vs = hdw->std_mask_cur;
2955 v4l2_device_call_all(&hdw->v4l2_dev, 0, 2961 v4l2_device_call_all(&hdw->v4l2_dev, 0,
2956 core, s_std, vs); 2962 core, s_std, vs);
2963 pvr2_hdw_cx25840_vbi_hack(hdw);
2957 } 2964 }
2958 hdw->tuner_signal_stale = !0; 2965 hdw->tuner_signal_stale = !0;
2959 hdw->cropcap_stale = !0; 2966 hdw->cropcap_stale = !0;
@@ -4076,6 +4083,7 @@ int pvr2_hdw_cmd_decoder_reset(struct pvr2_hdw *hdw)
4076 if (hdw->decoder_client_id) { 4083 if (hdw->decoder_client_id) {
4077 v4l2_device_call_all(&hdw->v4l2_dev, hdw->decoder_client_id, 4084 v4l2_device_call_all(&hdw->v4l2_dev, hdw->decoder_client_id,
4078 core, reset, 0); 4085 core, reset, 0);
4086 pvr2_hdw_cx25840_vbi_hack(hdw);
4079 return 0; 4087 return 0;
4080 } 4088 }
4081 pvr2_trace(PVR2_TRACE_INIT, 4089 pvr2_trace(PVR2_TRACE_INIT,
diff --git a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
index d2fe7c8f2c3a..4c96cf48c796 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
@@ -54,6 +54,11 @@ static const int routing_scheme0[] = {
54 [PVR2_CVAL_INPUT_SVIDEO] = SAA7115_SVIDEO2, 54 [PVR2_CVAL_INPUT_SVIDEO] = SAA7115_SVIDEO2,
55}; 55};
56 56
57static const struct routing_scheme routing_def0 = {
58 .def = routing_scheme0,
59 .cnt = ARRAY_SIZE(routing_scheme0),
60};
61
57static const int routing_scheme1[] = { 62static const int routing_scheme1[] = {
58 [PVR2_CVAL_INPUT_TV] = SAA7115_COMPOSITE4, 63 [PVR2_CVAL_INPUT_TV] = SAA7115_COMPOSITE4,
59 [PVR2_CVAL_INPUT_RADIO] = SAA7115_COMPOSITE5, 64 [PVR2_CVAL_INPUT_RADIO] = SAA7115_COMPOSITE5,
@@ -61,15 +66,14 @@ static const int routing_scheme1[] = {
61 [PVR2_CVAL_INPUT_SVIDEO] = SAA7115_SVIDEO2, /* or SVIDEO0, it seems */ 66 [PVR2_CVAL_INPUT_SVIDEO] = SAA7115_SVIDEO2, /* or SVIDEO0, it seems */
62}; 67};
63 68
64static const struct routing_scheme routing_schemes[] = { 69static const struct routing_scheme routing_def1 = {
65 [PVR2_ROUTING_SCHEME_HAUPPAUGE] = { 70 .def = routing_scheme1,
66 .def = routing_scheme0, 71 .cnt = ARRAY_SIZE(routing_scheme1),
67 .cnt = ARRAY_SIZE(routing_scheme0), 72};
68 }, 73
69 [PVR2_ROUTING_SCHEME_ONAIR] = { 74static const struct routing_scheme *routing_schemes[] = {
70 .def = routing_scheme1, 75 [PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0,
71 .cnt = ARRAY_SIZE(routing_scheme1), 76 [PVR2_ROUTING_SCHEME_ONAIR] = &routing_def1,
72 },
73}; 77};
74 78
75void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) 79void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
@@ -81,12 +85,12 @@ void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
81 85
82 pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)", 86 pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)",
83 hdw->input_val); 87 hdw->input_val);
84 if ((sid < ARRAY_SIZE(routing_schemes)) && 88
85 ((sp = routing_schemes + sid) != NULL) && 89 sp = (sid < ARRAY_SIZE(routing_schemes)) ?
86 (hdw->input_val >= 0) && 90 routing_schemes[sid] : NULL;
87 (hdw->input_val < sp->cnt)) { 91 if ((sp == NULL) ||
88 input = sp->def[hdw->input_val]; 92 (hdw->input_val < 0) ||
89 } else { 93 (hdw->input_val >= sp->cnt)) {
90 pvr2_trace(PVR2_TRACE_ERROR_LEGS, 94 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
91 "*** WARNING *** subdev v4l2 set_input:" 95 "*** WARNING *** subdev v4l2 set_input:"
92 " Invalid routing scheme (%u)" 96 " Invalid routing scheme (%u)"
@@ -94,6 +98,7 @@ void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
94 sid, hdw->input_val); 98 sid, hdw->input_val);
95 return; 99 return;
96 } 100 }
101 input = sp->def[hdw->input_val];
97 sd->ops->video->s_routing(sd, input, 0, 0); 102 sd->ops->video->s_routing(sd, input, 0, 0);
98 } 103 }
99} 104}
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index f60de40fd21f..46e0d8ad880f 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -163,13 +163,6 @@
163 CICR0_EOFM | CICR0_FOM) 163 CICR0_EOFM | CICR0_FOM)
164 164
165/* 165/*
166 * YUV422P picture size should be a multiple of 16, so the heuristic aligns
167 * height, width on 4 byte boundaries to reach the 16 multiple for the size.
168 */
169#define YUV422P_X_Y_ALIGN 4
170#define YUV422P_SIZE_ALIGN YUV422P_X_Y_ALIGN * YUV422P_X_Y_ALIGN
171
172/*
173 * Structures 166 * Structures
174 */ 167 */
175enum pxa_camera_active_dma { 168enum pxa_camera_active_dma {
@@ -1398,28 +1391,15 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
1398 return -EINVAL; 1391 return -EINVAL;
1399 } 1392 }
1400 1393
1401 /* limit to pxa hardware capabilities */
1402 if (pix->height < 32)
1403 pix->height = 32;
1404 if (pix->height > 2048)
1405 pix->height = 2048;
1406 if (pix->width < 48)
1407 pix->width = 48;
1408 if (pix->width > 2048)
1409 pix->width = 2048;
1410 pix->width &= ~0x01;
1411
1412 /* 1394 /*
1413 * YUV422P planar format requires images size to be a 16 bytes 1395 * Limit to pxa hardware capabilities. YUV422P planar format requires
1414 * multiple. If not, zeros will be inserted between Y and U planes, and 1396 * images size to be a multiple of 16 bytes. If not, zeros will be
1415 * U and V planes, and YUV422P standard would be violated. 1397 * inserted between Y and U planes, and U and V planes, which violates
1398 * the YUV422P standard.
1416 */ 1399 */
1417 if (xlate->host_fmt->fourcc == V4L2_PIX_FMT_YUV422P) { 1400 v4l_bound_align_image(&pix->width, 48, 2048, 1,
1418 if (!IS_ALIGNED(pix->width * pix->height, YUV422P_SIZE_ALIGN)) 1401 &pix->height, 32, 2048, 0,
1419 pix->height = ALIGN(pix->height, YUV422P_X_Y_ALIGN); 1402 xlate->host_fmt->fourcc == V4L2_PIX_FMT_YUV422P ? 4 : 0);
1420 if (!IS_ALIGNED(pix->width * pix->height, YUV422P_SIZE_ALIGN))
1421 pix->width = ALIGN(pix->width, YUV422P_X_Y_ALIGN);
1422 }
1423 1403
1424 pix->bytesperline = pix->width * 1404 pix->bytesperline = pix->width *
1425 DIV_ROUND_UP(xlate->host_fmt->depth, 8); 1405 DIV_ROUND_UP(xlate->host_fmt->depth, 8);
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index e305c1674cee..ba87128542e0 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1640,15 +1640,8 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
1640 } 1640 }
1641 1641
1642 f->fmt.pix.field = field; 1642 f->fmt.pix.field = field;
1643 if (f->fmt.pix.width < 48) 1643 v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
1644 f->fmt.pix.width = 48; 1644 &f->fmt.pix.height, 32, maxh, 0, 0);
1645 if (f->fmt.pix.height < 32)
1646 f->fmt.pix.height = 32;
1647 if (f->fmt.pix.width > maxw)
1648 f->fmt.pix.width = maxw;
1649 if (f->fmt.pix.height > maxh)
1650 f->fmt.pix.height = maxh;
1651 f->fmt.pix.width &= ~0x03;
1652 f->fmt.pix.bytesperline = 1645 f->fmt.pix.bytesperline =
1653 (f->fmt.pix.width * fmt->depth) >> 3; 1646 (f->fmt.pix.width * fmt->depth) >> 3;
1654 f->fmt.pix.sizeimage = 1647 f->fmt.pix.sizeimage =
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index d369e8409ab8..0db88a53d92c 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -689,16 +689,8 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
689 689
690 /* FIXME: calculate using depth and bus width */ 690 /* FIXME: calculate using depth and bus width */
691 691
692 if (f->fmt.pix.height < 4) 692 v4l_bound_align_image(&f->fmt.pix.width, 2, 2560, 1,
693 f->fmt.pix.height = 4; 693 &f->fmt.pix.height, 4, 1920, 2, 0);
694 if (f->fmt.pix.height > 1920)
695 f->fmt.pix.height = 1920;
696 if (f->fmt.pix.width < 2)
697 f->fmt.pix.width = 2;
698 if (f->fmt.pix.width > 2560)
699 f->fmt.pix.width = 2560;
700 f->fmt.pix.width &= ~0x01;
701 f->fmt.pix.height &= ~0x03;
702 694
703 f->fmt.pix.bytesperline = f->fmt.pix.width * 695 f->fmt.pix.bytesperline = f->fmt.pix.width *
704 DIV_ROUND_UP(xlate->host_fmt->depth, 8); 696 DIV_ROUND_UP(xlate->host_fmt->depth, 8);
diff --git a/drivers/media/video/tcm825x.c b/drivers/media/video/tcm825x.c
index b30c49248217..b90e9da3167d 100644
--- a/drivers/media/video/tcm825x.c
+++ b/drivers/media/video/tcm825x.c
@@ -878,7 +878,7 @@ static int tcm825x_probe(struct i2c_client *client,
878 return rval; 878 return rval;
879} 879}
880 880
881static int __exit tcm825x_remove(struct i2c_client *client) 881static int tcm825x_remove(struct i2c_client *client)
882{ 882{
883 struct tcm825x_sensor *sensor = i2c_get_clientdata(client); 883 struct tcm825x_sensor *sensor = i2c_get_clientdata(client);
884 884
@@ -902,7 +902,7 @@ static struct i2c_driver tcm825x_i2c_driver = {
902 .name = TCM825X_NAME, 902 .name = TCM825X_NAME,
903 }, 903 },
904 .probe = tcm825x_probe, 904 .probe = tcm825x_probe,
905 .remove = __exit_p(tcm825x_remove), 905 .remove = tcm825x_remove,
906 .id_table = tcm825x_id, 906 .id_table = tcm825x_id,
907}; 907};
908 908
diff --git a/drivers/media/video/usbvideo/Kconfig b/drivers/media/video/usbvideo/Kconfig
index e4cb99c1f94b..adb1c044ad7d 100644
--- a/drivers/media/video/usbvideo/Kconfig
+++ b/drivers/media/video/usbvideo/Kconfig
@@ -38,10 +38,13 @@ config USB_KONICAWC
38 module will be called konicawc. 38 module will be called konicawc.
39 39
40config USB_QUICKCAM_MESSENGER 40config USB_QUICKCAM_MESSENGER
41 tristate "USB Logitech Quickcam Messenger" 41 tristate "USB Logitech Quickcam Messenger (DEPRECATED)"
42 depends on VIDEO_V4L1 42 depends on VIDEO_V4L1
43 select VIDEO_USBVIDEO 43 select VIDEO_USBVIDEO
44 ---help--- 44 ---help---
45 This driver is DEPRECATED please use the gspca stv06xx module
46 instead.
47
45 Say Y or M here to enable support for the USB Logitech Quickcam 48 Say Y or M here to enable support for the USB Logitech Quickcam
46 Messenger webcam. 49 Messenger webcam.
47 50
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index f96475626da7..b91d66a767d7 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -802,6 +802,17 @@ struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
802 /* Decrease the module use count to match the first try_module_get. */ 802 /* Decrease the module use count to match the first try_module_get. */
803 module_put(client->driver->driver.owner); 803 module_put(client->driver->driver.owner);
804 804
805 if (sd) {
806 /* We return errors from v4l2_subdev_call only if we have the
807 callback as the .s_config is not mandatory */
808 int err = v4l2_subdev_call(sd, core, s_config, 0, NULL);
809
810 if (err && err != -ENOIOCTLCMD) {
811 v4l2_device_unregister_subdev(sd);
812 sd = NULL;
813 }
814 }
815
805error: 816error:
806 /* If we have a client but no subdev, then something went wrong and 817 /* If we have a client but no subdev, then something went wrong and
807 we must unregister the client. */ 818 we must unregister the client. */
@@ -852,6 +863,17 @@ struct v4l2_subdev *v4l2_i2c_new_probed_subdev(struct v4l2_device *v4l2_dev,
852 /* Decrease the module use count to match the first try_module_get. */ 863 /* Decrease the module use count to match the first try_module_get. */
853 module_put(client->driver->driver.owner); 864 module_put(client->driver->driver.owner);
854 865
866 if (sd) {
867 /* We return errors from v4l2_subdev_call only if we have the
868 callback as the .s_config is not mandatory */
869 int err = v4l2_subdev_call(sd, core, s_config, 0, NULL);
870
871 if (err && err != -ENOIOCTLCMD) {
872 v4l2_device_unregister_subdev(sd);
873 sd = NULL;
874 }
875 }
876
855error: 877error:
856 /* If we have a client but no subdev, then something went wrong and 878 /* If we have a client but no subdev, then something went wrong and
857 we must unregister the client. */ 879 we must unregister the client. */
@@ -872,6 +894,89 @@ struct v4l2_subdev *v4l2_i2c_new_probed_subdev_addr(struct v4l2_device *v4l2_dev
872} 894}
873EXPORT_SYMBOL_GPL(v4l2_i2c_new_probed_subdev_addr); 895EXPORT_SYMBOL_GPL(v4l2_i2c_new_probed_subdev_addr);
874 896
897/* Load an i2c sub-device. */
898struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
899 struct i2c_adapter *adapter, const char *module_name,
900 struct i2c_board_info *info, const unsigned short *probe_addrs)
901{
902 struct v4l2_subdev *sd = NULL;
903 struct i2c_client *client;
904
905 BUG_ON(!v4l2_dev);
906
907 if (module_name)
908 request_module(module_name);
909
910 /* Create the i2c client */
911 if (info->addr == 0 && probe_addrs)
912 client = i2c_new_probed_device(adapter, info, probe_addrs);
913 else
914 client = i2c_new_device(adapter, info);
915
916 /* Note: by loading the module first we are certain that c->driver
917 will be set if the driver was found. If the module was not loaded
918 first, then the i2c core tries to delay-load the module for us,
919 and then c->driver is still NULL until the module is finally
920 loaded. This delay-load mechanism doesn't work if other drivers
921 want to use the i2c device, so explicitly loading the module
922 is the best alternative. */
923 if (client == NULL || client->driver == NULL)
924 goto error;
925
926 /* Lock the module so we can safely get the v4l2_subdev pointer */
927 if (!try_module_get(client->driver->driver.owner))
928 goto error;
929 sd = i2c_get_clientdata(client);
930
931 /* Register with the v4l2_device which increases the module's
932 use count as well. */
933 if (v4l2_device_register_subdev(v4l2_dev, sd))
934 sd = NULL;
935 /* Decrease the module use count to match the first try_module_get. */
936 module_put(client->driver->driver.owner);
937
938 if (sd) {
939 /* We return errors from v4l2_subdev_call only if we have the
940 callback as the .s_config is not mandatory */
941 int err = v4l2_subdev_call(sd, core, s_config,
942 info->irq, info->platform_data);
943
944 if (err && err != -ENOIOCTLCMD) {
945 v4l2_device_unregister_subdev(sd);
946 sd = NULL;
947 }
948 }
949
950error:
951 /* If we have a client but no subdev, then something went wrong and
952 we must unregister the client. */
953 if (client && sd == NULL)
954 i2c_unregister_device(client);
955 return sd;
956}
957EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
958
959struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
960 struct i2c_adapter *adapter,
961 const char *module_name, const char *client_type,
962 int irq, void *platform_data,
963 u8 addr, const unsigned short *probe_addrs)
964{
965 struct i2c_board_info info;
966
967 /* Setup the i2c board info with the device type and
968 the device address. */
969 memset(&info, 0, sizeof(info));
970 strlcpy(info.type, client_type, sizeof(info.type));
971 info.addr = addr;
972 info.irq = irq;
973 info.platform_data = platform_data;
974
975 return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, module_name,
976 &info, probe_addrs);
977}
978EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_cfg);
979
875/* Return i2c client address of v4l2_subdev. */ 980/* Return i2c client address of v4l2_subdev. */
876unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd) 981unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
877{ 982{
@@ -916,4 +1021,78 @@ const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type)
916} 1021}
917EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs); 1022EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs);
918 1023
919#endif 1024#endif /* defined(CONFIG_I2C) */
1025
1026/* Clamp x to be between min and max, aligned to a multiple of 2^align. min
1027 * and max don't have to be aligned, but there must be at least one valid
1028 * value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples
1029 * of 16 between 17 and 31. */
1030static unsigned int clamp_align(unsigned int x, unsigned int min,
1031 unsigned int max, unsigned int align)
1032{
1033 /* Bits that must be zero to be aligned */
1034 unsigned int mask = ~((1 << align) - 1);
1035
1036 /* Round to nearest aligned value */
1037 if (align)
1038 x = (x + (1 << (align - 1))) & mask;
1039
1040 /* Clamp to aligned value of min and max */
1041 if (x < min)
1042 x = (min + ~mask) & mask;
1043 else if (x > max)
1044 x = max & mask;
1045
1046 return x;
1047}
1048
1049/* Bound an image to have a width between wmin and wmax, and height between
1050 * hmin and hmax, inclusive. Additionally, the width will be a multiple of
1051 * 2^walign, the height will be a multiple of 2^halign, and the overall size
1052 * (width*height) will be a multiple of 2^salign. The image may be shrunk
1053 * or enlarged to fit the alignment constraints.
1054 *
1055 * The width or height maximum must not be smaller than the corresponding
1056 * minimum. The alignments must not be so high there are no possible image
1057 * sizes within the allowed bounds. wmin and hmin must be at least 1
1058 * (don't use 0). If you don't care about a certain alignment, specify 0,
1059 * as 2^0 is 1 and one byte alignment is equivalent to no alignment. If
1060 * you only want to adjust downward, specify a maximum that's the same as
1061 * the initial value.
1062 */
1063void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
1064 unsigned int walign,
1065 u32 *h, unsigned int hmin, unsigned int hmax,
1066 unsigned int halign, unsigned int salign)
1067{
1068 *w = clamp_align(*w, wmin, wmax, walign);
1069 *h = clamp_align(*h, hmin, hmax, halign);
1070
1071 /* Usually we don't need to align the size and are done now. */
1072 if (!salign)
1073 return;
1074
1075 /* How much alignment do we have? */
1076 walign = __ffs(*w);
1077 halign = __ffs(*h);
1078 /* Enough to satisfy the image alignment? */
1079 if (walign + halign < salign) {
1080 /* Max walign where there is still a valid width */
1081 unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
1082 /* Max halign where there is still a valid height */
1083 unsigned int hmaxa = __fls(hmax ^ (hmin - 1));
1084
1085 /* up the smaller alignment until we have enough */
1086 do {
1087 if (halign >= hmaxa ||
1088 (walign <= halign && walign < wmaxa)) {
1089 *w = clamp_align(*w, wmin, wmax, walign + 1);
1090 walign = __ffs(*w);
1091 } else {
1092 *h = clamp_align(*h, hmin, hmax, halign + 1);
1093 halign = __ffs(*h);
1094 }
1095 } while (halign + walign < salign);
1096 }
1097}
1098EXPORT_SYMBOL_GPL(v4l_bound_align_image);
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index fbfefae7886f..cd7266858462 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -883,15 +883,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
883 maxh = norm_maxh(); 883 maxh = norm_maxh();
884 884
885 f->fmt.pix.field = field; 885 f->fmt.pix.field = field;
886 if (f->fmt.pix.height < 32) 886 v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
887 f->fmt.pix.height = 32; 887 &f->fmt.pix.height, 32, maxh, 0, 0);
888 if (f->fmt.pix.height > maxh)
889 f->fmt.pix.height = maxh;
890 if (f->fmt.pix.width < 48)
891 f->fmt.pix.width = 48;
892 if (f->fmt.pix.width > maxw)
893 f->fmt.pix.width = maxw;
894 f->fmt.pix.width &= ~0x03;
895 f->fmt.pix.bytesperline = 888 f->fmt.pix.bytesperline =
896 (f->fmt.pix.width * fmt->depth) >> 3; 889 (f->fmt.pix.width * fmt->depth) >> 3;
897 f->fmt.pix.sizeimage = 890 f->fmt.pix.sizeimage =
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c
index f59b2bd07e89..6c3f23e31b5c 100644
--- a/drivers/media/video/w9968cf.c
+++ b/drivers/media/video/w9968cf.c
@@ -460,7 +460,7 @@ static int w9968cf_set_picture(struct w9968cf_device*, struct video_picture);
460static int w9968cf_set_window(struct w9968cf_device*, struct video_window); 460static int w9968cf_set_window(struct w9968cf_device*, struct video_window);
461static int w9968cf_postprocess_frame(struct w9968cf_device*, 461static int w9968cf_postprocess_frame(struct w9968cf_device*,
462 struct w9968cf_frame_t*); 462 struct w9968cf_frame_t*);
463static int w9968cf_adjust_window_size(struct w9968cf_device*, u16* w, u16* h); 463static int w9968cf_adjust_window_size(struct w9968cf_device*, u32 *w, u32 *h);
464static void w9968cf_init_framelist(struct w9968cf_device*); 464static void w9968cf_init_framelist(struct w9968cf_device*);
465static void w9968cf_push_frame(struct w9968cf_device*, u8 f_num); 465static void w9968cf_push_frame(struct w9968cf_device*, u8 f_num);
466static void w9968cf_pop_frame(struct w9968cf_device*,struct w9968cf_frame_t**); 466static void w9968cf_pop_frame(struct w9968cf_device*,struct w9968cf_frame_t**);
@@ -1763,8 +1763,7 @@ w9968cf_set_window(struct w9968cf_device* cam, struct video_window win)
1763 #define UNSC(x) ((x) >> 10) 1763 #define UNSC(x) ((x) >> 10)
1764 1764
1765 /* Make sure we are using a supported resolution */ 1765 /* Make sure we are using a supported resolution */
1766 if ((err = w9968cf_adjust_window_size(cam, (u16*)&win.width, 1766 if ((err = w9968cf_adjust_window_size(cam, &win.width, &win.height)))
1767 (u16*)&win.height)))
1768 goto error; 1767 goto error;
1769 1768
1770 /* Scaling factors */ 1769 /* Scaling factors */
@@ -1914,12 +1913,9 @@ error:
1914 Return 0 on success, -1 otherwise. 1913 Return 0 on success, -1 otherwise.
1915 --------------------------------------------------------------------------*/ 1914 --------------------------------------------------------------------------*/
1916static int 1915static int
1917w9968cf_adjust_window_size(struct w9968cf_device* cam, u16* width, u16* height) 1916w9968cf_adjust_window_size(struct w9968cf_device *cam, u32 *width, u32 *height)
1918{ 1917{
1919 u16 maxw, maxh; 1918 unsigned int maxw, maxh, align;
1920
1921 if ((*width < cam->minwidth) || (*height < cam->minheight))
1922 return -ERANGE;
1923 1919
1924 maxw = cam->upscaling && !(cam->vpp_flag & VPP_DECOMPRESSION) && 1920 maxw = cam->upscaling && !(cam->vpp_flag & VPP_DECOMPRESSION) &&
1925 w9968cf_vpp ? max((u16)W9968CF_MAX_WIDTH, cam->maxwidth) 1921 w9968cf_vpp ? max((u16)W9968CF_MAX_WIDTH, cam->maxwidth)
@@ -1927,16 +1923,10 @@ w9968cf_adjust_window_size(struct w9968cf_device* cam, u16* width, u16* height)
1927 maxh = cam->upscaling && !(cam->vpp_flag & VPP_DECOMPRESSION) && 1923 maxh = cam->upscaling && !(cam->vpp_flag & VPP_DECOMPRESSION) &&
1928 w9968cf_vpp ? max((u16)W9968CF_MAX_HEIGHT, cam->maxheight) 1924 w9968cf_vpp ? max((u16)W9968CF_MAX_HEIGHT, cam->maxheight)
1929 : cam->maxheight; 1925 : cam->maxheight;
1926 align = (cam->vpp_flag & VPP_DECOMPRESSION) ? 4 : 0;
1930 1927
1931 if (*width > maxw) 1928 v4l_bound_align_image(width, cam->minwidth, maxw, align,
1932 *width = maxw; 1929 height, cam->minheight, maxh, align, 0);
1933 if (*height > maxh)
1934 *height = maxh;
1935
1936 if (cam->vpp_flag & VPP_DECOMPRESSION) {
1937 *width &= ~15L; /* multiple of 16 */
1938 *height &= ~15L;
1939 }
1940 1930
1941 PDBGG("Window size adjusted w=%u, h=%u ", *width, *height) 1931 PDBGG("Window size adjusted w=%u, h=%u ", *width, *height)
1942 1932
@@ -3043,8 +3033,8 @@ static long w9968cf_v4l_ioctl(struct file *filp,
3043 if (win.clipcount != 0 || win.flags != 0) 3033 if (win.clipcount != 0 || win.flags != 0)
3044 return -EINVAL; 3034 return -EINVAL;
3045 3035
3046 if ((err = w9968cf_adjust_window_size(cam, (u16*)&win.width, 3036 if ((err = w9968cf_adjust_window_size(cam, &win.width,
3047 (u16*)&win.height))) { 3037 &win.height))) {
3048 DBG(4, "Resolution not supported (%ux%u). " 3038 DBG(4, "Resolution not supported (%ux%u). "
3049 "VIDIOCSWIN failed", win.width, win.height) 3039 "VIDIOCSWIN failed", win.width, win.height)
3050 return err; 3040 return err;
@@ -3116,6 +3106,7 @@ static long w9968cf_v4l_ioctl(struct file *filp,
3116 { 3106 {
3117 struct video_mmap mmap; 3107 struct video_mmap mmap;
3118 struct w9968cf_frame_t* fr; 3108 struct w9968cf_frame_t* fr;
3109 u32 w, h;
3119 int err = 0; 3110 int err = 0;
3120 3111
3121 if (copy_from_user(&mmap, arg, sizeof(mmap))) 3112 if (copy_from_user(&mmap, arg, sizeof(mmap)))
@@ -3164,8 +3155,10 @@ static long w9968cf_v4l_ioctl(struct file *filp,
3164 } 3155 }
3165 } 3156 }
3166 3157
3167 if ((err = w9968cf_adjust_window_size(cam, (u16*)&mmap.width, 3158 w = mmap.width; h = mmap.height;
3168 (u16*)&mmap.height))) { 3159 err = w9968cf_adjust_window_size(cam, &w, &h);
3160 mmap.width = w; mmap.height = h;
3161 if (err) {
3169 DBG(4, "Resolution not supported (%dx%d). " 3162 DBG(4, "Resolution not supported (%dx%d). "
3170 "VIDIOCMCAPTURE failed", 3163 "VIDIOCMCAPTURE failed",
3171 mmap.width, mmap.height) 3164 mmap.width, mmap.height)
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index 643cccaa1aab..3d7df32a3d87 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -2088,16 +2088,10 @@ static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
2088 return -EINVAL; 2088 return -EINVAL;
2089 } 2089 }
2090 2090
2091 bpp = (zoran_formats[i].depth + 7) / 8; 2091 bpp = DIV_ROUND_UP(zoran_formats[i].depth, 8);
2092 fmt->fmt.pix.width &= ~((bpp == 2) ? 1 : 3); 2092 v4l_bound_align_image(
2093 if (fmt->fmt.pix.width > BUZ_MAX_WIDTH) 2093 &fmt->fmt.pix.width, BUZ_MIN_WIDTH, BUZ_MAX_WIDTH, bpp == 2 ? 1 : 2,
2094 fmt->fmt.pix.width = BUZ_MAX_WIDTH; 2094 &fmt->fmt.pix.height, BUZ_MIN_HEIGHT, BUZ_MAX_HEIGHT, 0, 0);
2095 if (fmt->fmt.pix.width < BUZ_MIN_WIDTH)
2096 fmt->fmt.pix.width = BUZ_MIN_WIDTH;
2097 if (fmt->fmt.pix.height > BUZ_MAX_HEIGHT)
2098 fmt->fmt.pix.height = BUZ_MAX_HEIGHT;
2099 if (fmt->fmt.pix.height < BUZ_MIN_HEIGHT)
2100 fmt->fmt.pix.height = BUZ_MIN_HEIGHT;
2101 mutex_unlock(&zr->resource_lock); 2095 mutex_unlock(&zr->resource_lock);
2102 2096
2103 return 0; 2097 return 0;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 40111a6d8d5b..891ef18bd77b 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -94,6 +94,31 @@ config MMC_SDHCI_PLTFM
94 94
95 If unsure, say N. 95 If unsure, say N.
96 96
97config MMC_SDHCI_S3C
98 tristate "SDHCI support on Samsung S3C SoC"
99 depends on MMC_SDHCI && (PLAT_S3C24XX || PLAT_S3C64XX)
100 help
101 This selects the Secure Digital Host Controller Interface (SDHCI)
102 often referrered to as the HSMMC block in some of the Samsung S3C
103 range of SoC.
104
105 Note, due to the problems with DMA, the DMA support is only
106 available with CONFIG_EXPERIMENTAL is selected.
107
108 If you have a controller with this interface, say Y or M here.
109
110 If unsure, say N.
111
112config MMC_SDHCI_S3C_DMA
113 bool "DMA support on S3C SDHCI"
114 depends on MMC_SDHCI_S3C && EXPERIMENTAL
115 help
116 Enable DMA support on the Samsung S3C SDHCI glue. The DMA
117 has proved to be problematic if the controller encounters
118 certain errors, and thus should be treated with care.
119
120 YMMV.
121
97config MMC_OMAP 122config MMC_OMAP
98 tristate "TI OMAP Multimedia Card Interface support" 123 tristate "TI OMAP Multimedia Card Interface support"
99 depends on ARCH_OMAP 124 depends on ARCH_OMAP
@@ -265,3 +290,14 @@ config MMC_CB710
265 This driver can also be built as a module. If so, the module 290 This driver can also be built as a module. If so, the module
266 will be called cb710-mmc. 291 will be called cb710-mmc.
267 292
293config MMC_VIA_SDMMC
294 tristate "VIA SD/MMC Card Reader Driver"
295 depends on PCI
296 help
297 This selects the VIA SD/MMC Card Reader driver, say Y or M here.
298 VIA provides one multi-functional card reader which integrated into
299 some motherboards manufactured by VIA. This card reader supports
300 SD/MMC/SDHC.
301 If you have a controller with this interface, say Y or M here.
302
303 If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 79da397c5fea..cf153f628457 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
15obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o 15obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
16obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o 16obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
17obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o 17obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
18obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
18obj-$(CONFIG_MMC_WBSD) += wbsd.o 19obj-$(CONFIG_MMC_WBSD) += wbsd.o
19obj-$(CONFIG_MMC_AU1X) += au1xmmc.o 20obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
20obj-$(CONFIG_MMC_OMAP) += omap.o 21obj-$(CONFIG_MMC_OMAP) += omap.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_MMC_S3C) += s3cmci.o
31obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o 32obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
32obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o 33obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
33obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 34obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
35obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
34 36
35ifeq ($(CONFIG_CB710_DEBUG),y) 37ifeq ($(CONFIG_CB710_DEBUG),y)
36 CFLAGS-cb710-mmc += -DDEBUG 38 CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 4eb4f37544ab..8c08cd7efa7f 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -794,7 +794,7 @@ static void s3cmci_dma_setup(struct s3cmci_host *host,
794 host->mem->start + host->sdidata); 794 host->mem->start + host->sdidata);
795 795
796 if (!setup_ok) { 796 if (!setup_ok) {
797 s3c2410_dma_config(host->dma, 4, 0); 797 s3c2410_dma_config(host->dma, 4);
798 s3c2410_dma_set_buffdone_fn(host->dma, 798 s3c2410_dma_set_buffdone_fn(host->dma,
799 s3cmci_dma_done_callback); 799 s3cmci_dma_done_callback);
800 s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART); 800 s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART);
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c
index 128c614d11aa..d79fa55c3b89 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of.c
@@ -250,6 +250,9 @@ static int __devinit sdhci_of_probe(struct of_device *ofdev,
250 host->ops = &sdhci_of_data->ops; 250 host->ops = &sdhci_of_data->ops;
251 } 251 }
252 252
253 if (of_get_property(np, "sdhci,1-bit-only", NULL))
254 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
255
253 clk = of_get_property(np, "clock-frequency", &size); 256 clk = of_get_property(np, "clock-frequency", &size);
254 if (clk && size == sizeof(*clk) && *clk) 257 if (clk && size == sizeof(*clk) && *clk)
255 of_host->clock = *clk; 258 of_host->clock = *clk;
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 65be27995d5c..2f15cc17d887 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -284,6 +284,18 @@ static const struct sdhci_pci_fixes sdhci_jmicron = {
284 .resume = jmicron_resume, 284 .resume = jmicron_resume,
285}; 285};
286 286
287static int via_probe(struct sdhci_pci_chip *chip)
288{
289 if (chip->pdev->revision == 0x10)
290 chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
291
292 return 0;
293}
294
295static const struct sdhci_pci_fixes sdhci_via = {
296 .probe = via_probe,
297};
298
287static const struct pci_device_id pci_ids[] __devinitdata = { 299static const struct pci_device_id pci_ids[] __devinitdata = {
288 { 300 {
289 .vendor = PCI_VENDOR_ID_RICOH, 301 .vendor = PCI_VENDOR_ID_RICOH,
@@ -349,6 +361,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
349 .driver_data = (kernel_ulong_t)&sdhci_jmicron, 361 .driver_data = (kernel_ulong_t)&sdhci_jmicron,
350 }, 362 },
351 363
364 {
365 .vendor = PCI_VENDOR_ID_VIA,
366 .device = 0x95d0,
367 .subvendor = PCI_ANY_ID,
368 .subdevice = PCI_ANY_ID,
369 .driver_data = (kernel_ulong_t)&sdhci_via,
370 },
371
352 { /* Generic SD host controller */ 372 { /* Generic SD host controller */
353 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) 373 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
354 }, 374 },
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
new file mode 100644
index 000000000000..50997d2a63e7
--- /dev/null
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -0,0 +1,428 @@
1/* linux/drivers/mmc/host/sdhci-s3c.c
2 *
3 * Copyright 2008 Openmoko Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * SDHCI (HSMMC) support for Samsung SoC
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/delay.h>
16#include <linux/dma-mapping.h>
17#include <linux/platform_device.h>
18#include <linux/clk.h>
19#include <linux/io.h>
20
21#include <linux/mmc/host.h>
22
23#include <plat/sdhci.h>
24#include <plat/regs-sdhci.h>
25
26#include "sdhci.h"
27
28#define MAX_BUS_CLK (4)
29
30/**
31 * struct sdhci_s3c - S3C SDHCI instance
32 * @host: The SDHCI host created
33 * @pdev: The platform device we where created from.
34 * @ioarea: The resource created when we claimed the IO area.
35 * @pdata: The platform data for this controller.
36 * @cur_clk: The index of the current bus clock.
37 * @clk_io: The clock for the internal bus interface.
38 * @clk_bus: The clocks that are available for the SD/MMC bus clock.
39 */
40struct sdhci_s3c {
41 struct sdhci_host *host;
42 struct platform_device *pdev;
43 struct resource *ioarea;
44 struct s3c_sdhci_platdata *pdata;
45 unsigned int cur_clk;
46
47 struct clk *clk_io;
48 struct clk *clk_bus[MAX_BUS_CLK];
49};
50
51static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
52{
53 return sdhci_priv(host);
54}
55
56/**
57 * get_curclk - convert ctrl2 register to clock source number
58 * @ctrl2: Control2 register value.
59 */
60static u32 get_curclk(u32 ctrl2)
61{
62 ctrl2 &= S3C_SDHCI_CTRL2_SELBASECLK_MASK;
63 ctrl2 >>= S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
64
65 return ctrl2;
66}
67
68static void sdhci_s3c_check_sclk(struct sdhci_host *host)
69{
70 struct sdhci_s3c *ourhost = to_s3c(host);
71 u32 tmp = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
72
73 if (get_curclk(tmp) != ourhost->cur_clk) {
74 dev_dbg(&ourhost->pdev->dev, "restored ctrl2 clock setting\n");
75
76 tmp &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
77 tmp |= ourhost->cur_clk << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
78 writel(tmp, host->ioaddr + 0x80);
79 }
80}
81
82/**
83 * sdhci_s3c_get_max_clk - callback to get maximum clock frequency.
84 * @host: The SDHCI host instance.
85 *
86 * Callback to return the maximum clock rate acheivable by the controller.
87*/
88static unsigned int sdhci_s3c_get_max_clk(struct sdhci_host *host)
89{
90 struct sdhci_s3c *ourhost = to_s3c(host);
91 struct clk *busclk;
92 unsigned int rate, max;
93 int clk;
94
95 /* note, a reset will reset the clock source */
96
97 sdhci_s3c_check_sclk(host);
98
99 for (max = 0, clk = 0; clk < MAX_BUS_CLK; clk++) {
100 busclk = ourhost->clk_bus[clk];
101 if (!busclk)
102 continue;
103
104 rate = clk_get_rate(busclk);
105 if (rate > max)
106 max = rate;
107 }
108
109 return max;
110}
111
112static unsigned int sdhci_s3c_get_timeout_clk(struct sdhci_host *host)
113{
114 return sdhci_s3c_get_max_clk(host) / 1000000;
115}
116
117/**
118 * sdhci_s3c_consider_clock - consider one the bus clocks for current setting
119 * @ourhost: Our SDHCI instance.
120 * @src: The source clock index.
121 * @wanted: The clock frequency wanted.
122 */
123static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
124 unsigned int src,
125 unsigned int wanted)
126{
127 unsigned long rate;
128 struct clk *clksrc = ourhost->clk_bus[src];
129 int div;
130
131 if (!clksrc)
132 return UINT_MAX;
133
134 rate = clk_get_rate(clksrc);
135
136 for (div = 1; div < 256; div *= 2) {
137 if ((rate / div) <= wanted)
138 break;
139 }
140
141 dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n",
142 src, rate, wanted, rate / div);
143
144 return (wanted - (rate / div));
145}
146
147/**
148 * sdhci_s3c_set_clock - callback on clock change
149 * @host: The SDHCI host being changed
150 * @clock: The clock rate being requested.
151 *
152 * When the card's clock is going to be changed, look at the new frequency
153 * and find the best clock source to go with it.
154*/
155static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
156{
157 struct sdhci_s3c *ourhost = to_s3c(host);
158 unsigned int best = UINT_MAX;
159 unsigned int delta;
160 int best_src = 0;
161 int src;
162 u32 ctrl;
163
164 /* don't bother if the clock is going off. */
165 if (clock == 0)
166 return;
167
168 for (src = 0; src < MAX_BUS_CLK; src++) {
169 delta = sdhci_s3c_consider_clock(ourhost, src, clock);
170 if (delta < best) {
171 best = delta;
172 best_src = src;
173 }
174 }
175
176 dev_dbg(&ourhost->pdev->dev,
177 "selected source %d, clock %d, delta %d\n",
178 best_src, clock, best);
179
180 /* select the new clock source */
181
182 if (ourhost->cur_clk != best_src) {
183 struct clk *clk = ourhost->clk_bus[best_src];
184
185 /* turn clock off to card before changing clock source */
186 writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
187
188 ourhost->cur_clk = best_src;
189 host->max_clk = clk_get_rate(clk);
190 host->timeout_clk = sdhci_s3c_get_timeout_clk(host);
191
192 ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
193 ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
194 ctrl |= best_src << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
195 writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2);
196 }
197
198 /* reconfigure the hardware for new clock rate */
199
200 {
201 struct mmc_ios ios;
202
203 ios.clock = clock;
204
205 if (ourhost->pdata->cfg_card)
206 (ourhost->pdata->cfg_card)(ourhost->pdev, host->ioaddr,
207 &ios, NULL);
208 }
209}
210
211static struct sdhci_ops sdhci_s3c_ops = {
212 .get_max_clock = sdhci_s3c_get_max_clk,
213 .get_timeout_clock = sdhci_s3c_get_timeout_clk,
214 .set_clock = sdhci_s3c_set_clock,
215};
216
217static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
218{
219 struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;
220 struct device *dev = &pdev->dev;
221 struct sdhci_host *host;
222 struct sdhci_s3c *sc;
223 struct resource *res;
224 int ret, irq, ptr, clks;
225
226 if (!pdata) {
227 dev_err(dev, "no device data specified\n");
228 return -ENOENT;
229 }
230
231 irq = platform_get_irq(pdev, 0);
232 if (irq < 0) {
233 dev_err(dev, "no irq specified\n");
234 return irq;
235 }
236
237 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
238 if (!res) {
239 dev_err(dev, "no memory specified\n");
240 return -ENOENT;
241 }
242
243 host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c));
244 if (IS_ERR(host)) {
245 dev_err(dev, "sdhci_alloc_host() failed\n");
246 return PTR_ERR(host);
247 }
248
249 sc = sdhci_priv(host);
250
251 sc->host = host;
252 sc->pdev = pdev;
253 sc->pdata = pdata;
254
255 platform_set_drvdata(pdev, host);
256
257 sc->clk_io = clk_get(dev, "hsmmc");
258 if (IS_ERR(sc->clk_io)) {
259 dev_err(dev, "failed to get io clock\n");
260 ret = PTR_ERR(sc->clk_io);
261 goto err_io_clk;
262 }
263
264 /* enable the local io clock and keep it running for the moment. */
265 clk_enable(sc->clk_io);
266
267 for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
268 struct clk *clk;
269 char *name = pdata->clocks[ptr];
270
271 if (name == NULL)
272 continue;
273
274 clk = clk_get(dev, name);
275 if (IS_ERR(clk)) {
276 dev_err(dev, "failed to get clock %s\n", name);
277 continue;
278 }
279
280 clks++;
281 sc->clk_bus[ptr] = clk;
282 clk_enable(clk);
283
284 dev_info(dev, "clock source %d: %s (%ld Hz)\n",
285 ptr, name, clk_get_rate(clk));
286 }
287
288 if (clks == 0) {
289 dev_err(dev, "failed to find any bus clocks\n");
290 ret = -ENOENT;
291 goto err_no_busclks;
292 }
293
294 sc->ioarea = request_mem_region(res->start, resource_size(res),
295 mmc_hostname(host->mmc));
296 if (!sc->ioarea) {
297 dev_err(dev, "failed to reserve register area\n");
298 ret = -ENXIO;
299 goto err_req_regs;
300 }
301
302 host->ioaddr = ioremap_nocache(res->start, resource_size(res));
303 if (!host->ioaddr) {
304 dev_err(dev, "failed to map registers\n");
305 ret = -ENXIO;
306 goto err_req_regs;
307 }
308
309 /* Ensure we have minimal gpio selected CMD/CLK/Detect */
310 if (pdata->cfg_gpio)
311 pdata->cfg_gpio(pdev, pdata->max_width);
312
313 host->hw_name = "samsung-hsmmc";
314 host->ops = &sdhci_s3c_ops;
315 host->quirks = 0;
316 host->irq = irq;
317
318 /* Setup quirks for the controller */
319
320 /* Currently with ADMA enabled we are getting some length
321 * interrupts that are not being dealt with, do disable
322 * ADMA until this is sorted out. */
323 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
324 host->quirks |= SDHCI_QUIRK_32BIT_ADMA_SIZE;
325
326#ifndef CONFIG_MMC_SDHCI_S3C_DMA
327
328 /* we currently see overruns on errors, so disable the SDMA
329 * support as well. */
330 host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
331
332 /* PIO currently has problems with multi-block IO */
333 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
334
335#endif /* CONFIG_MMC_SDHCI_S3C_DMA */
336
337 /* It seems we do not get an DATA transfer complete on non-busy
338 * transfers, not sure if this is a problem with this specific
339 * SDHCI block, or a missing configuration that needs to be set. */
340 host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
341
342 host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
343 SDHCI_QUIRK_32BIT_DMA_SIZE);
344
345 ret = sdhci_add_host(host);
346 if (ret) {
347 dev_err(dev, "sdhci_add_host() failed\n");
348 goto err_add_host;
349 }
350
351 return 0;
352
353 err_add_host:
354 release_resource(sc->ioarea);
355 kfree(sc->ioarea);
356
357 err_req_regs:
358 for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
359 clk_disable(sc->clk_bus[ptr]);
360 clk_put(sc->clk_bus[ptr]);
361 }
362
363 err_no_busclks:
364 clk_disable(sc->clk_io);
365 clk_put(sc->clk_io);
366
367 err_io_clk:
368 sdhci_free_host(host);
369
370 return ret;
371}
372
373static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
374{
375 return 0;
376}
377
378#ifdef CONFIG_PM
379
380static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
381{
382 struct sdhci_host *host = platform_get_drvdata(dev);
383
384 sdhci_suspend_host(host, pm);
385 return 0;
386}
387
388static int sdhci_s3c_resume(struct platform_device *dev)
389{
390 struct sdhci_host *host = platform_get_drvdata(dev);
391
392 sdhci_resume_host(host);
393 return 0;
394}
395
396#else
397#define sdhci_s3c_suspend NULL
398#define sdhci_s3c_resume NULL
399#endif
400
401static struct platform_driver sdhci_s3c_driver = {
402 .probe = sdhci_s3c_probe,
403 .remove = __devexit_p(sdhci_s3c_remove),
404 .suspend = sdhci_s3c_suspend,
405 .resume = sdhci_s3c_resume,
406 .driver = {
407 .owner = THIS_MODULE,
408 .name = "s3c-sdhci",
409 },
410};
411
412static int __init sdhci_s3c_init(void)
413{
414 return platform_driver_register(&sdhci_s3c_driver);
415}
416
417static void __exit sdhci_s3c_exit(void)
418{
419 platform_driver_unregister(&sdhci_s3c_driver);
420}
421
422module_init(sdhci_s3c_init);
423module_exit(sdhci_s3c_exit);
424
425MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue");
426MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
427MODULE_LICENSE("GPL v2");
428MODULE_ALIAS("platform:s3c-sdhci");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 35789c6edc19..6779b4ecab18 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -584,7 +584,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
584 * longer to time out, but that's much better than having a too-short 584 * longer to time out, but that's much better than having a too-short
585 * timeout value. 585 * timeout value.
586 */ 586 */
587 if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)) 587 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
588 return 0xE; 588 return 0xE;
589 589
590 /* timeout in us */ 590 /* timeout in us */
@@ -1051,12 +1051,19 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
1051 * At least the Marvell CaFe chip gets confused if we set the voltage 1051 * At least the Marvell CaFe chip gets confused if we set the voltage
1052 * and set turn on power at the same time, so set the voltage first. 1052 * and set turn on power at the same time, so set the voltage first.
1053 */ 1053 */
1054 if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)) 1054 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1055 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1055 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1056 1056
1057 pwr |= SDHCI_POWER_ON; 1057 pwr |= SDHCI_POWER_ON;
1058 1058
1059 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1059 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1060
1061 /*
1062 * Some controllers need an extra 10ms delay of 10ms before they
1063 * can apply clock after applying power
1064 */
1065 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1066 mdelay(10);
1060} 1067}
1061 1068
1062/*****************************************************************************\ 1069/*****************************************************************************\
@@ -1382,6 +1389,35 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1382 sdhci_finish_command(host); 1389 sdhci_finish_command(host);
1383} 1390}
1384 1391
1392#ifdef DEBUG
1393static void sdhci_show_adma_error(struct sdhci_host *host)
1394{
1395 const char *name = mmc_hostname(host->mmc);
1396 u8 *desc = host->adma_desc;
1397 __le32 *dma;
1398 __le16 *len;
1399 u8 attr;
1400
1401 sdhci_dumpregs(host);
1402
1403 while (true) {
1404 dma = (__le32 *)(desc + 4);
1405 len = (__le16 *)(desc + 2);
1406 attr = *desc;
1407
1408 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
1409 name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
1410
1411 desc += 8;
1412
1413 if (attr & 2)
1414 break;
1415 }
1416}
1417#else
1418static void sdhci_show_adma_error(struct sdhci_host *host) { }
1419#endif
1420
1385static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 1421static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1386{ 1422{
1387 BUG_ON(intmask == 0); 1423 BUG_ON(intmask == 0);
@@ -1411,8 +1447,11 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1411 host->data->error = -ETIMEDOUT; 1447 host->data->error = -ETIMEDOUT;
1412 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) 1448 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
1413 host->data->error = -EILSEQ; 1449 host->data->error = -EILSEQ;
1414 else if (intmask & SDHCI_INT_ADMA_ERROR) 1450 else if (intmask & SDHCI_INT_ADMA_ERROR) {
1451 printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
1452 sdhci_show_adma_error(host);
1415 host->data->error = -EIO; 1453 host->data->error = -EIO;
1454 }
1416 1455
1417 if (host->data->error) 1456 if (host->data->error)
1418 sdhci_finish_data(host); 1457 sdhci_finish_data(host);
@@ -1729,7 +1768,10 @@ int sdhci_add_host(struct sdhci_host *host)
1729 mmc->ops = &sdhci_ops; 1768 mmc->ops = &sdhci_ops;
1730 mmc->f_min = host->max_clk / 256; 1769 mmc->f_min = host->max_clk / 256;
1731 mmc->f_max = host->max_clk; 1770 mmc->f_max = host->max_clk;
1732 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; 1771 mmc->caps = MMC_CAP_SDIO_IRQ;
1772
1773 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
1774 mmc->caps |= MMC_CAP_4_BIT_DATA;
1733 1775
1734 if (caps & SDHCI_CAN_DO_HISPD) 1776 if (caps & SDHCI_CAN_DO_HISPD)
1735 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 1777 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
@@ -1802,7 +1844,7 @@ int sdhci_add_host(struct sdhci_host *host)
1802 /* 1844 /*
1803 * Maximum block count. 1845 * Maximum block count.
1804 */ 1846 */
1805 mmc->max_blk_count = 65535; 1847 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
1806 1848
1807 /* 1849 /*
1808 * Init tasklets. 1850 * Init tasklets.
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 2de08349c3ca..831ddf7dcb49 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -226,6 +226,12 @@ struct sdhci_host {
226#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19) 226#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19)
227/* Controller has to be forced to use block size of 2048 bytes */ 227/* Controller has to be forced to use block size of 2048 bytes */
228#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20) 228#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20)
229/* Controller cannot do multi-block transfers */
230#define SDHCI_QUIRK_NO_MULTIBLOCK (1<<21)
231/* Controller can only handle 1-bit data transfers */
232#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22)
233/* Controller needs 10ms delay between applying power and clock */
234#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
229 235
230 int irq; /* Device IRQ */ 236 int irq; /* Device IRQ */
231 void __iomem * ioaddr; /* Mapped address */ 237 void __iomem * ioaddr; /* Mapped address */
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
new file mode 100644
index 000000000000..632858a94376
--- /dev/null
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -0,0 +1,1362 @@
1/*
2 * drivers/mmc/host/via-sdmmc.c - VIA SD/MMC Card Reader driver
3 * Copyright (c) 2008, VIA Technologies Inc. All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version.
9 */
10
11#include <linux/pci.h>
12#include <linux/dma-mapping.h>
13#include <linux/highmem.h>
14#include <linux/delay.h>
15
16#include <linux/mmc/host.h>
17
18#define DRV_NAME "via_sdmmc"
19
20#define PCI_DEVICE_ID_VIA_9530 0x9530
21
22#define VIA_CRDR_SDC_OFF 0x200
23#define VIA_CRDR_DDMA_OFF 0x400
24#define VIA_CRDR_PCICTRL_OFF 0x600
25
26#define VIA_CRDR_MIN_CLOCK 375000
27#define VIA_CRDR_MAX_CLOCK 48000000
28
29/*
30 * PCI registers
31 */
32
33#define VIA_CRDR_PCI_WORK_MODE 0x40
34#define VIA_CRDR_PCI_DBG_MODE 0x41
35
36/*
37 * SDC MMIO Registers
38 */
39
40#define VIA_CRDR_SDCTRL 0x0
41#define VIA_CRDR_SDCTRL_START 0x01
42#define VIA_CRDR_SDCTRL_WRITE 0x04
43#define VIA_CRDR_SDCTRL_SINGLE_WR 0x10
44#define VIA_CRDR_SDCTRL_SINGLE_RD 0x20
45#define VIA_CRDR_SDCTRL_MULTI_WR 0x30
46#define VIA_CRDR_SDCTRL_MULTI_RD 0x40
47#define VIA_CRDR_SDCTRL_STOP 0x70
48
49#define VIA_CRDR_SDCTRL_RSP_NONE 0x0
50#define VIA_CRDR_SDCTRL_RSP_R1 0x10000
51#define VIA_CRDR_SDCTRL_RSP_R2 0x20000
52#define VIA_CRDR_SDCTRL_RSP_R3 0x30000
53#define VIA_CRDR_SDCTRL_RSP_R1B 0x90000
54
55#define VIA_CRDR_SDCARG 0x4
56
57#define VIA_CRDR_SDBUSMODE 0x8
58#define VIA_CRDR_SDMODE_4BIT 0x02
59#define VIA_CRDR_SDMODE_CLK_ON 0x40
60
61#define VIA_CRDR_SDBLKLEN 0xc
62/*
63 * Bit 0 -Bit 10 : Block length. So, the maximum block length should be 2048.
64 * Bit 11 - Bit 13 : Reserved.
65 * GPIDET : Select GPI pin to detect card, GPI means CR_CD# in top design.
66 * INTEN : Enable SD host interrupt.
67 * Bit 16 - Bit 31 : Block count. So, the maximun block count should be 65536.
68 */
69#define VIA_CRDR_SDBLKLEN_GPIDET 0x2000
70#define VIA_CRDR_SDBLKLEN_INTEN 0x8000
71#define VIA_CRDR_MAX_BLOCK_COUNT 65536
72#define VIA_CRDR_MAX_BLOCK_LENGTH 2048
73
74#define VIA_CRDR_SDRESP0 0x10
75#define VIA_CRDR_SDRESP1 0x14
76#define VIA_CRDR_SDRESP2 0x18
77#define VIA_CRDR_SDRESP3 0x1c
78
79#define VIA_CRDR_SDCURBLKCNT 0x20
80
81#define VIA_CRDR_SDINTMASK 0x24
82/*
83 * MBDIE : Multiple Blocks transfer Done Interrupt Enable
84 * BDDIE : Block Data transfer Done Interrupt Enable
85 * CIRIE : Card Insertion or Removal Interrupt Enable
86 * CRDIE : Command-Response transfer Done Interrupt Enable
87 * CRTOIE : Command-Response response TimeOut Interrupt Enable
88 * ASCRDIE : Auto Stop Command-Response transfer Done Interrupt Enable
89 * DTIE : Data access Timeout Interrupt Enable
90 * SCIE : reSponse CRC error Interrupt Enable
91 * RCIE : Read data CRC error Interrupt Enable
92 * WCIE : Write data CRC error Interrupt Enable
93 */
94#define VIA_CRDR_SDINTMASK_MBDIE 0x10
95#define VIA_CRDR_SDINTMASK_BDDIE 0x20
96#define VIA_CRDR_SDINTMASK_CIRIE 0x80
97#define VIA_CRDR_SDINTMASK_CRDIE 0x200
98#define VIA_CRDR_SDINTMASK_CRTOIE 0x400
99#define VIA_CRDR_SDINTMASK_ASCRDIE 0x800
100#define VIA_CRDR_SDINTMASK_DTIE 0x1000
101#define VIA_CRDR_SDINTMASK_SCIE 0x2000
102#define VIA_CRDR_SDINTMASK_RCIE 0x4000
103#define VIA_CRDR_SDINTMASK_WCIE 0x8000
104
105#define VIA_CRDR_SDACTIVE_INTMASK \
106 (VIA_CRDR_SDINTMASK_MBDIE | VIA_CRDR_SDINTMASK_CIRIE \
107 | VIA_CRDR_SDINTMASK_CRDIE | VIA_CRDR_SDINTMASK_CRTOIE \
108 | VIA_CRDR_SDINTMASK_DTIE | VIA_CRDR_SDINTMASK_SCIE \
109 | VIA_CRDR_SDINTMASK_RCIE | VIA_CRDR_SDINTMASK_WCIE)
110
111#define VIA_CRDR_SDSTATUS 0x28
112/*
113 * CECC : Reserved
114 * WP : SD card Write Protect status
115 * SLOTD : Reserved
116 * SLOTG : SD SLOT status(Gpi pin status)
117 * MBD : Multiple Blocks transfer Done interrupt status
118 * BDD : Block Data transfer Done interrupt status
119 * CD : Reserved
120 * CIR : Card Insertion or Removal interrupt detected on GPI pin
121 * IO : Reserved
122 * CRD : Command-Response transfer Done interrupt status
123 * CRTO : Command-Response response TimeOut interrupt status
124 * ASCRDIE : Auto Stop Command-Response transfer Done interrupt status
125 * DT : Data access Timeout interrupt status
126 * SC : reSponse CRC error interrupt status
127 * RC : Read data CRC error interrupt status
128 * WC : Write data CRC error interrupt status
129 */
130#define VIA_CRDR_SDSTS_CECC 0x01
131#define VIA_CRDR_SDSTS_WP 0x02
132#define VIA_CRDR_SDSTS_SLOTD 0x04
133#define VIA_CRDR_SDSTS_SLOTG 0x08
134#define VIA_CRDR_SDSTS_MBD 0x10
135#define VIA_CRDR_SDSTS_BDD 0x20
136#define VIA_CRDR_SDSTS_CD 0x40
137#define VIA_CRDR_SDSTS_CIR 0x80
138#define VIA_CRDR_SDSTS_IO 0x100
139#define VIA_CRDR_SDSTS_CRD 0x200
140#define VIA_CRDR_SDSTS_CRTO 0x400
141#define VIA_CRDR_SDSTS_ASCRDIE 0x800
142#define VIA_CRDR_SDSTS_DT 0x1000
143#define VIA_CRDR_SDSTS_SC 0x2000
144#define VIA_CRDR_SDSTS_RC 0x4000
145#define VIA_CRDR_SDSTS_WC 0x8000
146
147#define VIA_CRDR_SDSTS_IGN_MASK\
148 (VIA_CRDR_SDSTS_BDD | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_IO)
149#define VIA_CRDR_SDSTS_INT_MASK \
150 (VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_BDD | VIA_CRDR_SDSTS_CD \
151 | VIA_CRDR_SDSTS_CIR | VIA_CRDR_SDSTS_IO | VIA_CRDR_SDSTS_CRD \
152 | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_DT \
153 | VIA_CRDR_SDSTS_SC | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
154#define VIA_CRDR_SDSTS_W1C_MASK \
155 (VIA_CRDR_SDSTS_CECC | VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_BDD \
156 | VIA_CRDR_SDSTS_CD | VIA_CRDR_SDSTS_CIR | VIA_CRDR_SDSTS_CRD \
157 | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_DT \
158 | VIA_CRDR_SDSTS_SC | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
159#define VIA_CRDR_SDSTS_CMD_MASK \
160 (VIA_CRDR_SDSTS_CRD | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_SC)
161#define VIA_CRDR_SDSTS_DATA_MASK\
162 (VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_DT \
163 | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
164
165#define VIA_CRDR_SDSTATUS2 0x2a
166/*
167 * CFE : Enable SD host automatic Clock FReezing
168 */
169#define VIA_CRDR_SDSTS_CFE 0x80
170
171#define VIA_CRDR_SDRSPTMO 0x2C
172
173#define VIA_CRDR_SDCLKSEL 0x30
174
175#define VIA_CRDR_SDEXTCTRL 0x34
176#define VIS_CRDR_SDEXTCTRL_AUTOSTOP_SD 0x01
177#define VIS_CRDR_SDEXTCTRL_SHIFT_9 0x02
178#define VIS_CRDR_SDEXTCTRL_MMC_8BIT 0x04
179#define VIS_CRDR_SDEXTCTRL_RELD_BLK 0x08
180#define VIS_CRDR_SDEXTCTRL_BAD_CMDA 0x10
181#define VIS_CRDR_SDEXTCTRL_BAD_DATA 0x20
182#define VIS_CRDR_SDEXTCTRL_AUTOSTOP_SPI 0x40
183#define VIA_CRDR_SDEXTCTRL_HISPD 0x80
184/* 0x38-0xFF reserved */
185
186/*
187 * Data DMA Control Registers
188 */
189
190#define VIA_CRDR_DMABASEADD 0x0
191#define VIA_CRDR_DMACOUNTER 0x4
192
193#define VIA_CRDR_DMACTRL 0x8
194/*
195 * DIR :Transaction Direction
196 * 0 : From card to memory
197 * 1 : From memory to card
198 */
199#define VIA_CRDR_DMACTRL_DIR 0x100
200#define VIA_CRDR_DMACTRL_ENIRQ 0x10000
201#define VIA_CRDR_DMACTRL_SFTRST 0x1000000
202
203#define VIA_CRDR_DMASTS 0xc
204
205#define VIA_CRDR_DMASTART 0x10
206/*0x14-0xFF reserved*/
207
208/*
209 * PCI Control Registers
210 */
211
212/*0x0 - 0x1 reserved*/
213#define VIA_CRDR_PCICLKGATT 0x2
214/*
215 * SFTRST :
216 * 0 : Soft reset all the controller and it will be de-asserted automatically
217 * 1 : Soft reset is de-asserted
218 */
219#define VIA_CRDR_PCICLKGATT_SFTRST 0x01
220/*
221 * 3V3 : Pad power select
222 * 0 : 1.8V
223 * 1 : 3.3V
224 * NOTE : No mater what the actual value should be, this bit always
225 * read as 0. This is a hardware bug.
226 */
227#define VIA_CRDR_PCICLKGATT_3V3 0x10
228/*
229 * PAD_PWRON : Pad Power on/off select
230 * 0 : Power off
231 * 1 : Power on
232 * NOTE : No mater what the actual value should be, this bit always
233 * read as 0. This is a hardware bug.
234 */
235#define VIA_CRDR_PCICLKGATT_PAD_PWRON 0x20
236
237#define VIA_CRDR_PCISDCCLK 0x5
238
239#define VIA_CRDR_PCIDMACLK 0x7
240#define VIA_CRDR_PCIDMACLK_SDC 0x2
241
242#define VIA_CRDR_PCIINTCTRL 0x8
243#define VIA_CRDR_PCIINTCTRL_SDCIRQEN 0x04
244
245#define VIA_CRDR_PCIINTSTATUS 0x9
246#define VIA_CRDR_PCIINTSTATUS_SDC 0x04
247
248#define VIA_CRDR_PCITMOCTRL 0xa
249#define VIA_CRDR_PCITMOCTRL_NO 0x0
250#define VIA_CRDR_PCITMOCTRL_32US 0x1
251#define VIA_CRDR_PCITMOCTRL_256US 0x2
252#define VIA_CRDR_PCITMOCTRL_1024US 0x3
253#define VIA_CRDR_PCITMOCTRL_256MS 0x4
254#define VIA_CRDR_PCITMOCTRL_512MS 0x5
255#define VIA_CRDR_PCITMOCTRL_1024MS 0x6
256
257/*0xB-0xFF reserved*/
258
259enum PCI_HOST_CLK_CONTROL {
260 PCI_CLK_375K = 0x03,
261 PCI_CLK_8M = 0x04,
262 PCI_CLK_12M = 0x00,
263 PCI_CLK_16M = 0x05,
264 PCI_CLK_24M = 0x01,
265 PCI_CLK_33M = 0x06,
266 PCI_CLK_48M = 0x02
267};
268
269struct sdhcreg {
270 u32 sdcontrol_reg;
271 u32 sdcmdarg_reg;
272 u32 sdbusmode_reg;
273 u32 sdblklen_reg;
274 u32 sdresp_reg[4];
275 u32 sdcurblkcnt_reg;
276 u32 sdintmask_reg;
277 u32 sdstatus_reg;
278 u32 sdrsptmo_reg;
279 u32 sdclksel_reg;
280 u32 sdextctrl_reg;
281};
282
283struct pcictrlreg {
284 u8 reserve[2];
285 u8 pciclkgat_reg;
286 u8 pcinfcclk_reg;
287 u8 pcimscclk_reg;
288 u8 pcisdclk_reg;
289 u8 pcicaclk_reg;
290 u8 pcidmaclk_reg;
291 u8 pciintctrl_reg;
292 u8 pciintstatus_reg;
293 u8 pcitmoctrl_reg;
294 u8 Resv;
295};
296
297struct via_crdr_mmc_host {
298 struct mmc_host *mmc;
299 struct mmc_request *mrq;
300 struct mmc_command *cmd;
301 struct mmc_data *data;
302
303 void __iomem *mmiobase;
304 void __iomem *sdhc_mmiobase;
305 void __iomem *ddma_mmiobase;
306 void __iomem *pcictrl_mmiobase;
307
308 struct pcictrlreg pm_pcictrl_reg;
309 struct sdhcreg pm_sdhc_reg;
310
311 struct work_struct carddet_work;
312 struct tasklet_struct finish_tasklet;
313
314 struct timer_list timer;
315 spinlock_t lock;
316 u8 power;
317 int reject;
318 unsigned int quirks;
319};
320
321/* some devices need a very long delay for power to stabilize */
322#define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001
323
324static struct pci_device_id via_ids[] = {
325 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530,
326 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
327 {0,}
328};
329
330MODULE_DEVICE_TABLE(pci, via_ids);
331
332static void via_print_sdchc(struct via_crdr_mmc_host *host)
333{
334 void __iomem *addrbase = host->sdhc_mmiobase;
335
336 pr_debug("SDC MMIO Registers:\n");
337 pr_debug("SDCONTROL=%08x, SDCMDARG=%08x, SDBUSMODE=%08x\n",
338 readl(addrbase + VIA_CRDR_SDCTRL),
339 readl(addrbase + VIA_CRDR_SDCARG),
340 readl(addrbase + VIA_CRDR_SDBUSMODE));
341 pr_debug("SDBLKLEN=%08x, SDCURBLKCNT=%08x, SDINTMASK=%08x\n",
342 readl(addrbase + VIA_CRDR_SDBLKLEN),
343 readl(addrbase + VIA_CRDR_SDCURBLKCNT),
344 readl(addrbase + VIA_CRDR_SDINTMASK));
345 pr_debug("SDSTATUS=%08x, SDCLKSEL=%08x, SDEXTCTRL=%08x\n",
346 readl(addrbase + VIA_CRDR_SDSTATUS),
347 readl(addrbase + VIA_CRDR_SDCLKSEL),
348 readl(addrbase + VIA_CRDR_SDEXTCTRL));
349}
350
351static void via_print_pcictrl(struct via_crdr_mmc_host *host)
352{
353 void __iomem *addrbase = host->pcictrl_mmiobase;
354
355 pr_debug("PCI Control Registers:\n");
356 pr_debug("PCICLKGATT=%02x, PCISDCCLK=%02x, PCIDMACLK=%02x\n",
357 readb(addrbase + VIA_CRDR_PCICLKGATT),
358 readb(addrbase + VIA_CRDR_PCISDCCLK),
359 readb(addrbase + VIA_CRDR_PCIDMACLK));
360 pr_debug("PCIINTCTRL=%02x, PCIINTSTATUS=%02x\n",
361 readb(addrbase + VIA_CRDR_PCIINTCTRL),
362 readb(addrbase + VIA_CRDR_PCIINTSTATUS));
363}
364
365static void via_save_pcictrlreg(struct via_crdr_mmc_host *host)
366{
367 struct pcictrlreg *pm_pcictrl_reg;
368 void __iomem *addrbase;
369
370 pm_pcictrl_reg = &(host->pm_pcictrl_reg);
371 addrbase = host->pcictrl_mmiobase;
372
373 pm_pcictrl_reg->pciclkgat_reg = readb(addrbase + VIA_CRDR_PCICLKGATT);
374 pm_pcictrl_reg->pciclkgat_reg |=
375 VIA_CRDR_PCICLKGATT_3V3 | VIA_CRDR_PCICLKGATT_PAD_PWRON;
376 pm_pcictrl_reg->pcisdclk_reg = readb(addrbase + VIA_CRDR_PCISDCCLK);
377 pm_pcictrl_reg->pcidmaclk_reg = readb(addrbase + VIA_CRDR_PCIDMACLK);
378 pm_pcictrl_reg->pciintctrl_reg = readb(addrbase + VIA_CRDR_PCIINTCTRL);
379 pm_pcictrl_reg->pciintstatus_reg =
380 readb(addrbase + VIA_CRDR_PCIINTSTATUS);
381 pm_pcictrl_reg->pcitmoctrl_reg = readb(addrbase + VIA_CRDR_PCITMOCTRL);
382}
383
384static void via_restore_pcictrlreg(struct via_crdr_mmc_host *host)
385{
386 struct pcictrlreg *pm_pcictrl_reg;
387 void __iomem *addrbase;
388
389 pm_pcictrl_reg = &(host->pm_pcictrl_reg);
390 addrbase = host->pcictrl_mmiobase;
391
392 writeb(pm_pcictrl_reg->pciclkgat_reg, addrbase + VIA_CRDR_PCICLKGATT);
393 writeb(pm_pcictrl_reg->pcisdclk_reg, addrbase + VIA_CRDR_PCISDCCLK);
394 writeb(pm_pcictrl_reg->pcidmaclk_reg, addrbase + VIA_CRDR_PCIDMACLK);
395 writeb(pm_pcictrl_reg->pciintctrl_reg, addrbase + VIA_CRDR_PCIINTCTRL);
396 writeb(pm_pcictrl_reg->pciintstatus_reg,
397 addrbase + VIA_CRDR_PCIINTSTATUS);
398 writeb(pm_pcictrl_reg->pcitmoctrl_reg, addrbase + VIA_CRDR_PCITMOCTRL);
399}
400
401static void via_save_sdcreg(struct via_crdr_mmc_host *host)
402{
403 struct sdhcreg *pm_sdhc_reg;
404 void __iomem *addrbase;
405
406 pm_sdhc_reg = &(host->pm_sdhc_reg);
407 addrbase = host->sdhc_mmiobase;
408
409 pm_sdhc_reg->sdcontrol_reg = readl(addrbase + VIA_CRDR_SDCTRL);
410 pm_sdhc_reg->sdcmdarg_reg = readl(addrbase + VIA_CRDR_SDCARG);
411 pm_sdhc_reg->sdbusmode_reg = readl(addrbase + VIA_CRDR_SDBUSMODE);
412 pm_sdhc_reg->sdblklen_reg = readl(addrbase + VIA_CRDR_SDBLKLEN);
413 pm_sdhc_reg->sdcurblkcnt_reg = readl(addrbase + VIA_CRDR_SDCURBLKCNT);
414 pm_sdhc_reg->sdintmask_reg = readl(addrbase + VIA_CRDR_SDINTMASK);
415 pm_sdhc_reg->sdstatus_reg = readl(addrbase + VIA_CRDR_SDSTATUS);
416 pm_sdhc_reg->sdrsptmo_reg = readl(addrbase + VIA_CRDR_SDRSPTMO);
417 pm_sdhc_reg->sdclksel_reg = readl(addrbase + VIA_CRDR_SDCLKSEL);
418 pm_sdhc_reg->sdextctrl_reg = readl(addrbase + VIA_CRDR_SDEXTCTRL);
419}
420
421static void via_restore_sdcreg(struct via_crdr_mmc_host *host)
422{
423 struct sdhcreg *pm_sdhc_reg;
424 void __iomem *addrbase;
425
426 pm_sdhc_reg = &(host->pm_sdhc_reg);
427 addrbase = host->sdhc_mmiobase;
428
429 writel(pm_sdhc_reg->sdcontrol_reg, addrbase + VIA_CRDR_SDCTRL);
430 writel(pm_sdhc_reg->sdcmdarg_reg, addrbase + VIA_CRDR_SDCARG);
431 writel(pm_sdhc_reg->sdbusmode_reg, addrbase + VIA_CRDR_SDBUSMODE);
432 writel(pm_sdhc_reg->sdblklen_reg, addrbase + VIA_CRDR_SDBLKLEN);
433 writel(pm_sdhc_reg->sdcurblkcnt_reg, addrbase + VIA_CRDR_SDCURBLKCNT);
434 writel(pm_sdhc_reg->sdintmask_reg, addrbase + VIA_CRDR_SDINTMASK);
435 writel(pm_sdhc_reg->sdstatus_reg, addrbase + VIA_CRDR_SDSTATUS);
436 writel(pm_sdhc_reg->sdrsptmo_reg, addrbase + VIA_CRDR_SDRSPTMO);
437 writel(pm_sdhc_reg->sdclksel_reg, addrbase + VIA_CRDR_SDCLKSEL);
438 writel(pm_sdhc_reg->sdextctrl_reg, addrbase + VIA_CRDR_SDEXTCTRL);
439}
440
441static void via_pwron_sleep(struct via_crdr_mmc_host *sdhost)
442{
443 if (sdhost->quirks & VIA_CRDR_QUIRK_300MS_PWRDELAY)
444 msleep(300);
445 else
446 msleep(3);
447}
448
/*
 * Program the dedicated-DMA engine for one transfer and kick it off.
 *
 * @dmaaddr: bus address of the (single) mapped segment
 * @count:   transfer length in bytes
 * @dir:     non-zero for a write (memory -> card) transfer
 * @enirq:   non-zero to raise a DMA interrupt on completion
 *
 * The register write order (base, counter, control, start) is the
 * sequence the hardware expects; do not reorder.
 */
static void via_set_ddma(struct via_crdr_mmc_host *host,
			 dma_addr_t dmaaddr, u32 count, int dir, int enirq)
{
	void __iomem *addrbase;
	u32 ctrl_data = 0;

	if (enirq)
		ctrl_data |= VIA_CRDR_DMACTRL_ENIRQ;

	if (dir)
		ctrl_data |= VIA_CRDR_DMACTRL_DIR;

	addrbase = host->ddma_mmiobase;

	writel(dmaaddr, addrbase + VIA_CRDR_DMABASEADD);
	writel(count, addrbase + VIA_CRDR_DMACOUNTER);
	writel(ctrl_data, addrbase + VIA_CRDR_DMACTRL);
	writel(0x01, addrbase + VIA_CRDR_DMASTART);

	/* It seems that our DMA can not work normally with 375kHz clock */
	/* FIXME: don't brute-force 8MHz but use PIO at 375kHz !! */
	addrbase = host->pcictrl_mmiobase;
	if (readb(addrbase + VIA_CRDR_PCISDCCLK) == PCI_CLK_375K) {
		dev_info(host->mmc->parent, "forcing card speed to 8MHz\n");
		writeb(PCI_CLK_8M, addrbase + VIA_CRDR_PCISDCCLK);
	}
}
476
/*
 * Map the request's scatterlist for DMA, start the DMA engine and program
 * the block-length/block-count register for the upcoming data transfer.
 *
 * The hardware handles exactly one DMA segment (max_hw_segs is 1 in
 * via_init_mmc_host()), hence the BUG_ON(count != 1) below.
 */
static void via_sdc_preparedata(struct via_crdr_mmc_host *host,
				struct mmc_data *data)
{
	void __iomem *addrbase;
	u32 blk_reg;
	int count;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > host->mmc->max_blk_count);

	host->data = data;

	count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
		((data->flags & MMC_DATA_READ) ?
		PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
	BUG_ON(count != 1);

	via_set_ddma(host, sg_dma_address(data->sg), sg_dma_len(data->sg),
		(data->flags & MMC_DATA_WRITE) ? 1 : 0, 1);

	addrbase = host->sdhc_mmiobase;

	/* blklen register: low 16 bits = block size - 1, high 16 = count */
	blk_reg = data->blksz - 1;
	blk_reg |= VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
	blk_reg |= (data->blocks) << 16;

	writel(blk_reg, addrbase + VIA_CRDR_SDBLKLEN);
}
508
/*
 * Read the four response registers and reassemble them into cmd->resp[].
 *
 * The controller stores the response bytes rotated across the registers,
 * so both the 136-bit (R2) and 48-bit paths shuffle individual bytes back
 * into the big-endian layout the MMC core expects.  NOTE(review): the
 * exact byte rotation is inferred from this code only — hardware docs
 * not available here.
 */
static void via_sdc_get_response(struct via_crdr_mmc_host *host,
				 struct mmc_command *cmd)
{
	void __iomem *addrbase = host->sdhc_mmiobase;
	u32 dwdata0 = readl(addrbase + VIA_CRDR_SDRESP0);
	u32 dwdata1 = readl(addrbase + VIA_CRDR_SDRESP1);
	u32 dwdata2 = readl(addrbase + VIA_CRDR_SDRESP2);
	u32 dwdata3 = readl(addrbase + VIA_CRDR_SDRESP3);

	if (cmd->flags & MMC_RSP_136) {
		/* 136-bit response: each resp word borrows its low byte
		 * from the next register and byte-swaps the rest. */
		cmd->resp[0] = ((u8) (dwdata1)) |
		    (((u8) (dwdata0 >> 24)) << 8) |
		    (((u8) (dwdata0 >> 16)) << 16) |
		    (((u8) (dwdata0 >> 8)) << 24);

		cmd->resp[1] = ((u8) (dwdata2)) |
		    (((u8) (dwdata1 >> 24)) << 8) |
		    (((u8) (dwdata1 >> 16)) << 16) |
		    (((u8) (dwdata1 >> 8)) << 24);

		cmd->resp[2] = ((u8) (dwdata3)) |
		    (((u8) (dwdata2 >> 24)) << 8) |
		    (((u8) (dwdata2 >> 16)) << 16) |
		    (((u8) (dwdata2 >> 8)) << 24);

		/* last word: low byte is padding (0xff) */
		cmd->resp[3] = 0xff |
		    ((((u8) (dwdata3 >> 24))) << 8) |
		    (((u8) (dwdata3 >> 16)) << 16) |
		    (((u8) (dwdata3 >> 8)) << 24);
	} else {
		/* 48-bit response: skip the command-index byte, then
		 * byte-reverse into resp[0]/resp[1]. */
		dwdata0 >>= 8;
		cmd->resp[0] = ((dwdata0 & 0xff) << 24) |
		    (((dwdata0 >> 8) & 0xff) << 16) |
		    (((dwdata0 >> 16) & 0xff) << 8) | (dwdata1 & 0xff);

		dwdata1 >>= 8;
		cmd->resp[1] = ((dwdata1 & 0xff) << 24) |
		    (((dwdata1 >> 8) & 0xff) << 16) |
		    (((dwdata1 >> 16) & 0xff) << 8);
	}
}
550
/*
 * Issue one command (optionally with data) to the SD controller.
 *
 * Builds the SDCTRL word from the opcode, the response type and - for
 * data commands - the single/multi read/write mode bits, arms the 1 s
 * software timeout timer, then writes SDCARG and SDCTRL to start the
 * transaction.  Called with host->lock held (from via_sdc_request() and
 * via_sdc_finish_data()).
 */
static void via_sdc_send_command(struct via_crdr_mmc_host *host,
				 struct mmc_command *cmd)
{
	void __iomem *addrbase;
	struct mmc_data *data;
	u32 cmdctrl = 0;

	WARN_ON(host->cmd);

	data = cmd->data;
	mod_timer(&host->timer, jiffies + HZ);
	host->cmd = cmd;

	/*Command index*/
	cmdctrl = cmd->opcode << 8;

	/*Response type*/
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		cmdctrl |= VIA_CRDR_SDCTRL_RSP_NONE;
		break;
	case MMC_RSP_R1:
		cmdctrl |= VIA_CRDR_SDCTRL_RSP_R1;
		break;
	case MMC_RSP_R1B:
		cmdctrl |= VIA_CRDR_SDCTRL_RSP_R1B;
		break;
	case MMC_RSP_R2:
		cmdctrl |= VIA_CRDR_SDCTRL_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmdctrl |= VIA_CRDR_SDCTRL_RSP_R3;
		break;
	default:
		pr_err("%s: cmd->flag is not valid\n", mmc_hostname(host->mmc));
		break;
	}

	if (!(cmd->data))
		goto nodata;

	via_sdc_preparedata(host, data);

	/*Command control*/
	if (data->blocks > 1) {
		if (data->flags & MMC_DATA_WRITE) {
			cmdctrl |= VIA_CRDR_SDCTRL_WRITE;
			cmdctrl |= VIA_CRDR_SDCTRL_MULTI_WR;
		} else {
			cmdctrl |= VIA_CRDR_SDCTRL_MULTI_RD;
		}
	} else {
		if (data->flags & MMC_DATA_WRITE) {
			cmdctrl |= VIA_CRDR_SDCTRL_WRITE;
			cmdctrl |= VIA_CRDR_SDCTRL_SINGLE_WR;
		} else {
			cmdctrl |= VIA_CRDR_SDCTRL_SINGLE_RD;
		}
	}

nodata:
	/* CMD12-style stop commands get the STOP bit */
	if (cmd == host->mrq->stop)
		cmdctrl |= VIA_CRDR_SDCTRL_STOP;

	cmdctrl |= VIA_CRDR_SDCTRL_START;

	addrbase = host->sdhc_mmiobase;
	writel(cmd->arg, addrbase + VIA_CRDR_SDCARG);
	writel(cmdctrl, addrbase + VIA_CRDR_SDCTRL);
}
621
622static void via_sdc_finish_data(struct via_crdr_mmc_host *host)
623{
624 struct mmc_data *data;
625
626 BUG_ON(!host->data);
627
628 data = host->data;
629 host->data = NULL;
630
631 if (data->error)
632 data->bytes_xfered = 0;
633 else
634 data->bytes_xfered = data->blocks * data->blksz;
635
636 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
637 ((data->flags & MMC_DATA_READ) ?
638 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
639
640 if (data->stop)
641 via_sdc_send_command(host, data->stop);
642 else
643 tasklet_schedule(&host->finish_tasklet);
644}
645
646static void via_sdc_finish_command(struct via_crdr_mmc_host *host)
647{
648 via_sdc_get_response(host, host->cmd);
649
650 host->cmd->error = 0;
651
652 if (!host->cmd->data)
653 tasklet_schedule(&host->finish_tasklet);
654
655 host->cmd = NULL;
656}
657
/*
 * mmc_host_ops.request entry point: start processing one mmc_request.
 *
 * Selects the SD function on the shared DMA clock mux, clears any stale
 * write-1-to-clear status bits, then either rejects the request with
 * -ENOMEDIUM (no card in slot, or host being removed) or kicks off the
 * first command.
 */
static void via_sdc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	void __iomem *addrbase;
	struct via_crdr_mmc_host *host;
	unsigned long flags;
	u16 status;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	addrbase = host->pcictrl_mmiobase;
	writeb(VIA_CRDR_PCIDMACLK_SDC, addrbase + VIA_CRDR_PCIDMACLK);

	/* clear leftover W1C status bits from a previous request */
	status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
	status &= VIA_CRDR_SDSTS_W1C_MASK;
	writew(status, host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);

	WARN_ON(host->mrq != NULL);
	host->mrq = mrq;

	status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
	if (!(status & VIA_CRDR_SDSTS_SLOTG) || host->reject) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else {
		via_sdc_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
690
/*
 * Select the pad voltage and switch pad power on or off.
 *
 * @power: ios->vdd bit *number*; (1 << power) turns it into the MMC_VDD_*
 *         mask that is cached in host->power and compared against
 *         MMC_VDD_165_195 to pick 1.8 V vs 3.3 V.
 * @on:    non-zero to power the pads on.
 *
 * Sleeps after the register write (via_pwron_sleep), so the lock is
 * dropped first.
 */
static void via_sdc_set_power(struct via_crdr_mmc_host *host,
			      unsigned short power, unsigned int on)
{
	unsigned long flags;
	u8 gatt;

	spin_lock_irqsave(&host->lock, flags);

	host->power = (1 << power);

	gatt = readb(host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
	if (host->power == MMC_VDD_165_195)
		gatt &= ~VIA_CRDR_PCICLKGATT_3V3;
	else
		gatt |= VIA_CRDR_PCICLKGATT_3V3;
	if (on)
		gatt |= VIA_CRDR_PCICLKGATT_PAD_PWRON;
	else
		gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON;
	writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	via_pwron_sleep(host);
}
717
/*
 * mmc_host_ops.set_ios: apply bus width, clock gating, high-speed mode
 * and clock frequency, then update pad power via via_sdc_set_power().
 *
 * The requested ios->clock is rounded *down* to the nearest supported
 * PCI_CLK_* rate (48/33/24/16/12/8 MHz or 375 kHz fallback).
 */
static void via_sdc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct via_crdr_mmc_host *host;
	unsigned long flags;
	void __iomem *addrbase;
	u32 org_data, sdextctrl;
	u8 clock;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	addrbase = host->sdhc_mmiobase;
	org_data = readl(addrbase + VIA_CRDR_SDBUSMODE);
	sdextctrl = readl(addrbase + VIA_CRDR_SDEXTCTRL);

	if (ios->bus_width == MMC_BUS_WIDTH_1)
		org_data &= ~VIA_CRDR_SDMODE_4BIT;
	else
		org_data |= VIA_CRDR_SDMODE_4BIT;

	/* gate the card clock off while powered down */
	if (ios->power_mode == MMC_POWER_OFF)
		org_data &= ~VIA_CRDR_SDMODE_CLK_ON;
	else
		org_data |= VIA_CRDR_SDMODE_CLK_ON;

	if (ios->timing == MMC_TIMING_SD_HS)
		sdextctrl |= VIA_CRDR_SDEXTCTRL_HISPD;
	else
		sdextctrl &= ~VIA_CRDR_SDEXTCTRL_HISPD;

	writel(org_data, addrbase + VIA_CRDR_SDBUSMODE);
	writel(sdextctrl, addrbase + VIA_CRDR_SDEXTCTRL);

	if (ios->clock >= 48000000)
		clock = PCI_CLK_48M;
	else if (ios->clock >= 33000000)
		clock = PCI_CLK_33M;
	else if (ios->clock >= 24000000)
		clock = PCI_CLK_24M;
	else if (ios->clock >= 16000000)
		clock = PCI_CLK_16M;
	else if (ios->clock >= 12000000)
		clock = PCI_CLK_12M;
	else if (ios->clock >= 8000000)
		clock = PCI_CLK_8M;
	else
		clock = PCI_CLK_375K;

	addrbase = host->pcictrl_mmiobase;
	/* avoid a redundant clock-select write */
	if (readb(addrbase + VIA_CRDR_PCISDCCLK) != clock)
		writeb(clock, addrbase + VIA_CRDR_PCISDCCLK);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	/* via_sdc_set_power() sleeps, so it runs outside the lock */
	if (ios->power_mode != MMC_POWER_OFF)
		via_sdc_set_power(host, ios->vdd, 1);
	else
		via_sdc_set_power(host, ios->vdd, 0);
}
779
/*
 * mmc_host_ops.get_ro: report the write-protect switch state.
 *
 * Returns non-zero when the card is read-only.  NOTE(review): the WP
 * status bit is treated as active-low here (read-only reported when the
 * bit is clear) — polarity assumed from this code, confirm against the
 * controller datasheet.
 */
static int via_sdc_get_ro(struct mmc_host *mmc)
{
	struct via_crdr_mmc_host *host;
	unsigned long flags;
	u16 status;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);

	spin_unlock_irqrestore(&host->lock, flags);

	return !(status & VIA_CRDR_SDSTS_WP);
}
796
/* Host operations exposed to the MMC core. */
static const struct mmc_host_ops via_sdc_ops = {
	.request = via_sdc_request,
	.set_ios = via_sdc_set_ios,
	.get_ro = via_sdc_get_ro,
};
802
803static void via_reset_pcictrl(struct via_crdr_mmc_host *host)
804{
805 void __iomem *addrbase;
806 unsigned long flags;
807 u8 gatt;
808
809 addrbase = host->pcictrl_mmiobase;
810
811 spin_lock_irqsave(&host->lock, flags);
812
813 via_save_pcictrlreg(host);
814 via_save_sdcreg(host);
815
816 spin_unlock_irqrestore(&host->lock, flags);
817
818 gatt = VIA_CRDR_PCICLKGATT_PAD_PWRON;
819 if (host->power == MMC_VDD_165_195)
820 gatt &= VIA_CRDR_PCICLKGATT_3V3;
821 else
822 gatt |= VIA_CRDR_PCICLKGATT_3V3;
823 writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
824 via_pwron_sleep(host);
825 gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
826 writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
827 msleep(3);
828
829 spin_lock_irqsave(&host->lock, flags);
830
831 via_restore_pcictrlreg(host);
832 via_restore_sdcreg(host);
833
834 mmiowb();
835 spin_unlock_irqrestore(&host->lock, flags);
836}
837
/*
 * Handle the command-phase interrupt bits: map timeout/CRC status to an
 * error on the in-flight command, or complete it on command-done (CRD).
 * Called from via_sdc_isr() with host->lock held.
 */
static void via_sdc_cmd_isr(struct via_crdr_mmc_host *host, u16 intmask)
{
	BUG_ON(intmask == 0);

	/* spurious command interrupt with nothing in flight */
	if (!host->cmd) {
		pr_err("%s: Got command interrupt 0x%x even "
		       "though no command operation was in progress.\n",
		       mmc_hostname(host->mmc), intmask);
		return;
	}

	if (intmask & VIA_CRDR_SDSTS_CRTO)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & VIA_CRDR_SDSTS_SC)
		host->cmd->error = -EILSEQ;

	if (host->cmd->error)
		tasklet_schedule(&host->finish_tasklet);
	else if (intmask & VIA_CRDR_SDSTS_CRD)
		via_sdc_finish_command(host);
}
859
860static void via_sdc_data_isr(struct via_crdr_mmc_host *host, u16 intmask)
861{
862 BUG_ON(intmask == 0);
863
864 if (intmask & VIA_CRDR_SDSTS_DT)
865 host->data->error = -ETIMEDOUT;
866 else if (intmask & (VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC))
867 host->data->error = -EILSEQ;
868
869 via_sdc_finish_data(host);
870}
871
/*
 * Shared interrupt handler.
 *
 * Checks that the PCI-level status says the SD function raised the
 * interrupt, then dispatches the SD status bits in order: card-insert
 * change (deferred to the card-detect workqueue), command-phase bits,
 * data-phase bits, and finally complains about anything left over.
 * Each handled group is acknowledged by writing it back (W1C) before
 * its handler runs.
 */
static irqreturn_t via_sdc_isr(int irq, void *dev_id)
{
	struct via_crdr_mmc_host *sdhost = dev_id;
	void __iomem *addrbase;
	u8 pci_status;
	u16 sd_status;
	irqreturn_t result;

	if (!sdhost)
		return IRQ_NONE;

	spin_lock(&sdhost->lock);

	addrbase = sdhost->pcictrl_mmiobase;
	pci_status = readb(addrbase + VIA_CRDR_PCIINTSTATUS);
	if (!(pci_status & VIA_CRDR_PCIINTSTATUS_SDC)) {
		/* not ours (shared IRQ line) */
		result = IRQ_NONE;
		goto out;
	}

	addrbase = sdhost->sdhc_mmiobase;
	sd_status = readw(addrbase + VIA_CRDR_SDSTATUS);
	sd_status &= VIA_CRDR_SDSTS_INT_MASK;
	sd_status &= ~VIA_CRDR_SDSTS_IGN_MASK;
	if (!sd_status) {
		result = IRQ_NONE;
		goto out;
	}

	if (sd_status & VIA_CRDR_SDSTS_CIR) {
		/* card insertion/removal: ack and defer to workqueue */
		writew(sd_status & VIA_CRDR_SDSTS_CIR,
		       addrbase + VIA_CRDR_SDSTATUS);

		schedule_work(&sdhost->carddet_work);
	}

	sd_status &= ~VIA_CRDR_SDSTS_CIR;
	if (sd_status & VIA_CRDR_SDSTS_CMD_MASK) {
		writew(sd_status & VIA_CRDR_SDSTS_CMD_MASK,
		       addrbase + VIA_CRDR_SDSTATUS);
		via_sdc_cmd_isr(sdhost, sd_status & VIA_CRDR_SDSTS_CMD_MASK);
	}
	if (sd_status & VIA_CRDR_SDSTS_DATA_MASK) {
		writew(sd_status & VIA_CRDR_SDSTS_DATA_MASK,
		       addrbase + VIA_CRDR_SDSTATUS);
		via_sdc_data_isr(sdhost, sd_status & VIA_CRDR_SDSTS_DATA_MASK);
	}

	sd_status &= ~(VIA_CRDR_SDSTS_CMD_MASK | VIA_CRDR_SDSTS_DATA_MASK);
	if (sd_status) {
		pr_err("%s: Unexpected interrupt 0x%x\n",
		       mmc_hostname(sdhost->mmc), sd_status);
		writew(sd_status, addrbase + VIA_CRDR_SDSTATUS);
	}

	result = IRQ_HANDLED;

	mmiowb();
out:
	spin_unlock(&sdhost->lock);

	return result;
}
935
/*
 * Software timeout (armed for 1 s in via_sdc_send_command): the hardware
 * never raised a completion interrupt.  Abort DMA if a data transfer was
 * in flight, mark the appropriate stage with -ETIMEDOUT and complete the
 * request.
 */
static void via_sdc_timeout(unsigned long ulongdata)
{
	struct via_crdr_mmc_host *sdhost;
	unsigned long flags;

	sdhost = (struct via_crdr_mmc_host *)ulongdata;

	spin_lock_irqsave(&sdhost->lock, flags);

	if (sdhost->mrq) {
		pr_err("%s: Timeout waiting for hardware interrupt."
		       "cmd:0x%x\n", mmc_hostname(sdhost->mmc),
		       sdhost->mrq->cmd->opcode);

		if (sdhost->data) {
			/* stop any in-flight DMA before failing the data */
			writel(VIA_CRDR_DMACTRL_SFTRST,
			       sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL);
			sdhost->data->error = -ETIMEDOUT;
			via_sdc_finish_data(sdhost);
		} else {
			if (sdhost->cmd)
				sdhost->cmd->error = -ETIMEDOUT;
			else
				sdhost->mrq->cmd->error = -ETIMEDOUT;
			tasklet_schedule(&sdhost->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&sdhost->lock, flags);
}
967
968static void via_sdc_tasklet_finish(unsigned long param)
969{
970 struct via_crdr_mmc_host *host;
971 unsigned long flags;
972 struct mmc_request *mrq;
973
974 host = (struct via_crdr_mmc_host *)param;
975
976 spin_lock_irqsave(&host->lock, flags);
977
978 del_timer(&host->timer);
979 mrq = host->mrq;
980 host->mrq = NULL;
981 host->cmd = NULL;
982 host->data = NULL;
983
984 spin_unlock_irqrestore(&host->lock, flags);
985
986 mmc_request_done(host->mmc, mrq);
987}
988
/*
 * Workqueue handler scheduled from the ISR on a card insert/remove event.
 *
 * Resets the DMA engine, and on removal fails any in-flight request with
 * -ENOMEDIUM and soft-resets the controller (via_reset_pcictrl sleeps,
 * so the lock is dropped around it).  Finally notifies the MMC core with
 * a 500 ms debounce.
 */
static void via_sdc_card_detect(struct work_struct *work)
{
	struct via_crdr_mmc_host *host;
	void __iomem *addrbase;
	unsigned long flags;
	u16 status;

	host = container_of(work, struct via_crdr_mmc_host, carddet_work);

	addrbase = host->ddma_mmiobase;
	writel(VIA_CRDR_DMACTRL_SFTRST, addrbase + VIA_CRDR_DMACTRL);

	spin_lock_irqsave(&host->lock, flags);

	addrbase = host->pcictrl_mmiobase;
	writeb(VIA_CRDR_PCIDMACLK_SDC, addrbase + VIA_CRDR_PCIDMACLK);

	addrbase = host->sdhc_mmiobase;
	status = readw(addrbase + VIA_CRDR_SDSTATUS);
	if (!(status & VIA_CRDR_SDSTS_SLOTG)) {
		/* slot is empty: card was removed */
		if (host->mrq) {
			pr_err("%s: Card removed during transfer!\n",
			       mmc_hostname(host->mmc));
			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		mmiowb();
		spin_unlock_irqrestore(&host->lock, flags);

		via_reset_pcictrl(host);

		spin_lock_irqsave(&host->lock, flags);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	via_print_pcictrl(host);
	via_print_sdchc(host);

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));
}
1032
/*
 * One-time host setup at probe: software state (timer, lock, tasklet,
 * workqueue item), mmc_host capabilities/limits, and the initial SD
 * controller register programming (mask interrupts, clear status, enable
 * CFE, then unmask the active interrupt set).
 */
static void via_init_mmc_host(struct via_crdr_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	void __iomem *addrbase;
	u32 lenreg;
	u32 status;

	init_timer(&host->timer);
	host->timer.data = (unsigned long)host;
	host->timer.function = via_sdc_timeout;

	spin_lock_init(&host->lock);

	mmc->f_min = VIA_CRDR_MIN_CLOCK;
	mmc->f_max = VIA_CRDR_MAX_CLOCK;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED;
	mmc->ops = &via_sdc_ops;

	/*Hardware cannot do scatter lists*/
	mmc->max_hw_segs = 1;
	mmc->max_phys_segs = 1;

	mmc->max_blk_size = VIA_CRDR_MAX_BLOCK_LENGTH;
	mmc->max_blk_count = VIA_CRDR_MAX_BLOCK_COUNT;

	mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_req_size = mmc->max_seg_size;

	INIT_WORK(&host->carddet_work, via_sdc_card_detect);

	tasklet_init(&host->finish_tasklet, via_sdc_tasklet_finish,
		     (unsigned long)host);

	/* mask everything while we bring the controller up */
	addrbase = host->sdhc_mmiobase;
	writel(0x0, addrbase + VIA_CRDR_SDINTMASK);
	msleep(1);

	lenreg = VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
	writel(lenreg, addrbase + VIA_CRDR_SDBLKLEN);

	/* clear all write-1-to-clear status bits */
	status = readw(addrbase + VIA_CRDR_SDSTATUS);
	status &= VIA_CRDR_SDSTS_W1C_MASK;
	writew(status, addrbase + VIA_CRDR_SDSTATUS);

	status = readw(addrbase + VIA_CRDR_SDSTATUS2);
	status |= VIA_CRDR_SDSTS_CFE;
	writew(status, addrbase + VIA_CRDR_SDSTATUS2);

	writeb(0x0, addrbase + VIA_CRDR_SDEXTCTRL);

	writel(VIA_CRDR_SDACTIVE_INTMASK, addrbase + VIA_CRDR_SDINTMASK);
	msleep(1);
}
1087
/*
 * PCI probe: enable the device, map BAR 0 and carve it into the three
 * MMIO sub-regions (SDC, DDMA, PCI control), power up and soft-reset the
 * controller, initialise the mmc_host, hook the (shared) IRQ, apply
 * board quirks, and register with the MMC core.
 *
 * Errors unwind in reverse order through the goto chain below.
 * Returns 0 on success or a negative errno.
 */
static int __devinit via_sd_probe(struct pci_dev *pcidev,
				  const struct pci_device_id *id)
{
	struct mmc_host *mmc;
	struct via_crdr_mmc_host *sdhost;
	u32 base, len;
	u8 rev, gatt;
	int ret;

	pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev);
	pr_info(DRV_NAME
		": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
		(int)rev);

	ret = pci_enable_device(pcidev);
	if (ret)
		return ret;

	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret)
		goto disable;

	/* NOTE(review): purpose of these config writes is undocumented
	 * here — presumably selects normal (non-debug) work mode. */
	pci_write_config_byte(pcidev, VIA_CRDR_PCI_WORK_MODE, 0);
	pci_write_config_byte(pcidev, VIA_CRDR_PCI_DBG_MODE, 0);

	mmc = mmc_alloc_host(sizeof(struct via_crdr_mmc_host), &pcidev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto release;
	}

	sdhost = mmc_priv(mmc);
	sdhost->mmc = mmc;
	dev_set_drvdata(&pcidev->dev, sdhost);

	len = pci_resource_len(pcidev, 0);
	base = pci_resource_start(pcidev, 0);
	sdhost->mmiobase = ioremap_nocache(base, len);
	if (!sdhost->mmiobase) {
		ret = -ENOMEM;
		goto free_mmc_host;
	}

	/* BAR 0 holds all three register blocks at fixed offsets */
	sdhost->sdhc_mmiobase =
	    sdhost->mmiobase + VIA_CRDR_SDC_OFF;
	sdhost->ddma_mmiobase =
	    sdhost->mmiobase + VIA_CRDR_DDMA_OFF;
	sdhost->pcictrl_mmiobase =
	    sdhost->mmiobase + VIA_CRDR_PCICTRL_OFF;

	sdhost->power = MMC_VDD_165_195;

	/* power pads at 3.3V, then pulse soft reset */
	gatt = VIA_CRDR_PCICLKGATT_3V3 | VIA_CRDR_PCICLKGATT_PAD_PWRON;
	writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
	via_pwron_sleep(sdhost);
	gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
	writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
	msleep(3);

	via_init_mmc_host(sdhost);

	ret =
	    request_irq(pcidev->irq, via_sdc_isr, IRQF_SHARED, DRV_NAME,
			sdhost);
	if (ret)
		goto unmap;

	writeb(VIA_CRDR_PCIINTCTRL_SDCIRQEN,
	       sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL);
	writeb(VIA_CRDR_PCITMOCTRL_1024MS,
	       sdhost->pcictrl_mmiobase + VIA_CRDR_PCITMOCTRL);

	/* device-specific quirks */
	if (pcidev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
	    pcidev->subsystem_device == 0x3891)
		sdhost->quirks = VIA_CRDR_QUIRK_300MS_PWRDELAY;

	mmc_add_host(mmc);

	return 0;

unmap:
	iounmap(sdhost->mmiobase);
free_mmc_host:
	dev_set_drvdata(&pcidev->dev, NULL);
	mmc_free_host(mmc);
release:
	pci_release_regions(pcidev);
disable:
	pci_disable_device(pcidev);

	return ret;
}
1182
/*
 * PCI remove: stop accepting new requests, quiesce interrupts and DMA,
 * fail any in-flight request, then unregister and release all resources
 * in strict reverse order of probe (mmc host, IRQ, timer, tasklet, pad
 * power, MMIO mapping, PCI regions/device).
 */
static void __devexit via_sd_remove(struct pci_dev *pcidev)
{
	struct via_crdr_mmc_host *sdhost = pci_get_drvdata(pcidev);
	unsigned long flags;
	u8 gatt;

	spin_lock_irqsave(&sdhost->lock, flags);

	/* Ensure we don't accept more commands from mmc layer */
	sdhost->reject = 1;

	/* Disable generating further interrupts */
	writeb(0x0, sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL);
	mmiowb();

	if (sdhost->mrq) {
		printk(KERN_ERR "%s: Controller removed during "
		       "transfer\n", mmc_hostname(sdhost->mmc));

		/* make sure all DMA is stopped */
		writel(VIA_CRDR_DMACTRL_SFTRST,
		       sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL);
		mmiowb();
		sdhost->mrq->cmd->error = -ENOMEDIUM;
		if (sdhost->mrq->stop)
			sdhost->mrq->stop->error = -ENOMEDIUM;
		tasklet_schedule(&sdhost->finish_tasklet);
	}
	spin_unlock_irqrestore(&sdhost->lock, flags);

	mmc_remove_host(sdhost->mmc);

	free_irq(pcidev->irq, sdhost);

	del_timer_sync(&sdhost->timer);

	tasklet_kill(&sdhost->finish_tasklet);

	/* switch off power */
	gatt = readb(sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
	gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON;
	writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);

	iounmap(sdhost->mmiobase);
	dev_set_drvdata(&pcidev->dev, NULL);
	mmc_free_host(sdhost->mmc);
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);

	pr_info(DRV_NAME
		": VIA SDMMC controller at %s [%04x:%04x] has been removed\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}
1236
1237#ifdef CONFIG_PM
1238
/*
 * Resume-time re-initialisation of the SD controller: repeat the basic
 * bring-up from via_init_mmc_host() (mask, block-length, clear W1C
 * status, enable CFE), then restore the subset of registers captured
 * by via_save_sdcreg() at suspend.
 */
static void via_init_sdc_pm(struct via_crdr_mmc_host *host)
{
	struct sdhcreg *pm_sdhcreg;
	void __iomem *addrbase;
	u32 lenreg;
	u16 status;

	pm_sdhcreg = &(host->pm_sdhc_reg);
	addrbase = host->sdhc_mmiobase;

	writel(0x0, addrbase + VIA_CRDR_SDINTMASK);

	lenreg = VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
	writel(lenreg, addrbase + VIA_CRDR_SDBLKLEN);

	status = readw(addrbase + VIA_CRDR_SDSTATUS);
	status &= VIA_CRDR_SDSTS_W1C_MASK;
	writew(status, addrbase + VIA_CRDR_SDSTATUS);

	status = readw(addrbase + VIA_CRDR_SDSTATUS2);
	status |= VIA_CRDR_SDSTS_CFE;
	writew(status, addrbase + VIA_CRDR_SDSTATUS2);

	/* restore the saved register snapshot (intmask written last
	 * among the masked registers is re-applied here) */
	writel(pm_sdhcreg->sdcontrol_reg, addrbase + VIA_CRDR_SDCTRL);
	writel(pm_sdhcreg->sdcmdarg_reg, addrbase + VIA_CRDR_SDCARG);
	writel(pm_sdhcreg->sdintmask_reg, addrbase + VIA_CRDR_SDINTMASK);
	writel(pm_sdhcreg->sdrsptmo_reg, addrbase + VIA_CRDR_SDRSPTMO);
	writel(pm_sdhcreg->sdclksel_reg, addrbase + VIA_CRDR_SDCLKSEL);
	writel(pm_sdhcreg->sdextctrl_reg, addrbase + VIA_CRDR_SDEXTCTRL);

	via_print_pcictrl(host);
	via_print_sdchc(host);
}
1272
/*
 * Legacy PCI suspend hook: snapshot the controller registers, suspend
 * the MMC core side, then save PCI state and drop into the target power
 * state with wake disabled.
 */
static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)
{
	struct via_crdr_mmc_host *host;
	int ret = 0;

	host = pci_get_drvdata(pcidev);

	via_save_pcictrlreg(host);
	via_save_sdcreg(host);

	ret = mmc_suspend_host(host->mmc, state);

	pci_save_state(pcidev);
	pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
	pci_disable_device(pcidev);
	pci_set_power_state(pcidev, pci_choose_state(pcidev, state));

	return ret;
}
1292
/*
 * Legacy PCI resume hook: re-power the pads at the voltage cached in
 * host->power, pulse soft reset, bring the PCI device back to D0,
 * restore the saved register snapshots and resume the MMC core side.
 */
static int via_sd_resume(struct pci_dev *pcidev)
{
	struct via_crdr_mmc_host *sdhost;
	int ret = 0;
	u8 gatt;

	sdhost = pci_get_drvdata(pcidev);

	gatt = VIA_CRDR_PCICLKGATT_PAD_PWRON;
	if (sdhost->power == MMC_VDD_165_195)
		gatt &= ~VIA_CRDR_PCICLKGATT_3V3;
	else
		gatt |= VIA_CRDR_PCICLKGATT_3V3;
	writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
	via_pwron_sleep(sdhost);
	gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
	writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
	msleep(3);

	/* NOTE(review): extra settle delay; specific value looks
	 * empirical — no datasheet reference available here. */
	msleep(100);

	pci_set_power_state(pcidev, PCI_D0);
	pci_restore_state(pcidev);
	ret = pci_enable_device(pcidev);
	if (ret)
		return ret;

	via_restore_pcictrlreg(sdhost);
	via_init_sdc_pm(sdhost);

	ret = mmc_resume_host(sdhost->mmc);

	return ret;
}
1327
1328#else /* CONFIG_PM */
1329
1330#define via_sd_suspend NULL
1331#define via_sd_resume NULL
1332
1333#endif /* CONFIG_PM */
1334
/* PCI driver glue (legacy suspend/resume callbacks, NULL without PM). */
static struct pci_driver via_sd_driver = {
	.name = DRV_NAME,
	.id_table = via_ids,
	.probe = via_sd_probe,
	.remove = __devexit_p(via_sd_remove),
	.suspend = via_sd_suspend,
	.resume = via_sd_resume,
};
1343
/* Module entry: announce the driver and register with the PCI core. */
static int __init via_sd_drv_init(void)
{
	pr_info(DRV_NAME ": VIA SD/MMC Card Reader driver "
		"(C) 2008 VIA Technologies, Inc.\n");

	return pci_register_driver(&via_sd_driver);
}

/* Module exit: unregister from the PCI core. */
static void __exit via_sd_drv_exit(void)
{
	pci_unregister_driver(&via_sd_driver);
}
1356
/* Module registration and metadata. */
module_init(via_sd_drv_init);
module_exit(via_sd_drv_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("VIA Technologies Inc.");
MODULE_DESCRIPTION("VIA SD/MMC Card Interface driver");
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index c240454fd113..8664feebc93b 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -46,6 +46,7 @@
46#define MANUFACTURER_INTEL 0x0089 46#define MANUFACTURER_INTEL 0x0089
47#define I82802AB 0x00ad 47#define I82802AB 0x00ad
48#define I82802AC 0x00ac 48#define I82802AC 0x00ac
49#define PF38F4476 0x881c
49#define MANUFACTURER_ST 0x0020 50#define MANUFACTURER_ST 0x0020
50#define M50LPW080 0x002F 51#define M50LPW080 0x002F
51#define M50FLW080A 0x0080 52#define M50FLW080A 0x0080
@@ -315,10 +316,20 @@ static struct cfi_fixup fixup_table[] = {
315 { 0, 0, NULL, NULL } 316 { 0, 0, NULL, NULL }
316}; 317};
317 318
319static void cfi_fixup_major_minor(struct cfi_private *cfi,
320 struct cfi_pri_intelext *extp)
321{
322 if (cfi->mfr == MANUFACTURER_INTEL &&
323 cfi->id == PF38F4476 && extp->MinorVersion == '3')
324 extp->MinorVersion = '1';
325}
326
318static inline struct cfi_pri_intelext * 327static inline struct cfi_pri_intelext *
319read_pri_intelext(struct map_info *map, __u16 adr) 328read_pri_intelext(struct map_info *map, __u16 adr)
320{ 329{
330 struct cfi_private *cfi = map->fldrv_priv;
321 struct cfi_pri_intelext *extp; 331 struct cfi_pri_intelext *extp;
332 unsigned int extra_size = 0;
322 unsigned int extp_size = sizeof(*extp); 333 unsigned int extp_size = sizeof(*extp);
323 334
324 again: 335 again:
@@ -326,6 +337,8 @@ read_pri_intelext(struct map_info *map, __u16 adr)
326 if (!extp) 337 if (!extp)
327 return NULL; 338 return NULL;
328 339
340 cfi_fixup_major_minor(cfi, extp);
341
329 if (extp->MajorVersion != '1' || 342 if (extp->MajorVersion != '1' ||
330 (extp->MinorVersion < '0' || extp->MinorVersion > '5')) { 343 (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
331 printk(KERN_ERR " Unknown Intel/Sharp Extended Query " 344 printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
@@ -340,19 +353,24 @@ read_pri_intelext(struct map_info *map, __u16 adr)
340 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask); 353 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
341 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr); 354 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
342 355
343 if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') { 356 if (extp->MinorVersion >= '0') {
344 unsigned int extra_size = 0; 357 extra_size = 0;
345 int nb_parts, i;
346 358
347 /* Protection Register info */ 359 /* Protection Register info */
348 extra_size += (extp->NumProtectionFields - 1) * 360 extra_size += (extp->NumProtectionFields - 1) *
349 sizeof(struct cfi_intelext_otpinfo); 361 sizeof(struct cfi_intelext_otpinfo);
362 }
350 363
364 if (extp->MinorVersion >= '1') {
351 /* Burst Read info */ 365 /* Burst Read info */
352 extra_size += 2; 366 extra_size += 2;
353 if (extp_size < sizeof(*extp) + extra_size) 367 if (extp_size < sizeof(*extp) + extra_size)
354 goto need_more; 368 goto need_more;
355 extra_size += extp->extra[extra_size-1]; 369 extra_size += extp->extra[extra_size - 1];
370 }
371
372 if (extp->MinorVersion >= '3') {
373 int nb_parts, i;
356 374
357 /* Number of hardware-partitions */ 375 /* Number of hardware-partitions */
358 extra_size += 1; 376 extra_size += 1;
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index e824b9b9b056..ccc4cfc7e4b5 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -166,6 +166,7 @@
166#define SST39LF040 0x00D7 166#define SST39LF040 0x00D7
167#define SST39SF010A 0x00B5 167#define SST39SF010A 0x00B5
168#define SST39SF020A 0x00B6 168#define SST39SF020A 0x00B6
169#define SST39SF040 0x00B7
169#define SST49LF004B 0x0060 170#define SST49LF004B 0x0060
170#define SST49LF040B 0x0050 171#define SST49LF040B 0x0050
171#define SST49LF008A 0x005a 172#define SST49LF008A 0x005a
@@ -1393,6 +1394,18 @@ static const struct amd_flash_info jedec_table[] = {
1393 } 1394 }
1394 }, { 1395 }, {
1395 .mfr_id = MANUFACTURER_SST, 1396 .mfr_id = MANUFACTURER_SST,
1397 .dev_id = SST39SF040,
1398 .name = "SST 39SF040",
1399 .devtypes = CFI_DEVICETYPE_X8,
1400 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1401 .dev_size = SIZE_512KiB,
1402 .cmd_set = P_ID_AMD_STD,
1403 .nr_regions = 1,
1404 .regions = {
1405 ERASEINFO(0x01000,128),
1406 }
1407 }, {
1408 .mfr_id = MANUFACTURER_SST,
1396 .dev_id = SST49LF040B, 1409 .dev_id = SST49LF040B,
1397 .name = "SST 49LF040B", 1410 .name = "SST 49LF040B",
1398 .devtypes = CFI_DEVICETYPE_X8, 1411 .devtypes = CFI_DEVICETYPE_X8,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index cc6369ea67dd..59c46126a5ce 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -500,6 +500,9 @@ static struct flash_info __devinitdata m25p_data [] = {
500 { "at26df161a", 0x1f4601, 0, 64 * 1024, 32, SECT_4K, }, 500 { "at26df161a", 0x1f4601, 0, 64 * 1024, 32, SECT_4K, },
501 { "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, }, 501 { "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, },
502 502
503 /* Macronix */
504 { "mx25l12805d", 0xc22018, 0, 64 * 1024, 256, },
505
503 /* Spansion -- single (large) sector size only, at least 506 /* Spansion -- single (large) sector size only, at least
504 * for the chips listed here (without boot sectors). 507 * for the chips listed here (without boot sectors).
505 */ 508 */
@@ -528,6 +531,7 @@ static struct flash_info __devinitdata m25p_data [] = {
528 { "m25p64", 0x202017, 0, 64 * 1024, 128, }, 531 { "m25p64", 0x202017, 0, 64 * 1024, 128, },
529 { "m25p128", 0x202018, 0, 256 * 1024, 64, }, 532 { "m25p128", 0x202018, 0, 256 * 1024, 64, },
530 533
534 { "m45pe10", 0x204011, 0, 64 * 1024, 2, },
531 { "m45pe80", 0x204014, 0, 64 * 1024, 16, }, 535 { "m45pe80", 0x204014, 0, 64 * 1024, 16, },
532 { "m45pe16", 0x204015, 0, 64 * 1024, 32, }, 536 { "m45pe16", 0x204015, 0, 64 * 1024, 32, },
533 537
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 82923bd2d9c5..0b98654d8eed 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -105,15 +105,6 @@ config MSP_FLASH_MAP_LIMIT
105 default "0x02000000" 105 default "0x02000000"
106 depends on MSP_FLASH_MAP_LIMIT_32M 106 depends on MSP_FLASH_MAP_LIMIT_32M
107 107
108config MTD_PMC_MSP_RAMROOT
109 tristate "Embedded RAM block device for root on PMC-Sierra MSP"
110 depends on PMC_MSP_EMBEDDED_ROOTFS && \
111 (MTD_BLOCK || MTD_BLOCK_RO) && \
112 MTD_RAM
113 help
114 This provides support for the embedded root file system
115 on PMC MSP devices. This memory is mapped as a MTD block device.
116
117config MTD_SUN_UFLASH 108config MTD_SUN_UFLASH
118 tristate "Sun Microsystems userflash support" 109 tristate "Sun Microsystems userflash support"
119 depends on SPARC && MTD_CFI && PCI 110 depends on SPARC && MTD_CFI && PCI
@@ -270,7 +261,7 @@ config MTD_ALCHEMY
270 261
271config MTD_DILNETPC 262config MTD_DILNETPC
272 tristate "CFI Flash device mapped on DIL/Net PC" 263 tristate "CFI Flash device mapped on DIL/Net PC"
273 depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT 264 depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
274 help 265 help
275 MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP". 266 MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
276 For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm> 267 For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
@@ -501,7 +492,7 @@ config MTD_BFIN_ASYNC
501 If compiled as a module, it will be called bfin-async-flash. 492 If compiled as a module, it will be called bfin-async-flash.
502 493
503config MTD_UCLINUX 494config MTD_UCLINUX
504 tristate "Generic uClinux RAM/ROM filesystem support" 495 bool "Generic uClinux RAM/ROM filesystem support"
505 depends on MTD_PARTITIONS && MTD_RAM && !MMU 496 depends on MTD_PARTITIONS && MTD_RAM && !MMU
506 help 497 help
507 Map driver to support image based filesystems for uClinux. 498 Map driver to support image based filesystems for uClinux.
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 2dbc1bec8488..8bae7f9850c0 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
25obj-$(CONFIG_MTD_PHYSMAP) += physmap.o 25obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
26obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o 26obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
27obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o 27obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
28obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o
29obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o 28obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
30obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o 29obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
31obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o 30obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 576611f605db..365c77b1b871 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -40,6 +40,9 @@ struct async_state {
40 uint32_t flash_ambctl0, flash_ambctl1; 40 uint32_t flash_ambctl0, flash_ambctl1;
41 uint32_t save_ambctl0, save_ambctl1; 41 uint32_t save_ambctl0, save_ambctl1;
42 unsigned long irq_flags; 42 unsigned long irq_flags;
43#ifdef CONFIG_MTD_PARTITIONS
44 struct mtd_partition *parts;
45#endif
43}; 46};
44 47
45static void switch_to_flash(struct async_state *state) 48static void switch_to_flash(struct async_state *state)
@@ -170,6 +173,7 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
170 if (ret > 0) { 173 if (ret > 0) {
171 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n"); 174 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
172 add_mtd_partitions(state->mtd, pdata->parts, ret); 175 add_mtd_partitions(state->mtd, pdata->parts, ret);
176 state->parts = pdata->parts;
173 177
174 } else if (pdata->nr_parts) { 178 } else if (pdata->nr_parts) {
175 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n"); 179 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
@@ -193,6 +197,7 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev)
193 gpio_free(state->enet_flash_pin); 197 gpio_free(state->enet_flash_pin);
194#ifdef CONFIG_MTD_PARTITIONS 198#ifdef CONFIG_MTD_PARTITIONS
195 del_mtd_partitions(state->mtd); 199 del_mtd_partitions(state->mtd);
200 kfree(state->parts);
196#endif 201#endif
197 map_destroy(state->mtd); 202 map_destroy(state->mtd);
198 kfree(state); 203 kfree(state);
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index c9681a339a59..b08a798ee254 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -36,27 +36,33 @@
36#include <linux/mtd/mtd.h> 36#include <linux/mtd/mtd.h>
37#include <linux/mtd/map.h> 37#include <linux/mtd/map.h>
38#include <linux/mtd/partitions.h> 38#include <linux/mtd/partitions.h>
39#include <linux/mtd/concat.h>
39 40
40#include <asm/mach/flash.h> 41#include <asm/mach/flash.h>
41#include <mach/hardware.h> 42#include <mach/hardware.h>
42#include <asm/system.h> 43#include <asm/system.h>
43 44
44#ifdef CONFIG_ARCH_P720T 45#define SUBDEV_NAME_SIZE (BUS_ID_SIZE + 2)
45#define FLASH_BASE (0x04000000)
46#define FLASH_SIZE (64*1024*1024)
47#endif
48 46
49struct armflash_info { 47struct armflash_subdev_info {
48 char name[SUBDEV_NAME_SIZE];
49 struct mtd_info *mtd;
50 struct map_info map;
50 struct flash_platform_data *plat; 51 struct flash_platform_data *plat;
52};
53
54struct armflash_info {
51 struct resource *res; 55 struct resource *res;
52 struct mtd_partition *parts; 56 struct mtd_partition *parts;
53 struct mtd_info *mtd; 57 struct mtd_info *mtd;
54 struct map_info map; 58 int nr_subdev;
59 struct armflash_subdev_info subdev[0];
55}; 60};
56 61
57static void armflash_set_vpp(struct map_info *map, int on) 62static void armflash_set_vpp(struct map_info *map, int on)
58{ 63{
59 struct armflash_info *info = container_of(map, struct armflash_info, map); 64 struct armflash_subdev_info *info =
65 container_of(map, struct armflash_subdev_info, map);
60 66
61 if (info->plat && info->plat->set_vpp) 67 if (info->plat && info->plat->set_vpp)
62 info->plat->set_vpp(on); 68 info->plat->set_vpp(on);
@@ -64,32 +70,17 @@ static void armflash_set_vpp(struct map_info *map, int on)
64 70
65static const char *probes[] = { "cmdlinepart", "RedBoot", "afs", NULL }; 71static const char *probes[] = { "cmdlinepart", "RedBoot", "afs", NULL };
66 72
67static int armflash_probe(struct platform_device *dev) 73static int armflash_subdev_probe(struct armflash_subdev_info *subdev,
74 struct resource *res)
68{ 75{
69 struct flash_platform_data *plat = dev->dev.platform_data; 76 struct flash_platform_data *plat = subdev->plat;
70 struct resource *res = dev->resource; 77 resource_size_t size = res->end - res->start + 1;
71 unsigned int size = res->end - res->start + 1;
72 struct armflash_info *info;
73 int err;
74 void __iomem *base; 78 void __iomem *base;
79 int err = 0;
75 80
76 info = kzalloc(sizeof(struct armflash_info), GFP_KERNEL); 81 if (!request_mem_region(res->start, size, subdev->name)) {
77 if (!info) {
78 err = -ENOMEM;
79 goto out;
80 }
81
82 info->plat = plat;
83 if (plat && plat->init) {
84 err = plat->init();
85 if (err)
86 goto no_resource;
87 }
88
89 info->res = request_mem_region(res->start, size, "armflash");
90 if (!info->res) {
91 err = -EBUSY; 82 err = -EBUSY;
92 goto no_resource; 83 goto out;
93 } 84 }
94 85
95 base = ioremap(res->start, size); 86 base = ioremap(res->start, size);
@@ -101,27 +92,132 @@ static int armflash_probe(struct platform_device *dev)
101 /* 92 /*
102 * look for CFI based flash parts fitted to this board 93 * look for CFI based flash parts fitted to this board
103 */ 94 */
104 info->map.size = size; 95 subdev->map.size = size;
105 info->map.bankwidth = plat->width; 96 subdev->map.bankwidth = plat->width;
106 info->map.phys = res->start; 97 subdev->map.phys = res->start;
107 info->map.virt = base; 98 subdev->map.virt = base;
108 info->map.name = dev_name(&dev->dev); 99 subdev->map.name = subdev->name;
109 info->map.set_vpp = armflash_set_vpp; 100 subdev->map.set_vpp = armflash_set_vpp;
110 101
111 simple_map_init(&info->map); 102 simple_map_init(&subdev->map);
112 103
113 /* 104 /*
114 * Also, the CFI layer automatically works out what size 105 * Also, the CFI layer automatically works out what size
115 * of chips we have, and does the necessary identification 106 * of chips we have, and does the necessary identification
116 * for us automatically. 107 * for us automatically.
117 */ 108 */
118 info->mtd = do_map_probe(plat->map_name, &info->map); 109 subdev->mtd = do_map_probe(plat->map_name, &subdev->map);
119 if (!info->mtd) { 110 if (!subdev->mtd) {
120 err = -ENXIO; 111 err = -ENXIO;
121 goto no_device; 112 goto no_device;
122 } 113 }
123 114
124 info->mtd->owner = THIS_MODULE; 115 subdev->mtd->owner = THIS_MODULE;
116
117 /* Successful? */
118 if (err == 0)
119 return err;
120
121 if (subdev->mtd)
122 map_destroy(subdev->mtd);
123 no_device:
124 iounmap(base);
125 no_mem:
126 release_mem_region(res->start, size);
127 out:
128 return err;
129}
130
131static void armflash_subdev_remove(struct armflash_subdev_info *subdev)
132{
133 if (subdev->mtd)
134 map_destroy(subdev->mtd);
135 if (subdev->map.virt)
136 iounmap(subdev->map.virt);
137 release_mem_region(subdev->map.phys, subdev->map.size);
138}
139
140static int armflash_probe(struct platform_device *dev)
141{
142 struct flash_platform_data *plat = dev->dev.platform_data;
143 unsigned int size;
144 struct armflash_info *info;
145 int i, nr, err;
146
147 /* Count the number of devices */
148 for (nr = 0; ; nr++)
149 if (!platform_get_resource(dev, IORESOURCE_MEM, nr))
150 break;
151 if (nr == 0) {
152 err = -ENODEV;
153 goto out;
154 }
155
156 size = sizeof(struct armflash_info) +
157 sizeof(struct armflash_subdev_info) * nr;
158 info = kzalloc(size, GFP_KERNEL);
159 if (!info) {
160 err = -ENOMEM;
161 goto out;
162 }
163
164 if (plat && plat->init) {
165 err = plat->init();
166 if (err)
167 goto no_resource;
168 }
169
170 for (i = 0; i < nr; i++) {
171 struct armflash_subdev_info *subdev = &info->subdev[i];
172 struct resource *res;
173
174 res = platform_get_resource(dev, IORESOURCE_MEM, i);
175 if (!res)
176 break;
177
178 if (nr == 1)
179 /* No MTD concatenation, just use the default name */
180 snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s",
181 dev_name(&dev->dev));
182 else
183 snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s-%d",
184 dev_name(&dev->dev), i);
185 subdev->plat = plat;
186
187 err = armflash_subdev_probe(subdev, res);
188 if (err)
189 break;
190 }
191 info->nr_subdev = i;
192
193 if (err)
194 goto subdev_err;
195
196 if (info->nr_subdev == 1)
197 info->mtd = info->subdev[0].mtd;
198 else if (info->nr_subdev > 1) {
199#ifdef CONFIG_MTD_CONCAT
200 struct mtd_info *cdev[info->nr_subdev];
201
202 /*
203 * We detected multiple devices. Concatenate them together.
204 */
205 for (i = 0; i < info->nr_subdev; i++)
206 cdev[i] = info->subdev[i].mtd;
207
208 info->mtd = mtd_concat_create(cdev, info->nr_subdev,
209 dev_name(&dev->dev));
210 if (info->mtd == NULL)
211 err = -ENXIO;
212#else
213 printk(KERN_ERR "armflash: multiple devices found but "
214 "MTD concat support disabled.\n");
215 err = -ENXIO;
216#endif
217 }
218
219 if (err < 0)
220 goto cleanup;
125 221
126 err = parse_mtd_partitions(info->mtd, probes, &info->parts, 0); 222 err = parse_mtd_partitions(info->mtd, probes, &info->parts, 0);
127 if (err > 0) { 223 if (err > 0) {
@@ -131,28 +227,30 @@ static int armflash_probe(struct platform_device *dev)
131 "mtd partition registration failed: %d\n", err); 227 "mtd partition registration failed: %d\n", err);
132 } 228 }
133 229
134 if (err == 0) 230 if (err == 0) {
135 platform_set_drvdata(dev, info); 231 platform_set_drvdata(dev, info);
232 return err;
233 }
136 234
137 /* 235 /*
138 * If we got an error, free all resources. 236 * We got an error, free all resources.
139 */ 237 */
140 if (err < 0) { 238 cleanup:
141 if (info->mtd) { 239 if (info->mtd) {
142 del_mtd_partitions(info->mtd); 240 del_mtd_partitions(info->mtd);
143 map_destroy(info->mtd); 241#ifdef CONFIG_MTD_CONCAT
144 } 242 if (info->mtd != info->subdev[0].mtd)
145 kfree(info->parts); 243 mtd_concat_destroy(info->mtd);
146 244#endif
147 no_device:
148 iounmap(base);
149 no_mem:
150 release_mem_region(res->start, size);
151 no_resource:
152 if (plat && plat->exit)
153 plat->exit();
154 kfree(info);
155 } 245 }
246 kfree(info->parts);
247 subdev_err:
248 for (i = info->nr_subdev - 1; i >= 0; i--)
249 armflash_subdev_remove(&info->subdev[i]);
250 no_resource:
251 if (plat && plat->exit)
252 plat->exit();
253 kfree(info);
156 out: 254 out:
157 return err; 255 return err;
158} 256}
@@ -160,22 +258,26 @@ static int armflash_probe(struct platform_device *dev)
160static int armflash_remove(struct platform_device *dev) 258static int armflash_remove(struct platform_device *dev)
161{ 259{
162 struct armflash_info *info = platform_get_drvdata(dev); 260 struct armflash_info *info = platform_get_drvdata(dev);
261 struct flash_platform_data *plat = dev->dev.platform_data;
262 int i;
163 263
164 platform_set_drvdata(dev, NULL); 264 platform_set_drvdata(dev, NULL);
165 265
166 if (info) { 266 if (info) {
167 if (info->mtd) { 267 if (info->mtd) {
168 del_mtd_partitions(info->mtd); 268 del_mtd_partitions(info->mtd);
169 map_destroy(info->mtd); 269#ifdef CONFIG_MTD_CONCAT
270 if (info->mtd != info->subdev[0].mtd)
271 mtd_concat_destroy(info->mtd);
272#endif
170 } 273 }
171 kfree(info->parts); 274 kfree(info->parts);
172 275
173 iounmap(info->map.virt); 276 for (i = info->nr_subdev - 1; i >= 0; i--)
174 release_resource(info->res); 277 armflash_subdev_remove(&info->subdev[i]);
175 kfree(info->res);
176 278
177 if (info->plat && info->plat->exit) 279 if (plat && plat->exit)
178 info->plat->exit(); 280 plat->exit();
179 281
180 kfree(info); 282 kfree(info);
181 } 283 }
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 29a901157352..380648e9051a 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -195,42 +195,6 @@ err_out:
195} 195}
196 196
197#ifdef CONFIG_PM 197#ifdef CONFIG_PM
198static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state)
199{
200 struct physmap_flash_info *info = platform_get_drvdata(dev);
201 int ret = 0;
202 int i;
203
204 for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
205 if (info->mtd[i]->suspend) {
206 ret = info->mtd[i]->suspend(info->mtd[i]);
207 if (ret)
208 goto fail;
209 }
210
211 return 0;
212fail:
213 for (--i; i >= 0; --i)
214 if (info->mtd[i]->suspend) {
215 BUG_ON(!info->mtd[i]->resume);
216 info->mtd[i]->resume(info->mtd[i]);
217 }
218
219 return ret;
220}
221
222static int physmap_flash_resume(struct platform_device *dev)
223{
224 struct physmap_flash_info *info = platform_get_drvdata(dev);
225 int i;
226
227 for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
228 if (info->mtd[i]->resume)
229 info->mtd[i]->resume(info->mtd[i]);
230
231 return 0;
232}
233
234static void physmap_flash_shutdown(struct platform_device *dev) 198static void physmap_flash_shutdown(struct platform_device *dev)
235{ 199{
236 struct physmap_flash_info *info = platform_get_drvdata(dev); 200 struct physmap_flash_info *info = platform_get_drvdata(dev);
@@ -242,16 +206,12 @@ static void physmap_flash_shutdown(struct platform_device *dev)
242 info->mtd[i]->resume(info->mtd[i]); 206 info->mtd[i]->resume(info->mtd[i]);
243} 207}
244#else 208#else
245#define physmap_flash_suspend NULL
246#define physmap_flash_resume NULL
247#define physmap_flash_shutdown NULL 209#define physmap_flash_shutdown NULL
248#endif 210#endif
249 211
250static struct platform_driver physmap_flash_driver = { 212static struct platform_driver physmap_flash_driver = {
251 .probe = physmap_flash_probe, 213 .probe = physmap_flash_probe,
252 .remove = physmap_flash_remove, 214 .remove = physmap_flash_remove,
253 .suspend = physmap_flash_suspend,
254 .resume = physmap_flash_resume,
255 .shutdown = physmap_flash_shutdown, 215 .shutdown = physmap_flash_shutdown,
256 .driver = { 216 .driver = {
257 .name = "physmap-flash", 217 .name = "physmap-flash",
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index c83a60fada53..39d357b2eb47 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -20,16 +20,23 @@
20#include <linux/mtd/mtd.h> 20#include <linux/mtd/mtd.h>
21#include <linux/mtd/map.h> 21#include <linux/mtd/map.h>
22#include <linux/mtd/partitions.h> 22#include <linux/mtd/partitions.h>
23#include <linux/mtd/concat.h>
23#include <linux/of.h> 24#include <linux/of.h>
24#include <linux/of_platform.h> 25#include <linux/of_platform.h>
25 26
27struct of_flash_list {
28 struct mtd_info *mtd;
29 struct map_info map;
30 struct resource *res;
31};
32
26struct of_flash { 33struct of_flash {
27 struct mtd_info *mtd; 34 struct mtd_info *cmtd;
28 struct map_info map;
29 struct resource *res;
30#ifdef CONFIG_MTD_PARTITIONS 35#ifdef CONFIG_MTD_PARTITIONS
31 struct mtd_partition *parts; 36 struct mtd_partition *parts;
32#endif 37#endif
38 int list_size; /* number of elements in of_flash_list */
39 struct of_flash_list list[0];
33}; 40};
34 41
35#ifdef CONFIG_MTD_PARTITIONS 42#ifdef CONFIG_MTD_PARTITIONS
@@ -88,30 +95,44 @@ static int parse_obsolete_partitions(struct of_device *dev,
88static int of_flash_remove(struct of_device *dev) 95static int of_flash_remove(struct of_device *dev)
89{ 96{
90 struct of_flash *info; 97 struct of_flash *info;
98 int i;
91 99
92 info = dev_get_drvdata(&dev->dev); 100 info = dev_get_drvdata(&dev->dev);
93 if (!info) 101 if (!info)
94 return 0; 102 return 0;
95 dev_set_drvdata(&dev->dev, NULL); 103 dev_set_drvdata(&dev->dev, NULL);
96 104
97 if (info->mtd) { 105#ifdef CONFIG_MTD_CONCAT
106 if (info->cmtd != info->list[0].mtd) {
107 del_mtd_device(info->cmtd);
108 mtd_concat_destroy(info->cmtd);
109 }
110#endif
111
112 if (info->cmtd) {
98 if (OF_FLASH_PARTS(info)) { 113 if (OF_FLASH_PARTS(info)) {
99 del_mtd_partitions(info->mtd); 114 del_mtd_partitions(info->cmtd);
100 kfree(OF_FLASH_PARTS(info)); 115 kfree(OF_FLASH_PARTS(info));
101 } else { 116 } else {
102 del_mtd_device(info->mtd); 117 del_mtd_device(info->cmtd);
103 } 118 }
104 map_destroy(info->mtd);
105 } 119 }
106 120
107 if (info->map.virt) 121 for (i = 0; i < info->list_size; i++) {
108 iounmap(info->map.virt); 122 if (info->list[i].mtd)
123 map_destroy(info->list[i].mtd);
109 124
110 if (info->res) { 125 if (info->list[i].map.virt)
111 release_resource(info->res); 126 iounmap(info->list[i].map.virt);
112 kfree(info->res); 127
128 if (info->list[i].res) {
129 release_resource(info->list[i].res);
130 kfree(info->list[i].res);
131 }
113 } 132 }
114 133
134 kfree(info);
135
115 return 0; 136 return 0;
116} 137}
117 138
@@ -164,68 +185,130 @@ static int __devinit of_flash_probe(struct of_device *dev,
164 const char *probe_type = match->data; 185 const char *probe_type = match->data;
165 const u32 *width; 186 const u32 *width;
166 int err; 187 int err;
167 188 int i;
168 err = -ENXIO; 189 int count;
169 if (of_address_to_resource(dp, 0, &res)) { 190 const u32 *p;
170 dev_err(&dev->dev, "Can't get IO address from device tree\n"); 191 int reg_tuple_size;
192 struct mtd_info **mtd_list = NULL;
193
194 reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
195
196 /*
197 * Get number of "reg" tuples. Scan for MTD devices on area's
198 * described by each "reg" region. This makes it possible (including
199 * the concat support) to support the Intel P30 48F4400 chips which
200 * consists internally of 2 non-identical NOR chips on one die.
201 */
202 p = of_get_property(dp, "reg", &count);
203 if (count % reg_tuple_size != 0) {
204 dev_err(&dev->dev, "Malformed reg property on %s\n",
205 dev->node->full_name);
206 err = -EINVAL;
171 goto err_out; 207 goto err_out;
172 } 208 }
173 209 count /= reg_tuple_size;
174 dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
175 (unsigned long long)res.start, (unsigned long long)res.end);
176 210
177 err = -ENOMEM; 211 err = -ENOMEM;
178 info = kzalloc(sizeof(*info), GFP_KERNEL); 212 info = kzalloc(sizeof(struct of_flash) +
213 sizeof(struct of_flash_list) * count, GFP_KERNEL);
214 if (!info)
215 goto err_out;
216
217 mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL);
179 if (!info) 218 if (!info)
180 goto err_out; 219 goto err_out;
181 220
182 dev_set_drvdata(&dev->dev, info); 221 dev_set_drvdata(&dev->dev, info);
183 222
184 err = -EBUSY; 223 for (i = 0; i < count; i++) {
185 info->res = request_mem_region(res.start, res.end - res.start + 1, 224 err = -ENXIO;
186 dev_name(&dev->dev)); 225 if (of_address_to_resource(dp, i, &res)) {
187 if (!info->res) 226 dev_err(&dev->dev, "Can't get IO address from device"
188 goto err_out; 227 " tree\n");
228 goto err_out;
229 }
189 230
190 err = -ENXIO; 231 dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
191 width = of_get_property(dp, "bank-width", NULL); 232 (unsigned long long)res.start,
192 if (!width) { 233 (unsigned long long)res.end);
193 dev_err(&dev->dev, "Can't get bank width from device tree\n"); 234
194 goto err_out; 235 err = -EBUSY;
195 } 236 info->list[i].res = request_mem_region(res.start, res.end -
237 res.start + 1,
238 dev_name(&dev->dev));
239 if (!info->list[i].res)
240 goto err_out;
241
242 err = -ENXIO;
243 width = of_get_property(dp, "bank-width", NULL);
244 if (!width) {
245 dev_err(&dev->dev, "Can't get bank width from device"
246 " tree\n");
247 goto err_out;
248 }
196 249
197 info->map.name = dev_name(&dev->dev); 250 info->list[i].map.name = dev_name(&dev->dev);
198 info->map.phys = res.start; 251 info->list[i].map.phys = res.start;
199 info->map.size = res.end - res.start + 1; 252 info->list[i].map.size = res.end - res.start + 1;
200 info->map.bankwidth = *width; 253 info->list[i].map.bankwidth = *width;
254
255 err = -ENOMEM;
256 info->list[i].map.virt = ioremap(info->list[i].map.phys,
257 info->list[i].map.size);
258 if (!info->list[i].map.virt) {
259 dev_err(&dev->dev, "Failed to ioremap() flash"
260 " region\n");
261 goto err_out;
262 }
201 263
202 err = -ENOMEM; 264 simple_map_init(&info->list[i].map);
203 info->map.virt = ioremap(info->map.phys, info->map.size);
204 if (!info->map.virt) {
205 dev_err(&dev->dev, "Failed to ioremap() flash region\n");
206 goto err_out;
207 }
208 265
209 simple_map_init(&info->map); 266 if (probe_type) {
267 info->list[i].mtd = do_map_probe(probe_type,
268 &info->list[i].map);
269 } else {
270 info->list[i].mtd = obsolete_probe(dev,
271 &info->list[i].map);
272 }
273 mtd_list[i] = info->list[i].mtd;
210 274
211 if (probe_type) 275 err = -ENXIO;
212 info->mtd = do_map_probe(probe_type, &info->map); 276 if (!info->list[i].mtd) {
213 else 277 dev_err(&dev->dev, "do_map_probe() failed\n");
214 info->mtd = obsolete_probe(dev, &info->map); 278 goto err_out;
279 } else {
280 info->list_size++;
281 }
282 info->list[i].mtd->owner = THIS_MODULE;
283 info->list[i].mtd->dev.parent = &dev->dev;
284 }
215 285
216 err = -ENXIO; 286 err = 0;
217 if (!info->mtd) { 287 if (info->list_size == 1) {
218 dev_err(&dev->dev, "do_map_probe() failed\n"); 288 info->cmtd = info->list[0].mtd;
219 goto err_out; 289 } else if (info->list_size > 1) {
290 /*
291 * We detected multiple devices. Concatenate them together.
292 */
293#ifdef CONFIG_MTD_CONCAT
294 info->cmtd = mtd_concat_create(mtd_list, info->list_size,
295 dev_name(&dev->dev));
296 if (info->cmtd == NULL)
297 err = -ENXIO;
298#else
299 printk(KERN_ERR "physmap_of: multiple devices "
300 "found but MTD concat support disabled.\n");
301 err = -ENXIO;
302#endif
220 } 303 }
221 info->mtd->owner = THIS_MODULE; 304 if (err)
222 info->mtd->dev.parent = &dev->dev; 305 goto err_out;
223 306
224#ifdef CONFIG_MTD_PARTITIONS 307#ifdef CONFIG_MTD_PARTITIONS
225 /* First look for RedBoot table or partitions on the command 308 /* First look for RedBoot table or partitions on the command
226 * line, these take precedence over device tree information */ 309 * line, these take precedence over device tree information */
227 err = parse_mtd_partitions(info->mtd, part_probe_types, 310 err = parse_mtd_partitions(info->cmtd, part_probe_types,
228 &info->parts, 0); 311 &info->parts, 0);
229 if (err < 0) 312 if (err < 0)
230 return err; 313 return err;
231 314
@@ -244,15 +327,19 @@ static int __devinit of_flash_probe(struct of_device *dev,
244 } 327 }
245 328
246 if (err > 0) 329 if (err > 0)
247 add_mtd_partitions(info->mtd, info->parts, err); 330 add_mtd_partitions(info->cmtd, info->parts, err);
248 else 331 else
249#endif 332#endif
250 add_mtd_device(info->mtd); 333 add_mtd_device(info->cmtd);
334
335 kfree(mtd_list);
251 336
252 return 0; 337 return 0;
253 338
254err_out: 339err_out:
340 kfree(mtd_list);
255 of_flash_remove(dev); 341 of_flash_remove(dev);
342
256 return err; 343 return err;
257} 344}
258 345
diff --git a/drivers/mtd/maps/pmcmsp-ramroot.c b/drivers/mtd/maps/pmcmsp-ramroot.c
deleted file mode 100644
index 30de5c0c09a9..000000000000
--- a/drivers/mtd/maps/pmcmsp-ramroot.c
+++ /dev/null
@@ -1,104 +0,0 @@
1/*
2 * Mapping of the rootfs in a physical region of memory
3 *
4 * Copyright (C) 2005-2007 PMC-Sierra Inc.
5 * Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
13 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
15 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
16 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
17 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
18 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
19 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
20 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
21 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22 *
23 * You should have received a copy of the GNU General Public License along
24 * with this program; if not, write to the Free Software Foundation, Inc.,
25 * 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#include <linux/module.h>
29#include <linux/types.h>
30#include <linux/kernel.h>
31#include <linux/init.h>
32#include <linux/slab.h>
33#include <linux/fs.h>
34#include <linux/root_dev.h>
35#include <linux/mtd/mtd.h>
36#include <linux/mtd/map.h>
37
38#include <asm/io.h>
39
40#include <msp_prom.h>
41
42static struct mtd_info *rr_mtd;
43
44struct map_info rr_map = {
45 .name = "ramroot",
46 .bankwidth = 4,
47};
48
49static int __init init_rrmap(void)
50{
51 void *ramroot_start;
52 unsigned long ramroot_size;
53
54 /* Check for supported rootfs types */
55 if (get_ramroot(&ramroot_start, &ramroot_size)) {
56 rr_map.phys = CPHYSADDR(ramroot_start);
57 rr_map.size = ramroot_size;
58
59 printk(KERN_NOTICE
60 "PMC embedded root device: 0x%08lx @ 0x%08lx\n",
61 rr_map.size, (unsigned long)rr_map.phys);
62 } else {
63 printk(KERN_ERR
64 "init_rrmap: no supported embedded rootfs detected!\n");
65 return -ENXIO;
66 }
67
68 /* Map rootfs to I/O space for block device driver */
69 rr_map.virt = ioremap(rr_map.phys, rr_map.size);
70 if (!rr_map.virt) {
71 printk(KERN_ERR "Failed to ioremap\n");
72 return -EIO;
73 }
74
75 simple_map_init(&rr_map);
76
77 rr_mtd = do_map_probe("map_ram", &rr_map);
78 if (rr_mtd) {
79 rr_mtd->owner = THIS_MODULE;
80
81 add_mtd_device(rr_mtd);
82
83 return 0;
84 }
85
86 iounmap(rr_map.virt);
87 return -ENXIO;
88}
89
90static void __exit cleanup_rrmap(void)
91{
92 del_mtd_device(rr_mtd);
93 map_destroy(rr_mtd);
94
95 iounmap(rr_map.virt);
96 rr_map.virt = NULL;
97}
98
99MODULE_AUTHOR("PMC-Sierra, Inc");
100MODULE_DESCRIPTION("MTD map driver for embedded PMC-Sierra MSP filesystem");
101MODULE_LICENSE("GPL");
102
103module_init(init_rrmap);
104module_exit(cleanup_rrmap);
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 572d32fdf38a..643aa06b599e 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -140,24 +140,6 @@ static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
140} 140}
141 141
142#ifdef CONFIG_PM 142#ifdef CONFIG_PM
143static int pxa2xx_flash_suspend(struct platform_device *dev, pm_message_t state)
144{
145 struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
146 int ret = 0;
147
148 if (info->mtd && info->mtd->suspend)
149 ret = info->mtd->suspend(info->mtd);
150 return ret;
151}
152
153static int pxa2xx_flash_resume(struct platform_device *dev)
154{
155 struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
156
157 if (info->mtd && info->mtd->resume)
158 info->mtd->resume(info->mtd);
159 return 0;
160}
161static void pxa2xx_flash_shutdown(struct platform_device *dev) 143static void pxa2xx_flash_shutdown(struct platform_device *dev)
162{ 144{
163 struct pxa2xx_flash_info *info = platform_get_drvdata(dev); 145 struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
@@ -166,8 +148,6 @@ static void pxa2xx_flash_shutdown(struct platform_device *dev)
166 info->mtd->resume(info->mtd); 148 info->mtd->resume(info->mtd);
167} 149}
168#else 150#else
169#define pxa2xx_flash_suspend NULL
170#define pxa2xx_flash_resume NULL
171#define pxa2xx_flash_shutdown NULL 151#define pxa2xx_flash_shutdown NULL
172#endif 152#endif
173 153
@@ -178,8 +158,6 @@ static struct platform_driver pxa2xx_flash_driver = {
178 }, 158 },
179 .probe = pxa2xx_flash_probe, 159 .probe = pxa2xx_flash_probe,
180 .remove = __devexit_p(pxa2xx_flash_remove), 160 .remove = __devexit_p(pxa2xx_flash_remove),
181 .suspend = pxa2xx_flash_suspend,
182 .resume = pxa2xx_flash_resume,
183 .shutdown = pxa2xx_flash_shutdown, 161 .shutdown = pxa2xx_flash_shutdown,
184}; 162};
185 163
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index d39f0adac846..83ed64512c5e 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -145,25 +145,6 @@ err_out:
145} 145}
146 146
147#ifdef CONFIG_PM 147#ifdef CONFIG_PM
148static int rbtx4939_flash_suspend(struct platform_device *dev,
149 pm_message_t state)
150{
151 struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
152
153 if (info->mtd->suspend)
154 return info->mtd->suspend(info->mtd);
155 return 0;
156}
157
158static int rbtx4939_flash_resume(struct platform_device *dev)
159{
160 struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
161
162 if (info->mtd->resume)
163 info->mtd->resume(info->mtd);
164 return 0;
165}
166
167static void rbtx4939_flash_shutdown(struct platform_device *dev) 148static void rbtx4939_flash_shutdown(struct platform_device *dev)
168{ 149{
169 struct rbtx4939_flash_info *info = platform_get_drvdata(dev); 150 struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
@@ -173,16 +154,12 @@ static void rbtx4939_flash_shutdown(struct platform_device *dev)
173 info->mtd->resume(info->mtd); 154 info->mtd->resume(info->mtd);
174} 155}
175#else 156#else
176#define rbtx4939_flash_suspend NULL
177#define rbtx4939_flash_resume NULL
178#define rbtx4939_flash_shutdown NULL 157#define rbtx4939_flash_shutdown NULL
179#endif 158#endif
180 159
181static struct platform_driver rbtx4939_flash_driver = { 160static struct platform_driver rbtx4939_flash_driver = {
182 .probe = rbtx4939_flash_probe, 161 .probe = rbtx4939_flash_probe,
183 .remove = rbtx4939_flash_remove, 162 .remove = rbtx4939_flash_remove,
184 .suspend = rbtx4939_flash_suspend,
185 .resume = rbtx4939_flash_resume,
186 .shutdown = rbtx4939_flash_shutdown, 163 .shutdown = rbtx4939_flash_shutdown,
187 .driver = { 164 .driver = {
188 .name = "rbtx4939-flash", 165 .name = "rbtx4939-flash",
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 05e9362dc7f0..c6210f5118d1 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -415,25 +415,6 @@ static int __exit sa1100_mtd_remove(struct platform_device *pdev)
415} 415}
416 416
417#ifdef CONFIG_PM 417#ifdef CONFIG_PM
418static int sa1100_mtd_suspend(struct platform_device *dev, pm_message_t state)
419{
420 struct sa_info *info = platform_get_drvdata(dev);
421 int ret = 0;
422
423 if (info)
424 ret = info->mtd->suspend(info->mtd);
425
426 return ret;
427}
428
429static int sa1100_mtd_resume(struct platform_device *dev)
430{
431 struct sa_info *info = platform_get_drvdata(dev);
432 if (info)
433 info->mtd->resume(info->mtd);
434 return 0;
435}
436
437static void sa1100_mtd_shutdown(struct platform_device *dev) 418static void sa1100_mtd_shutdown(struct platform_device *dev)
438{ 419{
439 struct sa_info *info = platform_get_drvdata(dev); 420 struct sa_info *info = platform_get_drvdata(dev);
@@ -441,16 +422,12 @@ static void sa1100_mtd_shutdown(struct platform_device *dev)
441 info->mtd->resume(info->mtd); 422 info->mtd->resume(info->mtd);
442} 423}
443#else 424#else
444#define sa1100_mtd_suspend NULL
445#define sa1100_mtd_resume NULL
446#define sa1100_mtd_shutdown NULL 425#define sa1100_mtd_shutdown NULL
447#endif 426#endif
448 427
449static struct platform_driver sa1100_mtd_driver = { 428static struct platform_driver sa1100_mtd_driver = {
450 .probe = sa1100_mtd_probe, 429 .probe = sa1100_mtd_probe,
451 .remove = __exit_p(sa1100_mtd_remove), 430 .remove = __exit_p(sa1100_mtd_remove),
452 .suspend = sa1100_mtd_suspend,
453 .resume = sa1100_mtd_resume,
454 .shutdown = sa1100_mtd_shutdown, 431 .shutdown = sa1100_mtd_shutdown,
455 .driver = { 432 .driver = {
456 .name = "sa1100-mtd", 433 .name = "sa1100-mtd",
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 81756e397711..d4314fb88212 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -22,15 +22,19 @@
22 22
23/****************************************************************************/ 23/****************************************************************************/
24 24
25extern char _ebss;
26
25struct map_info uclinux_ram_map = { 27struct map_info uclinux_ram_map = {
26 .name = "RAM", 28 .name = "RAM",
29 .phys = (unsigned long)&_ebss,
30 .size = 0,
27}; 31};
28 32
29struct mtd_info *uclinux_ram_mtdinfo; 33static struct mtd_info *uclinux_ram_mtdinfo;
30 34
31/****************************************************************************/ 35/****************************************************************************/
32 36
33struct mtd_partition uclinux_romfs[] = { 37static struct mtd_partition uclinux_romfs[] = {
34 { .name = "ROMfs" } 38 { .name = "ROMfs" }
35}; 39};
36 40
@@ -38,7 +42,7 @@ struct mtd_partition uclinux_romfs[] = {
38 42
39/****************************************************************************/ 43/****************************************************************************/
40 44
41int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len, 45static int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
42 size_t *retlen, void **virt, resource_size_t *phys) 46 size_t *retlen, void **virt, resource_size_t *phys)
43{ 47{
44 struct map_info *map = mtd->priv; 48 struct map_info *map = mtd->priv;
@@ -55,12 +59,10 @@ static int __init uclinux_mtd_init(void)
55{ 59{
56 struct mtd_info *mtd; 60 struct mtd_info *mtd;
57 struct map_info *mapp; 61 struct map_info *mapp;
58 extern char _ebss;
59 unsigned long addr = (unsigned long) &_ebss;
60 62
61 mapp = &uclinux_ram_map; 63 mapp = &uclinux_ram_map;
62 mapp->phys = addr; 64 if (!mapp->size)
63 mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(addr + 8)))); 65 mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8))));
64 mapp->bankwidth = 4; 66 mapp->bankwidth = 4;
65 67
66 printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n", 68 printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index aaac3b6800b7..c3f62654b6df 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -291,7 +291,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
291 gd->private_data = new; 291 gd->private_data = new;
292 new->blkcore_priv = gd; 292 new->blkcore_priv = gd;
293 gd->queue = tr->blkcore_priv->rq; 293 gd->queue = tr->blkcore_priv->rq;
294 gd->driverfs_dev = new->mtd->dev.parent; 294 gd->driverfs_dev = &new->mtd->dev;
295 295
296 if (new->readonly) 296 if (new->readonly)
297 set_disk_ro(gd, 1); 297 set_disk_ro(gd, 1);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 763d3f0a1f42..5b081cb84351 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -14,6 +14,7 @@
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/smp_lock.h> 15#include <linux/smp_lock.h>
16#include <linux/backing-dev.h> 16#include <linux/backing-dev.h>
17#include <linux/compat.h>
17 18
18#include <linux/mtd/mtd.h> 19#include <linux/mtd/mtd.h>
19#include <linux/mtd/compatmac.h> 20#include <linux/mtd/compatmac.h>
@@ -355,6 +356,100 @@ static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
355# define otp_select_filemode(f,m) -EOPNOTSUPP 356# define otp_select_filemode(f,m) -EOPNOTSUPP
356#endif 357#endif
357 358
359static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
360 uint64_t start, uint32_t length, void __user *ptr,
361 uint32_t __user *retp)
362{
363 struct mtd_oob_ops ops;
364 uint32_t retlen;
365 int ret = 0;
366
367 if (!(file->f_mode & FMODE_WRITE))
368 return -EPERM;
369
370 if (length > 4096)
371 return -EINVAL;
372
373 if (!mtd->write_oob)
374 ret = -EOPNOTSUPP;
375 else
376 ret = access_ok(VERIFY_READ, ptr, length) ? 0 : EFAULT;
377
378 if (ret)
379 return ret;
380
381 ops.ooblen = length;
382 ops.ooboffs = start & (mtd->oobsize - 1);
383 ops.datbuf = NULL;
384 ops.mode = MTD_OOB_PLACE;
385
386 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
387 return -EINVAL;
388
389 ops.oobbuf = kmalloc(length, GFP_KERNEL);
390 if (!ops.oobbuf)
391 return -ENOMEM;
392
393 if (copy_from_user(ops.oobbuf, ptr, length)) {
394 kfree(ops.oobbuf);
395 return -EFAULT;
396 }
397
398 start &= ~((uint64_t)mtd->oobsize - 1);
399 ret = mtd->write_oob(mtd, start, &ops);
400
401 if (ops.oobretlen > 0xFFFFFFFFU)
402 ret = -EOVERFLOW;
403 retlen = ops.oobretlen;
404 if (copy_to_user(retp, &retlen, sizeof(length)))
405 ret = -EFAULT;
406
407 kfree(ops.oobbuf);
408 return ret;
409}
410
411static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
412 uint32_t length, void __user *ptr, uint32_t __user *retp)
413{
414 struct mtd_oob_ops ops;
415 int ret = 0;
416
417 if (length > 4096)
418 return -EINVAL;
419
420 if (!mtd->read_oob)
421 ret = -EOPNOTSUPP;
422 else
423 ret = access_ok(VERIFY_WRITE, ptr,
424 length) ? 0 : -EFAULT;
425 if (ret)
426 return ret;
427
428 ops.ooblen = length;
429 ops.ooboffs = start & (mtd->oobsize - 1);
430 ops.datbuf = NULL;
431 ops.mode = MTD_OOB_PLACE;
432
433 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
434 return -EINVAL;
435
436 ops.oobbuf = kmalloc(length, GFP_KERNEL);
437 if (!ops.oobbuf)
438 return -ENOMEM;
439
440 start &= ~((uint64_t)mtd->oobsize - 1);
441 ret = mtd->read_oob(mtd, start, &ops);
442
443 if (put_user(ops.oobretlen, retp))
444 ret = -EFAULT;
445 else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
446 ops.oobretlen))
447 ret = -EFAULT;
448
449 kfree(ops.oobbuf);
450 return ret;
451}
452
358static int mtd_ioctl(struct inode *inode, struct file *file, 453static int mtd_ioctl(struct inode *inode, struct file *file,
359 u_int cmd, u_long arg) 454 u_int cmd, u_long arg)
360{ 455{
@@ -417,6 +512,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
417 break; 512 break;
418 513
419 case MEMERASE: 514 case MEMERASE:
515 case MEMERASE64:
420 { 516 {
421 struct erase_info *erase; 517 struct erase_info *erase;
422 518
@@ -427,20 +523,32 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
427 if (!erase) 523 if (!erase)
428 ret = -ENOMEM; 524 ret = -ENOMEM;
429 else { 525 else {
430 struct erase_info_user einfo;
431
432 wait_queue_head_t waitq; 526 wait_queue_head_t waitq;
433 DECLARE_WAITQUEUE(wait, current); 527 DECLARE_WAITQUEUE(wait, current);
434 528
435 init_waitqueue_head(&waitq); 529 init_waitqueue_head(&waitq);
436 530
437 if (copy_from_user(&einfo, argp, 531 if (cmd == MEMERASE64) {
438 sizeof(struct erase_info_user))) { 532 struct erase_info_user64 einfo64;
439 kfree(erase); 533
440 return -EFAULT; 534 if (copy_from_user(&einfo64, argp,
535 sizeof(struct erase_info_user64))) {
536 kfree(erase);
537 return -EFAULT;
538 }
539 erase->addr = einfo64.start;
540 erase->len = einfo64.length;
541 } else {
542 struct erase_info_user einfo32;
543
544 if (copy_from_user(&einfo32, argp,
545 sizeof(struct erase_info_user))) {
546 kfree(erase);
547 return -EFAULT;
548 }
549 erase->addr = einfo32.start;
550 erase->len = einfo32.length;
441 } 551 }
442 erase->addr = einfo.start;
443 erase->len = einfo.length;
444 erase->mtd = mtd; 552 erase->mtd = mtd;
445 erase->callback = mtdchar_erase_callback; 553 erase->callback = mtdchar_erase_callback;
446 erase->priv = (unsigned long)&waitq; 554 erase->priv = (unsigned long)&waitq;
@@ -474,100 +582,56 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
474 case MEMWRITEOOB: 582 case MEMWRITEOOB:
475 { 583 {
476 struct mtd_oob_buf buf; 584 struct mtd_oob_buf buf;
477 struct mtd_oob_ops ops; 585 struct mtd_oob_buf __user *buf_user = argp;
478 struct mtd_oob_buf __user *user_buf = argp;
479 uint32_t retlen;
480
481 if(!(file->f_mode & FMODE_WRITE))
482 return -EPERM;
483
484 if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
485 return -EFAULT;
486
487 if (buf.length > 4096)
488 return -EINVAL;
489
490 if (!mtd->write_oob)
491 ret = -EOPNOTSUPP;
492 else
493 ret = access_ok(VERIFY_READ, buf.ptr,
494 buf.length) ? 0 : EFAULT;
495
496 if (ret)
497 return ret;
498
499 ops.ooblen = buf.length;
500 ops.ooboffs = buf.start & (mtd->oobsize - 1);
501 ops.datbuf = NULL;
502 ops.mode = MTD_OOB_PLACE;
503
504 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
505 return -EINVAL;
506
507 ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
508 if (!ops.oobbuf)
509 return -ENOMEM;
510
511 if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
512 kfree(ops.oobbuf);
513 return -EFAULT;
514 }
515 586
516 buf.start &= ~(mtd->oobsize - 1); 587 /* NOTE: writes return length to buf_user->length */
517 ret = mtd->write_oob(mtd, buf.start, &ops); 588 if (copy_from_user(&buf, argp, sizeof(buf)))
518
519 if (ops.oobretlen > 0xFFFFFFFFU)
520 ret = -EOVERFLOW;
521 retlen = ops.oobretlen;
522 if (copy_to_user(&user_buf->length, &retlen, sizeof(buf.length)))
523 ret = -EFAULT; 589 ret = -EFAULT;
524 590 else
525 kfree(ops.oobbuf); 591 ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
592 buf.ptr, &buf_user->length);
526 break; 593 break;
527
528 } 594 }
529 595
530 case MEMREADOOB: 596 case MEMREADOOB:
531 { 597 {
532 struct mtd_oob_buf buf; 598 struct mtd_oob_buf buf;
533 struct mtd_oob_ops ops; 599 struct mtd_oob_buf __user *buf_user = argp;
534
535 if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
536 return -EFAULT;
537
538 if (buf.length > 4096)
539 return -EINVAL;
540 600
541 if (!mtd->read_oob) 601 /* NOTE: writes return length to buf_user->start */
542 ret = -EOPNOTSUPP; 602 if (copy_from_user(&buf, argp, sizeof(buf)))
603 ret = -EFAULT;
543 else 604 else
544 ret = access_ok(VERIFY_WRITE, buf.ptr, 605 ret = mtd_do_readoob(mtd, buf.start, buf.length,
545 buf.length) ? 0 : -EFAULT; 606 buf.ptr, &buf_user->start);
546 if (ret) 607 break;
547 return ret; 608 }
548
549 ops.ooblen = buf.length;
550 ops.ooboffs = buf.start & (mtd->oobsize - 1);
551 ops.datbuf = NULL;
552 ops.mode = MTD_OOB_PLACE;
553 609
554 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) 610 case MEMWRITEOOB64:
555 return -EINVAL; 611 {
612 struct mtd_oob_buf64 buf;
613 struct mtd_oob_buf64 __user *buf_user = argp;
556 614
557 ops.oobbuf = kmalloc(buf.length, GFP_KERNEL); 615 if (copy_from_user(&buf, argp, sizeof(buf)))
558 if (!ops.oobbuf) 616 ret = -EFAULT;
559 return -ENOMEM; 617 else
618 ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
619 (void __user *)(uintptr_t)buf.usr_ptr,
620 &buf_user->length);
621 break;
622 }
560 623
561 buf.start &= ~(mtd->oobsize - 1); 624 case MEMREADOOB64:
562 ret = mtd->read_oob(mtd, buf.start, &ops); 625 {
626 struct mtd_oob_buf64 buf;
627 struct mtd_oob_buf64 __user *buf_user = argp;
563 628
564 if (put_user(ops.oobretlen, (uint32_t __user *)argp)) 629 if (copy_from_user(&buf, argp, sizeof(buf)))
565 ret = -EFAULT;
566 else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
567 ops.oobretlen))
568 ret = -EFAULT; 630 ret = -EFAULT;
569 631 else
570 kfree(ops.oobbuf); 632 ret = mtd_do_readoob(mtd, buf.start, buf.length,
633 (void __user *)(uintptr_t)buf.usr_ptr,
634 &buf_user->length);
571 break; 635 break;
572 } 636 }
573 637
@@ -758,6 +822,68 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
758 return ret; 822 return ret;
759} /* memory_ioctl */ 823} /* memory_ioctl */
760 824
825#ifdef CONFIG_COMPAT
826
827struct mtd_oob_buf32 {
828 u_int32_t start;
829 u_int32_t length;
830 compat_caddr_t ptr; /* unsigned char* */
831};
832
833#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
834#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
835
836static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
837 unsigned long arg)
838{
839 struct inode *inode = file->f_path.dentry->d_inode;
840 struct mtd_file_info *mfi = file->private_data;
841 struct mtd_info *mtd = mfi->mtd;
842 void __user *argp = compat_ptr(arg);
843 int ret = 0;
844
845 lock_kernel();
846
847 switch (cmd) {
848 case MEMWRITEOOB32:
849 {
850 struct mtd_oob_buf32 buf;
851 struct mtd_oob_buf32 __user *buf_user = argp;
852
853 if (copy_from_user(&buf, argp, sizeof(buf)))
854 ret = -EFAULT;
855 else
856 ret = mtd_do_writeoob(file, mtd, buf.start,
857 buf.length, compat_ptr(buf.ptr),
858 &buf_user->length);
859 break;
860 }
861
862 case MEMREADOOB32:
863 {
864 struct mtd_oob_buf32 buf;
865 struct mtd_oob_buf32 __user *buf_user = argp;
866
867 /* NOTE: writes return length to buf->start */
868 if (copy_from_user(&buf, argp, sizeof(buf)))
869 ret = -EFAULT;
870 else
871 ret = mtd_do_readoob(mtd, buf.start,
872 buf.length, compat_ptr(buf.ptr),
873 &buf_user->start);
874 break;
875 }
876 default:
877 ret = mtd_ioctl(inode, file, cmd, (unsigned long)argp);
878 }
879
880 unlock_kernel();
881
882 return ret;
883}
884
885#endif /* CONFIG_COMPAT */
886
761/* 887/*
762 * try to determine where a shared mapping can be made 888 * try to determine where a shared mapping can be made
763 * - only supported for NOMMU at the moment (MMU can't doesn't copy private 889 * - only supported for NOMMU at the moment (MMU can't doesn't copy private
@@ -817,6 +943,9 @@ static const struct file_operations mtd_fops = {
817 .read = mtd_read, 943 .read = mtd_read,
818 .write = mtd_write, 944 .write = mtd_write,
819 .ioctl = mtd_ioctl, 945 .ioctl = mtd_ioctl,
946#ifdef CONFIG_COMPAT
947 .compat_ioctl = mtd_compat_ioctl,
948#endif
820 .open = mtd_open, 949 .open = mtd_open,
821 .release = mtd_close, 950 .release = mtd_close,
822 .mmap = mtd_mmap, 951 .mmap = mtd_mmap,
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index bccb4b1ffc46..fac54a3fa3f1 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -23,8 +23,15 @@
23 23
24#include "mtdcore.h" 24#include "mtdcore.h"
25 25
26 26static int mtd_cls_suspend(struct device *dev, pm_message_t state);
27static struct class *mtd_class; 27static int mtd_cls_resume(struct device *dev);
28
29static struct class mtd_class = {
30 .name = "mtd",
31 .owner = THIS_MODULE,
32 .suspend = mtd_cls_suspend,
33 .resume = mtd_cls_resume,
34};
28 35
29/* These are exported solely for the purpose of mtd_blkdevs.c. You 36/* These are exported solely for the purpose of mtd_blkdevs.c. You
30 should not use them for _anything_ else */ 37 should not use them for _anything_ else */
@@ -52,7 +59,26 @@ static void mtd_release(struct device *dev)
52 59
53 /* remove /dev/mtdXro node if needed */ 60 /* remove /dev/mtdXro node if needed */
54 if (index) 61 if (index)
55 device_destroy(mtd_class, index + 1); 62 device_destroy(&mtd_class, index + 1);
63}
64
65static int mtd_cls_suspend(struct device *dev, pm_message_t state)
66{
67 struct mtd_info *mtd = dev_to_mtd(dev);
68
69 if (mtd->suspend)
70 return mtd->suspend(mtd);
71 else
72 return 0;
73}
74
75static int mtd_cls_resume(struct device *dev)
76{
77 struct mtd_info *mtd = dev_to_mtd(dev);
78
79 if (mtd->resume)
80 mtd->resume(mtd);
81 return 0;
56} 82}
57 83
58static ssize_t mtd_type_show(struct device *dev, 84static ssize_t mtd_type_show(struct device *dev,
@@ -269,7 +295,7 @@ int add_mtd_device(struct mtd_info *mtd)
269 * physical device. 295 * physical device.
270 */ 296 */
271 mtd->dev.type = &mtd_devtype; 297 mtd->dev.type = &mtd_devtype;
272 mtd->dev.class = mtd_class; 298 mtd->dev.class = &mtd_class;
273 mtd->dev.devt = MTD_DEVT(i); 299 mtd->dev.devt = MTD_DEVT(i);
274 dev_set_name(&mtd->dev, "mtd%d", i); 300 dev_set_name(&mtd->dev, "mtd%d", i);
275 if (device_register(&mtd->dev) != 0) { 301 if (device_register(&mtd->dev) != 0) {
@@ -278,7 +304,7 @@ int add_mtd_device(struct mtd_info *mtd)
278 } 304 }
279 305
280 if (MTD_DEVT(i)) 306 if (MTD_DEVT(i))
281 device_create(mtd_class, mtd->dev.parent, 307 device_create(&mtd_class, mtd->dev.parent,
282 MTD_DEVT(i) + 1, 308 MTD_DEVT(i) + 1,
283 NULL, "mtd%dro", i); 309 NULL, "mtd%dro", i);
284 310
@@ -604,11 +630,12 @@ done:
604 630
605static int __init init_mtd(void) 631static int __init init_mtd(void)
606{ 632{
607 mtd_class = class_create(THIS_MODULE, "mtd"); 633 int ret;
634 ret = class_register(&mtd_class);
608 635
609 if (IS_ERR(mtd_class)) { 636 if (ret) {
610 pr_err("Error creating mtd class.\n"); 637 pr_err("Error registering mtd class: %d\n", ret);
611 return PTR_ERR(mtd_class); 638 return ret;
612 } 639 }
613#ifdef CONFIG_PROC_FS 640#ifdef CONFIG_PROC_FS
614 if ((proc_mtd = create_proc_entry( "mtd", 0, NULL ))) 641 if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
@@ -623,7 +650,7 @@ static void __exit cleanup_mtd(void)
623 if (proc_mtd) 650 if (proc_mtd)
624 remove_proc_entry( "mtd", NULL); 651 remove_proc_entry( "mtd", NULL);
625#endif /* CONFIG_PROC_FS */ 652#endif /* CONFIG_PROC_FS */
626 class_destroy(mtd_class); 653 class_unregister(&mtd_class);
627} 654}
628 655
629module_init(init_mtd); 656module_init(init_mtd);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 29675edb44b4..349fcbe5cc0f 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -27,9 +27,7 @@ struct mtd_part {
27 struct mtd_info mtd; 27 struct mtd_info mtd;
28 struct mtd_info *master; 28 struct mtd_info *master;
29 uint64_t offset; 29 uint64_t offset;
30 int index;
31 struct list_head list; 30 struct list_head list;
32 int registered;
33}; 31};
34 32
35/* 33/*
@@ -321,8 +319,7 @@ int del_mtd_partitions(struct mtd_info *master)
321 list_for_each_entry_safe(slave, next, &mtd_partitions, list) 319 list_for_each_entry_safe(slave, next, &mtd_partitions, list)
322 if (slave->master == master) { 320 if (slave->master == master) {
323 list_del(&slave->list); 321 list_del(&slave->list);
324 if (slave->registered) 322 del_mtd_device(&slave->mtd);
325 del_mtd_device(&slave->mtd);
326 kfree(slave); 323 kfree(slave);
327 } 324 }
328 325
@@ -395,7 +392,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
395 slave->mtd.get_fact_prot_info = part_get_fact_prot_info; 392 slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
396 if (master->sync) 393 if (master->sync)
397 slave->mtd.sync = part_sync; 394 slave->mtd.sync = part_sync;
398 if (!partno && master->suspend && master->resume) { 395 if (!partno && !master->dev.class && master->suspend && master->resume) {
399 slave->mtd.suspend = part_suspend; 396 slave->mtd.suspend = part_suspend;
400 slave->mtd.resume = part_resume; 397 slave->mtd.resume = part_resume;
401 } 398 }
@@ -412,7 +409,6 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
412 slave->mtd.erase = part_erase; 409 slave->mtd.erase = part_erase;
413 slave->master = master; 410 slave->master = master;
414 slave->offset = part->offset; 411 slave->offset = part->offset;
415 slave->index = partno;
416 412
417 if (slave->offset == MTDPART_OFS_APPEND) 413 if (slave->offset == MTDPART_OFS_APPEND)
418 slave->offset = cur_offset; 414 slave->offset = cur_offset;
@@ -500,15 +496,9 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
500 } 496 }
501 497
502out_register: 498out_register:
503 if (part->mtdp) { 499 /* register our partition */
504 /* store the object pointer (caller may or may not register it*/ 500 add_mtd_device(&slave->mtd);
505 *part->mtdp = &slave->mtd; 501
506 slave->registered = 0;
507 } else {
508 /* register our partition */
509 add_mtd_device(&slave->mtd);
510 slave->registered = 1;
511 }
512 return slave; 502 return slave;
513} 503}
514 504
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index f3276897859e..ce96c091f01b 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -74,6 +74,12 @@ config MTD_NAND_AMS_DELTA
74 help 74 help
75 Support for NAND flash on Amstrad E3 (Delta). 75 Support for NAND flash on Amstrad E3 (Delta).
76 76
77config MTD_NAND_OMAP2
78 tristate "NAND Flash device on OMAP2 and OMAP3"
79 depends on ARM && MTD_NAND && (ARCH_OMAP2 || ARCH_OMAP3)
80 help
81 Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
82
77config MTD_NAND_TS7250 83config MTD_NAND_TS7250
78 tristate "NAND Flash device on TS-7250 board" 84 tristate "NAND Flash device on TS-7250 board"
79 depends on MACH_TS72XX 85 depends on MACH_TS72XX
@@ -139,27 +145,27 @@ config MTD_NAND_PPCHAMELEONEVB
139 This enables the NAND flash driver on the PPChameleon EVB Board. 145 This enables the NAND flash driver on the PPChameleon EVB Board.
140 146
141config MTD_NAND_S3C2410 147config MTD_NAND_S3C2410
142 tristate "NAND Flash support for S3C2410/S3C2440 SoC" 148 tristate "NAND Flash support for Samsung S3C SoCs"
143 depends on ARCH_S3C2410 149 depends on ARCH_S3C2410 || ARCH_S3C64XX
144 help 150 help
145 This enables the NAND flash controller on the S3C2410 and S3C2440 151 This enables the NAND flash controller on the S3C24xx and S3C64xx
146 SoCs 152 SoCs
147 153
148 No board specific support is done by this driver, each board 154 No board specific support is done by this driver, each board
149 must advertise a platform_device for the driver to attach. 155 must advertise a platform_device for the driver to attach.
150 156
151config MTD_NAND_S3C2410_DEBUG 157config MTD_NAND_S3C2410_DEBUG
152 bool "S3C2410 NAND driver debug" 158 bool "Samsung S3C NAND driver debug"
153 depends on MTD_NAND_S3C2410 159 depends on MTD_NAND_S3C2410
154 help 160 help
155 Enable debugging of the S3C2410 NAND driver 161 Enable debugging of the S3C NAND driver
156 162
157config MTD_NAND_S3C2410_HWECC 163config MTD_NAND_S3C2410_HWECC
158 bool "S3C2410 NAND Hardware ECC" 164 bool "Samsung S3C NAND Hardware ECC"
159 depends on MTD_NAND_S3C2410 165 depends on MTD_NAND_S3C2410
160 help 166 help
161 Enable the use of the S3C2410's internal ECC generator when 167 Enable the use of the controller's internal ECC generator when
162 using NAND. Early versions of the chip have had problems with 168 using NAND. Early versions of the chips have had problems with
163 incorrect ECC generation, and if using these, the default of 169 incorrect ECC generation, and if using these, the default of
164 software ECC is preferable. 170 software ECC is preferable.
165 171
@@ -171,7 +177,7 @@ config MTD_NAND_NDFC
171 NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs 177 NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
172 178
173config MTD_NAND_S3C2410_CLKSTOP 179config MTD_NAND_S3C2410_CLKSTOP
174 bool "S3C2410 NAND IDLE clock stop" 180 bool "Samsung S3C NAND IDLE clock stop"
175 depends on MTD_NAND_S3C2410 181 depends on MTD_NAND_S3C2410
176 default n 182 default n
177 help 183 help
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index d33860ac42c3..f3a786b3cff3 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
25obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o 25obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
26obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o 26obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
27obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o 27obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
28obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o
28obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o 29obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
29obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o 30obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
30obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o 31obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 47a33cec3793..2802992b39da 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/moduleparam.h>
27#include <linux/platform_device.h> 28#include <linux/platform_device.h>
28#include <linux/mtd/mtd.h> 29#include <linux/mtd/mtd.h>
29#include <linux/mtd/nand.h> 30#include <linux/mtd/nand.h>
@@ -47,6 +48,9 @@
47#define no_ecc 0 48#define no_ecc 0
48#endif 49#endif
49 50
51static int on_flash_bbt = 0;
52module_param(on_flash_bbt, int, 0);
53
50/* Register access macros */ 54/* Register access macros */
51#define ecc_readl(add, reg) \ 55#define ecc_readl(add, reg) \
52 __raw_readl(add + ATMEL_ECC_##reg) 56 __raw_readl(add + ATMEL_ECC_##reg)
@@ -459,12 +463,17 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
459 463
460 if (host->board->det_pin) { 464 if (host->board->det_pin) {
461 if (gpio_get_value(host->board->det_pin)) { 465 if (gpio_get_value(host->board->det_pin)) {
462 printk("No SmartMedia card inserted.\n"); 466 printk(KERN_INFO "No SmartMedia card inserted.\n");
463 res = ENXIO; 467 res = ENXIO;
464 goto err_no_card; 468 goto err_no_card;
465 } 469 }
466 } 470 }
467 471
472 if (on_flash_bbt) {
473 printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
474 nand_chip->options |= NAND_USE_FLASH_BBT;
475 }
476
468 /* first scan to find the device and get the page size */ 477 /* first scan to find the device and get the page size */
469 if (nand_scan_ident(mtd, 1)) { 478 if (nand_scan_ident(mtd, 1)) {
470 res = -ENXIO; 479 res = -ENXIO;
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 4c2a67ca801e..8506e7e606fd 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -458,7 +458,7 @@ static irqreturn_t bf5xx_nand_dma_irq(int irq, void *dev_id)
458 return IRQ_HANDLED; 458 return IRQ_HANDLED;
459} 459}
460 460
461static int bf5xx_nand_dma_rw(struct mtd_info *mtd, 461static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
462 uint8_t *buf, int is_read) 462 uint8_t *buf, int is_read)
463{ 463{
464 struct bf5xx_nand_info *info = mtd_to_nand_info(mtd); 464 struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
@@ -496,11 +496,20 @@ static int bf5xx_nand_dma_rw(struct mtd_info *mtd,
496 /* setup DMA register with Blackfin DMA API */ 496 /* setup DMA register with Blackfin DMA API */
497 set_dma_config(CH_NFC, 0x0); 497 set_dma_config(CH_NFC, 0x0);
498 set_dma_start_addr(CH_NFC, (unsigned long) buf); 498 set_dma_start_addr(CH_NFC, (unsigned long) buf);
499
500/* The DMAs have different size on BF52x and BF54x */
501#ifdef CONFIG_BF52x
502 set_dma_x_count(CH_NFC, (page_size >> 1));
503 set_dma_x_modify(CH_NFC, 2);
504 val = DI_EN | WDSIZE_16;
505#endif
506
507#ifdef CONFIG_BF54x
499 set_dma_x_count(CH_NFC, (page_size >> 2)); 508 set_dma_x_count(CH_NFC, (page_size >> 2));
500 set_dma_x_modify(CH_NFC, 4); 509 set_dma_x_modify(CH_NFC, 4);
501
502 /* setup write or read operation */
503 val = DI_EN | WDSIZE_32; 510 val = DI_EN | WDSIZE_32;
511#endif
512 /* setup write or read operation */
504 if (is_read) 513 if (is_read)
505 val |= WNR; 514 val |= WNR;
506 set_dma_config(CH_NFC, val); 515 set_dma_config(CH_NFC, val);
@@ -512,8 +521,6 @@ static int bf5xx_nand_dma_rw(struct mtd_info *mtd,
512 else 521 else
513 bfin_write_NFC_PGCTL(0x2); 522 bfin_write_NFC_PGCTL(0x2);
514 wait_for_completion(&info->dma_completion); 523 wait_for_completion(&info->dma_completion);
515
516 return 0;
517} 524}
518 525
519static void bf5xx_nand_dma_read_buf(struct mtd_info *mtd, 526static void bf5xx_nand_dma_read_buf(struct mtd_info *mtd,
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 02700f769b8a..0fad6487e6f4 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -44,7 +44,7 @@
44 * and some flavors of secondary chipselect (e.g. based on A12) as used 44 * and some flavors of secondary chipselect (e.g. based on A12) as used
45 * with multichip packages. 45 * with multichip packages.
46 * 46 *
47 * The 1-bit ECC hardware is supported, but not yet the newer 4-bit ECC 47 * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
48 * available on chips like the DM355 and OMAP-L137 and needed with the 48 * available on chips like the DM355 and OMAP-L137 and needed with the
49 * more error-prone MLC NAND chips. 49 * more error-prone MLC NAND chips.
50 * 50 *
@@ -54,11 +54,14 @@
54struct davinci_nand_info { 54struct davinci_nand_info {
55 struct mtd_info mtd; 55 struct mtd_info mtd;
56 struct nand_chip chip; 56 struct nand_chip chip;
57 struct nand_ecclayout ecclayout;
57 58
58 struct device *dev; 59 struct device *dev;
59 struct clk *clk; 60 struct clk *clk;
60 bool partitioned; 61 bool partitioned;
61 62
63 bool is_readmode;
64
62 void __iomem *base; 65 void __iomem *base;
63 void __iomem *vaddr; 66 void __iomem *vaddr;
64 67
@@ -73,6 +76,7 @@ struct davinci_nand_info {
73}; 76};
74 77
75static DEFINE_SPINLOCK(davinci_nand_lock); 78static DEFINE_SPINLOCK(davinci_nand_lock);
79static bool ecc4_busy;
76 80
77#define to_davinci_nand(m) container_of(m, struct davinci_nand_info, mtd) 81#define to_davinci_nand(m) container_of(m, struct davinci_nand_info, mtd)
78 82
@@ -218,6 +222,192 @@ static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
218/*----------------------------------------------------------------------*/ 222/*----------------------------------------------------------------------*/
219 223
220/* 224/*
225 * 4-bit hardware ECC ... context maintained over entire AEMIF
226 *
227 * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
228 * since that forces use of a problematic "infix OOB" layout.
229 * Among other things, it trashes manufacturer bad block markers.
230 * Also, and specific to this hardware, it ECC-protects the "prepad"
231 * in the OOB ... while having ECC protection for parts of OOB would
232 * seem useful, the current MTD stack sometimes wants to update the
233 * OOB without recomputing ECC.
234 */
235
236static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
237{
238 struct davinci_nand_info *info = to_davinci_nand(mtd);
239 unsigned long flags;
240 u32 val;
241
242 spin_lock_irqsave(&davinci_nand_lock, flags);
243
244 /* Start 4-bit ECC calculation for read/write */
245 val = davinci_nand_readl(info, NANDFCR_OFFSET);
246 val &= ~(0x03 << 4);
247 val |= (info->core_chipsel << 4) | BIT(12);
248 davinci_nand_writel(info, NANDFCR_OFFSET, val);
249
250 info->is_readmode = (mode == NAND_ECC_READ);
251
252 spin_unlock_irqrestore(&davinci_nand_lock, flags);
253}
254
255/* Read raw ECC code after writing to NAND. */
256static void
257nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
258{
259 const u32 mask = 0x03ff03ff;
260
261 code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
262 code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
263 code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
264 code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
265}
266
267/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
268static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
269 const u_char *dat, u_char *ecc_code)
270{
271 struct davinci_nand_info *info = to_davinci_nand(mtd);
272 u32 raw_ecc[4], *p;
273 unsigned i;
274
275 /* After a read, terminate ECC calculation by a dummy read
276 * of some 4-bit ECC register. ECC covers everything that
277 * was read; correct() just uses the hardware state, so
278 * ecc_code is not needed.
279 */
280 if (info->is_readmode) {
281 davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
282 return 0;
283 }
284
285 /* Pack eight raw 10-bit ecc values into ten bytes, making
286 * two passes which each convert four values (in upper and
287 * lower halves of two 32-bit words) into five bytes. The
288 * ROM boot loader uses this same packing scheme.
289 */
290 nand_davinci_readecc_4bit(info, raw_ecc);
291 for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
292 *ecc_code++ = p[0] & 0xff;
293 *ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
294 *ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
295 *ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
296 *ecc_code++ = (p[1] >> 18) & 0xff;
297 }
298
299 return 0;
300}
301
302/* Correct up to 4 bits in data we just read, using state left in the
303 * hardware plus the ecc_code computed when it was first written.
304 */
305static int nand_davinci_correct_4bit(struct mtd_info *mtd,
306 u_char *data, u_char *ecc_code, u_char *null)
307{
308 int i;
309 struct davinci_nand_info *info = to_davinci_nand(mtd);
310 unsigned short ecc10[8];
311 unsigned short *ecc16;
312 u32 syndrome[4];
313 unsigned num_errors, corrected;
314
315 /* All bytes 0xff? It's an erased page; ignore its ECC. */
316 for (i = 0; i < 10; i++) {
317 if (ecc_code[i] != 0xff)
318 goto compare;
319 }
320 return 0;
321
322compare:
323 /* Unpack ten bytes into eight 10 bit values. We know we're
324 * little-endian, and use type punning for less shifting/masking.
325 */
326 if (WARN_ON(0x01 & (unsigned) ecc_code))
327 return -EINVAL;
328 ecc16 = (unsigned short *)ecc_code;
329
330 ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
331 ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
332 ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
333 ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
334 ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
335 ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
336 ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
337 ecc10[7] = (ecc16[4] >> 6) & 0x3ff;
338
339 /* Tell ECC controller about the expected ECC codes. */
340 for (i = 7; i >= 0; i--)
341 davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
342
343 /* Allow time for syndrome calculation ... then read it.
344 * A syndrome of all zeroes 0 means no detected errors.
345 */
346 davinci_nand_readl(info, NANDFSR_OFFSET);
347 nand_davinci_readecc_4bit(info, syndrome);
348 if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
349 return 0;
350
351 /* Start address calculation, and wait for it to complete.
352 * We _could_ start reading more data while this is working,
353 * to speed up the overall page read.
354 */
355 davinci_nand_writel(info, NANDFCR_OFFSET,
356 davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
357 for (;;) {
358 u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
359
360 switch ((fsr >> 8) & 0x0f) {
361 case 0: /* no error, should not happen */
362 return 0;
363 case 1: /* five or more errors detected */
364 return -EIO;
365 case 2: /* error addresses computed */
366 case 3:
367 num_errors = 1 + ((fsr >> 16) & 0x03);
368 goto correct;
369 default: /* still working on it */
370 cpu_relax();
371 continue;
372 }
373 }
374
375correct:
376 /* correct each error */
377 for (i = 0, corrected = 0; i < num_errors; i++) {
378 int error_address, error_value;
379
380 if (i > 1) {
381 error_address = davinci_nand_readl(info,
382 NAND_ERR_ADD2_OFFSET);
383 error_value = davinci_nand_readl(info,
384 NAND_ERR_ERRVAL2_OFFSET);
385 } else {
386 error_address = davinci_nand_readl(info,
387 NAND_ERR_ADD1_OFFSET);
388 error_value = davinci_nand_readl(info,
389 NAND_ERR_ERRVAL1_OFFSET);
390 }
391
392 if (i & 1) {
393 error_address >>= 16;
394 error_value >>= 16;
395 }
396 error_address &= 0x3ff;
397 error_address = (512 + 7) - error_address;
398
399 if (error_address < 512) {
400 data[error_address] ^= error_value;
401 corrected++;
402 }
403 }
404
405 return corrected;
406}
407
408/*----------------------------------------------------------------------*/
409
410/*
221 * NOTE: NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's 411 * NOTE: NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
222 * how these chips are normally wired. This translates to both 8 and 16 412 * how these chips are normally wired. This translates to both 8 and 16
223 * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4). 413 * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
@@ -294,6 +484,23 @@ static void __init nand_dm6446evm_flash_init(struct davinci_nand_info *info)
294 484
295/*----------------------------------------------------------------------*/ 485/*----------------------------------------------------------------------*/
296 486
487/* An ECC layout for using 4-bit ECC with small-page flash, storing
488 * ten ECC bytes plus the manufacturer's bad block marker byte, and
489 * and not overlapping the default BBT markers.
490 */
491static struct nand_ecclayout hwecc4_small __initconst = {
492 .eccbytes = 10,
493 .eccpos = { 0, 1, 2, 3, 4,
494 /* offset 5 holds the badblock marker */
495 6, 7,
496 13, 14, 15, },
497 .oobfree = {
498 {.offset = 8, .length = 5, },
499 {.offset = 16, },
500 },
501};
502
503
297static int __init nand_davinci_probe(struct platform_device *pdev) 504static int __init nand_davinci_probe(struct platform_device *pdev)
298{ 505{
299 struct davinci_nand_pdata *pdata = pdev->dev.platform_data; 506 struct davinci_nand_pdata *pdata = pdev->dev.platform_data;
@@ -306,6 +513,10 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
306 uint32_t val; 513 uint32_t val;
307 nand_ecc_modes_t ecc_mode; 514 nand_ecc_modes_t ecc_mode;
308 515
516 /* insist on board-specific configuration */
517 if (!pdata)
518 return -ENODEV;
519
309 /* which external chipselect will we be managing? */ 520 /* which external chipselect will we be managing? */
310 if (pdev->id < 0 || pdev->id > 3) 521 if (pdev->id < 0 || pdev->id > 3)
311 return -ENODEV; 522 return -ENODEV;
@@ -351,7 +562,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
351 info->chip.select_chip = nand_davinci_select_chip; 562 info->chip.select_chip = nand_davinci_select_chip;
352 563
353 /* options such as NAND_USE_FLASH_BBT or 16-bit widths */ 564 /* options such as NAND_USE_FLASH_BBT or 16-bit widths */
354 info->chip.options = pdata ? pdata->options : 0; 565 info->chip.options = pdata->options;
355 566
356 info->ioaddr = (uint32_t __force) vaddr; 567 info->ioaddr = (uint32_t __force) vaddr;
357 568
@@ -360,14 +571,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
360 info->mask_chipsel = pdata->mask_chipsel; 571 info->mask_chipsel = pdata->mask_chipsel;
361 572
362 /* use nandboot-capable ALE/CLE masks by default */ 573 /* use nandboot-capable ALE/CLE masks by default */
363 if (pdata && pdata->mask_ale) 574 info->mask_ale = pdata->mask_cle ? : MASK_ALE;
364 info->mask_ale = pdata->mask_cle; 575 info->mask_cle = pdata->mask_cle ? : MASK_CLE;
365 else
366 info->mask_ale = MASK_ALE;
367 if (pdata && pdata->mask_cle)
368 info->mask_cle = pdata->mask_cle;
369 else
370 info->mask_cle = MASK_CLE;
371 576
372 /* Set address of hardware control function */ 577 /* Set address of hardware control function */
373 info->chip.cmd_ctrl = nand_davinci_hwcontrol; 578 info->chip.cmd_ctrl = nand_davinci_hwcontrol;
@@ -377,30 +582,44 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
377 info->chip.read_buf = nand_davinci_read_buf; 582 info->chip.read_buf = nand_davinci_read_buf;
378 info->chip.write_buf = nand_davinci_write_buf; 583 info->chip.write_buf = nand_davinci_write_buf;
379 584
380 /* use board-specific ECC config; else, the best available */ 585 /* Use board-specific ECC config */
381 if (pdata) 586 ecc_mode = pdata->ecc_mode;
382 ecc_mode = pdata->ecc_mode;
383 else
384 ecc_mode = NAND_ECC_HW;
385 587
588 ret = -EINVAL;
386 switch (ecc_mode) { 589 switch (ecc_mode) {
387 case NAND_ECC_NONE: 590 case NAND_ECC_NONE:
388 case NAND_ECC_SOFT: 591 case NAND_ECC_SOFT:
592 pdata->ecc_bits = 0;
389 break; 593 break;
390 case NAND_ECC_HW: 594 case NAND_ECC_HW:
391 info->chip.ecc.calculate = nand_davinci_calculate_1bit; 595 if (pdata->ecc_bits == 4) {
392 info->chip.ecc.correct = nand_davinci_correct_1bit; 596 /* No sanity checks: CPUs must support this,
393 info->chip.ecc.hwctl = nand_davinci_hwctl_1bit; 597 * and the chips may not use NAND_BUSWIDTH_16.
598 */
599
600 /* No sharing 4-bit hardware between chipselects yet */
601 spin_lock_irq(&davinci_nand_lock);
602 if (ecc4_busy)
603 ret = -EBUSY;
604 else
605 ecc4_busy = true;
606 spin_unlock_irq(&davinci_nand_lock);
607
608 if (ret == -EBUSY)
609 goto err_ecc;
610
611 info->chip.ecc.calculate = nand_davinci_calculate_4bit;
612 info->chip.ecc.correct = nand_davinci_correct_4bit;
613 info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
614 info->chip.ecc.bytes = 10;
615 } else {
616 info->chip.ecc.calculate = nand_davinci_calculate_1bit;
617 info->chip.ecc.correct = nand_davinci_correct_1bit;
618 info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
619 info->chip.ecc.bytes = 3;
620 }
394 info->chip.ecc.size = 512; 621 info->chip.ecc.size = 512;
395 info->chip.ecc.bytes = 3;
396 break; 622 break;
397 case NAND_ECC_HW_SYNDROME:
398 /* FIXME implement */
399 info->chip.ecc.size = 512;
400 info->chip.ecc.bytes = 10;
401
402 dev_warn(&pdev->dev, "4-bit ECC nyet supported\n");
403 /* FALL THROUGH */
404 default: 623 default:
405 ret = -EINVAL; 624 ret = -EINVAL;
406 goto err_ecc; 625 goto err_ecc;
@@ -441,12 +660,56 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
441 spin_unlock_irq(&davinci_nand_lock); 660 spin_unlock_irq(&davinci_nand_lock);
442 661
443 /* Scan to find existence of the device(s) */ 662 /* Scan to find existence of the device(s) */
444 ret = nand_scan(&info->mtd, pdata->mask_chipsel ? 2 : 1); 663 ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1);
445 if (ret < 0) { 664 if (ret < 0) {
446 dev_dbg(&pdev->dev, "no NAND chip(s) found\n"); 665 dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
447 goto err_scan; 666 goto err_scan;
448 } 667 }
449 668
669 /* Update ECC layout if needed ... for 1-bit HW ECC, the default
670 * is OK, but it allocates 6 bytes when only 3 are needed (for
671 * each 512 bytes). For the 4-bit HW ECC, that default is not
672 * usable: 10 bytes are needed, not 6.
673 */
674 if (pdata->ecc_bits == 4) {
675 int chunks = info->mtd.writesize / 512;
676
677 if (!chunks || info->mtd.oobsize < 16) {
678 dev_dbg(&pdev->dev, "too small\n");
679 ret = -EINVAL;
680 goto err_scan;
681 }
682
683 /* For small page chips, preserve the manufacturer's
684 * badblock marking data ... and make sure a flash BBT
685 * table marker fits in the free bytes.
686 */
687 if (chunks == 1) {
688 info->ecclayout = hwecc4_small;
689 info->ecclayout.oobfree[1].length =
690 info->mtd.oobsize - 16;
691 goto syndrome_done;
692 }
693
694 /* For large page chips we'll be wanting to use a
695 * not-yet-implemented mode that reads OOB data
696 * before reading the body of the page, to avoid
697 * the "infix OOB" model of NAND_ECC_HW_SYNDROME
698 * (and preserve manufacturer badblock markings).
699 */
700 dev_warn(&pdev->dev, "no 4-bit ECC support yet "
701 "for large page NAND\n");
702 ret = -EIO;
703 goto err_scan;
704
705syndrome_done:
706 info->chip.ecc.layout = &info->ecclayout;
707 }
708
709 ret = nand_scan_tail(&info->mtd);
710 if (ret < 0)
711 goto err_scan;
712
450 if (mtd_has_partitions()) { 713 if (mtd_has_partitions()) {
451 struct mtd_partition *mtd_parts = NULL; 714 struct mtd_partition *mtd_parts = NULL;
452 int mtd_parts_nb = 0; 715 int mtd_parts_nb = 0;
@@ -455,22 +718,11 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
455 static const char *probes[] __initconst = 718 static const char *probes[] __initconst =
456 { "cmdlinepart", NULL }; 719 { "cmdlinepart", NULL };
457 720
458 const char *master_name;
459
460 /* Set info->mtd.name = 0 temporarily */
461 master_name = info->mtd.name;
462 info->mtd.name = (char *)0;
463
464 /* info->mtd.name == 0, means: don't bother checking
465 <mtd-id> */
466 mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes, 721 mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
467 &mtd_parts, 0); 722 &mtd_parts, 0);
468
469 /* Restore info->mtd.name */
470 info->mtd.name = master_name;
471 } 723 }
472 724
473 if (mtd_parts_nb <= 0 && pdata) { 725 if (mtd_parts_nb <= 0) {
474 mtd_parts = pdata->parts; 726 mtd_parts = pdata->parts;
475 mtd_parts_nb = pdata->nr_parts; 727 mtd_parts_nb = pdata->nr_parts;
476 } 728 }
@@ -483,7 +735,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
483 info->partitioned = true; 735 info->partitioned = true;
484 } 736 }
485 737
486 } else if (pdata && pdata->nr_parts) { 738 } else if (pdata->nr_parts) {
487 dev_warn(&pdev->dev, "ignoring %d default partitions on %s\n", 739 dev_warn(&pdev->dev, "ignoring %d default partitions on %s\n",
488 pdata->nr_parts, info->mtd.name); 740 pdata->nr_parts, info->mtd.name);
489 } 741 }
@@ -509,6 +761,11 @@ err_scan:
509err_clk_enable: 761err_clk_enable:
510 clk_put(info->clk); 762 clk_put(info->clk);
511 763
764 spin_lock_irq(&davinci_nand_lock);
765 if (ecc_mode == NAND_ECC_HW_SYNDROME)
766 ecc4_busy = false;
767 spin_unlock_irq(&davinci_nand_lock);
768
512err_ecc: 769err_ecc:
513err_clk: 770err_clk:
514err_ioremap: 771err_ioremap:
@@ -532,6 +789,11 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
532 else 789 else
533 status = del_mtd_device(&info->mtd); 790 status = del_mtd_device(&info->mtd);
534 791
792 spin_lock_irq(&davinci_nand_lock);
793 if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
794 ecc4_busy = false;
795 spin_unlock_irq(&davinci_nand_lock);
796
535 iounmap(info->base); 797 iounmap(info->base);
536 iounmap(info->vaddr); 798 iounmap(info->vaddr);
537 799
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 40c26080ecda..76beea40d2cf 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -138,7 +138,14 @@ static struct nand_ecclayout nand_hw_eccoob_8 = {
138static struct nand_ecclayout nand_hw_eccoob_16 = { 138static struct nand_ecclayout nand_hw_eccoob_16 = {
139 .eccbytes = 5, 139 .eccbytes = 5,
140 .eccpos = {6, 7, 8, 9, 10}, 140 .eccpos = {6, 7, 8, 9, 10},
141 .oobfree = {{0, 6}, {12, 4}, } 141 .oobfree = {{0, 5}, {11, 5}, }
142};
143
144static struct nand_ecclayout nand_hw_eccoob_64 = {
145 .eccbytes = 20,
146 .eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26,
147 38, 39, 40, 41, 42, 54, 55, 56, 57, 58},
148 .oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, }
142}; 149};
143 150
144#ifdef CONFIG_MTD_PARTITIONS 151#ifdef CONFIG_MTD_PARTITIONS
@@ -192,7 +199,7 @@ static void wait_op_done(struct mxc_nand_host *host, int max_retries,
192 } 199 }
193 udelay(1); 200 udelay(1);
194 } 201 }
195 if (max_retries <= 0) 202 if (max_retries < 0)
196 DEBUG(MTD_DEBUG_LEVEL0, "%s(%d): INT not set\n", 203 DEBUG(MTD_DEBUG_LEVEL0, "%s(%d): INT not set\n",
197 __func__, param); 204 __func__, param);
198 } 205 }
@@ -795,9 +802,13 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
795 send_addr(host, (page_addr & 0xff), false); 802 send_addr(host, (page_addr & 0xff), false);
796 803
797 if (host->pagesize_2k) { 804 if (host->pagesize_2k) {
798 send_addr(host, (page_addr >> 8) & 0xFF, false); 805 if (mtd->size >= 0x10000000) {
799 if (mtd->size >= 0x40000000) 806 /* paddr_8 - paddr_15 */
807 send_addr(host, (page_addr >> 8) & 0xff, false);
800 send_addr(host, (page_addr >> 16) & 0xff, true); 808 send_addr(host, (page_addr >> 16) & 0xff, true);
809 } else
810 /* paddr_8 - paddr_15 */
811 send_addr(host, (page_addr >> 8) & 0xff, true);
801 } else { 812 } else {
802 /* One more address cycle for higher density devices */ 813 /* One more address cycle for higher density devices */
803 if (mtd->size >= 0x4000000) { 814 if (mtd->size >= 0x4000000) {
@@ -923,7 +934,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
923 this->ecc.mode = NAND_ECC_HW; 934 this->ecc.mode = NAND_ECC_HW;
924 this->ecc.size = 512; 935 this->ecc.size = 512;
925 this->ecc.bytes = 3; 936 this->ecc.bytes = 3;
926 this->ecc.layout = &nand_hw_eccoob_8;
927 tmp = readw(host->regs + NFC_CONFIG1); 937 tmp = readw(host->regs + NFC_CONFIG1);
928 tmp |= NFC_ECC_EN; 938 tmp |= NFC_ECC_EN;
929 writew(tmp, host->regs + NFC_CONFIG1); 939 writew(tmp, host->regs + NFC_CONFIG1);
@@ -957,12 +967,44 @@ static int __init mxcnd_probe(struct platform_device *pdev)
957 this->ecc.layout = &nand_hw_eccoob_16; 967 this->ecc.layout = &nand_hw_eccoob_16;
958 } 968 }
959 969
960 host->pagesize_2k = 0; 970 /* first scan to find the device and get the page size */
971 if (nand_scan_ident(mtd, 1)) {
972 err = -ENXIO;
973 goto escan;
974 }
961 975
962 /* Scan to find existence of the device */ 976 host->pagesize_2k = (mtd->writesize == 2048) ? 1 : 0;
963 if (nand_scan(mtd, 1)) { 977
964 DEBUG(MTD_DEBUG_LEVEL0, 978 if (this->ecc.mode == NAND_ECC_HW) {
965 "MXC_ND: Unable to find any NAND device.\n"); 979 switch (mtd->oobsize) {
980 case 8:
981 this->ecc.layout = &nand_hw_eccoob_8;
982 break;
983 case 16:
984 this->ecc.layout = &nand_hw_eccoob_16;
985 break;
986 case 64:
987 this->ecc.layout = &nand_hw_eccoob_64;
988 break;
989 default:
990 /* page size not handled by HW ECC */
991 /* switching back to soft ECC */
992 this->ecc.size = 512;
993 this->ecc.bytes = 3;
994 this->ecc.layout = &nand_hw_eccoob_8;
995 this->ecc.mode = NAND_ECC_SOFT;
996 this->ecc.calculate = NULL;
997 this->ecc.correct = NULL;
998 this->ecc.hwctl = NULL;
999 tmp = readw(host->regs + NFC_CONFIG1);
1000 tmp &= ~NFC_ECC_EN;
1001 writew(tmp, host->regs + NFC_CONFIG1);
1002 break;
1003 }
1004 }
1005
1006 /* second phase scan */
1007 if (nand_scan_tail(mtd)) {
966 err = -ENXIO; 1008 err = -ENXIO;
967 goto escan; 1009 goto escan;
968 } 1010 }
@@ -985,7 +1027,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
985 return 0; 1027 return 0;
986 1028
987escan: 1029escan:
988 free_irq(host->irq, NULL); 1030 free_irq(host->irq, host);
989eirq: 1031eirq:
990 iounmap(host->regs); 1032 iounmap(host->regs);
991eres: 1033eres:
@@ -1005,7 +1047,7 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
1005 platform_set_drvdata(pdev, NULL); 1047 platform_set_drvdata(pdev, NULL);
1006 1048
1007 nand_release(&host->mtd); 1049 nand_release(&host->mtd);
1008 free_irq(host->irq, NULL); 1050 free_irq(host->irq, host);
1009 iounmap(host->regs); 1051 iounmap(host->regs);
1010 kfree(host); 1052 kfree(host);
1011 1053
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3d7ed432fa41..8c21b89d2d0c 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2756,7 +2756,8 @@ int nand_scan_tail(struct mtd_info *mtd)
2756 * the out of band area 2756 * the out of band area
2757 */ 2757 */
2758 chip->ecc.layout->oobavail = 0; 2758 chip->ecc.layout->oobavail = 0;
2759 for (i = 0; chip->ecc.layout->oobfree[i].length; i++) 2759 for (i = 0; chip->ecc.layout->oobfree[i].length
2760 && i < ARRAY_SIZE(chip->ecc.layout->oobfree); i++)
2760 chip->ecc.layout->oobavail += 2761 chip->ecc.layout->oobavail +=
2761 chip->ecc.layout->oobfree[i].length; 2762 chip->ecc.layout->oobfree[i].length;
2762 mtd->oobavail = chip->ecc.layout->oobavail; 2763 mtd->oobavail = chip->ecc.layout->oobavail;
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 868147acce2c..c0cb87d6d16e 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -428,8 +428,8 @@ EXPORT_SYMBOL(nand_calculate_ecc);
428int nand_correct_data(struct mtd_info *mtd, unsigned char *buf, 428int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
429 unsigned char *read_ecc, unsigned char *calc_ecc) 429 unsigned char *read_ecc, unsigned char *calc_ecc)
430{ 430{
431 unsigned char b0, b1, b2; 431 unsigned char b0, b1, b2, bit_addr;
432 unsigned char byte_addr, bit_addr; 432 unsigned int byte_addr;
433 /* 256 or 512 bytes/ecc */ 433 /* 256 or 512 bytes/ecc */
434 const uint32_t eccsize_mult = 434 const uint32_t eccsize_mult =
435 (((struct nand_chip *)mtd->priv)->ecc.size) >> 8; 435 (((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
new file mode 100644
index 000000000000..0cd76f89f4b0
--- /dev/null
+++ b/drivers/mtd/nand/omap2.c
@@ -0,0 +1,776 @@
1/*
2 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
3 * Copyright © 2004 Micron Technology Inc.
4 * Copyright © 2004 David Brownell
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/platform_device.h>
12#include <linux/dma-mapping.h>
13#include <linux/delay.h>
14#include <linux/mtd/mtd.h>
15#include <linux/mtd/nand.h>
16#include <linux/mtd/partitions.h>
17#include <linux/io.h>
18
19#include <asm/dma.h>
20
21#include <mach/gpmc.h>
22#include <mach/nand.h>
23
24#define GPMC_IRQ_STATUS 0x18
25#define GPMC_ECC_CONFIG 0x1F4
26#define GPMC_ECC_CONTROL 0x1F8
27#define GPMC_ECC_SIZE_CONFIG 0x1FC
28#define GPMC_ECC1_RESULT 0x200
29
30#define DRIVER_NAME "omap2-nand"
31
32/* size (4 KiB) for IO mapping */
33#define NAND_IO_SIZE SZ_4K
34
35#define NAND_WP_OFF 0
36#define NAND_WP_BIT 0x00000010
37#define WR_RD_PIN_MONITORING 0x00600000
38
39#define GPMC_BUF_FULL 0x00000001
40#define GPMC_BUF_EMPTY 0x00000000
41
42#define NAND_Ecc_P1e (1 << 0)
43#define NAND_Ecc_P2e (1 << 1)
44#define NAND_Ecc_P4e (1 << 2)
45#define NAND_Ecc_P8e (1 << 3)
46#define NAND_Ecc_P16e (1 << 4)
47#define NAND_Ecc_P32e (1 << 5)
48#define NAND_Ecc_P64e (1 << 6)
49#define NAND_Ecc_P128e (1 << 7)
50#define NAND_Ecc_P256e (1 << 8)
51#define NAND_Ecc_P512e (1 << 9)
52#define NAND_Ecc_P1024e (1 << 10)
53#define NAND_Ecc_P2048e (1 << 11)
54
55#define NAND_Ecc_P1o (1 << 16)
56#define NAND_Ecc_P2o (1 << 17)
57#define NAND_Ecc_P4o (1 << 18)
58#define NAND_Ecc_P8o (1 << 19)
59#define NAND_Ecc_P16o (1 << 20)
60#define NAND_Ecc_P32o (1 << 21)
61#define NAND_Ecc_P64o (1 << 22)
62#define NAND_Ecc_P128o (1 << 23)
63#define NAND_Ecc_P256o (1 << 24)
64#define NAND_Ecc_P512o (1 << 25)
65#define NAND_Ecc_P1024o (1 << 26)
66#define NAND_Ecc_P2048o (1 << 27)
67
68#define TF(value) (value ? 1 : 0)
69
70#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
71#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
72#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
73#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
74#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
75#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
76#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
77#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
78
79#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
80#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
81#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
82#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
83#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
84#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
85#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
86#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
87
88#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
89#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
90#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
91#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
92#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
93#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
94#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
95#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
96
97#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
98#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
99#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
100#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
101#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
102#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
103#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
104#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
105
106#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
107#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
108
109#ifdef CONFIG_MTD_PARTITIONS
110static const char *part_probes[] = { "cmdlinepart", NULL };
111#endif
112
113struct omap_nand_info {
114 struct nand_hw_control controller;
115 struct omap_nand_platform_data *pdata;
116 struct mtd_info mtd;
117 struct mtd_partition *parts;
118 struct nand_chip nand;
119 struct platform_device *pdev;
120
121 int gpmc_cs;
122 unsigned long phys_base;
123 void __iomem *gpmc_cs_baseaddr;
124 void __iomem *gpmc_baseaddr;
125};
126
127/**
128 * omap_nand_wp - This function enable or disable the Write Protect feature
129 * @mtd: MTD device structure
130 * @mode: WP ON/OFF
131 */
132static void omap_nand_wp(struct mtd_info *mtd, int mode)
133{
134 struct omap_nand_info *info = container_of(mtd,
135 struct omap_nand_info, mtd);
136
137 unsigned long config = __raw_readl(info->gpmc_baseaddr + GPMC_CONFIG);
138
139 if (mode)
140 config &= ~(NAND_WP_BIT); /* WP is ON */
141 else
142 config |= (NAND_WP_BIT); /* WP is OFF */
143
144 __raw_writel(config, (info->gpmc_baseaddr + GPMC_CONFIG));
145}
146
147/**
148 * omap_hwcontrol - hardware specific access to control-lines
149 * @mtd: MTD device structure
150 * @cmd: command to device
151 * @ctrl:
152 * NAND_NCE: bit 0 -> don't care
153 * NAND_CLE: bit 1 -> Command Latch
154 * NAND_ALE: bit 2 -> Address Latch
155 *
156 * NOTE: boards may use different bits for these!!
157 */
158static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
159{
160 struct omap_nand_info *info = container_of(mtd,
161 struct omap_nand_info, mtd);
162 switch (ctrl) {
163 case NAND_CTRL_CHANGE | NAND_CTRL_CLE:
164 info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
165 GPMC_CS_NAND_COMMAND;
166 info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
167 GPMC_CS_NAND_DATA;
168 break;
169
170 case NAND_CTRL_CHANGE | NAND_CTRL_ALE:
171 info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
172 GPMC_CS_NAND_ADDRESS;
173 info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
174 GPMC_CS_NAND_DATA;
175 break;
176
177 case NAND_CTRL_CHANGE | NAND_NCE:
178 info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
179 GPMC_CS_NAND_DATA;
180 info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
181 GPMC_CS_NAND_DATA;
182 break;
183 }
184
185 if (cmd != NAND_CMD_NONE)
186 __raw_writeb(cmd, info->nand.IO_ADDR_W);
187}
188
189/**
190 * omap_read_buf16 - read data from NAND controller into buffer
191 * @mtd: MTD device structure
192 * @buf: buffer to store date
193 * @len: number of bytes to read
194 */
195static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
196{
197 struct nand_chip *nand = mtd->priv;
198
199 __raw_readsw(nand->IO_ADDR_R, buf, len / 2);
200}
201
202/**
203 * omap_write_buf16 - write buffer to NAND controller
204 * @mtd: MTD device structure
205 * @buf: data buffer
206 * @len: number of bytes to write
207 */
208static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
209{
210 struct omap_nand_info *info = container_of(mtd,
211 struct omap_nand_info, mtd);
212 u16 *p = (u16 *) buf;
213
214 /* FIXME try bursts of writesw() or DMA ... */
215 len >>= 1;
216
217 while (len--) {
218 writew(*p++, info->nand.IO_ADDR_W);
219
220 while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr +
221 GPMC_STATUS) & GPMC_BUF_FULL))
222 ;
223 }
224}
225/**
226 * omap_verify_buf - Verify chip data against buffer
227 * @mtd: MTD device structure
228 * @buf: buffer containing the data to compare
229 * @len: number of bytes to compare
230 */
231static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
232{
233 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
234 mtd);
235 u16 *p = (u16 *) buf;
236
237 len >>= 1;
238 while (len--) {
239 if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
240 return -EFAULT;
241 }
242
243 return 0;
244}
245
246#ifdef CONFIG_MTD_NAND_OMAP_HWECC
247/**
248 * omap_hwecc_init - Initialize the HW ECC for NAND flash in GPMC controller
249 * @mtd: MTD device structure
250 */
251static void omap_hwecc_init(struct mtd_info *mtd)
252{
253 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
254 mtd);
255 struct nand_chip *chip = mtd->priv;
256 unsigned long val = 0x0;
257
258 /* Read from ECC Control Register */
259 val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONTROL);
260 /* Clear all ECC | Enable Reg1 */
261 val = ((0x00000001<<8) | 0x00000001);
262 __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
263
264 /* Read from ECC Size Config Register */
265 val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
266 /* ECCSIZE1=512 | Select eccResultsize[0-3] */
267 val = ((((chip->ecc.size >> 1) - 1) << 22) | (0x0000000F));
268 __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
269}
270
271/**
272 * gen_true_ecc - This function will generate true ECC value
273 * @ecc_buf: buffer to store ecc code
274 *
275 * This generated true ECC value can be used when correcting
276 * data read from NAND flash memory core
277 */
278static void gen_true_ecc(u8 *ecc_buf)
279{
280 u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
281 ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
282
283 ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
284 P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
285 ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
286 P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
287 ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
288 P1e(tmp) | P2048o(tmp) | P2048e(tmp));
289}
290
/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1: ecc code from nand spare area
 * @ecc_data2: ecc code from hardware register obtained from hardware ecc
 * @page_data: page data
 *
 * This function compares two ECC's and indicates if there is an error.
 * If the error can be corrected it will be corrected to the buffer.
 *
 * Returns 0 when no action is needed or a single-bit error was corrected
 * in @page_data, -1 on an uncorrectable error.
 *
 * NOTE: both @ecc_data1 and @ecc_data2 are destroyed in the process —
 * gen_true_ecc() rewrites them in place and the bit-serialising loops
 * below shift each byte down to zero.
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			    u8 *ecc_data2,	/* read from register */
			    u8 *page_data)
{
	uint	i;
	u8	tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8	comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8	ecc_bit[24];
	u8	ecc_sum = 0;
	u8	find_bit = 0;
	uint	find_byte = 0;
	int	isEccFF;

	/* Remember whether the spare-area ECC was all-0xFF (typical for an
	 * erased page).  NOTE(review): this reads the three bytes through a
	 * u32 load — assumes a 4th readable byte and little-endian layout;
	 * confirm on the targets this driver supports. */
	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	/* Undo the inversion done by gen_true_ecc() so the comparison works
	 * on the non-inverted parity values */
	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	/* Serialize every byte of both ECCs into per-bit arrays, LSB first
	 * (each loop leaves the source byte zeroed) */
	for (i = 0; i < 8; i++) {
		tmp0_bit[i] = *ecc_data1 % 2;
		*ecc_data1 = *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i] = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i] = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i] = *ecc_data2 % 2;
		*ecc_data2 = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i] = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i] = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	/* XOR corresponding bits of the two ECCs into ecc_bit[], arranged
	 * so that odd indices form the error address and even indices its
	 * complement */
	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	/* Count differing parity bits: 12 differing bits (exactly half)
	 * indicates a correctable single-bit error */
	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 * ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* UN-Correctable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error: odd-indexed XOR bits encode the byte
		 * offset, the low odd bits the bit position within it */
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9]  << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
				"offset: %d, bit: %d\n", find_byte, find_bit);

		/* Flip the faulty bit in place */
		page_data[find_byte] ^= (1 << find_bit);

		return 0;
	default:
		/* Erased page: spare ECC was all 0xFF and the computed ECC
		 * is all zero — nothing to correct */
		if (isEccFF) {
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
		return -1;
	}
}
416
417/**
418 * omap_correct_data - Compares the ECC read with HW generated ECC
419 * @mtd: MTD device structure
420 * @dat: page data
421 * @read_ecc: ecc read from nand flash
422 * @calc_ecc: ecc read from HW ECC registers
423 *
424 * Compares the ecc read from nand spare area with ECC registers values
425 * and if ECC's mismached, it will call 'omap_compare_ecc' for error detection
426 * and correction.
427 */
428static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
429 u_char *read_ecc, u_char *calc_ecc)
430{
431 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
432 mtd);
433 int blockCnt = 0, i = 0, ret = 0;
434
435 /* Ex NAND_ECC_HW12_2048 */
436 if ((info->nand.ecc.mode == NAND_ECC_HW) &&
437 (info->nand.ecc.size == 2048))
438 blockCnt = 4;
439 else
440 blockCnt = 1;
441
442 for (i = 0; i < blockCnt; i++) {
443 if (memcmp(read_ecc, calc_ecc, 3) != 0) {
444 ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
445 if (ret < 0)
446 return ret;
447 }
448 read_ecc += 3;
449 calc_ecc += 3;
450 dat += 512;
451 }
452 return 0;
453}
454
455/**
456 * omap_calcuate_ecc - Generate non-inverted ECC bytes.
457 * @mtd: MTD device structure
458 * @dat: The pointer to data on which ecc is computed
459 * @ecc_code: The ecc_code buffer
460 *
461 * Using noninverted ECC can be considered ugly since writing a blank
462 * page ie. padding will clear the ECC bytes. This is no problem as long
463 * nobody is trying to write data on the seemingly unused page. Reading
464 * an erased page will produce an ECC mismatch between generated and read
465 * ECC bytes that has to be dealt with separately.
466 */
467static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
468 u_char *ecc_code)
469{
470 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
471 mtd);
472 unsigned long val = 0x0;
473 unsigned long reg;
474
475 /* Start Reading from HW ECC1_Result = 0x200 */
476 reg = (unsigned long)(info->gpmc_baseaddr + GPMC_ECC1_RESULT);
477 val = __raw_readl(reg);
478 *ecc_code++ = val; /* P128e, ..., P1e */
479 *ecc_code++ = val >> 16; /* P128o, ..., P1o */
480 /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
481 *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
482 reg += 4;
483
484 return 0;
485}
486
487/**
488 * omap_enable_hwecc - This function enables the hardware ecc functionality
489 * @mtd: MTD device structure
490 * @mode: Read/Write mode
491 */
492static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
493{
494 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
495 mtd);
496 struct nand_chip *chip = mtd->priv;
497 unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
498 unsigned long val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONFIG);
499
500 switch (mode) {
501 case NAND_ECC_READ:
502 __raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
503 /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
504 val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
505 break;
506 case NAND_ECC_READSYN:
507 __raw_writel(0x100, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
508 /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
509 val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
510 break;
511 case NAND_ECC_WRITE:
512 __raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
513 /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
514 val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
515 break;
516 default:
517 DEBUG(MTD_DEBUG_LEVEL0, "Error: Unrecognized Mode[%d]!\n",
518 mode);
519 break;
520 }
521
522 __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONFIG);
523}
524#endif
525
526/**
527 * omap_wait - wait until the command is done
528 * @mtd: MTD device structure
529 * @chip: NAND Chip structure
530 *
531 * Wait function is called during Program and erase operations and
532 * the way it is called from MTD layer, we should wait till the NAND
533 * chip is ready after the programming/erase operation has completed.
534 *
535 * Erase can take up to 400ms and program up to 20ms according to
536 * general NAND and SmartMedia specs
537 */
538static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
539{
540 struct nand_chip *this = mtd->priv;
541 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
542 mtd);
543 unsigned long timeo = jiffies;
544 int status, state = this->state;
545
546 if (state == FL_ERASING)
547 timeo += (HZ * 400) / 1000;
548 else
549 timeo += (HZ * 20) / 1000;
550
551 this->IO_ADDR_W = (void *) info->gpmc_cs_baseaddr +
552 GPMC_CS_NAND_COMMAND;
553 this->IO_ADDR_R = (void *) info->gpmc_cs_baseaddr + GPMC_CS_NAND_DATA;
554
555 __raw_writeb(NAND_CMD_STATUS & 0xFF, this->IO_ADDR_W);
556
557 while (time_before(jiffies, timeo)) {
558 status = __raw_readb(this->IO_ADDR_R);
559 if (!(status & 0x40))
560 break;
561 }
562 return status;
563}
564
565/**
566 * omap_dev_ready - calls the platform specific dev_ready function
567 * @mtd: MTD device structure
568 */
569static int omap_dev_ready(struct mtd_info *mtd)
570{
571 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
572 mtd);
573 unsigned int val = __raw_readl(info->gpmc_baseaddr + GPMC_IRQ_STATUS);
574
575 if ((val & 0x100) == 0x100) {
576 /* Clear IRQ Interrupt */
577 val |= 0x100;
578 val &= ~(0x0);
579 __raw_writel(val, info->gpmc_baseaddr + GPMC_IRQ_STATUS);
580 } else {
581 unsigned int cnt = 0;
582 while (cnt++ < 0x1FF) {
583 if ((val & 0x100) == 0x100)
584 return 0;
585 val = __raw_readl(info->gpmc_baseaddr +
586 GPMC_IRQ_STATUS);
587 }
588 }
589
590 return 1;
591}
592
593static int __devinit omap_nand_probe(struct platform_device *pdev)
594{
595 struct omap_nand_info *info;
596 struct omap_nand_platform_data *pdata;
597 int err;
598 unsigned long val;
599
600
601 pdata = pdev->dev.platform_data;
602 if (pdata == NULL) {
603 dev_err(&pdev->dev, "platform data missing\n");
604 return -ENODEV;
605 }
606
607 info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
608 if (!info)
609 return -ENOMEM;
610
611 platform_set_drvdata(pdev, info);
612
613 spin_lock_init(&info->controller.lock);
614 init_waitqueue_head(&info->controller.wq);
615
616 info->pdev = pdev;
617
618 info->gpmc_cs = pdata->cs;
619 info->gpmc_baseaddr = pdata->gpmc_baseaddr;
620 info->gpmc_cs_baseaddr = pdata->gpmc_cs_baseaddr;
621
622 info->mtd.priv = &info->nand;
623 info->mtd.name = dev_name(&pdev->dev);
624 info->mtd.owner = THIS_MODULE;
625
626 err = gpmc_cs_request(info->gpmc_cs, NAND_IO_SIZE, &info->phys_base);
627 if (err < 0) {
628 dev_err(&pdev->dev, "Cannot request GPMC CS\n");
629 goto out_free_info;
630 }
631
632 /* Enable RD PIN Monitoring Reg */
633 if (pdata->dev_ready) {
634 val = gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1);
635 val |= WR_RD_PIN_MONITORING;
636 gpmc_cs_write_reg(info->gpmc_cs, GPMC_CS_CONFIG1, val);
637 }
638
639 val = gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG7);
640 val &= ~(0xf << 8);
641 val |= (0xc & 0xf) << 8;
642 gpmc_cs_write_reg(info->gpmc_cs, GPMC_CS_CONFIG7, val);
643
644 /* NAND write protect off */
645 omap_nand_wp(&info->mtd, NAND_WP_OFF);
646
647 if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
648 pdev->dev.driver->name)) {
649 err = -EBUSY;
650 goto out_free_cs;
651 }
652
653 info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
654 if (!info->nand.IO_ADDR_R) {
655 err = -ENOMEM;
656 goto out_release_mem_region;
657 }
658 info->nand.controller = &info->controller;
659
660 info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
661 info->nand.cmd_ctrl = omap_hwcontrol;
662
663 /* REVISIT: only supports 16-bit NAND flash */
664
665 info->nand.read_buf = omap_read_buf16;
666 info->nand.write_buf = omap_write_buf16;
667 info->nand.verify_buf = omap_verify_buf;
668
669 /*
670 * If RDY/BSY line is connected to OMAP then use the omap ready
671 * funcrtion and the generic nand_wait function which reads the status
672 * register after monitoring the RDY/BSY line.Otherwise use a standard
673 * chip delay which is slightly more than tR (AC Timing) of the NAND
674 * device and read status register until you get a failure or success
675 */
676 if (pdata->dev_ready) {
677 info->nand.dev_ready = omap_dev_ready;
678 info->nand.chip_delay = 0;
679 } else {
680 info->nand.waitfunc = omap_wait;
681 info->nand.chip_delay = 50;
682 }
683
684 info->nand.options |= NAND_SKIP_BBTSCAN;
685 if ((gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1) & 0x3000)
686 == 0x1000)
687 info->nand.options |= NAND_BUSWIDTH_16;
688
689#ifdef CONFIG_MTD_NAND_OMAP_HWECC
690 info->nand.ecc.bytes = 3;
691 info->nand.ecc.size = 512;
692 info->nand.ecc.calculate = omap_calculate_ecc;
693 info->nand.ecc.hwctl = omap_enable_hwecc;
694 info->nand.ecc.correct = omap_correct_data;
695 info->nand.ecc.mode = NAND_ECC_HW;
696
697 /* init HW ECC */
698 omap_hwecc_init(&info->mtd);
699#else
700 info->nand.ecc.mode = NAND_ECC_SOFT;
701#endif
702
703 /* DIP switches on some boards change between 8 and 16 bit
704 * bus widths for flash. Try the other width if the first try fails.
705 */
706 if (nand_scan(&info->mtd, 1)) {
707 info->nand.options ^= NAND_BUSWIDTH_16;
708 if (nand_scan(&info->mtd, 1)) {
709 err = -ENXIO;
710 goto out_release_mem_region;
711 }
712 }
713
714#ifdef CONFIG_MTD_PARTITIONS
715 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
716 if (err > 0)
717 add_mtd_partitions(&info->mtd, info->parts, err);
718 else if (pdata->parts)
719 add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
720 else
721#endif
722 add_mtd_device(&info->mtd);
723
724 platform_set_drvdata(pdev, &info->mtd);
725
726 return 0;
727
728out_release_mem_region:
729 release_mem_region(info->phys_base, NAND_IO_SIZE);
730out_free_cs:
731 gpmc_cs_free(info->gpmc_cs);
732out_free_info:
733 kfree(info);
734
735 return err;
736}
737
738static int omap_nand_remove(struct platform_device *pdev)
739{
740 struct mtd_info *mtd = platform_get_drvdata(pdev);
741 struct omap_nand_info *info = mtd->priv;
742
743 platform_set_drvdata(pdev, NULL);
744 /* Release NAND device, its internal structures and partitions */
745 nand_release(&info->mtd);
746 iounmap(info->nand.IO_ADDR_R);
747 kfree(&info->mtd);
748 return 0;
749}
750
/* Platform driver glue: binds omap_nand_probe()/omap_nand_remove() to
 * platform devices named DRIVER_NAME. */
static struct platform_driver omap_nand_driver = {
	.probe		= omap_nand_probe,
	.remove		= omap_nand_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
759
760static int __init omap_nand_init(void)
761{
762 printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);
763 return platform_driver_register(&omap_nand_driver);
764}
765
/* Module exit point: unregister the platform driver. */
static void __exit omap_nand_exit(void)
{
	platform_driver_unregister(&omap_nand_driver);
}
770
771module_init(omap_nand_init);
772module_exit(omap_nand_exit);
773
774MODULE_ALIAS(DRIVER_NAME);
775MODULE_LICENSE("GPL");
776MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index c2dfd3ea353d..7ad972229db4 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -47,6 +47,28 @@ static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl
47 writeb(cmd, nc->IO_ADDR_W + offs); 47 writeb(cmd, nc->IO_ADDR_W + offs);
48} 48}
49 49
50static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
51{
52 struct nand_chip *chip = mtd->priv;
53 void __iomem *io_base = chip->IO_ADDR_R;
54 uint64_t *buf64;
55 int i = 0;
56
57 while (len && (unsigned long)buf & 7) {
58 *buf++ = readb(io_base);
59 len--;
60 }
61 buf64 = (uint64_t *)buf;
62 while (i < len/8) {
63 uint64_t x;
64 asm ("ldrd\t%0, [%1]" : "=r" (x) : "r" (io_base));
65 buf64[i++] = x;
66 }
67 i *= 8;
68 while (i < len)
69 buf[i++] = readb(io_base);
70}
71
50static int __init orion_nand_probe(struct platform_device *pdev) 72static int __init orion_nand_probe(struct platform_device *pdev)
51{ 73{
52 struct mtd_info *mtd; 74 struct mtd_info *mtd;
@@ -83,6 +105,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
83 nc->priv = board; 105 nc->priv = board;
84 nc->IO_ADDR_R = nc->IO_ADDR_W = io_base; 106 nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
85 nc->cmd_ctrl = orion_nand_cmd_ctrl; 107 nc->cmd_ctrl = orion_nand_cmd_ctrl;
108 nc->read_buf = orion_nand_read_buf;
86 nc->ecc.mode = NAND_ECC_SOFT; 109 nc->ecc.mode = NAND_ECC_SOFT;
87 110
88 if (board->chip_delay) 111 if (board->chip_delay)
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 86e1d08eee00..4e16c6f5bdd5 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -61,6 +61,8 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
61 data->chip.cmd_ctrl = pdata->ctrl.cmd_ctrl; 61 data->chip.cmd_ctrl = pdata->ctrl.cmd_ctrl;
62 data->chip.dev_ready = pdata->ctrl.dev_ready; 62 data->chip.dev_ready = pdata->ctrl.dev_ready;
63 data->chip.select_chip = pdata->ctrl.select_chip; 63 data->chip.select_chip = pdata->ctrl.select_chip;
64 data->chip.write_buf = pdata->ctrl.write_buf;
65 data->chip.read_buf = pdata->ctrl.read_buf;
64 data->chip.chip_delay = pdata->chip.chip_delay; 66 data->chip.chip_delay = pdata->chip.chip_delay;
65 data->chip.options |= pdata->chip.options; 67 data->chip.options |= pdata->chip.options;
66 68
@@ -70,6 +72,13 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
70 72
71 platform_set_drvdata(pdev, data); 73 platform_set_drvdata(pdev, data);
72 74
75 /* Handle any platform specific setup */
76 if (pdata->ctrl.probe) {
77 res = pdata->ctrl.probe(pdev);
78 if (res)
79 goto out;
80 }
81
73 /* Scan to find existance of the device */ 82 /* Scan to find existance of the device */
74 if (nand_scan(&data->mtd, 1)) { 83 if (nand_scan(&data->mtd, 1)) {
75 res = -ENXIO; 84 res = -ENXIO;
@@ -86,6 +95,8 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
86 return 0; 95 return 0;
87 } 96 }
88 } 97 }
98 if (pdata->chip.set_parts)
99 pdata->chip.set_parts(data->mtd.size, &pdata->chip);
89 if (pdata->chip.partitions) { 100 if (pdata->chip.partitions) {
90 data->parts = pdata->chip.partitions; 101 data->parts = pdata->chip.partitions;
91 res = add_mtd_partitions(&data->mtd, data->parts, 102 res = add_mtd_partitions(&data->mtd, data->parts,
@@ -99,6 +110,8 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
99 110
100 nand_release(&data->mtd); 111 nand_release(&data->mtd);
101out: 112out:
113 if (pdata->ctrl.remove)
114 pdata->ctrl.remove(pdev);
102 platform_set_drvdata(pdev, NULL); 115 platform_set_drvdata(pdev, NULL);
103 iounmap(data->io_base); 116 iounmap(data->io_base);
104 kfree(data); 117 kfree(data);
@@ -111,15 +124,15 @@ out:
111static int __devexit plat_nand_remove(struct platform_device *pdev) 124static int __devexit plat_nand_remove(struct platform_device *pdev)
112{ 125{
113 struct plat_nand_data *data = platform_get_drvdata(pdev); 126 struct plat_nand_data *data = platform_get_drvdata(pdev);
114#ifdef CONFIG_MTD_PARTITIONS
115 struct platform_nand_data *pdata = pdev->dev.platform_data; 127 struct platform_nand_data *pdata = pdev->dev.platform_data;
116#endif
117 128
118 nand_release(&data->mtd); 129 nand_release(&data->mtd);
119#ifdef CONFIG_MTD_PARTITIONS 130#ifdef CONFIG_MTD_PARTITIONS
120 if (data->parts && data->parts != pdata->chip.partitions) 131 if (data->parts && data->parts != pdata->chip.partitions)
121 kfree(data->parts); 132 kfree(data->parts);
122#endif 133#endif
134 if (pdata->ctrl.remove)
135 pdata->ctrl.remove(pdev);
123 iounmap(data->io_base); 136 iounmap(data->io_base);
124 kfree(data); 137 kfree(data);
125 138
@@ -128,7 +141,7 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
128 141
129static struct platform_driver plat_nand_driver = { 142static struct platform_driver plat_nand_driver = {
130 .probe = plat_nand_probe, 143 .probe = plat_nand_probe,
131 .remove = plat_nand_remove, 144 .remove = __devexit_p(plat_nand_remove),
132 .driver = { 145 .driver = {
133 .name = "gen_nand", 146 .name = "gen_nand",
134 .owner = THIS_MODULE, 147 .owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 8e375d5fe231..11dc7e69c4fb 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -74,6 +74,14 @@ static struct nand_ecclayout nand_hw_eccoob = {
74 74
75struct s3c2410_nand_info; 75struct s3c2410_nand_info;
76 76
77/**
78 * struct s3c2410_nand_mtd - driver MTD structure
79 * @mtd: The MTD instance to pass to the MTD layer.
80 * @chip: The NAND chip information.
81 * @set: The platform information supplied for this set of NAND chips.
82 * @info: Link back to the hardware information.
83 * @scan_res: The result from calling nand_scan_ident().
84*/
77struct s3c2410_nand_mtd { 85struct s3c2410_nand_mtd {
78 struct mtd_info mtd; 86 struct mtd_info mtd;
79 struct nand_chip chip; 87 struct nand_chip chip;
@@ -90,6 +98,21 @@ enum s3c_cpu_type {
90 98
91/* overview of the s3c2410 nand state */ 99/* overview of the s3c2410 nand state */
92 100
101/**
102 * struct s3c2410_nand_info - NAND controller state.
103 * @mtds: An array of MTD instances on this controoler.
104 * @platform: The platform data for this board.
105 * @device: The platform device we bound to.
106 * @area: The IO area resource that came from request_mem_region().
107 * @clk: The clock resource for this controller.
108 * @regs: The area mapped for the hardware registers described by @area.
109 * @sel_reg: Pointer to the register controlling the NAND selection.
110 * @sel_bit: The bit in @sel_reg to select the NAND chip.
111 * @mtd_count: The number of MTDs created from this controller.
112 * @save_sel: The contents of @sel_reg to be saved over suspend.
113 * @clk_rate: The clock rate from @clk.
114 * @cpu_type: The exact type of this controller.
115 */
93struct s3c2410_nand_info { 116struct s3c2410_nand_info {
94 /* mtd info */ 117 /* mtd info */
95 struct nand_hw_control controller; 118 struct nand_hw_control controller;
@@ -145,12 +168,19 @@ static inline int allow_clk_stop(struct s3c2410_nand_info *info)
145 168
146#define NS_IN_KHZ 1000000 169#define NS_IN_KHZ 1000000
147 170
171/**
172 * s3c_nand_calc_rate - calculate timing data.
173 * @wanted: The cycle time in nanoseconds.
174 * @clk: The clock rate in kHz.
175 * @max: The maximum divider value.
176 *
177 * Calculate the timing value from the given parameters.
178 */
148static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max) 179static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
149{ 180{
150 int result; 181 int result;
151 182
152 result = (wanted * clk) / NS_IN_KHZ; 183 result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ);
153 result++;
154 184
155 pr_debug("result %d from %ld, %d\n", result, clk, wanted); 185 pr_debug("result %d from %ld, %d\n", result, clk, wanted);
156 186
@@ -169,13 +199,21 @@ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
169 199
170/* controller setup */ 200/* controller setup */
171 201
202/**
203 * s3c2410_nand_setrate - setup controller timing information.
204 * @info: The controller instance.
205 *
206 * Given the information supplied by the platform, calculate and set
207 * the necessary timing registers in the hardware to generate the
208 * necessary timing cycles to the hardware.
209 */
172static int s3c2410_nand_setrate(struct s3c2410_nand_info *info) 210static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
173{ 211{
174 struct s3c2410_platform_nand *plat = info->platform; 212 struct s3c2410_platform_nand *plat = info->platform;
175 int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4; 213 int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
176 int tacls, twrph0, twrph1; 214 int tacls, twrph0, twrph1;
177 unsigned long clkrate = clk_get_rate(info->clk); 215 unsigned long clkrate = clk_get_rate(info->clk);
178 unsigned long set, cfg, mask; 216 unsigned long uninitialized_var(set), cfg, uninitialized_var(mask);
179 unsigned long flags; 217 unsigned long flags;
180 218
181 /* calculate the timing information for the controller */ 219 /* calculate the timing information for the controller */
@@ -215,9 +253,9 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
215 253
216 case TYPE_S3C2440: 254 case TYPE_S3C2440:
217 case TYPE_S3C2412: 255 case TYPE_S3C2412:
218 mask = (S3C2410_NFCONF_TACLS(tacls_max - 1) | 256 mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) |
219 S3C2410_NFCONF_TWRPH0(7) | 257 S3C2440_NFCONF_TWRPH0(7) |
220 S3C2410_NFCONF_TWRPH1(7)); 258 S3C2440_NFCONF_TWRPH1(7));
221 259
222 set = S3C2440_NFCONF_TACLS(tacls - 1); 260 set = S3C2440_NFCONF_TACLS(tacls - 1);
223 set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1); 261 set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
@@ -225,14 +263,9 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
225 break; 263 break;
226 264
227 default: 265 default:
228 /* keep compiler happy */
229 mask = 0;
230 set = 0;
231 BUG(); 266 BUG();
232 } 267 }
233 268
234 dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
235
236 local_irq_save(flags); 269 local_irq_save(flags);
237 270
238 cfg = readl(info->regs + S3C2410_NFCONF); 271 cfg = readl(info->regs + S3C2410_NFCONF);
@@ -242,9 +275,18 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
242 275
243 local_irq_restore(flags); 276 local_irq_restore(flags);
244 277
278 dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
279
245 return 0; 280 return 0;
246} 281}
247 282
283/**
284 * s3c2410_nand_inithw - basic hardware initialisation
285 * @info: The hardware state.
286 *
287 * Do the basic initialisation of the hardware, using s3c2410_nand_setrate()
288 * to setup the hardware access speeds and set the controller to be enabled.
289*/
248static int s3c2410_nand_inithw(struct s3c2410_nand_info *info) 290static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
249{ 291{
250 int ret; 292 int ret;
@@ -268,8 +310,19 @@ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
268 return 0; 310 return 0;
269} 311}
270 312
271/* select chip */ 313/**
272 314 * s3c2410_nand_select_chip - select the given nand chip
315 * @mtd: The MTD instance for this chip.
316 * @chip: The chip number.
317 *
318 * This is called by the MTD layer to either select a given chip for the
319 * @mtd instance, or to indicate that the access has finished and the
320 * chip can be de-selected.
321 *
322 * The routine ensures that the nFCE line is correctly setup, and any
323 * platform specific selection code is called to route nFCE to the specific
324 * chip.
325 */
273static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip) 326static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
274{ 327{
275 struct s3c2410_nand_info *info; 328 struct s3c2410_nand_info *info;
@@ -530,7 +583,16 @@ static void s3c2410_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
530static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) 583static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
531{ 584{
532 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 585 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
533 readsl(info->regs + S3C2440_NFDATA, buf, len / 4); 586
587 readsl(info->regs + S3C2440_NFDATA, buf, len >> 2);
588
589 /* cleanup if we've got less than a word to do */
590 if (len & 3) {
591 buf += len & ~3;
592
593 for (; len & 3; len--)
594 *buf++ = readb(info->regs + S3C2440_NFDATA);
595 }
534} 596}
535 597
536static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) 598static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
@@ -542,7 +604,16 @@ static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int
542static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) 604static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
543{ 605{
544 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 606 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
545 writesl(info->regs + S3C2440_NFDATA, buf, len / 4); 607
608 writesl(info->regs + S3C2440_NFDATA, buf, len >> 2);
609
610 /* cleanup any fractional write */
611 if (len & 3) {
612 buf += len & ~3;
613
614 for (; len & 3; len--, buf++)
615 writeb(*buf, info->regs + S3C2440_NFDATA);
616 }
546} 617}
547 618
548/* cpufreq driver support */ 619/* cpufreq driver support */
@@ -593,7 +664,7 @@ static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *inf
593 664
594/* device management functions */ 665/* device management functions */
595 666
596static int s3c2410_nand_remove(struct platform_device *pdev) 667static int s3c24xx_nand_remove(struct platform_device *pdev)
597{ 668{
598 struct s3c2410_nand_info *info = to_nand_info(pdev); 669 struct s3c2410_nand_info *info = to_nand_info(pdev);
599 670
@@ -645,17 +716,31 @@ static int s3c2410_nand_remove(struct platform_device *pdev)
645} 716}
646 717
647#ifdef CONFIG_MTD_PARTITIONS 718#ifdef CONFIG_MTD_PARTITIONS
719const char *part_probes[] = { "cmdlinepart", NULL };
648static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, 720static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
649 struct s3c2410_nand_mtd *mtd, 721 struct s3c2410_nand_mtd *mtd,
650 struct s3c2410_nand_set *set) 722 struct s3c2410_nand_set *set)
651{ 723{
724 struct mtd_partition *part_info;
725 int nr_part = 0;
726
652 if (set == NULL) 727 if (set == NULL)
653 return add_mtd_device(&mtd->mtd); 728 return add_mtd_device(&mtd->mtd);
654 729
655 if (set->nr_partitions > 0 && set->partitions != NULL) { 730 if (set->nr_partitions == 0) {
656 return add_mtd_partitions(&mtd->mtd, set->partitions, set->nr_partitions); 731 mtd->mtd.name = set->name;
732 nr_part = parse_mtd_partitions(&mtd->mtd, part_probes,
733 &part_info, 0);
734 } else {
735 if (set->nr_partitions > 0 && set->partitions != NULL) {
736 nr_part = set->nr_partitions;
737 part_info = set->partitions;
738 }
657 } 739 }
658 740
741 if (nr_part > 0 && part_info)
742 return add_mtd_partitions(&mtd->mtd, part_info, nr_part);
743
659 return add_mtd_device(&mtd->mtd); 744 return add_mtd_device(&mtd->mtd);
660} 745}
661#else 746#else
@@ -667,11 +752,16 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
667} 752}
668#endif 753#endif
669 754
670/* s3c2410_nand_init_chip 755/**
756 * s3c2410_nand_init_chip - initialise a single instance of an chip
757 * @info: The base NAND controller the chip is on.
758 * @nmtd: The new controller MTD instance to fill in.
759 * @set: The information passed from the board specific platform data.
671 * 760 *
672 * init a single instance of an chip 761 * Initialise the given @nmtd from the information in @info and @set. This
673*/ 762 * readies the structure for use with the MTD layer functions by ensuring
674 763 * all pointers are setup and the necessary control routines selected.
764 */
675static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, 765static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
676 struct s3c2410_nand_mtd *nmtd, 766 struct s3c2410_nand_mtd *nmtd,
677 struct s3c2410_nand_set *set) 767 struct s3c2410_nand_set *set)
@@ -757,14 +847,40 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
757 847
758 if (set->disable_ecc) 848 if (set->disable_ecc)
759 chip->ecc.mode = NAND_ECC_NONE; 849 chip->ecc.mode = NAND_ECC_NONE;
850
851 switch (chip->ecc.mode) {
852 case NAND_ECC_NONE:
853 dev_info(info->device, "NAND ECC disabled\n");
854 break;
855 case NAND_ECC_SOFT:
856 dev_info(info->device, "NAND soft ECC\n");
857 break;
858 case NAND_ECC_HW:
859 dev_info(info->device, "NAND hardware ECC\n");
860 break;
861 default:
862 dev_info(info->device, "NAND ECC UNKNOWN\n");
863 break;
864 }
865
866 /* If you use u-boot BBT creation code, specifying this flag will
867 * let the kernel fish out the BBT from the NAND, and also skip the
868 * full NAND scan that can take 1/2s or so. Little things... */
869 if (set->flash_bbt)
870 chip->options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
760} 871}
761 872
762/* s3c2410_nand_update_chip 873/**
874 * s3c2410_nand_update_chip - post probe update
875 * @info: The controller instance.
876 * @nmtd: The driver version of the MTD instance.
763 * 877 *
764 * post-probe chip update, to change any items, such as the 878 * This routine is called after the chip probe has succesfully completed
765 * layout for large page nand 879 * and the relevant per-chip information updated. This call ensure that
766 */ 880 * we update the internal state accordingly.
767 881 *
882 * The internal state is currently limited to the ECC state information.
883*/
768static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info, 884static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
769 struct s3c2410_nand_mtd *nmtd) 885 struct s3c2410_nand_mtd *nmtd)
770{ 886{
@@ -773,33 +889,33 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
773 dev_dbg(info->device, "chip %p => page shift %d\n", 889 dev_dbg(info->device, "chip %p => page shift %d\n",
774 chip, chip->page_shift); 890 chip, chip->page_shift);
775 891
776 if (hardware_ecc) { 892 if (chip->ecc.mode != NAND_ECC_HW)
893 return;
894
777 /* change the behaviour depending on wether we are using 895 /* change the behaviour depending on wether we are using
778 * the large or small page nand device */ 896 * the large or small page nand device */
779 897
780 if (chip->page_shift > 10) { 898 if (chip->page_shift > 10) {
781 chip->ecc.size = 256; 899 chip->ecc.size = 256;
782 chip->ecc.bytes = 3; 900 chip->ecc.bytes = 3;
783 } else { 901 } else {
784 chip->ecc.size = 512; 902 chip->ecc.size = 512;
785 chip->ecc.bytes = 3; 903 chip->ecc.bytes = 3;
786 chip->ecc.layout = &nand_hw_eccoob; 904 chip->ecc.layout = &nand_hw_eccoob;
787 }
788 } 905 }
789} 906}
790 907
791/* s3c2410_nand_probe 908/* s3c24xx_nand_probe
792 * 909 *
793 * called by device layer when it finds a device matching 910 * called by device layer when it finds a device matching
794 * one our driver can handled. This code checks to see if 911 * one our driver can handled. This code checks to see if
795 * it can allocate all necessary resources then calls the 912 * it can allocate all necessary resources then calls the
796 * nand layer to look for devices 913 * nand layer to look for devices
797*/ 914*/
798 915static int s3c24xx_nand_probe(struct platform_device *pdev)
799static int s3c24xx_nand_probe(struct platform_device *pdev,
800 enum s3c_cpu_type cpu_type)
801{ 916{
802 struct s3c2410_platform_nand *plat = to_nand_plat(pdev); 917 struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
918 enum s3c_cpu_type cpu_type;
803 struct s3c2410_nand_info *info; 919 struct s3c2410_nand_info *info;
804 struct s3c2410_nand_mtd *nmtd; 920 struct s3c2410_nand_mtd *nmtd;
805 struct s3c2410_nand_set *sets; 921 struct s3c2410_nand_set *sets;
@@ -809,6 +925,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
809 int nr_sets; 925 int nr_sets;
810 int setno; 926 int setno;
811 927
928 cpu_type = platform_get_device_id(pdev)->driver_data;
929
812 pr_debug("s3c2410_nand_probe(%p)\n", pdev); 930 pr_debug("s3c2410_nand_probe(%p)\n", pdev);
813 931
814 info = kmalloc(sizeof(*info), GFP_KERNEL); 932 info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -922,7 +1040,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
922 return 0; 1040 return 0;
923 1041
924 exit_error: 1042 exit_error:
925 s3c2410_nand_remove(pdev); 1043 s3c24xx_nand_remove(pdev);
926 1044
927 if (err == 0) 1045 if (err == 0)
928 err = -EINVAL; 1046 err = -EINVAL;
@@ -983,50 +1101,33 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
983 1101
984/* driver device registration */ 1102/* driver device registration */
985 1103
986static int s3c2410_nand_probe(struct platform_device *dev) 1104static struct platform_device_id s3c24xx_driver_ids[] = {
987{ 1105 {
988 return s3c24xx_nand_probe(dev, TYPE_S3C2410); 1106 .name = "s3c2410-nand",
989} 1107 .driver_data = TYPE_S3C2410,
990 1108 }, {
991static int s3c2440_nand_probe(struct platform_device *dev) 1109 .name = "s3c2440-nand",
992{ 1110 .driver_data = TYPE_S3C2440,
993 return s3c24xx_nand_probe(dev, TYPE_S3C2440); 1111 }, {
994} 1112 .name = "s3c2412-nand",
995 1113 .driver_data = TYPE_S3C2412,
996static int s3c2412_nand_probe(struct platform_device *dev) 1114 }, {
997{ 1115 .name = "s3c6400-nand",
998 return s3c24xx_nand_probe(dev, TYPE_S3C2412); 1116 .driver_data = TYPE_S3C2412, /* compatible with 2412 */
999}
1000
1001static struct platform_driver s3c2410_nand_driver = {
1002 .probe = s3c2410_nand_probe,
1003 .remove = s3c2410_nand_remove,
1004 .suspend = s3c24xx_nand_suspend,
1005 .resume = s3c24xx_nand_resume,
1006 .driver = {
1007 .name = "s3c2410-nand",
1008 .owner = THIS_MODULE,
1009 }, 1117 },
1118 { }
1010}; 1119};
1011 1120
1012static struct platform_driver s3c2440_nand_driver = { 1121MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
1013 .probe = s3c2440_nand_probe,
1014 .remove = s3c2410_nand_remove,
1015 .suspend = s3c24xx_nand_suspend,
1016 .resume = s3c24xx_nand_resume,
1017 .driver = {
1018 .name = "s3c2440-nand",
1019 .owner = THIS_MODULE,
1020 },
1021};
1022 1122
1023static struct platform_driver s3c2412_nand_driver = { 1123static struct platform_driver s3c24xx_nand_driver = {
1024 .probe = s3c2412_nand_probe, 1124 .probe = s3c24xx_nand_probe,
1025 .remove = s3c2410_nand_remove, 1125 .remove = s3c24xx_nand_remove,
1026 .suspend = s3c24xx_nand_suspend, 1126 .suspend = s3c24xx_nand_suspend,
1027 .resume = s3c24xx_nand_resume, 1127 .resume = s3c24xx_nand_resume,
1128 .id_table = s3c24xx_driver_ids,
1028 .driver = { 1129 .driver = {
1029 .name = "s3c2412-nand", 1130 .name = "s3c24xx-nand",
1030 .owner = THIS_MODULE, 1131 .owner = THIS_MODULE,
1031 }, 1132 },
1032}; 1133};
@@ -1035,16 +1136,12 @@ static int __init s3c2410_nand_init(void)
1035{ 1136{
1036 printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n"); 1137 printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n");
1037 1138
1038 platform_driver_register(&s3c2412_nand_driver); 1139 return platform_driver_register(&s3c24xx_nand_driver);
1039 platform_driver_register(&s3c2440_nand_driver);
1040 return platform_driver_register(&s3c2410_nand_driver);
1041} 1140}
1042 1141
1043static void __exit s3c2410_nand_exit(void) 1142static void __exit s3c2410_nand_exit(void)
1044{ 1143{
1045 platform_driver_unregister(&s3c2412_nand_driver); 1144 platform_driver_unregister(&s3c24xx_nand_driver);
1046 platform_driver_unregister(&s3c2440_nand_driver);
1047 platform_driver_unregister(&s3c2410_nand_driver);
1048} 1145}
1049 1146
1050module_init(s3c2410_nand_init); 1147module_init(s3c2410_nand_init);
@@ -1053,6 +1150,3 @@ module_exit(s3c2410_nand_exit);
1053MODULE_LICENSE("GPL"); 1150MODULE_LICENSE("GPL");
1054MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); 1151MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
1055MODULE_DESCRIPTION("S3C24XX MTD NAND driver"); 1152MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
1056MODULE_ALIAS("platform:s3c2410-nand");
1057MODULE_ALIAS("platform:s3c2412-nand");
1058MODULE_ALIAS("platform:s3c2440-nand");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 812479264896..488088eff2ca 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -64,7 +64,7 @@ struct txx9ndfmc_priv {
64 struct nand_chip chip; 64 struct nand_chip chip;
65 struct mtd_info mtd; 65 struct mtd_info mtd;
66 int cs; 66 int cs;
67 char mtdname[BUS_ID_SIZE + 2]; 67 const char *mtdname;
68}; 68};
69 69
70#define MAX_TXX9NDFMC_DEV 4 70#define MAX_TXX9NDFMC_DEV 4
@@ -334,16 +334,23 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
334 334
335 if (plat->ch_mask != 1) { 335 if (plat->ch_mask != 1) {
336 txx9_priv->cs = i; 336 txx9_priv->cs = i;
337 sprintf(txx9_priv->mtdname, "%s.%u", 337 txx9_priv->mtdname = kasprintf(GFP_KERNEL, "%s.%u",
338 dev_name(&dev->dev), i); 338 dev_name(&dev->dev), i);
339 } else { 339 } else {
340 txx9_priv->cs = -1; 340 txx9_priv->cs = -1;
341 strcpy(txx9_priv->mtdname, dev_name(&dev->dev)); 341 txx9_priv->mtdname = kstrdup(dev_name(&dev->dev),
342 GFP_KERNEL);
343 }
344 if (!txx9_priv->mtdname) {
345 kfree(txx9_priv);
346 dev_err(&dev->dev, "Unable to allocate MTD name.\n");
347 continue;
342 } 348 }
343 if (plat->wide_mask & (1 << i)) 349 if (plat->wide_mask & (1 << i))
344 chip->options |= NAND_BUSWIDTH_16; 350 chip->options |= NAND_BUSWIDTH_16;
345 351
346 if (nand_scan(mtd, 1)) { 352 if (nand_scan(mtd, 1)) {
353 kfree(txx9_priv->mtdname);
347 kfree(txx9_priv); 354 kfree(txx9_priv);
348 continue; 355 continue;
349 } 356 }
@@ -385,6 +392,7 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
385 kfree(drvdata->parts[i]); 392 kfree(drvdata->parts[i]);
386#endif 393#endif
387 del_mtd_device(mtd); 394 del_mtd_device(mtd);
395 kfree(txx9_priv->mtdname);
388 kfree(txx9_priv); 396 kfree(txx9_priv);
389 } 397 }
390 return 0; 398 return 0;
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 6391e3dc8002..38d656b9b2ee 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -565,7 +565,7 @@ int omap2_onenand_rephase(void)
565 NULL, __adjust_timing); 565 NULL, __adjust_timing);
566} 566}
567 567
568static void __devexit omap2_onenand_shutdown(struct platform_device *pdev) 568static void omap2_onenand_shutdown(struct platform_device *pdev)
569{ 569{
570 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev); 570 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
571 571
@@ -777,7 +777,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
777 777
778static struct platform_driver omap2_onenand_driver = { 778static struct platform_driver omap2_onenand_driver = {
779 .probe = omap2_onenand_probe, 779 .probe = omap2_onenand_probe,
780 .remove = omap2_onenand_remove, 780 .remove = __devexit_p(omap2_onenand_remove),
781 .shutdown = omap2_onenand_shutdown, 781 .shutdown = omap2_onenand_shutdown,
782 .driver = { 782 .driver = {
783 .name = DRIVER_NAME, 783 .name = DRIVER_NAME,
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 30d6999e5f9f..6e829095ea9d 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -9,6 +9,10 @@
9 * auto-placement support, read-while load support, various fixes 9 * auto-placement support, read-while load support, various fixes
10 * Copyright (C) Nokia Corporation, 2007 10 * Copyright (C) Nokia Corporation, 2007
11 * 11 *
12 * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
13 * Flex-OneNAND support
14 * Copyright (C) Samsung Electronics, 2008
15 *
12 * This program is free software; you can redistribute it and/or modify 16 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as 17 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation. 18 * published by the Free Software Foundation.
@@ -16,6 +20,7 @@
16 20
17#include <linux/kernel.h> 21#include <linux/kernel.h>
18#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/moduleparam.h>
19#include <linux/init.h> 24#include <linux/init.h>
20#include <linux/sched.h> 25#include <linux/sched.h>
21#include <linux/delay.h> 26#include <linux/delay.h>
@@ -27,6 +32,38 @@
27 32
28#include <asm/io.h> 33#include <asm/io.h>
29 34
35/* Default Flex-OneNAND boundary and lock respectively */
36static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 };
37
38module_param_array(flex_bdry, int, NULL, 0400);
39MODULE_PARM_DESC(flex_bdry, "SLC Boundary information for Flex-OneNAND"
40 "Syntax:flex_bdry=DIE_BDRY,LOCK,..."
41 "DIE_BDRY: SLC boundary of the die"
42 "LOCK: Locking information for SLC boundary"
43 " : 0->Set boundary in unlocked status"
44 " : 1->Set boundary in locked status");
45
46/**
47 * onenand_oob_128 - oob info for Flex-Onenand with 4KB page
48 * For now, we expose only 64 out of 80 ecc bytes
49 */
50static struct nand_ecclayout onenand_oob_128 = {
51 .eccbytes = 64,
52 .eccpos = {
53 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
54 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
55 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
56 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
57 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
58 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
59 102, 103, 104, 105
60 },
61 .oobfree = {
62 {2, 4}, {18, 4}, {34, 4}, {50, 4},
63 {66, 4}, {82, 4}, {98, 4}, {114, 4}
64 }
65};
66
30/** 67/**
31 * onenand_oob_64 - oob info for large (2KB) page 68 * onenand_oob_64 - oob info for large (2KB) page
32 */ 69 */
@@ -65,6 +102,14 @@ static const unsigned char ffchars[] = {
65 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 48 */ 102 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 48 */
66 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 103 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
67 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 64 */ 104 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 64 */
105 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
106 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 80 */
107 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
108 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 96 */
109 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
110 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 112 */
111 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
112 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 128 */
68}; 113};
69 114
70/** 115/**
@@ -171,6 +216,70 @@ static int onenand_buffer_address(int dataram1, int sectors, int count)
171} 216}
172 217
173/** 218/**
219 * flexonenand_block- For given address return block number
220 * @param this - OneNAND device structure
221 * @param addr - Address for which block number is needed
222 */
223static unsigned flexonenand_block(struct onenand_chip *this, loff_t addr)
224{
225 unsigned boundary, blk, die = 0;
226
227 if (ONENAND_IS_DDP(this) && addr >= this->diesize[0]) {
228 die = 1;
229 addr -= this->diesize[0];
230 }
231
232 boundary = this->boundary[die];
233
234 blk = addr >> (this->erase_shift - 1);
235 if (blk > boundary)
236 blk = (blk + boundary + 1) >> 1;
237
238 blk += die ? this->density_mask : 0;
239 return blk;
240}
241
242inline unsigned onenand_block(struct onenand_chip *this, loff_t addr)
243{
244 if (!FLEXONENAND(this))
245 return addr >> this->erase_shift;
246 return flexonenand_block(this, addr);
247}
248
249/**
250 * flexonenand_addr - Return address of the block
251 * @this: OneNAND device structure
252 * @block: Block number on Flex-OneNAND
253 *
254 * Return address of the block
255 */
256static loff_t flexonenand_addr(struct onenand_chip *this, int block)
257{
258 loff_t ofs = 0;
259 int die = 0, boundary;
260
261 if (ONENAND_IS_DDP(this) && block >= this->density_mask) {
262 block -= this->density_mask;
263 die = 1;
264 ofs = this->diesize[0];
265 }
266
267 boundary = this->boundary[die];
268 ofs += (loff_t)block << (this->erase_shift - 1);
269 if (block > (boundary + 1))
270 ofs += (loff_t)(block - boundary - 1) << (this->erase_shift - 1);
271 return ofs;
272}
273
274loff_t onenand_addr(struct onenand_chip *this, int block)
275{
276 if (!FLEXONENAND(this))
277 return (loff_t)block << this->erase_shift;
278 return flexonenand_addr(this, block);
279}
280EXPORT_SYMBOL(onenand_addr);
281
282/**
174 * onenand_get_density - [DEFAULT] Get OneNAND density 283 * onenand_get_density - [DEFAULT] Get OneNAND density
175 * @param dev_id OneNAND device ID 284 * @param dev_id OneNAND device ID
176 * 285 *
@@ -183,6 +292,22 @@ static inline int onenand_get_density(int dev_id)
183} 292}
184 293
185/** 294/**
295 * flexonenand_region - [Flex-OneNAND] Return erase region of addr
296 * @param mtd MTD device structure
297 * @param addr address whose erase region needs to be identified
298 */
299int flexonenand_region(struct mtd_info *mtd, loff_t addr)
300{
301 int i;
302
303 for (i = 0; i < mtd->numeraseregions; i++)
304 if (addr < mtd->eraseregions[i].offset)
305 break;
306 return i - 1;
307}
308EXPORT_SYMBOL(flexonenand_region);
309
310/**
186 * onenand_command - [DEFAULT] Send command to OneNAND device 311 * onenand_command - [DEFAULT] Send command to OneNAND device
187 * @param mtd MTD device structure 312 * @param mtd MTD device structure
188 * @param cmd the command to be sent 313 * @param cmd the command to be sent
@@ -207,16 +332,28 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
207 page = -1; 332 page = -1;
208 break; 333 break;
209 334
335 case FLEXONENAND_CMD_PI_ACCESS:
336 /* addr contains die index */
337 block = addr * this->density_mask;
338 page = -1;
339 break;
340
210 case ONENAND_CMD_ERASE: 341 case ONENAND_CMD_ERASE:
211 case ONENAND_CMD_BUFFERRAM: 342 case ONENAND_CMD_BUFFERRAM:
212 case ONENAND_CMD_OTP_ACCESS: 343 case ONENAND_CMD_OTP_ACCESS:
213 block = (int) (addr >> this->erase_shift); 344 block = onenand_block(this, addr);
214 page = -1; 345 page = -1;
215 break; 346 break;
216 347
348 case FLEXONENAND_CMD_READ_PI:
349 cmd = ONENAND_CMD_READ;
350 block = addr * this->density_mask;
351 page = 0;
352 break;
353
217 default: 354 default:
218 block = (int) (addr >> this->erase_shift); 355 block = onenand_block(this, addr);
219 page = (int) (addr >> this->page_shift); 356 page = (int) (addr - onenand_addr(this, block)) >> this->page_shift;
220 357
221 if (ONENAND_IS_2PLANE(this)) { 358 if (ONENAND_IS_2PLANE(this)) {
222 /* Make the even block number */ 359 /* Make the even block number */
@@ -236,7 +373,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
236 value = onenand_bufferram_address(this, block); 373 value = onenand_bufferram_address(this, block);
237 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2); 374 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
238 375
239 if (ONENAND_IS_2PLANE(this)) 376 if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this))
240 /* It is always BufferRAM0 */ 377 /* It is always BufferRAM0 */
241 ONENAND_SET_BUFFERRAM0(this); 378 ONENAND_SET_BUFFERRAM0(this);
242 else 379 else
@@ -258,13 +395,18 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
258 395
259 if (page != -1) { 396 if (page != -1) {
260 /* Now we use page size operation */ 397 /* Now we use page size operation */
261 int sectors = 4, count = 4; 398 int sectors = 0, count = 0;
262 int dataram; 399 int dataram;
263 400
264 switch (cmd) { 401 switch (cmd) {
402 case FLEXONENAND_CMD_RECOVER_LSB:
265 case ONENAND_CMD_READ: 403 case ONENAND_CMD_READ:
266 case ONENAND_CMD_READOOB: 404 case ONENAND_CMD_READOOB:
267 dataram = ONENAND_SET_NEXT_BUFFERRAM(this); 405 if (ONENAND_IS_MLC(this))
406 /* It is always BufferRAM0 */
407 dataram = ONENAND_SET_BUFFERRAM0(this);
408 else
409 dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
268 break; 410 break;
269 411
270 default: 412 default:
@@ -293,6 +435,30 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
293} 435}
294 436
295/** 437/**
438 * onenand_read_ecc - return ecc status
439 * @param this onenand chip structure
440 */
441static inline int onenand_read_ecc(struct onenand_chip *this)
442{
443 int ecc, i, result = 0;
444
445 if (!FLEXONENAND(this))
446 return this->read_word(this->base + ONENAND_REG_ECC_STATUS);
447
448 for (i = 0; i < 4; i++) {
449 ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i);
450 if (likely(!ecc))
451 continue;
452 if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR)
453 return ONENAND_ECC_2BIT_ALL;
454 else
455 result = ONENAND_ECC_1BIT_ALL;
456 }
457
458 return result;
459}
460
461/**
296 * onenand_wait - [DEFAULT] wait until the command is done 462 * onenand_wait - [DEFAULT] wait until the command is done
297 * @param mtd MTD device structure 463 * @param mtd MTD device structure
298 * @param state state to select the max. timeout value 464 * @param state state to select the max. timeout value
@@ -331,14 +497,14 @@ static int onenand_wait(struct mtd_info *mtd, int state)
331 * power off recovery (POR) test, it should read ECC status first 497 * power off recovery (POR) test, it should read ECC status first
332 */ 498 */
333 if (interrupt & ONENAND_INT_READ) { 499 if (interrupt & ONENAND_INT_READ) {
334 int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS); 500 int ecc = onenand_read_ecc(this);
335 if (ecc) { 501 if (ecc) {
336 if (ecc & ONENAND_ECC_2BIT_ALL) { 502 if (ecc & ONENAND_ECC_2BIT_ALL) {
337 printk(KERN_ERR "onenand_wait: ECC error = 0x%04x\n", ecc); 503 printk(KERN_ERR "onenand_wait: ECC error = 0x%04x\n", ecc);
338 mtd->ecc_stats.failed++; 504 mtd->ecc_stats.failed++;
339 return -EBADMSG; 505 return -EBADMSG;
340 } else if (ecc & ONENAND_ECC_1BIT_ALL) { 506 } else if (ecc & ONENAND_ECC_1BIT_ALL) {
341 printk(KERN_INFO "onenand_wait: correctable ECC error = 0x%04x\n", ecc); 507 printk(KERN_DEBUG "onenand_wait: correctable ECC error = 0x%04x\n", ecc);
342 mtd->ecc_stats.corrected++; 508 mtd->ecc_stats.corrected++;
343 } 509 }
344 } 510 }
@@ -656,7 +822,7 @@ static int onenand_check_bufferram(struct mtd_info *mtd, loff_t addr)
656 822
657 if (found && ONENAND_IS_DDP(this)) { 823 if (found && ONENAND_IS_DDP(this)) {
658 /* Select DataRAM for DDP */ 824 /* Select DataRAM for DDP */
659 int block = (int) (addr >> this->erase_shift); 825 int block = onenand_block(this, addr);
660 int value = onenand_bufferram_address(this, block); 826 int value = onenand_bufferram_address(this, block);
661 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2); 827 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
662 } 828 }
@@ -816,6 +982,149 @@ static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int col
816} 982}
817 983
818/** 984/**
985 * onenand_recover_lsb - [Flex-OneNAND] Recover LSB page data
986 * @param mtd MTD device structure
987 * @param addr address to recover
988 * @param status return value from onenand_wait / onenand_bbt_wait
989 *
990 * MLC NAND Flash cell has paired pages - LSB page and MSB page. LSB page has
991 * lower page address and MSB page has higher page address in paired pages.
992 * If power off occurs during MSB page program, the paired LSB page data can
993 * become corrupt. LSB page recovery read is a way to read LSB page though page
994 * data are corrupted. When uncorrectable error occurs as a result of LSB page
995 * read after power up, issue LSB page recovery read.
996 */
997static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
998{
999 struct onenand_chip *this = mtd->priv;
1000 int i;
1001
1002 /* Recovery is only for Flex-OneNAND */
1003 if (!FLEXONENAND(this))
1004 return status;
1005
1006 /* check if we failed due to uncorrectable error */
1007 if (status != -EBADMSG && status != ONENAND_BBT_READ_ECC_ERROR)
1008 return status;
1009
1010 /* check if address lies in MLC region */
1011 i = flexonenand_region(mtd, addr);
1012 if (mtd->eraseregions[i].erasesize < (1 << this->erase_shift))
1013 return status;
1014
1015 /* We are attempting to reread, so decrement stats.failed
1016 * which was incremented by onenand_wait due to read failure
1017 */
1018 printk(KERN_INFO "onenand_recover_lsb: Attempting to recover from uncorrectable read\n");
1019 mtd->ecc_stats.failed--;
1020
1021 /* Issue the LSB page recovery command */
1022 this->command(mtd, FLEXONENAND_CMD_RECOVER_LSB, addr, this->writesize);
1023 return this->wait(mtd, FL_READING);
1024}
1025
1026/**
1027 * onenand_mlc_read_ops_nolock - MLC OneNAND read main and/or out-of-band
1028 * @param mtd MTD device structure
1029 * @param from offset to read from
1030 * @param ops: oob operation description structure
1031 *
1032 * MLC OneNAND / Flex-OneNAND has 4KB page size and 4KB dataram.
1033 * So, read-while-load is not present.
1034 */
1035static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1036 struct mtd_oob_ops *ops)
1037{
1038 struct onenand_chip *this = mtd->priv;
1039 struct mtd_ecc_stats stats;
1040 size_t len = ops->len;
1041 size_t ooblen = ops->ooblen;
1042 u_char *buf = ops->datbuf;
1043 u_char *oobbuf = ops->oobbuf;
1044 int read = 0, column, thislen;
1045 int oobread = 0, oobcolumn, thisooblen, oobsize;
1046 int ret = 0;
1047 int writesize = this->writesize;
1048
1049 DEBUG(MTD_DEBUG_LEVEL3, "onenand_mlc_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
1050
1051 if (ops->mode == MTD_OOB_AUTO)
1052 oobsize = this->ecclayout->oobavail;
1053 else
1054 oobsize = mtd->oobsize;
1055
1056 oobcolumn = from & (mtd->oobsize - 1);
1057
1058 /* Do not allow reads past end of device */
1059 if (from + len > mtd->size) {
1060 printk(KERN_ERR "onenand_mlc_read_ops_nolock: Attempt read beyond end of device\n");
1061 ops->retlen = 0;
1062 ops->oobretlen = 0;
1063 return -EINVAL;
1064 }
1065
1066 stats = mtd->ecc_stats;
1067
1068 while (read < len) {
1069 cond_resched();
1070
1071 thislen = min_t(int, writesize, len - read);
1072
1073 column = from & (writesize - 1);
1074 if (column + thislen > writesize)
1075 thislen = writesize - column;
1076
1077 if (!onenand_check_bufferram(mtd, from)) {
1078 this->command(mtd, ONENAND_CMD_READ, from, writesize);
1079
1080 ret = this->wait(mtd, FL_READING);
1081 if (unlikely(ret))
1082 ret = onenand_recover_lsb(mtd, from, ret);
1083 onenand_update_bufferram(mtd, from, !ret);
1084 if (ret == -EBADMSG)
1085 ret = 0;
1086 }
1087
1088 this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
1089 if (oobbuf) {
1090 thisooblen = oobsize - oobcolumn;
1091 thisooblen = min_t(int, thisooblen, ooblen - oobread);
1092
1093 if (ops->mode == MTD_OOB_AUTO)
1094 onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
1095 else
1096 this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
1097 oobread += thisooblen;
1098 oobbuf += thisooblen;
1099 oobcolumn = 0;
1100 }
1101
1102 read += thislen;
1103 if (read == len)
1104 break;
1105
1106 from += thislen;
1107 buf += thislen;
1108 }
1109
1110 /*
1111 * Return success, if no ECC failures, else -EBADMSG
1112 * fs driver will take care of that, because
1113 * retlen == desired len and result == -EBADMSG
1114 */
1115 ops->retlen = read;
1116 ops->oobretlen = oobread;
1117
1118 if (ret)
1119 return ret;
1120
1121 if (mtd->ecc_stats.failed - stats.failed)
1122 return -EBADMSG;
1123
1124 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
1125}
1126
1127/**
819 * onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band 1128 * onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band
820 * @param mtd MTD device structure 1129 * @param mtd MTD device structure
821 * @param from offset to read from 1130 * @param from offset to read from
@@ -962,7 +1271,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
962 size_t len = ops->ooblen; 1271 size_t len = ops->ooblen;
963 mtd_oob_mode_t mode = ops->mode; 1272 mtd_oob_mode_t mode = ops->mode;
964 u_char *buf = ops->oobbuf; 1273 u_char *buf = ops->oobbuf;
965 int ret = 0; 1274 int ret = 0, readcmd;
966 1275
967 from += ops->ooboffs; 1276 from += ops->ooboffs;
968 1277
@@ -993,17 +1302,22 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
993 1302
994 stats = mtd->ecc_stats; 1303 stats = mtd->ecc_stats;
995 1304
1305 readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
1306
996 while (read < len) { 1307 while (read < len) {
997 cond_resched(); 1308 cond_resched();
998 1309
999 thislen = oobsize - column; 1310 thislen = oobsize - column;
1000 thislen = min_t(int, thislen, len); 1311 thislen = min_t(int, thislen, len);
1001 1312
1002 this->command(mtd, ONENAND_CMD_READOOB, from, mtd->oobsize); 1313 this->command(mtd, readcmd, from, mtd->oobsize);
1003 1314
1004 onenand_update_bufferram(mtd, from, 0); 1315 onenand_update_bufferram(mtd, from, 0);
1005 1316
1006 ret = this->wait(mtd, FL_READING); 1317 ret = this->wait(mtd, FL_READING);
1318 if (unlikely(ret))
1319 ret = onenand_recover_lsb(mtd, from, ret);
1320
1007 if (ret && ret != -EBADMSG) { 1321 if (ret && ret != -EBADMSG) {
1008 printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret); 1322 printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
1009 break; 1323 break;
@@ -1053,6 +1367,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1053static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len, 1367static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
1054 size_t *retlen, u_char *buf) 1368 size_t *retlen, u_char *buf)
1055{ 1369{
1370 struct onenand_chip *this = mtd->priv;
1056 struct mtd_oob_ops ops = { 1371 struct mtd_oob_ops ops = {
1057 .len = len, 1372 .len = len,
1058 .ooblen = 0, 1373 .ooblen = 0,
@@ -1062,7 +1377,9 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
1062 int ret; 1377 int ret;
1063 1378
1064 onenand_get_device(mtd, FL_READING); 1379 onenand_get_device(mtd, FL_READING);
1065 ret = onenand_read_ops_nolock(mtd, from, &ops); 1380 ret = ONENAND_IS_MLC(this) ?
1381 onenand_mlc_read_ops_nolock(mtd, from, &ops) :
1382 onenand_read_ops_nolock(mtd, from, &ops);
1066 onenand_release_device(mtd); 1383 onenand_release_device(mtd);
1067 1384
1068 *retlen = ops.retlen; 1385 *retlen = ops.retlen;
@@ -1080,6 +1397,7 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
1080static int onenand_read_oob(struct mtd_info *mtd, loff_t from, 1397static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
1081 struct mtd_oob_ops *ops) 1398 struct mtd_oob_ops *ops)
1082{ 1399{
1400 struct onenand_chip *this = mtd->priv;
1083 int ret; 1401 int ret;
1084 1402
1085 switch (ops->mode) { 1403 switch (ops->mode) {
@@ -1094,7 +1412,9 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
1094 1412
1095 onenand_get_device(mtd, FL_READING); 1413 onenand_get_device(mtd, FL_READING);
1096 if (ops->datbuf) 1414 if (ops->datbuf)
1097 ret = onenand_read_ops_nolock(mtd, from, ops); 1415 ret = ONENAND_IS_MLC(this) ?
1416 onenand_mlc_read_ops_nolock(mtd, from, ops) :
1417 onenand_read_ops_nolock(mtd, from, ops);
1098 else 1418 else
1099 ret = onenand_read_oob_nolock(mtd, from, ops); 1419 ret = onenand_read_oob_nolock(mtd, from, ops);
1100 onenand_release_device(mtd); 1420 onenand_release_device(mtd);
@@ -1128,11 +1448,11 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
1128 ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS); 1448 ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
1129 1449
1130 if (interrupt & ONENAND_INT_READ) { 1450 if (interrupt & ONENAND_INT_READ) {
1131 int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS); 1451 int ecc = onenand_read_ecc(this);
1132 if (ecc & ONENAND_ECC_2BIT_ALL) { 1452 if (ecc & ONENAND_ECC_2BIT_ALL) {
1133 printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x" 1453 printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x"
1134 ", controller error 0x%04x\n", ecc, ctrl); 1454 ", controller error 0x%04x\n", ecc, ctrl);
1135 return ONENAND_BBT_READ_ERROR; 1455 return ONENAND_BBT_READ_ECC_ERROR;
1136 } 1456 }
1137 } else { 1457 } else {
1138 printk(KERN_ERR "onenand_bbt_wait: read timeout!" 1458 printk(KERN_ERR "onenand_bbt_wait: read timeout!"
@@ -1163,7 +1483,7 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
1163{ 1483{
1164 struct onenand_chip *this = mtd->priv; 1484 struct onenand_chip *this = mtd->priv;
1165 int read = 0, thislen, column; 1485 int read = 0, thislen, column;
1166 int ret = 0; 1486 int ret = 0, readcmd;
1167 size_t len = ops->ooblen; 1487 size_t len = ops->ooblen;
1168 u_char *buf = ops->oobbuf; 1488 u_char *buf = ops->oobbuf;
1169 1489
@@ -1183,17 +1503,22 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
1183 1503
1184 column = from & (mtd->oobsize - 1); 1504 column = from & (mtd->oobsize - 1);
1185 1505
1506 readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
1507
1186 while (read < len) { 1508 while (read < len) {
1187 cond_resched(); 1509 cond_resched();
1188 1510
1189 thislen = mtd->oobsize - column; 1511 thislen = mtd->oobsize - column;
1190 thislen = min_t(int, thislen, len); 1512 thislen = min_t(int, thislen, len);
1191 1513
1192 this->command(mtd, ONENAND_CMD_READOOB, from, mtd->oobsize); 1514 this->command(mtd, readcmd, from, mtd->oobsize);
1193 1515
1194 onenand_update_bufferram(mtd, from, 0); 1516 onenand_update_bufferram(mtd, from, 0);
1195 1517
1196 ret = onenand_bbt_wait(mtd, FL_READING); 1518 ret = this->bbt_wait(mtd, FL_READING);
1519 if (unlikely(ret))
1520 ret = onenand_recover_lsb(mtd, from, ret);
1521
1197 if (ret) 1522 if (ret)
1198 break; 1523 break;
1199 1524
@@ -1230,9 +1555,11 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
1230{ 1555{
1231 struct onenand_chip *this = mtd->priv; 1556 struct onenand_chip *this = mtd->priv;
1232 u_char *oob_buf = this->oob_buf; 1557 u_char *oob_buf = this->oob_buf;
1233 int status, i; 1558 int status, i, readcmd;
1234 1559
1235 this->command(mtd, ONENAND_CMD_READOOB, to, mtd->oobsize); 1560 readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
1561
1562 this->command(mtd, readcmd, to, mtd->oobsize);
1236 onenand_update_bufferram(mtd, to, 0); 1563 onenand_update_bufferram(mtd, to, 0);
1237 status = this->wait(mtd, FL_READING); 1564 status = this->wait(mtd, FL_READING);
1238 if (status) 1565 if (status)
@@ -1633,7 +1960,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
1633{ 1960{
1634 struct onenand_chip *this = mtd->priv; 1961 struct onenand_chip *this = mtd->priv;
1635 int column, ret = 0, oobsize; 1962 int column, ret = 0, oobsize;
1636 int written = 0; 1963 int written = 0, oobcmd;
1637 u_char *oobbuf; 1964 u_char *oobbuf;
1638 size_t len = ops->ooblen; 1965 size_t len = ops->ooblen;
1639 const u_char *buf = ops->oobbuf; 1966 const u_char *buf = ops->oobbuf;
@@ -1675,6 +2002,8 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
1675 2002
1676 oobbuf = this->oob_buf; 2003 oobbuf = this->oob_buf;
1677 2004
2005 oobcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
2006
1678 /* Loop until all data write */ 2007 /* Loop until all data write */
1679 while (written < len) { 2008 while (written < len) {
1680 int thislen = min_t(int, oobsize, len - written); 2009 int thislen = min_t(int, oobsize, len - written);
@@ -1692,7 +2021,14 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
1692 memcpy(oobbuf + column, buf, thislen); 2021 memcpy(oobbuf + column, buf, thislen);
1693 this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize); 2022 this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
1694 2023
1695 this->command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize); 2024 if (ONENAND_IS_MLC(this)) {
2025 /* Set main area of DataRAM to 0xff*/
2026 memset(this->page_buf, 0xff, mtd->writesize);
2027 this->write_bufferram(mtd, ONENAND_DATARAM,
2028 this->page_buf, 0, mtd->writesize);
2029 }
2030
2031 this->command(mtd, oobcmd, to, mtd->oobsize);
1696 2032
1697 onenand_update_bufferram(mtd, to, 0); 2033 onenand_update_bufferram(mtd, to, 0);
1698 if (ONENAND_IS_2PLANE(this)) { 2034 if (ONENAND_IS_2PLANE(this)) {
@@ -1815,29 +2151,48 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
1815{ 2151{
1816 struct onenand_chip *this = mtd->priv; 2152 struct onenand_chip *this = mtd->priv;
1817 unsigned int block_size; 2153 unsigned int block_size;
1818 loff_t addr; 2154 loff_t addr = instr->addr;
1819 int len; 2155 loff_t len = instr->len;
1820 int ret = 0; 2156 int ret = 0, i;
2157 struct mtd_erase_region_info *region = NULL;
2158 loff_t region_end = 0;
1821 2159
1822 DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len); 2160 DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len);
1823 2161
1824 block_size = (1 << this->erase_shift); 2162 /* Do not allow erase past end of device */
1825 2163 if (unlikely((len + addr) > mtd->size)) {
1826 /* Start address must align on block boundary */ 2164 printk(KERN_ERR "onenand_erase: Erase past end of device\n");
1827 if (unlikely(instr->addr & (block_size - 1))) {
1828 printk(KERN_ERR "onenand_erase: Unaligned address\n");
1829 return -EINVAL; 2165 return -EINVAL;
1830 } 2166 }
1831 2167
1832 /* Length must align on block boundary */ 2168 if (FLEXONENAND(this)) {
1833 if (unlikely(instr->len & (block_size - 1))) { 2169 /* Find the eraseregion of this address */
1834 printk(KERN_ERR "onenand_erase: Length not block aligned\n"); 2170 i = flexonenand_region(mtd, addr);
1835 return -EINVAL; 2171 region = &mtd->eraseregions[i];
2172
2173 block_size = region->erasesize;
2174 region_end = region->offset + region->erasesize * region->numblocks;
2175
2176 /* Start address within region must align on block boundary.
2177 * Erase region's start offset is always block start address.
2178 */
2179 if (unlikely((addr - region->offset) & (block_size - 1))) {
2180 printk(KERN_ERR "onenand_erase: Unaligned address\n");
2181 return -EINVAL;
2182 }
2183 } else {
2184 block_size = 1 << this->erase_shift;
2185
2186 /* Start address must align on block boundary */
2187 if (unlikely(addr & (block_size - 1))) {
2188 printk(KERN_ERR "onenand_erase: Unaligned address\n");
2189 return -EINVAL;
2190 }
1836 } 2191 }
1837 2192
1838 /* Do not allow erase past end of device */ 2193 /* Length must align on block boundary */
1839 if (unlikely((instr->len + instr->addr) > mtd->size)) { 2194 if (unlikely(len & (block_size - 1))) {
1840 printk(KERN_ERR "onenand_erase: Erase past end of device\n"); 2195 printk(KERN_ERR "onenand_erase: Length not block aligned\n");
1841 return -EINVAL; 2196 return -EINVAL;
1842 } 2197 }
1843 2198
@@ -1847,9 +2202,6 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
1847 onenand_get_device(mtd, FL_ERASING); 2202 onenand_get_device(mtd, FL_ERASING);
1848 2203
1849 /* Loop throught the pages */ 2204 /* Loop throught the pages */
1850 len = instr->len;
1851 addr = instr->addr;
1852
1853 instr->state = MTD_ERASING; 2205 instr->state = MTD_ERASING;
1854 2206
1855 while (len) { 2207 while (len) {
@@ -1869,7 +2221,8 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
1869 ret = this->wait(mtd, FL_ERASING); 2221 ret = this->wait(mtd, FL_ERASING);
1870 /* Check, if it is write protected */ 2222 /* Check, if it is write protected */
1871 if (ret) { 2223 if (ret) {
1872 printk(KERN_ERR "onenand_erase: Failed erase, block %d\n", (unsigned) (addr >> this->erase_shift)); 2224 printk(KERN_ERR "onenand_erase: Failed erase, block %d\n",
2225 onenand_block(this, addr));
1873 instr->state = MTD_ERASE_FAILED; 2226 instr->state = MTD_ERASE_FAILED;
1874 instr->fail_addr = addr; 2227 instr->fail_addr = addr;
1875 goto erase_exit; 2228 goto erase_exit;
@@ -1877,6 +2230,22 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
1877 2230
1878 len -= block_size; 2231 len -= block_size;
1879 addr += block_size; 2232 addr += block_size;
2233
2234 if (addr == region_end) {
2235 if (!len)
2236 break;
2237 region++;
2238
2239 block_size = region->erasesize;
2240 region_end = region->offset + region->erasesize * region->numblocks;
2241
2242 if (len & (block_size - 1)) {
2243 /* FIXME: This should be handled at MTD partitioning level. */
2244 printk(KERN_ERR "onenand_erase: Unaligned address\n");
2245 goto erase_exit;
2246 }
2247 }
2248
1880 } 2249 }
1881 2250
1882 instr->state = MTD_ERASE_DONE; 2251 instr->state = MTD_ERASE_DONE;
@@ -1955,13 +2324,17 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
1955 int block; 2324 int block;
1956 2325
1957 /* Get block number */ 2326 /* Get block number */
1958 block = ((int) ofs) >> bbm->bbt_erase_shift; 2327 block = onenand_block(this, ofs);
1959 if (bbm->bbt) 2328 if (bbm->bbt)
1960 bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); 2329 bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
1961 2330
1962 /* We write two bytes, so we dont have to mess with 16 bit access */ 2331 /* We write two bytes, so we dont have to mess with 16 bit access */
1963 ofs += mtd->oobsize + (bbm->badblockpos & ~0x01); 2332 ofs += mtd->oobsize + (bbm->badblockpos & ~0x01);
1964 return onenand_write_oob_nolock(mtd, ofs, &ops); 2333 /* FIXME : What to do when marking SLC block in partition
2334 * with MLC erasesize? For now, it is not advisable to
2335 * create partitions containing both SLC and MLC regions.
2336 */
2337 return onenand_write_oob_nolock(mtd, ofs, &ops);
1965} 2338}
1966 2339
1967/** 2340/**
@@ -2005,8 +2378,8 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
2005 int start, end, block, value, status; 2378 int start, end, block, value, status;
2006 int wp_status_mask; 2379 int wp_status_mask;
2007 2380
2008 start = ofs >> this->erase_shift; 2381 start = onenand_block(this, ofs);
2009 end = len >> this->erase_shift; 2382 end = onenand_block(this, ofs + len) - 1;
2010 2383
2011 if (cmd == ONENAND_CMD_LOCK) 2384 if (cmd == ONENAND_CMD_LOCK)
2012 wp_status_mask = ONENAND_WP_LS; 2385 wp_status_mask = ONENAND_WP_LS;
@@ -2018,7 +2391,7 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
2018 /* Set start block address */ 2391 /* Set start block address */
2019 this->write_word(start, this->base + ONENAND_REG_START_BLOCK_ADDRESS); 2392 this->write_word(start, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
2020 /* Set end block address */ 2393 /* Set end block address */
2021 this->write_word(start + end - 1, this->base + ONENAND_REG_END_BLOCK_ADDRESS); 2394 this->write_word(end, this->base + ONENAND_REG_END_BLOCK_ADDRESS);
2022 /* Write lock command */ 2395 /* Write lock command */
2023 this->command(mtd, cmd, 0, 0); 2396 this->command(mtd, cmd, 0, 0);
2024 2397
@@ -2039,7 +2412,7 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
2039 } 2412 }
2040 2413
2041 /* Block lock scheme */ 2414 /* Block lock scheme */
2042 for (block = start; block < start + end; block++) { 2415 for (block = start; block < end + 1; block++) {
2043 /* Set block address */ 2416 /* Set block address */
2044 value = onenand_block_address(this, block); 2417 value = onenand_block_address(this, block);
2045 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1); 2418 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
@@ -2147,7 +2520,7 @@ static void onenand_unlock_all(struct mtd_info *mtd)
2147{ 2520{
2148 struct onenand_chip *this = mtd->priv; 2521 struct onenand_chip *this = mtd->priv;
2149 loff_t ofs = 0; 2522 loff_t ofs = 0;
2150 size_t len = this->chipsize; 2523 loff_t len = mtd->size;
2151 2524
2152 if (this->options & ONENAND_HAS_UNLOCK_ALL) { 2525 if (this->options & ONENAND_HAS_UNLOCK_ALL) {
2153 /* Set start block address */ 2526 /* Set start block address */
@@ -2163,12 +2536,16 @@ static void onenand_unlock_all(struct mtd_info *mtd)
2163 & ONENAND_CTRL_ONGO) 2536 & ONENAND_CTRL_ONGO)
2164 continue; 2537 continue;
2165 2538
2539 /* Don't check lock status */
2540 if (this->options & ONENAND_SKIP_UNLOCK_CHECK)
2541 return;
2542
2166 /* Check lock status */ 2543 /* Check lock status */
2167 if (onenand_check_lock_status(this)) 2544 if (onenand_check_lock_status(this))
2168 return; 2545 return;
2169 2546
2170 /* Workaround for all block unlock in DDP */ 2547 /* Workaround for all block unlock in DDP */
2171 if (ONENAND_IS_DDP(this)) { 2548 if (ONENAND_IS_DDP(this) && !FLEXONENAND(this)) {
2172 /* All blocks on another chip */ 2549 /* All blocks on another chip */
2173 ofs = this->chipsize >> 1; 2550 ofs = this->chipsize >> 1;
2174 len = this->chipsize >> 1; 2551 len = this->chipsize >> 1;
@@ -2210,7 +2587,9 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
2210 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0); 2587 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
2211 this->wait(mtd, FL_OTPING); 2588 this->wait(mtd, FL_OTPING);
2212 2589
2213 ret = onenand_read_ops_nolock(mtd, from, &ops); 2590 ret = ONENAND_IS_MLC(this) ?
2591 onenand_mlc_read_ops_nolock(mtd, from, &ops) :
2592 onenand_read_ops_nolock(mtd, from, &ops);
2214 2593
2215 /* Exit OTP access mode */ 2594 /* Exit OTP access mode */
2216 this->command(mtd, ONENAND_CMD_RESET, 0, 0); 2595 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
@@ -2277,21 +2656,32 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
2277 size_t *retlen, u_char *buf) 2656 size_t *retlen, u_char *buf)
2278{ 2657{
2279 struct onenand_chip *this = mtd->priv; 2658 struct onenand_chip *this = mtd->priv;
2280 struct mtd_oob_ops ops = { 2659 struct mtd_oob_ops ops;
2281 .mode = MTD_OOB_PLACE,
2282 .ooblen = len,
2283 .oobbuf = buf,
2284 .ooboffs = 0,
2285 };
2286 int ret; 2660 int ret;
2287 2661
2288 /* Enter OTP access mode */ 2662 /* Enter OTP access mode */
2289 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0); 2663 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
2290 this->wait(mtd, FL_OTPING); 2664 this->wait(mtd, FL_OTPING);
2291 2665
2292 ret = onenand_write_oob_nolock(mtd, from, &ops); 2666 if (FLEXONENAND(this)) {
2293 2667 /*
2294 *retlen = ops.oobretlen; 2668 * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
2669 * main area of page 49.
2670 */
2671 ops.len = mtd->writesize;
2672 ops.ooblen = 0;
2673 ops.datbuf = buf;
2674 ops.oobbuf = NULL;
2675 ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops);
2676 *retlen = ops.retlen;
2677 } else {
2678 ops.mode = MTD_OOB_PLACE;
2679 ops.ooblen = len;
2680 ops.oobbuf = buf;
2681 ops.ooboffs = 0;
2682 ret = onenand_write_oob_nolock(mtd, from, &ops);
2683 *retlen = ops.oobretlen;
2684 }
2295 2685
2296 /* Exit OTP access mode */ 2686 /* Exit OTP access mode */
2297 this->command(mtd, ONENAND_CMD_RESET, 0, 0); 2687 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
@@ -2475,27 +2865,34 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
2475 size_t len) 2865 size_t len)
2476{ 2866{
2477 struct onenand_chip *this = mtd->priv; 2867 struct onenand_chip *this = mtd->priv;
2478 u_char *oob_buf = this->oob_buf; 2868 u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf;
2479 size_t retlen; 2869 size_t retlen;
2480 int ret; 2870 int ret;
2481 2871
2482 memset(oob_buf, 0xff, mtd->oobsize); 2872 memset(buf, 0xff, FLEXONENAND(this) ? this->writesize
2873 : mtd->oobsize);
2483 /* 2874 /*
2484 * Note: OTP lock operation 2875 * Note: OTP lock operation
2485 * OTP block : 0xXXFC 2876 * OTP block : 0xXXFC
2486 * 1st block : 0xXXF3 (If chip support) 2877 * 1st block : 0xXXF3 (If chip support)
2487 * Both : 0xXXF0 (If chip support) 2878 * Both : 0xXXF0 (If chip support)
2488 */ 2879 */
2489 oob_buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC; 2880 if (FLEXONENAND(this))
2881 buf[FLEXONENAND_OTP_LOCK_OFFSET] = 0xFC;
2882 else
2883 buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC;
2490 2884
2491 /* 2885 /*
2492 * Write lock mark to 8th word of sector0 of page0 of the spare0. 2886 * Write lock mark to 8th word of sector0 of page0 of the spare0.
2493 * We write 16 bytes spare area instead of 2 bytes. 2887 * We write 16 bytes spare area instead of 2 bytes.
2888 * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
2889 * main area of page 49.
2494 */ 2890 */
2891
2495 from = 0; 2892 from = 0;
2496 len = 16; 2893 len = FLEXONENAND(this) ? mtd->writesize : 16;
2497 2894
2498 ret = onenand_otp_walk(mtd, from, len, &retlen, oob_buf, do_otp_lock, MTD_OTP_USER); 2895 ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER);
2499 2896
2500 return ret ? : retlen; 2897 return ret ? : retlen;
2501} 2898}
@@ -2542,6 +2939,14 @@ static void onenand_check_features(struct mtd_info *mtd)
2542 break; 2939 break;
2543 } 2940 }
2544 2941
2942 if (ONENAND_IS_MLC(this))
2943 this->options &= ~ONENAND_HAS_2PLANE;
2944
2945 if (FLEXONENAND(this)) {
2946 this->options &= ~ONENAND_HAS_CONT_LOCK;
2947 this->options |= ONENAND_HAS_UNLOCK_ALL;
2948 }
2949
2545 if (this->options & ONENAND_HAS_CONT_LOCK) 2950 if (this->options & ONENAND_HAS_CONT_LOCK)
2546 printk(KERN_DEBUG "Lock scheme is Continuous Lock\n"); 2951 printk(KERN_DEBUG "Lock scheme is Continuous Lock\n");
2547 if (this->options & ONENAND_HAS_UNLOCK_ALL) 2952 if (this->options & ONENAND_HAS_UNLOCK_ALL)
@@ -2559,14 +2964,16 @@ static void onenand_check_features(struct mtd_info *mtd)
2559 */ 2964 */
2560static void onenand_print_device_info(int device, int version) 2965static void onenand_print_device_info(int device, int version)
2561{ 2966{
2562 int vcc, demuxed, ddp, density; 2967 int vcc, demuxed, ddp, density, flexonenand;
2563 2968
2564 vcc = device & ONENAND_DEVICE_VCC_MASK; 2969 vcc = device & ONENAND_DEVICE_VCC_MASK;
2565 demuxed = device & ONENAND_DEVICE_IS_DEMUX; 2970 demuxed = device & ONENAND_DEVICE_IS_DEMUX;
2566 ddp = device & ONENAND_DEVICE_IS_DDP; 2971 ddp = device & ONENAND_DEVICE_IS_DDP;
2567 density = onenand_get_density(device); 2972 density = onenand_get_density(device);
2568 printk(KERN_INFO "%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n", 2973 flexonenand = device & DEVICE_IS_FLEXONENAND;
2569 demuxed ? "" : "Muxed ", 2974 printk(KERN_INFO "%s%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
2975 demuxed ? "" : "Muxed ",
2976 flexonenand ? "Flex-" : "",
2570 ddp ? "(DDP)" : "", 2977 ddp ? "(DDP)" : "",
2571 (16 << density), 2978 (16 << density),
2572 vcc ? "2.65/3.3" : "1.8", 2979 vcc ? "2.65/3.3" : "1.8",
@@ -2576,6 +2983,7 @@ static void onenand_print_device_info(int device, int version)
2576 2983
2577static const struct onenand_manufacturers onenand_manuf_ids[] = { 2984static const struct onenand_manufacturers onenand_manuf_ids[] = {
2578 {ONENAND_MFR_SAMSUNG, "Samsung"}, 2985 {ONENAND_MFR_SAMSUNG, "Samsung"},
2986 {ONENAND_MFR_NUMONYX, "Numonyx"},
2579}; 2987};
2580 2988
2581/** 2989/**
@@ -2605,6 +3013,261 @@ static int onenand_check_maf(int manuf)
2605} 3013}
2606 3014
2607/** 3015/**
3016* flexonenand_get_boundary - Reads the SLC boundary
3017* @param onenand_info - onenand info structure
3018**/
3019static int flexonenand_get_boundary(struct mtd_info *mtd)
3020{
3021 struct onenand_chip *this = mtd->priv;
3022 unsigned die, bdry;
3023 int ret, syscfg, locked;
3024
3025 /* Disable ECC */
3026 syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
3027 this->write_word((syscfg | 0x0100), this->base + ONENAND_REG_SYS_CFG1);
3028
3029 for (die = 0; die < this->dies; die++) {
3030 this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
3031 this->wait(mtd, FL_SYNCING);
3032
3033 this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
3034 ret = this->wait(mtd, FL_READING);
3035
3036 bdry = this->read_word(this->base + ONENAND_DATARAM);
3037 if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
3038 locked = 0;
3039 else
3040 locked = 1;
3041 this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
3042
3043 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
3044 ret = this->wait(mtd, FL_RESETING);
3045
3046 printk(KERN_INFO "Die %d boundary: %d%s\n", die,
3047 this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
3048 }
3049
3050 /* Enable ECC */
3051 this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
3052 return 0;
3053}
3054
3055/**
3056 * flexonenand_get_size - Fill up fields in onenand_chip and mtd_info
3057 * boundary[], diesize[], mtd->size, mtd->erasesize
3058 * @param mtd - MTD device structure
3059 */
3060static void flexonenand_get_size(struct mtd_info *mtd)
3061{
3062 struct onenand_chip *this = mtd->priv;
3063 int die, i, eraseshift, density;
3064 int blksperdie, maxbdry;
3065 loff_t ofs;
3066
3067 density = onenand_get_density(this->device_id);
3068 blksperdie = ((loff_t)(16 << density) << 20) >> (this->erase_shift);
3069 blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
3070 maxbdry = blksperdie - 1;
3071 eraseshift = this->erase_shift - 1;
3072
3073 mtd->numeraseregions = this->dies << 1;
3074
3075 /* This fills up the device boundary */
3076 flexonenand_get_boundary(mtd);
3077 die = ofs = 0;
3078 i = -1;
3079 for (; die < this->dies; die++) {
3080 if (!die || this->boundary[die-1] != maxbdry) {
3081 i++;
3082 mtd->eraseregions[i].offset = ofs;
3083 mtd->eraseregions[i].erasesize = 1 << eraseshift;
3084 mtd->eraseregions[i].numblocks =
3085 this->boundary[die] + 1;
3086 ofs += mtd->eraseregions[i].numblocks << eraseshift;
3087 eraseshift++;
3088 } else {
3089 mtd->numeraseregions -= 1;
3090 mtd->eraseregions[i].numblocks +=
3091 this->boundary[die] + 1;
3092 ofs += (this->boundary[die] + 1) << (eraseshift - 1);
3093 }
3094 if (this->boundary[die] != maxbdry) {
3095 i++;
3096 mtd->eraseregions[i].offset = ofs;
3097 mtd->eraseregions[i].erasesize = 1 << eraseshift;
3098 mtd->eraseregions[i].numblocks = maxbdry ^
3099 this->boundary[die];
3100 ofs += mtd->eraseregions[i].numblocks << eraseshift;
3101 eraseshift--;
3102 } else
3103 mtd->numeraseregions -= 1;
3104 }
3105
3106 /* Expose MLC erase size except when all blocks are SLC */
3107 mtd->erasesize = 1 << this->erase_shift;
3108 if (mtd->numeraseregions == 1)
3109 mtd->erasesize >>= 1;
3110
3111 printk(KERN_INFO "Device has %d eraseregions\n", mtd->numeraseregions);
3112 for (i = 0; i < mtd->numeraseregions; i++)
3113 printk(KERN_INFO "[offset: 0x%08x, erasesize: 0x%05x,"
3114 " numblocks: %04u]\n",
3115 (unsigned int) mtd->eraseregions[i].offset,
3116 mtd->eraseregions[i].erasesize,
3117 mtd->eraseregions[i].numblocks);
3118
3119 for (die = 0, mtd->size = 0; die < this->dies; die++) {
3120 this->diesize[die] = (loff_t)blksperdie << this->erase_shift;
3121 this->diesize[die] -= (loff_t)(this->boundary[die] + 1)
3122 << (this->erase_shift - 1);
3123 mtd->size += this->diesize[die];
3124 }
3125}
3126
3127/**
3128 * flexonenand_check_blocks_erased - Check if blocks are erased
3129 * @param mtd_info - mtd info structure
3130 * @param start - first erase block to check
3131 * @param end - last erase block to check
3132 *
3133 * Converting an unerased block from MLC to SLC
3134 * causes byte values to change. Since both data and its ECC
3135 * have changed, reads on the block give uncorrectable error.
3136 * This might lead to the block being detected as bad.
3137 *
3138 * Avoid this by ensuring that the block to be converted is
3139 * erased.
3140 */
3141static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int end)
3142{
3143 struct onenand_chip *this = mtd->priv;
3144 int i, ret;
3145 int block;
3146 struct mtd_oob_ops ops = {
3147 .mode = MTD_OOB_PLACE,
3148 .ooboffs = 0,
3149 .ooblen = mtd->oobsize,
3150 .datbuf = NULL,
3151 .oobbuf = this->oob_buf,
3152 };
3153 loff_t addr;
3154
3155 printk(KERN_DEBUG "Check blocks from %d to %d\n", start, end);
3156
3157 for (block = start; block <= end; block++) {
3158 addr = flexonenand_addr(this, block);
3159 if (onenand_block_isbad_nolock(mtd, addr, 0))
3160 continue;
3161
3162 /*
3163 * Since main area write results in ECC write to spare,
3164 * it is sufficient to check only ECC bytes for change.
3165 */
3166 ret = onenand_read_oob_nolock(mtd, addr, &ops);
3167 if (ret)
3168 return ret;
3169
3170 for (i = 0; i < mtd->oobsize; i++)
3171 if (this->oob_buf[i] != 0xff)
3172 break;
3173
3174 if (i != mtd->oobsize) {
3175 printk(KERN_WARNING "Block %d not erased.\n", block);
3176 return 1;
3177 }
3178 }
3179
3180 return 0;
3181}
3182
3183/**
3184 * flexonenand_set_boundary - Writes the SLC boundary
3185 * @param mtd - mtd info structure
3186 */
3187int flexonenand_set_boundary(struct mtd_info *mtd, int die,
3188 int boundary, int lock)
3189{
3190 struct onenand_chip *this = mtd->priv;
3191 int ret, density, blksperdie, old, new, thisboundary;
3192 loff_t addr;
3193
3194 /* Change only once for SDP Flex-OneNAND */
3195 if (die && (!ONENAND_IS_DDP(this)))
3196 return 0;
3197
3198 /* boundary value of -1 indicates no required change */
3199 if (boundary < 0 || boundary == this->boundary[die])
3200 return 0;
3201
3202 density = onenand_get_density(this->device_id);
3203 blksperdie = ((16 << density) << 20) >> this->erase_shift;
3204 blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
3205
3206 if (boundary >= blksperdie) {
3207 printk(KERN_ERR "flexonenand_set_boundary: Invalid boundary value. "
3208 "Boundary not changed.\n");
3209 return -EINVAL;
3210 }
3211
3212 /* Check if converting blocks are erased */
3213 old = this->boundary[die] + (die * this->density_mask);
3214 new = boundary + (die * this->density_mask);
3215 ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new));
3216 if (ret) {
3217 printk(KERN_ERR "flexonenand_set_boundary: Please erase blocks before boundary change\n");
3218 return ret;
3219 }
3220
3221 this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
3222 this->wait(mtd, FL_SYNCING);
3223
3224 /* Check is boundary is locked */
3225 this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
3226 ret = this->wait(mtd, FL_READING);
3227
3228 thisboundary = this->read_word(this->base + ONENAND_DATARAM);
3229 if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
3230 printk(KERN_ERR "flexonenand_set_boundary: boundary locked\n");
3231 ret = 1;
3232 goto out;
3233 }
3234
3235 printk(KERN_INFO "flexonenand_set_boundary: Changing die %d boundary: %d%s\n",
3236 die, boundary, lock ? "(Locked)" : "(Unlocked)");
3237
3238 addr = die ? this->diesize[0] : 0;
3239
3240 boundary &= FLEXONENAND_PI_MASK;
3241 boundary |= lock ? 0 : (3 << FLEXONENAND_PI_UNLOCK_SHIFT);
3242
3243 this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
3244 ret = this->wait(mtd, FL_ERASING);
3245 if (ret) {
3246 printk(KERN_ERR "flexonenand_set_boundary: Failed PI erase for Die %d\n", die);
3247 goto out;
3248 }
3249
3250 this->write_word(boundary, this->base + ONENAND_DATARAM);
3251 this->command(mtd, ONENAND_CMD_PROG, addr, 0);
3252 ret = this->wait(mtd, FL_WRITING);
3253 if (ret) {
3254 printk(KERN_ERR "flexonenand_set_boundary: Failed PI write for Die %d\n", die);
3255 goto out;
3256 }
3257
3258 this->command(mtd, FLEXONENAND_CMD_PI_UPDATE, die, 0);
3259 ret = this->wait(mtd, FL_WRITING);
3260out:
3261 this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
3262 this->wait(mtd, FL_RESETING);
3263 if (!ret)
3264 /* Recalculate device size on boundary change*/
3265 flexonenand_get_size(mtd);
3266
3267 return ret;
3268}
3269
3270/**
2608 * onenand_probe - [OneNAND Interface] Probe the OneNAND device 3271 * onenand_probe - [OneNAND Interface] Probe the OneNAND device
2609 * @param mtd MTD device structure 3272 * @param mtd MTD device structure
2610 * 3273 *
@@ -2621,7 +3284,7 @@ static int onenand_probe(struct mtd_info *mtd)
2621 /* Save system configuration 1 */ 3284 /* Save system configuration 1 */
2622 syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1); 3285 syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
2623 /* Clear Sync. Burst Read mode to read BootRAM */ 3286 /* Clear Sync. Burst Read mode to read BootRAM */
2624 this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ), this->base + ONENAND_REG_SYS_CFG1); 3287 this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE), this->base + ONENAND_REG_SYS_CFG1);
2625 3288
2626 /* Send the command for reading device ID from BootRAM */ 3289 /* Send the command for reading device ID from BootRAM */
2627 this->write_word(ONENAND_CMD_READID, this->base + ONENAND_BOOTRAM); 3290 this->write_word(ONENAND_CMD_READID, this->base + ONENAND_BOOTRAM);
@@ -2646,6 +3309,7 @@ static int onenand_probe(struct mtd_info *mtd)
2646 maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID); 3309 maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
2647 dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID); 3310 dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
2648 ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID); 3311 ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
3312 this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
2649 3313
2650 /* Check OneNAND device */ 3314 /* Check OneNAND device */
2651 if (maf_id != bram_maf_id || dev_id != bram_dev_id) 3315 if (maf_id != bram_maf_id || dev_id != bram_dev_id)
@@ -2657,29 +3321,55 @@ static int onenand_probe(struct mtd_info *mtd)
2657 this->version_id = ver_id; 3321 this->version_id = ver_id;
2658 3322
2659 density = onenand_get_density(dev_id); 3323 density = onenand_get_density(dev_id);
3324 if (FLEXONENAND(this)) {
3325 this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
3326 /* Maximum possible erase regions */
3327 mtd->numeraseregions = this->dies << 1;
3328 mtd->eraseregions = kzalloc(sizeof(struct mtd_erase_region_info)
3329 * (this->dies << 1), GFP_KERNEL);
3330 if (!mtd->eraseregions)
3331 return -ENOMEM;
3332 }
3333
3334 /*
3335 * For Flex-OneNAND, chipsize represents maximum possible device size.
3336 * mtd->size represents the actual device size.
3337 */
2660 this->chipsize = (16 << density) << 20; 3338 this->chipsize = (16 << density) << 20;
2661 /* Set density mask. it is used for DDP */
2662 if (ONENAND_IS_DDP(this))
2663 this->density_mask = (1 << (density + 6));
2664 else
2665 this->density_mask = 0;
2666 3339
2667 /* OneNAND page size & block size */ 3340 /* OneNAND page size & block size */
2668 /* The data buffer size is equal to page size */ 3341 /* The data buffer size is equal to page size */
2669 mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE); 3342 mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
3343 /* We use the full BufferRAM */
3344 if (ONENAND_IS_MLC(this))
3345 mtd->writesize <<= 1;
3346
2670 mtd->oobsize = mtd->writesize >> 5; 3347 mtd->oobsize = mtd->writesize >> 5;
2671 /* Pages per a block are always 64 in OneNAND */ 3348 /* Pages per a block are always 64 in OneNAND */
2672 mtd->erasesize = mtd->writesize << 6; 3349 mtd->erasesize = mtd->writesize << 6;
3350 /*
3351 * Flex-OneNAND SLC area has 64 pages per block.
3352 * Flex-OneNAND MLC area has 128 pages per block.
3353 * Expose MLC erase size to find erase_shift and page_mask.
3354 */
3355 if (FLEXONENAND(this))
3356 mtd->erasesize <<= 1;
2673 3357
2674 this->erase_shift = ffs(mtd->erasesize) - 1; 3358 this->erase_shift = ffs(mtd->erasesize) - 1;
2675 this->page_shift = ffs(mtd->writesize) - 1; 3359 this->page_shift = ffs(mtd->writesize) - 1;
2676 this->page_mask = (1 << (this->erase_shift - this->page_shift)) - 1; 3360 this->page_mask = (1 << (this->erase_shift - this->page_shift)) - 1;
3361 /* Set density mask. it is used for DDP */
3362 if (ONENAND_IS_DDP(this))
3363 this->density_mask = this->chipsize >> (this->erase_shift + 1);
2677 /* It's real page size */ 3364 /* It's real page size */
2678 this->writesize = mtd->writesize; 3365 this->writesize = mtd->writesize;
2679 3366
2680 /* REVIST: Multichip handling */ 3367 /* REVIST: Multichip handling */
2681 3368
2682 mtd->size = this->chipsize; 3369 if (FLEXONENAND(this))
3370 flexonenand_get_size(mtd);
3371 else
3372 mtd->size = this->chipsize;
2683 3373
2684 /* Check OneNAND features */ 3374 /* Check OneNAND features */
2685 onenand_check_features(mtd); 3375 onenand_check_features(mtd);
@@ -2734,7 +3424,7 @@ static void onenand_resume(struct mtd_info *mtd)
2734 */ 3424 */
2735int onenand_scan(struct mtd_info *mtd, int maxchips) 3425int onenand_scan(struct mtd_info *mtd, int maxchips)
2736{ 3426{
2737 int i; 3427 int i, ret;
2738 struct onenand_chip *this = mtd->priv; 3428 struct onenand_chip *this = mtd->priv;
2739 3429
2740 if (!this->read_word) 3430 if (!this->read_word)
@@ -2746,6 +3436,10 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
2746 this->command = onenand_command; 3436 this->command = onenand_command;
2747 if (!this->wait) 3437 if (!this->wait)
2748 onenand_setup_wait(mtd); 3438 onenand_setup_wait(mtd);
3439 if (!this->bbt_wait)
3440 this->bbt_wait = onenand_bbt_wait;
3441 if (!this->unlock_all)
3442 this->unlock_all = onenand_unlock_all;
2749 3443
2750 if (!this->read_bufferram) 3444 if (!this->read_bufferram)
2751 this->read_bufferram = onenand_read_bufferram; 3445 this->read_bufferram = onenand_read_bufferram;
@@ -2796,6 +3490,10 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
2796 * Allow subpage writes up to oobsize. 3490 * Allow subpage writes up to oobsize.
2797 */ 3491 */
2798 switch (mtd->oobsize) { 3492 switch (mtd->oobsize) {
3493 case 128:
3494 this->ecclayout = &onenand_oob_128;
3495 mtd->subpage_sft = 0;
3496 break;
2799 case 64: 3497 case 64:
2800 this->ecclayout = &onenand_oob_64; 3498 this->ecclayout = &onenand_oob_64;
2801 mtd->subpage_sft = 2; 3499 mtd->subpage_sft = 2;
@@ -2859,9 +3557,18 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
2859 mtd->owner = THIS_MODULE; 3557 mtd->owner = THIS_MODULE;
2860 3558
2861 /* Unlock whole block */ 3559 /* Unlock whole block */
2862 onenand_unlock_all(mtd); 3560 this->unlock_all(mtd);
3561
3562 ret = this->scan_bbt(mtd);
3563 if ((!FLEXONENAND(this)) || ret)
3564 return ret;
2863 3565
2864 return this->scan_bbt(mtd); 3566 /* Change Flex-OneNAND boundaries if required */
3567 for (i = 0; i < MAX_DIES; i++)
3568 flexonenand_set_boundary(mtd, i, flex_bdry[2 * i],
3569 flex_bdry[(2 * i) + 1]);
3570
3571 return 0;
2865} 3572}
2866 3573
2867/** 3574/**
@@ -2890,6 +3597,7 @@ void onenand_release(struct mtd_info *mtd)
2890 kfree(this->page_buf); 3597 kfree(this->page_buf);
2891 if (this->options & ONENAND_OOBBUF_ALLOC) 3598 if (this->options & ONENAND_OOBBUF_ALLOC)
2892 kfree(this->oob_buf); 3599 kfree(this->oob_buf);
3600 kfree(mtd->eraseregions);
2893} 3601}
2894 3602
2895EXPORT_SYMBOL_GPL(onenand_scan); 3603EXPORT_SYMBOL_GPL(onenand_scan);
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 2f53b51c6805..a91fcac1af01 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -63,6 +63,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
63 loff_t from; 63 loff_t from;
64 size_t readlen, ooblen; 64 size_t readlen, ooblen;
65 struct mtd_oob_ops ops; 65 struct mtd_oob_ops ops;
66 int rgn;
66 67
67 printk(KERN_INFO "Scanning device for bad blocks\n"); 68 printk(KERN_INFO "Scanning device for bad blocks\n");
68 69
@@ -76,7 +77,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
76 /* Note that numblocks is 2 * (real numblocks) here; 77 /* Note that numblocks is 2 * (real numblocks) here;
77 * see i += 2 below as it makses shifting and masking less painful 78 * see i += 2 below as it makses shifting and masking less painful
78 */ 79 */
79 numblocks = mtd->size >> (bbm->bbt_erase_shift - 1); 80 numblocks = this->chipsize >> (bbm->bbt_erase_shift - 1);
80 startblock = 0; 81 startblock = 0;
81 from = 0; 82 from = 0;
82 83
@@ -106,7 +107,12 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
106 } 107 }
107 } 108 }
108 i += 2; 109 i += 2;
109 from += (1 << bbm->bbt_erase_shift); 110
111 if (FLEXONENAND(this)) {
112 rgn = flexonenand_region(mtd, from);
113 from += mtd->eraseregions[rgn].erasesize;
114 } else
115 from += (1 << bbm->bbt_erase_shift);
110 } 116 }
111 117
112 return 0; 118 return 0;
@@ -143,7 +149,7 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
143 uint8_t res; 149 uint8_t res;
144 150
145 /* Get block number * 2 */ 151 /* Get block number * 2 */
146 block = (int) (offs >> (bbm->bbt_erase_shift - 1)); 152 block = (int) (onenand_block(this, offs) << 1);
147 res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03; 153 res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
148 154
149 DEBUG(MTD_DEBUG_LEVEL2, "onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n", 155 DEBUG(MTD_DEBUG_LEVEL2, "onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
@@ -178,7 +184,7 @@ int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
178 struct bbm_info *bbm = this->bbm; 184 struct bbm_info *bbm = this->bbm;
179 int len, ret = 0; 185 int len, ret = 0;
180 186
181 len = mtd->size >> (this->erase_shift + 2); 187 len = this->chipsize >> (this->erase_shift + 2);
182 /* Allocate memory (2bit per block) and clear the memory bad block table */ 188 /* Allocate memory (2bit per block) and clear the memory bad block table */
183 bbm->bbt = kzalloc(len, GFP_KERNEL); 189 bbm->bbt = kzalloc(len, GFP_KERNEL);
184 if (!bbm->bbt) { 190 if (!bbm->bbt) {
diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c
index d64200b7c94b..f6e3c8aebd3a 100644
--- a/drivers/mtd/onenand/onenand_sim.c
+++ b/drivers/mtd/onenand/onenand_sim.c
@@ -6,6 +6,10 @@
6 * Copyright © 2005-2007 Samsung Electronics 6 * Copyright © 2005-2007 Samsung Electronics
7 * Kyungmin Park <kyungmin.park@samsung.com> 7 * Kyungmin Park <kyungmin.park@samsung.com>
8 * 8 *
9 * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
10 * Flex-OneNAND simulator support
11 * Copyright (C) Samsung Electronics, 2008
12 *
9 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 14 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation. 15 * published by the Free Software Foundation.
@@ -24,16 +28,38 @@
24#ifndef CONFIG_ONENAND_SIM_MANUFACTURER 28#ifndef CONFIG_ONENAND_SIM_MANUFACTURER
25#define CONFIG_ONENAND_SIM_MANUFACTURER 0xec 29#define CONFIG_ONENAND_SIM_MANUFACTURER 0xec
26#endif 30#endif
31
27#ifndef CONFIG_ONENAND_SIM_DEVICE_ID 32#ifndef CONFIG_ONENAND_SIM_DEVICE_ID
28#define CONFIG_ONENAND_SIM_DEVICE_ID 0x04 33#define CONFIG_ONENAND_SIM_DEVICE_ID 0x04
29#endif 34#endif
35
36#define CONFIG_FLEXONENAND ((CONFIG_ONENAND_SIM_DEVICE_ID >> 9) & 1)
37
30#ifndef CONFIG_ONENAND_SIM_VERSION_ID 38#ifndef CONFIG_ONENAND_SIM_VERSION_ID
31#define CONFIG_ONENAND_SIM_VERSION_ID 0x1e 39#define CONFIG_ONENAND_SIM_VERSION_ID 0x1e
32#endif 40#endif
33 41
42#ifndef CONFIG_ONENAND_SIM_TECHNOLOGY_ID
43#define CONFIG_ONENAND_SIM_TECHNOLOGY_ID CONFIG_FLEXONENAND
44#endif
45
46/* Initial boundary values for Flex-OneNAND Simulator */
47#ifndef CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY
48#define CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY 0x01
49#endif
50
51#ifndef CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY
52#define CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY 0x01
53#endif
54
34static int manuf_id = CONFIG_ONENAND_SIM_MANUFACTURER; 55static int manuf_id = CONFIG_ONENAND_SIM_MANUFACTURER;
35static int device_id = CONFIG_ONENAND_SIM_DEVICE_ID; 56static int device_id = CONFIG_ONENAND_SIM_DEVICE_ID;
36static int version_id = CONFIG_ONENAND_SIM_VERSION_ID; 57static int version_id = CONFIG_ONENAND_SIM_VERSION_ID;
58static int technology_id = CONFIG_ONENAND_SIM_TECHNOLOGY_ID;
59static int boundary[] = {
60 CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY,
61 CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY,
62};
37 63
38struct onenand_flash { 64struct onenand_flash {
39 void __iomem *base; 65 void __iomem *base;
@@ -57,12 +83,18 @@ struct onenand_flash {
57 (writew(v, this->base + ONENAND_REG_WP_STATUS)) 83 (writew(v, this->base + ONENAND_REG_WP_STATUS))
58 84
59/* It has all 0xff chars */ 85/* It has all 0xff chars */
60#define MAX_ONENAND_PAGESIZE (2048 + 64) 86#define MAX_ONENAND_PAGESIZE (4096 + 128)
61static unsigned char *ffchars; 87static unsigned char *ffchars;
62 88
89#if CONFIG_FLEXONENAND
90#define PARTITION_NAME "Flex-OneNAND simulator partition"
91#else
92#define PARTITION_NAME "OneNAND simulator partition"
93#endif
94
63static struct mtd_partition os_partitions[] = { 95static struct mtd_partition os_partitions[] = {
64 { 96 {
65 .name = "OneNAND simulator partition", 97 .name = PARTITION_NAME,
66 .offset = 0, 98 .offset = 0,
67 .size = MTDPART_SIZ_FULL, 99 .size = MTDPART_SIZ_FULL,
68 }, 100 },
@@ -104,6 +136,7 @@ static void onenand_lock_handle(struct onenand_chip *this, int cmd)
104 136
105 switch (cmd) { 137 switch (cmd) {
106 case ONENAND_CMD_UNLOCK: 138 case ONENAND_CMD_UNLOCK:
139 case ONENAND_CMD_UNLOCK_ALL:
107 if (block_lock_scheme) 140 if (block_lock_scheme)
108 ONENAND_SET_WP_STATUS(ONENAND_WP_US, this); 141 ONENAND_SET_WP_STATUS(ONENAND_WP_US, this);
109 else 142 else
@@ -228,10 +261,12 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
228{ 261{
229 struct mtd_info *mtd = &info->mtd; 262 struct mtd_info *mtd = &info->mtd;
230 struct onenand_flash *flash = this->priv; 263 struct onenand_flash *flash = this->priv;
231 int main_offset, spare_offset; 264 int main_offset, spare_offset, die = 0;
232 void __iomem *src; 265 void __iomem *src;
233 void __iomem *dest; 266 void __iomem *dest;
234 unsigned int i; 267 unsigned int i;
268 static int pi_operation;
269 int erasesize, rgn;
235 270
236 if (dataram) { 271 if (dataram) {
237 main_offset = mtd->writesize; 272 main_offset = mtd->writesize;
@@ -241,10 +276,27 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
241 spare_offset = 0; 276 spare_offset = 0;
242 } 277 }
243 278
279 if (pi_operation) {
280 die = readw(this->base + ONENAND_REG_START_ADDRESS2);
281 die >>= ONENAND_DDP_SHIFT;
282 }
283
244 switch (cmd) { 284 switch (cmd) {
285 case FLEXONENAND_CMD_PI_ACCESS:
286 pi_operation = 1;
287 break;
288
289 case ONENAND_CMD_RESET:
290 pi_operation = 0;
291 break;
292
245 case ONENAND_CMD_READ: 293 case ONENAND_CMD_READ:
246 src = ONENAND_CORE(flash) + offset; 294 src = ONENAND_CORE(flash) + offset;
247 dest = ONENAND_MAIN_AREA(this, main_offset); 295 dest = ONENAND_MAIN_AREA(this, main_offset);
296 if (pi_operation) {
297 writew(boundary[die], this->base + ONENAND_DATARAM);
298 break;
299 }
248 memcpy(dest, src, mtd->writesize); 300 memcpy(dest, src, mtd->writesize);
249 /* Fall through */ 301 /* Fall through */
250 302
@@ -257,6 +309,10 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
257 case ONENAND_CMD_PROG: 309 case ONENAND_CMD_PROG:
258 src = ONENAND_MAIN_AREA(this, main_offset); 310 src = ONENAND_MAIN_AREA(this, main_offset);
259 dest = ONENAND_CORE(flash) + offset; 311 dest = ONENAND_CORE(flash) + offset;
312 if (pi_operation) {
313 boundary[die] = readw(this->base + ONENAND_DATARAM);
314 break;
315 }
260 /* To handle partial write */ 316 /* To handle partial write */
261 for (i = 0; i < (1 << mtd->subpage_sft); i++) { 317 for (i = 0; i < (1 << mtd->subpage_sft); i++) {
262 int off = i * this->subpagesize; 318 int off = i * this->subpagesize;
@@ -284,9 +340,18 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
284 break; 340 break;
285 341
286 case ONENAND_CMD_ERASE: 342 case ONENAND_CMD_ERASE:
287 memset(ONENAND_CORE(flash) + offset, 0xff, mtd->erasesize); 343 if (pi_operation)
344 break;
345
346 if (FLEXONENAND(this)) {
347 rgn = flexonenand_region(mtd, offset);
348 erasesize = mtd->eraseregions[rgn].erasesize;
349 } else
350 erasesize = mtd->erasesize;
351
352 memset(ONENAND_CORE(flash) + offset, 0xff, erasesize);
288 memset(ONENAND_CORE_SPARE(flash, this, offset), 0xff, 353 memset(ONENAND_CORE_SPARE(flash, this, offset), 0xff,
289 (mtd->erasesize >> 5)); 354 (erasesize >> 5));
290 break; 355 break;
291 356
292 default: 357 default:
@@ -339,7 +404,7 @@ static void onenand_command_handle(struct onenand_chip *this, int cmd)
339 } 404 }
340 405
341 if (block != -1) 406 if (block != -1)
342 offset += block << this->erase_shift; 407 offset = onenand_addr(this, block);
343 408
344 if (page != -1) 409 if (page != -1)
345 offset += page << this->page_shift; 410 offset += page << this->page_shift;
@@ -390,6 +455,7 @@ static int __init flash_init(struct onenand_flash *flash)
390 } 455 }
391 456
392 density = device_id >> ONENAND_DEVICE_DENSITY_SHIFT; 457 density = device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
458 density &= ONENAND_DEVICE_DENSITY_MASK;
393 size = ((16 << 20) << density); 459 size = ((16 << 20) << density);
394 460
395 ONENAND_CORE(flash) = vmalloc(size + (size >> 5)); 461 ONENAND_CORE(flash) = vmalloc(size + (size >> 5));
@@ -405,8 +471,9 @@ static int __init flash_init(struct onenand_flash *flash)
405 writew(manuf_id, flash->base + ONENAND_REG_MANUFACTURER_ID); 471 writew(manuf_id, flash->base + ONENAND_REG_MANUFACTURER_ID);
406 writew(device_id, flash->base + ONENAND_REG_DEVICE_ID); 472 writew(device_id, flash->base + ONENAND_REG_DEVICE_ID);
407 writew(version_id, flash->base + ONENAND_REG_VERSION_ID); 473 writew(version_id, flash->base + ONENAND_REG_VERSION_ID);
474 writew(technology_id, flash->base + ONENAND_REG_TECHNOLOGY);
408 475
409 if (density < 2) 476 if (density < 2 && (!CONFIG_FLEXONENAND))
410 buffer_size = 0x0400; /* 1KiB page */ 477 buffer_size = 0x0400; /* 1KiB page */
411 else 478 else
412 buffer_size = 0x0800; /* 2KiB page */ 479 buffer_size = 0x0800; /* 2KiB page */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 892a9e4e275f..1dc721517e4c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2443,6 +2443,17 @@ config JME
2443 To compile this driver as a module, choose M here. The module 2443 To compile this driver as a module, choose M here. The module
2444 will be called jme. 2444 will be called jme.
2445 2445
2446config S6GMAC
2447 tristate "S6105 GMAC ethernet support"
2448 depends on XTENSA_VARIANT_S6000
2449 select PHYLIB
2450 help
2451 This driver supports the on chip ethernet device on the
2452 S6105 xtensa processor.
2453
2454 To compile this driver as a module, choose M here. The module
2455 will be called s6gmac.
2456
2446endif # NETDEV_1000 2457endif # NETDEV_1000
2447 2458
2448# 2459#
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index d366fb2b40e9..4b58a59f211b 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -245,6 +245,7 @@ obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
245 245
246obj-$(CONFIG_DNET) += dnet.o 246obj-$(CONFIG_DNET) += dnet.o
247obj-$(CONFIG_MACB) += macb.o 247obj-$(CONFIG_MACB) += macb.o
248obj-$(CONFIG_S6GMAC) += s6gmac.o
248 249
249obj-$(CONFIG_ARM) += arm/ 250obj-$(CONFIG_ARM) += arm/
250obj-$(CONFIG_DEV_APPLETALK) += appletalk/ 251obj-$(CONFIG_DEV_APPLETALK) += appletalk/
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index b4bb06fdf307..f703758f0a6e 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -65,7 +65,7 @@ static inline char *nic_name(struct pci_dev *pdev)
65#define TX_CQ_LEN 1024 65#define TX_CQ_LEN 1024
66#define RX_Q_LEN 1024 /* Does not support any other value */ 66#define RX_Q_LEN 1024 /* Does not support any other value */
67#define RX_CQ_LEN 1024 67#define RX_CQ_LEN 1024
68#define MCC_Q_LEN 64 /* total size not to exceed 8 pages */ 68#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
69#define MCC_CQ_LEN 256 69#define MCC_CQ_LEN 256
70 70
71#define BE_NAPI_WEIGHT 64 71#define BE_NAPI_WEIGHT 64
@@ -91,6 +91,61 @@ struct be_queue_info {
91 atomic_t used; /* Number of valid elements in the queue */ 91 atomic_t used; /* Number of valid elements in the queue */
92}; 92};
93 93
94static inline u32 MODULO(u16 val, u16 limit)
95{
96 BUG_ON(limit & (limit - 1));
97 return val & (limit - 1);
98}
99
100static inline void index_adv(u16 *index, u16 val, u16 limit)
101{
102 *index = MODULO((*index + val), limit);
103}
104
105static inline void index_inc(u16 *index, u16 limit)
106{
107 *index = MODULO((*index + 1), limit);
108}
109
110static inline void *queue_head_node(struct be_queue_info *q)
111{
112 return q->dma_mem.va + q->head * q->entry_size;
113}
114
115static inline void *queue_tail_node(struct be_queue_info *q)
116{
117 return q->dma_mem.va + q->tail * q->entry_size;
118}
119
120static inline void queue_head_inc(struct be_queue_info *q)
121{
122 index_inc(&q->head, q->len);
123}
124
125static inline void queue_tail_inc(struct be_queue_info *q)
126{
127 index_inc(&q->tail, q->len);
128}
129
130
131struct be_eq_obj {
132 struct be_queue_info q;
133 char desc[32];
134
135 /* Adaptive interrupt coalescing (AIC) info */
136 bool enable_aic;
137 u16 min_eqd; /* in usecs */
138 u16 max_eqd; /* in usecs */
139 u16 cur_eqd; /* in usecs */
140
141 struct napi_struct napi;
142};
143
144struct be_mcc_obj {
145 struct be_queue_info q;
146 struct be_queue_info cq;
147};
148
94struct be_ctrl_info { 149struct be_ctrl_info {
95 u8 __iomem *csr; 150 u8 __iomem *csr;
96 u8 __iomem *db; /* Door Bell */ 151 u8 __iomem *db; /* Door Bell */
@@ -98,11 +153,20 @@ struct be_ctrl_info {
98 int pci_func; 153 int pci_func;
99 154
100 /* Mbox used for cmd request/response */ 155 /* Mbox used for cmd request/response */
101 spinlock_t cmd_lock; /* For serializing cmds to BE card */ 156 spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
102 struct be_dma_mem mbox_mem; 157 struct be_dma_mem mbox_mem;
103 /* Mbox mem is adjusted to align to 16 bytes. The allocated addr 158 /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
104 * is stored for freeing purpose */ 159 * is stored for freeing purpose */
105 struct be_dma_mem mbox_mem_alloced; 160 struct be_dma_mem mbox_mem_alloced;
161
162 /* MCC Rings */
163 struct be_mcc_obj mcc_obj;
164 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
165 spinlock_t mcc_cq_lock;
166
167 /* MCC Async callback */
168 void (*async_cb)(void *adapter, bool link_up);
169 void *adapter_ctxt;
106}; 170};
107 171
108#include "be_cmds.h" 172#include "be_cmds.h"
@@ -150,19 +214,6 @@ struct be_stats_obj {
150 struct be_dma_mem cmd; 214 struct be_dma_mem cmd;
151}; 215};
152 216
153struct be_eq_obj {
154 struct be_queue_info q;
155 char desc[32];
156
157 /* Adaptive interrupt coalescing (AIC) info */
158 bool enable_aic;
159 u16 min_eqd; /* in usecs */
160 u16 max_eqd; /* in usecs */
161 u16 cur_eqd; /* in usecs */
162
163 struct napi_struct napi;
164};
165
166struct be_tx_obj { 217struct be_tx_obj {
167 struct be_queue_info q; 218 struct be_queue_info q;
168 struct be_queue_info cq; 219 struct be_queue_info cq;
@@ -225,8 +276,9 @@ struct be_adapter {
225 u32 if_handle; /* Used to configure filtering */ 276 u32 if_handle; /* Used to configure filtering */
226 u32 pmac_id; /* MAC addr handle used by BE card */ 277 u32 pmac_id; /* MAC addr handle used by BE card */
227 278
228 struct be_link_info link; 279 bool link_up;
229 u32 port_num; 280 u32 port_num;
281 bool promiscuous;
230}; 282};
231 283
232extern struct ethtool_ops be_ethtool_ops; 284extern struct ethtool_ops be_ethtool_ops;
@@ -235,22 +287,6 @@ extern struct ethtool_ops be_ethtool_ops;
235 287
236#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops) 288#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
237 289
238static inline u32 MODULO(u16 val, u16 limit)
239{
240 BUG_ON(limit & (limit - 1));
241 return val & (limit - 1);
242}
243
244static inline void index_adv(u16 *index, u16 val, u16 limit)
245{
246 *index = MODULO((*index + val), limit);
247}
248
249static inline void index_inc(u16 *index, u16 limit)
250{
251 *index = MODULO((*index + 1), limit);
252}
253
254#define PAGE_SHIFT_4K 12 290#define PAGE_SHIFT_4K 12
255#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) 291#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
256 292
@@ -339,4 +375,6 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
339 return val; 375 return val;
340} 376}
341 377
378extern void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
379 u16 num_popped);
342#endif /* BE_H */ 380#endif /* BE_H */
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index d444aed962bc..583517ed56f0 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -17,6 +17,133 @@
17 17
18#include "be.h" 18#include "be.h"
19 19
20static void be_mcc_notify(struct be_ctrl_info *ctrl)
21{
22 struct be_queue_info *mccq = &ctrl->mcc_obj.q;
23 u32 val = 0;
24
25 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
26 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
27 iowrite32(val, ctrl->db + DB_MCCQ_OFFSET);
28}
29
30/* To check if valid bit is set, check the entire word as we don't know
31 * the endianness of the data (old entry is host endian while a new entry is
32 * little endian) */
33static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl)
34{
35 if (compl->flags != 0) {
36 compl->flags = le32_to_cpu(compl->flags);
37 BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
38 return true;
39 } else {
40 return false;
41 }
42}
43
44/* Need to reset the entire word that houses the valid bit */
45static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl)
46{
47 compl->flags = 0;
48}
49
50static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
51 struct be_mcc_cq_entry *compl)
52{
53 u16 compl_status, extd_status;
54
55 /* Just swap the status to host endian; mcc tag is opaquely copied
56 * from mcc_wrb */
57 be_dws_le_to_cpu(compl, 4);
58
59 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
60 CQE_STATUS_COMPL_MASK;
61 if (compl_status != MCC_STATUS_SUCCESS) {
62 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
63 CQE_STATUS_EXTD_MASK;
64 printk(KERN_WARNING DRV_NAME
65 " error in cmd completion: status(compl/extd)=%d/%d\n",
66 compl_status, extd_status);
67 return -1;
68 }
69 return 0;
70}
71
72/* Link state evt is a string of bytes; no need for endian swapping */
73static void be_async_link_state_process(struct be_ctrl_info *ctrl,
74 struct be_async_event_link_state *evt)
75{
76 ctrl->async_cb(ctrl->adapter_ctxt,
77 evt->port_link_status == ASYNC_EVENT_LINK_UP ? true : false);
78}
79
80static inline bool is_link_state_evt(u32 trailer)
81{
82 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
83 ASYNC_TRAILER_EVENT_CODE_MASK) ==
84 ASYNC_EVENT_CODE_LINK_STATE);
85}
86
87static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_ctrl_info *ctrl)
88{
89 struct be_queue_info *mcc_cq = &ctrl->mcc_obj.cq;
90 struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq);
91
92 if (be_mcc_compl_is_new(compl)) {
93 queue_tail_inc(mcc_cq);
94 return compl;
95 }
96 return NULL;
97}
98
99void be_process_mcc(struct be_ctrl_info *ctrl)
100{
101 struct be_mcc_cq_entry *compl;
102 int num = 0;
103
104 spin_lock_bh(&ctrl->mcc_cq_lock);
105 while ((compl = be_mcc_compl_get(ctrl))) {
106 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
107 /* Interpret flags as an async trailer */
108 BUG_ON(!is_link_state_evt(compl->flags));
109
110 /* Interpret compl as a async link evt */
111 be_async_link_state_process(ctrl,
112 (struct be_async_event_link_state *) compl);
113 } else {
114 be_mcc_compl_process(ctrl, compl);
115 atomic_dec(&ctrl->mcc_obj.q.used);
116 }
117 be_mcc_compl_use(compl);
118 num++;
119 }
120 if (num)
121 be_cq_notify(ctrl, ctrl->mcc_obj.cq.id, true, num);
122 spin_unlock_bh(&ctrl->mcc_cq_lock);
123}
124
125/* Wait till no more pending mcc requests are present */
126static void be_mcc_wait_compl(struct be_ctrl_info *ctrl)
127{
128#define mcc_timeout 50000 /* 5s timeout */
129 int i;
130 for (i = 0; i < mcc_timeout; i++) {
131 be_process_mcc(ctrl);
132 if (atomic_read(&ctrl->mcc_obj.q.used) == 0)
133 break;
134 udelay(100);
135 }
136 if (i == mcc_timeout)
137 printk(KERN_WARNING DRV_NAME "mcc poll timed out\n");
138}
139
140/* Notify MCC requests and wait for completion */
141static void be_mcc_notify_wait(struct be_ctrl_info *ctrl)
142{
143 be_mcc_notify(ctrl);
144 be_mcc_wait_compl(ctrl);
145}
146
20static int be_mbox_db_ready_wait(void __iomem *db) 147static int be_mbox_db_ready_wait(void __iomem *db)
21{ 148{
22 int cnt = 0, wait = 5; 149 int cnt = 0, wait = 5;
@@ -44,11 +171,11 @@ static int be_mbox_db_ready_wait(void __iomem *db)
44 171
45/* 172/*
46 * Insert the mailbox address into the doorbell in two steps 173 * Insert the mailbox address into the doorbell in two steps
174 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
47 */ 175 */
48static int be_mbox_db_ring(struct be_ctrl_info *ctrl) 176static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
49{ 177{
50 int status; 178 int status;
51 u16 compl_status, extd_status;
52 u32 val = 0; 179 u32 val = 0;
53 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; 180 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
54 struct be_dma_mem *mbox_mem = &ctrl->mbox_mem; 181 struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
@@ -79,24 +206,17 @@ static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
79 if (status != 0) 206 if (status != 0)
80 return status; 207 return status;
81 208
82 /* compl entry has been made now */ 209 /* A cq entry has been made now */
83 be_dws_le_to_cpu(cqe, sizeof(*cqe)); 210 if (be_mcc_compl_is_new(cqe)) {
84 if (!(cqe->flags & CQE_FLAGS_VALID_MASK)) { 211 status = be_mcc_compl_process(ctrl, &mbox->cqe);
85 printk(KERN_WARNING DRV_NAME ": ERROR invalid mbox compl\n"); 212 be_mcc_compl_use(cqe);
213 if (status)
214 return status;
215 } else {
216 printk(KERN_WARNING DRV_NAME "invalid mailbox completion\n");
86 return -1; 217 return -1;
87 } 218 }
88 219 return 0;
89 compl_status = (cqe->status >> CQE_STATUS_COMPL_SHIFT) &
90 CQE_STATUS_COMPL_MASK;
91 if (compl_status != MCC_STATUS_SUCCESS) {
92 extd_status = (cqe->status >> CQE_STATUS_EXTD_SHIFT) &
93 CQE_STATUS_EXTD_MASK;
94 printk(KERN_WARNING DRV_NAME
95 ": ERROR in cmd compl. status(compl/extd)=%d/%d\n",
96 compl_status, extd_status);
97 }
98
99 return compl_status;
100} 220}
101 221
102static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage) 222static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage)
@@ -235,6 +355,18 @@ static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
235 return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; 355 return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
236} 356}
237 357
358static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
359{
360 struct be_mcc_wrb *wrb = NULL;
361 if (atomic_read(&mccq->used) < mccq->len) {
362 wrb = queue_head_node(mccq);
363 queue_head_inc(mccq);
364 atomic_inc(&mccq->used);
365 memset(wrb, 0, sizeof(*wrb));
366 }
367 return wrb;
368}
369
238int be_cmd_eq_create(struct be_ctrl_info *ctrl, 370int be_cmd_eq_create(struct be_ctrl_info *ctrl,
239 struct be_queue_info *eq, int eq_delay) 371 struct be_queue_info *eq, int eq_delay)
240{ 372{
@@ -244,7 +376,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl,
244 struct be_dma_mem *q_mem = &eq->dma_mem; 376 struct be_dma_mem *q_mem = &eq->dma_mem;
245 int status; 377 int status;
246 378
247 spin_lock(&ctrl->cmd_lock); 379 spin_lock(&ctrl->mbox_lock);
248 memset(wrb, 0, sizeof(*wrb)); 380 memset(wrb, 0, sizeof(*wrb));
249 381
250 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 382 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -272,7 +404,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl,
272 eq->id = le16_to_cpu(resp->eq_id); 404 eq->id = le16_to_cpu(resp->eq_id);
273 eq->created = true; 405 eq->created = true;
274 } 406 }
275 spin_unlock(&ctrl->cmd_lock); 407 spin_unlock(&ctrl->mbox_lock);
276 return status; 408 return status;
277} 409}
278 410
@@ -284,7 +416,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
284 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); 416 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
285 int status; 417 int status;
286 418
287 spin_lock(&ctrl->cmd_lock); 419 spin_lock(&ctrl->mbox_lock);
288 memset(wrb, 0, sizeof(*wrb)); 420 memset(wrb, 0, sizeof(*wrb));
289 421
290 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 422 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -304,7 +436,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
304 if (!status) 436 if (!status)
305 memcpy(mac_addr, resp->mac.addr, ETH_ALEN); 437 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
306 438
307 spin_unlock(&ctrl->cmd_lock); 439 spin_unlock(&ctrl->mbox_lock);
308 return status; 440 return status;
309} 441}
310 442
@@ -315,7 +447,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
315 struct be_cmd_req_pmac_add *req = embedded_payload(wrb); 447 struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
316 int status; 448 int status;
317 449
318 spin_lock(&ctrl->cmd_lock); 450 spin_lock(&ctrl->mbox_lock);
319 memset(wrb, 0, sizeof(*wrb)); 451 memset(wrb, 0, sizeof(*wrb));
320 452
321 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 453 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -332,7 +464,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
332 *pmac_id = le32_to_cpu(resp->pmac_id); 464 *pmac_id = le32_to_cpu(resp->pmac_id);
333 } 465 }
334 466
335 spin_unlock(&ctrl->cmd_lock); 467 spin_unlock(&ctrl->mbox_lock);
336 return status; 468 return status;
337} 469}
338 470
@@ -342,7 +474,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
342 struct be_cmd_req_pmac_del *req = embedded_payload(wrb); 474 struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
343 int status; 475 int status;
344 476
345 spin_lock(&ctrl->cmd_lock); 477 spin_lock(&ctrl->mbox_lock);
346 memset(wrb, 0, sizeof(*wrb)); 478 memset(wrb, 0, sizeof(*wrb));
347 479
348 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 480 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -354,7 +486,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
354 req->pmac_id = cpu_to_le32(pmac_id); 486 req->pmac_id = cpu_to_le32(pmac_id);
355 487
356 status = be_mbox_db_ring(ctrl); 488 status = be_mbox_db_ring(ctrl);
357 spin_unlock(&ctrl->cmd_lock); 489 spin_unlock(&ctrl->mbox_lock);
358 490
359 return status; 491 return status;
360} 492}
@@ -370,7 +502,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
370 void *ctxt = &req->context; 502 void *ctxt = &req->context;
371 int status; 503 int status;
372 504
373 spin_lock(&ctrl->cmd_lock); 505 spin_lock(&ctrl->mbox_lock);
374 memset(wrb, 0, sizeof(*wrb)); 506 memset(wrb, 0, sizeof(*wrb));
375 507
376 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 508 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -388,7 +520,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
388 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts); 520 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
389 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); 521 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
390 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); 522 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
391 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 0); 523 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
392 AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func); 524 AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func);
393 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 525 be_dws_cpu_to_le(ctxt, sizeof(req->context));
394 526
@@ -399,7 +531,56 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
399 cq->id = le16_to_cpu(resp->cq_id); 531 cq->id = le16_to_cpu(resp->cq_id);
400 cq->created = true; 532 cq->created = true;
401 } 533 }
402 spin_unlock(&ctrl->cmd_lock); 534 spin_unlock(&ctrl->mbox_lock);
535
536 return status;
537}
538
539static u32 be_encoded_q_len(int q_len)
540{
541 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
542 if (len_encoded == 16)
543 len_encoded = 0;
544 return len_encoded;
545}
546
547int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
548 struct be_queue_info *mccq,
549 struct be_queue_info *cq)
550{
551 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
552 struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
553 struct be_dma_mem *q_mem = &mccq->dma_mem;
554 void *ctxt = &req->context;
555 int status;
556
557 spin_lock(&ctrl->mbox_lock);
558 memset(wrb, 0, sizeof(*wrb));
559
560 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
561
562 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
563 OPCODE_COMMON_MCC_CREATE, sizeof(*req));
564
565 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
566
567 AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, ctrl->pci_func);
568 AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
569 AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
570 be_encoded_q_len(mccq->len));
571 AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
572
573 be_dws_cpu_to_le(ctxt, sizeof(req->context));
574
575 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
576
577 status = be_mbox_db_ring(ctrl);
578 if (!status) {
579 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
580 mccq->id = le16_to_cpu(resp->id);
581 mccq->created = true;
582 }
583 spin_unlock(&ctrl->mbox_lock);
403 584
404 return status; 585 return status;
405} 586}
@@ -415,7 +596,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl,
415 int status; 596 int status;
416 u32 len_encoded; 597 u32 len_encoded;
417 598
418 spin_lock(&ctrl->cmd_lock); 599 spin_lock(&ctrl->mbox_lock);
419 memset(wrb, 0, sizeof(*wrb)); 600 memset(wrb, 0, sizeof(*wrb));
420 601
421 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 602 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -446,7 +627,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl,
446 txq->id = le16_to_cpu(resp->cid); 627 txq->id = le16_to_cpu(resp->cid);
447 txq->created = true; 628 txq->created = true;
448 } 629 }
449 spin_unlock(&ctrl->cmd_lock); 630 spin_unlock(&ctrl->mbox_lock);
450 631
451 return status; 632 return status;
452} 633}
@@ -460,7 +641,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
460 struct be_dma_mem *q_mem = &rxq->dma_mem; 641 struct be_dma_mem *q_mem = &rxq->dma_mem;
461 int status; 642 int status;
462 643
463 spin_lock(&ctrl->cmd_lock); 644 spin_lock(&ctrl->mbox_lock);
464 memset(wrb, 0, sizeof(*wrb)); 645 memset(wrb, 0, sizeof(*wrb));
465 646
466 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 647 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -482,7 +663,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
482 rxq->id = le16_to_cpu(resp->id); 663 rxq->id = le16_to_cpu(resp->id);
483 rxq->created = true; 664 rxq->created = true;
484 } 665 }
485 spin_unlock(&ctrl->cmd_lock); 666 spin_unlock(&ctrl->mbox_lock);
486 667
487 return status; 668 return status;
488} 669}
@@ -496,7 +677,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
496 u8 subsys = 0, opcode = 0; 677 u8 subsys = 0, opcode = 0;
497 int status; 678 int status;
498 679
499 spin_lock(&ctrl->cmd_lock); 680 spin_lock(&ctrl->mbox_lock);
500 681
501 memset(wrb, 0, sizeof(*wrb)); 682 memset(wrb, 0, sizeof(*wrb));
502 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 683 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -518,6 +699,10 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
518 subsys = CMD_SUBSYSTEM_ETH; 699 subsys = CMD_SUBSYSTEM_ETH;
519 opcode = OPCODE_ETH_RX_DESTROY; 700 opcode = OPCODE_ETH_RX_DESTROY;
520 break; 701 break;
702 case QTYPE_MCCQ:
703 subsys = CMD_SUBSYSTEM_COMMON;
704 opcode = OPCODE_COMMON_MCC_DESTROY;
705 break;
521 default: 706 default:
522 printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n"); 707 printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n");
523 status = -1; 708 status = -1;
@@ -528,7 +713,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
528 713
529 status = be_mbox_db_ring(ctrl); 714 status = be_mbox_db_ring(ctrl);
530err: 715err:
531 spin_unlock(&ctrl->cmd_lock); 716 spin_unlock(&ctrl->mbox_lock);
532 717
533 return status; 718 return status;
534} 719}
@@ -541,7 +726,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
541 struct be_cmd_req_if_create *req = embedded_payload(wrb); 726 struct be_cmd_req_if_create *req = embedded_payload(wrb);
542 int status; 727 int status;
543 728
544 spin_lock(&ctrl->cmd_lock); 729 spin_lock(&ctrl->mbox_lock);
545 memset(wrb, 0, sizeof(*wrb)); 730 memset(wrb, 0, sizeof(*wrb));
546 731
547 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 732 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -562,7 +747,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
562 *pmac_id = le32_to_cpu(resp->pmac_id); 747 *pmac_id = le32_to_cpu(resp->pmac_id);
563 } 748 }
564 749
565 spin_unlock(&ctrl->cmd_lock); 750 spin_unlock(&ctrl->mbox_lock);
566 return status; 751 return status;
567} 752}
568 753
@@ -572,7 +757,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
572 struct be_cmd_req_if_destroy *req = embedded_payload(wrb); 757 struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
573 int status; 758 int status;
574 759
575 spin_lock(&ctrl->cmd_lock); 760 spin_lock(&ctrl->mbox_lock);
576 memset(wrb, 0, sizeof(*wrb)); 761 memset(wrb, 0, sizeof(*wrb));
577 762
578 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 763 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -583,7 +768,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
583 req->interface_id = cpu_to_le32(interface_id); 768 req->interface_id = cpu_to_le32(interface_id);
584 status = be_mbox_db_ring(ctrl); 769 status = be_mbox_db_ring(ctrl);
585 770
586 spin_unlock(&ctrl->cmd_lock); 771 spin_unlock(&ctrl->mbox_lock);
587 772
588 return status; 773 return status;
589} 774}
@@ -598,7 +783,7 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
598 struct be_sge *sge = nonembedded_sgl(wrb); 783 struct be_sge *sge = nonembedded_sgl(wrb);
599 int status; 784 int status;
600 785
601 spin_lock(&ctrl->cmd_lock); 786 spin_lock(&ctrl->mbox_lock);
602 memset(wrb, 0, sizeof(*wrb)); 787 memset(wrb, 0, sizeof(*wrb));
603 788
604 memset(req, 0, sizeof(*req)); 789 memset(req, 0, sizeof(*req));
@@ -617,18 +802,20 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
617 be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats)); 802 be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
618 } 803 }
619 804
620 spin_unlock(&ctrl->cmd_lock); 805 spin_unlock(&ctrl->mbox_lock);
621 return status; 806 return status;
622} 807}
623 808
624int be_cmd_link_status_query(struct be_ctrl_info *ctrl, 809int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
625 struct be_link_info *link) 810 bool *link_up)
626{ 811{
627 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 812 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
628 struct be_cmd_req_link_status *req = embedded_payload(wrb); 813 struct be_cmd_req_link_status *req = embedded_payload(wrb);
629 int status; 814 int status;
630 815
631 spin_lock(&ctrl->cmd_lock); 816 spin_lock(&ctrl->mbox_lock);
817
818 *link_up = false;
632 memset(wrb, 0, sizeof(*wrb)); 819 memset(wrb, 0, sizeof(*wrb));
633 820
634 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 821 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -639,14 +826,11 @@ int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
639 status = be_mbox_db_ring(ctrl); 826 status = be_mbox_db_ring(ctrl);
640 if (!status) { 827 if (!status) {
641 struct be_cmd_resp_link_status *resp = embedded_payload(wrb); 828 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
642 link->speed = resp->mac_speed; 829 if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
643 link->duplex = resp->mac_duplex; 830 *link_up = true;
644 link->fault = resp->mac_fault;
645 } else {
646 link->speed = PHY_LINK_SPEED_ZERO;
647 } 831 }
648 832
649 spin_unlock(&ctrl->cmd_lock); 833 spin_unlock(&ctrl->mbox_lock);
650 return status; 834 return status;
651} 835}
652 836
@@ -656,7 +840,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
656 struct be_cmd_req_get_fw_version *req = embedded_payload(wrb); 840 struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
657 int status; 841 int status;
658 842
659 spin_lock(&ctrl->cmd_lock); 843 spin_lock(&ctrl->mbox_lock);
660 memset(wrb, 0, sizeof(*wrb)); 844 memset(wrb, 0, sizeof(*wrb));
661 845
662 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 846 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -670,7 +854,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
670 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN); 854 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
671 } 855 }
672 856
673 spin_unlock(&ctrl->cmd_lock); 857 spin_unlock(&ctrl->mbox_lock);
674 return status; 858 return status;
675} 859}
676 860
@@ -681,7 +865,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
681 struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb); 865 struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
682 int status; 866 int status;
683 867
684 spin_lock(&ctrl->cmd_lock); 868 spin_lock(&ctrl->mbox_lock);
685 memset(wrb, 0, sizeof(*wrb)); 869 memset(wrb, 0, sizeof(*wrb));
686 870
687 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 871 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -696,7 +880,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
696 880
697 status = be_mbox_db_ring(ctrl); 881 status = be_mbox_db_ring(ctrl);
698 882
699 spin_unlock(&ctrl->cmd_lock); 883 spin_unlock(&ctrl->mbox_lock);
700 return status; 884 return status;
701} 885}
702 886
@@ -707,7 +891,7 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
707 struct be_cmd_req_vlan_config *req = embedded_payload(wrb); 891 struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
708 int status; 892 int status;
709 893
710 spin_lock(&ctrl->cmd_lock); 894 spin_lock(&ctrl->mbox_lock);
711 memset(wrb, 0, sizeof(*wrb)); 895 memset(wrb, 0, sizeof(*wrb));
712 896
713 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 897 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -726,18 +910,22 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
726 910
727 status = be_mbox_db_ring(ctrl); 911 status = be_mbox_db_ring(ctrl);
728 912
729 spin_unlock(&ctrl->cmd_lock); 913 spin_unlock(&ctrl->mbox_lock);
730 return status; 914 return status;
731} 915}
732 916
917/* Use MCC for this command as it may be called in BH context */
733int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en) 918int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
734{ 919{
735 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 920 struct be_mcc_wrb *wrb;
736 struct be_cmd_req_promiscuous_config *req = embedded_payload(wrb); 921 struct be_cmd_req_promiscuous_config *req;
737 int status;
738 922
739 spin_lock(&ctrl->cmd_lock); 923 spin_lock_bh(&ctrl->mcc_lock);
740 memset(wrb, 0, sizeof(*wrb)); 924
925 wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
926 BUG_ON(!wrb);
927
928 req = embedded_payload(wrb);
741 929
742 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 930 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
743 931
@@ -749,21 +937,29 @@ int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
749 else 937 else
750 req->port0_promiscuous = en; 938 req->port0_promiscuous = en;
751 939
752 status = be_mbox_db_ring(ctrl); 940 be_mcc_notify_wait(ctrl);
753 941
754 spin_unlock(&ctrl->cmd_lock); 942 spin_unlock_bh(&ctrl->mcc_lock);
755 return status; 943 return 0;
756} 944}
757 945
758int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table, 946/*
759 u32 num, bool promiscuous) 947 * Use MCC for this command as it may be called in BH context
948 * (mc == NULL) => multicast promiscous
949 */
950int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
951 struct dev_mc_list *mc_list, u32 mc_count)
760{ 952{
761 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 953#define BE_MAX_MC 32 /* set mcast promisc if > 32 */
762 struct be_cmd_req_mcast_mac_config *req = embedded_payload(wrb); 954 struct be_mcc_wrb *wrb;
763 int status; 955 struct be_cmd_req_mcast_mac_config *req;
764 956
765 spin_lock(&ctrl->cmd_lock); 957 spin_lock_bh(&ctrl->mcc_lock);
766 memset(wrb, 0, sizeof(*wrb)); 958
959 wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
960 BUG_ON(!wrb);
961
962 req = embedded_payload(wrb);
767 963
768 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 964 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
769 965
@@ -771,17 +967,23 @@ int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
771 OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req)); 967 OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
772 968
773 req->interface_id = if_id; 969 req->interface_id = if_id;
774 req->promiscuous = promiscuous; 970 if (mc_list && mc_count <= BE_MAX_MC) {
775 if (!promiscuous) { 971 int i;
776 req->num_mac = cpu_to_le16(num); 972 struct dev_mc_list *mc;
777 if (num) 973
778 memcpy(req->mac, mac_table, ETH_ALEN * num); 974 req->num_mac = cpu_to_le16(mc_count);
975
976 for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
977 memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
978 } else {
979 req->promiscuous = 1;
779 } 980 }
780 981
781 status = be_mbox_db_ring(ctrl); 982 be_mcc_notify_wait(ctrl);
782 983
783 spin_unlock(&ctrl->cmd_lock); 984 spin_unlock_bh(&ctrl->mcc_lock);
784 return status; 985
986 return 0;
785} 987}
786 988
787int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc) 989int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
@@ -790,7 +992,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
790 struct be_cmd_req_set_flow_control *req = embedded_payload(wrb); 992 struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
791 int status; 993 int status;
792 994
793 spin_lock(&ctrl->cmd_lock); 995 spin_lock(&ctrl->mbox_lock);
794 996
795 memset(wrb, 0, sizeof(*wrb)); 997 memset(wrb, 0, sizeof(*wrb));
796 998
@@ -804,7 +1006,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
804 1006
805 status = be_mbox_db_ring(ctrl); 1007 status = be_mbox_db_ring(ctrl);
806 1008
807 spin_unlock(&ctrl->cmd_lock); 1009 spin_unlock(&ctrl->mbox_lock);
808 return status; 1010 return status;
809} 1011}
810 1012
@@ -814,7 +1016,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
814 struct be_cmd_req_get_flow_control *req = embedded_payload(wrb); 1016 struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
815 int status; 1017 int status;
816 1018
817 spin_lock(&ctrl->cmd_lock); 1019 spin_lock(&ctrl->mbox_lock);
818 1020
819 memset(wrb, 0, sizeof(*wrb)); 1021 memset(wrb, 0, sizeof(*wrb));
820 1022
@@ -831,7 +1033,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
831 *rx_fc = le16_to_cpu(resp->rx_flow_control); 1033 *rx_fc = le16_to_cpu(resp->rx_flow_control);
832 } 1034 }
833 1035
834 spin_unlock(&ctrl->cmd_lock); 1036 spin_unlock(&ctrl->mbox_lock);
835 return status; 1037 return status;
836} 1038}
837 1039
@@ -841,7 +1043,7 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
841 struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb); 1043 struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
842 int status; 1044 int status;
843 1045
844 spin_lock(&ctrl->cmd_lock); 1046 spin_lock(&ctrl->mbox_lock);
845 1047
846 memset(wrb, 0, sizeof(*wrb)); 1048 memset(wrb, 0, sizeof(*wrb));
847 1049
@@ -856,6 +1058,6 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
856 *port_num = le32_to_cpu(resp->phys_port); 1058 *port_num = le32_to_cpu(resp->phys_port);
857 } 1059 }
858 1060
859 spin_unlock(&ctrl->cmd_lock); 1061 spin_unlock(&ctrl->mbox_lock);
860 return status; 1062 return status;
861} 1063}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index e499e2d5b8c3..747626da7b4e 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -76,6 +76,34 @@ struct be_mcc_cq_entry {
76 u32 flags; /* dword 3 */ 76 u32 flags; /* dword 3 */
77}; 77};
78 78
79/* When the async bit of mcc_compl is set, the last 4 bytes of
80 * mcc_compl is interpreted as follows:
81 */
82#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
83#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
84#define ASYNC_EVENT_CODE_LINK_STATE 0x1
85struct be_async_event_trailer {
86 u32 code;
87};
88
89enum {
90 ASYNC_EVENT_LINK_DOWN = 0x0,
91 ASYNC_EVENT_LINK_UP = 0x1
92};
93
94/* When the event code of an async trailer is link-state, the mcc_compl
95 * must be interpreted as follows
96 */
97struct be_async_event_link_state {
98 u8 physical_port;
99 u8 port_link_status;
100 u8 port_duplex;
101 u8 port_speed;
102 u8 port_fault;
103 u8 rsvd0[7];
104 struct be_async_event_trailer trailer;
105} __packed;
106
79struct be_mcc_mailbox { 107struct be_mcc_mailbox {
80 struct be_mcc_wrb wrb; 108 struct be_mcc_wrb wrb;
81 struct be_mcc_cq_entry cqe; 109 struct be_mcc_cq_entry cqe;
@@ -101,6 +129,7 @@ struct be_mcc_mailbox {
101#define OPCODE_COMMON_FIRMWARE_CONFIG 42 129#define OPCODE_COMMON_FIRMWARE_CONFIG 42
102#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50 130#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
103#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51 131#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
132#define OPCODE_COMMON_MCC_DESTROY 53
104#define OPCODE_COMMON_CQ_DESTROY 54 133#define OPCODE_COMMON_CQ_DESTROY 54
105#define OPCODE_COMMON_EQ_DESTROY 55 134#define OPCODE_COMMON_EQ_DESTROY 55
106#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58 135#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
@@ -269,6 +298,38 @@ struct be_cmd_resp_cq_create {
269 u16 rsvd0; 298 u16 rsvd0;
270} __packed; 299} __packed;
271 300
301/******************** Create MCCQ ***************************/
302/* Pseudo amap definition in which each bit of the actual structure is defined
303 * as a byte: used to calculate offset/shift/mask of each field */
304struct amap_mcc_context {
305 u8 con_index[14];
306 u8 rsvd0[2];
307 u8 ring_size[4];
308 u8 fetch_wrb;
309 u8 fetch_r2t;
310 u8 cq_id[10];
311 u8 prod_index[14];
312 u8 fid[8];
313 u8 pdid[9];
314 u8 valid;
315 u8 rsvd1[32];
316 u8 rsvd2[32];
317} __packed;
318
319struct be_cmd_req_mcc_create {
320 struct be_cmd_req_hdr hdr;
321 u16 num_pages;
322 u16 rsvd0;
323 u8 context[sizeof(struct amap_mcc_context) / 8];
324 struct phys_addr pages[8];
325} __packed;
326
327struct be_cmd_resp_mcc_create {
328 struct be_cmd_resp_hdr hdr;
329 u16 id;
330 u16 rsvd0;
331} __packed;
332
272/******************** Create TxQ ***************************/ 333/******************** Create TxQ ***************************/
273#define BE_ETH_TX_RING_TYPE_STANDARD 2 334#define BE_ETH_TX_RING_TYPE_STANDARD 2
274#define BE_ULP1_NUM 1 335#define BE_ULP1_NUM 1
@@ -341,7 +402,8 @@ enum {
341 QTYPE_EQ = 1, 402 QTYPE_EQ = 1,
342 QTYPE_CQ, 403 QTYPE_CQ,
343 QTYPE_TXQ, 404 QTYPE_TXQ,
344 QTYPE_RXQ 405 QTYPE_RXQ,
406 QTYPE_MCCQ
345}; 407};
346 408
347struct be_cmd_req_q_destroy { 409struct be_cmd_req_q_destroy {
@@ -546,12 +608,6 @@ struct be_cmd_req_link_status {
546 u32 rsvd; 608 u32 rsvd;
547}; 609};
548 610
549struct be_link_info {
550 u8 duplex;
551 u8 speed;
552 u8 fault;
553};
554
555enum { 611enum {
556 PHY_LINK_DUPLEX_NONE = 0x0, 612 PHY_LINK_DUPLEX_NONE = 0x0,
557 PHY_LINK_DUPLEX_HALF = 0x1, 613 PHY_LINK_DUPLEX_HALF = 0x1,
@@ -657,6 +713,9 @@ extern int be_cmd_cq_create(struct be_ctrl_info *ctrl,
657 struct be_queue_info *cq, struct be_queue_info *eq, 713 struct be_queue_info *cq, struct be_queue_info *eq,
658 bool sol_evts, bool no_delay, 714 bool sol_evts, bool no_delay,
659 int num_cqe_dma_coalesce); 715 int num_cqe_dma_coalesce);
716extern int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
717 struct be_queue_info *mccq,
718 struct be_queue_info *cq);
660extern int be_cmd_txq_create(struct be_ctrl_info *ctrl, 719extern int be_cmd_txq_create(struct be_ctrl_info *ctrl,
661 struct be_queue_info *txq, 720 struct be_queue_info *txq,
662 struct be_queue_info *cq); 721 struct be_queue_info *cq);
@@ -667,7 +726,7 @@ extern int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
667extern int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, 726extern int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
668 int type); 727 int type);
669extern int be_cmd_link_status_query(struct be_ctrl_info *ctrl, 728extern int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
670 struct be_link_info *link); 729 bool *link_up);
671extern int be_cmd_reset(struct be_ctrl_info *ctrl); 730extern int be_cmd_reset(struct be_ctrl_info *ctrl);
672extern int be_cmd_get_stats(struct be_ctrl_info *ctrl, 731extern int be_cmd_get_stats(struct be_ctrl_info *ctrl,
673 struct be_dma_mem *nonemb_cmd); 732 struct be_dma_mem *nonemb_cmd);
@@ -679,10 +738,11 @@ extern int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id,
679 bool promiscuous); 738 bool promiscuous);
680extern int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, 739extern int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl,
681 u8 port_num, bool en); 740 u8 port_num, bool en);
682extern int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, 741extern int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
683 u8 *mac_table, u32 num, bool promiscuous); 742 struct dev_mc_list *mc_list, u32 mc_count);
684extern int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, 743extern int be_cmd_set_flow_control(struct be_ctrl_info *ctrl,
685 u32 tx_fc, u32 rx_fc); 744 u32 tx_fc, u32 rx_fc);
686extern int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, 745extern int be_cmd_get_flow_control(struct be_ctrl_info *ctrl,
687 u32 *tx_fc, u32 *rx_fc); 746 u32 *tx_fc, u32 *rx_fc);
688extern int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num); 747extern int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num);
748extern void be_process_mcc(struct be_ctrl_info *ctrl);
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index b132aa4893ca..b02e805c1db3 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -61,7 +61,7 @@
61/* Clear the interrupt for this eq */ 61/* Clear the interrupt for this eq */
62#define DB_EQ_CLR_SHIFT (9) /* bit 9 */ 62#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
63/* Must be 1 */ 63/* Must be 1 */
64#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */ 64#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
65/* Number of event entries processed */ 65/* Number of event entries processed */
66#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ 66#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
67/* Rearm bit */ 67/* Rearm bit */
@@ -88,6 +88,12 @@
88/* Number of rx frags posted */ 88/* Number of rx frags posted */
89#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */ 89#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
90 90
91/********** MCC door bell ************/
92#define DB_MCCQ_OFFSET 0x140
93#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
94/* Number of entries posted */
95#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
96
91/* 97/*
92 * BE descriptors: host memory data structures whose formats 98 * BE descriptors: host memory data structures whose formats
93 * are hardwired in BE silicon. 99 * are hardwired in BE silicon.
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 66bb56874d9b..66c10c87f517 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -60,26 +60,6 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
60 return 0; 60 return 0;
61} 61}
62 62
63static inline void *queue_head_node(struct be_queue_info *q)
64{
65 return q->dma_mem.va + q->head * q->entry_size;
66}
67
68static inline void *queue_tail_node(struct be_queue_info *q)
69{
70 return q->dma_mem.va + q->tail * q->entry_size;
71}
72
73static inline void queue_head_inc(struct be_queue_info *q)
74{
75 index_inc(&q->head, q->len);
76}
77
78static inline void queue_tail_inc(struct be_queue_info *q)
79{
80 index_inc(&q->tail, q->len);
81}
82
83static void be_intr_set(struct be_ctrl_info *ctrl, bool enable) 63static void be_intr_set(struct be_ctrl_info *ctrl, bool enable)
84{ 64{
85 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; 65 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
@@ -127,7 +107,7 @@ static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid,
127 iowrite32(val, ctrl->db + DB_EQ_OFFSET); 107 iowrite32(val, ctrl->db + DB_EQ_OFFSET);
128} 108}
129 109
130static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, 110void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
131 bool arm, u16 num_popped) 111 bool arm, u16 num_popped)
132{ 112{
133 u32 val = 0; 113 u32 val = 0;
@@ -234,28 +214,24 @@ static void netdev_stats_update(struct be_adapter *adapter)
234 dev_stats->tx_window_errors = 0; 214 dev_stats->tx_window_errors = 0;
235} 215}
236 216
237static void be_link_status_update(struct be_adapter *adapter) 217void be_link_status_update(void *ctxt, bool link_up)
238{ 218{
239 struct be_link_info *prev = &adapter->link; 219 struct be_adapter *adapter = ctxt;
240 struct be_link_info now = { 0 };
241 struct net_device *netdev = adapter->netdev; 220 struct net_device *netdev = adapter->netdev;
242 221
243 be_cmd_link_status_query(&adapter->ctrl, &now);
244
245 /* If link came up or went down */ 222 /* If link came up or went down */
246 if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO || 223 if (adapter->link_up != link_up) {
247 prev->speed == PHY_LINK_SPEED_ZERO)) { 224 if (link_up) {
248 if (now.speed == PHY_LINK_SPEED_ZERO) {
249 netif_stop_queue(netdev);
250 netif_carrier_off(netdev);
251 printk(KERN_INFO "%s: Link down\n", netdev->name);
252 } else {
253 netif_start_queue(netdev); 225 netif_start_queue(netdev);
254 netif_carrier_on(netdev); 226 netif_carrier_on(netdev);
255 printk(KERN_INFO "%s: Link up\n", netdev->name); 227 printk(KERN_INFO "%s: Link up\n", netdev->name);
228 } else {
229 netif_stop_queue(netdev);
230 netif_carrier_off(netdev);
231 printk(KERN_INFO "%s: Link down\n", netdev->name);
256 } 232 }
233 adapter->link_up = link_up;
257 } 234 }
258 *prev = now;
259} 235}
260 236
261/* Update the EQ delay n BE based on the RX frags consumed / sec */ 237/* Update the EQ delay n BE based on the RX frags consumed / sec */
@@ -569,47 +545,32 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
569 be_vid_config(netdev); 545 be_vid_config(netdev);
570} 546}
571 547
572static void be_set_multicast_filter(struct net_device *netdev) 548static void be_set_multicast_list(struct net_device *netdev)
573{ 549{
574 struct be_adapter *adapter = netdev_priv(netdev); 550 struct be_adapter *adapter = netdev_priv(netdev);
575 struct dev_mc_list *mc_ptr; 551 struct be_ctrl_info *ctrl = &adapter->ctrl;
576 u8 mac_addr[32][ETH_ALEN];
577 int i = 0;
578 552
579 if (netdev->flags & IFF_ALLMULTI) { 553 if (netdev->flags & IFF_PROMISC) {
580 /* set BE in Multicast promiscuous */ 554 be_cmd_promiscuous_config(ctrl, adapter->port_num, 1);
581 be_cmd_mcast_mac_set(&adapter->ctrl, 555 adapter->promiscuous = true;
582 adapter->if_handle, NULL, 0, true); 556 goto done;
583 return;
584 } 557 }
585 558
586 for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { 559 /* BE was previously in promiscous mode; disable it */
587 memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN); 560 if (adapter->promiscuous) {
588 if (++i >= 32) { 561 adapter->promiscuous = false;
589 be_cmd_mcast_mac_set(&adapter->ctrl, 562 be_cmd_promiscuous_config(ctrl, adapter->port_num, 0);
590 adapter->if_handle, &mac_addr[0][0], i, false);
591 i = 0;
592 }
593
594 } 563 }
595 564
596 if (i) { 565 if (netdev->flags & IFF_ALLMULTI) {
597 /* reset the promiscuous mode also. */ 566 be_cmd_multicast_set(ctrl, adapter->if_handle, NULL, 0);
598 be_cmd_mcast_mac_set(&adapter->ctrl, 567 goto done;
599 adapter->if_handle, &mac_addr[0][0], i, false);
600 } 568 }
601}
602 569
603static void be_set_multicast_list(struct net_device *netdev) 570 be_cmd_multicast_set(ctrl, adapter->if_handle, netdev->mc_list,
604{ 571 netdev->mc_count);
605 struct be_adapter *adapter = netdev_priv(netdev); 572done:
606 573 return;
607 if (netdev->flags & IFF_PROMISC) {
608 be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1);
609 } else {
610 be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0);
611 be_set_multicast_filter(netdev);
612 }
613} 574}
614 575
615static void be_rx_rate_update(struct be_adapter *adapter) 576static void be_rx_rate_update(struct be_adapter *adapter)
@@ -960,10 +921,8 @@ static void be_post_rx_frags(struct be_adapter *adapter)
960 return; 921 return;
961} 922}
962 923
963static struct be_eth_tx_compl * 924static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
964be_tx_compl_get(struct be_adapter *adapter)
965{ 925{
966 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
967 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq); 926 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
968 927
969 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) 928 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
@@ -1051,6 +1010,59 @@ static void be_tx_q_clean(struct be_adapter *adapter)
1051 } 1010 }
1052} 1011}
1053 1012
1013static void be_mcc_queues_destroy(struct be_adapter *adapter)
1014{
1015 struct be_queue_info *q;
1016 struct be_ctrl_info *ctrl = &adapter->ctrl;
1017
1018 q = &ctrl->mcc_obj.q;
1019 if (q->created)
1020 be_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
1021 be_queue_free(adapter, q);
1022
1023 q = &ctrl->mcc_obj.cq;
1024 if (q->created)
1025 be_cmd_q_destroy(ctrl, q, QTYPE_CQ);
1026 be_queue_free(adapter, q);
1027}
1028
1029/* Must be called only after TX qs are created as MCC shares TX EQ */
1030static int be_mcc_queues_create(struct be_adapter *adapter)
1031{
1032 struct be_queue_info *q, *cq;
1033 struct be_ctrl_info *ctrl = &adapter->ctrl;
1034
1035 /* Alloc MCC compl queue */
1036 cq = &ctrl->mcc_obj.cq;
1037 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1038 sizeof(struct be_mcc_cq_entry)))
1039 goto err;
1040
1041 /* Ask BE to create MCC compl queue; share TX's eq */
1042 if (be_cmd_cq_create(ctrl, cq, &adapter->tx_eq.q, false, true, 0))
1043 goto mcc_cq_free;
1044
1045 /* Alloc MCC queue */
1046 q = &ctrl->mcc_obj.q;
1047 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1048 goto mcc_cq_destroy;
1049
1050 /* Ask BE to create MCC queue */
1051 if (be_cmd_mccq_create(ctrl, q, cq))
1052 goto mcc_q_free;
1053
1054 return 0;
1055
1056mcc_q_free:
1057 be_queue_free(adapter, q);
1058mcc_cq_destroy:
1059 be_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
1060mcc_cq_free:
1061 be_queue_free(adapter, cq);
1062err:
1063 return -1;
1064}
1065
1054static void be_tx_queues_destroy(struct be_adapter *adapter) 1066static void be_tx_queues_destroy(struct be_adapter *adapter)
1055{ 1067{
1056 struct be_queue_info *q; 1068 struct be_queue_info *q;
@@ -1263,7 +1275,7 @@ static irqreturn_t be_msix_rx(int irq, void *dev)
1263 return IRQ_HANDLED; 1275 return IRQ_HANDLED;
1264} 1276}
1265 1277
1266static irqreturn_t be_msix_tx(int irq, void *dev) 1278static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1267{ 1279{
1268 struct be_adapter *adapter = dev; 1280 struct be_adapter *adapter = dev;
1269 1281
@@ -1324,40 +1336,51 @@ int be_poll_rx(struct napi_struct *napi, int budget)
1324 return work_done; 1336 return work_done;
1325} 1337}
1326 1338
1327/* For TX we don't honour budget; consume everything */ 1339void be_process_tx(struct be_adapter *adapter)
1328int be_poll_tx(struct napi_struct *napi, int budget)
1329{ 1340{
1330 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi); 1341 struct be_queue_info *txq = &adapter->tx_obj.q;
1331 struct be_adapter *adapter = 1342 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1332 container_of(tx_eq, struct be_adapter, tx_eq);
1333 struct be_tx_obj *tx_obj = &adapter->tx_obj;
1334 struct be_queue_info *tx_cq = &tx_obj->cq;
1335 struct be_queue_info *txq = &tx_obj->q;
1336 struct be_eth_tx_compl *txcp; 1343 struct be_eth_tx_compl *txcp;
1337 u32 num_cmpl = 0; 1344 u32 num_cmpl = 0;
1338 u16 end_idx; 1345 u16 end_idx;
1339 1346
1340 while ((txcp = be_tx_compl_get(adapter))) { 1347 while ((txcp = be_tx_compl_get(tx_cq))) {
1341 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, 1348 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1342 wrb_index, txcp); 1349 wrb_index, txcp);
1343 be_tx_compl_process(adapter, end_idx); 1350 be_tx_compl_process(adapter, end_idx);
1344 num_cmpl++; 1351 num_cmpl++;
1345 } 1352 }
1346 1353
1347 /* As Tx wrbs have been freed up, wake up netdev queue if 1354 if (num_cmpl) {
1348 * it was stopped due to lack of tx wrbs. 1355 be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);
1349 */ 1356
1350 if (netif_queue_stopped(adapter->netdev) && 1357 /* As Tx wrbs have been freed up, wake up netdev queue if
1358 * it was stopped due to lack of tx wrbs.
1359 */
1360 if (netif_queue_stopped(adapter->netdev) &&
1351 atomic_read(&txq->used) < txq->len / 2) { 1361 atomic_read(&txq->used) < txq->len / 2) {
1352 netif_wake_queue(adapter->netdev); 1362 netif_wake_queue(adapter->netdev);
1363 }
1364
1365 drvr_stats(adapter)->be_tx_events++;
1366 drvr_stats(adapter)->be_tx_compl += num_cmpl;
1353 } 1367 }
1368}
1369
1370/* As TX and MCC share the same EQ check for both TX and MCC completions.
1371 * For TX/MCC we don't honour budget; consume everything
1372 */
1373static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1374{
1375 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1376 struct be_adapter *adapter =
1377 container_of(tx_eq, struct be_adapter, tx_eq);
1354 1378
1355 napi_complete(napi); 1379 napi_complete(napi);
1356 1380
1357 be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl); 1381 be_process_tx(adapter);
1358 1382
1359 drvr_stats(adapter)->be_tx_events++; 1383 be_process_mcc(&adapter->ctrl);
1360 drvr_stats(adapter)->be_tx_compl += num_cmpl;
1361 1384
1362 return 1; 1385 return 1;
1363} 1386}
@@ -1368,9 +1391,6 @@ static void be_worker(struct work_struct *work)
1368 container_of(work, struct be_adapter, work.work); 1391 container_of(work, struct be_adapter, work.work);
1369 int status; 1392 int status;
1370 1393
1371 /* Check link */
1372 be_link_status_update(adapter);
1373
1374 /* Get Stats */ 1394 /* Get Stats */
1375 status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd); 1395 status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd);
1376 if (!status) 1396 if (!status)
@@ -1419,7 +1439,7 @@ static int be_msix_register(struct be_adapter *adapter)
1419 1439
1420 sprintf(tx_eq->desc, "%s-tx", netdev->name); 1440 sprintf(tx_eq->desc, "%s-tx", netdev->name);
1421 vec = be_msix_vec_get(adapter, tx_eq->q.id); 1441 vec = be_msix_vec_get(adapter, tx_eq->q.id);
1422 status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter); 1442 status = request_irq(vec, be_msix_tx_mcc, 0, tx_eq->desc, adapter);
1423 if (status) 1443 if (status)
1424 goto err; 1444 goto err;
1425 1445
@@ -1495,6 +1515,39 @@ static int be_open(struct net_device *netdev)
1495 struct be_ctrl_info *ctrl = &adapter->ctrl; 1515 struct be_ctrl_info *ctrl = &adapter->ctrl;
1496 struct be_eq_obj *rx_eq = &adapter->rx_eq; 1516 struct be_eq_obj *rx_eq = &adapter->rx_eq;
1497 struct be_eq_obj *tx_eq = &adapter->tx_eq; 1517 struct be_eq_obj *tx_eq = &adapter->tx_eq;
1518 bool link_up;
1519 int status;
1520
1521 /* First time posting */
1522 be_post_rx_frags(adapter);
1523
1524 napi_enable(&rx_eq->napi);
1525 napi_enable(&tx_eq->napi);
1526
1527 be_irq_register(adapter);
1528
1529 be_intr_set(ctrl, true);
1530
1531 /* The evt queues are created in unarmed state; arm them */
1532 be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
1533 be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
1534
1535 /* Rx compl queue may be in unarmed state; rearm it */
1536 be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
1537
1538 status = be_cmd_link_status_query(ctrl, &link_up);
1539 if (status)
1540 return status;
1541 be_link_status_update(adapter, link_up);
1542
1543 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
1544 return 0;
1545}
1546
1547static int be_setup(struct be_adapter *adapter)
1548{
1549 struct be_ctrl_info *ctrl = &adapter->ctrl;
1550 struct net_device *netdev = adapter->netdev;
1498 u32 if_flags; 1551 u32 if_flags;
1499 int status; 1552 int status;
1500 1553
@@ -1521,29 +1574,14 @@ static int be_open(struct net_device *netdev)
1521 if (status != 0) 1574 if (status != 0)
1522 goto tx_qs_destroy; 1575 goto tx_qs_destroy;
1523 1576
1524 /* First time posting */ 1577 status = be_mcc_queues_create(adapter);
1525 be_post_rx_frags(adapter); 1578 if (status != 0)
1526 1579 goto rx_qs_destroy;
1527 napi_enable(&rx_eq->napi);
1528 napi_enable(&tx_eq->napi);
1529
1530 be_irq_register(adapter);
1531
1532 be_intr_set(ctrl, true);
1533
1534 /* The evt queues are created in the unarmed state; arm them */
1535 be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
1536 be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
1537
1538 /* The compl queues are created in the unarmed state; arm them */
1539 be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
1540 be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0);
1541
1542 be_link_status_update(adapter);
1543 1580
1544 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
1545 return 0; 1581 return 0;
1546 1582
1583rx_qs_destroy:
1584 be_rx_queues_destroy(adapter);
1547tx_qs_destroy: 1585tx_qs_destroy:
1548 be_tx_queues_destroy(adapter); 1586 be_tx_queues_destroy(adapter);
1549if_destroy: 1587if_destroy:
@@ -1552,6 +1590,19 @@ do_none:
1552 return status; 1590 return status;
1553} 1591}
1554 1592
1593static int be_clear(struct be_adapter *adapter)
1594{
1595 struct be_ctrl_info *ctrl = &adapter->ctrl;
1596
1597 be_rx_queues_destroy(adapter);
1598 be_tx_queues_destroy(adapter);
1599
1600 be_cmd_if_destroy(ctrl, adapter->if_handle);
1601
1602 be_mcc_queues_destroy(adapter);
1603 return 0;
1604}
1605
1555static int be_close(struct net_device *netdev) 1606static int be_close(struct net_device *netdev)
1556{ 1607{
1557 struct be_adapter *adapter = netdev_priv(netdev); 1608 struct be_adapter *adapter = netdev_priv(netdev);
@@ -1564,7 +1615,7 @@ static int be_close(struct net_device *netdev)
1564 1615
1565 netif_stop_queue(netdev); 1616 netif_stop_queue(netdev);
1566 netif_carrier_off(netdev); 1617 netif_carrier_off(netdev);
1567 adapter->link.speed = PHY_LINK_SPEED_ZERO; 1618 adapter->link_up = false;
1568 1619
1569 be_intr_set(ctrl, false); 1620 be_intr_set(ctrl, false);
1570 1621
@@ -1581,10 +1632,6 @@ static int be_close(struct net_device *netdev)
1581 napi_disable(&rx_eq->napi); 1632 napi_disable(&rx_eq->napi);
1582 napi_disable(&tx_eq->napi); 1633 napi_disable(&tx_eq->napi);
1583 1634
1584 be_rx_queues_destroy(adapter);
1585 be_tx_queues_destroy(adapter);
1586
1587 be_cmd_if_destroy(ctrl, adapter->if_handle);
1588 return 0; 1635 return 0;
1589} 1636}
1590 1637
@@ -1673,7 +1720,7 @@ static void be_netdev_init(struct net_device *netdev)
1673 1720
1674 netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx, 1721 netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
1675 BE_NAPI_WEIGHT); 1722 BE_NAPI_WEIGHT);
1676 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx, 1723 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
1677 BE_NAPI_WEIGHT); 1724 BE_NAPI_WEIGHT);
1678 1725
1679 netif_carrier_off(netdev); 1726 netif_carrier_off(netdev);
@@ -1755,7 +1802,12 @@ static int be_ctrl_init(struct be_adapter *adapter)
1755 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); 1802 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
1756 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 1803 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
1757 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); 1804 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
1758 spin_lock_init(&ctrl->cmd_lock); 1805 spin_lock_init(&ctrl->mbox_lock);
1806 spin_lock_init(&ctrl->mcc_lock);
1807 spin_lock_init(&ctrl->mcc_cq_lock);
1808
1809 ctrl->async_cb = be_link_status_update;
1810 ctrl->adapter_ctxt = adapter;
1759 1811
1760 val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 1812 val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
1761 ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) & 1813 ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) &
@@ -1793,6 +1845,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
1793 1845
1794 unregister_netdev(adapter->netdev); 1846 unregister_netdev(adapter->netdev);
1795 1847
1848 be_clear(adapter);
1849
1796 be_stats_cleanup(adapter); 1850 be_stats_cleanup(adapter);
1797 1851
1798 be_ctrl_cleanup(adapter); 1852 be_ctrl_cleanup(adapter);
@@ -1890,13 +1944,18 @@ static int __devinit be_probe(struct pci_dev *pdev,
1890 be_netdev_init(netdev); 1944 be_netdev_init(netdev);
1891 SET_NETDEV_DEV(netdev, &adapter->pdev->dev); 1945 SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
1892 1946
1947 status = be_setup(adapter);
1948 if (status)
1949 goto stats_clean;
1893 status = register_netdev(netdev); 1950 status = register_netdev(netdev);
1894 if (status != 0) 1951 if (status != 0)
1895 goto stats_clean; 1952 goto unsetup;
1896 1953
1897 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num); 1954 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
1898 return 0; 1955 return 0;
1899 1956
1957unsetup:
1958 be_clear(adapter);
1900stats_clean: 1959stats_clean:
1901 be_stats_cleanup(adapter); 1960 be_stats_cleanup(adapter);
1902ctrl_clean: 1961ctrl_clean:
@@ -1921,6 +1980,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
1921 if (netif_running(netdev)) { 1980 if (netif_running(netdev)) {
1922 rtnl_lock(); 1981 rtnl_lock();
1923 be_close(netdev); 1982 be_close(netdev);
1983 be_clear(adapter);
1924 rtnl_unlock(); 1984 rtnl_unlock();
1925 } 1985 }
1926 1986
@@ -1947,6 +2007,7 @@ static int be_resume(struct pci_dev *pdev)
1947 2007
1948 if (netif_running(netdev)) { 2008 if (netif_running(netdev)) {
1949 rtnl_lock(); 2009 rtnl_lock();
2010 be_setup(adapter);
1950 be_open(netdev); 2011 be_open(netdev);
1951 rtnl_unlock(); 2012 rtnl_unlock();
1952 } 2013 }
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 677f60490f67..679885a122b4 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1997,7 +1997,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
1997 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 1997 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
1998 struct e1000_hw *hw = &adapter->hw; 1998 struct e1000_hw *hw = &adapter->hw;
1999 struct net_device *poll_dev = adapter->netdev; 1999 struct net_device *poll_dev = adapter->netdev;
2000 int tx_cleaned = 0, work_done = 0; 2000 int tx_cleaned = 1, work_done = 0;
2001 2001
2002 adapter = netdev_priv(poll_dev); 2002 adapter = netdev_priv(poll_dev);
2003 2003
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index e02bafdd3682..93f4abd990a9 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -668,7 +668,7 @@ int mlx4_en_start_port(struct net_device *dev)
668 queue_work(mdev->workqueue, &priv->mcast_task); 668 queue_work(mdev->workqueue, &priv->mcast_task);
669 669
670 priv->port_up = true; 670 priv->port_up = true;
671 netif_start_queue(dev); 671 netif_tx_start_all_queues(dev);
672 return 0; 672 return 0;
673 673
674mac_err: 674mac_err:
@@ -700,14 +700,14 @@ void mlx4_en_stop_port(struct net_device *dev)
700 en_dbg(DRV, priv, "stop port called while port already down\n"); 700 en_dbg(DRV, priv, "stop port called while port already down\n");
701 return; 701 return;
702 } 702 }
703 netif_stop_queue(dev);
704 703
705 /* Synchronize with tx routine */ 704 /* Synchronize with tx routine */
706 netif_tx_lock_bh(dev); 705 netif_tx_lock_bh(dev);
707 priv->port_up = false; 706 netif_tx_stop_all_queues(dev);
708 netif_tx_unlock_bh(dev); 707 netif_tx_unlock_bh(dev);
709 708
710 /* close port*/ 709 /* close port*/
710 priv->port_up = false;
711 mlx4_CLOSE_PORT(mdev->dev, priv->port); 711 mlx4_CLOSE_PORT(mdev->dev, priv->port);
712 712
713 /* Unregister Mac address for the port */ 713 /* Unregister Mac address for the port */
@@ -881,7 +881,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
881 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); 881 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
882 882
883 cancel_delayed_work(&priv->stats_task); 883 cancel_delayed_work(&priv->stats_task);
884 cancel_delayed_work(&priv->refill_task);
885 /* flush any pending task for this netdev */ 884 /* flush any pending task for this netdev */
886 flush_workqueue(mdev->workqueue); 885 flush_workqueue(mdev->workqueue);
887 886
@@ -986,7 +985,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
986 spin_lock_init(&priv->stats_lock); 985 spin_lock_init(&priv->stats_lock);
987 INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast); 986 INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
988 INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac); 987 INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
989 INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
990 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 988 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
991 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 989 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
992 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 990 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 5a14899c1e25..91bdfdfd431f 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -269,31 +269,6 @@ reduce_rings:
269 return 0; 269 return 0;
270} 270}
271 271
272static int mlx4_en_fill_rx_buf(struct net_device *dev,
273 struct mlx4_en_rx_ring *ring)
274{
275 struct mlx4_en_priv *priv = netdev_priv(dev);
276 int num = 0;
277 int err;
278
279 while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
280 err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
281 ring->size_mask);
282 if (err) {
283 if (netif_msg_rx_err(priv))
284 en_warn(priv, "Failed preparing rx descriptor\n");
285 priv->port_stats.rx_alloc_failed++;
286 break;
287 }
288 ++num;
289 ++ring->prod;
290 }
291 if ((u32) (ring->prod - ring->cons) == ring->actual_size)
292 ring->full = 1;
293
294 return num;
295}
296
297static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, 272static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
298 struct mlx4_en_rx_ring *ring) 273 struct mlx4_en_rx_ring *ring)
299{ 274{
@@ -312,42 +287,6 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
312 } 287 }
313} 288}
314 289
315
316void mlx4_en_rx_refill(struct work_struct *work)
317{
318 struct delayed_work *delay = to_delayed_work(work);
319 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
320 refill_task);
321 struct mlx4_en_dev *mdev = priv->mdev;
322 struct net_device *dev = priv->dev;
323 struct mlx4_en_rx_ring *ring;
324 int need_refill = 0;
325 int i;
326
327 mutex_lock(&mdev->state_lock);
328 if (!mdev->device_up || !priv->port_up)
329 goto out;
330
331 /* We only get here if there are no receive buffers, so we can't race
332 * with Rx interrupts while filling buffers */
333 for (i = 0; i < priv->rx_ring_num; i++) {
334 ring = &priv->rx_ring[i];
335 if (ring->need_refill) {
336 if (mlx4_en_fill_rx_buf(dev, ring)) {
337 ring->need_refill = 0;
338 mlx4_en_update_rx_prod_db(ring);
339 } else
340 need_refill = 1;
341 }
342 }
343 if (need_refill)
344 queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);
345
346out:
347 mutex_unlock(&mdev->state_lock);
348}
349
350
351int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, 290int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
352 struct mlx4_en_rx_ring *ring, u32 size, u16 stride) 291 struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
353{ 292{
@@ -457,9 +396,6 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
457 ring_ind--; 396 ring_ind--;
458 goto err_allocator; 397 goto err_allocator;
459 } 398 }
460
461 /* Fill Rx buffers */
462 ring->full = 0;
463 } 399 }
464 err = mlx4_en_fill_rx_buffers(priv); 400 err = mlx4_en_fill_rx_buffers(priv);
465 if (err) 401 if (err)
@@ -647,33 +583,6 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
647 return skb; 583 return skb;
648} 584}
649 585
650static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
651 struct mlx4_en_rx_ring *ring,
652 int from, int to, int num)
653{
654 struct skb_frag_struct *skb_frags_from;
655 struct skb_frag_struct *skb_frags_to;
656 struct mlx4_en_rx_desc *rx_desc_from;
657 struct mlx4_en_rx_desc *rx_desc_to;
658 int from_index, to_index;
659 int nr, i;
660
661 for (i = 0; i < num; i++) {
662 from_index = (from + i) & ring->size_mask;
663 to_index = (to + i) & ring->size_mask;
664 skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info);
665 skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info);
666 rx_desc_from = ring->buf + (from_index << ring->log_stride);
667 rx_desc_to = ring->buf + (to_index << ring->log_stride);
668
669 for (nr = 0; nr < priv->num_frags; nr++) {
670 skb_frags_to[nr].page = skb_frags_from[nr].page;
671 skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset;
672 rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr;
673 }
674 }
675}
676
677 586
678int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) 587int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
679{ 588{
@@ -821,11 +730,6 @@ out:
821 wmb(); /* ensure HW sees CQ consumer before we post new buffers */ 730 wmb(); /* ensure HW sees CQ consumer before we post new buffers */
822 ring->cons = cq->mcq.cons_index; 731 ring->cons = cq->mcq.cons_index;
823 ring->prod += polled; /* Polled descriptors were realocated in place */ 732 ring->prod += polled; /* Polled descriptors were realocated in place */
824 if (unlikely(!ring->full)) {
825 mlx4_en_copy_desc(priv, ring, ring->cons - polled,
826 ring->prod - polled, polled);
827 mlx4_en_fill_rx_buf(dev, ring);
828 }
829 mlx4_en_update_rx_prod_db(ring); 733 mlx4_en_update_rx_prod_db(ring);
830 return polled; 734 return polled;
831} 735}
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 5dc7466ad035..08c43f2ae72b 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -515,16 +515,9 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
515 else { 515 else {
516 if (netif_msg_tx_err(priv)) 516 if (netif_msg_tx_err(priv))
517 en_warn(priv, "Non-linear headers\n"); 517 en_warn(priv, "Non-linear headers\n");
518 dev_kfree_skb_any(skb);
519 return 0; 518 return 0;
520 } 519 }
521 } 520 }
522 if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
523 if (netif_msg_tx_err(priv))
524 en_warn(priv, "LSO header size too big\n");
525 dev_kfree_skb_any(skb);
526 return 0;
527 }
528 } else { 521 } else {
529 *lso_header_size = 0; 522 *lso_header_size = 0;
530 if (!is_inline(skb, NULL)) 523 if (!is_inline(skb, NULL))
@@ -616,13 +609,9 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
616 int lso_header_size; 609 int lso_header_size;
617 void *fragptr; 610 void *fragptr;
618 611
619 if (unlikely(!skb->len)) {
620 dev_kfree_skb_any(skb);
621 return NETDEV_TX_OK;
622 }
623 real_size = get_real_size(skb, dev, &lso_header_size); 612 real_size = get_real_size(skb, dev, &lso_header_size);
624 if (unlikely(!real_size)) 613 if (unlikely(!real_size))
625 return NETDEV_TX_OK; 614 goto tx_drop;
626 615
627 /* Allign descriptor to TXBB size */ 616 /* Allign descriptor to TXBB size */
628 desc_size = ALIGN(real_size, TXBB_SIZE); 617 desc_size = ALIGN(real_size, TXBB_SIZE);
@@ -630,8 +619,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
630 if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { 619 if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
631 if (netif_msg_tx_err(priv)) 620 if (netif_msg_tx_err(priv))
632 en_warn(priv, "Oversized header or SG list\n"); 621 en_warn(priv, "Oversized header or SG list\n");
633 dev_kfree_skb_any(skb); 622 goto tx_drop;
634 return NETDEV_TX_OK;
635 } 623 }
636 624
637 tx_ind = skb->queue_mapping; 625 tx_ind = skb->queue_mapping;
@@ -653,14 +641,6 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
653 return NETDEV_TX_BUSY; 641 return NETDEV_TX_BUSY;
654 } 642 }
655 643
656 /* Now that we know what Tx ring to use */
657 if (unlikely(!priv->port_up)) {
658 if (netif_msg_tx_err(priv))
659 en_warn(priv, "xmit: port down!\n");
660 dev_kfree_skb_any(skb);
661 return NETDEV_TX_OK;
662 }
663
664 /* Track current inflight packets for performance analysis */ 644 /* Track current inflight packets for performance analysis */
665 AVG_PERF_COUNTER(priv->pstats.inflight_avg, 645 AVG_PERF_COUNTER(priv->pstats.inflight_avg,
666 (u32) (ring->prod - ring->cons - 1)); 646 (u32) (ring->prod - ring->cons - 1));
@@ -785,5 +765,10 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
785 mlx4_en_xmit_poll(priv, tx_ind); 765 mlx4_en_xmit_poll(priv, tx_ind);
786 766
787 return 0; 767 return 0;
768
769tx_drop:
770 dev_kfree_skb_any(skb);
771 priv->stats.tx_dropped++;
772 return NETDEV_TX_OK;
788} 773}
789 774
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index d43a9e4c2aea..c7c5e86804ff 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -99,7 +99,6 @@
99#define RSS_FACTOR 2 99#define RSS_FACTOR 2
100#define TXBB_SIZE 64 100#define TXBB_SIZE 64
101#define HEADROOM (2048 / TXBB_SIZE + 1) 101#define HEADROOM (2048 / TXBB_SIZE + 1)
102#define MAX_LSO_HDR_SIZE 92
103#define STAMP_STRIDE 64 102#define STAMP_STRIDE 64
104#define STAMP_DWORDS (STAMP_STRIDE / 4) 103#define STAMP_DWORDS (STAMP_STRIDE / 4)
105#define STAMP_SHIFT 31 104#define STAMP_SHIFT 31
@@ -296,8 +295,6 @@ struct mlx4_en_rx_ring {
296 u32 prod; 295 u32 prod;
297 u32 cons; 296 u32 cons;
298 u32 buf_size; 297 u32 buf_size;
299 int need_refill;
300 int full;
301 void *buf; 298 void *buf;
302 void *rx_info; 299 void *rx_info;
303 unsigned long bytes; 300 unsigned long bytes;
@@ -495,7 +492,6 @@ struct mlx4_en_priv {
495 struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; 492 struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
496 struct work_struct mcast_task; 493 struct work_struct mcast_task;
497 struct work_struct mac_task; 494 struct work_struct mac_task;
498 struct delayed_work refill_task;
499 struct work_struct watchdog_task; 495 struct work_struct watchdog_task;
500 struct work_struct linkstate_task; 496 struct work_struct linkstate_task;
501 struct delayed_work stats_task; 497 struct delayed_work stats_task;
@@ -565,7 +561,6 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
565int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv); 561int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
566void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv); 562void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
567int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring); 563int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
568void mlx4_en_rx_refill(struct work_struct *work);
569void mlx4_en_rx_irq(struct mlx4_cq *mcq); 564void mlx4_en_rx_irq(struct mlx4_cq *mcq);
570 565
571int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); 566int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 745ae8b4a2e8..0f32db3e92ad 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1750,12 +1750,12 @@ static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
1750 1750
1751 uc_addr_set(mp, dev->dev_addr); 1751 uc_addr_set(mp, dev->dev_addr);
1752 1752
1753 port_config = rdlp(mp, PORT_CONFIG); 1753 port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
1754
1754 nibbles = uc_addr_filter_mask(dev); 1755 nibbles = uc_addr_filter_mask(dev);
1755 if (!nibbles) { 1756 if (!nibbles) {
1756 port_config |= UNICAST_PROMISCUOUS_MODE; 1757 port_config |= UNICAST_PROMISCUOUS_MODE;
1757 wrlp(mp, PORT_CONFIG, port_config); 1758 nibbles = 0xffff;
1758 return;
1759 } 1759 }
1760 1760
1761 for (i = 0; i < 16; i += 4) { 1761 for (i = 0; i < 16; i += 4) {
@@ -1776,7 +1776,6 @@ static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
1776 wrl(mp, off, v); 1776 wrl(mp, off, v);
1777 } 1777 }
1778 1778
1779 port_config &= ~UNICAST_PROMISCUOUS_MODE;
1780 wrlp(mp, PORT_CONFIG, port_config); 1779 wrlp(mp, PORT_CONFIG, port_config);
1781} 1780}
1782 1781
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 6de8399d6dd9..17c116bb332c 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -356,7 +356,6 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
356 if (!skb_queue_empty(&ap->rqueue)) 356 if (!skb_queue_empty(&ap->rqueue))
357 tasklet_schedule(&ap->tsk); 357 tasklet_schedule(&ap->tsk);
358 ap_put(ap); 358 ap_put(ap);
359 tty_unthrottle(tty);
360} 359}
361 360
362static void 361static void
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index d2fa2db13586..aa3d39f38e22 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -397,7 +397,6 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
397 if (!skb_queue_empty(&ap->rqueue)) 397 if (!skb_queue_empty(&ap->rqueue))
398 tasklet_schedule(&ap->tsk); 398 tasklet_schedule(&ap->tsk);
399 sp_put(ap); 399 sp_put(ap);
400 tty_unthrottle(tty);
401} 400}
402 401
403static void 402static void
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 8a823ecc99a9..bbc6d4d3cc94 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -3837,7 +3837,9 @@ static void ql_reset_work(struct work_struct *work)
3837 16) | ISP_CONTROL_RI)); 3837 16) | ISP_CONTROL_RI));
3838 } 3838 }
3839 3839
3840 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3840 ssleep(1); 3841 ssleep(1);
3842 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3841 } while (--max_wait_time); 3843 } while (--max_wait_time);
3842 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3844 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3843 3845
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 4e22462684c9..4b53b58d75fc 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -51,9 +51,6 @@
51#define TX_BUFFS_AVAIL(tp) \ 51#define TX_BUFFS_AVAIL(tp) \
52 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) 52 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
53 53
54/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
55static const int max_interrupt_work = 20;
56
57/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). 54/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
58 The RTL chips use a 64 element hash table based on the Ethernet CRC. */ 55 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
59static const int multicast_filter_limit = 32; 56static const int multicast_filter_limit = 32;
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c
new file mode 100644
index 000000000000..5345e47b35ac
--- /dev/null
+++ b/drivers/net/s6gmac.c
@@ -0,0 +1,1073 @@
1/*
2 * Ethernet driver for S6105 on chip network device
3 * (c)2008 emlix GmbH http://www.emlix.com
4 * Authors: Oskar Schirmer <os@emlix.com>
5 * Daniel Gloeckner <dg@emlix.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/interrupt.h>
15#include <linux/types.h>
16#include <linux/delay.h>
17#include <linux/init.h>
18#include <linux/spinlock.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/if.h>
22#include <linux/stddef.h>
23#include <linux/mii.h>
24#include <linux/phy.h>
25#include <linux/platform_device.h>
26#include <variant/hardware.h>
27#include <variant/dmac.h>
28
29#define DRV_NAME "s6gmac"
30#define DRV_PRMT DRV_NAME ": "
31
32
33/* register declarations */
34
35#define S6_GMAC_MACCONF1 0x000
36#define S6_GMAC_MACCONF1_TXENA 0
37#define S6_GMAC_MACCONF1_SYNCTX 1
38#define S6_GMAC_MACCONF1_RXENA 2
39#define S6_GMAC_MACCONF1_SYNCRX 3
40#define S6_GMAC_MACCONF1_TXFLOWCTRL 4
41#define S6_GMAC_MACCONF1_RXFLOWCTRL 5
42#define S6_GMAC_MACCONF1_LOOPBACK 8
43#define S6_GMAC_MACCONF1_RESTXFUNC 16
44#define S6_GMAC_MACCONF1_RESRXFUNC 17
45#define S6_GMAC_MACCONF1_RESTXMACCTRL 18
46#define S6_GMAC_MACCONF1_RESRXMACCTRL 19
47#define S6_GMAC_MACCONF1_SIMULRES 30
48#define S6_GMAC_MACCONF1_SOFTRES 31
49#define S6_GMAC_MACCONF2 0x004
50#define S6_GMAC_MACCONF2_FULL 0
51#define S6_GMAC_MACCONF2_CRCENA 1
52#define S6_GMAC_MACCONF2_PADCRCENA 2
53#define S6_GMAC_MACCONF2_LENGTHFCHK 4
54#define S6_GMAC_MACCONF2_HUGEFRAMENA 5
55#define S6_GMAC_MACCONF2_IFMODE 8
56#define S6_GMAC_MACCONF2_IFMODE_NIBBLE 1
57#define S6_GMAC_MACCONF2_IFMODE_BYTE 2
58#define S6_GMAC_MACCONF2_IFMODE_MASK 3
59#define S6_GMAC_MACCONF2_PREAMBLELEN 12
60#define S6_GMAC_MACCONF2_PREAMBLELEN_MASK 0x0F
61#define S6_GMAC_MACIPGIFG 0x008
62#define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0
63#define S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK 0x7F
64#define S6_GMAC_MACIPGIFG_MINIFGENFORCE 8
65#define S6_GMAC_MACIPGIFG_B2BINTERPGAP2 16
66#define S6_GMAC_MACIPGIFG_B2BINTERPGAP1 24
67#define S6_GMAC_MACHALFDUPLEX 0x00C
68#define S6_GMAC_MACHALFDUPLEX_COLLISWIN 0
69#define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK 0x3F
70#define S6_GMAC_MACHALFDUPLEX_RETXMAX 12
71#define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK 0x0F
72#define S6_GMAC_MACHALFDUPLEX_EXCESSDEF 16
73#define S6_GMAC_MACHALFDUPLEX_NOBACKOFF 17
74#define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF 18
75#define S6_GMAC_MACHALFDUPLEX_ALTBEBENA 19
76#define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN 20
77#define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK 0x0F
78#define S6_GMAC_MACMAXFRAMELEN 0x010
79#define S6_GMAC_MACMIICONF 0x020
80#define S6_GMAC_MACMIICONF_CSEL 0
81#define S6_GMAC_MACMIICONF_CSEL_DIV10 0
82#define S6_GMAC_MACMIICONF_CSEL_DIV12 1
83#define S6_GMAC_MACMIICONF_CSEL_DIV14 2
84#define S6_GMAC_MACMIICONF_CSEL_DIV18 3
85#define S6_GMAC_MACMIICONF_CSEL_DIV24 4
86#define S6_GMAC_MACMIICONF_CSEL_DIV34 5
87#define S6_GMAC_MACMIICONF_CSEL_DIV68 6
88#define S6_GMAC_MACMIICONF_CSEL_DIV168 7
89#define S6_GMAC_MACMIICONF_CSEL_MASK 7
90#define S6_GMAC_MACMIICONF_PREAMBLESUPR 4
91#define S6_GMAC_MACMIICONF_SCANAUTOINCR 5
92#define S6_GMAC_MACMIICMD 0x024
93#define S6_GMAC_MACMIICMD_READ 0
94#define S6_GMAC_MACMIICMD_SCAN 1
95#define S6_GMAC_MACMIIADDR 0x028
96#define S6_GMAC_MACMIIADDR_REG 0
97#define S6_GMAC_MACMIIADDR_REG_MASK 0x1F
98#define S6_GMAC_MACMIIADDR_PHY 8
99#define S6_GMAC_MACMIIADDR_PHY_MASK 0x1F
100#define S6_GMAC_MACMIICTRL 0x02C
101#define S6_GMAC_MACMIISTAT 0x030
102#define S6_GMAC_MACMIIINDI 0x034
103#define S6_GMAC_MACMIIINDI_BUSY 0
104#define S6_GMAC_MACMIIINDI_SCAN 1
105#define S6_GMAC_MACMIIINDI_INVAL 2
106#define S6_GMAC_MACINTERFSTAT 0x03C
107#define S6_GMAC_MACINTERFSTAT_LINKFAIL 3
108#define S6_GMAC_MACINTERFSTAT_EXCESSDEF 9
109#define S6_GMAC_MACSTATADDR1 0x040
110#define S6_GMAC_MACSTATADDR2 0x044
111
112#define S6_GMAC_FIFOCONF0 0x048
113#define S6_GMAC_FIFOCONF0_HSTRSTWT 0
114#define S6_GMAC_FIFOCONF0_HSTRSTSR 1
115#define S6_GMAC_FIFOCONF0_HSTRSTFR 2
116#define S6_GMAC_FIFOCONF0_HSTRSTST 3
117#define S6_GMAC_FIFOCONF0_HSTRSTFT 4
118#define S6_GMAC_FIFOCONF0_WTMENREQ 8
119#define S6_GMAC_FIFOCONF0_SRFENREQ 9
120#define S6_GMAC_FIFOCONF0_FRFENREQ 10
121#define S6_GMAC_FIFOCONF0_STFENREQ 11
122#define S6_GMAC_FIFOCONF0_FTFENREQ 12
123#define S6_GMAC_FIFOCONF0_WTMENRPLY 16
124#define S6_GMAC_FIFOCONF0_SRFENRPLY 17
125#define S6_GMAC_FIFOCONF0_FRFENRPLY 18
126#define S6_GMAC_FIFOCONF0_STFENRPLY 19
127#define S6_GMAC_FIFOCONF0_FTFENRPLY 20
128#define S6_GMAC_FIFOCONF1 0x04C
129#define S6_GMAC_FIFOCONF2 0x050
130#define S6_GMAC_FIFOCONF2_CFGLWM 0
131#define S6_GMAC_FIFOCONF2_CFGHWM 16
132#define S6_GMAC_FIFOCONF3 0x054
133#define S6_GMAC_FIFOCONF3_CFGFTTH 0
134#define S6_GMAC_FIFOCONF3_CFGHWMFT 16
135#define S6_GMAC_FIFOCONF4 0x058
136#define S6_GMAC_FIFOCONF_RSV_PREVDROP 0
137#define S6_GMAC_FIFOCONF_RSV_RUNT 1
138#define S6_GMAC_FIFOCONF_RSV_FALSECAR 2
139#define S6_GMAC_FIFOCONF_RSV_CODEERR 3
140#define S6_GMAC_FIFOCONF_RSV_CRCERR 4
141#define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5
142#define S6_GMAC_FIFOCONF_RSV_LENRANGE 6
143#define S6_GMAC_FIFOCONF_RSV_OK 7
144#define S6_GMAC_FIFOCONF_RSV_MULTICAST 8
145#define S6_GMAC_FIFOCONF_RSV_BROADCAST 9
146#define S6_GMAC_FIFOCONF_RSV_DRIBBLE 10
147#define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11
148#define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12
149#define S6_GMAC_FIFOCONF_RSV_UNOPCODE 13
150#define S6_GMAC_FIFOCONF_RSV_VLANTAG 14
151#define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15
152#define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16
153#define S6_GMAC_FIFOCONF_RSV_MASK 0x3FFFF
154#define S6_GMAC_FIFOCONF5 0x05C
155#define S6_GMAC_FIFOCONF5_DROPLT64 18
156#define S6_GMAC_FIFOCONF5_CFGBYTM 19
157#define S6_GMAC_FIFOCONF5_RXDROPSIZE 20
158#define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK 0xF
159
160#define S6_GMAC_STAT_REGS 0x080
161#define S6_GMAC_STAT_SIZE_MIN 12
162#define S6_GMAC_STATTR64 0x080
163#define S6_GMAC_STATTR64_SIZE 18
164#define S6_GMAC_STATTR127 0x084
165#define S6_GMAC_STATTR127_SIZE 18
166#define S6_GMAC_STATTR255 0x088
167#define S6_GMAC_STATTR255_SIZE 18
168#define S6_GMAC_STATTR511 0x08C
169#define S6_GMAC_STATTR511_SIZE 18
170#define S6_GMAC_STATTR1K 0x090
171#define S6_GMAC_STATTR1K_SIZE 18
172#define S6_GMAC_STATTRMAX 0x094
173#define S6_GMAC_STATTRMAX_SIZE 18
174#define S6_GMAC_STATTRMGV 0x098
175#define S6_GMAC_STATTRMGV_SIZE 18
176#define S6_GMAC_STATRBYT 0x09C
177#define S6_GMAC_STATRBYT_SIZE 24
178#define S6_GMAC_STATRPKT 0x0A0
179#define S6_GMAC_STATRPKT_SIZE 18
180#define S6_GMAC_STATRFCS 0x0A4
181#define S6_GMAC_STATRFCS_SIZE 12
182#define S6_GMAC_STATRMCA 0x0A8
183#define S6_GMAC_STATRMCA_SIZE 18
184#define S6_GMAC_STATRBCA 0x0AC
185#define S6_GMAC_STATRBCA_SIZE 22
186#define S6_GMAC_STATRXCF 0x0B0
187#define S6_GMAC_STATRXCF_SIZE 18
188#define S6_GMAC_STATRXPF 0x0B4
189#define S6_GMAC_STATRXPF_SIZE 12
190#define S6_GMAC_STATRXUO 0x0B8
191#define S6_GMAC_STATRXUO_SIZE 12
192#define S6_GMAC_STATRALN 0x0BC
193#define S6_GMAC_STATRALN_SIZE 12
194#define S6_GMAC_STATRFLR 0x0C0
195#define S6_GMAC_STATRFLR_SIZE 16
196#define S6_GMAC_STATRCDE 0x0C4
197#define S6_GMAC_STATRCDE_SIZE 12
198#define S6_GMAC_STATRCSE 0x0C8
199#define S6_GMAC_STATRCSE_SIZE 12
200#define S6_GMAC_STATRUND 0x0CC
201#define S6_GMAC_STATRUND_SIZE 12
202#define S6_GMAC_STATROVR 0x0D0
203#define S6_GMAC_STATROVR_SIZE 12
204#define S6_GMAC_STATRFRG 0x0D4
205#define S6_GMAC_STATRFRG_SIZE 12
206#define S6_GMAC_STATRJBR 0x0D8
207#define S6_GMAC_STATRJBR_SIZE 12
208#define S6_GMAC_STATRDRP 0x0DC
209#define S6_GMAC_STATRDRP_SIZE 12
210#define S6_GMAC_STATTBYT 0x0E0
211#define S6_GMAC_STATTBYT_SIZE 24
212#define S6_GMAC_STATTPKT 0x0E4
213#define S6_GMAC_STATTPKT_SIZE 18
214#define S6_GMAC_STATTMCA 0x0E8
215#define S6_GMAC_STATTMCA_SIZE 18
216#define S6_GMAC_STATTBCA 0x0EC
217#define S6_GMAC_STATTBCA_SIZE 18
218#define S6_GMAC_STATTXPF 0x0F0
219#define S6_GMAC_STATTXPF_SIZE 12
220#define S6_GMAC_STATTDFR 0x0F4
221#define S6_GMAC_STATTDFR_SIZE 12
222#define S6_GMAC_STATTEDF 0x0F8
223#define S6_GMAC_STATTEDF_SIZE 12
224#define S6_GMAC_STATTSCL 0x0FC
225#define S6_GMAC_STATTSCL_SIZE 12
226#define S6_GMAC_STATTMCL 0x100
227#define S6_GMAC_STATTMCL_SIZE 12
228#define S6_GMAC_STATTLCL 0x104
229#define S6_GMAC_STATTLCL_SIZE 12
230#define S6_GMAC_STATTXCL 0x108
231#define S6_GMAC_STATTXCL_SIZE 12
232#define S6_GMAC_STATTNCL 0x10C
233#define S6_GMAC_STATTNCL_SIZE 13
234#define S6_GMAC_STATTPFH 0x110
235#define S6_GMAC_STATTPFH_SIZE 12
236#define S6_GMAC_STATTDRP 0x114
237#define S6_GMAC_STATTDRP_SIZE 12
238#define S6_GMAC_STATTJBR 0x118
239#define S6_GMAC_STATTJBR_SIZE 12
240#define S6_GMAC_STATTFCS 0x11C
241#define S6_GMAC_STATTFCS_SIZE 12
242#define S6_GMAC_STATTXCF 0x120
243#define S6_GMAC_STATTXCF_SIZE 12
244#define S6_GMAC_STATTOVR 0x124
245#define S6_GMAC_STATTOVR_SIZE 12
246#define S6_GMAC_STATTUND 0x128
247#define S6_GMAC_STATTUND_SIZE 12
248#define S6_GMAC_STATTFRG 0x12C
249#define S6_GMAC_STATTFRG_SIZE 12
250#define S6_GMAC_STATCARRY(n) (0x130 + 4*(n))
251#define S6_GMAC_STATCARRYMSK(n) (0x138 + 4*(n))
252#define S6_GMAC_STATCARRY1_RDRP 0
253#define S6_GMAC_STATCARRY1_RJBR 1
254#define S6_GMAC_STATCARRY1_RFRG 2
255#define S6_GMAC_STATCARRY1_ROVR 3
256#define S6_GMAC_STATCARRY1_RUND 4
257#define S6_GMAC_STATCARRY1_RCSE 5
258#define S6_GMAC_STATCARRY1_RCDE 6
259#define S6_GMAC_STATCARRY1_RFLR 7
260#define S6_GMAC_STATCARRY1_RALN 8
261#define S6_GMAC_STATCARRY1_RXUO 9
262#define S6_GMAC_STATCARRY1_RXPF 10
263#define S6_GMAC_STATCARRY1_RXCF 11
264#define S6_GMAC_STATCARRY1_RBCA 12
265#define S6_GMAC_STATCARRY1_RMCA 13
266#define S6_GMAC_STATCARRY1_RFCS 14
267#define S6_GMAC_STATCARRY1_RPKT 15
268#define S6_GMAC_STATCARRY1_RBYT 16
269#define S6_GMAC_STATCARRY1_TRMGV 25
270#define S6_GMAC_STATCARRY1_TRMAX 26
271#define S6_GMAC_STATCARRY1_TR1K 27
272#define S6_GMAC_STATCARRY1_TR511 28
273#define S6_GMAC_STATCARRY1_TR255 29
274#define S6_GMAC_STATCARRY1_TR127 30
275#define S6_GMAC_STATCARRY1_TR64 31
276#define S6_GMAC_STATCARRY2_TDRP 0
277#define S6_GMAC_STATCARRY2_TPFH 1
278#define S6_GMAC_STATCARRY2_TNCL 2
279#define S6_GMAC_STATCARRY2_TXCL 3
280#define S6_GMAC_STATCARRY2_TLCL 4
281#define S6_GMAC_STATCARRY2_TMCL 5
282#define S6_GMAC_STATCARRY2_TSCL 6
283#define S6_GMAC_STATCARRY2_TEDF 7
284#define S6_GMAC_STATCARRY2_TDFR 8
285#define S6_GMAC_STATCARRY2_TXPF 9
286#define S6_GMAC_STATCARRY2_TBCA 10
287#define S6_GMAC_STATCARRY2_TMCA 11
288#define S6_GMAC_STATCARRY2_TPKT 12
289#define S6_GMAC_STATCARRY2_TBYT 13
290#define S6_GMAC_STATCARRY2_TFRG 14
291#define S6_GMAC_STATCARRY2_TUND 15
292#define S6_GMAC_STATCARRY2_TOVR 16
293#define S6_GMAC_STATCARRY2_TXCF 17
294#define S6_GMAC_STATCARRY2_TFCS 18
295#define S6_GMAC_STATCARRY2_TJBR 19
296
297#define S6_GMAC_HOST_PBLKCTRL 0x140
298#define S6_GMAC_HOST_PBLKCTRL_TXENA 0
299#define S6_GMAC_HOST_PBLKCTRL_RXENA 1
300#define S6_GMAC_HOST_PBLKCTRL_TXSRES 2
301#define S6_GMAC_HOST_PBLKCTRL_RXSRES 3
302#define S6_GMAC_HOST_PBLKCTRL_TXBSIZ 8
303#define S6_GMAC_HOST_PBLKCTRL_RXBSIZ 12
304#define S6_GMAC_HOST_PBLKCTRL_SIZ_16 4
305#define S6_GMAC_HOST_PBLKCTRL_SIZ_32 5
306#define S6_GMAC_HOST_PBLKCTRL_SIZ_64 6
307#define S6_GMAC_HOST_PBLKCTRL_SIZ_128 7
308#define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK 0xF
309#define S6_GMAC_HOST_PBLKCTRL_STATENA 16
310#define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ 17
311#define S6_GMAC_HOST_PBLKCTRL_STATCLEAR 18
312#define S6_GMAC_HOST_PBLKCTRL_RGMII 19
313#define S6_GMAC_HOST_INTMASK 0x144
314#define S6_GMAC_HOST_INTSTAT 0x148
315#define S6_GMAC_HOST_INT_TXBURSTOVER 3
316#define S6_GMAC_HOST_INT_TXPREWOVER 4
317#define S6_GMAC_HOST_INT_RXBURSTUNDER 5
318#define S6_GMAC_HOST_INT_RXPOSTRFULL 6
319#define S6_GMAC_HOST_INT_RXPOSTRUNDER 7
320#define S6_GMAC_HOST_RXFIFOHWM 0x14C
321#define S6_GMAC_HOST_CTRLFRAMXP 0x150
322#define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n))
323#define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n))
324#define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n))
325#define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n))
326
327#define S6_GMAC_BURST_PREWR 0x1B0
328#define S6_GMAC_BURST_PREWR_LEN 0
329#define S6_GMAC_BURST_PREWR_LEN_MASK ((1 << 20) - 1)
330#define S6_GMAC_BURST_PREWR_CFE 20
331#define S6_GMAC_BURST_PREWR_PPE 21
332#define S6_GMAC_BURST_PREWR_FCS 22
333#define S6_GMAC_BURST_PREWR_PAD 23
334#define S6_GMAC_BURST_POSTRD 0x1D0
335#define S6_GMAC_BURST_POSTRD_LEN 0
336#define S6_GMAC_BURST_POSTRD_LEN_MASK ((1 << 20) - 1)
337#define S6_GMAC_BURST_POSTRD_DROP 20
338
339
340/* data handling */
341
342#define S6_NUM_TX_SKB 8 /* must be larger than TX fifo size */
343#define S6_NUM_RX_SKB 16
344#define S6_MAX_FRLEN 1536
345
346struct s6gmac {
347 u32 reg;
348 u32 tx_dma;
349 u32 rx_dma;
350 u32 io;
351 u8 tx_chan;
352 u8 rx_chan;
353 spinlock_t lock;
354 u8 tx_skb_i, tx_skb_o;
355 u8 rx_skb_i, rx_skb_o;
356 struct sk_buff *tx_skb[S6_NUM_TX_SKB];
357 struct sk_buff *rx_skb[S6_NUM_RX_SKB];
358 unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)];
359 unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)];
360 struct phy_device *phydev;
361 struct {
362 struct mii_bus *bus;
363 int irq[PHY_MAX_ADDR];
364 } mii;
365 struct {
366 unsigned int mbit;
367 u8 giga;
368 u8 isup;
369 u8 full;
370 } link;
371};
372
373static void s6gmac_rx_fillfifo(struct s6gmac *pd)
374{
375 struct sk_buff *skb;
376 while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB)
377 && (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan))
378 && (skb = dev_alloc_skb(S6_MAX_FRLEN + 2))) {
379 pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb;
380 s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan,
381 pd->io, (u32)skb->data, S6_MAX_FRLEN);
382 }
383}
384
385static void s6gmac_rx_interrupt(struct net_device *dev)
386{
387 struct s6gmac *pd = netdev_priv(dev);
388 u32 pfx;
389 struct sk_buff *skb;
390 while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) >
391 s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) {
392 skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB];
393 pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD);
394 if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) {
395 dev_kfree_skb_irq(skb);
396 } else {
397 skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
398 & S6_GMAC_BURST_POSTRD_LEN_MASK);
399 skb->dev = dev;
400 skb->protocol = eth_type_trans(skb, dev);
401 skb->ip_summed = CHECKSUM_UNNECESSARY;
402 netif_rx(skb);
403 }
404 }
405}
406
407static void s6gmac_tx_interrupt(struct net_device *dev)
408{
409 struct s6gmac *pd = netdev_priv(dev);
410 while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >
411 s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) {
412 dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
413 }
414 if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
415 netif_wake_queue(dev);
416}
417
418struct s6gmac_statinf {
419 unsigned reg_size : 4; /* 0: unused */
420 unsigned reg_off : 6;
421 unsigned net_index : 6;
422};
423
424#define S6_STATS_B (8 * sizeof(u32))
425#define S6_STATS_C(b, r, f) [b] = { \
426 BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \
427 BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \
428 >= (1<<4)) + \
429 r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \
430 BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / sizeof(u32))) \
431 >= ((1<<6)-1)) + \
432 (r - S6_GMAC_STAT_REGS) / sizeof(u32), \
433 BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \
434 % sizeof(unsigned long)) + \
435 BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \
436 / sizeof(unsigned long)) >= (1<<6))) + \
437 BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \
438 != sizeof(unsigned long))) + \
439 (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)},
440
441static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { {
442 S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes)
443 S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets)
444 S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors)
445 S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast)
446 S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors)
447 S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors)
448 S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors)
449 S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors)
450 S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors)
451 S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors)
452 S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, rx_crc_errors)
453 S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped)
454}, {
455 S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes)
456 S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets)
457 S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors)
458 S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors)
459 S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions)
460 S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped)
461 S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors)
462 S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors)
463 S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors)
464 S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors)
465 S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors)
466} };
467
468static void s6gmac_stats_collect(struct s6gmac *pd,
469 const struct s6gmac_statinf *inf)
470{
471 int b;
472 for (b = 0; b < S6_STATS_B; b++) {
473 if (inf[b].reg_size) {
474 pd->stats[inf[b].net_index] +=
475 readl(pd->reg + S6_GMAC_STAT_REGS
476 + sizeof(u32) * inf[b].reg_off);
477 }
478 }
479}
480
481static void s6gmac_stats_carry(struct s6gmac *pd,
482 const struct s6gmac_statinf *inf, u32 mask)
483{
484 int b;
485 while (mask) {
486 b = fls(mask) - 1;
487 mask &= ~(1 << b);
488 pd->carry[inf[b].net_index] += (1 << inf[b].reg_size);
489 }
490}
491
492static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry)
493{
494 int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) &
495 ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry));
496 return r;
497}
498
499static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry)
500{
501 u32 mask;
502 mask = s6gmac_stats_pending(pd, carry);
503 if (mask) {
504 writel(mask, pd->reg + S6_GMAC_STATCARRY(carry));
505 s6gmac_stats_carry(pd, &statinf[carry][0], mask);
506 }
507}
508
509static irqreturn_t s6gmac_interrupt(int irq, void *dev_id)
510{
511 struct net_device *dev = (struct net_device *)dev_id;
512 struct s6gmac *pd = netdev_priv(dev);
513 if (!dev)
514 return IRQ_NONE;
515 spin_lock(&pd->lock);
516 if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan))
517 s6gmac_rx_interrupt(dev);
518 s6gmac_rx_fillfifo(pd);
519 if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan))
520 s6gmac_tx_interrupt(dev);
521 s6gmac_stats_interrupt(pd, 0);
522 s6gmac_stats_interrupt(pd, 1);
523 spin_unlock(&pd->lock);
524 return IRQ_HANDLED;
525}
526
527static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n,
528 u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi)
529{
530 writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n));
531 writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n));
532 writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n));
533 writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n));
534}
535
536static inline void s6gmac_stop_device(struct net_device *dev)
537{
538 struct s6gmac *pd = netdev_priv(dev);
539 writel(0, pd->reg + S6_GMAC_MACCONF1);
540}
541
542static inline void s6gmac_init_device(struct net_device *dev)
543{
544 struct s6gmac *pd = netdev_priv(dev);
545 int is_rgmii = !!(pd->phydev->supported
546 & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half));
547#if 0
548 writel(1 << S6_GMAC_MACCONF1_SYNCTX |
549 1 << S6_GMAC_MACCONF1_SYNCRX |
550 1 << S6_GMAC_MACCONF1_TXFLOWCTRL |
551 1 << S6_GMAC_MACCONF1_RXFLOWCTRL |
552 1 << S6_GMAC_MACCONF1_RESTXFUNC |
553 1 << S6_GMAC_MACCONF1_RESRXFUNC |
554 1 << S6_GMAC_MACCONF1_RESTXMACCTRL |
555 1 << S6_GMAC_MACCONF1_RESRXMACCTRL,
556 pd->reg + S6_GMAC_MACCONF1);
557#endif
558 writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1);
559 udelay(1000);
560 writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA,
561 pd->reg + S6_GMAC_MACCONF1);
562 writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES |
563 1 << S6_GMAC_HOST_PBLKCTRL_RXSRES,
564 pd->reg + S6_GMAC_HOST_PBLKCTRL);
565 writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
566 S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
567 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
568 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
569 is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
570 pd->reg + S6_GMAC_HOST_PBLKCTRL);
571 writel(1 << S6_GMAC_MACCONF1_TXENA |
572 1 << S6_GMAC_MACCONF1_RXENA |
573 (dev->flags & IFF_LOOPBACK ? 1 : 0)
574 << S6_GMAC_MACCONF1_LOOPBACK,
575 pd->reg + S6_GMAC_MACCONF1);
576 writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN-ETH_FCS_LEN)) ?
577 dev->mtu+ETH_HLEN+ETH_FCS_LEN : S6_MAX_FRLEN,
578 pd->reg + S6_GMAC_MACMAXFRAMELEN);
579 writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL |
580 1 << S6_GMAC_MACCONF2_PADCRCENA |
581 1 << S6_GMAC_MACCONF2_LENGTHFCHK |
582 (pd->link.giga ?
583 S6_GMAC_MACCONF2_IFMODE_BYTE :
584 S6_GMAC_MACCONF2_IFMODE_NIBBLE)
585 << S6_GMAC_MACCONF2_IFMODE |
586 7 << S6_GMAC_MACCONF2_PREAMBLELEN,
587 pd->reg + S6_GMAC_MACCONF2);
588 writel(0, pd->reg + S6_GMAC_MACSTATADDR1);
589 writel(0, pd->reg + S6_GMAC_MACSTATADDR2);
590 writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ |
591 1 << S6_GMAC_FIFOCONF0_SRFENREQ |
592 1 << S6_GMAC_FIFOCONF0_FRFENREQ |
593 1 << S6_GMAC_FIFOCONF0_STFENREQ |
594 1 << S6_GMAC_FIFOCONF0_FTFENREQ,
595 pd->reg + S6_GMAC_FIFOCONF0);
596 writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH |
597 128 << S6_GMAC_FIFOCONF3_CFGHWMFT,
598 pd->reg + S6_GMAC_FIFOCONF3);
599 writel((S6_GMAC_FIFOCONF_RSV_MASK & ~(
600 1 << S6_GMAC_FIFOCONF_RSV_RUNT |
601 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
602 1 << S6_GMAC_FIFOCONF_RSV_OK |
603 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
604 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
605 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
606 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
607 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) |
608 1 << S6_GMAC_FIFOCONF5_DROPLT64 |
609 pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM |
610 1 << S6_GMAC_FIFOCONF5_RXDROPSIZE,
611 pd->reg + S6_GMAC_FIFOCONF5);
612 writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT |
613 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
614 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
615 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
616 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
617 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
618 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED,
619 pd->reg + S6_GMAC_FIFOCONF4);
620 s6gmac_set_dstaddr(pd, 0,
621 0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF);
622 s6gmac_set_dstaddr(pd, 1,
623 dev->dev_addr[5] |
624 dev->dev_addr[4] << 8 |
625 dev->dev_addr[3] << 16 |
626 dev->dev_addr[2] << 24,
627 dev->dev_addr[1] |
628 dev->dev_addr[0] << 8,
629 0xFFFFFFFF, 0x0000FFFF);
630 s6gmac_set_dstaddr(pd, 2,
631 0x00000000, 0x00000100, 0x00000000, 0x00000100);
632 s6gmac_set_dstaddr(pd, 3,
633 0x00000000, 0x00000000, 0x00000000, 0x00000000);
634 writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA |
635 1 << S6_GMAC_HOST_PBLKCTRL_RXENA |
636 S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
637 S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
638 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
639 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
640 is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
641 pd->reg + S6_GMAC_HOST_PBLKCTRL);
642}
643
644static void s6mii_enable(struct s6gmac *pd)
645{
646 writel(readl(pd->reg + S6_GMAC_MACCONF1) &
647 ~(1 << S6_GMAC_MACCONF1_SOFTRES),
648 pd->reg + S6_GMAC_MACCONF1);
649 writel((readl(pd->reg + S6_GMAC_MACMIICONF)
650 & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL))
651 | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL),
652 pd->reg + S6_GMAC_MACMIICONF);
653}
654
655static int s6mii_busy(struct s6gmac *pd, int tmo)
656{
657 while (readl(pd->reg + S6_GMAC_MACMIIINDI)) {
658 if (--tmo == 0)
659 return -ETIME;
660 udelay(64);
661 }
662 return 0;
663}
664
665static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum)
666{
667 struct s6gmac *pd = bus->priv;
668 s6mii_enable(pd);
669 if (s6mii_busy(pd, 256))
670 return -ETIME;
671 writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
672 regnum << S6_GMAC_MACMIIADDR_REG,
673 pd->reg + S6_GMAC_MACMIIADDR);
674 writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD);
675 writel(0, pd->reg + S6_GMAC_MACMIICMD);
676 if (s6mii_busy(pd, 256))
677 return -ETIME;
678 return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT);
679}
680
681static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
682{
683 struct s6gmac *pd = bus->priv;
684 s6mii_enable(pd);
685 if (s6mii_busy(pd, 256))
686 return -ETIME;
687 writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
688 regnum << S6_GMAC_MACMIIADDR_REG,
689 pd->reg + S6_GMAC_MACMIIADDR);
690 writel(value, pd->reg + S6_GMAC_MACMIICTRL);
691 if (s6mii_busy(pd, 256))
692 return -ETIME;
693 return 0;
694}
695
696static int s6mii_reset(struct mii_bus *bus)
697{
698 struct s6gmac *pd = bus->priv;
699 s6mii_enable(pd);
700 if (s6mii_busy(pd, PHY_INIT_TIMEOUT))
701 return -ETIME;
702 return 0;
703}
704
705static void s6gmac_set_rgmii_txclock(struct s6gmac *pd)
706{
707 u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
708 pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC);
709 switch (pd->link.mbit) {
710 case 10:
711 pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC;
712 break;
713 case 100:
714 pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC;
715 break;
716 case 1000:
717 pllsel |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC;
718 break;
719 default:
720 return;
721 }
722 writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL);
723}
724
725static inline void s6gmac_linkisup(struct net_device *dev, int isup)
726{
727 struct s6gmac *pd = netdev_priv(dev);
728 struct phy_device *phydev = pd->phydev;
729
730 pd->link.full = phydev->duplex;
731 pd->link.giga = (phydev->speed == 1000);
732 if (pd->link.mbit != phydev->speed) {
733 pd->link.mbit = phydev->speed;
734 s6gmac_set_rgmii_txclock(pd);
735 }
736 pd->link.isup = isup;
737 if (isup)
738 netif_carrier_on(dev);
739 phy_print_status(phydev);
740}
741
742static void s6gmac_adjust_link(struct net_device *dev)
743{
744 struct s6gmac *pd = netdev_priv(dev);
745 struct phy_device *phydev = pd->phydev;
746 if (pd->link.isup &&
747 (!phydev->link ||
748 (pd->link.mbit != phydev->speed) ||
749 (pd->link.full != phydev->duplex))) {
750 pd->link.isup = 0;
751 netif_tx_disable(dev);
752 if (!phydev->link) {
753 netif_carrier_off(dev);
754 phy_print_status(phydev);
755 }
756 }
757 if (!pd->link.isup && phydev->link) {
758 if (pd->link.full != phydev->duplex) {
759 u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
760 if (phydev->duplex)
761 maccfg |= 1 << S6_GMAC_MACCONF2_FULL;
762 else
763 maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL);
764 writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
765 }
766
767 if (pd->link.giga != (phydev->speed == 1000)) {
768 u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5);
769 u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
770 maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK
771 << S6_GMAC_MACCONF2_IFMODE);
772 if (phydev->speed == 1000) {
773 fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM;
774 maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE
775 << S6_GMAC_MACCONF2_IFMODE;
776 } else {
777 fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM);
778 maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE
779 << S6_GMAC_MACCONF2_IFMODE;
780 }
781 writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5);
782 writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
783 }
784
785 if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
786 netif_wake_queue(dev);
787 s6gmac_linkisup(dev, 1);
788 }
789}
790
791static inline int s6gmac_phy_start(struct net_device *dev)
792{
793 struct s6gmac *pd = netdev_priv(dev);
794 int i = 0;
795 struct phy_device *p = NULL;
796 while ((!(p = pd->mii.bus->phy_map[i])) && (i < PHY_MAX_ADDR))
797 i++;
798 p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0,
799 PHY_INTERFACE_MODE_RGMII);
800 if (IS_ERR(p)) {
801 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
802 return PTR_ERR(p);
803 }
804 p->supported &= PHY_GBIT_FEATURES;
805 p->advertising = p->supported;
806 pd->phydev = p;
807 return 0;
808}
809
810static inline void s6gmac_init_stats(struct net_device *dev)
811{
812 struct s6gmac *pd = netdev_priv(dev);
813 u32 mask;
814 mask = 1 << S6_GMAC_STATCARRY1_RDRP |
815 1 << S6_GMAC_STATCARRY1_RJBR |
816 1 << S6_GMAC_STATCARRY1_RFRG |
817 1 << S6_GMAC_STATCARRY1_ROVR |
818 1 << S6_GMAC_STATCARRY1_RUND |
819 1 << S6_GMAC_STATCARRY1_RCDE |
820 1 << S6_GMAC_STATCARRY1_RFLR |
821 1 << S6_GMAC_STATCARRY1_RALN |
822 1 << S6_GMAC_STATCARRY1_RMCA |
823 1 << S6_GMAC_STATCARRY1_RFCS |
824 1 << S6_GMAC_STATCARRY1_RPKT |
825 1 << S6_GMAC_STATCARRY1_RBYT;
826 writel(mask, pd->reg + S6_GMAC_STATCARRY(0));
827 writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0));
828 mask = 1 << S6_GMAC_STATCARRY2_TDRP |
829 1 << S6_GMAC_STATCARRY2_TNCL |
830 1 << S6_GMAC_STATCARRY2_TXCL |
831 1 << S6_GMAC_STATCARRY2_TEDF |
832 1 << S6_GMAC_STATCARRY2_TPKT |
833 1 << S6_GMAC_STATCARRY2_TBYT |
834 1 << S6_GMAC_STATCARRY2_TFRG |
835 1 << S6_GMAC_STATCARRY2_TUND |
836 1 << S6_GMAC_STATCARRY2_TOVR |
837 1 << S6_GMAC_STATCARRY2_TFCS |
838 1 << S6_GMAC_STATCARRY2_TJBR;
839 writel(mask, pd->reg + S6_GMAC_STATCARRY(1));
840 writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1));
841}
842
843static inline void s6gmac_init_dmac(struct net_device *dev)
844{
845 struct s6gmac *pd = netdev_priv(dev);
846 s6dmac_disable_chan(pd->tx_dma, pd->tx_chan);
847 s6dmac_disable_chan(pd->rx_dma, pd->rx_chan);
848 s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX);
849 s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX);
850}
851
852static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
853{
854 struct s6gmac *pd = netdev_priv(dev);
855 unsigned long flags;
856 spin_lock_irqsave(&pd->lock, flags);
857 dev->trans_start = jiffies;
858 writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
859 0 << S6_GMAC_BURST_PREWR_CFE |
860 1 << S6_GMAC_BURST_PREWR_PPE |
861 1 << S6_GMAC_BURST_PREWR_FCS |
862 ((skb->len < ETH_ZLEN) ? 1 : 0) << S6_GMAC_BURST_PREWR_PAD,
863 pd->reg + S6_GMAC_BURST_PREWR);
864 s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan,
865 (u32)skb->data, pd->io, skb->len);
866 if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
867 netif_stop_queue(dev);
868 if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) {
869 printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n",
870 pd->tx_skb_o, pd->tx_skb_i);
871 BUG();
872 }
873 pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb;
874 spin_unlock_irqrestore(&pd->lock, flags);
875 return 0;
876}
877
878static void s6gmac_tx_timeout(struct net_device *dev)
879{
880 struct s6gmac *pd = netdev_priv(dev);
881 unsigned long flags;
882 spin_lock_irqsave(&pd->lock, flags);
883 s6gmac_tx_interrupt(dev);
884 spin_unlock_irqrestore(&pd->lock, flags);
885}
886
/*
 * net_device open (ifup) handler: sync link state from the PHY,
 * initialize the MAC, statistics and DMA controller, prime the RX
 * FIFO, enable both DMA channels, program the GMAC host interrupt
 * mask, then start the PHY state machine and the TX queue.
 */
static int s6gmac_open(struct net_device *dev)
{
	struct s6gmac *pd = netdev_priv(dev);
	unsigned long flags;
	/* refresh pd->phydev->link before sampling it under the lock */
	phy_read_status(pd->phydev);
	spin_lock_irqsave(&pd->lock, flags);
	pd->link.mbit = 0;
	s6gmac_linkisup(dev, pd->phydev->link);
	s6gmac_init_device(dev);
	s6gmac_init_stats(dev);
	s6gmac_init_dmac(dev);
	s6gmac_rx_fillfifo(pd);
	/* enable RX then TX DMA channels (parameter meaning is defined by
	 * s6dmac_enable_chan; note RX and TX differ only in two flags) */
	s6dmac_enable_chan(pd->rx_dma, pd->rx_chan,
		2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1);
	s6dmac_enable_chan(pd->tx_dma, pd->tx_chan,
		2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1);
	/* write 0 to every bit of the host interrupt mask register —
	 * presumably 0 means "unmasked" here; confirm against the S6105
	 * hardware documentation */
	writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER |
		0 << S6_GMAC_HOST_INT_TXPREWOVER |
		0 << S6_GMAC_HOST_INT_RXBURSTUNDER |
		0 << S6_GMAC_HOST_INT_RXPOSTRFULL |
		0 << S6_GMAC_HOST_INT_RXPOSTRUNDER,
		pd->reg + S6_GMAC_HOST_INTMASK);
	spin_unlock_irqrestore(&pd->lock, flags);
	phy_start(pd->phydev);
	netif_start_queue(dev);
	return 0;
}
914
915static int s6gmac_stop(struct net_device *dev)
916{
917 struct s6gmac *pd = netdev_priv(dev);
918 unsigned long flags;
919 netif_stop_queue(dev);
920 phy_stop(pd->phydev);
921 spin_lock_irqsave(&pd->lock, flags);
922 s6gmac_init_dmac(dev);
923 s6gmac_stop_device(dev);
924 while (pd->tx_skb_i != pd->tx_skb_o)
925 dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
926 while (pd->rx_skb_i != pd->rx_skb_o)
927 dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]);
928 spin_unlock_irqrestore(&pd->lock, flags);
929 return 0;
930}
931
/*
 * get_stats handler: fold the hardware statistics counters into the
 * software mirror (pd->stats, aliased as struct net_device_stats) and
 * return it.  The collection pass is repeated until it completes with
 * no pending carry events in either bank, so the snapshot is
 * self-consistent.
 */
static struct net_device_stats *s6gmac_stats(struct net_device *dev)
{
	struct s6gmac *pd = netdev_priv(dev);
	struct net_device_stats *st = (struct net_device_stats *)&pd->stats;
	int i;
	do {
		unsigned long flags;
		spin_lock_irqsave(&pd->lock, flags);
		/* seed each counter with its accumulated carry, scaled by
		 * the minimum hardware counter width */
		for (i = 0; i < sizeof(pd->stats) / sizeof(unsigned long); i++)
			pd->stats[i] =
				pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
		s6gmac_stats_collect(pd, &statinf[0][0]);
		s6gmac_stats_collect(pd, &statinf[1][0]);
		/* retry while either statistics bank reports pending work */
		i = s6gmac_stats_pending(pd, 0) |
			s6gmac_stats_pending(pd, 1);
		spin_unlock_irqrestore(&pd->lock, flags);
	} while (i);
	/* derive the aggregate error counters the network stack expects */
	st->rx_errors = st->rx_crc_errors +
			st->rx_frame_errors +
			st->rx_length_errors +
			st->rx_missed_errors;
	st->tx_errors += st->tx_aborted_errors;
	return st;
}
956
957static int __devinit s6gmac_probe(struct platform_device *pdev)
958{
959 struct net_device *dev;
960 struct s6gmac *pd;
961 int res;
962 unsigned long i;
963 struct mii_bus *mb;
964 dev = alloc_etherdev(sizeof(*pd));
965 if (!dev) {
966 printk(KERN_ERR DRV_PRMT "etherdev alloc failed, aborting.\n");
967 return -ENOMEM;
968 }
969 dev->open = s6gmac_open;
970 dev->stop = s6gmac_stop;
971 dev->hard_start_xmit = s6gmac_tx;
972 dev->tx_timeout = s6gmac_tx_timeout;
973 dev->watchdog_timeo = HZ;
974 dev->get_stats = s6gmac_stats;
975 dev->irq = platform_get_irq(pdev, 0);
976 pd = netdev_priv(dev);
977 memset(pd, 0, sizeof(*pd));
978 spin_lock_init(&pd->lock);
979 pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start;
980 i = platform_get_resource(pdev, IORESOURCE_DMA, 0)->start;
981 pd->tx_dma = DMA_MASK_DMAC(i);
982 pd->tx_chan = DMA_INDEX_CHNL(i);
983 i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start;
984 pd->rx_dma = DMA_MASK_DMAC(i);
985 pd->rx_chan = DMA_INDEX_CHNL(i);
986 pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
987 res = request_irq(dev->irq, &s6gmac_interrupt, 0, dev->name, dev);
988 if (res) {
989 printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq);
990 goto errirq;
991 }
992 res = register_netdev(dev);
993 if (res) {
994 printk(KERN_ERR DRV_PRMT "error registering device %s\n",
995 dev->name);
996 goto errdev;
997 }
998 mb = mdiobus_alloc();
999 if (!mb) {
1000 printk(KERN_ERR DRV_PRMT "error allocating mii bus\n");
1001 goto errmii;
1002 }
1003 mb->name = "s6gmac_mii";
1004 mb->read = s6mii_read;
1005 mb->write = s6mii_write;
1006 mb->reset = s6mii_reset;
1007 mb->priv = pd;
1008 snprintf(mb->id, MII_BUS_ID_SIZE, "0");
1009 mb->phy_mask = ~(1 << 0);
1010 mb->irq = &pd->mii.irq[0];
1011 for (i = 0; i < PHY_MAX_ADDR; i++) {
1012 int n = platform_get_irq(pdev, i + 1);
1013 if (n < 0)
1014 n = PHY_POLL;
1015 pd->mii.irq[i] = n;
1016 }
1017 mdiobus_register(mb);
1018 pd->mii.bus = mb;
1019 res = s6gmac_phy_start(dev);
1020 if (res)
1021 return res;
1022 platform_set_drvdata(pdev, dev);
1023 return 0;
1024errmii:
1025 unregister_netdev(dev);
1026errdev:
1027 free_irq(dev->irq, dev);
1028errirq:
1029 free_netdev(dev);
1030 return res;
1031}
1032
1033static int __devexit s6gmac_remove(struct platform_device *pdev)
1034{
1035 struct net_device *dev = platform_get_drvdata(pdev);
1036 if (dev) {
1037 struct s6gmac *pd = netdev_priv(dev);
1038 mdiobus_unregister(pd->mii.bus);
1039 unregister_netdev(dev);
1040 free_irq(dev->irq, dev);
1041 free_netdev(dev);
1042 platform_set_drvdata(pdev, NULL);
1043 }
1044 return 0;
1045}
1046
/* Platform driver glue: binds to the "s6gmac" platform device. */
static struct platform_driver s6gmac_driver = {
	.probe = s6gmac_probe,
	.remove = __devexit_p(s6gmac_remove),
	.driver = {
		.name = "s6gmac",
		.owner = THIS_MODULE,
	},
};
1055
/* Module init: announce the driver and register it on the platform bus. */
static int __init s6gmac_init(void)
{
	printk(KERN_INFO DRV_PRMT "S6 GMAC ethernet driver\n");
	return platform_driver_register(&s6gmac_driver);
}
1061
1062
/* Module exit: unregister the platform driver. */
static void __exit s6gmac_exit(void)
{
	platform_driver_unregister(&s6gmac_driver);
}
1067
/* Module entry points and metadata */
module_init(s6gmac_init);
module_exit(s6gmac_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
MODULE_AUTHOR("Oskar Schirmer <os@emlix.com>");
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3717569828bf..a906d3998131 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -169,10 +169,12 @@ config USB_NET_CDCETHER
169 The Linux-USB CDC Ethernet Gadget driver is an open implementation. 169 The Linux-USB CDC Ethernet Gadget driver is an open implementation.
170 This driver should work with at least the following devices: 170 This driver should work with at least the following devices:
171 171
172 * Dell Wireless 5530 HSPA
172 * Ericsson PipeRider (all variants) 173 * Ericsson PipeRider (all variants)
174 * Ericsson Mobile Broadband Module (all variants)
173 * Motorola (DM100 and SB4100) 175 * Motorola (DM100 and SB4100)
174 * Broadcom Cable Modem (reference design) 176 * Broadcom Cable Modem (reference design)
175 * Toshiba PCX1100U 177 * Toshiba (PCX1100U and F3507g)
176 * ... 178 * ...
177 179
178 This driver creates an interface named "ethX", where X depends on 180 This driver creates an interface named "ethX", where X depends on
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 01fd528306ec..4a6aff579403 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -533,6 +533,31 @@ static const struct usb_device_id products [] = {
533 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM, 533 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
534 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 534 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
535 .driver_info = (unsigned long) &cdc_info, 535 .driver_info = (unsigned long) &cdc_info,
536}, {
537 /* Ericsson F3507g ver. 2 */
538 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
539 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
540 .driver_info = (unsigned long) &cdc_info,
541}, {
542 /* Ericsson F3607gw */
543 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
544 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
545 .driver_info = (unsigned long) &cdc_info,
546}, {
547 /* Ericsson F3307 */
548 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
549 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
550 .driver_info = (unsigned long) &cdc_info,
551}, {
552 /* Toshiba F3507g */
553 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
554 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
555 .driver_info = (unsigned long) &cdc_info,
556}, {
557 /* Dell F3507g */
558 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
559 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
560 .driver_info = (unsigned long) &cdc_info,
536}, 561},
537 { }, // END 562 { }, // END
538}; 563};
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index c66b9c324f54..ca39ace0b0eb 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -307,9 +307,10 @@ static const struct usb_device_id products [] = {
307 USB_DEVICE (0x1286, 0x8001), // "blob" bootloader 307 USB_DEVICE (0x1286, 0x8001), // "blob" bootloader
308 .driver_info = (unsigned long) &blob_info, 308 .driver_info = (unsigned long) &blob_info,
309}, { 309}, {
310 // Linux Ethernet/RNDIS gadget on pxa210/25x/26x, second config 310 // Linux Ethernet/RNDIS gadget, mostly on PXA, second config
311 // e.g. Gumstix, current OpenZaurus, ... 311 // e.g. Gumstix, current OpenZaurus, ... or anything else
312 USB_DEVICE_VER (0x0525, 0xa4a2, 0x0203, 0x0203), 312 // that just enables this gadget option.
313 USB_DEVICE (0x0525, 0xa4a2),
313 .driver_info = (unsigned long) &linuxdev_info, 314 .driver_info = (unsigned long) &linuxdev_info,
314}, 315},
315#endif 316#endif
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 2138535f2339..73acbd244aa1 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -297,7 +297,7 @@ static int update_eth_regs_async(pegasus_t * pegasus)
297 297
298 pegasus->dr.bRequestType = PEGASUS_REQT_WRITE; 298 pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
299 pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS; 299 pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
300 pegasus->dr.wValue = 0; 300 pegasus->dr.wValue = cpu_to_le16(0);
301 pegasus->dr.wIndex = cpu_to_le16(EthCtrl0); 301 pegasus->dr.wIndex = cpu_to_le16(EthCtrl0);
302 pegasus->dr.wLength = cpu_to_le16(3); 302 pegasus->dr.wLength = cpu_to_le16(3);
303 pegasus->ctrl_urb->transfer_buffer_length = 3; 303 pegasus->ctrl_urb->transfer_buffer_length = 3;
@@ -446,11 +446,12 @@ static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data)
446 int i; 446 int i;
447 __u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE }; 447 __u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE };
448 int ret; 448 int ret;
449 __le16 le_data = cpu_to_le16(data);
449 450
450 set_registers(pegasus, EpromOffset, 4, d); 451 set_registers(pegasus, EpromOffset, 4, d);
451 enable_eprom_write(pegasus); 452 enable_eprom_write(pegasus);
452 set_register(pegasus, EpromOffset, index); 453 set_register(pegasus, EpromOffset, index);
453 set_registers(pegasus, EpromData, 2, &data); 454 set_registers(pegasus, EpromData, 2, &le_data);
454 set_register(pegasus, EpromCtrl, EPROM_WRITE); 455 set_register(pegasus, EpromCtrl, EPROM_WRITE);
455 456
456 for (i = 0; i < REG_TIMEOUT; i++) { 457 for (i = 0; i < REG_TIMEOUT; i++) {
@@ -923,29 +924,32 @@ static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev)
923 924
924static inline void disable_net_traffic(pegasus_t * pegasus) 925static inline void disable_net_traffic(pegasus_t * pegasus)
925{ 926{
926 int tmp = 0; 927 __le16 tmp = cpu_to_le16(0);
927 928
928 set_registers(pegasus, EthCtrl0, 2, &tmp); 929 set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
929} 930}
930 931
931static inline void get_interrupt_interval(pegasus_t * pegasus) 932static inline void get_interrupt_interval(pegasus_t * pegasus)
932{ 933{
933 __u8 data[2]; 934 u16 data;
935 u8 interval;
934 936
935 read_eprom_word(pegasus, 4, (__u16 *) data); 937 read_eprom_word(pegasus, 4, &data);
938 interval = data >> 8;
936 if (pegasus->usb->speed != USB_SPEED_HIGH) { 939 if (pegasus->usb->speed != USB_SPEED_HIGH) {
937 if (data[1] < 0x80) { 940 if (interval < 0x80) {
938 if (netif_msg_timer(pegasus)) 941 if (netif_msg_timer(pegasus))
939 dev_info(&pegasus->intf->dev, "intr interval " 942 dev_info(&pegasus->intf->dev, "intr interval "
940 "changed from %ums to %ums\n", 943 "changed from %ums to %ums\n",
941 data[1], 0x80); 944 interval, 0x80);
942 data[1] = 0x80; 945 interval = 0x80;
946 data = (data & 0x00FF) | ((u16)interval << 8);
943#ifdef PEGASUS_WRITE_EEPROM 947#ifdef PEGASUS_WRITE_EEPROM
944 write_eprom_word(pegasus, 4, *(__u16 *) data); 948 write_eprom_word(pegasus, 4, data);
945#endif 949#endif
946 } 950 }
947 } 951 }
948 pegasus->intr_interval = data[1]; 952 pegasus->intr_interval = interval;
949} 953}
950 954
951static void set_carrier(struct net_device *net) 955static void set_carrier(struct net_device *net)
@@ -1299,7 +1303,8 @@ static int pegasus_blacklisted(struct usb_device *udev)
1299 /* Special quirk to keep the driver from handling the Belkin Bluetooth 1303 /* Special quirk to keep the driver from handling the Belkin Bluetooth
1300 * dongle which happens to have the same ID. 1304 * dongle which happens to have the same ID.
1301 */ 1305 */
1302 if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) && 1306 if ((udd->idVendor == cpu_to_le16(VENDOR_BELKIN)) &&
1307 (udd->idProduct == cpu_to_le16(0x0121)) &&
1303 (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) && 1308 (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) &&
1304 (udd->bDeviceProtocol == 1)) 1309 (udd->bDeviceProtocol == 1))
1305 return 1; 1310 return 1;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index b02f7adff5dc..3ba35956327a 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1847,7 +1847,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
1847 */ 1847 */
1848 if (tdinfo->skb_dma) { 1848 if (tdinfo->skb_dma) {
1849 1849
1850 pktlen = (skb->len > ETH_ZLEN ? : ETH_ZLEN); 1850 pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
1851 for (i = 0; i < tdinfo->nskb_dma; i++) { 1851 for (i = 0; i < tdinfo->nskb_dma; i++) {
1852#ifdef VELOCITY_ZERO_COPY_SUPPORT 1852#ifdef VELOCITY_ZERO_COPY_SUPPORT
1853 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE); 1853 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 55f7de09d134..ea045151f953 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -538,6 +538,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
538 sc->iobase = mem; /* So we can unmap it on detach */ 538 sc->iobase = mem; /* So we can unmap it on detach */
539 sc->cachelsz = csz * sizeof(u32); /* convert to bytes */ 539 sc->cachelsz = csz * sizeof(u32); /* convert to bytes */
540 sc->opmode = NL80211_IFTYPE_STATION; 540 sc->opmode = NL80211_IFTYPE_STATION;
541 sc->bintval = 1000;
541 mutex_init(&sc->lock); 542 mutex_init(&sc->lock);
542 spin_lock_init(&sc->rxbuflock); 543 spin_lock_init(&sc->rxbuflock);
543 spin_lock_init(&sc->txbuflock); 544 spin_lock_init(&sc->txbuflock);
@@ -686,6 +687,13 @@ ath5k_pci_resume(struct pci_dev *pdev)
686 if (err) 687 if (err)
687 return err; 688 return err;
688 689
690 /*
691 * Suspend/Resume resets the PCI configuration space, so we have to
692 * re-disable the RETRY_TIMEOUT register (0x41) to keep
693 * PCI Tx retries from interfering with C3 CPU state
694 */
695 pci_write_config_byte(pdev, 0x41, 0);
696
689 err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); 697 err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
690 if (err) { 698 if (err) {
691 ATH5K_ERR(sc, "request_irq failed\n"); 699 ATH5K_ERR(sc, "request_irq failed\n");
@@ -2748,9 +2756,6 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
2748 goto end; 2756 goto end;
2749 } 2757 }
2750 2758
2751 /* Set to a reasonable value. Note that this will
2752 * be set to mac80211's value at ath5k_config(). */
2753 sc->bintval = 1000;
2754 ath5k_hw_set_lladdr(sc->ah, conf->mac_addr); 2759 ath5k_hw_set_lladdr(sc->ah, conf->mac_addr);
2755 2760
2756 ret = 0; 2761 ret = 0;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9f49a3251d4d..66a6c1f5022a 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1196,8 +1196,8 @@ void ath_radio_disable(struct ath_softc *sc)
1196 1196
1197 ath9k_hw_phy_disable(ah); 1197 ath9k_hw_phy_disable(ah);
1198 ath9k_hw_configpcipowersave(ah, 1); 1198 ath9k_hw_configpcipowersave(ah, 1);
1199 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
1200 ath9k_ps_restore(sc); 1199 ath9k_ps_restore(sc);
1200 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
1201} 1201}
1202 1202
1203/*******************/ 1203/*******************/
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index ccdf20a2e9be..170c5b32e49b 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -87,6 +87,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
87 struct ath_softc *sc; 87 struct ath_softc *sc;
88 struct ieee80211_hw *hw; 88 struct ieee80211_hw *hw;
89 u8 csz; 89 u8 csz;
90 u32 val;
90 int ret = 0; 91 int ret = 0;
91 struct ath_hw *ah; 92 struct ath_hw *ah;
92 93
@@ -133,6 +134,14 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
133 134
134 pci_set_master(pdev); 135 pci_set_master(pdev);
135 136
137 /*
138 * Disable the RETRY_TIMEOUT register (0x41) to keep
139 * PCI Tx retries from interfering with C3 CPU state.
140 */
141 pci_read_config_dword(pdev, 0x40, &val);
142 if ((val & 0x0000ff00) != 0)
143 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
144
136 ret = pci_request_region(pdev, 0, "ath9k"); 145 ret = pci_request_region(pdev, 0, "ath9k");
137 if (ret) { 146 if (ret) {
138 dev_err(&pdev->dev, "PCI memory region reserve error\n"); 147 dev_err(&pdev->dev, "PCI memory region reserve error\n");
@@ -239,12 +248,21 @@ static int ath_pci_resume(struct pci_dev *pdev)
239 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 248 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
240 struct ath_wiphy *aphy = hw->priv; 249 struct ath_wiphy *aphy = hw->priv;
241 struct ath_softc *sc = aphy->sc; 250 struct ath_softc *sc = aphy->sc;
251 u32 val;
242 int err; 252 int err;
243 253
244 err = pci_enable_device(pdev); 254 err = pci_enable_device(pdev);
245 if (err) 255 if (err)
246 return err; 256 return err;
247 pci_restore_state(pdev); 257 pci_restore_state(pdev);
258 /*
259 * Suspend/Resume resets the PCI configuration space, so we have to
260 * re-disable the RETRY_TIMEOUT register (0x41) to keep
261 * PCI Tx retries from interfering with C3 CPU state
262 */
263 pci_read_config_dword(pdev, 0x40, &val);
264 if ((val & 0x0000ff00) != 0)
265 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
248 266
249 /* Enable LED */ 267 /* Enable LED */
250 ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN, 268 ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index f99f3a76df3f..cece1c4c6bda 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -539,11 +539,14 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
539 if (ath_beacon_dtim_pending_cab(skb)) { 539 if (ath_beacon_dtim_pending_cab(skb)) {
540 /* 540 /*
541 * Remain awake waiting for buffered broadcast/multicast 541 * Remain awake waiting for buffered broadcast/multicast
542 * frames. 542 * frames. If the last broadcast/multicast frame is not
543 * received properly, the next beacon frame will work as
544 * a backup trigger for returning into NETWORK SLEEP state,
545 * so we are waiting for it as well.
543 */ 546 */
544 DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating " 547 DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating "
545 "buffered broadcast/multicast frame(s)\n"); 548 "buffered broadcast/multicast frame(s)\n");
546 sc->sc_flags |= SC_OP_WAIT_FOR_CAB; 549 sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
547 return; 550 return;
548 } 551 }
549 552
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 635c16ee6186..77c339f8516c 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -288,6 +288,7 @@ struct iwm_priv {
288 u8 *eeprom; 288 u8 *eeprom;
289 struct timer_list watchdog; 289 struct timer_list watchdog;
290 struct work_struct reset_worker; 290 struct work_struct reset_worker;
291 struct mutex mutex;
291 struct rfkill *rfkill; 292 struct rfkill *rfkill;
292 293
293 char private[0] __attribute__((__aligned__(NETDEV_ALIGN))); 294 char private[0] __attribute__((__aligned__(NETDEV_ALIGN)));
@@ -315,8 +316,11 @@ extern const struct iw_handler_def iwm_iw_handler_def;
315void *iwm_if_alloc(int sizeof_bus, struct device *dev, 316void *iwm_if_alloc(int sizeof_bus, struct device *dev,
316 struct iwm_if_ops *if_ops); 317 struct iwm_if_ops *if_ops);
317void iwm_if_free(struct iwm_priv *iwm); 318void iwm_if_free(struct iwm_priv *iwm);
319int iwm_if_add(struct iwm_priv *iwm);
320void iwm_if_remove(struct iwm_priv *iwm);
318int iwm_mode_to_nl80211_iftype(int mode); 321int iwm_mode_to_nl80211_iftype(int mode);
319int iwm_priv_init(struct iwm_priv *iwm); 322int iwm_priv_init(struct iwm_priv *iwm);
323void iwm_priv_deinit(struct iwm_priv *iwm);
320void iwm_reset(struct iwm_priv *iwm); 324void iwm_reset(struct iwm_priv *iwm);
321void iwm_tx_credit_init_pools(struct iwm_priv *iwm, 325void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
322 struct iwm_umac_notif_alive *alive); 326 struct iwm_umac_notif_alive *alive);
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 6a2640f16b6d..8be206d58222 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -112,6 +112,9 @@ static void iwm_statistics_request(struct work_struct *work)
112 iwm_send_umac_stats_req(iwm, 0); 112 iwm_send_umac_stats_req(iwm, 0);
113} 113}
114 114
115int __iwm_up(struct iwm_priv *iwm);
116int __iwm_down(struct iwm_priv *iwm);
117
115static void iwm_reset_worker(struct work_struct *work) 118static void iwm_reset_worker(struct work_struct *work)
116{ 119{
117 struct iwm_priv *iwm; 120 struct iwm_priv *iwm;
@@ -120,6 +123,19 @@ static void iwm_reset_worker(struct work_struct *work)
120 123
121 iwm = container_of(work, struct iwm_priv, reset_worker); 124 iwm = container_of(work, struct iwm_priv, reset_worker);
122 125
126 /*
127 * XXX: The iwm->mutex is introduced purely for this reset work,
128 * because the other users for iwm_up and iwm_down are only netdev
129 * ndo_open and ndo_stop which are already protected by rtnl.
130 * Please remove iwm->mutex together if iwm_reset_worker() is not
131 * required in the future.
132 */
133 if (!mutex_trylock(&iwm->mutex)) {
134 IWM_WARN(iwm, "We are in the middle of interface bringing "
135 "UP/DOWN. Skip driver resetting.\n");
136 return;
137 }
138
123 if (iwm->umac_profile_active) { 139 if (iwm->umac_profile_active) {
124 profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL); 140 profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL);
125 if (profile) 141 if (profile)
@@ -128,10 +144,10 @@ static void iwm_reset_worker(struct work_struct *work)
128 IWM_ERR(iwm, "Couldn't alloc memory for profile\n"); 144 IWM_ERR(iwm, "Couldn't alloc memory for profile\n");
129 } 145 }
130 146
131 iwm_down(iwm); 147 __iwm_down(iwm);
132 148
133 while (retry++ < 3) { 149 while (retry++ < 3) {
134 ret = iwm_up(iwm); 150 ret = __iwm_up(iwm);
135 if (!ret) 151 if (!ret)
136 break; 152 break;
137 153
@@ -142,7 +158,7 @@ static void iwm_reset_worker(struct work_struct *work)
142 IWM_WARN(iwm, "iwm_up() failed: %d\n", ret); 158 IWM_WARN(iwm, "iwm_up() failed: %d\n", ret);
143 159
144 kfree(profile); 160 kfree(profile);
145 return; 161 goto out;
146 } 162 }
147 163
148 if (profile) { 164 if (profile) {
@@ -151,6 +167,9 @@ static void iwm_reset_worker(struct work_struct *work)
151 iwm_send_mlme_profile(iwm); 167 iwm_send_mlme_profile(iwm);
152 kfree(profile); 168 kfree(profile);
153 } 169 }
170
171 out:
172 mutex_unlock(&iwm->mutex);
154} 173}
155 174
156static void iwm_watchdog(unsigned long data) 175static void iwm_watchdog(unsigned long data)
@@ -215,10 +234,21 @@ int iwm_priv_init(struct iwm_priv *iwm)
215 init_timer(&iwm->watchdog); 234 init_timer(&iwm->watchdog);
216 iwm->watchdog.function = iwm_watchdog; 235 iwm->watchdog.function = iwm_watchdog;
217 iwm->watchdog.data = (unsigned long)iwm; 236 iwm->watchdog.data = (unsigned long)iwm;
237 mutex_init(&iwm->mutex);
218 238
219 return 0; 239 return 0;
220} 240}
221 241
242void iwm_priv_deinit(struct iwm_priv *iwm)
243{
244 int i;
245
246 for (i = 0; i < IWM_TX_QUEUES; i++)
247 destroy_workqueue(iwm->txq[i].wq);
248
249 destroy_workqueue(iwm->rx_wq);
250}
251
222/* 252/*
223 * We reset all the structures, and we reset the UMAC. 253 * We reset all the structures, and we reset the UMAC.
224 * After calling this routine, you're expected to reload 254 * After calling this routine, you're expected to reload
@@ -466,7 +496,7 @@ void iwm_link_off(struct iwm_priv *iwm)
466 496
467 iwm_rx_free(iwm); 497 iwm_rx_free(iwm);
468 498
469 cancel_delayed_work(&iwm->stats_request); 499 cancel_delayed_work_sync(&iwm->stats_request);
470 memset(wstats, 0, sizeof(struct iw_statistics)); 500 memset(wstats, 0, sizeof(struct iw_statistics));
471 wstats->qual.updated = IW_QUAL_ALL_INVALID; 501 wstats->qual.updated = IW_QUAL_ALL_INVALID;
472 502
@@ -511,7 +541,7 @@ static int iwm_channels_init(struct iwm_priv *iwm)
511 return 0; 541 return 0;
512} 542}
513 543
514int iwm_up(struct iwm_priv *iwm) 544int __iwm_up(struct iwm_priv *iwm)
515{ 545{
516 int ret; 546 int ret;
517 struct iwm_notif *notif_reboot, *notif_ack = NULL; 547 struct iwm_notif *notif_reboot, *notif_ack = NULL;
@@ -647,7 +677,18 @@ int iwm_up(struct iwm_priv *iwm)
647 return -EIO; 677 return -EIO;
648} 678}
649 679
650int iwm_down(struct iwm_priv *iwm) 680int iwm_up(struct iwm_priv *iwm)
681{
682 int ret;
683
684 mutex_lock(&iwm->mutex);
685 ret = __iwm_up(iwm);
686 mutex_unlock(&iwm->mutex);
687
688 return ret;
689}
690
691int __iwm_down(struct iwm_priv *iwm)
651{ 692{
652 int ret; 693 int ret;
653 694
@@ -678,3 +719,14 @@ int iwm_down(struct iwm_priv *iwm)
678 719
679 return 0; 720 return 0;
680} 721}
722
723int iwm_down(struct iwm_priv *iwm)
724{
725 int ret;
726
727 mutex_lock(&iwm->mutex);
728 ret = __iwm_down(iwm);
729 mutex_unlock(&iwm->mutex);
730
731 return ret;
732}
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index 68e2c3b6c7a1..aaa20c6885c8 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -114,32 +114,31 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
114 iwm = wdev_to_iwm(wdev); 114 iwm = wdev_to_iwm(wdev);
115 iwm->bus_ops = if_ops; 115 iwm->bus_ops = if_ops;
116 iwm->wdev = wdev; 116 iwm->wdev = wdev;
117 iwm_priv_init(iwm); 117
118 ret = iwm_priv_init(iwm);
119 if (ret) {
120 dev_err(dev, "failed to init iwm_priv\n");
121 goto out_wdev;
122 }
123
118 wdev->iftype = iwm_mode_to_nl80211_iftype(iwm->conf.mode); 124 wdev->iftype = iwm_mode_to_nl80211_iftype(iwm->conf.mode);
119 125
120 ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, 126 ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
121 IWM_TX_QUEUES);
122 if (!ndev) { 127 if (!ndev) {
123 dev_err(dev, "no memory for network device instance\n"); 128 dev_err(dev, "no memory for network device instance\n");
124 goto out_wdev; 129 goto out_priv;
125 } 130 }
126 131
127 ndev->netdev_ops = &iwm_netdev_ops; 132 ndev->netdev_ops = &iwm_netdev_ops;
128 ndev->wireless_handlers = &iwm_iw_handler_def; 133 ndev->wireless_handlers = &iwm_iw_handler_def;
129 ndev->ieee80211_ptr = wdev; 134 ndev->ieee80211_ptr = wdev;
130 SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); 135 SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
131 ret = register_netdev(ndev);
132 if (ret < 0) {
133 dev_err(dev, "Failed to register netdev: %d\n", ret);
134 goto out_ndev;
135 }
136
137 wdev->netdev = ndev; 136 wdev->netdev = ndev;
138 137
139 return iwm; 138 return iwm;
140 139
141 out_ndev: 140 out_priv:
142 free_netdev(ndev); 141 iwm_priv_deinit(iwm);
143 142
144 out_wdev: 143 out_wdev:
145 iwm_wdev_free(iwm); 144 iwm_wdev_free(iwm);
@@ -148,15 +147,29 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
148 147
149void iwm_if_free(struct iwm_priv *iwm) 148void iwm_if_free(struct iwm_priv *iwm)
150{ 149{
151 int i;
152
153 if (!iwm_to_ndev(iwm)) 150 if (!iwm_to_ndev(iwm))
154 return; 151 return;
155 152
156 unregister_netdev(iwm_to_ndev(iwm));
157 free_netdev(iwm_to_ndev(iwm)); 153 free_netdev(iwm_to_ndev(iwm));
158 iwm_wdev_free(iwm); 154 iwm_wdev_free(iwm);
159 destroy_workqueue(iwm->rx_wq); 155 iwm_priv_deinit(iwm);
160 for (i = 0; i < IWM_TX_QUEUES; i++) 156}
161 destroy_workqueue(iwm->txq[i].wq); 157
158int iwm_if_add(struct iwm_priv *iwm)
159{
160 struct net_device *ndev = iwm_to_ndev(iwm);
161 int ret;
162
163 ret = register_netdev(ndev);
164 if (ret < 0) {
165 dev_err(&ndev->dev, "Failed to register netdev: %d\n", ret);
166 return ret;
167 }
168
169 return 0;
170}
171
172void iwm_if_remove(struct iwm_priv *iwm)
173{
174 unregister_netdev(iwm_to_ndev(iwm));
162} 175}
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index b54da677b371..916681837fd2 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -454,10 +454,18 @@ static int iwm_sdio_probe(struct sdio_func *func,
454 454
455 INIT_WORK(&hw->isr_worker, iwm_sdio_isr_worker); 455 INIT_WORK(&hw->isr_worker, iwm_sdio_isr_worker);
456 456
457 ret = iwm_if_add(iwm);
458 if (ret) {
459 dev_err(dev, "add SDIO interface failed\n");
460 goto destroy_wq;
461 }
462
457 dev_info(dev, "IWM SDIO probe\n"); 463 dev_info(dev, "IWM SDIO probe\n");
458 464
459 return 0; 465 return 0;
460 466
467 destroy_wq:
468 destroy_workqueue(hw->isr_wq);
461 debugfs_exit: 469 debugfs_exit:
462 iwm_debugfs_exit(iwm); 470 iwm_debugfs_exit(iwm);
463 if_free: 471 if_free:
@@ -471,9 +479,10 @@ static void iwm_sdio_remove(struct sdio_func *func)
471 struct iwm_priv *iwm = hw_to_iwm(hw); 479 struct iwm_priv *iwm = hw_to_iwm(hw);
472 struct device *dev = &func->dev; 480 struct device *dev = &func->dev;
473 481
482 iwm_if_remove(iwm);
483 destroy_workqueue(hw->isr_wq);
474 iwm_debugfs_exit(iwm); 484 iwm_debugfs_exit(iwm);
475 iwm_if_free(iwm); 485 iwm_if_free(iwm);
476 destroy_workqueue(hw->isr_wq);
477 486
478 sdio_set_drvdata(func, NULL); 487 sdio_set_drvdata(func, NULL);
479 488
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index f0e5e943f6e3..14a19baff214 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -67,6 +67,7 @@ static struct usb_device_id usb_ids[] = {
67 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, 67 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
68 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, 68 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
69 { USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B }, 69 { USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B },
70 { USB_DEVICE(0x083a, 0xe503), .driver_info = DEVICE_ZD1211B },
70 { USB_DEVICE(0x083a, 0xe506), .driver_info = DEVICE_ZD1211B }, 71 { USB_DEVICE(0x083a, 0xe506), .driver_info = DEVICE_ZD1211B },
71 { USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B }, 72 { USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B },
72 { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B }, 73 { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 151bf5bc8afe..1032d5fdbd42 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1471,11 +1471,13 @@ static void __devinit decode_smsc(int efer, int key, int devid, int devrev)
1471 1471
1472static void __devinit winbond_check(int io, int key) 1472static void __devinit winbond_check(int io, int key)
1473{ 1473{
1474 int devid, devrev, oldid, x_devid, x_devrev, x_oldid; 1474 int origval, devid, devrev, oldid, x_devid, x_devrev, x_oldid;
1475 1475
1476 if (!request_region(io, 3, __func__)) 1476 if (!request_region(io, 3, __func__))
1477 return; 1477 return;
1478 1478
1479 origval = inb(io); /* Save original value */
1480
1479 /* First probe without key */ 1481 /* First probe without key */
1480 outb(0x20, io); 1482 outb(0x20, io);
1481 x_devid = inb(io + 1); 1483 x_devid = inb(io + 1);
@@ -1495,6 +1497,8 @@ static void __devinit winbond_check(int io, int key)
1495 oldid = inb(io + 1); 1497 oldid = inb(io + 1);
1496 outb(0xaa, io); /* Magic Seal */ 1498 outb(0xaa, io); /* Magic Seal */
1497 1499
1500 outb(origval, io); /* in case we poked some entirely different hardware */
1501
1498 if ((x_devid == devid) && (x_devrev == devrev) && (x_oldid == oldid)) 1502 if ((x_devid == devid) && (x_devrev == devrev) && (x_oldid == oldid))
1499 goto out; /* protection against false positives */ 1503 goto out; /* protection against false positives */
1500 1504
@@ -1505,11 +1509,15 @@ out:
1505 1509
1506static void __devinit winbond_check2(int io, int key) 1510static void __devinit winbond_check2(int io, int key)
1507{ 1511{
1508 int devid, devrev, oldid, x_devid, x_devrev, x_oldid; 1512 int origval[3], devid, devrev, oldid, x_devid, x_devrev, x_oldid;
1509 1513
1510 if (!request_region(io, 3, __func__)) 1514 if (!request_region(io, 3, __func__))
1511 return; 1515 return;
1512 1516
1517 origval[0] = inb(io); /* Save original values */
1518 origval[1] = inb(io + 1);
1519 origval[2] = inb(io + 2);
1520
1513 /* First probe without the key */ 1521 /* First probe without the key */
1514 outb(0x20, io + 2); 1522 outb(0x20, io + 2);
1515 x_devid = inb(io + 2); 1523 x_devid = inb(io + 2);
@@ -1528,6 +1536,10 @@ static void __devinit winbond_check2(int io, int key)
1528 oldid = inb(io + 2); 1536 oldid = inb(io + 2);
1529 outb(0xaa, io); /* Magic Seal */ 1537 outb(0xaa, io); /* Magic Seal */
1530 1538
1539 outb(origval[0], io); /* in case we poked some entirely different hardware */
1540 outb(origval[1], io + 1);
1541 outb(origval[2], io + 2);
1542
1531 if (x_devid == devid && x_devrev == devrev && x_oldid == oldid) 1543 if (x_devid == devid && x_devrev == devrev && x_oldid == oldid)
1532 goto out; /* protection against false positives */ 1544 goto out; /* protection against false positives */
1533 1545
@@ -1538,11 +1550,13 @@ out:
1538 1550
1539static void __devinit smsc_check(int io, int key) 1551static void __devinit smsc_check(int io, int key)
1540{ 1552{
1541 int id, rev, oldid, oldrev, x_id, x_rev, x_oldid, x_oldrev; 1553 int origval, id, rev, oldid, oldrev, x_id, x_rev, x_oldid, x_oldrev;
1542 1554
1543 if (!request_region(io, 3, __func__)) 1555 if (!request_region(io, 3, __func__))
1544 return; 1556 return;
1545 1557
1558 origval = inb(io); /* Save original value */
1559
1546 /* First probe without the key */ 1560 /* First probe without the key */
1547 outb(0x0d, io); 1561 outb(0x0d, io);
1548 x_oldid = inb(io + 1); 1562 x_oldid = inb(io + 1);
@@ -1566,6 +1580,8 @@ static void __devinit smsc_check(int io, int key)
1566 rev = inb(io + 1); 1580 rev = inb(io + 1);
1567 outb(0xaa, io); /* Magic Seal */ 1581 outb(0xaa, io); /* Magic Seal */
1568 1582
1583 outb(origval, io); /* in case we poked some entirely different hardware */
1584
1569 if (x_id == id && x_oldrev == oldrev && 1585 if (x_id == id && x_oldrev == oldrev &&
1570 x_oldid == oldid && x_rev == rev) 1586 x_oldid == oldid && x_rev == rev)
1571 goto out; /* protection against false positives */ 1587 goto out; /* protection against false positives */
@@ -1602,11 +1618,12 @@ static void __devinit detect_and_report_smsc(void)
1602static void __devinit detect_and_report_it87(void) 1618static void __devinit detect_and_report_it87(void)
1603{ 1619{
1604 u16 dev; 1620 u16 dev;
1605 u8 r; 1621 u8 origval, r;
1606 if (verbose_probing) 1622 if (verbose_probing)
1607 printk(KERN_DEBUG "IT8705 Super-IO detection, now testing port 2E ...\n"); 1623 printk(KERN_DEBUG "IT8705 Super-IO detection, now testing port 2E ...\n");
1608 if (!request_region(0x2e, 1, __func__)) 1624 if (!request_region(0x2e, 2, __func__))
1609 return; 1625 return;
1626 origval = inb(0x2e); /* Save original value */
1610 outb(0x87, 0x2e); 1627 outb(0x87, 0x2e);
1611 outb(0x01, 0x2e); 1628 outb(0x01, 0x2e);
1612 outb(0x55, 0x2e); 1629 outb(0x55, 0x2e);
@@ -1626,8 +1643,10 @@ static void __devinit detect_and_report_it87(void)
1626 outb(r | 8, 0x2F); 1643 outb(r | 8, 0x2F);
1627 outb(0x02, 0x2E); /* Lock */ 1644 outb(0x02, 0x2E); /* Lock */
1628 outb(0x02, 0x2F); 1645 outb(0x02, 0x2F);
1646 } else {
1647 outb(origval, 0x2e); /* Oops, sorry to disturb */
1629 } 1648 }
1630 release_region(0x2e, 1); 1649 release_region(0x2e, 2);
1631} 1650}
1632#endif /* CONFIG_PARPORT_PC_SUPERIO */ 1651#endif /* CONFIG_PARPORT_PC_SUPERIO */
1633 1652
@@ -2271,6 +2290,9 @@ struct parport *parport_pc_probe_port(unsigned long int base,
2271 if (IS_ERR(pdev)) 2290 if (IS_ERR(pdev))
2272 return NULL; 2291 return NULL;
2273 dev = &pdev->dev; 2292 dev = &pdev->dev;
2293
2294 dev->coherent_dma_mask = DMA_BIT_MASK(24);
2295 dev->dma_mask = &dev->coherent_dma_mask;
2274 } 2296 }
2275 2297
2276 ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL); 2298 ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index b77ae6794275..1ebd6b4c743b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -2,10 +2,11 @@
2# Makefile for the PCI bus specific drivers. 2# Makefile for the PCI bus specific drivers.
3# 3#
4 4
5obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \ 5obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \
6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ 6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
7 irq.o 7 irq.o
8obj-$(CONFIG_PROC_FS) += proc.o 8obj-$(CONFIG_PROC_FS) += proc.o
9obj-$(CONFIG_SYSFS) += slot.o
9 10
10# Build PCI Express stuff if needed 11# Build PCI Express stuff if needed
11obj-$(CONFIG_PCIEPORTBUS) += pcie/ 12obj-$(CONFIG_PCIEPORTBUS) += pcie/
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 0f3706512686..db23200c4874 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -66,6 +66,25 @@ EXPORT_SYMBOL(pci_bus_write_config_byte);
66EXPORT_SYMBOL(pci_bus_write_config_word); 66EXPORT_SYMBOL(pci_bus_write_config_word);
67EXPORT_SYMBOL(pci_bus_write_config_dword); 67EXPORT_SYMBOL(pci_bus_write_config_dword);
68 68
69/**
70 * pci_bus_set_ops - Set raw operations of pci bus
71 * @bus: pci bus struct
72 * @ops: new raw operations
73 *
74 * Return previous raw operations
75 */
76struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
77{
78 struct pci_ops *old_ops;
79 unsigned long flags;
80
81 spin_lock_irqsave(&pci_lock, flags);
82 old_ops = bus->ops;
83 bus->ops = ops;
84 spin_unlock_irqrestore(&pci_lock, flags);
85 return old_ops;
86}
87EXPORT_SYMBOL(pci_bus_set_ops);
69 88
70/** 89/**
71 * pci_read_vpd - Read one entry from Vital Product Data 90 * pci_read_vpd - Read one entry from Vital Product Data
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 97a8194063b5..cef28a79103f 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -41,9 +41,14 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
41 void *alignf_data) 41 void *alignf_data)
42{ 42{
43 int i, ret = -ENOMEM; 43 int i, ret = -ENOMEM;
44 resource_size_t max = -1;
44 45
45 type_mask |= IORESOURCE_IO | IORESOURCE_MEM; 46 type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
46 47
48 /* don't allocate too high if the pref mem doesn't support 64bit*/
49 if (!(res->flags & IORESOURCE_MEM_64))
50 max = PCIBIOS_MAX_MEM_32;
51
47 for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 52 for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
48 struct resource *r = bus->resource[i]; 53 struct resource *r = bus->resource[i];
49 if (!r) 54 if (!r)
@@ -62,7 +67,7 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
62 /* Ok, try it out.. */ 67 /* Ok, try it out.. */
63 ret = allocate_resource(r, res, size, 68 ret = allocate_resource(r, res, size,
64 r->start ? : min, 69 r->start ? : min,
65 -1, align, 70 max, align,
66 alignf, alignf_data); 71 alignf, alignf_data);
67 if (ret == 0) 72 if (ret == 0)
68 break; 73 break;
@@ -201,13 +206,18 @@ void pci_enable_bridges(struct pci_bus *bus)
201 * Walk the given bus, including any bridged devices 206 * Walk the given bus, including any bridged devices
202 * on buses under this bus. Call the provided callback 207 * on buses under this bus. Call the provided callback
203 * on each device found. 208 * on each device found.
209 *
210 * We check the return of @cb each time. If it returns anything
211 * other than 0, we break out.
212 *
204 */ 213 */
205void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *), 214void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
206 void *userdata) 215 void *userdata)
207{ 216{
208 struct pci_dev *dev; 217 struct pci_dev *dev;
209 struct pci_bus *bus; 218 struct pci_bus *bus;
210 struct list_head *next; 219 struct list_head *next;
220 int retval;
211 221
212 bus = top; 222 bus = top;
213 down_read(&pci_bus_sem); 223 down_read(&pci_bus_sem);
@@ -231,8 +241,10 @@ void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
231 241
232 /* Run device routines with the device locked */ 242 /* Run device routines with the device locked */
233 down(&dev->dev.sem); 243 down(&dev->dev.sem);
234 cb(dev, userdata); 244 retval = cb(dev, userdata);
235 up(&dev->dev.sem); 245 up(&dev->dev.sem);
246 if (retval)
247 break;
236 } 248 }
237 up_read(&pci_bus_sem); 249 up_read(&pci_bus_sem);
238} 250}
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index fa3a11365ec3..7b287cb38b7a 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -267,6 +267,84 @@ rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
267 } 267 }
268 return ret; 268 return ret;
269} 269}
270
271static LIST_HEAD(dmar_atsr_units);
272
273static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
274{
275 struct acpi_dmar_atsr *atsr;
276 struct dmar_atsr_unit *atsru;
277
278 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
279 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
280 if (!atsru)
281 return -ENOMEM;
282
283 atsru->hdr = hdr;
284 atsru->include_all = atsr->flags & 0x1;
285
286 list_add(&atsru->list, &dmar_atsr_units);
287
288 return 0;
289}
290
291static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
292{
293 int rc;
294 struct acpi_dmar_atsr *atsr;
295
296 if (atsru->include_all)
297 return 0;
298
299 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
300 rc = dmar_parse_dev_scope((void *)(atsr + 1),
301 (void *)atsr + atsr->header.length,
302 &atsru->devices_cnt, &atsru->devices,
303 atsr->segment);
304 if (rc || !atsru->devices_cnt) {
305 list_del(&atsru->list);
306 kfree(atsru);
307 }
308
309 return rc;
310}
311
312int dmar_find_matched_atsr_unit(struct pci_dev *dev)
313{
314 int i;
315 struct pci_bus *bus;
316 struct acpi_dmar_atsr *atsr;
317 struct dmar_atsr_unit *atsru;
318
319 list_for_each_entry(atsru, &dmar_atsr_units, list) {
320 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
321 if (atsr->segment == pci_domain_nr(dev->bus))
322 goto found;
323 }
324
325 return 0;
326
327found:
328 for (bus = dev->bus; bus; bus = bus->parent) {
329 struct pci_dev *bridge = bus->self;
330
331 if (!bridge || !bridge->is_pcie ||
332 bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
333 return 0;
334
335 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
336 for (i = 0; i < atsru->devices_cnt; i++)
337 if (atsru->devices[i] == bridge)
338 return 1;
339 break;
340 }
341 }
342
343 if (atsru->include_all)
344 return 1;
345
346 return 0;
347}
270#endif 348#endif
271 349
272static void __init 350static void __init
@@ -274,22 +352,28 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
274{ 352{
275 struct acpi_dmar_hardware_unit *drhd; 353 struct acpi_dmar_hardware_unit *drhd;
276 struct acpi_dmar_reserved_memory *rmrr; 354 struct acpi_dmar_reserved_memory *rmrr;
355 struct acpi_dmar_atsr *atsr;
277 356
278 switch (header->type) { 357 switch (header->type) {
279 case ACPI_DMAR_TYPE_HARDWARE_UNIT: 358 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
280 drhd = (struct acpi_dmar_hardware_unit *)header; 359 drhd = container_of(header, struct acpi_dmar_hardware_unit,
360 header);
281 printk (KERN_INFO PREFIX 361 printk (KERN_INFO PREFIX
282 "DRHD (flags: 0x%08x)base: 0x%016Lx\n", 362 "DRHD base: %#016Lx flags: %#x\n",
283 drhd->flags, (unsigned long long)drhd->address); 363 (unsigned long long)drhd->address, drhd->flags);
284 break; 364 break;
285 case ACPI_DMAR_TYPE_RESERVED_MEMORY: 365 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
286 rmrr = (struct acpi_dmar_reserved_memory *)header; 366 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
287 367 header);
288 printk (KERN_INFO PREFIX 368 printk (KERN_INFO PREFIX
289 "RMRR base: 0x%016Lx end: 0x%016Lx\n", 369 "RMRR base: %#016Lx end: %#016Lx\n",
290 (unsigned long long)rmrr->base_address, 370 (unsigned long long)rmrr->base_address,
291 (unsigned long long)rmrr->end_address); 371 (unsigned long long)rmrr->end_address);
292 break; 372 break;
373 case ACPI_DMAR_TYPE_ATSR:
374 atsr = container_of(header, struct acpi_dmar_atsr, header);
375 printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
376 break;
293 } 377 }
294} 378}
295 379
@@ -363,6 +447,11 @@ parse_dmar_table(void)
363 ret = dmar_parse_one_rmrr(entry_header); 447 ret = dmar_parse_one_rmrr(entry_header);
364#endif 448#endif
365 break; 449 break;
450 case ACPI_DMAR_TYPE_ATSR:
451#ifdef CONFIG_DMAR
452 ret = dmar_parse_one_atsr(entry_header);
453#endif
454 break;
366 default: 455 default:
367 printk(KERN_WARNING PREFIX 456 printk(KERN_WARNING PREFIX
368 "Unknown DMAR structure type\n"); 457 "Unknown DMAR structure type\n");
@@ -431,11 +520,19 @@ int __init dmar_dev_scope_init(void)
431#ifdef CONFIG_DMAR 520#ifdef CONFIG_DMAR
432 { 521 {
433 struct dmar_rmrr_unit *rmrr, *rmrr_n; 522 struct dmar_rmrr_unit *rmrr, *rmrr_n;
523 struct dmar_atsr_unit *atsr, *atsr_n;
524
434 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) { 525 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
435 ret = rmrr_parse_dev(rmrr); 526 ret = rmrr_parse_dev(rmrr);
436 if (ret) 527 if (ret)
437 return ret; 528 return ret;
438 } 529 }
530
531 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
532 ret = atsr_parse_dev(atsr);
533 if (ret)
534 return ret;
535 }
439 } 536 }
440#endif 537#endif
441 538
@@ -468,6 +565,9 @@ int __init dmar_table_init(void)
468#ifdef CONFIG_DMAR 565#ifdef CONFIG_DMAR
469 if (list_empty(&dmar_rmrr_units)) 566 if (list_empty(&dmar_rmrr_units))
470 printk(KERN_INFO PREFIX "No RMRR found\n"); 567 printk(KERN_INFO PREFIX "No RMRR found\n");
568
569 if (list_empty(&dmar_atsr_units))
570 printk(KERN_INFO PREFIX "No ATSR found\n");
471#endif 571#endif
472 572
473#ifdef CONFIG_INTR_REMAP 573#ifdef CONFIG_INTR_REMAP
@@ -515,6 +615,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
515 u32 ver; 615 u32 ver;
516 static int iommu_allocated = 0; 616 static int iommu_allocated = 0;
517 int agaw = 0; 617 int agaw = 0;
618 int msagaw = 0;
518 619
519 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); 620 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
520 if (!iommu) 621 if (!iommu)
@@ -535,12 +636,20 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
535 agaw = iommu_calculate_agaw(iommu); 636 agaw = iommu_calculate_agaw(iommu);
536 if (agaw < 0) { 637 if (agaw < 0) {
537 printk(KERN_ERR 638 printk(KERN_ERR
538 "Cannot get a valid agaw for iommu (seq_id = %d)\n", 639 "Cannot get a valid agaw for iommu (seq_id = %d)\n",
640 iommu->seq_id);
641 goto error;
642 }
643 msagaw = iommu_calculate_max_sagaw(iommu);
644 if (msagaw < 0) {
645 printk(KERN_ERR
646 "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
539 iommu->seq_id); 647 iommu->seq_id);
540 goto error; 648 goto error;
541 } 649 }
542#endif 650#endif
543 iommu->agaw = agaw; 651 iommu->agaw = agaw;
652 iommu->msagaw = msagaw;
544 653
545 /* the registers might be more than one page */ 654 /* the registers might be more than one page */
546 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), 655 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
@@ -590,7 +699,8 @@ void free_iommu(struct intel_iommu *iommu)
590 */ 699 */
591static inline void reclaim_free_desc(struct q_inval *qi) 700static inline void reclaim_free_desc(struct q_inval *qi)
592{ 701{
593 while (qi->desc_status[qi->free_tail] == QI_DONE) { 702 while (qi->desc_status[qi->free_tail] == QI_DONE ||
703 qi->desc_status[qi->free_tail] == QI_ABORT) {
594 qi->desc_status[qi->free_tail] = QI_FREE; 704 qi->desc_status[qi->free_tail] = QI_FREE;
595 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH; 705 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
596 qi->free_cnt++; 706 qi->free_cnt++;
@@ -600,10 +710,13 @@ static inline void reclaim_free_desc(struct q_inval *qi)
600static int qi_check_fault(struct intel_iommu *iommu, int index) 710static int qi_check_fault(struct intel_iommu *iommu, int index)
601{ 711{
602 u32 fault; 712 u32 fault;
603 int head; 713 int head, tail;
604 struct q_inval *qi = iommu->qi; 714 struct q_inval *qi = iommu->qi;
605 int wait_index = (index + 1) % QI_LENGTH; 715 int wait_index = (index + 1) % QI_LENGTH;
606 716
717 if (qi->desc_status[wait_index] == QI_ABORT)
718 return -EAGAIN;
719
607 fault = readl(iommu->reg + DMAR_FSTS_REG); 720 fault = readl(iommu->reg + DMAR_FSTS_REG);
608 721
609 /* 722 /*
@@ -613,7 +726,11 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
613 */ 726 */
614 if (fault & DMA_FSTS_IQE) { 727 if (fault & DMA_FSTS_IQE) {
615 head = readl(iommu->reg + DMAR_IQH_REG); 728 head = readl(iommu->reg + DMAR_IQH_REG);
616 if ((head >> 4) == index) { 729 if ((head >> DMAR_IQ_SHIFT) == index) {
730 printk(KERN_ERR "VT-d detected invalid descriptor: "
731 "low=%llx, high=%llx\n",
732 (unsigned long long)qi->desc[index].low,
733 (unsigned long long)qi->desc[index].high);
617 memcpy(&qi->desc[index], &qi->desc[wait_index], 734 memcpy(&qi->desc[index], &qi->desc[wait_index],
618 sizeof(struct qi_desc)); 735 sizeof(struct qi_desc));
619 __iommu_flush_cache(iommu, &qi->desc[index], 736 __iommu_flush_cache(iommu, &qi->desc[index],
@@ -623,6 +740,32 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
623 } 740 }
624 } 741 }
625 742
743 /*
744 * If ITE happens, all pending wait_desc commands are aborted.
745 * No new descriptors are fetched until the ITE is cleared.
746 */
747 if (fault & DMA_FSTS_ITE) {
748 head = readl(iommu->reg + DMAR_IQH_REG);
749 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
750 head |= 1;
751 tail = readl(iommu->reg + DMAR_IQT_REG);
752 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
753
754 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
755
756 do {
757 if (qi->desc_status[head] == QI_IN_USE)
758 qi->desc_status[head] = QI_ABORT;
759 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
760 } while (head != tail);
761
762 if (qi->desc_status[wait_index] == QI_ABORT)
763 return -EAGAIN;
764 }
765
766 if (fault & DMA_FSTS_ICE)
767 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
768
626 return 0; 769 return 0;
627} 770}
628 771
@@ -632,7 +775,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
632 */ 775 */
633int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) 776int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
634{ 777{
635 int rc = 0; 778 int rc;
636 struct q_inval *qi = iommu->qi; 779 struct q_inval *qi = iommu->qi;
637 struct qi_desc *hw, wait_desc; 780 struct qi_desc *hw, wait_desc;
638 int wait_index, index; 781 int wait_index, index;
@@ -643,6 +786,9 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
643 786
644 hw = qi->desc; 787 hw = qi->desc;
645 788
789restart:
790 rc = 0;
791
646 spin_lock_irqsave(&qi->q_lock, flags); 792 spin_lock_irqsave(&qi->q_lock, flags);
647 while (qi->free_cnt < 3) { 793 while (qi->free_cnt < 3) {
648 spin_unlock_irqrestore(&qi->q_lock, flags); 794 spin_unlock_irqrestore(&qi->q_lock, flags);
@@ -673,7 +819,7 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
673 * update the HW tail register indicating the presence of 819 * update the HW tail register indicating the presence of
674 * new descriptors. 820 * new descriptors.
675 */ 821 */
676 writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG); 822 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
677 823
678 while (qi->desc_status[wait_index] != QI_DONE) { 824 while (qi->desc_status[wait_index] != QI_DONE) {
679 /* 825 /*
@@ -685,18 +831,21 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
685 */ 831 */
686 rc = qi_check_fault(iommu, index); 832 rc = qi_check_fault(iommu, index);
687 if (rc) 833 if (rc)
688 goto out; 834 break;
689 835
690 spin_unlock(&qi->q_lock); 836 spin_unlock(&qi->q_lock);
691 cpu_relax(); 837 cpu_relax();
692 spin_lock(&qi->q_lock); 838 spin_lock(&qi->q_lock);
693 } 839 }
694out: 840
695 qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE; 841 qi->desc_status[index] = QI_DONE;
696 842
697 reclaim_free_desc(qi); 843 reclaim_free_desc(qi);
698 spin_unlock_irqrestore(&qi->q_lock, flags); 844 spin_unlock_irqrestore(&qi->q_lock, flags);
699 845
846 if (rc == -EAGAIN)
847 goto restart;
848
700 return rc; 849 return rc;
701} 850}
702 851
@@ -714,41 +863,26 @@ void qi_global_iec(struct intel_iommu *iommu)
714 qi_submit_sync(&desc, iommu); 863 qi_submit_sync(&desc, iommu);
715} 864}
716 865
717int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, 866void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
718 u64 type, int non_present_entry_flush) 867 u64 type)
719{ 868{
720 struct qi_desc desc; 869 struct qi_desc desc;
721 870
722 if (non_present_entry_flush) {
723 if (!cap_caching_mode(iommu->cap))
724 return 1;
725 else
726 did = 0;
727 }
728
729 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did) 871 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
730 | QI_CC_GRAN(type) | QI_CC_TYPE; 872 | QI_CC_GRAN(type) | QI_CC_TYPE;
731 desc.high = 0; 873 desc.high = 0;
732 874
733 return qi_submit_sync(&desc, iommu); 875 qi_submit_sync(&desc, iommu);
734} 876}
735 877
736int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, 878void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
737 unsigned int size_order, u64 type, 879 unsigned int size_order, u64 type)
738 int non_present_entry_flush)
739{ 880{
740 u8 dw = 0, dr = 0; 881 u8 dw = 0, dr = 0;
741 882
742 struct qi_desc desc; 883 struct qi_desc desc;
743 int ih = 0; 884 int ih = 0;
744 885
745 if (non_present_entry_flush) {
746 if (!cap_caching_mode(iommu->cap))
747 return 1;
748 else
749 did = 0;
750 }
751
752 if (cap_write_drain(iommu->cap)) 886 if (cap_write_drain(iommu->cap))
753 dw = 1; 887 dw = 1;
754 888
@@ -760,7 +894,28 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
760 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih) 894 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
761 | QI_IOTLB_AM(size_order); 895 | QI_IOTLB_AM(size_order);
762 896
763 return qi_submit_sync(&desc, iommu); 897 qi_submit_sync(&desc, iommu);
898}
899
900void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
901 u64 addr, unsigned mask)
902{
903 struct qi_desc desc;
904
905 if (mask) {
906 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
907 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
908 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
909 } else
910 desc.high = QI_DEV_IOTLB_ADDR(addr);
911
912 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
913 qdep = 0;
914
915 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
916 QI_DIOTLB_TYPE;
917
918 qi_submit_sync(&desc, iommu);
764} 919}
765 920
766/* 921/*
@@ -790,7 +945,6 @@ void dmar_disable_qi(struct intel_iommu *iommu)
790 cpu_relax(); 945 cpu_relax();
791 946
792 iommu->gcmd &= ~DMA_GCMD_QIE; 947 iommu->gcmd &= ~DMA_GCMD_QIE;
793
794 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); 948 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
795 949
796 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, 950 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
@@ -804,7 +958,7 @@ end:
804 */ 958 */
805static void __dmar_enable_qi(struct intel_iommu *iommu) 959static void __dmar_enable_qi(struct intel_iommu *iommu)
806{ 960{
807 u32 cmd, sts; 961 u32 sts;
808 unsigned long flags; 962 unsigned long flags;
809 struct q_inval *qi = iommu->qi; 963 struct q_inval *qi = iommu->qi;
810 964
@@ -818,9 +972,8 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
818 972
819 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc)); 973 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
820 974
821 cmd = iommu->gcmd | DMA_GCMD_QIE;
822 iommu->gcmd |= DMA_GCMD_QIE; 975 iommu->gcmd |= DMA_GCMD_QIE;
823 writel(cmd, iommu->reg + DMAR_GCMD_REG); 976 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
824 977
825 /* Make sure hardware complete it */ 978 /* Make sure hardware complete it */
826 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); 979 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
@@ -1096,7 +1249,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
1096 set_irq_data(irq, NULL); 1249 set_irq_data(irq, NULL);
1097 iommu->irq = 0; 1250 iommu->irq = 0;
1098 destroy_irq(irq); 1251 destroy_irq(irq);
1099 return 0; 1252 return ret;
1100 } 1253 }
1101 1254
1102 ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu); 1255 ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 9aa4fe100a0d..66f29bc00be4 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -4,7 +4,7 @@
4 4
5menuconfig HOTPLUG_PCI 5menuconfig HOTPLUG_PCI
6 tristate "Support for PCI Hotplug" 6 tristate "Support for PCI Hotplug"
7 depends on PCI && HOTPLUG 7 depends on PCI && HOTPLUG && SYSFS
8 ---help--- 8 ---help---
9 Say Y here if you have a motherboard with a PCI Hotplug controller. 9 Say Y here if you have a motherboard with a PCI Hotplug controller.
10 This allows you to add and remove PCI cards while the machine is 10 This allows you to add and remove PCI cards while the machine is
@@ -41,7 +41,7 @@ config HOTPLUG_PCI_FAKE
41 41
42config HOTPLUG_PCI_COMPAQ 42config HOTPLUG_PCI_COMPAQ
43 tristate "Compaq PCI Hotplug driver" 43 tristate "Compaq PCI Hotplug driver"
44 depends on X86 && PCI_BIOS && PCI_LEGACY 44 depends on X86 && PCI_BIOS
45 help 45 help
46 Say Y here if you have a motherboard with a Compaq PCI Hotplug 46 Say Y here if you have a motherboard with a Compaq PCI Hotplug
47 controller. 47 controller.
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 43c10bd261b4..4dd7114964ac 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -77,7 +77,6 @@ static int get_latch_status (struct hotplug_slot *slot, u8 *value);
77static int get_adapter_status (struct hotplug_slot *slot, u8 *value); 77static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
78 78
79static struct hotplug_slot_ops acpi_hotplug_slot_ops = { 79static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
80 .owner = THIS_MODULE,
81 .enable_slot = enable_slot, 80 .enable_slot = enable_slot,
82 .disable_slot = disable_slot, 81 .disable_slot = disable_slot,
83 .set_attention_status = set_attention_status, 82 .set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index de94f4feef8c..a5b9f6ae507b 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -72,7 +72,6 @@ static int get_adapter_status(struct hotplug_slot *slot, u8 * value);
72static int get_latch_status(struct hotplug_slot *slot, u8 * value); 72static int get_latch_status(struct hotplug_slot *slot, u8 * value);
73 73
74static struct hotplug_slot_ops cpci_hotplug_slot_ops = { 74static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
75 .owner = THIS_MODULE,
76 .enable_slot = enable_slot, 75 .enable_slot = enable_slot,
77 .disable_slot = disable_slot, 76 .disable_slot = disable_slot,
78 .set_attention_status = set_attention_status, 77 .set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index afaf8f69f73e..53836001d511 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -150,25 +150,25 @@ struct ctrl_reg { /* offset */
150 150
151/* offsets to the controller registers based on the above structure layout */ 151/* offsets to the controller registers based on the above structure layout */
152enum ctrl_offsets { 152enum ctrl_offsets {
153 SLOT_RST = offsetof(struct ctrl_reg, slot_RST), 153 SLOT_RST = offsetof(struct ctrl_reg, slot_RST),
154 SLOT_ENABLE = offsetof(struct ctrl_reg, slot_enable), 154 SLOT_ENABLE = offsetof(struct ctrl_reg, slot_enable),
155 MISC = offsetof(struct ctrl_reg, misc), 155 MISC = offsetof(struct ctrl_reg, misc),
156 LED_CONTROL = offsetof(struct ctrl_reg, led_control), 156 LED_CONTROL = offsetof(struct ctrl_reg, led_control),
157 INT_INPUT_CLEAR = offsetof(struct ctrl_reg, int_input_clear), 157 INT_INPUT_CLEAR = offsetof(struct ctrl_reg, int_input_clear),
158 INT_MASK = offsetof(struct ctrl_reg, int_mask), 158 INT_MASK = offsetof(struct ctrl_reg, int_mask),
159 CTRL_RESERVED0 = offsetof(struct ctrl_reg, reserved0), 159 CTRL_RESERVED0 = offsetof(struct ctrl_reg, reserved0),
160 CTRL_RESERVED1 = offsetof(struct ctrl_reg, reserved1), 160 CTRL_RESERVED1 = offsetof(struct ctrl_reg, reserved1),
161 CTRL_RESERVED2 = offsetof(struct ctrl_reg, reserved1), 161 CTRL_RESERVED2 = offsetof(struct ctrl_reg, reserved1),
162 GEN_OUTPUT_AB = offsetof(struct ctrl_reg, gen_output_AB), 162 GEN_OUTPUT_AB = offsetof(struct ctrl_reg, gen_output_AB),
163 NON_INT_INPUT = offsetof(struct ctrl_reg, non_int_input), 163 NON_INT_INPUT = offsetof(struct ctrl_reg, non_int_input),
164 CTRL_RESERVED3 = offsetof(struct ctrl_reg, reserved3), 164 CTRL_RESERVED3 = offsetof(struct ctrl_reg, reserved3),
165 CTRL_RESERVED4 = offsetof(struct ctrl_reg, reserved4), 165 CTRL_RESERVED4 = offsetof(struct ctrl_reg, reserved4),
166 CTRL_RESERVED5 = offsetof(struct ctrl_reg, reserved5), 166 CTRL_RESERVED5 = offsetof(struct ctrl_reg, reserved5),
167 CTRL_RESERVED6 = offsetof(struct ctrl_reg, reserved6), 167 CTRL_RESERVED6 = offsetof(struct ctrl_reg, reserved6),
168 CTRL_RESERVED7 = offsetof(struct ctrl_reg, reserved7), 168 CTRL_RESERVED7 = offsetof(struct ctrl_reg, reserved7),
169 CTRL_RESERVED8 = offsetof(struct ctrl_reg, reserved8), 169 CTRL_RESERVED8 = offsetof(struct ctrl_reg, reserved8),
170 SLOT_MASK = offsetof(struct ctrl_reg, slot_mask), 170 SLOT_MASK = offsetof(struct ctrl_reg, slot_mask),
171 CTRL_RESERVED9 = offsetof(struct ctrl_reg, reserved9), 171 CTRL_RESERVED9 = offsetof(struct ctrl_reg, reserved9),
172 CTRL_RESERVED10 = offsetof(struct ctrl_reg, reserved10), 172 CTRL_RESERVED10 = offsetof(struct ctrl_reg, reserved10),
173 CTRL_RESERVED11 = offsetof(struct ctrl_reg, reserved11), 173 CTRL_RESERVED11 = offsetof(struct ctrl_reg, reserved11),
174 SLOT_SERR = offsetof(struct ctrl_reg, slot_SERR), 174 SLOT_SERR = offsetof(struct ctrl_reg, slot_SERR),
@@ -190,7 +190,9 @@ struct hrt {
190 u32 reserved2; 190 u32 reserved2;
191} __attribute__ ((packed)); 191} __attribute__ ((packed));
192 192
193/* offsets to the hotplug resource table registers based on the above structure layout */ 193/* offsets to the hotplug resource table registers based on the above
194 * structure layout
195 */
194enum hrt_offsets { 196enum hrt_offsets {
195 SIG0 = offsetof(struct hrt, sig0), 197 SIG0 = offsetof(struct hrt, sig0),
196 SIG1 = offsetof(struct hrt, sig1), 198 SIG1 = offsetof(struct hrt, sig1),
@@ -217,18 +219,20 @@ struct slot_rt {
217 u16 pre_mem_length; 219 u16 pre_mem_length;
218} __attribute__ ((packed)); 220} __attribute__ ((packed));
219 221
220/* offsets to the hotplug slot resource table registers based on the above structure layout */ 222/* offsets to the hotplug slot resource table registers based on the above
223 * structure layout
224 */
221enum slot_rt_offsets { 225enum slot_rt_offsets {
222 DEV_FUNC = offsetof(struct slot_rt, dev_func), 226 DEV_FUNC = offsetof(struct slot_rt, dev_func),
223 PRIMARY_BUS = offsetof(struct slot_rt, primary_bus), 227 PRIMARY_BUS = offsetof(struct slot_rt, primary_bus),
224 SECONDARY_BUS = offsetof(struct slot_rt, secondary_bus), 228 SECONDARY_BUS = offsetof(struct slot_rt, secondary_bus),
225 MAX_BUS = offsetof(struct slot_rt, max_bus), 229 MAX_BUS = offsetof(struct slot_rt, max_bus),
226 IO_BASE = offsetof(struct slot_rt, io_base), 230 IO_BASE = offsetof(struct slot_rt, io_base),
227 IO_LENGTH = offsetof(struct slot_rt, io_length), 231 IO_LENGTH = offsetof(struct slot_rt, io_length),
228 MEM_BASE = offsetof(struct slot_rt, mem_base), 232 MEM_BASE = offsetof(struct slot_rt, mem_base),
229 MEM_LENGTH = offsetof(struct slot_rt, mem_length), 233 MEM_LENGTH = offsetof(struct slot_rt, mem_length),
230 PRE_MEM_BASE = offsetof(struct slot_rt, pre_mem_base), 234 PRE_MEM_BASE = offsetof(struct slot_rt, pre_mem_base),
231 PRE_MEM_LENGTH = offsetof(struct slot_rt, pre_mem_length), 235 PRE_MEM_LENGTH = offsetof(struct slot_rt, pre_mem_length),
232}; 236};
233 237
234struct pci_func { 238struct pci_func {
@@ -286,8 +290,8 @@ struct event_info {
286struct controller { 290struct controller {
287 struct controller *next; 291 struct controller *next;
288 u32 ctrl_int_comp; 292 u32 ctrl_int_comp;
289 struct mutex crit_sect; /* critical section mutex */ 293 struct mutex crit_sect; /* critical section mutex */
290 void __iomem *hpc_reg; /* cookie for our pci controller location */ 294 void __iomem *hpc_reg; /* cookie for our pci controller location */
291 struct pci_resource *mem_head; 295 struct pci_resource *mem_head;
292 struct pci_resource *p_mem_head; 296 struct pci_resource *p_mem_head;
293 struct pci_resource *io_head; 297 struct pci_resource *io_head;
@@ -299,7 +303,7 @@ struct controller {
299 u8 next_event; 303 u8 next_event;
300 u8 interrupt; 304 u8 interrupt;
301 u8 cfgspc_irq; 305 u8 cfgspc_irq;
302 u8 bus; /* bus number for the pci hotplug controller */ 306 u8 bus; /* bus number for the pci hotplug controller */
303 u8 rev; 307 u8 rev;
304 u8 slot_device_offset; 308 u8 slot_device_offset;
305 u8 first_slot; 309 u8 first_slot;
@@ -401,46 +405,57 @@ struct resource_lists {
401 405
402 406
403/* debugfs functions for the hotplug controller info */ 407/* debugfs functions for the hotplug controller info */
404extern void cpqhp_initialize_debugfs (void); 408extern void cpqhp_initialize_debugfs(void);
405extern void cpqhp_shutdown_debugfs (void); 409extern void cpqhp_shutdown_debugfs(void);
406extern void cpqhp_create_debugfs_files (struct controller *ctrl); 410extern void cpqhp_create_debugfs_files(struct controller *ctrl);
407extern void cpqhp_remove_debugfs_files (struct controller *ctrl); 411extern void cpqhp_remove_debugfs_files(struct controller *ctrl);
408 412
409/* controller functions */ 413/* controller functions */
410extern void cpqhp_pushbutton_thread (unsigned long event_pointer); 414extern void cpqhp_pushbutton_thread(unsigned long event_pointer);
411extern irqreturn_t cpqhp_ctrl_intr (int IRQ, void *data); 415extern irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
412extern int cpqhp_find_available_resources (struct controller *ctrl, void __iomem *rom_start); 416extern int cpqhp_find_available_resources(struct controller *ctrl,
413extern int cpqhp_event_start_thread (void); 417 void __iomem *rom_start);
414extern void cpqhp_event_stop_thread (void); 418extern int cpqhp_event_start_thread(void);
415extern struct pci_func *cpqhp_slot_create (unsigned char busnumber); 419extern void cpqhp_event_stop_thread(void);
416extern struct pci_func *cpqhp_slot_find (unsigned char bus, unsigned char device, unsigned char index); 420extern struct pci_func *cpqhp_slot_create(unsigned char busnumber);
417extern int cpqhp_process_SI (struct controller *ctrl, struct pci_func *func); 421extern struct pci_func *cpqhp_slot_find(unsigned char bus, unsigned char device,
418extern int cpqhp_process_SS (struct controller *ctrl, struct pci_func *func); 422 unsigned char index);
419extern int cpqhp_hardware_test (struct controller *ctrl, int test_num); 423extern int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func);
424extern int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func);
425extern int cpqhp_hardware_test(struct controller *ctrl, int test_num);
420 426
421/* resource functions */ 427/* resource functions */
422extern int cpqhp_resource_sort_and_combine (struct pci_resource **head); 428extern int cpqhp_resource_sort_and_combine (struct pci_resource **head);
423 429
424/* pci functions */ 430/* pci functions */
425extern int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num); 431extern int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
426extern int cpqhp_get_bus_dev (struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot); 432extern int cpqhp_get_bus_dev(struct controller *ctrl, u8 *bus_num, u8 *dev_num,
427extern int cpqhp_save_config (struct controller *ctrl, int busnumber, int is_hot_plug); 433 u8 slot);
428extern int cpqhp_save_base_addr_length (struct controller *ctrl, struct pci_func * func); 434extern int cpqhp_save_config(struct controller *ctrl, int busnumber,
429extern int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func); 435 int is_hot_plug);
430extern int cpqhp_configure_board (struct controller *ctrl, struct pci_func * func); 436extern int cpqhp_save_base_addr_length(struct controller *ctrl,
431extern int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot); 437 struct pci_func *func);
432extern int cpqhp_valid_replace (struct controller *ctrl, struct pci_func * func); 438extern int cpqhp_save_used_resources(struct controller *ctrl,
433extern void cpqhp_destroy_board_resources (struct pci_func * func); 439 struct pci_func *func);
434extern int cpqhp_return_board_resources (struct pci_func * func, struct resource_lists * resources); 440extern int cpqhp_configure_board(struct controller *ctrl,
435extern void cpqhp_destroy_resource_list (struct resource_lists * resources); 441 struct pci_func *func);
436extern int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func); 442extern int cpqhp_save_slot_config(struct controller *ctrl,
437extern int cpqhp_unconfigure_device (struct pci_func* func); 443 struct pci_func *new_slot);
444extern int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func);
445extern void cpqhp_destroy_board_resources(struct pci_func *func);
446extern int cpqhp_return_board_resources (struct pci_func *func,
447 struct resource_lists *resources);
448extern void cpqhp_destroy_resource_list(struct resource_lists *resources);
449extern int cpqhp_configure_device(struct controller *ctrl,
450 struct pci_func *func);
451extern int cpqhp_unconfigure_device(struct pci_func *func);
438 452
439/* Global variables */ 453/* Global variables */
440extern int cpqhp_debug; 454extern int cpqhp_debug;
441extern int cpqhp_legacy_mode; 455extern int cpqhp_legacy_mode;
442extern struct controller *cpqhp_ctrl_list; 456extern struct controller *cpqhp_ctrl_list;
443extern struct pci_func *cpqhp_slot_list[256]; 457extern struct pci_func *cpqhp_slot_list[256];
458extern struct irq_routing_table *cpqhp_routing_table;
444 459
445/* these can be gotten rid of, but for debugging they are purty */ 460/* these can be gotten rid of, but for debugging they are purty */
446extern u8 cpqhp_nic_irq; 461extern u8 cpqhp_nic_irq;
@@ -449,7 +464,7 @@ extern u8 cpqhp_disk_irq;
449 464
450/* inline functions */ 465/* inline functions */
451 466
452static inline char *slot_name(struct slot *slot) 467static inline const char *slot_name(struct slot *slot)
453{ 468{
454 return hotplug_slot_name(slot->hotplug_slot); 469 return hotplug_slot_name(slot->hotplug_slot);
455} 470}
@@ -458,9 +473,9 @@ static inline char *slot_name(struct slot *slot)
458 * return_resource 473 * return_resource
459 * 474 *
460 * Puts node back in the resource list pointed to by head 475 * Puts node back in the resource list pointed to by head
461 *
462 */ 476 */
463static inline void return_resource(struct pci_resource **head, struct pci_resource *node) 477static inline void return_resource(struct pci_resource **head,
478 struct pci_resource *node)
464{ 479{
465 if (!node || !head) 480 if (!node || !head)
466 return; 481 return;
@@ -471,7 +486,7 @@ static inline void return_resource(struct pci_resource **head, struct pci_resour
471static inline void set_SOGO(struct controller *ctrl) 486static inline void set_SOGO(struct controller *ctrl)
472{ 487{
473 u16 misc; 488 u16 misc;
474 489
475 misc = readw(ctrl->hpc_reg + MISC); 490 misc = readw(ctrl->hpc_reg + MISC);
476 misc = (misc | 0x0001) & 0xFFFB; 491 misc = (misc | 0x0001) & 0xFFFB;
477 writew(misc, ctrl->hpc_reg + MISC); 492 writew(misc, ctrl->hpc_reg + MISC);
@@ -481,7 +496,7 @@ static inline void set_SOGO(struct controller *ctrl)
481static inline void amber_LED_on(struct controller *ctrl, u8 slot) 496static inline void amber_LED_on(struct controller *ctrl, u8 slot)
482{ 497{
483 u32 led_control; 498 u32 led_control;
484 499
485 led_control = readl(ctrl->hpc_reg + LED_CONTROL); 500 led_control = readl(ctrl->hpc_reg + LED_CONTROL);
486 led_control |= (0x01010000L << slot); 501 led_control |= (0x01010000L << slot);
487 writel(led_control, ctrl->hpc_reg + LED_CONTROL); 502 writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -491,7 +506,7 @@ static inline void amber_LED_on(struct controller *ctrl, u8 slot)
491static inline void amber_LED_off(struct controller *ctrl, u8 slot) 506static inline void amber_LED_off(struct controller *ctrl, u8 slot)
492{ 507{
493 u32 led_control; 508 u32 led_control;
494 509
495 led_control = readl(ctrl->hpc_reg + LED_CONTROL); 510 led_control = readl(ctrl->hpc_reg + LED_CONTROL);
496 led_control &= ~(0x01010000L << slot); 511 led_control &= ~(0x01010000L << slot);
497 writel(led_control, ctrl->hpc_reg + LED_CONTROL); 512 writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -504,7 +519,7 @@ static inline int read_amber_LED(struct controller *ctrl, u8 slot)
504 519
505 led_control = readl(ctrl->hpc_reg + LED_CONTROL); 520 led_control = readl(ctrl->hpc_reg + LED_CONTROL);
506 led_control &= (0x01010000L << slot); 521 led_control &= (0x01010000L << slot);
507 522
508 return led_control ? 1 : 0; 523 return led_control ? 1 : 0;
509} 524}
510 525
@@ -512,7 +527,7 @@ static inline int read_amber_LED(struct controller *ctrl, u8 slot)
512static inline void green_LED_on(struct controller *ctrl, u8 slot) 527static inline void green_LED_on(struct controller *ctrl, u8 slot)
513{ 528{
514 u32 led_control; 529 u32 led_control;
515 530
516 led_control = readl(ctrl->hpc_reg + LED_CONTROL); 531 led_control = readl(ctrl->hpc_reg + LED_CONTROL);
517 led_control |= 0x0101L << slot; 532 led_control |= 0x0101L << slot;
518 writel(led_control, ctrl->hpc_reg + LED_CONTROL); 533 writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -521,7 +536,7 @@ static inline void green_LED_on(struct controller *ctrl, u8 slot)
521static inline void green_LED_off(struct controller *ctrl, u8 slot) 536static inline void green_LED_off(struct controller *ctrl, u8 slot)
522{ 537{
523 u32 led_control; 538 u32 led_control;
524 539
525 led_control = readl(ctrl->hpc_reg + LED_CONTROL); 540 led_control = readl(ctrl->hpc_reg + LED_CONTROL);
526 led_control &= ~(0x0101L << slot); 541 led_control &= ~(0x0101L << slot);
527 writel(led_control, ctrl->hpc_reg + LED_CONTROL); 542 writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -531,7 +546,7 @@ static inline void green_LED_off(struct controller *ctrl, u8 slot)
531static inline void green_LED_blink(struct controller *ctrl, u8 slot) 546static inline void green_LED_blink(struct controller *ctrl, u8 slot)
532{ 547{
533 u32 led_control; 548 u32 led_control;
534 549
535 led_control = readl(ctrl->hpc_reg + LED_CONTROL); 550 led_control = readl(ctrl->hpc_reg + LED_CONTROL);
536 led_control &= ~(0x0101L << slot); 551 led_control &= ~(0x0101L << slot);
537 led_control |= (0x0001L << slot); 552 led_control |= (0x0001L << slot);
@@ -575,22 +590,21 @@ static inline u8 read_slot_enable(struct controller *ctrl)
575} 590}
576 591
577 592
578/* 593/**
579 * get_controller_speed - find the current frequency/mode of controller. 594 * get_controller_speed - find the current frequency/mode of controller.
580 * 595 *
581 * @ctrl: controller to get frequency/mode for. 596 * @ctrl: controller to get frequency/mode for.
582 * 597 *
583 * Returns controller speed. 598 * Returns controller speed.
584 *
585 */ 599 */
586static inline u8 get_controller_speed(struct controller *ctrl) 600static inline u8 get_controller_speed(struct controller *ctrl)
587{ 601{
588 u8 curr_freq; 602 u8 curr_freq;
589 u16 misc; 603 u16 misc;
590 604
591 if (ctrl->pcix_support) { 605 if (ctrl->pcix_support) {
592 curr_freq = readb(ctrl->hpc_reg + NEXT_CURR_FREQ); 606 curr_freq = readb(ctrl->hpc_reg + NEXT_CURR_FREQ);
593 if ((curr_freq & 0xB0) == 0xB0) 607 if ((curr_freq & 0xB0) == 0xB0)
594 return PCI_SPEED_133MHz_PCIX; 608 return PCI_SPEED_133MHz_PCIX;
595 if ((curr_freq & 0xA0) == 0xA0) 609 if ((curr_freq & 0xA0) == 0xA0)
596 return PCI_SPEED_100MHz_PCIX; 610 return PCI_SPEED_100MHz_PCIX;
@@ -602,19 +616,18 @@ static inline u8 get_controller_speed(struct controller *ctrl)
602 return PCI_SPEED_33MHz; 616 return PCI_SPEED_33MHz;
603 } 617 }
604 618
605 misc = readw(ctrl->hpc_reg + MISC); 619 misc = readw(ctrl->hpc_reg + MISC);
606 return (misc & 0x0800) ? PCI_SPEED_66MHz : PCI_SPEED_33MHz; 620 return (misc & 0x0800) ? PCI_SPEED_66MHz : PCI_SPEED_33MHz;
607} 621}
608
609 622
610/* 623
624/**
611 * get_adapter_speed - find the max supported frequency/mode of adapter. 625 * get_adapter_speed - find the max supported frequency/mode of adapter.
612 * 626 *
613 * @ctrl: hotplug controller. 627 * @ctrl: hotplug controller.
614 * @hp_slot: hotplug slot where adapter is installed. 628 * @hp_slot: hotplug slot where adapter is installed.
615 * 629 *
616 * Returns adapter speed. 630 * Returns adapter speed.
617 *
618 */ 631 */
619static inline u8 get_adapter_speed(struct controller *ctrl, u8 hp_slot) 632static inline u8 get_adapter_speed(struct controller *ctrl, u8 hp_slot)
620{ 633{
@@ -672,7 +685,8 @@ static inline int get_slot_enabled(struct controller *ctrl, struct slot *slot)
672} 685}
673 686
674 687
675static inline int cpq_get_latch_status(struct controller *ctrl, struct slot *slot) 688static inline int cpq_get_latch_status(struct controller *ctrl,
689 struct slot *slot)
676{ 690{
677 u32 status; 691 u32 status;
678 u8 hp_slot; 692 u8 hp_slot;
@@ -687,7 +701,8 @@ static inline int cpq_get_latch_status(struct controller *ctrl, struct slot *slo
687} 701}
688 702
689 703
690static inline int get_presence_status(struct controller *ctrl, struct slot *slot) 704static inline int get_presence_status(struct controller *ctrl,
705 struct slot *slot)
691{ 706{
692 int presence_save = 0; 707 int presence_save = 0;
693 u8 hp_slot; 708 u8 hp_slot;
@@ -696,7 +711,8 @@ static inline int get_presence_status(struct controller *ctrl, struct slot *slot
696 hp_slot = slot->device - ctrl->slot_device_offset; 711 hp_slot = slot->device - ctrl->slot_device_offset;
697 712
698 tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); 713 tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
699 presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15)) >> hp_slot) & 0x02; 714 presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15))
715 >> hp_slot) & 0x02;
700 716
701 return presence_save; 717 return presence_save;
702} 718}
@@ -718,5 +734,12 @@ static inline int wait_for_ctrl_irq(struct controller *ctrl)
718 return retval; 734 return retval;
719} 735}
720 736
721#endif 737#include <asm/pci_x86.h>
738static inline int cpqhp_routing_table_length(void)
739{
740 BUG_ON(cpqhp_routing_table == NULL);
741 return ((cpqhp_routing_table->size - sizeof(struct irq_routing_table)) /
742 sizeof(struct irq_info));
743}
722 744
745#endif
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index c2e1bcbb28a7..075b4f4b6e0d 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -25,8 +25,7 @@
25 * Send feedback to <greg@kroah.com> 25 * Send feedback to <greg@kroah.com>
26 * 26 *
27 * Jan 12, 2003 - Added 66/100/133MHz PCI-X support, 27 * Jan 12, 2003 - Added 66/100/133MHz PCI-X support,
28 * Torben Mathiasen <torben.mathiasen@hp.com> 28 * Torben Mathiasen <torben.mathiasen@hp.com>
29 *
30 */ 29 */
31 30
32#include <linux/module.h> 31#include <linux/module.h>
@@ -45,7 +44,6 @@
45 44
46#include "cpqphp.h" 45#include "cpqphp.h"
47#include "cpqphp_nvram.h" 46#include "cpqphp_nvram.h"
48#include <asm/pci_x86.h>
49 47
50 48
51/* Global variables */ 49/* Global variables */
@@ -53,6 +51,7 @@ int cpqhp_debug;
53int cpqhp_legacy_mode; 51int cpqhp_legacy_mode;
54struct controller *cpqhp_ctrl_list; /* = NULL */ 52struct controller *cpqhp_ctrl_list; /* = NULL */
55struct pci_func *cpqhp_slot_list[256]; 53struct pci_func *cpqhp_slot_list[256];
54struct irq_routing_table *cpqhp_routing_table;
56 55
57/* local variables */ 56/* local variables */
58static void __iomem *smbios_table; 57static void __iomem *smbios_table;
@@ -78,33 +77,6 @@ MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
78 77
79#define CPQHPC_MODULE_MINOR 208 78#define CPQHPC_MODULE_MINOR 208
80 79
81static int one_time_init (void);
82static int set_attention_status (struct hotplug_slot *slot, u8 value);
83static int process_SI (struct hotplug_slot *slot);
84static int process_SS (struct hotplug_slot *slot);
85static int hardware_test (struct hotplug_slot *slot, u32 value);
86static int get_power_status (struct hotplug_slot *slot, u8 *value);
87static int get_attention_status (struct hotplug_slot *slot, u8 *value);
88static int get_latch_status (struct hotplug_slot *slot, u8 *value);
89static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
90static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
91static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
92
93static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
94 .owner = THIS_MODULE,
95 .set_attention_status = set_attention_status,
96 .enable_slot = process_SI,
97 .disable_slot = process_SS,
98 .hardware_test = hardware_test,
99 .get_power_status = get_power_status,
100 .get_attention_status = get_attention_status,
101 .get_latch_status = get_latch_status,
102 .get_adapter_status = get_adapter_status,
103 .get_max_bus_speed = get_max_bus_speed,
104 .get_cur_bus_speed = get_cur_bus_speed,
105};
106
107
108static inline int is_slot64bit(struct slot *slot) 80static inline int is_slot64bit(struct slot *slot)
109{ 81{
110 return (readb(slot->p_sm_slot + SMBIOS_SLOT_WIDTH) == 0x06) ? 1 : 0; 82 return (readb(slot->p_sm_slot + SMBIOS_SLOT_WIDTH) == 0x06) ? 1 : 0;
@@ -144,7 +116,7 @@ static void __iomem * detect_SMBIOS_pointer(void __iomem *begin, void __iomem *e
144 break; 116 break;
145 } 117 }
146 } 118 }
147 119
148 if (!status) 120 if (!status)
149 fp = NULL; 121 fp = NULL;
150 122
@@ -171,7 +143,7 @@ static int init_SERR(struct controller * ctrl)
171 tempdword = ctrl->first_slot; 143 tempdword = ctrl->first_slot;
172 144
173 number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F; 145 number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
174 // Loop through slots 146 /* Loop through slots */
175 while (number_of_slots) { 147 while (number_of_slots) {
176 physical_slot = tempdword; 148 physical_slot = tempdword;
177 writeb(0, ctrl->hpc_reg + SLOT_SERR); 149 writeb(0, ctrl->hpc_reg + SLOT_SERR);
@@ -182,41 +154,42 @@ static int init_SERR(struct controller * ctrl)
182 return 0; 154 return 0;
183} 155}
184 156
185 157static int init_cpqhp_routing_table(void)
186/* nice debugging output */
187static int pci_print_IRQ_route (void)
188{ 158{
189 struct irq_routing_table *routing_table;
190 int len; 159 int len;
191 int loop;
192
193 u8 tbus, tdevice, tslot;
194 160
195 routing_table = pcibios_get_irq_routing_table(); 161 cpqhp_routing_table = pcibios_get_irq_routing_table();
196 if (routing_table == NULL) { 162 if (cpqhp_routing_table == NULL)
197 err("No BIOS Routing Table??? Not good\n");
198 return -ENOMEM; 163 return -ENOMEM;
199 }
200 164
201 len = (routing_table->size - sizeof(struct irq_routing_table)) / 165 len = cpqhp_routing_table_length();
202 sizeof(struct irq_info);
203 // Make sure I got at least one entry
204 if (len == 0) { 166 if (len == 0) {
205 kfree(routing_table); 167 kfree(cpqhp_routing_table);
168 cpqhp_routing_table = NULL;
206 return -1; 169 return -1;
207 } 170 }
208 171
209 dbg("bus dev func slot\n"); 172 return 0;
173}
174
175/* nice debugging output */
176static void pci_print_IRQ_route(void)
177{
178 int len;
179 int loop;
180 u8 tbus, tdevice, tslot;
181
182 len = cpqhp_routing_table_length();
210 183
184 dbg("bus dev func slot\n");
211 for (loop = 0; loop < len; ++loop) { 185 for (loop = 0; loop < len; ++loop) {
212 tbus = routing_table->slots[loop].bus; 186 tbus = cpqhp_routing_table->slots[loop].bus;
213 tdevice = routing_table->slots[loop].devfn; 187 tdevice = cpqhp_routing_table->slots[loop].devfn;
214 tslot = routing_table->slots[loop].slot; 188 tslot = cpqhp_routing_table->slots[loop].slot;
215 dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot); 189 dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot);
216 190
217 } 191 }
218 kfree(routing_table); 192 return;
219 return 0;
220} 193}
221 194
222 195
@@ -242,9 +215,9 @@ static void __iomem *get_subsequent_smbios_entry(void __iomem *smbios_start,
242 void __iomem *p_max; 215 void __iomem *p_max;
243 216
244 if (!smbios_table || !curr) 217 if (!smbios_table || !curr)
245 return(NULL); 218 return NULL;
246 219
247 // set p_max to the end of the table 220 /* set p_max to the end of the table */
248 p_max = smbios_start + readw(smbios_table + ST_LENGTH); 221 p_max = smbios_start + readw(smbios_table + ST_LENGTH);
249 222
250 p_temp = curr; 223 p_temp = curr;
@@ -253,20 +226,19 @@ static void __iomem *get_subsequent_smbios_entry(void __iomem *smbios_start,
253 while ((p_temp < p_max) && !bail) { 226 while ((p_temp < p_max) && !bail) {
254 /* Look for the double NULL terminator 227 /* Look for the double NULL terminator
255 * The first condition is the previous byte 228 * The first condition is the previous byte
256 * and the second is the curr */ 229 * and the second is the curr
257 if (!previous_byte && !(readb(p_temp))) { 230 */
231 if (!previous_byte && !(readb(p_temp)))
258 bail = 1; 232 bail = 1;
259 }
260 233
261 previous_byte = readb(p_temp); 234 previous_byte = readb(p_temp);
262 p_temp++; 235 p_temp++;
263 } 236 }
264 237
265 if (p_temp < p_max) { 238 if (p_temp < p_max)
266 return p_temp; 239 return p_temp;
267 } else { 240 else
268 return NULL; 241 return NULL;
269 }
270} 242}
271 243
272 244
@@ -292,21 +264,18 @@ static void __iomem *get_SMBIOS_entry(void __iomem *smbios_start,
292 if (!smbios_table) 264 if (!smbios_table)
293 return NULL; 265 return NULL;
294 266
295 if (!previous) { 267 if (!previous)
296 previous = smbios_start; 268 previous = smbios_start;
297 } else { 269 else
298 previous = get_subsequent_smbios_entry(smbios_start, 270 previous = get_subsequent_smbios_entry(smbios_start,
299 smbios_table, previous); 271 smbios_table, previous);
300 }
301 272
302 while (previous) { 273 while (previous)
303 if (readb(previous + SMBIOS_GENERIC_TYPE) != type) { 274 if (readb(previous + SMBIOS_GENERIC_TYPE) != type)
304 previous = get_subsequent_smbios_entry(smbios_start, 275 previous = get_subsequent_smbios_entry(smbios_start,
305 smbios_table, previous); 276 smbios_table, previous);
306 } else { 277 else
307 break; 278 break;
308 }
309 }
310 279
311 return previous; 280 return previous;
312} 281}
@@ -322,144 +291,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
322 kfree(slot); 291 kfree(slot);
323} 292}
324 293
325#define SLOT_NAME_SIZE 10
326
327static int ctrl_slot_setup(struct controller *ctrl,
328 void __iomem *smbios_start,
329 void __iomem *smbios_table)
330{
331 struct slot *slot;
332 struct hotplug_slot *hotplug_slot;
333 struct hotplug_slot_info *hotplug_slot_info;
334 u8 number_of_slots;
335 u8 slot_device;
336 u8 slot_number;
337 u8 ctrl_slot;
338 u32 tempdword;
339 char name[SLOT_NAME_SIZE];
340 void __iomem *slot_entry= NULL;
341 int result = -ENOMEM;
342
343 dbg("%s\n", __func__);
344
345 tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
346
347 number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
348 slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
349 slot_number = ctrl->first_slot;
350
351 while (number_of_slots) {
352 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
353 if (!slot)
354 goto error;
355
356 slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
357 GFP_KERNEL);
358 if (!slot->hotplug_slot)
359 goto error_slot;
360 hotplug_slot = slot->hotplug_slot;
361
362 hotplug_slot->info =
363 kzalloc(sizeof(*(hotplug_slot->info)),
364 GFP_KERNEL);
365 if (!hotplug_slot->info)
366 goto error_hpslot;
367 hotplug_slot_info = hotplug_slot->info;
368
369 slot->ctrl = ctrl;
370 slot->bus = ctrl->bus;
371 slot->device = slot_device;
372 slot->number = slot_number;
373 dbg("slot->number = %u\n", slot->number);
374
375 slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
376 slot_entry);
377
378 while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
379 slot->number)) {
380 slot_entry = get_SMBIOS_entry(smbios_start,
381 smbios_table, 9, slot_entry);
382 }
383
384 slot->p_sm_slot = slot_entry;
385
386 init_timer(&slot->task_event);
387 slot->task_event.expires = jiffies + 5 * HZ;
388 slot->task_event.function = cpqhp_pushbutton_thread;
389
390 //FIXME: these capabilities aren't used but if they are
391 // they need to be correctly implemented
392 slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
393 slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
394
395 if (is_slot64bit(slot))
396 slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
397 if (is_slot66mhz(slot))
398 slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
399 if (ctrl->speed == PCI_SPEED_66MHz)
400 slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
401
402 ctrl_slot =
403 slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
404
405 // Check presence
406 slot->capabilities |=
407 ((((~tempdword) >> 23) |
408 ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
409 // Check the switch state
410 slot->capabilities |=
411 ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
412 // Check the slot enable
413 slot->capabilities |=
414 ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
415
416 /* register this slot with the hotplug pci core */
417 hotplug_slot->release = &release_slot;
418 hotplug_slot->private = slot;
419 snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
420 hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
421
422 hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
423 hotplug_slot_info->attention_status =
424 cpq_get_attention_status(ctrl, slot);
425 hotplug_slot_info->latch_status =
426 cpq_get_latch_status(ctrl, slot);
427 hotplug_slot_info->adapter_status =
428 get_presence_status(ctrl, slot);
429
430 dbg("registering bus %d, dev %d, number %d, "
431 "ctrl->slot_device_offset %d, slot %d\n",
432 slot->bus, slot->device,
433 slot->number, ctrl->slot_device_offset,
434 slot_number);
435 result = pci_hp_register(hotplug_slot,
436 ctrl->pci_dev->bus,
437 slot->device,
438 name);
439 if (result) {
440 err("pci_hp_register failed with error %d\n", result);
441 goto error_info;
442 }
443
444 slot->next = ctrl->slot;
445 ctrl->slot = slot;
446
447 number_of_slots--;
448 slot_device++;
449 slot_number++;
450 }
451
452 return 0;
453error_info:
454 kfree(hotplug_slot_info);
455error_hpslot:
456 kfree(hotplug_slot);
457error_slot:
458 kfree(slot);
459error:
460 return result;
461}
462
463static int ctrl_slot_cleanup (struct controller * ctrl) 294static int ctrl_slot_cleanup (struct controller * ctrl)
464{ 295{
465 struct slot *old_slot, *next_slot; 296 struct slot *old_slot, *next_slot;
@@ -476,36 +307,32 @@ static int ctrl_slot_cleanup (struct controller * ctrl)
476 307
477 cpqhp_remove_debugfs_files(ctrl); 308 cpqhp_remove_debugfs_files(ctrl);
478 309
479 //Free IRQ associated with hot plug device 310 /* Free IRQ associated with hot plug device */
480 free_irq(ctrl->interrupt, ctrl); 311 free_irq(ctrl->interrupt, ctrl);
481 //Unmap the memory 312 /* Unmap the memory */
482 iounmap(ctrl->hpc_reg); 313 iounmap(ctrl->hpc_reg);
483 //Finally reclaim PCI mem 314 /* Finally reclaim PCI mem */
484 release_mem_region(pci_resource_start(ctrl->pci_dev, 0), 315 release_mem_region(pci_resource_start(ctrl->pci_dev, 0),
485 pci_resource_len(ctrl->pci_dev, 0)); 316 pci_resource_len(ctrl->pci_dev, 0));
486 317
487 return(0); 318 return 0;
488} 319}
489 320
490 321
491//============================================================================ 322/**
492// function: get_slot_mapping 323 * get_slot_mapping - determine logical slot mapping for PCI device
493// 324 *
494// Description: Attempts to determine a logical slot mapping for a PCI 325 * Won't work for more than one PCI-PCI bridge in a slot.
495// device. Won't work for more than one PCI-PCI bridge 326 *
496// in a slot. 327 * @bus_num - bus number of PCI device
497// 328 * @dev_num - device number of PCI device
498// Input: u8 bus_num - bus number of PCI device 329 * @slot - Pointer to u8 where slot number will be returned
499// u8 dev_num - device number of PCI device 330 *
500// u8 *slot - Pointer to u8 where slot number will 331 * Output: SUCCESS or FAILURE
501// be returned 332 */
502//
503// Output: SUCCESS or FAILURE
504//=============================================================================
505static int 333static int
506get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot) 334get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
507{ 335{
508 struct irq_routing_table *PCIIRQRoutingInfoLength;
509 u32 work; 336 u32 work;
510 long len; 337 long len;
511 long loop; 338 long loop;
@@ -516,36 +343,25 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
516 343
517 bridgeSlot = 0xFF; 344 bridgeSlot = 0xFF;
518 345
519 PCIIRQRoutingInfoLength = pcibios_get_irq_routing_table(); 346 len = cpqhp_routing_table_length();
520 if (!PCIIRQRoutingInfoLength)
521 return -1;
522
523 len = (PCIIRQRoutingInfoLength->size -
524 sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
525 // Make sure I got at least one entry
526 if (len == 0) {
527 kfree(PCIIRQRoutingInfoLength);
528 return -1;
529 }
530
531 for (loop = 0; loop < len; ++loop) { 347 for (loop = 0; loop < len; ++loop) {
532 tbus = PCIIRQRoutingInfoLength->slots[loop].bus; 348 tbus = cpqhp_routing_table->slots[loop].bus;
533 tdevice = PCIIRQRoutingInfoLength->slots[loop].devfn >> 3; 349 tdevice = cpqhp_routing_table->slots[loop].devfn >> 3;
534 tslot = PCIIRQRoutingInfoLength->slots[loop].slot; 350 tslot = cpqhp_routing_table->slots[loop].slot;
535 351
536 if ((tbus == bus_num) && (tdevice == dev_num)) { 352 if ((tbus == bus_num) && (tdevice == dev_num)) {
537 *slot = tslot; 353 *slot = tslot;
538 kfree(PCIIRQRoutingInfoLength);
539 return 0; 354 return 0;
540 } else { 355 } else {
541 /* Did not get a match on the target PCI device. Check 356 /* Did not get a match on the target PCI device. Check
542 * if the current IRQ table entry is a PCI-to-PCI bridge 357 * if the current IRQ table entry is a PCI-to-PCI
543 * device. If so, and it's secondary bus matches the 358 * bridge device. If so, and it's secondary bus
544 * bus number for the target device, I need to save the 359 * matches the bus number for the target device, I need
545 * bridge's slot number. If I can not find an entry for 360 * to save the bridge's slot number. If I can not find
546 * the target device, I will have to assume it's on the 361 * an entry for the target device, I will have to
547 * other side of the bridge, and assign it the bridge's 362 * assume it's on the other side of the bridge, and
548 * slot. */ 363 * assign it the bridge's slot.
364 */
549 bus->number = tbus; 365 bus->number = tbus;
550 pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0), 366 pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0),
551 PCI_CLASS_REVISION, &work); 367 PCI_CLASS_REVISION, &work);
@@ -555,25 +371,23 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
555 PCI_DEVFN(tdevice, 0), 371 PCI_DEVFN(tdevice, 0),
556 PCI_PRIMARY_BUS, &work); 372 PCI_PRIMARY_BUS, &work);
557 // See if bridge's secondary bus matches target bus. 373 // See if bridge's secondary bus matches target bus.
558 if (((work >> 8) & 0x000000FF) == (long) bus_num) { 374 if (((work >> 8) & 0x000000FF) == (long) bus_num)
559 bridgeSlot = tslot; 375 bridgeSlot = tslot;
560 }
561 } 376 }
562 } 377 }
563 378
564 } 379 }
565 380
566 // If we got here, we didn't find an entry in the IRQ mapping table 381 /* If we got here, we didn't find an entry in the IRQ mapping table for
567 // for the target PCI device. If we did determine that the target 382 * the target PCI device. If we did determine that the target device
568 // device is on the other side of a PCI-to-PCI bridge, return the 383 * is on the other side of a PCI-to-PCI bridge, return the slot number
569 // slot number for the bridge. 384 * for the bridge.
385 */
570 if (bridgeSlot != 0xFF) { 386 if (bridgeSlot != 0xFF) {
571 *slot = bridgeSlot; 387 *slot = bridgeSlot;
572 kfree(PCIIRQRoutingInfoLength);
573 return 0; 388 return 0;
574 } 389 }
575 kfree(PCIIRQRoutingInfoLength); 390 /* Couldn't find an entry in the routing table for this PCI device */
576 // Couldn't find an entry in the routing table for this PCI device
577 return -1; 391 return -1;
578} 392}
579 393
@@ -591,32 +405,32 @@ cpqhp_set_attention_status(struct controller *ctrl, struct pci_func *func,
591 u8 hp_slot; 405 u8 hp_slot;
592 406
593 if (func == NULL) 407 if (func == NULL)
594 return(1); 408 return 1;
595 409
596 hp_slot = func->device - ctrl->slot_device_offset; 410 hp_slot = func->device - ctrl->slot_device_offset;
597 411
598 // Wait for exclusive access to hardware 412 /* Wait for exclusive access to hardware */
599 mutex_lock(&ctrl->crit_sect); 413 mutex_lock(&ctrl->crit_sect);
600 414
601 if (status == 1) { 415 if (status == 1)
602 amber_LED_on (ctrl, hp_slot); 416 amber_LED_on (ctrl, hp_slot);
603 } else if (status == 0) { 417 else if (status == 0)
604 amber_LED_off (ctrl, hp_slot); 418 amber_LED_off (ctrl, hp_slot);
605 } else { 419 else {
606 // Done with exclusive hardware access 420 /* Done with exclusive hardware access */
607 mutex_unlock(&ctrl->crit_sect); 421 mutex_unlock(&ctrl->crit_sect);
608 return(1); 422 return 1;
609 } 423 }
610 424
611 set_SOGO(ctrl); 425 set_SOGO(ctrl);
612 426
613 // Wait for SOBS to be unset 427 /* Wait for SOBS to be unset */
614 wait_for_ctrl_irq (ctrl); 428 wait_for_ctrl_irq (ctrl);
615 429
616 // Done with exclusive hardware access 430 /* Done with exclusive hardware access */
617 mutex_unlock(&ctrl->crit_sect); 431 mutex_unlock(&ctrl->crit_sect);
618 432
619 return(0); 433 return 0;
620} 434}
621 435
622 436
@@ -719,7 +533,7 @@ static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
719 533
720 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); 534 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
721 535
722 return cpqhp_hardware_test(ctrl, value); 536 return cpqhp_hardware_test(ctrl, value);
723} 537}
724 538
725 539
@@ -738,7 +552,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
738{ 552{
739 struct slot *slot = hotplug_slot->private; 553 struct slot *slot = hotplug_slot->private;
740 struct controller *ctrl = slot->ctrl; 554 struct controller *ctrl = slot->ctrl;
741 555
742 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); 556 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
743 557
744 *value = cpq_get_attention_status(ctrl, slot); 558 *value = cpq_get_attention_status(ctrl, slot);
@@ -793,6 +607,230 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
793 return 0; 607 return 0;
794} 608}
795 609
610static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
611 .set_attention_status = set_attention_status,
612 .enable_slot = process_SI,
613 .disable_slot = process_SS,
614 .hardware_test = hardware_test,
615 .get_power_status = get_power_status,
616 .get_attention_status = get_attention_status,
617 .get_latch_status = get_latch_status,
618 .get_adapter_status = get_adapter_status,
619 .get_max_bus_speed = get_max_bus_speed,
620 .get_cur_bus_speed = get_cur_bus_speed,
621};
622
623#define SLOT_NAME_SIZE 10
624
625static int ctrl_slot_setup(struct controller *ctrl,
626 void __iomem *smbios_start,
627 void __iomem *smbios_table)
628{
629 struct slot *slot;
630 struct hotplug_slot *hotplug_slot;
631 struct hotplug_slot_info *hotplug_slot_info;
632 u8 number_of_slots;
633 u8 slot_device;
634 u8 slot_number;
635 u8 ctrl_slot;
636 u32 tempdword;
637 char name[SLOT_NAME_SIZE];
638 void __iomem *slot_entry= NULL;
639 int result = -ENOMEM;
640
641 dbg("%s\n", __func__);
642
643 tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
644
645 number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
646 slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
647 slot_number = ctrl->first_slot;
648
649 while (number_of_slots) {
650 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
651 if (!slot)
652 goto error;
653
654 slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
655 GFP_KERNEL);
656 if (!slot->hotplug_slot)
657 goto error_slot;
658 hotplug_slot = slot->hotplug_slot;
659
660 hotplug_slot->info = kzalloc(sizeof(*(hotplug_slot->info)),
661 GFP_KERNEL);
662 if (!hotplug_slot->info)
663 goto error_hpslot;
664 hotplug_slot_info = hotplug_slot->info;
665
666 slot->ctrl = ctrl;
667 slot->bus = ctrl->bus;
668 slot->device = slot_device;
669 slot->number = slot_number;
670 dbg("slot->number = %u\n", slot->number);
671
672 slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
673 slot_entry);
674
675 while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
676 slot->number)) {
677 slot_entry = get_SMBIOS_entry(smbios_start,
678 smbios_table, 9, slot_entry);
679 }
680
681 slot->p_sm_slot = slot_entry;
682
683 init_timer(&slot->task_event);
684 slot->task_event.expires = jiffies + 5 * HZ;
685 slot->task_event.function = cpqhp_pushbutton_thread;
686
687 /*FIXME: these capabilities aren't used but if they are
688 * they need to be correctly implemented
689 */
690 slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
691 slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
692
693 if (is_slot64bit(slot))
694 slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
695 if (is_slot66mhz(slot))
696 slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
697 if (ctrl->speed == PCI_SPEED_66MHz)
698 slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
699
700 ctrl_slot =
701 slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
702
703 /* Check presence */
704 slot->capabilities |=
705 ((((~tempdword) >> 23) |
706 ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
707 /* Check the switch state */
708 slot->capabilities |=
709 ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
710 /* Check the slot enable */
711 slot->capabilities |=
712 ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
713
714 /* register this slot with the hotplug pci core */
715 hotplug_slot->release = &release_slot;
716 hotplug_slot->private = slot;
717 snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
718 hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
719
720 hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
721 hotplug_slot_info->attention_status =
722 cpq_get_attention_status(ctrl, slot);
723 hotplug_slot_info->latch_status =
724 cpq_get_latch_status(ctrl, slot);
725 hotplug_slot_info->adapter_status =
726 get_presence_status(ctrl, slot);
727
728 dbg("registering bus %d, dev %d, number %d, "
729 "ctrl->slot_device_offset %d, slot %d\n",
730 slot->bus, slot->device,
731 slot->number, ctrl->slot_device_offset,
732 slot_number);
733 result = pci_hp_register(hotplug_slot,
734 ctrl->pci_dev->bus,
735 slot->device,
736 name);
737 if (result) {
738 err("pci_hp_register failed with error %d\n", result);
739 goto error_info;
740 }
741
742 slot->next = ctrl->slot;
743 ctrl->slot = slot;
744
745 number_of_slots--;
746 slot_device++;
747 slot_number++;
748 }
749
750 return 0;
751error_info:
752 kfree(hotplug_slot_info);
753error_hpslot:
754 kfree(hotplug_slot);
755error_slot:
756 kfree(slot);
757error:
758 return result;
759}
760
761static int one_time_init(void)
762{
763 int loop;
764 int retval = 0;
765
766 if (initialized)
767 return 0;
768
769 power_mode = 0;
770
771 retval = init_cpqhp_routing_table();
772 if (retval)
773 goto error;
774
775 if (cpqhp_debug)
776 pci_print_IRQ_route();
777
778 dbg("Initialize + Start the notification mechanism \n");
779
780 retval = cpqhp_event_start_thread();
781 if (retval)
782 goto error;
783
784 dbg("Initialize slot lists\n");
785 for (loop = 0; loop < 256; loop++)
786 cpqhp_slot_list[loop] = NULL;
787
788 /* FIXME: We also need to hook the NMI handler eventually.
789 * this also needs to be worked with Christoph
790 * register_NMI_handler();
791 */
792 /* Map rom address */
793 cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
794 if (!cpqhp_rom_start) {
795 err ("Could not ioremap memory region for ROM\n");
796 retval = -EIO;
797 goto error;
798 }
799
800 /* Now, map the int15 entry point if we are on compaq specific
801 * hardware
802 */
803 compaq_nvram_init(cpqhp_rom_start);
804
805 /* Map smbios table entry point structure */
806 smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
807 cpqhp_rom_start + ROM_PHY_LEN);
808 if (!smbios_table) {
809 err ("Could not find the SMBIOS pointer in memory\n");
810 retval = -EIO;
811 goto error_rom_start;
812 }
813
814 smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
815 readw(smbios_table + ST_LENGTH));
816 if (!smbios_start) {
817 err ("Could not ioremap memory region taken from SMBIOS values\n");
818 retval = -EIO;
819 goto error_smbios_start;
820 }
821
822 initialized = 1;
823
824 return retval;
825
826error_smbios_start:
827 iounmap(smbios_start);
828error_rom_start:
829 iounmap(cpqhp_rom_start);
830error:
831 return retval;
832}
833
796static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 834static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
797{ 835{
798 u8 num_of_slots = 0; 836 u8 num_of_slots = 0;
@@ -815,7 +853,9 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
815 return err; 853 return err;
816 } 854 }
817 855
818 // Need to read VID early b/c it's used to differentiate CPQ and INTC discovery 856 /* Need to read VID early b/c it's used to differentiate CPQ and INTC
857 * discovery
858 */
819 rc = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id); 859 rc = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id);
820 if (rc || ((vendor_id != PCI_VENDOR_ID_COMPAQ) && (vendor_id != PCI_VENDOR_ID_INTEL))) { 860 if (rc || ((vendor_id != PCI_VENDOR_ID_COMPAQ) && (vendor_id != PCI_VENDOR_ID_INTEL))) {
821 err(msg_HPC_non_compaq_or_intel); 861 err(msg_HPC_non_compaq_or_intel);
@@ -832,217 +872,209 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
832 } 872 }
833 873
834 /* Check for the proper subsytem ID's 874 /* Check for the proper subsytem ID's
835 * Intel uses a different SSID programming model than Compaq. 875 * Intel uses a different SSID programming model than Compaq.
836 * For Intel, each SSID bit identifies a PHP capability. 876 * For Intel, each SSID bit identifies a PHP capability.
837 * Also Intel HPC's may have RID=0. 877 * Also Intel HPC's may have RID=0.
838 */ 878 */
839 if ((pdev->revision > 2) || (vendor_id == PCI_VENDOR_ID_INTEL)) { 879 if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
840 // TODO: This code can be made to support non-Compaq or Intel subsystem IDs 880 err(msg_HPC_not_supported);
841 rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid); 881 return -ENODEV;
842 if (rc) { 882 }
843 err("%s : pci_read_config_word failed\n", __func__);
844 goto err_disable_device;
845 }
846 dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
847 if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
848 err(msg_HPC_non_compaq_or_intel);
849 rc = -ENODEV;
850 goto err_disable_device;
851 }
852 883
853 ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL); 884 /* TODO: This code can be made to support non-Compaq or Intel
854 if (!ctrl) { 885 * subsystem IDs
855 err("%s : out of memory\n", __func__); 886 */
856 rc = -ENOMEM; 887 rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
857 goto err_disable_device; 888 if (rc) {
858 } 889 err("%s : pci_read_config_word failed\n", __func__);
890 goto err_disable_device;
891 }
892 dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
893 if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
894 err(msg_HPC_non_compaq_or_intel);
895 rc = -ENODEV;
896 goto err_disable_device;
897 }
859 898
860 rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid); 899 ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL);
861 if (rc) { 900 if (!ctrl) {
862 err("%s : pci_read_config_word failed\n", __func__); 901 err("%s : out of memory\n", __func__);
863 goto err_free_ctrl; 902 rc = -ENOMEM;
864 } 903 goto err_disable_device;
904 }
865 905
866 info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid); 906 rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid);
867 907 if (rc) {
868 /* Set Vendor ID, so it can be accessed later from other functions */ 908 err("%s : pci_read_config_word failed\n", __func__);
869 ctrl->vendor_id = vendor_id; 909 goto err_free_ctrl;
870 910 }
871 switch (subsystem_vid) {
872 case PCI_VENDOR_ID_COMPAQ:
873 if (pdev->revision >= 0x13) { /* CIOBX */
874 ctrl->push_flag = 1;
875 ctrl->slot_switch_type = 1;
876 ctrl->push_button = 1;
877 ctrl->pci_config_space = 1;
878 ctrl->defeature_PHP = 1;
879 ctrl->pcix_support = 1;
880 ctrl->pcix_speed_capability = 1;
881 pci_read_config_byte(pdev, 0x41, &bus_cap);
882 if (bus_cap & 0x80) {
883 dbg("bus max supports 133MHz PCI-X\n");
884 ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
885 break;
886 }
887 if (bus_cap & 0x40) {
888 dbg("bus max supports 100MHz PCI-X\n");
889 ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
890 break;
891 }
892 if (bus_cap & 20) {
893 dbg("bus max supports 66MHz PCI-X\n");
894 ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
895 break;
896 }
897 if (bus_cap & 10) {
898 dbg("bus max supports 66MHz PCI\n");
899 ctrl->speed_capability = PCI_SPEED_66MHz;
900 break;
901 }
902
903 break;
904 }
905
906 switch (subsystem_deviceid) {
907 case PCI_SUB_HPC_ID:
908 /* Original 6500/7000 implementation */
909 ctrl->slot_switch_type = 1;
910 ctrl->speed_capability = PCI_SPEED_33MHz;
911 ctrl->push_button = 0;
912 ctrl->pci_config_space = 1;
913 ctrl->defeature_PHP = 1;
914 ctrl->pcix_support = 0;
915 ctrl->pcix_speed_capability = 0;
916 break;
917 case PCI_SUB_HPC_ID2:
918 /* First Pushbutton implementation */
919 ctrl->push_flag = 1;
920 ctrl->slot_switch_type = 1;
921 ctrl->speed_capability = PCI_SPEED_33MHz;
922 ctrl->push_button = 1;
923 ctrl->pci_config_space = 1;
924 ctrl->defeature_PHP = 1;
925 ctrl->pcix_support = 0;
926 ctrl->pcix_speed_capability = 0;
927 break;
928 case PCI_SUB_HPC_ID_INTC:
929 /* Third party (6500/7000) */
930 ctrl->slot_switch_type = 1;
931 ctrl->speed_capability = PCI_SPEED_33MHz;
932 ctrl->push_button = 0;
933 ctrl->pci_config_space = 1;
934 ctrl->defeature_PHP = 1;
935 ctrl->pcix_support = 0;
936 ctrl->pcix_speed_capability = 0;
937 break;
938 case PCI_SUB_HPC_ID3:
939 /* First 66 Mhz implementation */
940 ctrl->push_flag = 1;
941 ctrl->slot_switch_type = 1;
942 ctrl->speed_capability = PCI_SPEED_66MHz;
943 ctrl->push_button = 1;
944 ctrl->pci_config_space = 1;
945 ctrl->defeature_PHP = 1;
946 ctrl->pcix_support = 0;
947 ctrl->pcix_speed_capability = 0;
948 break;
949 case PCI_SUB_HPC_ID4:
950 /* First PCI-X implementation, 100MHz */
951 ctrl->push_flag = 1;
952 ctrl->slot_switch_type = 1;
953 ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
954 ctrl->push_button = 1;
955 ctrl->pci_config_space = 1;
956 ctrl->defeature_PHP = 1;
957 ctrl->pcix_support = 1;
958 ctrl->pcix_speed_capability = 0;
959 break;
960 default:
961 err(msg_HPC_not_supported);
962 rc = -ENODEV;
963 goto err_free_ctrl;
964 }
965 break;
966 911
967 case PCI_VENDOR_ID_INTEL: 912 info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
968 /* Check for speed capability (0=33, 1=66) */ 913
969 if (subsystem_deviceid & 0x0001) { 914 /* Set Vendor ID, so it can be accessed later from other
970 ctrl->speed_capability = PCI_SPEED_66MHz; 915 * functions
971 } else { 916 */
972 ctrl->speed_capability = PCI_SPEED_33MHz; 917 ctrl->vendor_id = vendor_id;
973 } 918
974 919 switch (subsystem_vid) {
975 /* Check for push button */ 920 case PCI_VENDOR_ID_COMPAQ:
976 if (subsystem_deviceid & 0x0002) { 921 if (pdev->revision >= 0x13) { /* CIOBX */
977 /* no push button */ 922 ctrl->push_flag = 1;
978 ctrl->push_button = 0; 923 ctrl->slot_switch_type = 1;
979 } else { 924 ctrl->push_button = 1;
980 /* push button supported */ 925 ctrl->pci_config_space = 1;
981 ctrl->push_button = 1; 926 ctrl->defeature_PHP = 1;
982 } 927 ctrl->pcix_support = 1;
983 928 ctrl->pcix_speed_capability = 1;
984 /* Check for slot switch type (0=mechanical, 1=not mechanical) */ 929 pci_read_config_byte(pdev, 0x41, &bus_cap);
985 if (subsystem_deviceid & 0x0004) { 930 if (bus_cap & 0x80) {
986 /* no switch */ 931 dbg("bus max supports 133MHz PCI-X\n");
987 ctrl->slot_switch_type = 0; 932 ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
988 } else {
989 /* switch */
990 ctrl->slot_switch_type = 1;
991 }
992
993 /* PHP Status (0=De-feature PHP, 1=Normal operation) */
994 if (subsystem_deviceid & 0x0008) {
995 ctrl->defeature_PHP = 1; // PHP supported
996 } else {
997 ctrl->defeature_PHP = 0; // PHP not supported
998 }
999
1000 /* Alternate Base Address Register Interface (0=not supported, 1=supported) */
1001 if (subsystem_deviceid & 0x0010) {
1002 ctrl->alternate_base_address = 1; // supported
1003 } else {
1004 ctrl->alternate_base_address = 0; // not supported
1005 }
1006
1007 /* PCI Config Space Index (0=not supported, 1=supported) */
1008 if (subsystem_deviceid & 0x0020) {
1009 ctrl->pci_config_space = 1; // supported
1010 } else {
1011 ctrl->pci_config_space = 0; // not supported
1012 }
1013
1014 /* PCI-X support */
1015 if (subsystem_deviceid & 0x0080) {
1016 /* PCI-X capable */
1017 ctrl->pcix_support = 1;
1018 /* Frequency of operation in PCI-X mode */
1019 if (subsystem_deviceid & 0x0040) {
1020 /* 133MHz PCI-X if bit 7 is 1 */
1021 ctrl->pcix_speed_capability = 1;
1022 } else {
1023 /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
1024 /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
1025 ctrl->pcix_speed_capability = 0;
1026 }
1027 } else {
1028 /* Conventional PCI */
1029 ctrl->pcix_support = 0;
1030 ctrl->pcix_speed_capability = 0;
1031 }
1032 break; 933 break;
934 }
935 if (bus_cap & 0x40) {
936 dbg("bus max supports 100MHz PCI-X\n");
937 ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
938 break;
939 }
940 if (bus_cap & 20) {
941 dbg("bus max supports 66MHz PCI-X\n");
942 ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
943 break;
944 }
945 if (bus_cap & 10) {
946 dbg("bus max supports 66MHz PCI\n");
947 ctrl->speed_capability = PCI_SPEED_66MHz;
948 break;
949 }
950
951 break;
952 }
1033 953
1034 default: 954 switch (subsystem_deviceid) {
1035 err(msg_HPC_not_supported); 955 case PCI_SUB_HPC_ID:
1036 rc = -ENODEV; 956 /* Original 6500/7000 implementation */
1037 goto err_free_ctrl; 957 ctrl->slot_switch_type = 1;
958 ctrl->speed_capability = PCI_SPEED_33MHz;
959 ctrl->push_button = 0;
960 ctrl->pci_config_space = 1;
961 ctrl->defeature_PHP = 1;
962 ctrl->pcix_support = 0;
963 ctrl->pcix_speed_capability = 0;
964 break;
965 case PCI_SUB_HPC_ID2:
966 /* First Pushbutton implementation */
967 ctrl->push_flag = 1;
968 ctrl->slot_switch_type = 1;
969 ctrl->speed_capability = PCI_SPEED_33MHz;
970 ctrl->push_button = 1;
971 ctrl->pci_config_space = 1;
972 ctrl->defeature_PHP = 1;
973 ctrl->pcix_support = 0;
974 ctrl->pcix_speed_capability = 0;
975 break;
976 case PCI_SUB_HPC_ID_INTC:
977 /* Third party (6500/7000) */
978 ctrl->slot_switch_type = 1;
979 ctrl->speed_capability = PCI_SPEED_33MHz;
980 ctrl->push_button = 0;
981 ctrl->pci_config_space = 1;
982 ctrl->defeature_PHP = 1;
983 ctrl->pcix_support = 0;
984 ctrl->pcix_speed_capability = 0;
985 break;
986 case PCI_SUB_HPC_ID3:
987 /* First 66 Mhz implementation */
988 ctrl->push_flag = 1;
989 ctrl->slot_switch_type = 1;
990 ctrl->speed_capability = PCI_SPEED_66MHz;
991 ctrl->push_button = 1;
992 ctrl->pci_config_space = 1;
993 ctrl->defeature_PHP = 1;
994 ctrl->pcix_support = 0;
995 ctrl->pcix_speed_capability = 0;
996 break;
997 case PCI_SUB_HPC_ID4:
998 /* First PCI-X implementation, 100MHz */
999 ctrl->push_flag = 1;
1000 ctrl->slot_switch_type = 1;
1001 ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
1002 ctrl->push_button = 1;
1003 ctrl->pci_config_space = 1;
1004 ctrl->defeature_PHP = 1;
1005 ctrl->pcix_support = 1;
1006 ctrl->pcix_speed_capability = 0;
1007 break;
1008 default:
1009 err(msg_HPC_not_supported);
1010 rc = -ENODEV;
1011 goto err_free_ctrl;
1038 } 1012 }
1013 break;
1014
1015 case PCI_VENDOR_ID_INTEL:
1016 /* Check for speed capability (0=33, 1=66) */
1017 if (subsystem_deviceid & 0x0001)
1018 ctrl->speed_capability = PCI_SPEED_66MHz;
1019 else
1020 ctrl->speed_capability = PCI_SPEED_33MHz;
1021
1022 /* Check for push button */
1023 if (subsystem_deviceid & 0x0002)
1024 ctrl->push_button = 0;
1025 else
1026 ctrl->push_button = 1;
1027
1028 /* Check for slot switch type (0=mechanical, 1=not mechanical) */
1029 if (subsystem_deviceid & 0x0004)
1030 ctrl->slot_switch_type = 0;
1031 else
1032 ctrl->slot_switch_type = 1;
1033
1034 /* PHP Status (0=De-feature PHP, 1=Normal operation) */
1035 if (subsystem_deviceid & 0x0008)
1036 ctrl->defeature_PHP = 1; /* PHP supported */
1037 else
1038 ctrl->defeature_PHP = 0; /* PHP not supported */
1039
1040 /* Alternate Base Address Register Interface
1041 * (0=not supported, 1=supported)
1042 */
1043 if (subsystem_deviceid & 0x0010)
1044 ctrl->alternate_base_address = 1;
1045 else
1046 ctrl->alternate_base_address = 0;
1047
1048 /* PCI Config Space Index (0=not supported, 1=supported) */
1049 if (subsystem_deviceid & 0x0020)
1050 ctrl->pci_config_space = 1;
1051 else
1052 ctrl->pci_config_space = 0;
1053
1054 /* PCI-X support */
1055 if (subsystem_deviceid & 0x0080) {
1056 ctrl->pcix_support = 1;
1057 if (subsystem_deviceid & 0x0040)
1058 /* 133MHz PCI-X if bit 7 is 1 */
1059 ctrl->pcix_speed_capability = 1;
1060 else
1061 /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
1062 /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
1063 ctrl->pcix_speed_capability = 0;
1064 } else {
1065 /* Conventional PCI */
1066 ctrl->pcix_support = 0;
1067 ctrl->pcix_speed_capability = 0;
1068 }
1069 break;
1039 1070
1040 } else { 1071 default:
1041 err(msg_HPC_not_supported); 1072 err(msg_HPC_not_supported);
1042 return -ENODEV; 1073 rc = -ENODEV;
1074 goto err_free_ctrl;
1043 } 1075 }
1044 1076
1045 // Tell the user that we found one. 1077 /* Tell the user that we found one. */
1046 info("Initializing the PCI hot plug controller residing on PCI bus %d\n", 1078 info("Initializing the PCI hot plug controller residing on PCI bus %d\n",
1047 pdev->bus->number); 1079 pdev->bus->number);
1048 1080
@@ -1087,7 +1119,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1087 if (rc) { 1119 if (rc) {
1088 goto err_free_bus; 1120 goto err_free_bus;
1089 } 1121 }
1090 1122
1091 dbg("pdev = %p\n", pdev); 1123 dbg("pdev = %p\n", pdev);
1092 dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0)); 1124 dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0));
1093 dbg("pci resource len %llx\n", (unsigned long long)pci_resource_len(pdev, 0)); 1125 dbg("pci resource len %llx\n", (unsigned long long)pci_resource_len(pdev, 0));
@@ -1109,7 +1141,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1109 goto err_free_mem_region; 1141 goto err_free_mem_region;
1110 } 1142 }
1111 1143
1112 // Check for 66Mhz operation 1144 /* Check for 66Mhz operation */
1113 ctrl->speed = get_controller_speed(ctrl); 1145 ctrl->speed = get_controller_speed(ctrl);
1114 1146
1115 1147
@@ -1120,7 +1152,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1120 * 1152 *
1121 ********************************************************/ 1153 ********************************************************/
1122 1154
1123 // find the physical slot number of the first hot plug slot 1155 /* find the physical slot number of the first hot plug slot */
1124 1156
1125 /* Get slot won't work for devices behind bridges, but 1157 /* Get slot won't work for devices behind bridges, but
1126 * in this case it will always be called for the "base" 1158 * in this case it will always be called for the "base"
@@ -1137,7 +1169,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1137 goto err_iounmap; 1169 goto err_iounmap;
1138 } 1170 }
1139 1171
1140 // Store PCI Config Space for all devices on this bus 1172 /* Store PCI Config Space for all devices on this bus */
1141 rc = cpqhp_save_config(ctrl, ctrl->bus, readb(ctrl->hpc_reg + SLOT_MASK)); 1173 rc = cpqhp_save_config(ctrl, ctrl->bus, readb(ctrl->hpc_reg + SLOT_MASK));
1142 if (rc) { 1174 if (rc) {
1143 err("%s: unable to save PCI configuration data, error %d\n", 1175 err("%s: unable to save PCI configuration data, error %d\n",
@@ -1148,7 +1180,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1148 /* 1180 /*
1149 * Get IO, memory, and IRQ resources for new devices 1181 * Get IO, memory, and IRQ resources for new devices
1150 */ 1182 */
1151 // The next line is required for cpqhp_find_available_resources 1183 /* The next line is required for cpqhp_find_available_resources */
1152 ctrl->interrupt = pdev->irq; 1184 ctrl->interrupt = pdev->irq;
1153 if (ctrl->interrupt < 0x10) { 1185 if (ctrl->interrupt < 0x10) {
1154 cpqhp_legacy_mode = 1; 1186 cpqhp_legacy_mode = 1;
@@ -1182,7 +1214,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1182 __func__, rc); 1214 __func__, rc);
1183 goto err_iounmap; 1215 goto err_iounmap;
1184 } 1216 }
1185 1217
1186 /* Mask all general input interrupts */ 1218 /* Mask all general input interrupts */
1187 writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_MASK); 1219 writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_MASK);
1188 1220
@@ -1196,12 +1228,14 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1196 goto err_iounmap; 1228 goto err_iounmap;
1197 } 1229 }
1198 1230
1199 /* Enable Shift Out interrupt and clear it, also enable SERR on power fault */ 1231 /* Enable Shift Out interrupt and clear it, also enable SERR on power
1232 * fault
1233 */
1200 temp_word = readw(ctrl->hpc_reg + MISC); 1234 temp_word = readw(ctrl->hpc_reg + MISC);
1201 temp_word |= 0x4006; 1235 temp_word |= 0x4006;
1202 writew(temp_word, ctrl->hpc_reg + MISC); 1236 writew(temp_word, ctrl->hpc_reg + MISC);
1203 1237
1204 // Changed 05/05/97 to clear all interrupts at start 1238 /* Changed 05/05/97 to clear all interrupts at start */
1205 writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_INPUT_CLEAR); 1239 writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_INPUT_CLEAR);
1206 1240
1207 ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); 1241 ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
@@ -1216,13 +1250,14 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1216 cpqhp_ctrl_list = ctrl; 1250 cpqhp_ctrl_list = ctrl;
1217 } 1251 }
1218 1252
1219 // turn off empty slots here unless command line option "ON" set 1253 /* turn off empty slots here unless command line option "ON" set
1220 // Wait for exclusive access to hardware 1254 * Wait for exclusive access to hardware
1255 */
1221 mutex_lock(&ctrl->crit_sect); 1256 mutex_lock(&ctrl->crit_sect);
1222 1257
1223 num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F; 1258 num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
1224 1259
1225 // find first device number for the ctrl 1260 /* find first device number for the ctrl */
1226 device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4; 1261 device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
1227 1262
1228 while (num_of_slots) { 1263 while (num_of_slots) {
@@ -1234,23 +1269,21 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1234 hp_slot = func->device - ctrl->slot_device_offset; 1269 hp_slot = func->device - ctrl->slot_device_offset;
1235 dbg("hp_slot: %d\n", hp_slot); 1270 dbg("hp_slot: %d\n", hp_slot);
1236 1271
1237 // We have to save the presence info for these slots 1272 /* We have to save the presence info for these slots */
1238 temp_word = ctrl->ctrl_int_comp >> 16; 1273 temp_word = ctrl->ctrl_int_comp >> 16;
1239 func->presence_save = (temp_word >> hp_slot) & 0x01; 1274 func->presence_save = (temp_word >> hp_slot) & 0x01;
1240 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; 1275 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
1241 1276
1242 if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) { 1277 if (ctrl->ctrl_int_comp & (0x1L << hp_slot))
1243 func->switch_save = 0; 1278 func->switch_save = 0;
1244 } else { 1279 else
1245 func->switch_save = 0x10; 1280 func->switch_save = 0x10;
1246 }
1247 1281
1248 if (!power_mode) { 1282 if (!power_mode)
1249 if (!func->is_a_board) { 1283 if (!func->is_a_board) {
1250 green_LED_off(ctrl, hp_slot); 1284 green_LED_off(ctrl, hp_slot);
1251 slot_disable(ctrl, hp_slot); 1285 slot_disable(ctrl, hp_slot);
1252 } 1286 }
1253 }
1254 1287
1255 device++; 1288 device++;
1256 num_of_slots--; 1289 num_of_slots--;
@@ -1258,7 +1291,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1258 1291
1259 if (!power_mode) { 1292 if (!power_mode) {
1260 set_SOGO(ctrl); 1293 set_SOGO(ctrl);
1261 // Wait for SOBS to be unset 1294 /* Wait for SOBS to be unset */
1262 wait_for_ctrl_irq(ctrl); 1295 wait_for_ctrl_irq(ctrl);
1263 } 1296 }
1264 1297
@@ -1269,7 +1302,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1269 goto err_free_irq; 1302 goto err_free_irq;
1270 } 1303 }
1271 1304
1272 // Done with exclusive hardware access 1305 /* Done with exclusive hardware access */
1273 mutex_unlock(&ctrl->crit_sect); 1306 mutex_unlock(&ctrl->crit_sect);
1274 1307
1275 cpqhp_create_debugfs_files(ctrl); 1308 cpqhp_create_debugfs_files(ctrl);
@@ -1291,77 +1324,6 @@ err_disable_device:
1291 return rc; 1324 return rc;
1292} 1325}
1293 1326
1294
1295static int one_time_init(void)
1296{
1297 int loop;
1298 int retval = 0;
1299
1300 if (initialized)
1301 return 0;
1302
1303 power_mode = 0;
1304
1305 retval = pci_print_IRQ_route();
1306 if (retval)
1307 goto error;
1308
1309 dbg("Initialize + Start the notification mechanism \n");
1310
1311 retval = cpqhp_event_start_thread();
1312 if (retval)
1313 goto error;
1314
1315 dbg("Initialize slot lists\n");
1316 for (loop = 0; loop < 256; loop++) {
1317 cpqhp_slot_list[loop] = NULL;
1318 }
1319
1320 // FIXME: We also need to hook the NMI handler eventually.
1321 // this also needs to be worked with Christoph
1322 // register_NMI_handler();
1323
1324 // Map rom address
1325 cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
1326 if (!cpqhp_rom_start) {
1327 err ("Could not ioremap memory region for ROM\n");
1328 retval = -EIO;
1329 goto error;
1330 }
1331
1332 /* Now, map the int15 entry point if we are on compaq specific hardware */
1333 compaq_nvram_init(cpqhp_rom_start);
1334
1335 /* Map smbios table entry point structure */
1336 smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
1337 cpqhp_rom_start + ROM_PHY_LEN);
1338 if (!smbios_table) {
1339 err ("Could not find the SMBIOS pointer in memory\n");
1340 retval = -EIO;
1341 goto error_rom_start;
1342 }
1343
1344 smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
1345 readw(smbios_table + ST_LENGTH));
1346 if (!smbios_start) {
1347 err ("Could not ioremap memory region taken from SMBIOS values\n");
1348 retval = -EIO;
1349 goto error_smbios_start;
1350 }
1351
1352 initialized = 1;
1353
1354 return retval;
1355
1356error_smbios_start:
1357 iounmap(smbios_start);
1358error_rom_start:
1359 iounmap(cpqhp_rom_start);
1360error:
1361 return retval;
1362}
1363
1364
1365static void __exit unload_cpqphpd(void) 1327static void __exit unload_cpqphpd(void)
1366{ 1328{
1367 struct pci_func *next; 1329 struct pci_func *next;
@@ -1381,10 +1343,10 @@ static void __exit unload_cpqphpd(void)
1381 if (ctrl->hpc_reg) { 1343 if (ctrl->hpc_reg) {
1382 u16 misc; 1344 u16 misc;
1383 rc = read_slot_enable (ctrl); 1345 rc = read_slot_enable (ctrl);
1384 1346
1385 writeb(0, ctrl->hpc_reg + SLOT_SERR); 1347 writeb(0, ctrl->hpc_reg + SLOT_SERR);
1386 writel(0xFFFFFFC0L | ~rc, ctrl->hpc_reg + INT_MASK); 1348 writel(0xFFFFFFC0L | ~rc, ctrl->hpc_reg + INT_MASK);
1387 1349
1388 misc = readw(ctrl->hpc_reg + MISC); 1350 misc = readw(ctrl->hpc_reg + MISC);
1389 misc &= 0xFFFD; 1351 misc &= 0xFFFD;
1390 writew(misc, ctrl->hpc_reg + MISC); 1352 writew(misc, ctrl->hpc_reg + MISC);
@@ -1464,38 +1426,34 @@ static void __exit unload_cpqphpd(void)
1464 } 1426 }
1465 } 1427 }
1466 1428
1467 // Stop the notification mechanism 1429 /* Stop the notification mechanism */
1468 if (initialized) 1430 if (initialized)
1469 cpqhp_event_stop_thread(); 1431 cpqhp_event_stop_thread();
1470 1432
1471 //unmap the rom address 1433 /* unmap the rom address */
1472 if (cpqhp_rom_start) 1434 if (cpqhp_rom_start)
1473 iounmap(cpqhp_rom_start); 1435 iounmap(cpqhp_rom_start);
1474 if (smbios_start) 1436 if (smbios_start)
1475 iounmap(smbios_start); 1437 iounmap(smbios_start);
1476} 1438}
1477 1439
1478
1479
1480static struct pci_device_id hpcd_pci_tbl[] = { 1440static struct pci_device_id hpcd_pci_tbl[] = {
1481 { 1441 {
1482 /* handle any PCI Hotplug controller */ 1442 /* handle any PCI Hotplug controller */
1483 .class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00), 1443 .class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00),
1484 .class_mask = ~0, 1444 .class_mask = ~0,
1485 1445
1486 /* no matter who makes it */ 1446 /* no matter who makes it */
1487 .vendor = PCI_ANY_ID, 1447 .vendor = PCI_ANY_ID,
1488 .device = PCI_ANY_ID, 1448 .device = PCI_ANY_ID,
1489 .subvendor = PCI_ANY_ID, 1449 .subvendor = PCI_ANY_ID,
1490 .subdevice = PCI_ANY_ID, 1450 .subdevice = PCI_ANY_ID,
1491 1451
1492 }, { /* end: all zeroes */ } 1452 }, { /* end: all zeroes */ }
1493}; 1453};
1494 1454
1495MODULE_DEVICE_TABLE(pci, hpcd_pci_tbl); 1455MODULE_DEVICE_TABLE(pci, hpcd_pci_tbl);
1496 1456
1497
1498
1499static struct pci_driver cpqhpc_driver = { 1457static struct pci_driver cpqhpc_driver = {
1500 .name = "compaq_pci_hotplug", 1458 .name = "compaq_pci_hotplug",
1501 .id_table = hpcd_pci_tbl, 1459 .id_table = hpcd_pci_tbl,
@@ -1503,8 +1461,6 @@ static struct pci_driver cpqhpc_driver = {
1503 /* remove: cpqhpc_remove_one, */ 1461 /* remove: cpqhpc_remove_one, */
1504}; 1462};
1505 1463
1506
1507
1508static int __init cpqhpc_init(void) 1464static int __init cpqhpc_init(void)
1509{ 1465{
1510 int result; 1466 int result;
@@ -1518,7 +1474,6 @@ static int __init cpqhpc_init(void)
1518 return result; 1474 return result;
1519} 1475}
1520 1476
1521
1522static void __exit cpqhpc_cleanup(void) 1477static void __exit cpqhpc_cleanup(void)
1523{ 1478{
1524 dbg("unload_cpqphpd()\n"); 1479 dbg("unload_cpqphpd()\n");
@@ -1529,8 +1484,5 @@ static void __exit cpqhpc_cleanup(void)
1529 cpqhp_shutdown_debugfs(); 1484 cpqhp_shutdown_debugfs();
1530} 1485}
1531 1486
1532
1533module_init(cpqhpc_init); 1487module_init(cpqhpc_init);
1534module_exit(cpqhpc_cleanup); 1488module_exit(cpqhpc_cleanup);
1535
1536
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index cc227a8c4b11..2fa47af992a8 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -81,14 +81,15 @@ static u8 handle_switch_change(u8 change, struct controller * ctrl)
81 81
82 for (hp_slot = 0; hp_slot < 6; hp_slot++) { 82 for (hp_slot = 0; hp_slot < 6; hp_slot++) {
83 if (change & (0x1L << hp_slot)) { 83 if (change & (0x1L << hp_slot)) {
84 /********************************** 84 /*
85 * this one changed. 85 * this one changed.
86 **********************************/ 86 */
87 func = cpqhp_slot_find(ctrl->bus, 87 func = cpqhp_slot_find(ctrl->bus,
88 (hp_slot + ctrl->slot_device_offset), 0); 88 (hp_slot + ctrl->slot_device_offset), 0);
89 89
90 /* this is the structure that tells the worker thread 90 /* this is the structure that tells the worker thread
91 *what to do */ 91 * what to do
92 */
92 taskInfo = &(ctrl->event_queue[ctrl->next_event]); 93 taskInfo = &(ctrl->event_queue[ctrl->next_event]);
93 ctrl->next_event = (ctrl->next_event + 1) % 10; 94 ctrl->next_event = (ctrl->next_event + 1) % 10;
94 taskInfo->hp_slot = hp_slot; 95 taskInfo->hp_slot = hp_slot;
@@ -100,17 +101,17 @@ static u8 handle_switch_change(u8 change, struct controller * ctrl)
100 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; 101 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
101 102
102 if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) { 103 if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
103 /********************************** 104 /*
104 * Switch opened 105 * Switch opened
105 **********************************/ 106 */
106 107
107 func->switch_save = 0; 108 func->switch_save = 0;
108 109
109 taskInfo->event_type = INT_SWITCH_OPEN; 110 taskInfo->event_type = INT_SWITCH_OPEN;
110 } else { 111 } else {
111 /********************************** 112 /*
112 * Switch closed 113 * Switch closed
113 **********************************/ 114 */
114 115
115 func->switch_save = 0x10; 116 func->switch_save = 0x10;
116 117
@@ -131,9 +132,8 @@ static struct slot *cpqhp_find_slot(struct controller *ctrl, u8 device)
131{ 132{
132 struct slot *slot = ctrl->slot; 133 struct slot *slot = ctrl->slot;
133 134
134 while (slot && (slot->device != device)) { 135 while (slot && (slot->device != device))
135 slot = slot->next; 136 slot = slot->next;
136 }
137 137
138 return slot; 138 return slot;
139} 139}
@@ -152,17 +152,17 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
152 if (!change) 152 if (!change)
153 return 0; 153 return 0;
154 154
155 /********************************** 155 /*
156 * Presence Change 156 * Presence Change
157 **********************************/ 157 */
158 dbg("cpqsbd: Presence/Notify input change.\n"); 158 dbg("cpqsbd: Presence/Notify input change.\n");
159 dbg(" Changed bits are 0x%4.4x\n", change ); 159 dbg(" Changed bits are 0x%4.4x\n", change );
160 160
161 for (hp_slot = 0; hp_slot < 6; hp_slot++) { 161 for (hp_slot = 0; hp_slot < 6; hp_slot++) {
162 if (change & (0x0101 << hp_slot)) { 162 if (change & (0x0101 << hp_slot)) {
163 /********************************** 163 /*
164 * this one changed. 164 * this one changed.
165 **********************************/ 165 */
166 func = cpqhp_slot_find(ctrl->bus, 166 func = cpqhp_slot_find(ctrl->bus,
167 (hp_slot + ctrl->slot_device_offset), 0); 167 (hp_slot + ctrl->slot_device_offset), 0);
168 168
@@ -177,22 +177,23 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
177 return 0; 177 return 0;
178 178
179 /* If the switch closed, must be a button 179 /* If the switch closed, must be a button
180 * If not in button mode, nevermind */ 180 * If not in button mode, nevermind
181 */
181 if (func->switch_save && (ctrl->push_button == 1)) { 182 if (func->switch_save && (ctrl->push_button == 1)) {
182 temp_word = ctrl->ctrl_int_comp >> 16; 183 temp_word = ctrl->ctrl_int_comp >> 16;
183 temp_byte = (temp_word >> hp_slot) & 0x01; 184 temp_byte = (temp_word >> hp_slot) & 0x01;
184 temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02; 185 temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02;
185 186
186 if (temp_byte != func->presence_save) { 187 if (temp_byte != func->presence_save) {
187 /************************************** 188 /*
188 * button Pressed (doesn't do anything) 189 * button Pressed (doesn't do anything)
189 **************************************/ 190 */
190 dbg("hp_slot %d button pressed\n", hp_slot); 191 dbg("hp_slot %d button pressed\n", hp_slot);
191 taskInfo->event_type = INT_BUTTON_PRESS; 192 taskInfo->event_type = INT_BUTTON_PRESS;
192 } else { 193 } else {
193 /********************************** 194 /*
194 * button Released - TAKE ACTION!!!! 195 * button Released - TAKE ACTION!!!!
195 **********************************/ 196 */
196 dbg("hp_slot %d button released\n", hp_slot); 197 dbg("hp_slot %d button released\n", hp_slot);
197 taskInfo->event_type = INT_BUTTON_RELEASE; 198 taskInfo->event_type = INT_BUTTON_RELEASE;
198 199
@@ -210,7 +211,8 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
210 } 211 }
211 } else { 212 } else {
212 /* Switch is open, assume a presence change 213 /* Switch is open, assume a presence change
213 * Save the presence state */ 214 * Save the presence state
215 */
214 temp_word = ctrl->ctrl_int_comp >> 16; 216 temp_word = ctrl->ctrl_int_comp >> 16;
215 func->presence_save = (temp_word >> hp_slot) & 0x01; 217 func->presence_save = (temp_word >> hp_slot) & 0x01;
216 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; 218 func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
@@ -241,17 +243,17 @@ static u8 handle_power_fault(u8 change, struct controller * ctrl)
241 if (!change) 243 if (!change)
242 return 0; 244 return 0;
243 245
244 /********************************** 246 /*
245 * power fault 247 * power fault
246 **********************************/ 248 */
247 249
248 info("power fault interrupt\n"); 250 info("power fault interrupt\n");
249 251
250 for (hp_slot = 0; hp_slot < 6; hp_slot++) { 252 for (hp_slot = 0; hp_slot < 6; hp_slot++) {
251 if (change & (0x01 << hp_slot)) { 253 if (change & (0x01 << hp_slot)) {
252 /********************************** 254 /*
253 * this one changed. 255 * this one changed.
254 **********************************/ 256 */
255 func = cpqhp_slot_find(ctrl->bus, 257 func = cpqhp_slot_find(ctrl->bus,
256 (hp_slot + ctrl->slot_device_offset), 0); 258 (hp_slot + ctrl->slot_device_offset), 0);
257 259
@@ -262,16 +264,16 @@ static u8 handle_power_fault(u8 change, struct controller * ctrl)
262 rc++; 264 rc++;
263 265
264 if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) { 266 if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) {
265 /********************************** 267 /*
266 * power fault Cleared 268 * power fault Cleared
267 **********************************/ 269 */
268 func->status = 0x00; 270 func->status = 0x00;
269 271
270 taskInfo->event_type = INT_POWER_FAULT_CLEAR; 272 taskInfo->event_type = INT_POWER_FAULT_CLEAR;
271 } else { 273 } else {
272 /********************************** 274 /*
273 * power fault 275 * power fault
274 **********************************/ 276 */
275 taskInfo->event_type = INT_POWER_FAULT; 277 taskInfo->event_type = INT_POWER_FAULT;
276 278
277 if (ctrl->rev < 4) { 279 if (ctrl->rev < 4) {
@@ -432,13 +434,15 @@ static struct pci_resource *do_pre_bridge_resource_split(struct pci_resource **h
432 434
433 435
434 /* If we got here, there the bridge requires some of the resource, but 436 /* If we got here, there the bridge requires some of the resource, but
435 * we may be able to split some off of the front */ 437 * we may be able to split some off of the front
438 */
436 439
437 node = *head; 440 node = *head;
438 441
439 if (node->length & (alignment -1)) { 442 if (node->length & (alignment -1)) {
440 /* this one isn't an aligned length, so we'll make a new entry 443 /* this one isn't an aligned length, so we'll make a new entry
441 * and split it up. */ 444 * and split it up.
445 */
442 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); 446 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
443 447
444 if (!split_node) 448 if (!split_node)
@@ -544,10 +548,10 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
544 if (!(*head)) 548 if (!(*head))
545 return NULL; 549 return NULL;
546 550
547 if ( cpqhp_resource_sort_and_combine(head) ) 551 if (cpqhp_resource_sort_and_combine(head))
548 return NULL; 552 return NULL;
549 553
550 if ( sort_by_size(head) ) 554 if (sort_by_size(head))
551 return NULL; 555 return NULL;
552 556
553 for (node = *head; node; node = node->next) { 557 for (node = *head; node; node = node->next) {
@@ -556,7 +560,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
556 560
557 if (node->base & (size - 1)) { 561 if (node->base & (size - 1)) {
558 /* this one isn't base aligned properly 562 /* this one isn't base aligned properly
559 * so we'll make a new entry and split it up */ 563 * so we'll make a new entry and split it up
564 */
560 temp_dword = (node->base | (size-1)) + 1; 565 temp_dword = (node->base | (size-1)) + 1;
561 566
562 /* Short circuit if adjusted size is too small */ 567 /* Short circuit if adjusted size is too small */
@@ -581,7 +586,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
581 /* Don't need to check if too small since we already did */ 586 /* Don't need to check if too small since we already did */
582 if (node->length > size) { 587 if (node->length > size) {
583 /* this one is longer than we need 588 /* this one is longer than we need
584 * so we'll make a new entry and split it up */ 589 * so we'll make a new entry and split it up
590 */
585 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); 591 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
586 592
587 if (!split_node) 593 if (!split_node)
@@ -601,7 +607,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
601 continue; 607 continue;
602 608
603 /* If we got here, then it is the right size 609 /* If we got here, then it is the right size
604 * Now take it out of the list and break */ 610 * Now take it out of the list and break
611 */
605 if (*head == node) { 612 if (*head == node) {
606 *head = node->next; 613 *head = node->next;
607 } else { 614 } else {
@@ -642,14 +649,16 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
642 return NULL; 649 return NULL;
643 650
644 for (max = *head; max; max = max->next) { 651 for (max = *head; max; max = max->next) {
645 /* If not big enough we could probably just bail, 652 /* If not big enough we could probably just bail,
646 * instead we'll continue to the next. */ 653 * instead we'll continue to the next.
654 */
647 if (max->length < size) 655 if (max->length < size)
648 continue; 656 continue;
649 657
650 if (max->base & (size - 1)) { 658 if (max->base & (size - 1)) {
651 /* this one isn't base aligned properly 659 /* this one isn't base aligned properly
652 * so we'll make a new entry and split it up */ 660 * so we'll make a new entry and split it up
661 */
653 temp_dword = (max->base | (size-1)) + 1; 662 temp_dword = (max->base | (size-1)) + 1;
654 663
655 /* Short circuit if adjusted size is too small */ 664 /* Short circuit if adjusted size is too small */
@@ -672,7 +681,8 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
672 681
673 if ((max->base + max->length) & (size - 1)) { 682 if ((max->base + max->length) & (size - 1)) {
674 /* this one isn't end aligned properly at the top 683 /* this one isn't end aligned properly at the top
675 * so we'll make a new entry and split it up */ 684 * so we'll make a new entry and split it up
685 */
676 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); 686 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
677 687
678 if (!split_node) 688 if (!split_node)
@@ -744,7 +754,8 @@ static struct pci_resource *get_resource(struct pci_resource **head, u32 size)
744 if (node->base & (size - 1)) { 754 if (node->base & (size - 1)) {
745 dbg("%s: not aligned\n", __func__); 755 dbg("%s: not aligned\n", __func__);
746 /* this one isn't base aligned properly 756 /* this one isn't base aligned properly
747 * so we'll make a new entry and split it up */ 757 * so we'll make a new entry and split it up
758 */
748 temp_dword = (node->base | (size-1)) + 1; 759 temp_dword = (node->base | (size-1)) + 1;
749 760
750 /* Short circuit if adjusted size is too small */ 761 /* Short circuit if adjusted size is too small */
@@ -769,7 +780,8 @@ static struct pci_resource *get_resource(struct pci_resource **head, u32 size)
769 if (node->length > size) { 780 if (node->length > size) {
770 dbg("%s: too big\n", __func__); 781 dbg("%s: too big\n", __func__);
771 /* this one is longer than we need 782 /* this one is longer than we need
772 * so we'll make a new entry and split it up */ 783 * so we'll make a new entry and split it up
784 */
773 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); 785 split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
774 786
775 if (!split_node) 787 if (!split_node)
@@ -886,19 +898,19 @@ irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data)
886 u32 Diff; 898 u32 Diff;
887 u32 temp_dword; 899 u32 temp_dword;
888 900
889 901
890 misc = readw(ctrl->hpc_reg + MISC); 902 misc = readw(ctrl->hpc_reg + MISC);
891 /*************************************** 903 /*
892 * Check to see if it was our interrupt 904 * Check to see if it was our interrupt
893 ***************************************/ 905 */
894 if (!(misc & 0x000C)) { 906 if (!(misc & 0x000C)) {
895 return IRQ_NONE; 907 return IRQ_NONE;
896 } 908 }
897 909
898 if (misc & 0x0004) { 910 if (misc & 0x0004) {
899 /********************************** 911 /*
900 * Serial Output interrupt Pending 912 * Serial Output interrupt Pending
901 **********************************/ 913 */
902 914
903 /* Clear the interrupt */ 915 /* Clear the interrupt */
904 misc |= 0x0004; 916 misc |= 0x0004;
@@ -961,11 +973,8 @@ struct pci_func *cpqhp_slot_create(u8 busnumber)
961 struct pci_func *next; 973 struct pci_func *next;
962 974
963 new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL); 975 new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL);
964 if (new_slot == NULL) { 976 if (new_slot == NULL)
965 /* I'm not dead yet!
966 * You will be. */
967 return new_slot; 977 return new_slot;
968 }
969 978
970 new_slot->next = NULL; 979 new_slot->next = NULL;
971 new_slot->configured = 1; 980 new_slot->configured = 1;
@@ -996,10 +1005,8 @@ static int slot_remove(struct pci_func * old_slot)
996 return 1; 1005 return 1;
997 1006
998 next = cpqhp_slot_list[old_slot->bus]; 1007 next = cpqhp_slot_list[old_slot->bus];
999 1008 if (next == NULL)
1000 if (next == NULL) {
1001 return 1; 1009 return 1;
1002 }
1003 1010
1004 if (next == old_slot) { 1011 if (next == old_slot) {
1005 cpqhp_slot_list[old_slot->bus] = old_slot->next; 1012 cpqhp_slot_list[old_slot->bus] = old_slot->next;
@@ -1008,9 +1015,8 @@ static int slot_remove(struct pci_func * old_slot)
1008 return 0; 1015 return 0;
1009 } 1016 }
1010 1017
1011 while ((next->next != old_slot) && (next->next != NULL)) { 1018 while ((next->next != old_slot) && (next->next != NULL))
1012 next = next->next; 1019 next = next->next;
1013 }
1014 1020
1015 if (next->next == old_slot) { 1021 if (next->next == old_slot) {
1016 next->next = old_slot->next; 1022 next->next = old_slot->next;
@@ -1040,9 +1046,8 @@ static int bridge_slot_remove(struct pci_func *bridge)
1040 for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) { 1046 for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) {
1041 next = cpqhp_slot_list[tempBus]; 1047 next = cpqhp_slot_list[tempBus];
1042 1048
1043 while (!slot_remove(next)) { 1049 while (!slot_remove(next))
1044 next = cpqhp_slot_list[tempBus]; 1050 next = cpqhp_slot_list[tempBus];
1045 }
1046 } 1051 }
1047 1052
1048 next = cpqhp_slot_list[bridge->bus]; 1053 next = cpqhp_slot_list[bridge->bus];
@@ -1130,39 +1135,43 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1130 u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER); 1135 u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER);
1131 u16 reg16; 1136 u16 reg16;
1132 u32 leds = readl(ctrl->hpc_reg + LED_CONTROL); 1137 u32 leds = readl(ctrl->hpc_reg + LED_CONTROL);
1133 1138
1134 if (ctrl->speed == adapter_speed) 1139 if (ctrl->speed == adapter_speed)
1135 return 0; 1140 return 0;
1136 1141
1137 /* We don't allow freq/mode changes if we find another adapter running 1142 /* We don't allow freq/mode changes if we find another adapter running
1138 * in another slot on this controller */ 1143 * in another slot on this controller
1144 */
1139 for(slot = ctrl->slot; slot; slot = slot->next) { 1145 for(slot = ctrl->slot; slot; slot = slot->next) {
1140 if (slot->device == (hp_slot + ctrl->slot_device_offset)) 1146 if (slot->device == (hp_slot + ctrl->slot_device_offset))
1141 continue; 1147 continue;
1142 if (!slot->hotplug_slot || !slot->hotplug_slot->info) 1148 if (!slot->hotplug_slot || !slot->hotplug_slot->info)
1143 continue; 1149 continue;
1144 if (slot->hotplug_slot->info->adapter_status == 0) 1150 if (slot->hotplug_slot->info->adapter_status == 0)
1145 continue; 1151 continue;
1146 /* If another adapter is running on the same segment but at a 1152 /* If another adapter is running on the same segment but at a
1147 * lower speed/mode, we allow the new adapter to function at 1153 * lower speed/mode, we allow the new adapter to function at
1148 * this rate if supported */ 1154 * this rate if supported
1149 if (ctrl->speed < adapter_speed) 1155 */
1156 if (ctrl->speed < adapter_speed)
1150 return 0; 1157 return 0;
1151 1158
1152 return 1; 1159 return 1;
1153 } 1160 }
1154 1161
1155 /* If the controller doesn't support freq/mode changes and the 1162 /* If the controller doesn't support freq/mode changes and the
1156 * controller is running at a higher mode, we bail */ 1163 * controller is running at a higher mode, we bail
1164 */
1157 if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability)) 1165 if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability))
1158 return 1; 1166 return 1;
1159 1167
1160 /* But we allow the adapter to run at a lower rate if possible */ 1168 /* But we allow the adapter to run at a lower rate if possible */
1161 if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability)) 1169 if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability))
1162 return 0; 1170 return 0;
1163 1171
1164 /* We try to set the max speed supported by both the adapter and 1172 /* We try to set the max speed supported by both the adapter and
1165 * controller */ 1173 * controller
1174 */
1166 if (ctrl->speed_capability < adapter_speed) { 1175 if (ctrl->speed_capability < adapter_speed) {
1167 if (ctrl->speed == ctrl->speed_capability) 1176 if (ctrl->speed == ctrl->speed_capability)
1168 return 0; 1177 return 0;
@@ -1171,22 +1180,22 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1171 1180
1172 writel(0x0L, ctrl->hpc_reg + LED_CONTROL); 1181 writel(0x0L, ctrl->hpc_reg + LED_CONTROL);
1173 writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE); 1182 writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE);
1174 1183
1175 set_SOGO(ctrl); 1184 set_SOGO(ctrl);
1176 wait_for_ctrl_irq(ctrl); 1185 wait_for_ctrl_irq(ctrl);
1177 1186
1178 if (adapter_speed != PCI_SPEED_133MHz_PCIX) 1187 if (adapter_speed != PCI_SPEED_133MHz_PCIX)
1179 reg = 0xF5; 1188 reg = 0xF5;
1180 else 1189 else
1181 reg = 0xF4; 1190 reg = 0xF4;
1182 pci_write_config_byte(ctrl->pci_dev, 0x41, reg); 1191 pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
1183 1192
1184 reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ); 1193 reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ);
1185 reg16 &= ~0x000F; 1194 reg16 &= ~0x000F;
1186 switch(adapter_speed) { 1195 switch(adapter_speed) {
1187 case(PCI_SPEED_133MHz_PCIX): 1196 case(PCI_SPEED_133MHz_PCIX):
1188 reg = 0x75; 1197 reg = 0x75;
1189 reg16 |= 0xB; 1198 reg16 |= 0xB;
1190 break; 1199 break;
1191 case(PCI_SPEED_100MHz_PCIX): 1200 case(PCI_SPEED_100MHz_PCIX):
1192 reg = 0x74; 1201 reg = 0x74;
@@ -1203,48 +1212,48 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1203 default: /* 33MHz PCI 2.2 */ 1212 default: /* 33MHz PCI 2.2 */
1204 reg = 0x71; 1213 reg = 0x71;
1205 break; 1214 break;
1206 1215
1207 } 1216 }
1208 reg16 |= 0xB << 12; 1217 reg16 |= 0xB << 12;
1209 writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ); 1218 writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ);
1210 1219
1211 mdelay(5); 1220 mdelay(5);
1212 1221
1213 /* Reenable interrupts */ 1222 /* Reenable interrupts */
1214 writel(0, ctrl->hpc_reg + INT_MASK); 1223 writel(0, ctrl->hpc_reg + INT_MASK);
1215 1224
1216 pci_write_config_byte(ctrl->pci_dev, 0x41, reg); 1225 pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
1217 1226
1218 /* Restart state machine */ 1227 /* Restart state machine */
1219 reg = ~0xF; 1228 reg = ~0xF;
1220 pci_read_config_byte(ctrl->pci_dev, 0x43, &reg); 1229 pci_read_config_byte(ctrl->pci_dev, 0x43, &reg);
1221 pci_write_config_byte(ctrl->pci_dev, 0x43, reg); 1230 pci_write_config_byte(ctrl->pci_dev, 0x43, reg);
1222 1231
1223 /* Only if mode change...*/ 1232 /* Only if mode change...*/
1224 if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || 1233 if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
1225 ((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) 1234 ((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
1226 set_SOGO(ctrl); 1235 set_SOGO(ctrl);
1227 1236
1228 wait_for_ctrl_irq(ctrl); 1237 wait_for_ctrl_irq(ctrl);
1229 mdelay(1100); 1238 mdelay(1100);
1230 1239
1231 /* Restore LED/Slot state */ 1240 /* Restore LED/Slot state */
1232 writel(leds, ctrl->hpc_reg + LED_CONTROL); 1241 writel(leds, ctrl->hpc_reg + LED_CONTROL);
1233 writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE); 1242 writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE);
1234 1243
1235 set_SOGO(ctrl); 1244 set_SOGO(ctrl);
1236 wait_for_ctrl_irq(ctrl); 1245 wait_for_ctrl_irq(ctrl);
1237 1246
1238 ctrl->speed = adapter_speed; 1247 ctrl->speed = adapter_speed;
1239 slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 1248 slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
1240 1249
1241 info("Successfully changed frequency/mode for adapter in slot %d\n", 1250 info("Successfully changed frequency/mode for adapter in slot %d\n",
1242 slot->number); 1251 slot->number);
1243 return 0; 1252 return 0;
1244} 1253}
1245 1254
1246/* the following routines constitute the bulk of the 1255/* the following routines constitute the bulk of the
1247 hotplug controller logic 1256 * hotplug controller logic
1248 */ 1257 */
1249 1258
1250 1259
@@ -1268,17 +1277,17 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
1268 1277
1269 hp_slot = func->device - ctrl->slot_device_offset; 1278 hp_slot = func->device - ctrl->slot_device_offset;
1270 1279
1271 if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot)) { 1280 /*
1272 /********************************** 1281 * The switch is open.
1273 * The switch is open. 1282 */
1274 **********************************/ 1283 if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot))
1275 rc = INTERLOCK_OPEN; 1284 rc = INTERLOCK_OPEN;
1276 } else if (is_slot_enabled (ctrl, hp_slot)) { 1285 /*
1277 /********************************** 1286 * The board is already on
1278 * The board is already on 1287 */
1279 **********************************/ 1288 else if (is_slot_enabled (ctrl, hp_slot))
1280 rc = CARD_FUNCTIONING; 1289 rc = CARD_FUNCTIONING;
1281 } else { 1290 else {
1282 mutex_lock(&ctrl->crit_sect); 1291 mutex_lock(&ctrl->crit_sect);
1283 1292
1284 /* turn on board without attaching to the bus */ 1293 /* turn on board without attaching to the bus */
@@ -1299,7 +1308,7 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
1299 1308
1300 /* Wait for SOBS to be unset */ 1309 /* Wait for SOBS to be unset */
1301 wait_for_ctrl_irq (ctrl); 1310 wait_for_ctrl_irq (ctrl);
1302 1311
1303 adapter_speed = get_adapter_speed(ctrl, hp_slot); 1312 adapter_speed = get_adapter_speed(ctrl, hp_slot);
1304 if (ctrl->speed != adapter_speed) 1313 if (ctrl->speed != adapter_speed)
1305 if (set_controller_speed(ctrl, adapter_speed, hp_slot)) 1314 if (set_controller_speed(ctrl, adapter_speed, hp_slot))
@@ -1352,7 +1361,8 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
1352 * Get slot won't work for devices behind 1361 * Get slot won't work for devices behind
1353 * bridges, but in this case it will always be 1362 * bridges, but in this case it will always be
1354 * called for the "base" bus/dev/func of an 1363 * called for the "base" bus/dev/func of an
1355 * adapter. */ 1364 * adapter.
1365 */
1356 1366
1357 mutex_lock(&ctrl->crit_sect); 1367 mutex_lock(&ctrl->crit_sect);
1358 1368
@@ -1377,7 +1387,8 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
1377 1387
1378 * Get slot won't work for devices behind bridges, but 1388 * Get slot won't work for devices behind bridges, but
1379 * in this case it will always be called for the "base" 1389 * in this case it will always be called for the "base"
1380 * bus/dev/func of an adapter. */ 1390 * bus/dev/func of an adapter.
1391 */
1381 1392
1382 mutex_lock(&ctrl->crit_sect); 1393 mutex_lock(&ctrl->crit_sect);
1383 1394
@@ -1434,7 +1445,8 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
1434 wait_for_ctrl_irq (ctrl); 1445 wait_for_ctrl_irq (ctrl);
1435 1446
1436 /* Change bits in slot power register to force another shift out 1447 /* Change bits in slot power register to force another shift out
1437 * NOTE: this is to work around the timer bug */ 1448 * NOTE: this is to work around the timer bug
1449 */
1438 temp_byte = readb(ctrl->hpc_reg + SLOT_POWER); 1450 temp_byte = readb(ctrl->hpc_reg + SLOT_POWER);
1439 writeb(0x00, ctrl->hpc_reg + SLOT_POWER); 1451 writeb(0x00, ctrl->hpc_reg + SLOT_POWER);
1440 writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER); 1452 writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER);
@@ -1443,12 +1455,12 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
1443 1455
1444 /* Wait for SOBS to be unset */ 1456 /* Wait for SOBS to be unset */
1445 wait_for_ctrl_irq (ctrl); 1457 wait_for_ctrl_irq (ctrl);
1446 1458
1447 adapter_speed = get_adapter_speed(ctrl, hp_slot); 1459 adapter_speed = get_adapter_speed(ctrl, hp_slot);
1448 if (ctrl->speed != adapter_speed) 1460 if (ctrl->speed != adapter_speed)
1449 if (set_controller_speed(ctrl, adapter_speed, hp_slot)) 1461 if (set_controller_speed(ctrl, adapter_speed, hp_slot))
1450 rc = WRONG_BUS_FREQUENCY; 1462 rc = WRONG_BUS_FREQUENCY;
1451 1463
1452 /* turn off board without attaching to the bus */ 1464 /* turn off board without attaching to the bus */
1453 disable_slot_power (ctrl, hp_slot); 1465 disable_slot_power (ctrl, hp_slot);
1454 1466
@@ -1461,7 +1473,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
1461 1473
1462 if (rc) 1474 if (rc)
1463 return rc; 1475 return rc;
1464 1476
1465 p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 1477 p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
1466 1478
1467 /* turn on board and blink green LED */ 1479 /* turn on board and blink green LED */
@@ -1521,7 +1533,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
1521 } 1533 }
1522 1534
1523 /* All F's is an empty slot or an invalid board */ 1535 /* All F's is an empty slot or an invalid board */
1524 if (temp_register != 0xFFFFFFFF) { /* Check for a board in the slot */ 1536 if (temp_register != 0xFFFFFFFF) {
1525 res_lists.io_head = ctrl->io_head; 1537 res_lists.io_head = ctrl->io_head;
1526 res_lists.mem_head = ctrl->mem_head; 1538 res_lists.mem_head = ctrl->mem_head;
1527 res_lists.p_mem_head = ctrl->p_mem_head; 1539 res_lists.p_mem_head = ctrl->p_mem_head;
@@ -1570,9 +1582,8 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
1570 index = 0; 1582 index = 0;
1571 do { 1583 do {
1572 new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++); 1584 new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++);
1573 if (new_slot && !new_slot->pci_dev) { 1585 if (new_slot && !new_slot->pci_dev)
1574 cpqhp_configure_device(ctrl, new_slot); 1586 cpqhp_configure_device(ctrl, new_slot);
1575 }
1576 } while (new_slot); 1587 } while (new_slot);
1577 1588
1578 mutex_lock(&ctrl->crit_sect); 1589 mutex_lock(&ctrl->crit_sect);
@@ -1859,12 +1870,12 @@ static void interrupt_event_handler(struct controller *ctrl)
1859 info(msg_button_on, p_slot->number); 1870 info(msg_button_on, p_slot->number);
1860 } 1871 }
1861 mutex_lock(&ctrl->crit_sect); 1872 mutex_lock(&ctrl->crit_sect);
1862 1873
1863 dbg("blink green LED and turn off amber\n"); 1874 dbg("blink green LED and turn off amber\n");
1864 1875
1865 amber_LED_off (ctrl, hp_slot); 1876 amber_LED_off (ctrl, hp_slot);
1866 green_LED_blink (ctrl, hp_slot); 1877 green_LED_blink (ctrl, hp_slot);
1867 1878
1868 set_SOGO(ctrl); 1879 set_SOGO(ctrl);
1869 1880
1870 /* Wait for SOBS to be unset */ 1881 /* Wait for SOBS to be unset */
@@ -1958,7 +1969,7 @@ void cpqhp_pushbutton_thread(unsigned long slot)
1958 if (cpqhp_process_SI(ctrl, func) != 0) { 1969 if (cpqhp_process_SI(ctrl, func) != 0) {
1959 amber_LED_on(ctrl, hp_slot); 1970 amber_LED_on(ctrl, hp_slot);
1960 green_LED_off(ctrl, hp_slot); 1971 green_LED_off(ctrl, hp_slot);
1961 1972
1962 set_SOGO(ctrl); 1973 set_SOGO(ctrl);
1963 1974
1964 /* Wait for SOBS to be unset */ 1975 /* Wait for SOBS to be unset */
@@ -2079,7 +2090,7 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
2079 struct pci_bus *pci_bus = ctrl->pci_bus; 2090 struct pci_bus *pci_bus = ctrl->pci_bus;
2080 int physical_slot=0; 2091 int physical_slot=0;
2081 2092
2082 device = func->device; 2093 device = func->device;
2083 func = cpqhp_slot_find(ctrl->bus, device, index++); 2094 func = cpqhp_slot_find(ctrl->bus, device, index++);
2084 p_slot = cpqhp_find_slot(ctrl, device); 2095 p_slot = cpqhp_find_slot(ctrl, device);
2085 if (p_slot) { 2096 if (p_slot) {
@@ -2113,9 +2124,8 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
2113 2124
2114 /* If the VGA Enable bit is set, remove isn't 2125 /* If the VGA Enable bit is set, remove isn't
2115 * supported */ 2126 * supported */
2116 if (BCR & PCI_BRIDGE_CTL_VGA) { 2127 if (BCR & PCI_BRIDGE_CTL_VGA)
2117 rc = REMOVE_NOT_SUPPORTED; 2128 rc = REMOVE_NOT_SUPPORTED;
2118 }
2119 } 2129 }
2120 } 2130 }
2121 2131
@@ -2183,67 +2193,67 @@ int cpqhp_hardware_test(struct controller *ctrl, int test_num)
2183 num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f; 2193 num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f;
2184 2194
2185 switch (test_num) { 2195 switch (test_num) {
2186 case 1: 2196 case 1:
2187 /* Do stuff here! */ 2197 /* Do stuff here! */
2188 2198
2189 /* Do that funky LED thing */ 2199 /* Do that funky LED thing */
2190 /* so we can restore them later */ 2200 /* so we can restore them later */
2191 save_LED = readl(ctrl->hpc_reg + LED_CONTROL); 2201 save_LED = readl(ctrl->hpc_reg + LED_CONTROL);
2192 work_LED = 0x01010101; 2202 work_LED = 0x01010101;
2193 switch_leds(ctrl, num_of_slots, &work_LED, 0); 2203 switch_leds(ctrl, num_of_slots, &work_LED, 0);
2194 switch_leds(ctrl, num_of_slots, &work_LED, 1); 2204 switch_leds(ctrl, num_of_slots, &work_LED, 1);
2195 switch_leds(ctrl, num_of_slots, &work_LED, 0); 2205 switch_leds(ctrl, num_of_slots, &work_LED, 0);
2196 switch_leds(ctrl, num_of_slots, &work_LED, 1); 2206 switch_leds(ctrl, num_of_slots, &work_LED, 1);
2197 2207
2198 work_LED = 0x01010000; 2208 work_LED = 0x01010000;
2199 writel(work_LED, ctrl->hpc_reg + LED_CONTROL); 2209 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2200 switch_leds(ctrl, num_of_slots, &work_LED, 0); 2210 switch_leds(ctrl, num_of_slots, &work_LED, 0);
2201 switch_leds(ctrl, num_of_slots, &work_LED, 1); 2211 switch_leds(ctrl, num_of_slots, &work_LED, 1);
2202 work_LED = 0x00000101; 2212 work_LED = 0x00000101;
2203 writel(work_LED, ctrl->hpc_reg + LED_CONTROL); 2213 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2204 switch_leds(ctrl, num_of_slots, &work_LED, 0); 2214 switch_leds(ctrl, num_of_slots, &work_LED, 0);
2205 switch_leds(ctrl, num_of_slots, &work_LED, 1); 2215 switch_leds(ctrl, num_of_slots, &work_LED, 1);
2216
2217 work_LED = 0x01010000;
2218 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2219 for (loop = 0; loop < num_of_slots; loop++) {
2220 set_SOGO(ctrl);
2206 2221
2207 work_LED = 0x01010000; 2222 /* Wait for SOGO interrupt */
2208 writel(work_LED, ctrl->hpc_reg + LED_CONTROL); 2223 wait_for_ctrl_irq (ctrl);
2209 for (loop = 0; loop < num_of_slots; loop++) {
2210 set_SOGO(ctrl);
2211 2224
2212 /* Wait for SOGO interrupt */ 2225 /* Get ready for next iteration */
2213 wait_for_ctrl_irq (ctrl); 2226 long_delay((3*HZ)/10);
2227 work_LED = work_LED >> 16;
2228 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2214 2229
2215 /* Get ready for next iteration */ 2230 set_SOGO(ctrl);
2216 long_delay((3*HZ)/10);
2217 work_LED = work_LED >> 16;
2218 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2219
2220 set_SOGO(ctrl);
2221 2231
2222 /* Wait for SOGO interrupt */ 2232 /* Wait for SOGO interrupt */
2223 wait_for_ctrl_irq (ctrl); 2233 wait_for_ctrl_irq (ctrl);
2224 2234
2225 /* Get ready for next iteration */ 2235 /* Get ready for next iteration */
2226 long_delay((3*HZ)/10); 2236 long_delay((3*HZ)/10);
2227 work_LED = work_LED << 16; 2237 work_LED = work_LED << 16;
2228 writel(work_LED, ctrl->hpc_reg + LED_CONTROL); 2238 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2229 work_LED = work_LED << 1; 2239 work_LED = work_LED << 1;
2230 writel(work_LED, ctrl->hpc_reg + LED_CONTROL); 2240 writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
2231 } 2241 }
2232 2242
2233 /* put it back the way it was */ 2243 /* put it back the way it was */
2234 writel(save_LED, ctrl->hpc_reg + LED_CONTROL); 2244 writel(save_LED, ctrl->hpc_reg + LED_CONTROL);
2235 2245
2236 set_SOGO(ctrl); 2246 set_SOGO(ctrl);
2237 2247
2238 /* Wait for SOBS to be unset */ 2248 /* Wait for SOBS to be unset */
2239 wait_for_ctrl_irq (ctrl); 2249 wait_for_ctrl_irq (ctrl);
2240 break; 2250 break;
2241 case 2: 2251 case 2:
2242 /* Do other stuff here! */ 2252 /* Do other stuff here! */
2243 break; 2253 break;
2244 case 3: 2254 case 3:
2245 /* and more... */ 2255 /* and more... */
2246 break; 2256 break;
2247 } 2257 }
2248 return 0; 2258 return 0;
2249} 2259}
@@ -2312,9 +2322,9 @@ static u32 configure_new_device(struct controller * ctrl, struct pci_func * func
2312 while ((function < max_functions) && (!stop_it)) { 2322 while ((function < max_functions) && (!stop_it)) {
2313 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID); 2323 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID);
2314 2324
2315 if (ID == 0xFFFFFFFF) { /* There's nothing there. */ 2325 if (ID == 0xFFFFFFFF) {
2316 function++; 2326 function++;
2317 } else { /* There's something there */ 2327 } else {
2318 /* Setup slot structure. */ 2328 /* Setup slot structure. */
2319 new_slot = cpqhp_slot_create(func->bus); 2329 new_slot = cpqhp_slot_create(func->bus);
2320 2330
@@ -2339,8 +2349,8 @@ static u32 configure_new_device(struct controller * ctrl, struct pci_func * func
2339 2349
2340 2350
2341/* 2351/*
2342 Configuration logic that involves the hotplug data structures and 2352 * Configuration logic that involves the hotplug data structures and
2343 their bookkeeping 2353 * their bookkeeping
2344 */ 2354 */
2345 2355
2346 2356
@@ -2393,7 +2403,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
2393 if (rc) 2403 if (rc)
2394 return rc; 2404 return rc;
2395 2405
2396 if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* PCI-PCI Bridge */ 2406 if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
2397 /* set Primary bus */ 2407 /* set Primary bus */
2398 dbg("set Primary bus = %d\n", func->bus); 2408 dbg("set Primary bus = %d\n", func->bus);
2399 rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus); 2409 rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus);
@@ -2484,7 +2494,8 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
2484 temp_resources.irqs = &irqs; 2494 temp_resources.irqs = &irqs;
2485 2495
2486 /* Make copies of the nodes we are going to pass down so that 2496 /* Make copies of the nodes we are going to pass down so that
2487 * if there is a problem,we can just use these to free resources */ 2497 * if there is a problem,we can just use these to free resources
2498 */
2488 hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL); 2499 hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL);
2489 hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL); 2500 hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL);
2490 hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL); 2501 hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL);
@@ -2556,7 +2567,8 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
2556 temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16; 2567 temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16;
2557 rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); 2568 rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word);
2558 2569
2559 /* Adjust this to compensate for extra adjustment in first loop */ 2570 /* Adjust this to compensate for extra adjustment in first loop
2571 */
2560 irqs.barber_pole--; 2572 irqs.barber_pole--;
2561 2573
2562 rc = 0; 2574 rc = 0;
@@ -2917,27 +2929,26 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
2917 } /* End of base register loop */ 2929 } /* End of base register loop */
2918 if (cpqhp_legacy_mode) { 2930 if (cpqhp_legacy_mode) {
2919 /* Figure out which interrupt pin this function uses */ 2931 /* Figure out which interrupt pin this function uses */
2920 rc = pci_bus_read_config_byte (pci_bus, devfn, 2932 rc = pci_bus_read_config_byte (pci_bus, devfn,
2921 PCI_INTERRUPT_PIN, &temp_byte); 2933 PCI_INTERRUPT_PIN, &temp_byte);
2922 2934
2923 /* If this function needs an interrupt and we are behind 2935 /* If this function needs an interrupt and we are behind
2924 * a bridge and the pin is tied to something that's 2936 * a bridge and the pin is tied to something that's
2925 * alread mapped, set this one the same */ 2937 * alread mapped, set this one the same */
2926 if (temp_byte && resources->irqs && 2938 if (temp_byte && resources->irqs &&
2927 (resources->irqs->valid_INT & 2939 (resources->irqs->valid_INT &
2928 (0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) { 2940 (0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) {
2929 /* We have to share with something already set up */ 2941 /* We have to share with something already set up */
2930 IRQ = resources->irqs->interrupt[(temp_byte + 2942 IRQ = resources->irqs->interrupt[(temp_byte +
2931 resources->irqs->barber_pole - 1) & 0x03]; 2943 resources->irqs->barber_pole - 1) & 0x03];
2932 } else { 2944 } else {
2933 /* Program IRQ based on card type */ 2945 /* Program IRQ based on card type */
2934 rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code); 2946 rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code);
2935 2947
2936 if (class_code == PCI_BASE_CLASS_STORAGE) { 2948 if (class_code == PCI_BASE_CLASS_STORAGE)
2937 IRQ = cpqhp_disk_irq; 2949 IRQ = cpqhp_disk_irq;
2938 } else { 2950 else
2939 IRQ = cpqhp_nic_irq; 2951 IRQ = cpqhp_nic_irq;
2940 }
2941 } 2952 }
2942 2953
2943 /* IRQ Line */ 2954 /* IRQ Line */
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index cb174888002b..76ba8a1c774d 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -94,12 +94,13 @@ static u8 evbuffer[1024];
94 94
95static void __iomem *compaq_int15_entry_point; 95static void __iomem *compaq_int15_entry_point;
96 96
97static spinlock_t int15_lock; /* lock for ordering int15_bios_call() */ 97/* lock for ordering int15_bios_call() */
98static spinlock_t int15_lock;
98 99
99 100
100/* This is a series of function that deals with 101/* This is a series of function that deals with
101 setting & getting the hotplug resource table in some environment variable. 102 * setting & getting the hotplug resource table in some environment variable.
102*/ 103 */
103 104
104/* 105/*
105 * We really shouldn't be doing this unless there is a _very_ good reason to!!! 106 * We really shouldn't be doing this unless there is a _very_ good reason to!!!
@@ -113,7 +114,7 @@ static u32 add_byte( u32 **p_buffer, u8 value, u32 *used, u32 *avail)
113 114
114 if ((*used + 1) > *avail) 115 if ((*used + 1) > *avail)
115 return(1); 116 return(1);
116 117
117 *((u8*)*p_buffer) = value; 118 *((u8*)*p_buffer) = value;
118 tByte = (u8**)p_buffer; 119 tByte = (u8**)p_buffer;
119 (*tByte)++; 120 (*tByte)++;
@@ -170,10 +171,10 @@ static u32 access_EV (u16 operation, u8 *ev_name, u8 *buffer, u32 *buf_size)
170 unsigned long flags; 171 unsigned long flags;
171 int op = operation; 172 int op = operation;
172 int ret_val; 173 int ret_val;
173 174
174 if (!compaq_int15_entry_point) 175 if (!compaq_int15_entry_point)
175 return -ENODEV; 176 return -ENODEV;
176 177
177 spin_lock_irqsave(&int15_lock, flags); 178 spin_lock_irqsave(&int15_lock, flags);
178 __asm__ ( 179 __asm__ (
179 "xorl %%ebx,%%ebx\n" \ 180 "xorl %%ebx,%%ebx\n" \
@@ -187,7 +188,7 @@ static u32 access_EV (u16 operation, u8 *ev_name, u8 *buffer, u32 *buf_size)
187 "D" (buffer), "m" (compaq_int15_entry_point) 188 "D" (buffer), "m" (compaq_int15_entry_point)
188 : "%ebx", "%edx"); 189 : "%ebx", "%edx");
189 spin_unlock_irqrestore(&int15_lock, flags); 190 spin_unlock_irqrestore(&int15_lock, flags);
190 191
191 return((ret_val & 0xFF00) >> 8); 192 return((ret_val & 0xFF00) >> 8);
192} 193}
193 194
@@ -210,14 +211,16 @@ static int load_HRT (void __iomem *rom_start)
210 211
211 available = 1024; 212 available = 1024;
212 213
213 // Now load the EV 214 /* Now load the EV */
214 temp_dword = available; 215 temp_dword = available;
215 216
216 rc = access_EV(READ_EV, "CQTHPS", evbuffer, &temp_dword); 217 rc = access_EV(READ_EV, "CQTHPS", evbuffer, &temp_dword);
217 218
218 evbuffer_length = temp_dword; 219 evbuffer_length = temp_dword;
219 220
220 // We're maintaining the resource lists so write FF to invalidate old info 221 /* We're maintaining the resource lists so write FF to invalidate old
222 * info
223 */
221 temp_dword = 1; 224 temp_dword = 1;
222 225
223 rc = access_EV(WRITE_EV, "CQTHPS", &temp_byte, &temp_dword); 226 rc = access_EV(WRITE_EV, "CQTHPS", &temp_byte, &temp_dword);
@@ -263,13 +266,13 @@ static u32 store_HRT (void __iomem *rom_start)
263 p_EV_header = (struct ev_hrt_header *) pFill; 266 p_EV_header = (struct ev_hrt_header *) pFill;
264 267
265 ctrl = cpqhp_ctrl_list; 268 ctrl = cpqhp_ctrl_list;
266 269
267 // The revision of this structure 270 /* The revision of this structure */
268 rc = add_byte( &pFill, 1 + ctrl->push_flag, &usedbytes, &available); 271 rc = add_byte( &pFill, 1 + ctrl->push_flag, &usedbytes, &available);
269 if (rc) 272 if (rc)
270 return(rc); 273 return(rc);
271 274
272 // The number of controllers 275 /* The number of controllers */
273 rc = add_byte( &pFill, 1, &usedbytes, &available); 276 rc = add_byte( &pFill, 1, &usedbytes, &available);
274 if (rc) 277 if (rc)
275 return(rc); 278 return(rc);
@@ -279,27 +282,27 @@ static u32 store_HRT (void __iomem *rom_start)
279 282
280 numCtrl++; 283 numCtrl++;
281 284
282 // The bus number 285 /* The bus number */
283 rc = add_byte( &pFill, ctrl->bus, &usedbytes, &available); 286 rc = add_byte( &pFill, ctrl->bus, &usedbytes, &available);
284 if (rc) 287 if (rc)
285 return(rc); 288 return(rc);
286 289
287 // The device Number 290 /* The device Number */
288 rc = add_byte( &pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available); 291 rc = add_byte( &pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available);
289 if (rc) 292 if (rc)
290 return(rc); 293 return(rc);
291 294
292 // The function Number 295 /* The function Number */
293 rc = add_byte( &pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available); 296 rc = add_byte( &pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available);
294 if (rc) 297 if (rc)
295 return(rc); 298 return(rc);
296 299
297 // Skip the number of available entries 300 /* Skip the number of available entries */
298 rc = add_dword( &pFill, 0, &usedbytes, &available); 301 rc = add_dword( &pFill, 0, &usedbytes, &available);
299 if (rc) 302 if (rc)
300 return(rc); 303 return(rc);
301 304
302 // Figure out memory Available 305 /* Figure out memory Available */
303 306
304 resNode = ctrl->mem_head; 307 resNode = ctrl->mem_head;
305 308
@@ -308,12 +311,12 @@ static u32 store_HRT (void __iomem *rom_start)
308 while (resNode) { 311 while (resNode) {
309 loop ++; 312 loop ++;
310 313
311 // base 314 /* base */
312 rc = add_dword( &pFill, resNode->base, &usedbytes, &available); 315 rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
313 if (rc) 316 if (rc)
314 return(rc); 317 return(rc);
315 318
316 // length 319 /* length */
317 rc = add_dword( &pFill, resNode->length, &usedbytes, &available); 320 rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
318 if (rc) 321 if (rc)
319 return(rc); 322 return(rc);
@@ -321,10 +324,10 @@ static u32 store_HRT (void __iomem *rom_start)
321 resNode = resNode->next; 324 resNode = resNode->next;
322 } 325 }
323 326
324 // Fill in the number of entries 327 /* Fill in the number of entries */
325 p_ev_ctrl->mem_avail = loop; 328 p_ev_ctrl->mem_avail = loop;
326 329
327 // Figure out prefetchable memory Available 330 /* Figure out prefetchable memory Available */
328 331
329 resNode = ctrl->p_mem_head; 332 resNode = ctrl->p_mem_head;
330 333
@@ -333,12 +336,12 @@ static u32 store_HRT (void __iomem *rom_start)
333 while (resNode) { 336 while (resNode) {
334 loop ++; 337 loop ++;
335 338
336 // base 339 /* base */
337 rc = add_dword( &pFill, resNode->base, &usedbytes, &available); 340 rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
338 if (rc) 341 if (rc)
339 return(rc); 342 return(rc);
340 343
341 // length 344 /* length */
342 rc = add_dword( &pFill, resNode->length, &usedbytes, &available); 345 rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
343 if (rc) 346 if (rc)
344 return(rc); 347 return(rc);
@@ -346,10 +349,10 @@ static u32 store_HRT (void __iomem *rom_start)
346 resNode = resNode->next; 349 resNode = resNode->next;
347 } 350 }
348 351
349 // Fill in the number of entries 352 /* Fill in the number of entries */
350 p_ev_ctrl->p_mem_avail = loop; 353 p_ev_ctrl->p_mem_avail = loop;
351 354
352 // Figure out IO Available 355 /* Figure out IO Available */
353 356
354 resNode = ctrl->io_head; 357 resNode = ctrl->io_head;
355 358
@@ -358,12 +361,12 @@ static u32 store_HRT (void __iomem *rom_start)
358 while (resNode) { 361 while (resNode) {
359 loop ++; 362 loop ++;
360 363
361 // base 364 /* base */
362 rc = add_dword( &pFill, resNode->base, &usedbytes, &available); 365 rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
363 if (rc) 366 if (rc)
364 return(rc); 367 return(rc);
365 368
366 // length 369 /* length */
367 rc = add_dword( &pFill, resNode->length, &usedbytes, &available); 370 rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
368 if (rc) 371 if (rc)
369 return(rc); 372 return(rc);
@@ -371,10 +374,10 @@ static u32 store_HRT (void __iomem *rom_start)
371 resNode = resNode->next; 374 resNode = resNode->next;
372 } 375 }
373 376
374 // Fill in the number of entries 377 /* Fill in the number of entries */
375 p_ev_ctrl->io_avail = loop; 378 p_ev_ctrl->io_avail = loop;
376 379
377 // Figure out bus Available 380 /* Figure out bus Available */
378 381
379 resNode = ctrl->bus_head; 382 resNode = ctrl->bus_head;
380 383
@@ -383,12 +386,12 @@ static u32 store_HRT (void __iomem *rom_start)
383 while (resNode) { 386 while (resNode) {
384 loop ++; 387 loop ++;
385 388
386 // base 389 /* base */
387 rc = add_dword( &pFill, resNode->base, &usedbytes, &available); 390 rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
388 if (rc) 391 if (rc)
389 return(rc); 392 return(rc);
390 393
391 // length 394 /* length */
392 rc = add_dword( &pFill, resNode->length, &usedbytes, &available); 395 rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
393 if (rc) 396 if (rc)
394 return(rc); 397 return(rc);
@@ -396,15 +399,15 @@ static u32 store_HRT (void __iomem *rom_start)
396 resNode = resNode->next; 399 resNode = resNode->next;
397 } 400 }
398 401
399 // Fill in the number of entries 402 /* Fill in the number of entries */
400 p_ev_ctrl->bus_avail = loop; 403 p_ev_ctrl->bus_avail = loop;
401 404
402 ctrl = ctrl->next; 405 ctrl = ctrl->next;
403 } 406 }
404 407
405 p_EV_header->num_of_ctrl = numCtrl; 408 p_EV_header->num_of_ctrl = numCtrl;
406 409
407 // Now store the EV 410 /* Now store the EV */
408 411
409 temp_dword = usedbytes; 412 temp_dword = usedbytes;
410 413
@@ -449,20 +452,21 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
449 struct ev_hrt_header *p_EV_header; 452 struct ev_hrt_header *p_EV_header;
450 453
451 if (!evbuffer_init) { 454 if (!evbuffer_init) {
452 // Read the resource list information in from NVRAM 455 /* Read the resource list information in from NVRAM */
453 if (load_HRT(rom_start)) 456 if (load_HRT(rom_start))
454 memset (evbuffer, 0, 1024); 457 memset (evbuffer, 0, 1024);
455 458
456 evbuffer_init = 1; 459 evbuffer_init = 1;
457 } 460 }
458 461
459 // If we saved information in NVRAM, use it now 462 /* If we saved information in NVRAM, use it now */
460 p_EV_header = (struct ev_hrt_header *) evbuffer; 463 p_EV_header = (struct ev_hrt_header *) evbuffer;
461 464
462 // The following code is for systems where version 1.0 of this 465 /* The following code is for systems where version 1.0 of this
463 // driver has been loaded, but doesn't support the hardware. 466 * driver has been loaded, but doesn't support the hardware.
464 // In that case, the driver would incorrectly store something 467 * In that case, the driver would incorrectly store something
465 // in NVRAM. 468 * in NVRAM.
469 */
466 if ((p_EV_header->Version == 2) || 470 if ((p_EV_header->Version == 2) ||
467 ((p_EV_header->Version == 1) && !ctrl->push_flag)) { 471 ((p_EV_header->Version == 1) && !ctrl->push_flag)) {
468 p_byte = &(p_EV_header->next); 472 p_byte = &(p_EV_header->next);
@@ -479,7 +483,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
479 function = p_ev_ctrl->function; 483 function = p_ev_ctrl->function;
480 484
481 while ((bus != ctrl->bus) || 485 while ((bus != ctrl->bus) ||
482 (device != PCI_SLOT(ctrl->pci_dev->devfn)) || 486 (device != PCI_SLOT(ctrl->pci_dev->devfn)) ||
483 (function != PCI_FUNC(ctrl->pci_dev->devfn))) { 487 (function != PCI_FUNC(ctrl->pci_dev->devfn))) {
484 nummem = p_ev_ctrl->mem_avail; 488 nummem = p_ev_ctrl->mem_avail;
485 numpmem = p_ev_ctrl->p_mem_avail; 489 numpmem = p_ev_ctrl->p_mem_avail;
@@ -491,7 +495,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
491 if (p_byte > ((u8*)p_EV_header + evbuffer_length)) 495 if (p_byte > ((u8*)p_EV_header + evbuffer_length))
492 return 2; 496 return 2;
493 497
494 // Skip forward to the next entry 498 /* Skip forward to the next entry */
495 p_byte += (nummem + numpmem + numio + numbus) * 8; 499 p_byte += (nummem + numpmem + numio + numbus) * 8;
496 500
497 if (p_byte > ((u8*)p_EV_header + evbuffer_length)) 501 if (p_byte > ((u8*)p_EV_header + evbuffer_length))
@@ -629,8 +633,9 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
629 ctrl->bus_head = bus_node; 633 ctrl->bus_head = bus_node;
630 } 634 }
631 635
632 // If all of the following fail, we don't have any resources for 636 /* If all of the following fail, we don't have any resources for
633 // hot plug add 637 * hot plug add
638 */
634 rc = 1; 639 rc = 1;
635 rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head)); 640 rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
636 rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head)); 641 rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
@@ -640,14 +645,14 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
640 if (rc) 645 if (rc)
641 return(rc); 646 return(rc);
642 } else { 647 } else {
643 if ((evbuffer[0] != 0) && (!ctrl->push_flag)) 648 if ((evbuffer[0] != 0) && (!ctrl->push_flag))
644 return 1; 649 return 1;
645 } 650 }
646 651
647 return 0; 652 return 0;
648} 653}
649 654
650 655
651int compaq_nvram_store (void __iomem *rom_start) 656int compaq_nvram_store (void __iomem *rom_start)
652{ 657{
653 int rc = 1; 658 int rc = 1;
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 6c0ed0fcb8ee..6173b9a4544e 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -37,7 +37,6 @@
37#include "../pci.h" 37#include "../pci.h"
38#include "cpqphp.h" 38#include "cpqphp.h"
39#include "cpqphp_nvram.h" 39#include "cpqphp_nvram.h"
40#include <asm/pci_x86.h>
41 40
42 41
43u8 cpqhp_nic_irq; 42u8 cpqhp_nic_irq;
@@ -82,14 +81,14 @@ static void __iomem *detect_HRT_floating_pointer(void __iomem *begin, void __iom
82} 81}
83 82
84 83
85int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func) 84int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
86{ 85{
87 unsigned char bus; 86 unsigned char bus;
88 struct pci_bus *child; 87 struct pci_bus *child;
89 int num; 88 int num;
90 89
91 if (func->pci_dev == NULL) 90 if (func->pci_dev == NULL)
92 func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function)); 91 func->pci_dev = pci_get_bus_and_slot(func->bus,PCI_DEVFN(func->device, func->function));
93 92
94 /* No pci device, we need to create it then */ 93 /* No pci device, we need to create it then */
95 if (func->pci_dev == NULL) { 94 if (func->pci_dev == NULL) {
@@ -99,7 +98,7 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
99 if (num) 98 if (num)
100 pci_bus_add_devices(ctrl->pci_dev->bus); 99 pci_bus_add_devices(ctrl->pci_dev->bus);
101 100
102 func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function)); 101 func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function));
103 if (func->pci_dev == NULL) { 102 if (func->pci_dev == NULL) {
104 dbg("ERROR: pci_dev still null\n"); 103 dbg("ERROR: pci_dev still null\n");
105 return 0; 104 return 0;
@@ -112,20 +111,24 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
112 pci_do_scan_bus(child); 111 pci_do_scan_bus(child);
113 } 112 }
114 113
114 pci_dev_put(func->pci_dev);
115
115 return 0; 116 return 0;
116} 117}
117 118
118 119
119int cpqhp_unconfigure_device(struct pci_func* func) 120int cpqhp_unconfigure_device(struct pci_func* func)
120{ 121{
121 int j; 122 int j;
122 123
123 dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function); 124 dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function);
124 125
125 for (j=0; j<8 ; j++) { 126 for (j=0; j<8 ; j++) {
126 struct pci_dev* temp = pci_find_slot(func->bus, PCI_DEVFN(func->device, j)); 127 struct pci_dev* temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j));
127 if (temp) 128 if (temp) {
129 pci_dev_put(temp);
128 pci_remove_bus_device(temp); 130 pci_remove_bus_device(temp);
131 }
129 } 132 }
130 return 0; 133 return 0;
131} 134}
@@ -178,32 +181,22 @@ int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num)
178 if (!rc) 181 if (!rc)
179 return !rc; 182 return !rc;
180 183
181 // set the Edge Level Control Register (ELCR) 184 /* set the Edge Level Control Register (ELCR) */
182 temp_word = inb(0x4d0); 185 temp_word = inb(0x4d0);
183 temp_word |= inb(0x4d1) << 8; 186 temp_word |= inb(0x4d1) << 8;
184 187
185 temp_word |= 0x01 << irq_num; 188 temp_word |= 0x01 << irq_num;
186 189
187 // This should only be for x86 as it sets the Edge Level Control Register 190 /* This should only be for x86 as it sets the Edge Level
188 outb((u8) (temp_word & 0xFF), 0x4d0); 191 * Control Register
189 outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1); 192 */
190 rc = 0; 193 outb((u8) (temp_word & 0xFF), 0x4d0); outb((u8) ((temp_word &
191 } 194 0xFF00) >> 8), 0x4d1); rc = 0; }
192 195
193 return rc; 196 return rc;
194} 197}
195 198
196 199
197/*
198 * WTF??? This function isn't in the code, yet a function calls it, but the
199 * compiler optimizes it away? strange. Here as a placeholder to keep the
200 * compiler happy.
201 */
202static int PCI_ScanBusNonBridge (u8 bus, u8 device)
203{
204 return 0;
205}
206
207static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev_num) 200static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev_num)
208{ 201{
209 u16 tdevice; 202 u16 tdevice;
@@ -213,11 +206,11 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
213 ctrl->pci_bus->number = bus_num; 206 ctrl->pci_bus->number = bus_num;
214 207
215 for (tdevice = 0; tdevice < 0xFF; tdevice++) { 208 for (tdevice = 0; tdevice < 0xFF; tdevice++) {
216 //Scan for access first 209 /* Scan for access first */
217 if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) 210 if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
218 continue; 211 continue;
219 dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice); 212 dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice);
220 //Yep we got one. Not a bridge ? 213 /* Yep we got one. Not a bridge ? */
221 if ((work >> 8) != PCI_TO_PCI_BRIDGE_CLASS) { 214 if ((work >> 8) != PCI_TO_PCI_BRIDGE_CLASS) {
222 *dev_num = tdevice; 215 *dev_num = tdevice;
223 dbg("found it !\n"); 216 dbg("found it !\n");
@@ -225,16 +218,16 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
225 } 218 }
226 } 219 }
227 for (tdevice = 0; tdevice < 0xFF; tdevice++) { 220 for (tdevice = 0; tdevice < 0xFF; tdevice++) {
228 //Scan for access first 221 /* Scan for access first */
229 if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) 222 if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
230 continue; 223 continue;
231 dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice); 224 dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice);
232 //Yep we got one. bridge ? 225 /* Yep we got one. bridge ? */
233 if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) { 226 if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
234 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus); 227 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus);
228 /* XXX: no recursion, wtf? */
235 dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice); 229 dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice);
236 if (PCI_ScanBusNonBridge(tbus, tdevice) == 0) 230 return 0;
237 return 0;
238 } 231 }
239 } 232 }
240 233
@@ -244,39 +237,23 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
244 237
245static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot, u8 nobridge) 238static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot, u8 nobridge)
246{ 239{
247 struct irq_routing_table *PCIIRQRoutingInfoLength; 240 int loop, len;
248 long len;
249 long loop;
250 u32 work; 241 u32 work;
251
252 u8 tbus, tdevice, tslot; 242 u8 tbus, tdevice, tslot;
253 243
254 PCIIRQRoutingInfoLength = pcibios_get_irq_routing_table(); 244 len = cpqhp_routing_table_length();
255 if (!PCIIRQRoutingInfoLength)
256 return -1;
257
258 len = (PCIIRQRoutingInfoLength->size -
259 sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
260 // Make sure I got at least one entry
261 if (len == 0) {
262 kfree(PCIIRQRoutingInfoLength );
263 return -1;
264 }
265
266 for (loop = 0; loop < len; ++loop) { 245 for (loop = 0; loop < len; ++loop) {
267 tbus = PCIIRQRoutingInfoLength->slots[loop].bus; 246 tbus = cpqhp_routing_table->slots[loop].bus;
268 tdevice = PCIIRQRoutingInfoLength->slots[loop].devfn; 247 tdevice = cpqhp_routing_table->slots[loop].devfn;
269 tslot = PCIIRQRoutingInfoLength->slots[loop].slot; 248 tslot = cpqhp_routing_table->slots[loop].slot;
270 249
271 if (tslot == slot) { 250 if (tslot == slot) {
272 *bus_num = tbus; 251 *bus_num = tbus;
273 *dev_num = tdevice; 252 *dev_num = tdevice;
274 ctrl->pci_bus->number = tbus; 253 ctrl->pci_bus->number = tbus;
275 pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work); 254 pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work);
276 if (!nobridge || (work == 0xffffffff)) { 255 if (!nobridge || (work == 0xffffffff))
277 kfree(PCIIRQRoutingInfoLength );
278 return 0; 256 return 0;
279 }
280 257
281 dbg("bus_num %d devfn %d\n", *bus_num, *dev_num); 258 dbg("bus_num %d devfn %d\n", *bus_num, *dev_num);
282 pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_CLASS_REVISION, &work); 259 pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_CLASS_REVISION, &work);
@@ -287,28 +264,26 @@ static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num
287 dbg("Scan bus for Non Bridge: bus %d\n", tbus); 264 dbg("Scan bus for Non Bridge: bus %d\n", tbus);
288 if (PCI_ScanBusForNonBridge(ctrl, tbus, dev_num) == 0) { 265 if (PCI_ScanBusForNonBridge(ctrl, tbus, dev_num) == 0) {
289 *bus_num = tbus; 266 *bus_num = tbus;
290 kfree(PCIIRQRoutingInfoLength );
291 return 0; 267 return 0;
292 } 268 }
293 } else { 269 } else
294 kfree(PCIIRQRoutingInfoLength );
295 return 0; 270 return 0;
296 }
297
298 } 271 }
299 } 272 }
300 kfree(PCIIRQRoutingInfoLength );
301 return -1; 273 return -1;
302} 274}
303 275
304 276
305int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 slot) 277int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 slot)
306{ 278{
307 return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0); //plain (bridges allowed) 279 /* plain (bridges allowed) */
280 return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0);
308} 281}
309 282
310 283
311/* More PCI configuration routines; this time centered around hotplug controller */ 284/* More PCI configuration routines; this time centered around hotplug
285 * controller
286 */
312 287
313 288
314/* 289/*
@@ -339,12 +314,12 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
339 int stop_it; 314 int stop_it;
340 int index; 315 int index;
341 316
342 // Decide which slots are supported 317 /* Decide which slots are supported */
343 318
344 if (is_hot_plug) { 319 if (is_hot_plug) {
345 //********************************* 320 /*
346 // is_hot_plug is the slot mask 321 * is_hot_plug is the slot mask
347 //********************************* 322 */
348 FirstSupported = is_hot_plug >> 4; 323 FirstSupported = is_hot_plug >> 4;
349 LastSupported = FirstSupported + (is_hot_plug & 0x0F) - 1; 324 LastSupported = FirstSupported + (is_hot_plug & 0x0F) - 1;
350 } else { 325 } else {
@@ -352,123 +327,127 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
352 LastSupported = 0x1F; 327 LastSupported = 0x1F;
353 } 328 }
354 329
355 // Save PCI configuration space for all devices in supported slots 330 /* Save PCI configuration space for all devices in supported slots */
356 ctrl->pci_bus->number = busnumber; 331 ctrl->pci_bus->number = busnumber;
357 for (device = FirstSupported; device <= LastSupported; device++) { 332 for (device = FirstSupported; device <= LastSupported; device++) {
358 ID = 0xFFFFFFFF; 333 ID = 0xFFFFFFFF;
359 rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID); 334 rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID);
335
336 if (ID == 0xFFFFFFFF) {
337 if (is_hot_plug) {
338 /* Setup slot structure with entry for empty
339 * slot
340 */
341 new_slot = cpqhp_slot_create(busnumber);
342 if (new_slot == NULL)
343 return 1;
360 344
361 if (ID != 0xFFFFFFFF) { // device in slot 345 new_slot->bus = (u8) busnumber;
362 rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code); 346 new_slot->device = (u8) device;
363 if (rc) 347 new_slot->function = 0;
364 return rc; 348 new_slot->is_a_board = 0;
349 new_slot->presence_save = 0;
350 new_slot->switch_save = 0;
351 }
352 continue;
353 }
365 354
366 rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type); 355 rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code);
367 if (rc) 356 if (rc)
368 return rc; 357 return rc;
369 358
370 // If multi-function device, set max_functions to 8 359 rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type);
371 if (header_type & 0x80) 360 if (rc)
372 max_functions = 8; 361 return rc;
373 else
374 max_functions = 1;
375 362
376 function = 0; 363 /* If multi-function device, set max_functions to 8 */
364 if (header_type & 0x80)
365 max_functions = 8;
366 else
367 max_functions = 1;
377 368
378 do { 369 function = 0;
379 DevError = 0;
380 370
381 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // P-P Bridge 371 do {
382 // Recurse the subordinate bus 372 DevError = 0;
383 // get the subordinate bus number 373 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
384 rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus); 374 /* Recurse the subordinate bus
385 if (rc) { 375 * get the subordinate bus number
376 */
377 rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus);
378 if (rc) {
379 return rc;
380 } else {
381 sub_bus = (int) secondary_bus;
382
383 /* Save secondary bus cfg spc
384 * with this recursive call.
385 */
386 rc = cpqhp_save_config(ctrl, sub_bus, 0);
387 if (rc)
386 return rc; 388 return rc;
387 } else { 389 ctrl->pci_bus->number = busnumber;
388 sub_bus = (int) secondary_bus;
389
390 // Save secondary bus cfg spc
391 // with this recursive call.
392 rc = cpqhp_save_config(ctrl, sub_bus, 0);
393 if (rc)
394 return rc;
395 ctrl->pci_bus->number = busnumber;
396 }
397 } 390 }
391 }
398 392
399 index = 0; 393 index = 0;
394 new_slot = cpqhp_slot_find(busnumber, device, index++);
395 while (new_slot &&
396 (new_slot->function != (u8) function))
400 new_slot = cpqhp_slot_find(busnumber, device, index++); 397 new_slot = cpqhp_slot_find(busnumber, device, index++);
401 while (new_slot &&
402 (new_slot->function != (u8) function))
403 new_slot = cpqhp_slot_find(busnumber, device, index++);
404 398
405 if (!new_slot) { 399 if (!new_slot) {
406 // Setup slot structure. 400 /* Setup slot structure. */
407 new_slot = cpqhp_slot_create(busnumber); 401 new_slot = cpqhp_slot_create(busnumber);
408 402 if (new_slot == NULL)
409 if (new_slot == NULL) 403 return 1;
410 return(1); 404 }
411 }
412
413 new_slot->bus = (u8) busnumber;
414 new_slot->device = (u8) device;
415 new_slot->function = (u8) function;
416 new_slot->is_a_board = 1;
417 new_slot->switch_save = 0x10;
418 // In case of unsupported board
419 new_slot->status = DevError;
420 new_slot->pci_dev = pci_find_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
421
422 for (cloop = 0; cloop < 0x20; cloop++) {
423 rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
424 if (rc)
425 return rc;
426 }
427 405
428 function++; 406 new_slot->bus = (u8) busnumber;
407 new_slot->device = (u8) device;
408 new_slot->function = (u8) function;
409 new_slot->is_a_board = 1;
410 new_slot->switch_save = 0x10;
411 /* In case of unsupported board */
412 new_slot->status = DevError;
413 new_slot->pci_dev = pci_get_bus_and_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
429 414
430 stop_it = 0; 415 for (cloop = 0; cloop < 0x20; cloop++) {
416 rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
417 if (rc)
418 return rc;
419 }
431 420
432 // this loop skips to the next present function 421 pci_dev_put(new_slot->pci_dev);
433 // reading in Class Code and Header type.
434 422
435 while ((function < max_functions)&&(!stop_it)) { 423 function++;
436 rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
437 if (ID == 0xFFFFFFFF) { // nothing there.
438 function++;
439 } else { // Something there
440 rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
441 if (rc)
442 return rc;
443 424
444 rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type); 425 stop_it = 0;
445 if (rc)
446 return rc;
447 426
448 stop_it++; 427 /* this loop skips to the next present function
449 } 428 * reading in Class Code and Header type.
429 */
430 while ((function < max_functions) && (!stop_it)) {
431 rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
432 if (ID == 0xFFFFFFFF) {
433 function++;
434 continue;
450 } 435 }
436 rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
437 if (rc)
438 return rc;
451 439
452 } while (function < max_functions); 440 rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type);
453 } // End of IF (device in slot?) 441 if (rc)
454 else if (is_hot_plug) { 442 return rc;
455 // Setup slot structure with entry for empty slot
456 new_slot = cpqhp_slot_create(busnumber);
457 443
458 if (new_slot == NULL) { 444 stop_it++;
459 return(1);
460 } 445 }
461 446
462 new_slot->bus = (u8) busnumber; 447 } while (function < max_functions);
463 new_slot->device = (u8) device; 448 } /* End of FOR loop */
464 new_slot->function = 0;
465 new_slot->is_a_board = 0;
466 new_slot->presence_save = 0;
467 new_slot->switch_save = 0;
468 }
469 } // End of FOR loop
470 449
471 return(0); 450 return 0;
472} 451}
473 452
474 453
@@ -489,7 +468,7 @@ int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot)
489 u8 secondary_bus; 468 u8 secondary_bus;
490 int sub_bus; 469 int sub_bus;
491 int max_functions; 470 int max_functions;
492 int function; 471 int function = 0;
493 int cloop = 0; 472 int cloop = 0;
494 int stop_it; 473 int stop_it;
495 474
@@ -498,63 +477,58 @@ int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot)
498 ctrl->pci_bus->number = new_slot->bus; 477 ctrl->pci_bus->number = new_slot->bus;
499 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_VENDOR_ID, &ID); 478 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_VENDOR_ID, &ID);
500 479
501 if (ID != 0xFFFFFFFF) { // device in slot 480 if (ID == 0xFFFFFFFF)
502 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code); 481 return 2;
503 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
504
505 if (header_type & 0x80) // Multi-function device
506 max_functions = 8;
507 else
508 max_functions = 1;
509
510 function = 0;
511
512 do {
513 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
514 // Recurse the subordinate bus
515 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
516 482
517 sub_bus = (int) secondary_bus; 483 pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code);
484 pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
518 485
519 // Save the config headers for the secondary bus. 486 if (header_type & 0x80) /* Multi-function device */
520 rc = cpqhp_save_config(ctrl, sub_bus, 0); 487 max_functions = 8;
521 if (rc) 488 else
522 return(rc); 489 max_functions = 1;
523 ctrl->pci_bus->number = new_slot->bus;
524 490
525 } // End of IF 491 while (function < max_functions) {
492 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
493 /* Recurse the subordinate bus */
494 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
526 495
527 new_slot->status = 0; 496 sub_bus = (int) secondary_bus;
528 497
529 for (cloop = 0; cloop < 0x20; cloop++) { 498 /* Save the config headers for the secondary
530 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop])); 499 * bus.
531 } 500 */
501 rc = cpqhp_save_config(ctrl, sub_bus, 0);
502 if (rc)
503 return(rc);
504 ctrl->pci_bus->number = new_slot->bus;
532 505
533 function++; 506 }
534 507
535 stop_it = 0; 508 new_slot->status = 0;
536 509
537 // this loop skips to the next present function 510 for (cloop = 0; cloop < 0x20; cloop++)
538 // reading in the Class Code and the Header type. 511 pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
539 512
540 while ((function < max_functions) && (!stop_it)) { 513 function++;
541 pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
542 514
543 if (ID == 0xFFFFFFFF) { // nothing there. 515 stop_it = 0;
544 function++;
545 } else { // Something there
546 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
547 516
548 pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type); 517 /* this loop skips to the next present function
518 * reading in the Class Code and the Header type.
519 */
520 while ((function < max_functions) && (!stop_it)) {
521 pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
549 522
550 stop_it++; 523 if (ID == 0xFFFFFFFF)
551 } 524 function++;
525 else {
526 pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
527 pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type);
528 stop_it++;
552 } 529 }
530 }
553 531
554 } while (function < max_functions);
555 } // End of IF (device in slot?)
556 else {
557 return 2;
558 } 532 }
559 533
560 return 0; 534 return 0;
@@ -590,11 +564,10 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
590 pci_bus->number = func->bus; 564 pci_bus->number = func->bus;
591 devfn = PCI_DEVFN(func->device, func->function); 565 devfn = PCI_DEVFN(func->device, func->function);
592 566
593 // Check for Bridge 567 /* Check for Bridge */
594 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type); 568 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
595 569
596 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { 570 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
597 // PCI-PCI Bridge
598 pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus); 571 pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
599 572
600 sub_bus = (int) secondary_bus; 573 sub_bus = (int) secondary_bus;
@@ -610,23 +583,27 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
610 } 583 }
611 pci_bus->number = func->bus; 584 pci_bus->number = func->bus;
612 585
613 //FIXME: this loop is duplicated in the non-bridge case. The two could be rolled together 586 /* FIXME: this loop is duplicated in the non-bridge
614 // Figure out IO and memory base lengths 587 * case. The two could be rolled together Figure out
588 * IO and memory base lengths
589 */
615 for (cloop = 0x10; cloop <= 0x14; cloop += 4) { 590 for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
616 temp_register = 0xFFFFFFFF; 591 temp_register = 0xFFFFFFFF;
617 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register); 592 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
618 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base); 593 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
619 594 /* If this register is implemented */
620 if (base) { // If this register is implemented 595 if (base) {
621 if (base & 0x01L) { 596 if (base & 0x01L) {
622 // IO base 597 /* IO base
623 // set base = amount of IO space requested 598 * set base = amount of IO space
599 * requested
600 */
624 base = base & 0xFFFFFFFE; 601 base = base & 0xFFFFFFFE;
625 base = (~base) + 1; 602 base = (~base) + 1;
626 603
627 type = 1; 604 type = 1;
628 } else { 605 } else {
629 // memory base 606 /* memory base */
630 base = base & 0xFFFFFFF0; 607 base = base & 0xFFFFFFF0;
631 base = (~base) + 1; 608 base = (~base) + 1;
632 609
@@ -637,32 +614,36 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
637 type = 0; 614 type = 0;
638 } 615 }
639 616
640 // Save information in slot structure 617 /* Save information in slot structure */
641 func->base_length[(cloop - 0x10) >> 2] = 618 func->base_length[(cloop - 0x10) >> 2] =
642 base; 619 base;
643 func->base_type[(cloop - 0x10) >> 2] = type; 620 func->base_type[(cloop - 0x10) >> 2] = type;
644 621
645 } // End of base register loop 622 } /* End of base register loop */
646 623
647 624 } else if ((header_type & 0x7F) == 0x00) {
648 } else if ((header_type & 0x7F) == 0x00) { // PCI-PCI Bridge 625 /* Figure out IO and memory base lengths */
649 // Figure out IO and memory base lengths
650 for (cloop = 0x10; cloop <= 0x24; cloop += 4) { 626 for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
651 temp_register = 0xFFFFFFFF; 627 temp_register = 0xFFFFFFFF;
652 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register); 628 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
653 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base); 629 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
654 630
655 if (base) { // If this register is implemented 631 /* If this register is implemented */
632 if (base) {
656 if (base & 0x01L) { 633 if (base & 0x01L) {
657 // IO base 634 /* IO base
658 // base = amount of IO space requested 635 * base = amount of IO space
636 * requested
637 */
659 base = base & 0xFFFFFFFE; 638 base = base & 0xFFFFFFFE;
660 base = (~base) + 1; 639 base = (~base) + 1;
661 640
662 type = 1; 641 type = 1;
663 } else { 642 } else {
664 // memory base 643 /* memory base
665 // base = amount of memory space requested 644 * base = amount of memory
645 * space requested
646 */
666 base = base & 0xFFFFFFF0; 647 base = base & 0xFFFFFFF0;
667 base = (~base) + 1; 648 base = (~base) + 1;
668 649
@@ -673,16 +654,16 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
673 type = 0; 654 type = 0;
674 } 655 }
675 656
676 // Save information in slot structure 657 /* Save information in slot structure */
677 func->base_length[(cloop - 0x10) >> 2] = base; 658 func->base_length[(cloop - 0x10) >> 2] = base;
678 func->base_type[(cloop - 0x10) >> 2] = type; 659 func->base_type[(cloop - 0x10) >> 2] = type;
679 660
680 } // End of base register loop 661 } /* End of base register loop */
681 662
682 } else { // Some other unknown header type 663 } else { /* Some other unknown header type */
683 } 664 }
684 665
685 // find the next device in this slot 666 /* find the next device in this slot */
686 func = cpqhp_slot_find(func->bus, func->device, index++); 667 func = cpqhp_slot_find(func->bus, func->device, index++);
687 } 668 }
688 669
@@ -728,18 +709,18 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
728 pci_bus->number = func->bus; 709 pci_bus->number = func->bus;
729 devfn = PCI_DEVFN(func->device, func->function); 710 devfn = PCI_DEVFN(func->device, func->function);
730 711
731 // Save the command register 712 /* Save the command register */
732 pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &save_command); 713 pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &save_command);
733 714
734 // disable card 715 /* disable card */
735 command = 0x00; 716 command = 0x00;
736 pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command); 717 pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command);
737 718
738 // Check for Bridge 719 /* Check for Bridge */
739 pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type); 720 pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
740 721
741 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge 722 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
742 // Clear Bridge Control Register 723 /* Clear Bridge Control Register */
743 command = 0x00; 724 command = 0x00;
744 pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command); 725 pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command);
745 pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus); 726 pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
@@ -755,7 +736,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
755 bus_node->next = func->bus_head; 736 bus_node->next = func->bus_head;
756 func->bus_head = bus_node; 737 func->bus_head = bus_node;
757 738
758 // Save IO base and Limit registers 739 /* Save IO base and Limit registers */
759 pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_BASE, &b_base); 740 pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_BASE, &b_base);
760 pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_LIMIT, &b_length); 741 pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_LIMIT, &b_length);
761 742
@@ -771,7 +752,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
771 func->io_head = io_node; 752 func->io_head = io_node;
772 } 753 }
773 754
774 // Save memory base and Limit registers 755 /* Save memory base and Limit registers */
775 pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_BASE, &w_base); 756 pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_BASE, &w_base);
776 pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, &w_length); 757 pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, &w_length);
777 758
@@ -787,7 +768,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
787 func->mem_head = mem_node; 768 func->mem_head = mem_node;
788 } 769 }
789 770
790 // Save prefetchable memory base and Limit registers 771 /* Save prefetchable memory base and Limit registers */
791 pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, &w_base); 772 pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, &w_base);
792 pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length); 773 pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length);
793 774
@@ -802,7 +783,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
802 p_mem_node->next = func->p_mem_head; 783 p_mem_node->next = func->p_mem_head;
803 func->p_mem_head = p_mem_node; 784 func->p_mem_head = p_mem_node;
804 } 785 }
805 // Figure out IO and memory base lengths 786 /* Figure out IO and memory base lengths */
806 for (cloop = 0x10; cloop <= 0x14; cloop += 4) { 787 for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
807 pci_bus_read_config_dword (pci_bus, devfn, cloop, &save_base); 788 pci_bus_read_config_dword (pci_bus, devfn, cloop, &save_base);
808 789
@@ -812,11 +793,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
812 793
813 temp_register = base; 794 temp_register = base;
814 795
815 if (base) { // If this register is implemented 796 /* If this register is implemented */
797 if (base) {
816 if (((base & 0x03L) == 0x01) 798 if (((base & 0x03L) == 0x01)
817 && (save_command & 0x01)) { 799 && (save_command & 0x01)) {
818 // IO base 800 /* IO base
819 // set temp_register = amount of IO space requested 801 * set temp_register = amount
802 * of IO space requested
803 */
820 temp_register = base & 0xFFFFFFFE; 804 temp_register = base & 0xFFFFFFFE;
821 temp_register = (~temp_register) + 1; 805 temp_register = (~temp_register) + 1;
822 806
@@ -834,7 +818,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
834 } else 818 } else
835 if (((base & 0x0BL) == 0x08) 819 if (((base & 0x0BL) == 0x08)
836 && (save_command & 0x02)) { 820 && (save_command & 0x02)) {
837 // prefetchable memory base 821 /* prefetchable memory base */
838 temp_register = base & 0xFFFFFFF0; 822 temp_register = base & 0xFFFFFFF0;
839 temp_register = (~temp_register) + 1; 823 temp_register = (~temp_register) + 1;
840 824
@@ -851,7 +835,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
851 } else 835 } else
852 if (((base & 0x0BL) == 0x00) 836 if (((base & 0x0BL) == 0x00)
853 && (save_command & 0x02)) { 837 && (save_command & 0x02)) {
854 // prefetchable memory base 838 /* prefetchable memory base */
855 temp_register = base & 0xFFFFFFF0; 839 temp_register = base & 0xFFFFFFF0;
856 temp_register = (~temp_register) + 1; 840 temp_register = (~temp_register) + 1;
857 841
@@ -868,9 +852,10 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
868 } else 852 } else
869 return(1); 853 return(1);
870 } 854 }
871 } // End of base register loop 855 } /* End of base register loop */
872 } else if ((header_type & 0x7F) == 0x00) { // Standard header 856 /* Standard header */
873 // Figure out IO and memory base lengths 857 } else if ((header_type & 0x7F) == 0x00) {
858 /* Figure out IO and memory base lengths */
874 for (cloop = 0x10; cloop <= 0x24; cloop += 4) { 859 for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
875 pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base); 860 pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base);
876 861
@@ -880,11 +865,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
880 865
881 temp_register = base; 866 temp_register = base;
882 867
883 if (base) { // If this register is implemented 868 /* If this register is implemented */
869 if (base) {
884 if (((base & 0x03L) == 0x01) 870 if (((base & 0x03L) == 0x01)
885 && (save_command & 0x01)) { 871 && (save_command & 0x01)) {
886 // IO base 872 /* IO base
887 // set temp_register = amount of IO space requested 873 * set temp_register = amount
874 * of IO space requested
875 */
888 temp_register = base & 0xFFFFFFFE; 876 temp_register = base & 0xFFFFFFFE;
889 temp_register = (~temp_register) + 1; 877 temp_register = (~temp_register) + 1;
890 878
@@ -901,7 +889,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
901 } else 889 } else
902 if (((base & 0x0BL) == 0x08) 890 if (((base & 0x0BL) == 0x08)
903 && (save_command & 0x02)) { 891 && (save_command & 0x02)) {
904 // prefetchable memory base 892 /* prefetchable memory base */
905 temp_register = base & 0xFFFFFFF0; 893 temp_register = base & 0xFFFFFFF0;
906 temp_register = (~temp_register) + 1; 894 temp_register = (~temp_register) + 1;
907 895
@@ -918,7 +906,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
918 } else 906 } else
919 if (((base & 0x0BL) == 0x00) 907 if (((base & 0x0BL) == 0x00)
920 && (save_command & 0x02)) { 908 && (save_command & 0x02)) {
921 // prefetchable memory base 909 /* prefetchable memory base */
922 temp_register = base & 0xFFFFFFF0; 910 temp_register = base & 0xFFFFFFF0;
923 temp_register = (~temp_register) + 1; 911 temp_register = (~temp_register) + 1;
924 912
@@ -935,15 +923,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
935 } else 923 } else
936 return(1); 924 return(1);
937 } 925 }
938 } // End of base register loop 926 } /* End of base register loop */
939 } else { // Some other unknown header type
940 } 927 }
941 928
942 // find the next device in this slot 929 /* find the next device in this slot */
943 func = cpqhp_slot_find(func->bus, func->device, index++); 930 func = cpqhp_slot_find(func->bus, func->device, index++);
944 } 931 }
945 932
946 return(0); 933 return 0;
947} 934}
948 935
949 936
@@ -975,16 +962,16 @@ int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
975 pci_bus->number = func->bus; 962 pci_bus->number = func->bus;
976 devfn = PCI_DEVFN(func->device, func->function); 963 devfn = PCI_DEVFN(func->device, func->function);
977 964
978 // Start at the top of config space so that the control 965 /* Start at the top of config space so that the control
979 // registers are programmed last 966 * registers are programmed last
980 for (cloop = 0x3C; cloop > 0; cloop -= 4) { 967 */
968 for (cloop = 0x3C; cloop > 0; cloop -= 4)
981 pci_bus_write_config_dword (pci_bus, devfn, cloop, func->config_space[cloop >> 2]); 969 pci_bus_write_config_dword (pci_bus, devfn, cloop, func->config_space[cloop >> 2]);
982 }
983 970
984 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type); 971 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
985 972
986 // If this is a bridge device, restore subordinate devices 973 /* If this is a bridge device, restore subordinate devices */
987 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge 974 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
988 pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus); 975 pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
989 976
990 sub_bus = (int) secondary_bus; 977 sub_bus = (int) secondary_bus;
@@ -1000,8 +987,9 @@ int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
1000 } 987 }
1001 } else { 988 } else {
1002 989
1003 // Check all the base Address Registers to make sure 990 /* Check all the base Address Registers to make sure
1004 // they are the same. If not, the board is different. 991 * they are the same. If not, the board is different.
992 */
1005 993
1006 for (cloop = 16; cloop < 40; cloop += 4) { 994 for (cloop = 16; cloop < 40; cloop += 4) {
1007 pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp); 995 pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp);
@@ -1058,27 +1046,28 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
1058 1046
1059 pci_bus_read_config_dword (pci_bus, devfn, PCI_VENDOR_ID, &temp_register); 1047 pci_bus_read_config_dword (pci_bus, devfn, PCI_VENDOR_ID, &temp_register);
1060 1048
1061 // No adapter present 1049 /* No adapter present */
1062 if (temp_register == 0xFFFFFFFF) 1050 if (temp_register == 0xFFFFFFFF)
1063 return(NO_ADAPTER_PRESENT); 1051 return(NO_ADAPTER_PRESENT);
1064 1052
1065 if (temp_register != func->config_space[0]) 1053 if (temp_register != func->config_space[0])
1066 return(ADAPTER_NOT_SAME); 1054 return(ADAPTER_NOT_SAME);
1067 1055
1068 // Check for same revision number and class code 1056 /* Check for same revision number and class code */
1069 pci_bus_read_config_dword (pci_bus, devfn, PCI_CLASS_REVISION, &temp_register); 1057 pci_bus_read_config_dword (pci_bus, devfn, PCI_CLASS_REVISION, &temp_register);
1070 1058
1071 // Adapter not the same 1059 /* Adapter not the same */
1072 if (temp_register != func->config_space[0x08 >> 2]) 1060 if (temp_register != func->config_space[0x08 >> 2])
1073 return(ADAPTER_NOT_SAME); 1061 return(ADAPTER_NOT_SAME);
1074 1062
1075 // Check for Bridge 1063 /* Check for Bridge */
1076 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type); 1064 pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
1077 1065
1078 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge 1066 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
1079 // In order to continue checking, we must program the 1067 /* In order to continue checking, we must program the
1080 // bus registers in the bridge to respond to accesses 1068 * bus registers in the bridge to respond to accesses
1081 // for it's subordinate bus(es) 1069 * for its subordinate bus(es)
1070 */
1082 1071
1083 temp_register = func->config_space[0x18 >> 2]; 1072 temp_register = func->config_space[0x18 >> 2];
1084 pci_bus_write_config_dword (pci_bus, devfn, PCI_PRIMARY_BUS, temp_register); 1073 pci_bus_write_config_dword (pci_bus, devfn, PCI_PRIMARY_BUS, temp_register);
@@ -1096,35 +1085,39 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
1096 } 1085 }
1097 1086
1098 } 1087 }
1099 // Check to see if it is a standard config header 1088 /* Check to see if it is a standard config header */
1100 else if ((header_type & 0x7F) == PCI_HEADER_TYPE_NORMAL) { 1089 else if ((header_type & 0x7F) == PCI_HEADER_TYPE_NORMAL) {
1101 // Check subsystem vendor and ID 1090 /* Check subsystem vendor and ID */
1102 pci_bus_read_config_dword (pci_bus, devfn, PCI_SUBSYSTEM_VENDOR_ID, &temp_register); 1091 pci_bus_read_config_dword (pci_bus, devfn, PCI_SUBSYSTEM_VENDOR_ID, &temp_register);
1103 1092
1104 if (temp_register != func->config_space[0x2C >> 2]) { 1093 if (temp_register != func->config_space[0x2C >> 2]) {
1105 // If it's a SMART-2 and the register isn't filled 1094 /* If it's a SMART-2 and the register isn't
1106 // in, ignore the difference because 1095 * filled in, ignore the difference because
1107 // they just have an old rev of the firmware 1096 * they just have an old rev of the firmware
1108 1097 */
1109 if (!((func->config_space[0] == 0xAE100E11) 1098 if (!((func->config_space[0] == 0xAE100E11)
1110 && (temp_register == 0x00L))) 1099 && (temp_register == 0x00L)))
1111 return(ADAPTER_NOT_SAME); 1100 return(ADAPTER_NOT_SAME);
1112 } 1101 }
1113 // Figure out IO and memory base lengths 1102 /* Figure out IO and memory base lengths */
1114 for (cloop = 0x10; cloop <= 0x24; cloop += 4) { 1103 for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
1115 temp_register = 0xFFFFFFFF; 1104 temp_register = 0xFFFFFFFF;
1116 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register); 1105 pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
1117 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base); 1106 pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
1118 if (base) { // If this register is implemented 1107
1108 /* If this register is implemented */
1109 if (base) {
1119 if (base & 0x01L) { 1110 if (base & 0x01L) {
1120 // IO base 1111 /* IO base
1121 // set base = amount of IO space requested 1112 * set base = amount of IO
1113 * space requested
1114 */
1122 base = base & 0xFFFFFFFE; 1115 base = base & 0xFFFFFFFE;
1123 base = (~base) + 1; 1116 base = (~base) + 1;
1124 1117
1125 type = 1; 1118 type = 1;
1126 } else { 1119 } else {
1127 // memory base 1120 /* memory base */
1128 base = base & 0xFFFFFFF0; 1121 base = base & 0xFFFFFFF0;
1129 base = (~base) + 1; 1122 base = (~base) + 1;
1130 1123
@@ -1135,23 +1128,24 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
1135 type = 0; 1128 type = 0;
1136 } 1129 }
1137 1130
1138 // Check information in slot structure 1131 /* Check information in slot structure */
1139 if (func->base_length[(cloop - 0x10) >> 2] != base) 1132 if (func->base_length[(cloop - 0x10) >> 2] != base)
1140 return(ADAPTER_NOT_SAME); 1133 return(ADAPTER_NOT_SAME);
1141 1134
1142 if (func->base_type[(cloop - 0x10) >> 2] != type) 1135 if (func->base_type[(cloop - 0x10) >> 2] != type)
1143 return(ADAPTER_NOT_SAME); 1136 return(ADAPTER_NOT_SAME);
1144 1137
1145 } // End of base register loop 1138 } /* End of base register loop */
1146 1139
1147 } // End of (type 0 config space) else 1140 } /* End of (type 0 config space) else */
1148 else { 1141 else {
1149 // this is not a type 0 or 1 config space header so 1142 /* this is not a type 0 or 1 config space header so
1150 // we don't know how to do it 1143 * we don't know how to do it
1144 */
1151 return(DEVICE_TYPE_NOT_SUPPORTED); 1145 return(DEVICE_TYPE_NOT_SUPPORTED);
1152 } 1146 }
1153 1147
1154 // Get the next function 1148 /* Get the next function */
1155 func = cpqhp_slot_find(func->bus, func->device, index++); 1149 func = cpqhp_slot_find(func->bus, func->device, index++);
1156 } 1150 }
1157 1151
@@ -1168,7 +1162,7 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
1168 * this function is for hot plug ADD! 1162 * this function is for hot plug ADD!
1169 * 1163 *
1170 * returns 0 if success 1164 * returns 0 if success
1171 */ 1165 */
1172int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_start) 1166int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_start)
1173{ 1167{
1174 u8 temp; 1168 u8 temp;
@@ -1187,10 +1181,10 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1187 rom_resource_table = detect_HRT_floating_pointer(rom_start, rom_start+0xffff); 1181 rom_resource_table = detect_HRT_floating_pointer(rom_start, rom_start+0xffff);
1188 dbg("rom_resource_table = %p\n", rom_resource_table); 1182 dbg("rom_resource_table = %p\n", rom_resource_table);
1189 1183
1190 if (rom_resource_table == NULL) { 1184 if (rom_resource_table == NULL)
1191 return -ENODEV; 1185 return -ENODEV;
1192 } 1186
1193 // Sum all resources and setup resource maps 1187 /* Sum all resources and setup resource maps */
1194 unused_IRQ = readl(rom_resource_table + UNUSED_IRQ); 1188 unused_IRQ = readl(rom_resource_table + UNUSED_IRQ);
1195 dbg("unused_IRQ = %x\n", unused_IRQ); 1189 dbg("unused_IRQ = %x\n", unused_IRQ);
1196 1190
@@ -1222,13 +1216,11 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1222 1216
1223 temp = 0; 1217 temp = 0;
1224 1218
1225 if (!cpqhp_nic_irq) { 1219 if (!cpqhp_nic_irq)
1226 cpqhp_nic_irq = ctrl->cfgspc_irq; 1220 cpqhp_nic_irq = ctrl->cfgspc_irq;
1227 }
1228 1221
1229 if (!cpqhp_disk_irq) { 1222 if (!cpqhp_disk_irq)
1230 cpqhp_disk_irq = ctrl->cfgspc_irq; 1223 cpqhp_disk_irq = ctrl->cfgspc_irq;
1231 }
1232 1224
1233 dbg("cpqhp_disk_irq, cpqhp_nic_irq= %d, %d\n", cpqhp_disk_irq, cpqhp_nic_irq); 1225 dbg("cpqhp_disk_irq, cpqhp_nic_irq= %d, %d\n", cpqhp_disk_irq, cpqhp_nic_irq);
1234 1226
@@ -1262,13 +1254,13 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1262 dev_func, io_base, io_length, mem_base, mem_length, pre_mem_base, pre_mem_length, 1254 dev_func, io_base, io_length, mem_base, mem_length, pre_mem_base, pre_mem_length,
1263 primary_bus, secondary_bus, max_bus); 1255 primary_bus, secondary_bus, max_bus);
1264 1256
1265 // If this entry isn't for our controller's bus, ignore it 1257 /* If this entry isn't for our controller's bus, ignore it */
1266 if (primary_bus != ctrl->bus) { 1258 if (primary_bus != ctrl->bus) {
1267 i--; 1259 i--;
1268 one_slot += sizeof (struct slot_rt); 1260 one_slot += sizeof (struct slot_rt);
1269 continue; 1261 continue;
1270 } 1262 }
1271 // find out if this entry is for an occupied slot 1263 /* find out if this entry is for an occupied slot */
1272 ctrl->pci_bus->number = primary_bus; 1264 ctrl->pci_bus->number = primary_bus;
1273 pci_bus_read_config_dword (ctrl->pci_bus, dev_func, PCI_VENDOR_ID, &temp_dword); 1265 pci_bus_read_config_dword (ctrl->pci_bus, dev_func, PCI_VENDOR_ID, &temp_dword);
1274 dbg("temp_D_word = %x\n", temp_dword); 1266 dbg("temp_D_word = %x\n", temp_dword);
@@ -1282,13 +1274,13 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1282 func = cpqhp_slot_find(primary_bus, dev_func >> 3, index++); 1274 func = cpqhp_slot_find(primary_bus, dev_func >> 3, index++);
1283 } 1275 }
1284 1276
1285 // If we can't find a match, skip this table entry 1277 /* If we can't find a match, skip this table entry */
1286 if (!func) { 1278 if (!func) {
1287 i--; 1279 i--;
1288 one_slot += sizeof (struct slot_rt); 1280 one_slot += sizeof (struct slot_rt);
1289 continue; 1281 continue;
1290 } 1282 }
1291 // this may not work and shouldn't be used 1283 /* this may not work and shouldn't be used */
1292 if (secondary_bus != primary_bus) 1284 if (secondary_bus != primary_bus)
1293 bridged_slot = 1; 1285 bridged_slot = 1;
1294 else 1286 else
@@ -1301,7 +1293,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1301 } 1293 }
1302 1294
1303 1295
1304 // If we've got a valid IO base, use it 1296 /* If we've got a valid IO base, use it */
1305 1297
1306 temp_dword = io_base + io_length; 1298 temp_dword = io_base + io_length;
1307 1299
@@ -1325,7 +1317,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1325 } 1317 }
1326 } 1318 }
1327 1319
1328 // If we've got a valid memory base, use it 1320 /* If we've got a valid memory base, use it */
1329 temp_dword = mem_base + mem_length; 1321 temp_dword = mem_base + mem_length;
1330 if ((mem_base) && (temp_dword < 0x10000)) { 1322 if ((mem_base) && (temp_dword < 0x10000)) {
1331 mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL); 1323 mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL);
@@ -1348,8 +1340,9 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1348 } 1340 }
1349 } 1341 }
1350 1342
1351 // If we've got a valid prefetchable memory base, and 1343 /* If we've got a valid prefetchable memory base, and
1352 // the base + length isn't greater than 0xFFFF 1344 * the base + length isn't greater than 0xFFFF
1345 */
1353 temp_dword = pre_mem_base + pre_mem_length; 1346 temp_dword = pre_mem_base + pre_mem_length;
1354 if ((pre_mem_base) && (temp_dword < 0x10000)) { 1347 if ((pre_mem_base) && (temp_dword < 0x10000)) {
1355 p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL); 1348 p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL);
@@ -1372,9 +1365,10 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1372 } 1365 }
1373 } 1366 }
1374 1367
1375 // If we've got a valid bus number, use it 1368 /* If we've got a valid bus number, use it
1376 // The second condition is to ignore bus numbers on 1369 * The second condition is to ignore bus numbers on
1377 // populated slots that don't have PCI-PCI bridges 1370 * populated slots that don't have PCI-PCI bridges
1371 */
1378 if (secondary_bus && (secondary_bus != primary_bus)) { 1372 if (secondary_bus && (secondary_bus != primary_bus)) {
1379 bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL); 1373 bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL);
1380 if (!bus_node) 1374 if (!bus_node)
@@ -1398,8 +1392,9 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
1398 one_slot += sizeof (struct slot_rt); 1392 one_slot += sizeof (struct slot_rt);
1399 } 1393 }
1400 1394
1401 // If all of the following fail, we don't have any resources for 1395 /* If all of the following fail, we don't have any resources for
1402 // hot plug add 1396 * hot plug add
1397 */
1403 rc = 1; 1398 rc = 1;
1404 rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head)); 1399 rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
1405 rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head)); 1400 rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 42e4260c3b12..7485ffda950c 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -1318,7 +1318,6 @@ error:
1318} 1318}
1319 1319
1320struct hotplug_slot_ops ibmphp_hotplug_slot_ops = { 1320struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
1321 .owner = THIS_MODULE,
1322 .set_attention_status = set_attention_status, 1321 .set_attention_status = set_attention_status,
1323 .enable_slot = enable_slot, 1322 .enable_slot = enable_slot,
1324 .disable_slot = ibmphp_disable_slot, 1323 .disable_slot = ibmphp_disable_slot,
@@ -1421,3 +1420,4 @@ static void __exit ibmphp_exit(void)
1421} 1420}
1422 1421
1423module_init(ibmphp_init); 1422module_init(ibmphp_init);
1423module_exit(ibmphp_exit);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 535fce0f07f9..844580489d4d 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -347,125 +347,129 @@ static struct pci_slot_attribute hotplug_slot_attr_test = {
347 .store = test_write_file 347 .store = test_write_file
348}; 348};
349 349
350static int has_power_file(struct pci_slot *pci_slot) 350static bool has_power_file(struct pci_slot *pci_slot)
351{ 351{
352 struct hotplug_slot *slot = pci_slot->hotplug; 352 struct hotplug_slot *slot = pci_slot->hotplug;
353 if ((!slot) || (!slot->ops)) 353 if ((!slot) || (!slot->ops))
354 return -ENODEV; 354 return false;
355 if ((slot->ops->enable_slot) || 355 if ((slot->ops->enable_slot) ||
356 (slot->ops->disable_slot) || 356 (slot->ops->disable_slot) ||
357 (slot->ops->get_power_status)) 357 (slot->ops->get_power_status))
358 return 0; 358 return true;
359 return -ENOENT; 359 return false;
360} 360}
361 361
362static int has_attention_file(struct pci_slot *pci_slot) 362static bool has_attention_file(struct pci_slot *pci_slot)
363{ 363{
364 struct hotplug_slot *slot = pci_slot->hotplug; 364 struct hotplug_slot *slot = pci_slot->hotplug;
365 if ((!slot) || (!slot->ops)) 365 if ((!slot) || (!slot->ops))
366 return -ENODEV; 366 return false;
367 if ((slot->ops->set_attention_status) || 367 if ((slot->ops->set_attention_status) ||
368 (slot->ops->get_attention_status)) 368 (slot->ops->get_attention_status))
369 return 0; 369 return true;
370 return -ENOENT; 370 return false;
371} 371}
372 372
373static int has_latch_file(struct pci_slot *pci_slot) 373static bool has_latch_file(struct pci_slot *pci_slot)
374{ 374{
375 struct hotplug_slot *slot = pci_slot->hotplug; 375 struct hotplug_slot *slot = pci_slot->hotplug;
376 if ((!slot) || (!slot->ops)) 376 if ((!slot) || (!slot->ops))
377 return -ENODEV; 377 return false;
378 if (slot->ops->get_latch_status) 378 if (slot->ops->get_latch_status)
379 return 0; 379 return true;
380 return -ENOENT; 380 return false;
381} 381}
382 382
383static int has_adapter_file(struct pci_slot *pci_slot) 383static bool has_adapter_file(struct pci_slot *pci_slot)
384{ 384{
385 struct hotplug_slot *slot = pci_slot->hotplug; 385 struct hotplug_slot *slot = pci_slot->hotplug;
386 if ((!slot) || (!slot->ops)) 386 if ((!slot) || (!slot->ops))
387 return -ENODEV; 387 return false;
388 if (slot->ops->get_adapter_status) 388 if (slot->ops->get_adapter_status)
389 return 0; 389 return true;
390 return -ENOENT; 390 return false;
391} 391}
392 392
393static int has_max_bus_speed_file(struct pci_slot *pci_slot) 393static bool has_max_bus_speed_file(struct pci_slot *pci_slot)
394{ 394{
395 struct hotplug_slot *slot = pci_slot->hotplug; 395 struct hotplug_slot *slot = pci_slot->hotplug;
396 if ((!slot) || (!slot->ops)) 396 if ((!slot) || (!slot->ops))
397 return -ENODEV; 397 return false;
398 if (slot->ops->get_max_bus_speed) 398 if (slot->ops->get_max_bus_speed)
399 return 0; 399 return true;
400 return -ENOENT; 400 return false;
401} 401}
402 402
403static int has_cur_bus_speed_file(struct pci_slot *pci_slot) 403static bool has_cur_bus_speed_file(struct pci_slot *pci_slot)
404{ 404{
405 struct hotplug_slot *slot = pci_slot->hotplug; 405 struct hotplug_slot *slot = pci_slot->hotplug;
406 if ((!slot) || (!slot->ops)) 406 if ((!slot) || (!slot->ops))
407 return -ENODEV; 407 return false;
408 if (slot->ops->get_cur_bus_speed) 408 if (slot->ops->get_cur_bus_speed)
409 return 0; 409 return true;
410 return -ENOENT; 410 return false;
411} 411}
412 412
413static int has_test_file(struct pci_slot *pci_slot) 413static bool has_test_file(struct pci_slot *pci_slot)
414{ 414{
415 struct hotplug_slot *slot = pci_slot->hotplug; 415 struct hotplug_slot *slot = pci_slot->hotplug;
416 if ((!slot) || (!slot->ops)) 416 if ((!slot) || (!slot->ops))
417 return -ENODEV; 417 return false;
418 if (slot->ops->hardware_test) 418 if (slot->ops->hardware_test)
419 return 0; 419 return true;
420 return -ENOENT; 420 return false;
421} 421}
422 422
423static int fs_add_slot(struct pci_slot *slot) 423static int fs_add_slot(struct pci_slot *slot)
424{ 424{
425 int retval = 0; 425 int retval = 0;
426 426
427 if (has_power_file(slot) == 0) { 427 /* Create symbolic link to the hotplug driver module */
428 retval = sysfs_create_file(&slot->kobj, &hotplug_slot_attr_power.attr); 428 pci_hp_create_module_link(slot);
429
430 if (has_power_file(slot)) {
431 retval = sysfs_create_file(&slot->kobj,
432 &hotplug_slot_attr_power.attr);
429 if (retval) 433 if (retval)
430 goto exit_power; 434 goto exit_power;
431 } 435 }
432 436
433 if (has_attention_file(slot) == 0) { 437 if (has_attention_file(slot)) {
434 retval = sysfs_create_file(&slot->kobj, 438 retval = sysfs_create_file(&slot->kobj,
435 &hotplug_slot_attr_attention.attr); 439 &hotplug_slot_attr_attention.attr);
436 if (retval) 440 if (retval)
437 goto exit_attention; 441 goto exit_attention;
438 } 442 }
439 443
440 if (has_latch_file(slot) == 0) { 444 if (has_latch_file(slot)) {
441 retval = sysfs_create_file(&slot->kobj, 445 retval = sysfs_create_file(&slot->kobj,
442 &hotplug_slot_attr_latch.attr); 446 &hotplug_slot_attr_latch.attr);
443 if (retval) 447 if (retval)
444 goto exit_latch; 448 goto exit_latch;
445 } 449 }
446 450
447 if (has_adapter_file(slot) == 0) { 451 if (has_adapter_file(slot)) {
448 retval = sysfs_create_file(&slot->kobj, 452 retval = sysfs_create_file(&slot->kobj,
449 &hotplug_slot_attr_presence.attr); 453 &hotplug_slot_attr_presence.attr);
450 if (retval) 454 if (retval)
451 goto exit_adapter; 455 goto exit_adapter;
452 } 456 }
453 457
454 if (has_max_bus_speed_file(slot) == 0) { 458 if (has_max_bus_speed_file(slot)) {
455 retval = sysfs_create_file(&slot->kobj, 459 retval = sysfs_create_file(&slot->kobj,
456 &hotplug_slot_attr_max_bus_speed.attr); 460 &hotplug_slot_attr_max_bus_speed.attr);
457 if (retval) 461 if (retval)
458 goto exit_max_speed; 462 goto exit_max_speed;
459 } 463 }
460 464
461 if (has_cur_bus_speed_file(slot) == 0) { 465 if (has_cur_bus_speed_file(slot)) {
462 retval = sysfs_create_file(&slot->kobj, 466 retval = sysfs_create_file(&slot->kobj,
463 &hotplug_slot_attr_cur_bus_speed.attr); 467 &hotplug_slot_attr_cur_bus_speed.attr);
464 if (retval) 468 if (retval)
465 goto exit_cur_speed; 469 goto exit_cur_speed;
466 } 470 }
467 471
468 if (has_test_file(slot) == 0) { 472 if (has_test_file(slot)) {
469 retval = sysfs_create_file(&slot->kobj, 473 retval = sysfs_create_file(&slot->kobj,
470 &hotplug_slot_attr_test.attr); 474 &hotplug_slot_attr_test.attr);
471 if (retval) 475 if (retval)
@@ -475,55 +479,61 @@ static int fs_add_slot(struct pci_slot *slot)
475 goto exit; 479 goto exit;
476 480
477exit_test: 481exit_test:
478 if (has_cur_bus_speed_file(slot) == 0) 482 if (has_cur_bus_speed_file(slot))
479 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_cur_bus_speed.attr); 483 sysfs_remove_file(&slot->kobj,
480 484 &hotplug_slot_attr_cur_bus_speed.attr);
481exit_cur_speed: 485exit_cur_speed:
482 if (has_max_bus_speed_file(slot) == 0) 486 if (has_max_bus_speed_file(slot))
483 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); 487 sysfs_remove_file(&slot->kobj,
484 488 &hotplug_slot_attr_max_bus_speed.attr);
485exit_max_speed: 489exit_max_speed:
486 if (has_adapter_file(slot) == 0) 490 if (has_adapter_file(slot))
487 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr); 491 sysfs_remove_file(&slot->kobj,
488 492 &hotplug_slot_attr_presence.attr);
489exit_adapter: 493exit_adapter:
490 if (has_latch_file(slot) == 0) 494 if (has_latch_file(slot))
491 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr); 495 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
492
493exit_latch: 496exit_latch:
494 if (has_attention_file(slot) == 0) 497 if (has_attention_file(slot))
495 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_attention.attr); 498 sysfs_remove_file(&slot->kobj,
496 499 &hotplug_slot_attr_attention.attr);
497exit_attention: 500exit_attention:
498 if (has_power_file(slot) == 0) 501 if (has_power_file(slot))
499 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr); 502 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
500exit_power: 503exit_power:
504 pci_hp_remove_module_link(slot);
501exit: 505exit:
502 return retval; 506 return retval;
503} 507}
504 508
505static void fs_remove_slot(struct pci_slot *slot) 509static void fs_remove_slot(struct pci_slot *slot)
506{ 510{
507 if (has_power_file(slot) == 0) 511 if (has_power_file(slot))
508 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr); 512 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
509 513
510 if (has_attention_file(slot) == 0) 514 if (has_attention_file(slot))
511 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_attention.attr); 515 sysfs_remove_file(&slot->kobj,
516 &hotplug_slot_attr_attention.attr);
512 517
513 if (has_latch_file(slot) == 0) 518 if (has_latch_file(slot))
514 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr); 519 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
515 520
516 if (has_adapter_file(slot) == 0) 521 if (has_adapter_file(slot))
517 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr); 522 sysfs_remove_file(&slot->kobj,
523 &hotplug_slot_attr_presence.attr);
518 524
519 if (has_max_bus_speed_file(slot) == 0) 525 if (has_max_bus_speed_file(slot))
520 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); 526 sysfs_remove_file(&slot->kobj,
527 &hotplug_slot_attr_max_bus_speed.attr);
521 528
522 if (has_cur_bus_speed_file(slot) == 0) 529 if (has_cur_bus_speed_file(slot))
523 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_cur_bus_speed.attr); 530 sysfs_remove_file(&slot->kobj,
531 &hotplug_slot_attr_cur_bus_speed.attr);
524 532
525 if (has_test_file(slot) == 0) 533 if (has_test_file(slot))
526 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr); 534 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr);
535
536 pci_hp_remove_module_link(slot);
527} 537}
528 538
529static struct hotplug_slot *get_slot_from_name (const char *name) 539static struct hotplug_slot *get_slot_from_name (const char *name)
@@ -540,10 +550,10 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
540} 550}
541 551
542/** 552/**
543 * pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem 553 * __pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
544 * @bus: bus this slot is on 554 * @bus: bus this slot is on
545 * @slot: pointer to the &struct hotplug_slot to register 555 * @slot: pointer to the &struct hotplug_slot to register
546 * @slot_nr: slot number 556 * @devnr: device number
547 * @name: name registered with kobject core 557 * @name: name registered with kobject core
548 * 558 *
549 * Registers a hotplug slot with the pci hotplug subsystem, which will allow 559 * Registers a hotplug slot with the pci hotplug subsystem, which will allow
@@ -551,8 +561,9 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
551 * 561 *
552 * Returns 0 if successful, anything else for an error. 562 * Returns 0 if successful, anything else for an error.
553 */ 563 */
554int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr, 564int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
555 const char *name) 565 int devnr, const char *name,
566 struct module *owner, const char *mod_name)
556{ 567{
557 int result; 568 int result;
558 struct pci_slot *pci_slot; 569 struct pci_slot *pci_slot;
@@ -567,14 +578,16 @@ int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr,
567 return -EINVAL; 578 return -EINVAL;
568 } 579 }
569 580
570 mutex_lock(&pci_hp_mutex); 581 slot->ops->owner = owner;
582 slot->ops->mod_name = mod_name;
571 583
584 mutex_lock(&pci_hp_mutex);
572 /* 585 /*
573 * No problems if we call this interface from both ACPI_PCI_SLOT 586 * No problems if we call this interface from both ACPI_PCI_SLOT
574 * driver and call it here again. If we've already created the 587 * driver and call it here again. If we've already created the
575 * pci_slot, the interface will simply bump the refcount. 588 * pci_slot, the interface will simply bump the refcount.
576 */ 589 */
577 pci_slot = pci_create_slot(bus, slot_nr, name, slot); 590 pci_slot = pci_create_slot(bus, devnr, name, slot);
578 if (IS_ERR(pci_slot)) { 591 if (IS_ERR(pci_slot)) {
579 result = PTR_ERR(pci_slot); 592 result = PTR_ERR(pci_slot);
580 goto out; 593 goto out;
@@ -684,6 +697,6 @@ MODULE_LICENSE("GPL");
684module_param(debug, bool, 0644); 697module_param(debug, bool, 0644);
685MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); 698MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
686 699
687EXPORT_SYMBOL_GPL(pci_hp_register); 700EXPORT_SYMBOL_GPL(__pci_hp_register);
688EXPORT_SYMBOL_GPL(pci_hp_deregister); 701EXPORT_SYMBOL_GPL(pci_hp_deregister);
689EXPORT_SYMBOL_GPL(pci_hp_change_slot_info); 702EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 0a368547e633..e6cf096498be 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -81,7 +81,6 @@ struct slot {
81 struct hpc_ops *hpc_ops; 81 struct hpc_ops *hpc_ops;
82 struct hotplug_slot *hotplug_slot; 82 struct hotplug_slot *hotplug_slot;
83 struct list_head slot_list; 83 struct list_head slot_list;
84 unsigned long last_emi_toggle;
85 struct delayed_work work; /* work for button event */ 84 struct delayed_work work; /* work for button event */
86 struct mutex lock; 85 struct mutex lock;
87}; 86};
@@ -203,8 +202,6 @@ struct hpc_ops {
203 int (*set_attention_status)(struct slot *slot, u8 status); 202 int (*set_attention_status)(struct slot *slot, u8 status);
204 int (*get_latch_status)(struct slot *slot, u8 *status); 203 int (*get_latch_status)(struct slot *slot, u8 *status);
205 int (*get_adapter_status)(struct slot *slot, u8 *status); 204 int (*get_adapter_status)(struct slot *slot, u8 *status);
206 int (*get_emi_status)(struct slot *slot, u8 *status);
207 int (*toggle_emi)(struct slot *slot);
208 int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); 205 int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
209 int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); 206 int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
210 int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val); 207 int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index fb254b2454de..2317557fdee6 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -73,7 +73,6 @@ static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *val
73static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 73static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
74 74
75static struct hotplug_slot_ops pciehp_hotplug_slot_ops = { 75static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
76 .owner = THIS_MODULE,
77 .set_attention_status = set_attention_status, 76 .set_attention_status = set_attention_status,
78 .enable_slot = enable_slot, 77 .enable_slot = enable_slot,
79 .disable_slot = disable_slot, 78 .disable_slot = disable_slot,
@@ -85,99 +84,6 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
85 .get_cur_bus_speed = get_cur_bus_speed, 84 .get_cur_bus_speed = get_cur_bus_speed,
86}; 85};
87 86
88/*
89 * Check the status of the Electro Mechanical Interlock (EMI)
90 */
91static int get_lock_status(struct hotplug_slot *hotplug_slot, u8 *value)
92{
93 struct slot *slot = hotplug_slot->private;
94 return (slot->hpc_ops->get_emi_status(slot, value));
95}
96
97/*
98 * sysfs interface for the Electro Mechanical Interlock (EMI)
99 * 1 == locked, 0 == unlocked
100 */
101static ssize_t lock_read_file(struct hotplug_slot *slot, char *buf)
102{
103 int retval;
104 u8 value;
105
106 retval = get_lock_status(slot, &value);
107 if (retval)
108 goto lock_read_exit;
109 retval = sprintf (buf, "%d\n", value);
110
111lock_read_exit:
112 return retval;
113}
114
115/*
116 * Change the status of the Electro Mechanical Interlock (EMI)
117 * This is a toggle - in addition there must be at least 1 second
118 * in between toggles.
119 */
120static int set_lock_status(struct hotplug_slot *hotplug_slot, u8 status)
121{
122 struct slot *slot = hotplug_slot->private;
123 int retval;
124 u8 value;
125
126 mutex_lock(&slot->ctrl->crit_sect);
127
128 /* has it been >1 sec since our last toggle? */
129 if ((get_seconds() - slot->last_emi_toggle) < 1) {
130 mutex_unlock(&slot->ctrl->crit_sect);
131 return -EINVAL;
132 }
133
134 /* see what our current state is */
135 retval = get_lock_status(hotplug_slot, &value);
136 if (retval || (value == status))
137 goto set_lock_exit;
138
139 slot->hpc_ops->toggle_emi(slot);
140set_lock_exit:
141 mutex_unlock(&slot->ctrl->crit_sect);
142 return 0;
143}
144
145/*
146 * sysfs interface which allows the user to toggle the Electro Mechanical
147 * Interlock. Valid values are either 0 or 1. 0 == unlock, 1 == lock
148 */
149static ssize_t lock_write_file(struct hotplug_slot *hotplug_slot,
150 const char *buf, size_t count)
151{
152 struct slot *slot = hotplug_slot->private;
153 unsigned long llock;
154 u8 lock;
155 int retval = 0;
156
157 llock = simple_strtoul(buf, NULL, 10);
158 lock = (u8)(llock & 0xff);
159
160 switch (lock) {
161 case 0:
162 case 1:
163 retval = set_lock_status(hotplug_slot, lock);
164 break;
165 default:
166 ctrl_err(slot->ctrl, "%d is an invalid lock value\n",
167 lock);
168 retval = -EINVAL;
169 }
170 if (retval)
171 return retval;
172 return count;
173}
174
175static struct hotplug_slot_attribute hotplug_slot_attr_lock = {
176 .attr = {.name = "lock", .mode = S_IFREG | S_IRUGO | S_IWUSR},
177 .show = lock_read_file,
178 .store = lock_write_file
179};
180
181/** 87/**
182 * release_slot - free up the memory used by a slot 88 * release_slot - free up the memory used by a slot
183 * @hotplug_slot: slot to free 89 * @hotplug_slot: slot to free
@@ -236,17 +142,6 @@ static int init_slots(struct controller *ctrl)
236 get_attention_status(hotplug_slot, &info->attention_status); 142 get_attention_status(hotplug_slot, &info->attention_status);
237 get_latch_status(hotplug_slot, &info->latch_status); 143 get_latch_status(hotplug_slot, &info->latch_status);
238 get_adapter_status(hotplug_slot, &info->adapter_status); 144 get_adapter_status(hotplug_slot, &info->adapter_status);
239 /* create additional sysfs entries */
240 if (EMI(ctrl)) {
241 retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
242 &hotplug_slot_attr_lock.attr);
243 if (retval) {
244 pci_hp_deregister(hotplug_slot);
245 ctrl_err(ctrl, "Cannot create additional sysfs "
246 "entries\n");
247 goto error_info;
248 }
249 }
250 } 145 }
251 146
252 return 0; 147 return 0;
@@ -261,13 +156,8 @@ error:
261static void cleanup_slots(struct controller *ctrl) 156static void cleanup_slots(struct controller *ctrl)
262{ 157{
263 struct slot *slot; 158 struct slot *slot;
264 159 list_for_each_entry(slot, &ctrl->slot_list, slot_list)
265 list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
266 if (EMI(ctrl))
267 sysfs_remove_file(&slot->hotplug_slot->pci_slot->kobj,
268 &hotplug_slot_attr_lock.attr);
269 pci_hp_deregister(slot->hotplug_slot); 160 pci_hp_deregister(slot->hotplug_slot);
270 }
271} 161}
272 162
273/* 163/*
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 07bd32151146..52813257e5bf 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -422,35 +422,6 @@ static int hpc_query_power_fault(struct slot *slot)
422 return !!(slot_status & PCI_EXP_SLTSTA_PFD); 422 return !!(slot_status & PCI_EXP_SLTSTA_PFD);
423} 423}
424 424
425static int hpc_get_emi_status(struct slot *slot, u8 *status)
426{
427 struct controller *ctrl = slot->ctrl;
428 u16 slot_status;
429 int retval;
430
431 retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
432 if (retval) {
433 ctrl_err(ctrl, "Cannot check EMI status\n");
434 return retval;
435 }
436 *status = !!(slot_status & PCI_EXP_SLTSTA_EIS);
437 return retval;
438}
439
440static int hpc_toggle_emi(struct slot *slot)
441{
442 u16 slot_cmd;
443 u16 cmd_mask;
444 int rc;
445
446 slot_cmd = PCI_EXP_SLTCTL_EIC;
447 cmd_mask = PCI_EXP_SLTCTL_EIC;
448 rc = pcie_write_cmd(slot->ctrl, slot_cmd, cmd_mask);
449 slot->last_emi_toggle = get_seconds();
450
451 return rc;
452}
453
454static int hpc_set_attention_status(struct slot *slot, u8 value) 425static int hpc_set_attention_status(struct slot *slot, u8 value)
455{ 426{
456 struct controller *ctrl = slot->ctrl; 427 struct controller *ctrl = slot->ctrl;
@@ -874,8 +845,6 @@ static struct hpc_ops pciehp_hpc_ops = {
874 .get_attention_status = hpc_get_attention_status, 845 .get_attention_status = hpc_get_attention_status,
875 .get_latch_status = hpc_get_latch_status, 846 .get_latch_status = hpc_get_latch_status,
876 .get_adapter_status = hpc_get_adapter_status, 847 .get_adapter_status = hpc_get_adapter_status,
877 .get_emi_status = hpc_get_emi_status,
878 .toggle_emi = hpc_toggle_emi,
879 848
880 .get_max_bus_speed = hpc_get_max_lnk_speed, 849 .get_max_bus_speed = hpc_get_max_lnk_speed,
881 .get_cur_bus_speed = hpc_get_cur_lnk_speed, 850 .get_cur_bus_speed = hpc_get_cur_lnk_speed,
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
index e3dd6cf9e89f..5175d9b26f0b 100644
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -82,7 +82,6 @@ static int get_latch_status (struct hotplug_slot *slot, u8 *value);
82static int get_adapter_status (struct hotplug_slot *slot, u8 *value); 82static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
83 83
84static struct hotplug_slot_ops skel_hotplug_slot_ops = { 84static struct hotplug_slot_ops skel_hotplug_slot_ops = {
85 .owner = THIS_MODULE,
86 .enable_slot = enable_slot, 85 .enable_slot = enable_slot,
87 .disable_slot = disable_slot, 86 .disable_slot = disable_slot,
88 .set_attention_status = set_attention_status, 87 .set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 95d02a08fdc7..c159223389ec 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -423,7 +423,6 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
423} 423}
424 424
425struct hotplug_slot_ops rpaphp_hotplug_slot_ops = { 425struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
426 .owner = THIS_MODULE,
427 .enable_slot = enable_slot, 426 .enable_slot = enable_slot,
428 .disable_slot = disable_slot, 427 .disable_slot = disable_slot,
429 .set_attention_status = set_attention_status, 428 .set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 2d6da78fddb6..a4494d78e7c2 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -83,7 +83,6 @@ static int disable_slot(struct hotplug_slot *slot);
83static inline int get_power_status(struct hotplug_slot *slot, u8 *value); 83static inline int get_power_status(struct hotplug_slot *slot, u8 *value);
84 84
85static struct hotplug_slot_ops sn_hotplug_slot_ops = { 85static struct hotplug_slot_ops sn_hotplug_slot_ops = {
86 .owner = THIS_MODULE,
87 .enable_slot = enable_slot, 86 .enable_slot = enable_slot,
88 .disable_slot = disable_slot, 87 .disable_slot = disable_slot,
89 .get_power_status = get_power_status, 88 .get_power_status = get_power_status,
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index fe8d149c2293..8a520a3d0f59 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -69,7 +69,6 @@ static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *val
69static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 69static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
70 70
71static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { 71static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
72 .owner = THIS_MODULE,
73 .set_attention_status = set_attention_status, 72 .set_attention_status = set_attention_status,
74 .enable_slot = enable_slot, 73 .enable_slot = enable_slot,
75 .disable_slot = disable_slot, 74 .disable_slot = disable_slot,
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index cd389162735f..178853a07440 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -53,6 +53,8 @@
53 53
54#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48 54#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
55 55
56#define MAX_AGAW_WIDTH 64
57
56#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) 58#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
57 59
58#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) 60#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
@@ -131,8 +133,6 @@ static inline void context_set_fault_enable(struct context_entry *context)
131 context->lo &= (((u64)-1) << 2) | 1; 133 context->lo &= (((u64)-1) << 2) | 1;
132} 134}
133 135
134#define CONTEXT_TT_MULTI_LEVEL 0
135
136static inline void context_set_translation_type(struct context_entry *context, 136static inline void context_set_translation_type(struct context_entry *context,
137 unsigned long value) 137 unsigned long value)
138{ 138{
@@ -256,6 +256,7 @@ struct device_domain_info {
256 u8 bus; /* PCI bus number */ 256 u8 bus; /* PCI bus number */
257 u8 devfn; /* PCI devfn number */ 257 u8 devfn; /* PCI devfn number */
258 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ 258 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
259 struct intel_iommu *iommu; /* IOMMU used by this device */
259 struct dmar_domain *domain; /* pointer to domain */ 260 struct dmar_domain *domain; /* pointer to domain */
260}; 261};
261 262
@@ -401,17 +402,13 @@ void free_iova_mem(struct iova *iova)
401 402
402static inline int width_to_agaw(int width); 403static inline int width_to_agaw(int width);
403 404
404/* calculate agaw for each iommu. 405static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
405 * "SAGAW" may be different across iommus, use a default agaw, and
406 * get a supported less agaw for iommus that don't support the default agaw.
407 */
408int iommu_calculate_agaw(struct intel_iommu *iommu)
409{ 406{
410 unsigned long sagaw; 407 unsigned long sagaw;
411 int agaw = -1; 408 int agaw = -1;
412 409
413 sagaw = cap_sagaw(iommu->cap); 410 sagaw = cap_sagaw(iommu->cap);
414 for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH); 411 for (agaw = width_to_agaw(max_gaw);
415 agaw >= 0; agaw--) { 412 agaw >= 0; agaw--) {
416 if (test_bit(agaw, &sagaw)) 413 if (test_bit(agaw, &sagaw))
417 break; 414 break;
@@ -420,6 +417,24 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
420 return agaw; 417 return agaw;
421} 418}
422 419
420/*
421 * Calculate max SAGAW for each iommu.
422 */
423int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
424{
425 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
426}
427
428/*
429 * calculate agaw for each iommu.
430 * "SAGAW" may be different across iommus, use a default agaw, and
431 * get a supported less agaw for iommus that don't support the default agaw.
432 */
433int iommu_calculate_agaw(struct intel_iommu *iommu)
434{
435 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
436}
437
423/* in native case, each domain is related to only one iommu */ 438/* in native case, each domain is related to only one iommu */
424static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) 439static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
425{ 440{
@@ -809,7 +824,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
809static void iommu_set_root_entry(struct intel_iommu *iommu) 824static void iommu_set_root_entry(struct intel_iommu *iommu)
810{ 825{
811 void *addr; 826 void *addr;
812 u32 cmd, sts; 827 u32 sts;
813 unsigned long flag; 828 unsigned long flag;
814 829
815 addr = iommu->root_entry; 830 addr = iommu->root_entry;
@@ -817,12 +832,11 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
817 spin_lock_irqsave(&iommu->register_lock, flag); 832 spin_lock_irqsave(&iommu->register_lock, flag);
818 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr)); 833 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
819 834
820 cmd = iommu->gcmd | DMA_GCMD_SRTP; 835 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
821 writel(cmd, iommu->reg + DMAR_GCMD_REG);
822 836
823 /* Make sure hardware complete it */ 837 /* Make sure hardware complete it */
824 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 838 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
825 readl, (sts & DMA_GSTS_RTPS), sts); 839 readl, (sts & DMA_GSTS_RTPS), sts);
826 840
827 spin_unlock_irqrestore(&iommu->register_lock, flag); 841 spin_unlock_irqrestore(&iommu->register_lock, flag);
828} 842}
@@ -834,39 +848,25 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
834 848
835 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) 849 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
836 return; 850 return;
837 val = iommu->gcmd | DMA_GCMD_WBF;
838 851
839 spin_lock_irqsave(&iommu->register_lock, flag); 852 spin_lock_irqsave(&iommu->register_lock, flag);
840 writel(val, iommu->reg + DMAR_GCMD_REG); 853 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
841 854
842 /* Make sure hardware complete it */ 855 /* Make sure hardware complete it */
843 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 856 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
844 readl, (!(val & DMA_GSTS_WBFS)), val); 857 readl, (!(val & DMA_GSTS_WBFS)), val);
845 858
846 spin_unlock_irqrestore(&iommu->register_lock, flag); 859 spin_unlock_irqrestore(&iommu->register_lock, flag);
847} 860}
848 861
849/* return value determine if we need a write buffer flush */ 862/* return value determine if we need a write buffer flush */
850static int __iommu_flush_context(struct intel_iommu *iommu, 863static void __iommu_flush_context(struct intel_iommu *iommu,
851 u16 did, u16 source_id, u8 function_mask, u64 type, 864 u16 did, u16 source_id, u8 function_mask,
852 int non_present_entry_flush) 865 u64 type)
853{ 866{
854 u64 val = 0; 867 u64 val = 0;
855 unsigned long flag; 868 unsigned long flag;
856 869
857 /*
858 * In the non-present entry flush case, if hardware doesn't cache
859 * non-present entry we do nothing and if hardware cache non-present
860 * entry, we flush entries of domain 0 (the domain id is used to cache
861 * any non-present entries)
862 */
863 if (non_present_entry_flush) {
864 if (!cap_caching_mode(iommu->cap))
865 return 1;
866 else
867 did = 0;
868 }
869
870 switch (type) { 870 switch (type) {
871 case DMA_CCMD_GLOBAL_INVL: 871 case DMA_CCMD_GLOBAL_INVL:
872 val = DMA_CCMD_GLOBAL_INVL; 872 val = DMA_CCMD_GLOBAL_INVL;
@@ -891,33 +891,16 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
891 dmar_readq, (!(val & DMA_CCMD_ICC)), val); 891 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
892 892
893 spin_unlock_irqrestore(&iommu->register_lock, flag); 893 spin_unlock_irqrestore(&iommu->register_lock, flag);
894
895 /* flush context entry will implicitly flush write buffer */
896 return 0;
897} 894}
898 895
899/* return value determine if we need a write buffer flush */ 896/* return value determine if we need a write buffer flush */
900static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, 897static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
901 u64 addr, unsigned int size_order, u64 type, 898 u64 addr, unsigned int size_order, u64 type)
902 int non_present_entry_flush)
903{ 899{
904 int tlb_offset = ecap_iotlb_offset(iommu->ecap); 900 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
905 u64 val = 0, val_iva = 0; 901 u64 val = 0, val_iva = 0;
906 unsigned long flag; 902 unsigned long flag;
907 903
908 /*
909 * In the non-present entry flush case, if hardware doesn't cache
910 * non-present entry we do nothing and if hardware cache non-present
911 * entry, we flush entries of domain 0 (the domain id is used to cache
912 * any non-present entries)
913 */
914 if (non_present_entry_flush) {
915 if (!cap_caching_mode(iommu->cap))
916 return 1;
917 else
918 did = 0;
919 }
920
921 switch (type) { 904 switch (type) {
922 case DMA_TLB_GLOBAL_FLUSH: 905 case DMA_TLB_GLOBAL_FLUSH:
923 /* global flush doesn't need set IVA_REG */ 906 /* global flush doesn't need set IVA_REG */
@@ -965,37 +948,101 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
965 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", 948 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
966 (unsigned long long)DMA_TLB_IIRG(type), 949 (unsigned long long)DMA_TLB_IIRG(type),
967 (unsigned long long)DMA_TLB_IAIG(val)); 950 (unsigned long long)DMA_TLB_IAIG(val));
968 /* flush iotlb entry will implicitly flush write buffer */
969 return 0;
970} 951}
971 952
972static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, 953static struct device_domain_info *iommu_support_dev_iotlb(
973 u64 addr, unsigned int pages, int non_present_entry_flush) 954 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
955{
956 int found = 0;
957 unsigned long flags;
958 struct device_domain_info *info;
959 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
960
961 if (!ecap_dev_iotlb_support(iommu->ecap))
962 return NULL;
963
964 if (!iommu->qi)
965 return NULL;
966
967 spin_lock_irqsave(&device_domain_lock, flags);
968 list_for_each_entry(info, &domain->devices, link)
969 if (info->bus == bus && info->devfn == devfn) {
970 found = 1;
971 break;
972 }
973 spin_unlock_irqrestore(&device_domain_lock, flags);
974
975 if (!found || !info->dev)
976 return NULL;
977
978 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
979 return NULL;
980
981 if (!dmar_find_matched_atsr_unit(info->dev))
982 return NULL;
983
984 info->iommu = iommu;
985
986 return info;
987}
988
989static void iommu_enable_dev_iotlb(struct device_domain_info *info)
974{ 990{
975 unsigned int mask; 991 if (!info)
992 return;
993
994 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
995}
996
997static void iommu_disable_dev_iotlb(struct device_domain_info *info)
998{
999 if (!info->dev || !pci_ats_enabled(info->dev))
1000 return;
1001
1002 pci_disable_ats(info->dev);
1003}
1004
1005static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1006 u64 addr, unsigned mask)
1007{
1008 u16 sid, qdep;
1009 unsigned long flags;
1010 struct device_domain_info *info;
1011
1012 spin_lock_irqsave(&device_domain_lock, flags);
1013 list_for_each_entry(info, &domain->devices, link) {
1014 if (!info->dev || !pci_ats_enabled(info->dev))
1015 continue;
1016
1017 sid = info->bus << 8 | info->devfn;
1018 qdep = pci_ats_queue_depth(info->dev);
1019 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1020 }
1021 spin_unlock_irqrestore(&device_domain_lock, flags);
1022}
1023
1024static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1025 u64 addr, unsigned int pages)
1026{
1027 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
976 1028
977 BUG_ON(addr & (~VTD_PAGE_MASK)); 1029 BUG_ON(addr & (~VTD_PAGE_MASK));
978 BUG_ON(pages == 0); 1030 BUG_ON(pages == 0);
979 1031
980 /* Fallback to domain selective flush if no PSI support */
981 if (!cap_pgsel_inv(iommu->cap))
982 return iommu->flush.flush_iotlb(iommu, did, 0, 0,
983 DMA_TLB_DSI_FLUSH,
984 non_present_entry_flush);
985
986 /* 1032 /*
1033 * Fallback to domain selective flush if no PSI support or the size is
1034 * too big.
987 * PSI requires page size to be 2 ^ x, and the base address is naturally 1035 * PSI requires page size to be 2 ^ x, and the base address is naturally
988 * aligned to the size 1036 * aligned to the size
989 */ 1037 */
990 mask = ilog2(__roundup_pow_of_two(pages)); 1038 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
991 /* Fallback to domain selective flush if size is too big */ 1039 iommu->flush.flush_iotlb(iommu, did, 0, 0,
992 if (mask > cap_max_amask_val(iommu->cap)) 1040 DMA_TLB_DSI_FLUSH);
993 return iommu->flush.flush_iotlb(iommu, did, 0, 0, 1041 else
994 DMA_TLB_DSI_FLUSH, non_present_entry_flush); 1042 iommu->flush.flush_iotlb(iommu, did, addr, mask,
995 1043 DMA_TLB_PSI_FLUSH);
996 return iommu->flush.flush_iotlb(iommu, did, addr, mask, 1044 if (did)
997 DMA_TLB_PSI_FLUSH, 1045 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
998 non_present_entry_flush);
999} 1046}
1000 1047
1001static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) 1048static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1021,13 +1068,13 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
1021 unsigned long flags; 1068 unsigned long flags;
1022 1069
1023 spin_lock_irqsave(&iommu->register_lock, flags); 1070 spin_lock_irqsave(&iommu->register_lock, flags);
1024 writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG); 1071 iommu->gcmd |= DMA_GCMD_TE;
1072 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1025 1073
1026 /* Make sure hardware complete it */ 1074 /* Make sure hardware complete it */
1027 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 1075 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1028 readl, (sts & DMA_GSTS_TES), sts); 1076 readl, (sts & DMA_GSTS_TES), sts);
1029 1077
1030 iommu->gcmd |= DMA_GCMD_TE;
1031 spin_unlock_irqrestore(&iommu->register_lock, flags); 1078 spin_unlock_irqrestore(&iommu->register_lock, flags);
1032 return 0; 1079 return 0;
1033} 1080}
@@ -1043,7 +1090,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
1043 1090
1044 /* Make sure hardware complete it */ 1091 /* Make sure hardware complete it */
1045 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 1092 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1046 readl, (!(sts & DMA_GSTS_TES)), sts); 1093 readl, (!(sts & DMA_GSTS_TES)), sts);
1047 1094
1048 spin_unlock_irqrestore(&iommu->register_lock, flag); 1095 spin_unlock_irqrestore(&iommu->register_lock, flag);
1049 return 0; 1096 return 0;
@@ -1325,8 +1372,8 @@ static void domain_exit(struct dmar_domain *domain)
1325 free_domain_mem(domain); 1372 free_domain_mem(domain);
1326} 1373}
1327 1374
1328static int domain_context_mapping_one(struct dmar_domain *domain, 1375static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1329 int segment, u8 bus, u8 devfn) 1376 u8 bus, u8 devfn, int translation)
1330{ 1377{
1331 struct context_entry *context; 1378 struct context_entry *context;
1332 unsigned long flags; 1379 unsigned long flags;
@@ -1336,10 +1383,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1336 unsigned long ndomains; 1383 unsigned long ndomains;
1337 int id; 1384 int id;
1338 int agaw; 1385 int agaw;
1386 struct device_domain_info *info = NULL;
1339 1387
1340 pr_debug("Set context mapping for %02x:%02x.%d\n", 1388 pr_debug("Set context mapping for %02x:%02x.%d\n",
1341 bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 1389 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1390
1342 BUG_ON(!domain->pgd); 1391 BUG_ON(!domain->pgd);
1392 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1393 translation != CONTEXT_TT_MULTI_LEVEL);
1343 1394
1344 iommu = device_to_iommu(segment, bus, devfn); 1395 iommu = device_to_iommu(segment, bus, devfn);
1345 if (!iommu) 1396 if (!iommu)
@@ -1399,21 +1450,44 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1399 } 1450 }
1400 1451
1401 context_set_domain_id(context, id); 1452 context_set_domain_id(context, id);
1402 context_set_address_width(context, iommu->agaw); 1453
1403 context_set_address_root(context, virt_to_phys(pgd)); 1454 if (translation != CONTEXT_TT_PASS_THROUGH) {
1404 context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL); 1455 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1456 translation = info ? CONTEXT_TT_DEV_IOTLB :
1457 CONTEXT_TT_MULTI_LEVEL;
1458 }
1459 /*
1460 * In pass through mode, AW must be programmed to indicate the largest
1461 * AGAW value supported by hardware. And ASR is ignored by hardware.
1462 */
1463 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1464 context_set_address_width(context, iommu->msagaw);
1465 else {
1466 context_set_address_root(context, virt_to_phys(pgd));
1467 context_set_address_width(context, iommu->agaw);
1468 }
1469
1470 context_set_translation_type(context, translation);
1405 context_set_fault_enable(context); 1471 context_set_fault_enable(context);
1406 context_set_present(context); 1472 context_set_present(context);
1407 domain_flush_cache(domain, context, sizeof(*context)); 1473 domain_flush_cache(domain, context, sizeof(*context));
1408 1474
1409 /* it's a non-present to present mapping */ 1475 /*
1410 if (iommu->flush.flush_context(iommu, domain->id, 1476 * It's a non-present to present mapping. If hardware doesn't cache
1411 (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1477 * non-present entry we only need to flush the write-buffer. If the
1412 DMA_CCMD_DEVICE_INVL, 1)) 1478 * _does_ cache non-present entries, then it does so in the special
1479 * domain #0, which we have to flush:
1480 */
1481 if (cap_caching_mode(iommu->cap)) {
1482 iommu->flush.flush_context(iommu, 0,
1483 (((u16)bus) << 8) | devfn,
1484 DMA_CCMD_MASK_NOBIT,
1485 DMA_CCMD_DEVICE_INVL);
1486 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
1487 } else {
1413 iommu_flush_write_buffer(iommu); 1488 iommu_flush_write_buffer(iommu);
1414 else 1489 }
1415 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0); 1490 iommu_enable_dev_iotlb(info);
1416
1417 spin_unlock_irqrestore(&iommu->lock, flags); 1491 spin_unlock_irqrestore(&iommu->lock, flags);
1418 1492
1419 spin_lock_irqsave(&domain->iommu_lock, flags); 1493 spin_lock_irqsave(&domain->iommu_lock, flags);
@@ -1426,13 +1500,15 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1426} 1500}
1427 1501
1428static int 1502static int
1429domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev) 1503domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1504 int translation)
1430{ 1505{
1431 int ret; 1506 int ret;
1432 struct pci_dev *tmp, *parent; 1507 struct pci_dev *tmp, *parent;
1433 1508
1434 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus), 1509 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1435 pdev->bus->number, pdev->devfn); 1510 pdev->bus->number, pdev->devfn,
1511 translation);
1436 if (ret) 1512 if (ret)
1437 return ret; 1513 return ret;
1438 1514
@@ -1446,7 +1522,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
1446 ret = domain_context_mapping_one(domain, 1522 ret = domain_context_mapping_one(domain,
1447 pci_domain_nr(parent->bus), 1523 pci_domain_nr(parent->bus),
1448 parent->bus->number, 1524 parent->bus->number,
1449 parent->devfn); 1525 parent->devfn, translation);
1450 if (ret) 1526 if (ret)
1451 return ret; 1527 return ret;
1452 parent = parent->bus->self; 1528 parent = parent->bus->self;
@@ -1454,12 +1530,14 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
1454 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */ 1530 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1455 return domain_context_mapping_one(domain, 1531 return domain_context_mapping_one(domain,
1456 pci_domain_nr(tmp->subordinate), 1532 pci_domain_nr(tmp->subordinate),
1457 tmp->subordinate->number, 0); 1533 tmp->subordinate->number, 0,
1534 translation);
1458 else /* this is a legacy PCI bridge */ 1535 else /* this is a legacy PCI bridge */
1459 return domain_context_mapping_one(domain, 1536 return domain_context_mapping_one(domain,
1460 pci_domain_nr(tmp->bus), 1537 pci_domain_nr(tmp->bus),
1461 tmp->bus->number, 1538 tmp->bus->number,
1462 tmp->devfn); 1539 tmp->devfn,
1540 translation);
1463} 1541}
1464 1542
1465static int domain_context_mapped(struct pci_dev *pdev) 1543static int domain_context_mapped(struct pci_dev *pdev)
@@ -1540,9 +1618,8 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1540 1618
1541 clear_context_table(iommu, bus, devfn); 1619 clear_context_table(iommu, bus, devfn);
1542 iommu->flush.flush_context(iommu, 0, 0, 0, 1620 iommu->flush.flush_context(iommu, 0, 0, 0,
1543 DMA_CCMD_GLOBAL_INVL, 0); 1621 DMA_CCMD_GLOBAL_INVL);
1544 iommu->flush.flush_iotlb(iommu, 0, 0, 0, 1622 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1545 DMA_TLB_GLOBAL_FLUSH, 0);
1546} 1623}
1547 1624
1548static void domain_remove_dev_info(struct dmar_domain *domain) 1625static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -1561,6 +1638,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
1561 info->dev->dev.archdata.iommu = NULL; 1638 info->dev->dev.archdata.iommu = NULL;
1562 spin_unlock_irqrestore(&device_domain_lock, flags); 1639 spin_unlock_irqrestore(&device_domain_lock, flags);
1563 1640
1641 iommu_disable_dev_iotlb(info);
1564 iommu = device_to_iommu(info->segment, info->bus, info->devfn); 1642 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1565 iommu_detach_dev(iommu, info->bus, info->devfn); 1643 iommu_detach_dev(iommu, info->bus, info->devfn);
1566 free_devinfo_mem(info); 1644 free_devinfo_mem(info);
@@ -1756,7 +1834,7 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
1756 goto error; 1834 goto error;
1757 1835
1758 /* context entry init */ 1836 /* context entry init */
1759 ret = domain_context_mapping(domain, pdev); 1837 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
1760 if (!ret) 1838 if (!ret)
1761 return 0; 1839 return 0;
1762error: 1840error:
@@ -1857,6 +1935,23 @@ static inline void iommu_prepare_isa(void)
1857} 1935}
1858#endif /* !CONFIG_DMAR_FLPY_WA */ 1936#endif /* !CONFIG_DMAR_FLPY_WA */
1859 1937
1938/* Initialize each context entry as pass through.*/
1939static int __init init_context_pass_through(void)
1940{
1941 struct pci_dev *pdev = NULL;
1942 struct dmar_domain *domain;
1943 int ret;
1944
1945 for_each_pci_dev(pdev) {
1946 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1947 ret = domain_context_mapping(domain, pdev,
1948 CONTEXT_TT_PASS_THROUGH);
1949 if (ret)
1950 return ret;
1951 }
1952 return 0;
1953}
1954
1860static int __init init_dmars(void) 1955static int __init init_dmars(void)
1861{ 1956{
1862 struct dmar_drhd_unit *drhd; 1957 struct dmar_drhd_unit *drhd;
@@ -1864,6 +1959,7 @@ static int __init init_dmars(void)
1864 struct pci_dev *pdev; 1959 struct pci_dev *pdev;
1865 struct intel_iommu *iommu; 1960 struct intel_iommu *iommu;
1866 int i, ret; 1961 int i, ret;
1962 int pass_through = 1;
1867 1963
1868 /* 1964 /*
1869 * for each drhd 1965 * for each drhd
@@ -1917,7 +2013,15 @@ static int __init init_dmars(void)
1917 printk(KERN_ERR "IOMMU: allocate root entry failed\n"); 2013 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
1918 goto error; 2014 goto error;
1919 } 2015 }
2016 if (!ecap_pass_through(iommu->ecap))
2017 pass_through = 0;
1920 } 2018 }
2019 if (iommu_pass_through)
2020 if (!pass_through) {
2021 printk(KERN_INFO
2022 "Pass Through is not supported by hardware.\n");
2023 iommu_pass_through = 0;
2024 }
1921 2025
1922 /* 2026 /*
1923 * Start from the sane iommu hardware state. 2027 * Start from the sane iommu hardware state.
@@ -1973,35 +2077,56 @@ static int __init init_dmars(void)
1973 } 2077 }
1974 2078
1975 /* 2079 /*
1976 * For each rmrr 2080 * If pass through is set and enabled, context entries of all pci
1977 * for each dev attached to rmrr 2081 * devices are intialized by pass through translation type.
1978 * do
1979 * locate drhd for dev, alloc domain for dev
1980 * allocate free domain
1981 * allocate page table entries for rmrr
1982 * if context not allocated for bus
1983 * allocate and init context
1984 * set present in root table for this bus
1985 * init context with domain, translation etc
1986 * endfor
1987 * endfor
1988 */ 2082 */
1989 for_each_rmrr_units(rmrr) { 2083 if (iommu_pass_through) {
1990 for (i = 0; i < rmrr->devices_cnt; i++) { 2084 ret = init_context_pass_through();
1991 pdev = rmrr->devices[i]; 2085 if (ret) {
1992 /* some BIOS lists non-exist devices in DMAR table */ 2086 printk(KERN_ERR "IOMMU: Pass through init failed.\n");
1993 if (!pdev) 2087 iommu_pass_through = 0;
1994 continue;
1995 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
1996 if (ret)
1997 printk(KERN_ERR
1998 "IOMMU: mapping reserved region failed\n");
1999 } 2088 }
2000 } 2089 }
2001 2090
2002 iommu_prepare_gfx_mapping(); 2091 /*
2092 * If pass through is not set or not enabled, setup context entries for
2093 * identity mappings for rmrr, gfx, and isa.
2094 */
2095 if (!iommu_pass_through) {
2096 /*
2097 * For each rmrr
2098 * for each dev attached to rmrr
2099 * do
2100 * locate drhd for dev, alloc domain for dev
2101 * allocate free domain
2102 * allocate page table entries for rmrr
2103 * if context not allocated for bus
2104 * allocate and init context
2105 * set present in root table for this bus
2106 * init context with domain, translation etc
2107 * endfor
2108 * endfor
2109 */
2110 for_each_rmrr_units(rmrr) {
2111 for (i = 0; i < rmrr->devices_cnt; i++) {
2112 pdev = rmrr->devices[i];
2113 /*
2114 * some BIOS lists non-exist devices in DMAR
2115 * table.
2116 */
2117 if (!pdev)
2118 continue;
2119 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2120 if (ret)
2121 printk(KERN_ERR
2122 "IOMMU: mapping reserved region failed\n");
2123 }
2124 }
2125
2126 iommu_prepare_gfx_mapping();
2003 2127
2004 iommu_prepare_isa(); 2128 iommu_prepare_isa();
2129 }
2005 2130
2006 /* 2131 /*
2007 * for each drhd 2132 * for each drhd
@@ -2023,10 +2148,8 @@ static int __init init_dmars(void)
2023 2148
2024 iommu_set_root_entry(iommu); 2149 iommu_set_root_entry(iommu);
2025 2150
2026 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, 2151 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2027 0); 2152 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2028 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
2029 0);
2030 iommu_disable_protect_mem_regions(iommu); 2153 iommu_disable_protect_mem_regions(iommu);
2031 2154
2032 ret = iommu_enable_translation(iommu); 2155 ret = iommu_enable_translation(iommu);
@@ -2112,7 +2235,8 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
2112 2235
2113 /* make sure context mapping is ok */ 2236 /* make sure context mapping is ok */
2114 if (unlikely(!domain_context_mapped(pdev))) { 2237 if (unlikely(!domain_context_mapped(pdev))) {
2115 ret = domain_context_mapping(domain, pdev); 2238 ret = domain_context_mapping(domain, pdev,
2239 CONTEXT_TT_MULTI_LEVEL);
2116 if (ret) { 2240 if (ret) {
2117 printk(KERN_ERR 2241 printk(KERN_ERR
2118 "Domain context map for %s failed", 2242 "Domain context map for %s failed",
@@ -2173,10 +2297,11 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2173 if (ret) 2297 if (ret)
2174 goto error; 2298 goto error;
2175 2299
2176 /* it's a non-present to present mapping */ 2300 /* it's a non-present to present mapping. Only flush if caching mode */
2177 ret = iommu_flush_iotlb_psi(iommu, domain->id, 2301 if (cap_caching_mode(iommu->cap))
2178 start_paddr, size >> VTD_PAGE_SHIFT, 1); 2302 iommu_flush_iotlb_psi(iommu, 0, start_paddr,
2179 if (ret) 2303 size >> VTD_PAGE_SHIFT);
2304 else
2180 iommu_flush_write_buffer(iommu); 2305 iommu_flush_write_buffer(iommu);
2181 2306
2182 return start_paddr + ((u64)paddr & (~PAGE_MASK)); 2307 return start_paddr + ((u64)paddr & (~PAGE_MASK));
@@ -2210,15 +2335,22 @@ static void flush_unmaps(void)
2210 if (!iommu) 2335 if (!iommu)
2211 continue; 2336 continue;
2212 2337
2213 if (deferred_flush[i].next) { 2338 if (!deferred_flush[i].next)
2214 iommu->flush.flush_iotlb(iommu, 0, 0, 0, 2339 continue;
2215 DMA_TLB_GLOBAL_FLUSH, 0); 2340
2216 for (j = 0; j < deferred_flush[i].next; j++) { 2341 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2217 __free_iova(&deferred_flush[i].domain[j]->iovad, 2342 DMA_TLB_GLOBAL_FLUSH);
2218 deferred_flush[i].iova[j]); 2343 for (j = 0; j < deferred_flush[i].next; j++) {
2219 } 2344 unsigned long mask;
2220 deferred_flush[i].next = 0; 2345 struct iova *iova = deferred_flush[i].iova[j];
2346
2347 mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
2348 mask = ilog2(mask >> VTD_PAGE_SHIFT);
2349 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2350 iova->pfn_lo << PAGE_SHIFT, mask);
2351 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2221 } 2352 }
2353 deferred_flush[i].next = 0;
2222 } 2354 }
2223 2355
2224 list_size = 0; 2356 list_size = 0;
@@ -2291,9 +2423,8 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2291 /* free page tables */ 2423 /* free page tables */
2292 dma_pte_free_pagetable(domain, start_addr, start_addr + size); 2424 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2293 if (intel_iommu_strict) { 2425 if (intel_iommu_strict) {
2294 if (iommu_flush_iotlb_psi(iommu, 2426 iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
2295 domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0)) 2427 size >> VTD_PAGE_SHIFT);
2296 iommu_flush_write_buffer(iommu);
2297 /* free iova */ 2428 /* free iova */
2298 __free_iova(&domain->iovad, iova); 2429 __free_iova(&domain->iovad, iova);
2299 } else { 2430 } else {
@@ -2384,9 +2515,8 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2384 /* free page tables */ 2515 /* free page tables */
2385 dma_pte_free_pagetable(domain, start_addr, start_addr + size); 2516 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2386 2517
2387 if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr, 2518 iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
2388 size >> VTD_PAGE_SHIFT, 0)) 2519 size >> VTD_PAGE_SHIFT);
2389 iommu_flush_write_buffer(iommu);
2390 2520
2391 /* free iova */ 2521 /* free iova */
2392 __free_iova(&domain->iovad, iova); 2522 __free_iova(&domain->iovad, iova);
@@ -2478,10 +2608,13 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
2478 offset += size; 2608 offset += size;
2479 } 2609 }
2480 2610
2481 /* it's a non-present to present mapping */ 2611 /* it's a non-present to present mapping. Only flush if caching mode */
2482 if (iommu_flush_iotlb_psi(iommu, domain->id, 2612 if (cap_caching_mode(iommu->cap))
2483 start_addr, offset >> VTD_PAGE_SHIFT, 1)) 2613 iommu_flush_iotlb_psi(iommu, 0, start_addr,
2614 offset >> VTD_PAGE_SHIFT);
2615 else
2484 iommu_flush_write_buffer(iommu); 2616 iommu_flush_write_buffer(iommu);
2617
2485 return nelems; 2618 return nelems;
2486} 2619}
2487 2620
@@ -2640,9 +2773,9 @@ static int init_iommu_hw(void)
2640 iommu_set_root_entry(iommu); 2773 iommu_set_root_entry(iommu);
2641 2774
2642 iommu->flush.flush_context(iommu, 0, 0, 0, 2775 iommu->flush.flush_context(iommu, 0, 0, 0,
2643 DMA_CCMD_GLOBAL_INVL, 0); 2776 DMA_CCMD_GLOBAL_INVL);
2644 iommu->flush.flush_iotlb(iommu, 0, 0, 0, 2777 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2645 DMA_TLB_GLOBAL_FLUSH, 0); 2778 DMA_TLB_GLOBAL_FLUSH);
2646 iommu_disable_protect_mem_regions(iommu); 2779 iommu_disable_protect_mem_regions(iommu);
2647 iommu_enable_translation(iommu); 2780 iommu_enable_translation(iommu);
2648 } 2781 }
@@ -2657,9 +2790,9 @@ static void iommu_flush_all(void)
2657 2790
2658 for_each_active_iommu(iommu, drhd) { 2791 for_each_active_iommu(iommu, drhd) {
2659 iommu->flush.flush_context(iommu, 0, 0, 0, 2792 iommu->flush.flush_context(iommu, 0, 0, 0,
2660 DMA_CCMD_GLOBAL_INVL, 0); 2793 DMA_CCMD_GLOBAL_INVL);
2661 iommu->flush.flush_iotlb(iommu, 0, 0, 0, 2794 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2662 DMA_TLB_GLOBAL_FLUSH, 0); 2795 DMA_TLB_GLOBAL_FLUSH);
2663 } 2796 }
2664} 2797}
2665 2798
@@ -2782,7 +2915,7 @@ int __init intel_iommu_init(void)
2782 * Check the need for DMA-remapping initialization now. 2915 * Check the need for DMA-remapping initialization now.
2783 * Above initialization will also be used by Interrupt-remapping. 2916 * Above initialization will also be used by Interrupt-remapping.
2784 */ 2917 */
2785 if (no_iommu || swiotlb || dmar_disabled) 2918 if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
2786 return -ENODEV; 2919 return -ENODEV;
2787 2920
2788 iommu_init_mempool(); 2921 iommu_init_mempool();
@@ -2802,7 +2935,15 @@ int __init intel_iommu_init(void)
2802 2935
2803 init_timer(&unmap_timer); 2936 init_timer(&unmap_timer);
2804 force_iommu = 1; 2937 force_iommu = 1;
2805 dma_ops = &intel_dma_ops; 2938
2939 if (!iommu_pass_through) {
2940 printk(KERN_INFO
2941 "Multi-level page-table translation for DMAR.\n");
2942 dma_ops = &intel_dma_ops;
2943 } else
2944 printk(KERN_INFO
2945 "DMAR: Pass through translation for DMAR.\n");
2946
2806 init_iommu_sysfs(); 2947 init_iommu_sysfs();
2807 2948
2808 register_iommu(&intel_iommu_ops); 2949 register_iommu(&intel_iommu_ops);
@@ -2888,6 +3029,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
2888 info->dev->dev.archdata.iommu = NULL; 3029 info->dev->dev.archdata.iommu = NULL;
2889 spin_unlock_irqrestore(&device_domain_lock, flags); 3030 spin_unlock_irqrestore(&device_domain_lock, flags);
2890 3031
3032 iommu_disable_dev_iotlb(info);
2891 iommu_detach_dev(iommu, info->bus, info->devfn); 3033 iommu_detach_dev(iommu, info->bus, info->devfn);
2892 iommu_detach_dependent_devices(iommu, pdev); 3034 iommu_detach_dependent_devices(iommu, pdev);
2893 free_devinfo_mem(info); 3035 free_devinfo_mem(info);
@@ -2938,6 +3080,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
2938 3080
2939 spin_unlock_irqrestore(&device_domain_lock, flags1); 3081 spin_unlock_irqrestore(&device_domain_lock, flags1);
2940 3082
3083 iommu_disable_dev_iotlb(info);
2941 iommu = device_to_iommu(info->segment, info->bus, info->devfn); 3084 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
2942 iommu_detach_dev(iommu, info->bus, info->devfn); 3085 iommu_detach_dev(iommu, info->bus, info->devfn);
2943 iommu_detach_dependent_devices(iommu, info->dev); 3086 iommu_detach_dependent_devices(iommu, info->dev);
@@ -3142,11 +3285,11 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
3142 return -EFAULT; 3285 return -EFAULT;
3143 } 3286 }
3144 3287
3145 ret = domain_context_mapping(dmar_domain, pdev); 3288 ret = vm_domain_add_dev_info(dmar_domain, pdev);
3146 if (ret) 3289 if (ret)
3147 return ret; 3290 return ret;
3148 3291
3149 ret = vm_domain_add_dev_info(dmar_domain, pdev); 3292 ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
3150 return ret; 3293 return ret;
3151} 3294}
3152 3295
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 3a0cb0bb0593..1e83c8c5f985 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -409,7 +409,7 @@ int free_irte(int irq)
409static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) 409static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
410{ 410{
411 u64 addr; 411 u64 addr;
412 u32 cmd, sts; 412 u32 sts;
413 unsigned long flags; 413 unsigned long flags;
414 414
415 addr = virt_to_phys((void *)iommu->ir_table->base); 415 addr = virt_to_phys((void *)iommu->ir_table->base);
@@ -420,9 +420,8 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
420 (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); 420 (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
421 421
422 /* Set interrupt-remapping table pointer */ 422 /* Set interrupt-remapping table pointer */
423 cmd = iommu->gcmd | DMA_GCMD_SIRTP;
424 iommu->gcmd |= DMA_GCMD_SIRTP; 423 iommu->gcmd |= DMA_GCMD_SIRTP;
425 writel(cmd, iommu->reg + DMAR_GCMD_REG); 424 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
426 425
427 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 426 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
428 readl, (sts & DMA_GSTS_IRTPS), sts); 427 readl, (sts & DMA_GSTS_IRTPS), sts);
@@ -437,9 +436,8 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
437 spin_lock_irqsave(&iommu->register_lock, flags); 436 spin_lock_irqsave(&iommu->register_lock, flags);
438 437
439 /* Enable interrupt-remapping */ 438 /* Enable interrupt-remapping */
440 cmd = iommu->gcmd | DMA_GCMD_IRE;
441 iommu->gcmd |= DMA_GCMD_IRE; 439 iommu->gcmd |= DMA_GCMD_IRE;
442 writel(cmd, iommu->reg + DMAR_GCMD_REG); 440 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
443 441
444 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 442 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
445 readl, (sts & DMA_GSTS_IRES), sts); 443 readl, (sts & DMA_GSTS_IRES), sts);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b497daab3d4a..e3a87210e947 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -5,6 +5,7 @@
5 * 5 *
6 * PCI Express I/O Virtualization (IOV) support. 6 * PCI Express I/O Virtualization (IOV) support.
7 * Single Root IOV 1.0 7 * Single Root IOV 1.0
8 * Address Translation Service 1.0
8 */ 9 */
9 10
10#include <linux/pci.h> 11#include <linux/pci.h>
@@ -110,7 +111,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
110 } 111 }
111 112
112 if (reset) 113 if (reset)
113 pci_execute_reset_function(virtfn); 114 __pci_reset_function(virtfn);
114 115
115 pci_device_add(virtfn, virtfn->bus); 116 pci_device_add(virtfn, virtfn->bus);
116 mutex_unlock(&iov->dev->sriov->lock); 117 mutex_unlock(&iov->dev->sriov->lock);
@@ -164,7 +165,7 @@ static void virtfn_remove(struct pci_dev *dev, int id, int reset)
164 165
165 if (reset) { 166 if (reset) {
166 device_release_driver(&virtfn->dev); 167 device_release_driver(&virtfn->dev);
167 pci_execute_reset_function(virtfn); 168 __pci_reset_function(virtfn);
168 } 169 }
169 170
170 sprintf(buf, "virtfn%u", id); 171 sprintf(buf, "virtfn%u", id);
@@ -487,13 +488,15 @@ found:
487 iov->self = dev; 488 iov->self = dev;
488 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); 489 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
489 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); 490 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
491 if (dev->pcie_type == PCI_EXP_TYPE_RC_END)
492 iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
490 493
491 if (pdev) 494 if (pdev)
492 iov->dev = pci_dev_get(pdev); 495 iov->dev = pci_dev_get(pdev);
493 else { 496 else
494 iov->dev = dev; 497 iov->dev = dev;
495 mutex_init(&iov->lock); 498
496 } 499 mutex_init(&iov->lock);
497 500
498 dev->sriov = iov; 501 dev->sriov = iov;
499 dev->is_physfn = 1; 502 dev->is_physfn = 1;
@@ -513,11 +516,11 @@ static void sriov_release(struct pci_dev *dev)
513{ 516{
514 BUG_ON(dev->sriov->nr_virtfn); 517 BUG_ON(dev->sriov->nr_virtfn);
515 518
516 if (dev == dev->sriov->dev) 519 if (dev != dev->sriov->dev)
517 mutex_destroy(&dev->sriov->lock);
518 else
519 pci_dev_put(dev->sriov->dev); 520 pci_dev_put(dev->sriov->dev);
520 521
522 mutex_destroy(&dev->sriov->lock);
523
521 kfree(dev->sriov); 524 kfree(dev->sriov);
522 dev->sriov = NULL; 525 dev->sriov = NULL;
523} 526}
@@ -679,3 +682,145 @@ irqreturn_t pci_sriov_migration(struct pci_dev *dev)
679 return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE; 682 return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
680} 683}
681EXPORT_SYMBOL_GPL(pci_sriov_migration); 684EXPORT_SYMBOL_GPL(pci_sriov_migration);
685
686static int ats_alloc_one(struct pci_dev *dev, int ps)
687{
688 int pos;
689 u16 cap;
690 struct pci_ats *ats;
691
692 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
693 if (!pos)
694 return -ENODEV;
695
696 ats = kzalloc(sizeof(*ats), GFP_KERNEL);
697 if (!ats)
698 return -ENOMEM;
699
700 ats->pos = pos;
701 ats->stu = ps;
702 pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
703 ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
704 PCI_ATS_MAX_QDEP;
705 dev->ats = ats;
706
707 return 0;
708}
709
710static void ats_free_one(struct pci_dev *dev)
711{
712 kfree(dev->ats);
713 dev->ats = NULL;
714}
715
716/**
717 * pci_enable_ats - enable the ATS capability
718 * @dev: the PCI device
719 * @ps: the IOMMU page shift
720 *
721 * Returns 0 on success, or negative on failure.
722 */
723int pci_enable_ats(struct pci_dev *dev, int ps)
724{
725 int rc;
726 u16 ctrl;
727
728 BUG_ON(dev->ats && dev->ats->is_enabled);
729
730 if (ps < PCI_ATS_MIN_STU)
731 return -EINVAL;
732
733 if (dev->is_physfn || dev->is_virtfn) {
734 struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
735
736 mutex_lock(&pdev->sriov->lock);
737 if (pdev->ats)
738 rc = pdev->ats->stu == ps ? 0 : -EINVAL;
739 else
740 rc = ats_alloc_one(pdev, ps);
741
742 if (!rc)
743 pdev->ats->ref_cnt++;
744 mutex_unlock(&pdev->sriov->lock);
745 if (rc)
746 return rc;
747 }
748
749 if (!dev->is_physfn) {
750 rc = ats_alloc_one(dev, ps);
751 if (rc)
752 return rc;
753 }
754
755 ctrl = PCI_ATS_CTRL_ENABLE;
756 if (!dev->is_virtfn)
757 ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
758 pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
759
760 dev->ats->is_enabled = 1;
761
762 return 0;
763}
764
765/**
766 * pci_disable_ats - disable the ATS capability
767 * @dev: the PCI device
768 */
769void pci_disable_ats(struct pci_dev *dev)
770{
771 u16 ctrl;
772
773 BUG_ON(!dev->ats || !dev->ats->is_enabled);
774
775 pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
776 ctrl &= ~PCI_ATS_CTRL_ENABLE;
777 pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
778
779 dev->ats->is_enabled = 0;
780
781 if (dev->is_physfn || dev->is_virtfn) {
782 struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
783
784 mutex_lock(&pdev->sriov->lock);
785 pdev->ats->ref_cnt--;
786 if (!pdev->ats->ref_cnt)
787 ats_free_one(pdev);
788 mutex_unlock(&pdev->sriov->lock);
789 }
790
791 if (!dev->is_physfn)
792 ats_free_one(dev);
793}
794
795/**
796 * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
797 * @dev: the PCI device
798 *
799 * Returns the queue depth on success, or negative on failure.
800 *
801 * The ATS spec uses 0 in the Invalidate Queue Depth field to
802 * indicate that the function can accept 32 Invalidate Request.
803 * But here we use the `real' values (i.e. 1~32) for the Queue
804 * Depth; and 0 indicates the function shares the Queue with
805 * other functions (doesn't exclusively own a Queue).
806 */
807int pci_ats_queue_depth(struct pci_dev *dev)
808{
809 int pos;
810 u16 cap;
811
812 if (dev->is_virtfn)
813 return 0;
814
815 if (dev->ats)
816 return dev->ats->qdep;
817
818 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
819 if (!pos)
820 return -ENODEV;
821
822 pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
823
824 return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
825 PCI_ATS_MAX_QDEP;
826}
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 362773247fbf..d9f06fbfa0bf 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -75,22 +75,17 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
75} 75}
76#endif 76#endif
77 77
78static void __msi_set_enable(struct pci_dev *dev, int pos, int enable) 78static void msi_set_enable(struct pci_dev *dev, int pos, int enable)
79{ 79{
80 u16 control; 80 u16 control;
81 81
82 if (pos) { 82 BUG_ON(!pos);
83 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
84 control &= ~PCI_MSI_FLAGS_ENABLE;
85 if (enable)
86 control |= PCI_MSI_FLAGS_ENABLE;
87 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
88 }
89}
90 83
91static void msi_set_enable(struct pci_dev *dev, int enable) 84 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
92{ 85 control &= ~PCI_MSI_FLAGS_ENABLE;
93 __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable); 86 if (enable)
87 control |= PCI_MSI_FLAGS_ENABLE;
88 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
94} 89}
95 90
96static void msix_set_enable(struct pci_dev *dev, int enable) 91static void msix_set_enable(struct pci_dev *dev, int enable)
@@ -131,9 +126,6 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
131 * mask all MSI interrupts by clearing the MSI enable bit does not work 126 * mask all MSI interrupts by clearing the MSI enable bit does not work
132 * reliably as devices without an INTx disable bit will then generate a 127 * reliably as devices without an INTx disable bit will then generate a
133 * level IRQ which will never be cleared. 128 * level IRQ which will never be cleared.
134 *
135 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
136 * doesn't support MSI masking.
137 */ 129 */
138static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) 130static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
139{ 131{
@@ -303,7 +295,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
303 pos = entry->msi_attrib.pos; 295 pos = entry->msi_attrib.pos;
304 296
305 pci_intx_for_msi(dev, 0); 297 pci_intx_for_msi(dev, 0);
306 msi_set_enable(dev, 0); 298 msi_set_enable(dev, pos, 0);
307 write_msi_msg(dev->irq, &entry->msg); 299 write_msi_msg(dev->irq, &entry->msg);
308 300
309 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 301 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
@@ -321,22 +313,22 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
321 313
322 if (!dev->msix_enabled) 314 if (!dev->msix_enabled)
323 return; 315 return;
316 BUG_ON(list_empty(&dev->msi_list));
317 entry = list_entry(dev->msi_list.next, struct msi_desc, list);
318 pos = entry->msi_attrib.pos;
319 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
324 320
325 /* route the table */ 321 /* route the table */
326 pci_intx_for_msi(dev, 0); 322 pci_intx_for_msi(dev, 0);
327 msix_set_enable(dev, 0); 323 control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
324 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
328 325
329 list_for_each_entry(entry, &dev->msi_list, list) { 326 list_for_each_entry(entry, &dev->msi_list, list) {
330 write_msi_msg(entry->irq, &entry->msg); 327 write_msi_msg(entry->irq, &entry->msg);
331 msix_mask_irq(entry, entry->masked); 328 msix_mask_irq(entry, entry->masked);
332 } 329 }
333 330
334 BUG_ON(list_empty(&dev->msi_list));
335 entry = list_entry(dev->msi_list.next, struct msi_desc, list);
336 pos = entry->msi_attrib.pos;
337 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
338 control &= ~PCI_MSIX_FLAGS_MASKALL; 331 control &= ~PCI_MSIX_FLAGS_MASKALL;
339 control |= PCI_MSIX_FLAGS_ENABLE;
340 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); 332 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
341} 333}
342 334
@@ -365,9 +357,9 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
365 u16 control; 357 u16 control;
366 unsigned mask; 358 unsigned mask;
367 359
368 msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */
369
370 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 360 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
361 msi_set_enable(dev, pos, 0); /* Disable MSI during set up */
362
371 pci_read_config_word(dev, msi_control_reg(pos), &control); 363 pci_read_config_word(dev, msi_control_reg(pos), &control);
372 /* MSI Entry Initialization */ 364 /* MSI Entry Initialization */
373 entry = alloc_msi_entry(dev); 365 entry = alloc_msi_entry(dev);
@@ -381,7 +373,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
381 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 373 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
382 entry->msi_attrib.pos = pos; 374 entry->msi_attrib.pos = pos;
383 375
384 entry->mask_pos = msi_mask_bits_reg(pos, entry->msi_attrib.is_64); 376 entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
385 /* All MSIs are unmasked by default, Mask them all */ 377 /* All MSIs are unmasked by default, Mask them all */
386 if (entry->msi_attrib.maskbit) 378 if (entry->msi_attrib.maskbit)
387 pci_read_config_dword(dev, entry->mask_pos, &entry->masked); 379 pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
@@ -399,7 +391,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
399 391
400 /* Set MSI enabled bits */ 392 /* Set MSI enabled bits */
401 pci_intx_for_msi(dev, 0); 393 pci_intx_for_msi(dev, 0);
402 msi_set_enable(dev, 1); 394 msi_set_enable(dev, pos, 1);
403 dev->msi_enabled = 1; 395 dev->msi_enabled = 1;
404 396
405 dev->irq = entry->irq; 397 dev->irq = entry->irq;
@@ -427,11 +419,14 @@ static int msix_capability_init(struct pci_dev *dev,
427 u8 bir; 419 u8 bir;
428 void __iomem *base; 420 void __iomem *base;
429 421
430 msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
431
432 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 422 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
423 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
424
425 /* Ensure MSI-X is disabled while it is set up */
426 control &= ~PCI_MSIX_FLAGS_ENABLE;
427 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
428
433 /* Request & Map MSI-X table region */ 429 /* Request & Map MSI-X table region */
434 pci_read_config_word(dev, msi_control_reg(pos), &control);
435 nr_entries = multi_msix_capable(control); 430 nr_entries = multi_msix_capable(control);
436 431
437 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); 432 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
@@ -442,7 +437,6 @@ static int msix_capability_init(struct pci_dev *dev,
442 if (base == NULL) 437 if (base == NULL)
443 return -ENOMEM; 438 return -ENOMEM;
444 439
445 /* MSI-X Table Initialization */
446 for (i = 0; i < nvec; i++) { 440 for (i = 0; i < nvec; i++) {
447 entry = alloc_msi_entry(dev); 441 entry = alloc_msi_entry(dev);
448 if (!entry) 442 if (!entry)
@@ -455,7 +449,6 @@ static int msix_capability_init(struct pci_dev *dev,
455 entry->msi_attrib.default_irq = dev->irq; 449 entry->msi_attrib.default_irq = dev->irq;
456 entry->msi_attrib.pos = pos; 450 entry->msi_attrib.pos = pos;
457 entry->mask_base = base; 451 entry->mask_base = base;
458 msix_mask_irq(entry, 1);
459 452
460 list_add_tail(&entry->list, &dev->msi_list); 453 list_add_tail(&entry->list, &dev->msi_list);
461 } 454 }
@@ -480,22 +473,31 @@ static int msix_capability_init(struct pci_dev *dev,
480 return ret; 473 return ret;
481 } 474 }
482 475
476 /*
477 * Some devices require MSI-X to be enabled before we can touch the
478 * MSI-X registers. We need to mask all the vectors to prevent
479 * interrupts coming in before they're fully set up.
480 */
481 control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
482 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
483
483 i = 0; 484 i = 0;
484 list_for_each_entry(entry, &dev->msi_list, list) { 485 list_for_each_entry(entry, &dev->msi_list, list) {
485 entries[i].vector = entry->irq; 486 entries[i].vector = entry->irq;
486 set_irq_msi(entry->irq, entry); 487 set_irq_msi(entry->irq, entry);
488 j = entries[i].entry;
489 entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
490 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
491 msix_mask_irq(entry, 1);
487 i++; 492 i++;
488 } 493 }
489 /* Set MSI-X enabled bits */ 494
495 /* Set MSI-X enabled bits and unmask the function */
490 pci_intx_for_msi(dev, 0); 496 pci_intx_for_msi(dev, 0);
491 msix_set_enable(dev, 1);
492 dev->msix_enabled = 1; 497 dev->msix_enabled = 1;
493 498
494 list_for_each_entry(entry, &dev->msi_list, list) { 499 control &= ~PCI_MSIX_FLAGS_MASKALL;
495 int vector = entry->msi_attrib.entry_nr; 500 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
496 entry->masked = readl(base + vector * PCI_MSIX_ENTRY_SIZE +
497 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
498 }
499 501
500 return 0; 502 return 0;
501} 503}
@@ -596,17 +598,20 @@ void pci_msi_shutdown(struct pci_dev *dev)
596 struct msi_desc *desc; 598 struct msi_desc *desc;
597 u32 mask; 599 u32 mask;
598 u16 ctrl; 600 u16 ctrl;
601 unsigned pos;
599 602
600 if (!pci_msi_enable || !dev || !dev->msi_enabled) 603 if (!pci_msi_enable || !dev || !dev->msi_enabled)
601 return; 604 return;
602 605
603 msi_set_enable(dev, 0); 606 BUG_ON(list_empty(&dev->msi_list));
607 desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
608 pos = desc->msi_attrib.pos;
609
610 msi_set_enable(dev, pos, 0);
604 pci_intx_for_msi(dev, 1); 611 pci_intx_for_msi(dev, 1);
605 dev->msi_enabled = 0; 612 dev->msi_enabled = 0;
606 613
607 BUG_ON(list_empty(&dev->msi_list)); 614 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
608 desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
609 pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, &ctrl);
610 mask = msi_capable_mask(ctrl); 615 mask = msi_capable_mask(ctrl);
611 msi_mask_irq(desc, mask, ~mask); 616 msi_mask_irq(desc, mask, ~mask);
612 617
@@ -648,10 +653,7 @@ static int msi_free_irqs(struct pci_dev* dev)
648 653
649 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { 654 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
650 if (entry->msi_attrib.is_msix) { 655 if (entry->msi_attrib.is_msix) {
651 writel(1, entry->mask_base + entry->msi_attrib.entry_nr 656 msix_mask_irq(entry, 1);
652 * PCI_MSIX_ENTRY_SIZE
653 + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
654
655 if (list_is_last(&entry->list, &dev->msi_list)) 657 if (list_is_last(&entry->list, &dev->msi_list))
656 iounmap(entry->mask_base); 658 iounmap(entry->mask_base);
657 } 659 }
@@ -691,8 +693,8 @@ int pci_msix_table_size(struct pci_dev *dev)
691 * indicates the successful configuration of MSI-X capability structure 693 * indicates the successful configuration of MSI-X capability structure
692 * with new allocated MSI-X irqs. A return of < 0 indicates a failure. 694 * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
693 * Or a return of > 0 indicates that driver request is exceeding the number 695 * Or a return of > 0 indicates that driver request is exceeding the number
694 * of irqs available. Driver should use the returned value to re-send 696 * of irqs or MSI-X vectors available. Driver should use the returned value to
695 * its request. 697 * re-send its request.
696 **/ 698 **/
697int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) 699int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
698{ 700{
@@ -708,7 +710,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
708 710
709 nr_entries = pci_msix_table_size(dev); 711 nr_entries = pci_msix_table_size(dev);
710 if (nvec > nr_entries) 712 if (nvec > nr_entries)
711 return -EINVAL; 713 return nr_entries;
712 714
713 /* Check for any invalid entries */ 715 /* Check for any invalid entries */
714 for (i = 0; i < nvec; i++) { 716 for (i = 0; i < nvec; i++) {
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index 71f4df2ef654..a0662842550b 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -16,21 +16,15 @@
16#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO) 16#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
17#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI) 17#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI)
18#define msi_data_reg(base, is64bit) \ 18#define msi_data_reg(base, is64bit) \
19 ( (is64bit == 1) ? base+PCI_MSI_DATA_64 : base+PCI_MSI_DATA_32 ) 19 (base + ((is64bit == 1) ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32))
20#define msi_mask_bits_reg(base, is64bit) \ 20#define msi_mask_reg(base, is64bit) \
21 ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4) 21 (base + ((is64bit == 1) ? PCI_MSI_MASK_64 : PCI_MSI_MASK_32))
22#define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE
23#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT)) 22#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
24#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT)) 23#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
25 24
26#define msix_table_offset_reg(base) (base + 0x04) 25#define msix_table_offset_reg(base) (base + 0x04)
27#define msix_pba_offset_reg(base) (base + 0x08) 26#define msix_pba_offset_reg(base) (base + 0x08)
28#define msix_enable(control) control |= PCI_MSIX_FLAGS_ENABLE
29#define msix_disable(control) control &= ~PCI_MSIX_FLAGS_ENABLE
30#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1) 27#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
31#define multi_msix_capable msix_table_size 28#define multi_msix_capable(control) msix_table_size((control))
32#define msix_unmask(address) (address & ~PCI_MSIX_FLAGS_BITMASK)
33#define msix_mask(address) (address | PCI_MSIX_FLAGS_BITMASK)
34#define msix_is_pending(address) (address & PCI_MSIX_FLAGS_PENDMASK)
35 29
36#endif /* MSI_H */ 30#endif /* MSI_H */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 07bbb9b3b93f..6c93af5ced18 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -485,6 +485,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
485 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 485 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
486 pmcsr |= state; 486 pmcsr |= state;
487 break; 487 break;
488 case PCI_D3hot:
489 case PCI_D3cold:
488 case PCI_UNKNOWN: /* Boot-up */ 490 case PCI_UNKNOWN: /* Boot-up */
489 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot 491 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
490 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) 492 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
@@ -1208,7 +1210,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
1208 * Error code depending on the platform is returned if both the platform and 1210 * Error code depending on the platform is returned if both the platform and
1209 * the native mechanism fail to enable the generation of wake-up events 1211 * the native mechanism fail to enable the generation of wake-up events
1210 */ 1212 */
1211int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) 1213int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
1212{ 1214{
1213 int error = 0; 1215 int error = 0;
1214 bool pme_done = false; 1216 bool pme_done = false;
@@ -1287,15 +1289,14 @@ pci_power_t pci_target_state(struct pci_dev *dev)
1287 default: 1289 default:
1288 target_state = state; 1290 target_state = state;
1289 } 1291 }
1292 } else if (!dev->pm_cap) {
1293 target_state = PCI_D0;
1290 } else if (device_may_wakeup(&dev->dev)) { 1294 } else if (device_may_wakeup(&dev->dev)) {
1291 /* 1295 /*
1292 * Find the deepest state from which the device can generate 1296 * Find the deepest state from which the device can generate
1293 * wake-up events, make it the target state and enable device 1297 * wake-up events, make it the target state and enable device
1294 * to generate PME#. 1298 * to generate PME#.
1295 */ 1299 */
1296 if (!dev->pm_cap)
1297 return PCI_POWER_ERROR;
1298
1299 if (dev->pme_support) { 1300 if (dev->pme_support) {
1300 while (target_state 1301 while (target_state
1301 && !(dev->pme_support & (1 << target_state))) 1302 && !(dev->pme_support & (1 << target_state)))
@@ -1532,7 +1533,7 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
1532 if (!pin) 1533 if (!pin)
1533 return -1; 1534 return -1;
1534 1535
1535 while (dev->bus->parent) { 1536 while (!pci_is_root_bus(dev->bus)) {
1536 pin = pci_swizzle_interrupt_pin(dev, pin); 1537 pin = pci_swizzle_interrupt_pin(dev, pin);
1537 dev = dev->bus->self; 1538 dev = dev->bus->self;
1538 } 1539 }
@@ -1552,7 +1553,7 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
1552{ 1553{
1553 u8 pin = *pinp; 1554 u8 pin = *pinp;
1554 1555
1555 while (dev->bus->parent) { 1556 while (!pci_is_root_bus(dev->bus)) {
1556 pin = pci_swizzle_interrupt_pin(dev, pin); 1557 pin = pci_swizzle_interrupt_pin(dev, pin);
1557 dev = dev->bus->self; 1558 dev = dev->bus->self;
1558 } 1559 }
@@ -2058,111 +2059,177 @@ int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
2058EXPORT_SYMBOL(pci_set_dma_seg_boundary); 2059EXPORT_SYMBOL(pci_set_dma_seg_boundary);
2059#endif 2060#endif
2060 2061
2061static int __pcie_flr(struct pci_dev *dev, int probe) 2062static int pcie_flr(struct pci_dev *dev, int probe)
2062{ 2063{
2063 u16 status; 2064 int i;
2065 int pos;
2064 u32 cap; 2066 u32 cap;
2065 int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP); 2067 u16 status;
2066 2068
2067 if (!exppos) 2069 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
2070 if (!pos)
2068 return -ENOTTY; 2071 return -ENOTTY;
2069 pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap); 2072
2073 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
2070 if (!(cap & PCI_EXP_DEVCAP_FLR)) 2074 if (!(cap & PCI_EXP_DEVCAP_FLR))
2071 return -ENOTTY; 2075 return -ENOTTY;
2072 2076
2073 if (probe) 2077 if (probe)
2074 return 0; 2078 return 0;
2075 2079
2076 pci_block_user_cfg_access(dev);
2077
2078 /* Wait for Transaction Pending bit clean */ 2080 /* Wait for Transaction Pending bit clean */
2079 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status); 2081 for (i = 0; i < 4; i++) {
2080 if (!(status & PCI_EXP_DEVSTA_TRPND)) 2082 if (i)
2081 goto transaction_done; 2083 msleep((1 << (i - 1)) * 100);
2082 2084
2083 msleep(100); 2085 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
2084 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status); 2086 if (!(status & PCI_EXP_DEVSTA_TRPND))
2085 if (!(status & PCI_EXP_DEVSTA_TRPND)) 2087 goto clear;
2086 goto transaction_done; 2088 }
2087 2089
2088 dev_info(&dev->dev, "Busy after 100ms while trying to reset; " 2090 dev_err(&dev->dev, "transaction is not cleared; "
2089 "sleeping for 1 second\n"); 2091 "proceeding with reset anyway\n");
2090 ssleep(1); 2092
2091 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status); 2093clear:
2092 if (status & PCI_EXP_DEVSTA_TRPND) 2094 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
2093 dev_info(&dev->dev, "Still busy after 1s; "
2094 "proceeding with reset anyway\n");
2095
2096transaction_done:
2097 pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
2098 PCI_EXP_DEVCTL_BCR_FLR); 2095 PCI_EXP_DEVCTL_BCR_FLR);
2099 mdelay(100); 2096 msleep(100);
2100 2097
2101 pci_unblock_user_cfg_access(dev);
2102 return 0; 2098 return 0;
2103} 2099}
2104 2100
2105static int __pci_af_flr(struct pci_dev *dev, int probe) 2101static int pci_af_flr(struct pci_dev *dev, int probe)
2106{ 2102{
2107 int cappos = pci_find_capability(dev, PCI_CAP_ID_AF); 2103 int i;
2108 u8 status; 2104 int pos;
2109 u8 cap; 2105 u8 cap;
2106 u8 status;
2110 2107
2111 if (!cappos) 2108 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
2109 if (!pos)
2112 return -ENOTTY; 2110 return -ENOTTY;
2113 pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap); 2111
2112 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
2114 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) 2113 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
2115 return -ENOTTY; 2114 return -ENOTTY;
2116 2115
2117 if (probe) 2116 if (probe)
2118 return 0; 2117 return 0;
2119 2118
2120 pci_block_user_cfg_access(dev);
2121
2122 /* Wait for Transaction Pending bit clean */ 2119 /* Wait for Transaction Pending bit clean */
2123 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status); 2120 for (i = 0; i < 4; i++) {
2124 if (!(status & PCI_AF_STATUS_TP)) 2121 if (i)
2125 goto transaction_done; 2122 msleep((1 << (i - 1)) * 100);
2123
2124 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
2125 if (!(status & PCI_AF_STATUS_TP))
2126 goto clear;
2127 }
2126 2128
2129 dev_err(&dev->dev, "transaction is not cleared; "
2130 "proceeding with reset anyway\n");
2131
2132clear:
2133 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
2127 msleep(100); 2134 msleep(100);
2128 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status); 2135
2129 if (!(status & PCI_AF_STATUS_TP))
2130 goto transaction_done;
2131
2132 dev_info(&dev->dev, "Busy after 100ms while trying to"
2133 " reset; sleeping for 1 second\n");
2134 ssleep(1);
2135 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
2136 if (status & PCI_AF_STATUS_TP)
2137 dev_info(&dev->dev, "Still busy after 1s; "
2138 "proceeding with reset anyway\n");
2139
2140transaction_done:
2141 pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
2142 mdelay(100);
2143
2144 pci_unblock_user_cfg_access(dev);
2145 return 0; 2136 return 0;
2146} 2137}
2147 2138
2148static int __pci_reset_function(struct pci_dev *pdev, int probe) 2139static int pci_pm_reset(struct pci_dev *dev, int probe)
2149{ 2140{
2150 int res; 2141 u16 csr;
2142
2143 if (!dev->pm_cap)
2144 return -ENOTTY;
2151 2145
2152 res = __pcie_flr(pdev, probe); 2146 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
2153 if (res != -ENOTTY) 2147 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
2154 return res; 2148 return -ENOTTY;
2155 2149
2156 res = __pci_af_flr(pdev, probe); 2150 if (probe)
2157 if (res != -ENOTTY) 2151 return 0;
2158 return res;
2159 2152
2160 return res; 2153 if (dev->current_state != PCI_D0)
2154 return -EINVAL;
2155
2156 csr &= ~PCI_PM_CTRL_STATE_MASK;
2157 csr |= PCI_D3hot;
2158 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2159 msleep(pci_pm_d3_delay);
2160
2161 csr &= ~PCI_PM_CTRL_STATE_MASK;
2162 csr |= PCI_D0;
2163 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2164 msleep(pci_pm_d3_delay);
2165
2166 return 0;
2167}
2168
2169static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
2170{
2171 u16 ctrl;
2172 struct pci_dev *pdev;
2173
2174 if (dev->subordinate)
2175 return -ENOTTY;
2176
2177 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
2178 if (pdev != dev)
2179 return -ENOTTY;
2180
2181 if (probe)
2182 return 0;
2183
2184 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
2185 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
2186 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2187 msleep(100);
2188
2189 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
2190 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2191 msleep(100);
2192
2193 return 0;
2194}
2195
2196static int pci_dev_reset(struct pci_dev *dev, int probe)
2197{
2198 int rc;
2199
2200 might_sleep();
2201
2202 if (!probe) {
2203 pci_block_user_cfg_access(dev);
2204 /* block PM suspend, driver probe, etc. */
2205 down(&dev->dev.sem);
2206 }
2207
2208 rc = pcie_flr(dev, probe);
2209 if (rc != -ENOTTY)
2210 goto done;
2211
2212 rc = pci_af_flr(dev, probe);
2213 if (rc != -ENOTTY)
2214 goto done;
2215
2216 rc = pci_pm_reset(dev, probe);
2217 if (rc != -ENOTTY)
2218 goto done;
2219
2220 rc = pci_parent_bus_reset(dev, probe);
2221done:
2222 if (!probe) {
2223 up(&dev->dev.sem);
2224 pci_unblock_user_cfg_access(dev);
2225 }
2226
2227 return rc;
2161} 2228}
2162 2229
2163/** 2230/**
2164 * pci_execute_reset_function() - Reset a PCI device function 2231 * __pci_reset_function - reset a PCI device function
2165 * @dev: Device function to reset 2232 * @dev: PCI device to reset
2166 * 2233 *
2167 * Some devices allow an individual function to be reset without affecting 2234 * Some devices allow an individual function to be reset without affecting
2168 * other functions in the same device. The PCI device must be responsive 2235 * other functions in the same device. The PCI device must be responsive
@@ -2174,18 +2241,18 @@ static int __pci_reset_function(struct pci_dev *pdev, int probe)
2174 * device including MSI, bus mastering, BARs, decoding IO and memory spaces, 2241 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
2175 * etc. 2242 * etc.
2176 * 2243 *
2177 * Returns 0 if the device function was successfully reset or -ENOTTY if the 2244 * Returns 0 if the device function was successfully reset or negative if the
2178 * device doesn't support resetting a single function. 2245 * device doesn't support resetting a single function.
2179 */ 2246 */
2180int pci_execute_reset_function(struct pci_dev *dev) 2247int __pci_reset_function(struct pci_dev *dev)
2181{ 2248{
2182 return __pci_reset_function(dev, 0); 2249 return pci_dev_reset(dev, 0);
2183} 2250}
2184EXPORT_SYMBOL_GPL(pci_execute_reset_function); 2251EXPORT_SYMBOL_GPL(__pci_reset_function);
2185 2252
2186/** 2253/**
2187 * pci_reset_function() - quiesce and reset a PCI device function 2254 * pci_reset_function - quiesce and reset a PCI device function
2188 * @dev: Device function to reset 2255 * @dev: PCI device to reset
2189 * 2256 *
2190 * Some devices allow an individual function to be reset without affecting 2257 * Some devices allow an individual function to be reset without affecting
2191 * other functions in the same device. The PCI device must be responsive 2258 * other functions in the same device. The PCI device must be responsive
@@ -2193,32 +2260,33 @@ EXPORT_SYMBOL_GPL(pci_execute_reset_function);
2193 * 2260 *
2194 * This function does not just reset the PCI portion of a device, but 2261 * This function does not just reset the PCI portion of a device, but
2195 * clears all the state associated with the device. This function differs 2262 * clears all the state associated with the device. This function differs
2196 * from pci_execute_reset_function in that it saves and restores device state 2263 * from __pci_reset_function in that it saves and restores device state
2197 * over the reset. 2264 * over the reset.
2198 * 2265 *
2199 * Returns 0 if the device function was successfully reset or -ENOTTY if the 2266 * Returns 0 if the device function was successfully reset or negative if the
2200 * device doesn't support resetting a single function. 2267 * device doesn't support resetting a single function.
2201 */ 2268 */
2202int pci_reset_function(struct pci_dev *dev) 2269int pci_reset_function(struct pci_dev *dev)
2203{ 2270{
2204 int r = __pci_reset_function(dev, 1); 2271 int rc;
2205 2272
2206 if (r < 0) 2273 rc = pci_dev_reset(dev, 1);
2207 return r; 2274 if (rc)
2275 return rc;
2208 2276
2209 if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
2210 disable_irq(dev->irq);
2211 pci_save_state(dev); 2277 pci_save_state(dev);
2212 2278
2279 /*
2280 * both INTx and MSI are disabled after the Interrupt Disable bit
2281 * is set and the Bus Master bit is cleared.
2282 */
2213 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); 2283 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
2214 2284
2215 r = pci_execute_reset_function(dev); 2285 rc = pci_dev_reset(dev, 0);
2216 2286
2217 pci_restore_state(dev); 2287 pci_restore_state(dev);
2218 if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
2219 enable_irq(dev->irq);
2220 2288
2221 return r; 2289 return rc;
2222} 2290}
2223EXPORT_SYMBOL_GPL(pci_reset_function); 2291EXPORT_SYMBOL_GPL(pci_reset_function);
2224 2292
@@ -2591,6 +2659,8 @@ static int __init pci_setup(char *str)
2591 } else if (!strncmp(str, "resource_alignment=", 19)) { 2659 } else if (!strncmp(str, "resource_alignment=", 19)) {
2592 pci_set_resource_alignment_param(str + 19, 2660 pci_set_resource_alignment_param(str + 19,
2593 strlen(str + 19)); 2661 strlen(str + 19));
2662 } else if (!strncmp(str, "ecrc=", 5)) {
2663 pcie_ecrc_get_policy(str + 5);
2594 } else { 2664 } else {
2595 printk(KERN_ERR "PCI: Unknown option `%s'\n", 2665 printk(KERN_ERR "PCI: Unknown option `%s'\n",
2596 str); 2666 str);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d03f6b99f292..f73bcbedf37c 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -229,6 +229,15 @@ struct pci_sriov {
229 u8 __iomem *mstate; /* VF Migration State Array */ 229 u8 __iomem *mstate; /* VF Migration State Array */
230}; 230};
231 231
232/* Address Translation Service */
233struct pci_ats {
234 int pos; /* capability position */
235 int stu; /* Smallest Translation Unit */
236 int qdep; /* Invalidate Queue Depth */
237 int ref_cnt; /* Physical Function reference count */
238 int is_enabled:1; /* Enable bit is set */
239};
240
232#ifdef CONFIG_PCI_IOV 241#ifdef CONFIG_PCI_IOV
233extern int pci_iov_init(struct pci_dev *dev); 242extern int pci_iov_init(struct pci_dev *dev);
234extern void pci_iov_release(struct pci_dev *dev); 243extern void pci_iov_release(struct pci_dev *dev);
@@ -236,6 +245,20 @@ extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
236 enum pci_bar_type *type); 245 enum pci_bar_type *type);
237extern void pci_restore_iov_state(struct pci_dev *dev); 246extern void pci_restore_iov_state(struct pci_dev *dev);
238extern int pci_iov_bus_range(struct pci_bus *bus); 247extern int pci_iov_bus_range(struct pci_bus *bus);
248
249extern int pci_enable_ats(struct pci_dev *dev, int ps);
250extern void pci_disable_ats(struct pci_dev *dev);
251extern int pci_ats_queue_depth(struct pci_dev *dev);
252/**
253 * pci_ats_enabled - query the ATS status
254 * @dev: the PCI device
255 *
256 * Returns 1 if ATS capability is enabled, or 0 if not.
257 */
258static inline int pci_ats_enabled(struct pci_dev *dev)
259{
260 return dev->ats && dev->ats->is_enabled;
261}
239#else 262#else
240static inline int pci_iov_init(struct pci_dev *dev) 263static inline int pci_iov_init(struct pci_dev *dev)
241{ 264{
@@ -257,6 +280,22 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
257{ 280{
258 return 0; 281 return 0;
259} 282}
283
284static inline int pci_enable_ats(struct pci_dev *dev, int ps)
285{
286 return -ENODEV;
287}
288static inline void pci_disable_ats(struct pci_dev *dev)
289{
290}
291static inline int pci_ats_queue_depth(struct pci_dev *dev)
292{
293 return -ENODEV;
294}
295static inline int pci_ats_enabled(struct pci_dev *dev)
296{
297 return 0;
298}
260#endif /* CONFIG_PCI_IOV */ 299#endif /* CONFIG_PCI_IOV */
261 300
262#endif /* DRIVERS_PCI_H */ 301#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/Kconfig b/drivers/pci/pcie/aer/Kconfig
index c3bde588aa13..50e94e02378a 100644
--- a/drivers/pci/pcie/aer/Kconfig
+++ b/drivers/pci/pcie/aer/Kconfig
@@ -10,3 +10,18 @@ config PCIEAER
10 This enables PCI Express Root Port Advanced Error Reporting 10 This enables PCI Express Root Port Advanced Error Reporting
11 (AER) driver support. Error reporting messages sent to Root 11 (AER) driver support. Error reporting messages sent to Root
12 Port will be handled by PCI Express AER driver. 12 Port will be handled by PCI Express AER driver.
13
14
15#
16# PCI Express ECRC
17#
18config PCIE_ECRC
19 bool "PCI Express ECRC settings control"
20 depends on PCIEAER
21 help
22 Used to override firmware/bios settings for PCI Express ECRC
23 (transaction layer end-to-end CRC checking).
24
25 When in doubt, say N.
26
27source "drivers/pci/pcie/aer/Kconfig.debug"
diff --git a/drivers/pci/pcie/aer/Kconfig.debug b/drivers/pci/pcie/aer/Kconfig.debug
new file mode 100644
index 000000000000..b8c925c1f6aa
--- /dev/null
+++ b/drivers/pci/pcie/aer/Kconfig.debug
@@ -0,0 +1,18 @@
1#
2# PCI Express Root Port Device AER Debug Configuration
3#
4
5config PCIEAER_INJECT
6 tristate "PCIE AER error injector support"
7 depends on PCIEAER
8 default n
9 help
10 This enables PCI Express Root Port Advanced Error Reporting
11 (AER) software error injector.
12
13 Debuging PCIE AER code is quite difficult because it is hard
14 to trigger various real hardware errors. Software based
15 error injection can fake almost all kinds of errors with the
16 help of a user space helper tool aer-inject, which can be
17 gotten from:
18 http://www.kernel.org/pub/linux/utils/pci/aer-inject/
diff --git a/drivers/pci/pcie/aer/Makefile b/drivers/pci/pcie/aer/Makefile
index 8da3bd8455a8..2cba67510dc8 100644
--- a/drivers/pci/pcie/aer/Makefile
+++ b/drivers/pci/pcie/aer/Makefile
@@ -4,6 +4,9 @@
4 4
5obj-$(CONFIG_PCIEAER) += aerdriver.o 5obj-$(CONFIG_PCIEAER) += aerdriver.o
6 6
7obj-$(CONFIG_PCIE_ECRC) += ecrc.o
8
7aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o 9aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o
8aerdriver-$(CONFIG_ACPI) += aerdrv_acpi.o 10aerdriver-$(CONFIG_ACPI) += aerdrv_acpi.o
9 11
12obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
new file mode 100644
index 000000000000..d92ae21a59d8
--- /dev/null
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -0,0 +1,473 @@
1/*
2 * PCIE AER software error injection support.
3 *
4 * Debuging PCIE AER code is quite difficult because it is hard to
5 * trigger various real hardware errors. Software based error
6 * injection can fake almost all kinds of errors with the help of a
7 * user space helper tool aer-inject, which can be gotten from:
8 * http://www.kernel.org/pub/linux/utils/pci/aer-inject/
9 *
10 * Copyright 2009 Intel Corporation.
11 * Huang Ying <ying.huang@intel.com>
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; version 2
16 * of the License.
17 *
18 */
19
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/miscdevice.h>
23#include <linux/pci.h>
24#include <linux/fs.h>
25#include <asm/uaccess.h>
26#include "aerdrv.h"
27
28struct aer_error_inj
29{
30 u8 bus;
31 u8 dev;
32 u8 fn;
33 u32 uncor_status;
34 u32 cor_status;
35 u32 header_log0;
36 u32 header_log1;
37 u32 header_log2;
38 u32 header_log3;
39};
40
41struct aer_error
42{
43 struct list_head list;
44 unsigned int bus;
45 unsigned int devfn;
46 int pos_cap_err;
47
48 u32 uncor_status;
49 u32 cor_status;
50 u32 header_log0;
51 u32 header_log1;
52 u32 header_log2;
53 u32 header_log3;
54 u32 root_status;
55 u32 source_id;
56};
57
58struct pci_bus_ops
59{
60 struct list_head list;
61 struct pci_bus *bus;
62 struct pci_ops *ops;
63};
64
65static LIST_HEAD(einjected);
66
67static LIST_HEAD(pci_bus_ops_list);
68
69/* Protect einjected and pci_bus_ops_list */
70static DEFINE_SPINLOCK(inject_lock);
71
72static void aer_error_init(struct aer_error *err, unsigned int bus,
73 unsigned int devfn, int pos_cap_err)
74{
75 INIT_LIST_HEAD(&err->list);
76 err->bus = bus;
77 err->devfn = devfn;
78 err->pos_cap_err = pos_cap_err;
79}
80
81/* inject_lock must be held before calling */
82static struct aer_error *__find_aer_error(unsigned int bus, unsigned int devfn)
83{
84 struct aer_error *err;
85
86 list_for_each_entry(err, &einjected, list) {
87 if (bus == err->bus && devfn == err->devfn)
88 return err;
89 }
90 return NULL;
91}
92
93/* inject_lock must be held before calling */
94static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
95{
96 return __find_aer_error(dev->bus->number, dev->devfn);
97}
98
99/* inject_lock must be held before calling */
100static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
101{
102 struct pci_bus_ops *bus_ops;
103
104 list_for_each_entry(bus_ops, &pci_bus_ops_list, list) {
105 if (bus_ops->bus == bus)
106 return bus_ops->ops;
107 }
108 return NULL;
109}
110
111static struct pci_bus_ops *pci_bus_ops_pop(void)
112{
113 unsigned long flags;
114 struct pci_bus_ops *bus_ops = NULL;
115
116 spin_lock_irqsave(&inject_lock, flags);
117 if (list_empty(&pci_bus_ops_list))
118 bus_ops = NULL;
119 else {
120 struct list_head *lh = pci_bus_ops_list.next;
121 list_del(lh);
122 bus_ops = list_entry(lh, struct pci_bus_ops, list);
123 }
124 spin_unlock_irqrestore(&inject_lock, flags);
125 return bus_ops;
126}
127
128static u32 *find_pci_config_dword(struct aer_error *err, int where,
129 int *prw1cs)
130{
131 int rw1cs = 0;
132 u32 *target = NULL;
133
134 if (err->pos_cap_err == -1)
135 return NULL;
136
137 switch (where - err->pos_cap_err) {
138 case PCI_ERR_UNCOR_STATUS:
139 target = &err->uncor_status;
140 rw1cs = 1;
141 break;
142 case PCI_ERR_COR_STATUS:
143 target = &err->cor_status;
144 rw1cs = 1;
145 break;
146 case PCI_ERR_HEADER_LOG:
147 target = &err->header_log0;
148 break;
149 case PCI_ERR_HEADER_LOG+4:
150 target = &err->header_log1;
151 break;
152 case PCI_ERR_HEADER_LOG+8:
153 target = &err->header_log2;
154 break;
155 case PCI_ERR_HEADER_LOG+12:
156 target = &err->header_log3;
157 break;
158 case PCI_ERR_ROOT_STATUS:
159 target = &err->root_status;
160 rw1cs = 1;
161 break;
162 case PCI_ERR_ROOT_COR_SRC:
163 target = &err->source_id;
164 break;
165 }
166 if (prw1cs)
167 *prw1cs = rw1cs;
168 return target;
169}
170
171static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
172 int size, u32 *val)
173{
174 u32 *sim;
175 struct aer_error *err;
176 unsigned long flags;
177 struct pci_ops *ops;
178
179 spin_lock_irqsave(&inject_lock, flags);
180 if (size != sizeof(u32))
181 goto out;
182 err = __find_aer_error(bus->number, devfn);
183 if (!err)
184 goto out;
185
186 sim = find_pci_config_dword(err, where, NULL);
187 if (sim) {
188 *val = *sim;
189 spin_unlock_irqrestore(&inject_lock, flags);
190 return 0;
191 }
192out:
193 ops = __find_pci_bus_ops(bus);
194 spin_unlock_irqrestore(&inject_lock, flags);
195 return ops->read(bus, devfn, where, size, val);
196}
197
198int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size,
199 u32 val)
200{
201 u32 *sim;
202 struct aer_error *err;
203 unsigned long flags;
204 int rw1cs;
205 struct pci_ops *ops;
206
207 spin_lock_irqsave(&inject_lock, flags);
208 if (size != sizeof(u32))
209 goto out;
210 err = __find_aer_error(bus->number, devfn);
211 if (!err)
212 goto out;
213
214 sim = find_pci_config_dword(err, where, &rw1cs);
215 if (sim) {
216 if (rw1cs)
217 *sim ^= val;
218 else
219 *sim = val;
220 spin_unlock_irqrestore(&inject_lock, flags);
221 return 0;
222 }
223out:
224 ops = __find_pci_bus_ops(bus);
225 spin_unlock_irqrestore(&inject_lock, flags);
226 return ops->write(bus, devfn, where, size, val);
227}
228
229static struct pci_ops pci_ops_aer = {
230 .read = pci_read_aer,
231 .write = pci_write_aer,
232};
233
234static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
235 struct pci_bus *bus,
236 struct pci_ops *ops)
237{
238 INIT_LIST_HEAD(&bus_ops->list);
239 bus_ops->bus = bus;
240 bus_ops->ops = ops;
241}
242
243static int pci_bus_set_aer_ops(struct pci_bus *bus)
244{
245 struct pci_ops *ops;
246 struct pci_bus_ops *bus_ops;
247 unsigned long flags;
248
249 bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
250 if (!bus_ops)
251 return -ENOMEM;
252 ops = pci_bus_set_ops(bus, &pci_ops_aer);
253 spin_lock_irqsave(&inject_lock, flags);
254 if (ops == &pci_ops_aer)
255 goto out;
256 pci_bus_ops_init(bus_ops, bus, ops);
257 list_add(&bus_ops->list, &pci_bus_ops_list);
258 bus_ops = NULL;
259out:
260 spin_unlock_irqrestore(&inject_lock, flags);
261 if (bus_ops)
262 kfree(bus_ops);
263 return 0;
264}
265
266static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
267{
268 while (1) {
269 if (!dev->is_pcie)
270 break;
271 if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
272 return dev;
273 if (!dev->bus->self)
274 break;
275 dev = dev->bus->self;
276 }
277 return NULL;
278}
279
280static int find_aer_device_iter(struct device *device, void *data)
281{
282 struct pcie_device **result = data;
283 struct pcie_device *pcie_dev;
284
285 if (device->bus == &pcie_port_bus_type) {
286 pcie_dev = to_pcie_device(device);
287 if (pcie_dev->service & PCIE_PORT_SERVICE_AER) {
288 *result = pcie_dev;
289 return 1;
290 }
291 }
292 return 0;
293}
294
295static int find_aer_device(struct pci_dev *dev, struct pcie_device **result)
296{
297 return device_for_each_child(&dev->dev, result, find_aer_device_iter);
298}
299
300static int aer_inject(struct aer_error_inj *einj)
301{
302 struct aer_error *err, *rperr;
303 struct aer_error *err_alloc = NULL, *rperr_alloc = NULL;
304 struct pci_dev *dev, *rpdev;
305 struct pcie_device *edev;
306 unsigned long flags;
307 unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
308 int pos_cap_err, rp_pos_cap_err;
309 u32 sever;
310 int ret = 0;
311
312 dev = pci_get_bus_and_slot(einj->bus, devfn);
313 if (!dev)
314 return -EINVAL;
315 rpdev = pcie_find_root_port(dev);
316 if (!rpdev) {
317 ret = -EINVAL;
318 goto out_put;
319 }
320
321 pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
322 if (!pos_cap_err) {
323 ret = -EIO;
324 goto out_put;
325 }
326 pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
327
328 rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
329 if (!rp_pos_cap_err) {
330 ret = -EIO;
331 goto out_put;
332 }
333
334 err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
335 if (!err_alloc) {
336 ret = -ENOMEM;
337 goto out_put;
338 }
339 rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
340 if (!rperr_alloc) {
341 ret = -ENOMEM;
342 goto out_put;
343 }
344
345 spin_lock_irqsave(&inject_lock, flags);
346
347 err = __find_aer_error_by_dev(dev);
348 if (!err) {
349 err = err_alloc;
350 err_alloc = NULL;
351 aer_error_init(err, einj->bus, devfn, pos_cap_err);
352 list_add(&err->list, &einjected);
353 }
354 err->uncor_status |= einj->uncor_status;
355 err->cor_status |= einj->cor_status;
356 err->header_log0 = einj->header_log0;
357 err->header_log1 = einj->header_log1;
358 err->header_log2 = einj->header_log2;
359 err->header_log3 = einj->header_log3;
360
361 rperr = __find_aer_error_by_dev(rpdev);
362 if (!rperr) {
363 rperr = rperr_alloc;
364 rperr_alloc = NULL;
365 aer_error_init(rperr, rpdev->bus->number, rpdev->devfn,
366 rp_pos_cap_err);
367 list_add(&rperr->list, &einjected);
368 }
369 if (einj->cor_status) {
370 if (rperr->root_status & PCI_ERR_ROOT_COR_RCV)
371 rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
372 else
373 rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
374 rperr->source_id &= 0xffff0000;
375 rperr->source_id |= (einj->bus << 8) | devfn;
376 }
377 if (einj->uncor_status) {
378 if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
379 rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
380 if (sever & einj->uncor_status) {
381 rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
382 if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
383 rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
384 } else
385 rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
386 rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
387 rperr->source_id &= 0x0000ffff;
388 rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
389 }
390 spin_unlock_irqrestore(&inject_lock, flags);
391
392 ret = pci_bus_set_aer_ops(dev->bus);
393 if (ret)
394 goto out_put;
395 ret = pci_bus_set_aer_ops(rpdev->bus);
396 if (ret)
397 goto out_put;
398
399 if (find_aer_device(rpdev, &edev))
400 aer_irq(-1, edev);
401 else
402 ret = -EINVAL;
403out_put:
404 if (err_alloc)
405 kfree(err_alloc);
406 if (rperr_alloc)
407 kfree(rperr_alloc);
408 pci_dev_put(dev);
409 return ret;
410}
411
412static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
413 size_t usize, loff_t *off)
414{
415 struct aer_error_inj einj;
416 int ret;
417
418 if (!capable(CAP_SYS_ADMIN))
419 return -EPERM;
420
421 if (usize != sizeof(struct aer_error_inj))
422 return -EINVAL;
423
424 if (copy_from_user(&einj, ubuf, usize))
425 return -EFAULT;
426
427 ret = aer_inject(&einj);
428 return ret ? ret : usize;
429}
430
431static const struct file_operations aer_inject_fops = {
432 .write = aer_inject_write,
433 .owner = THIS_MODULE,
434};
435
436static struct miscdevice aer_inject_device = {
437 .minor = MISC_DYNAMIC_MINOR,
438 .name = "aer_inject",
439 .fops = &aer_inject_fops,
440};
441
442static int __init aer_inject_init(void)
443{
444 return misc_register(&aer_inject_device);
445}
446
447static void __exit aer_inject_exit(void)
448{
449 struct aer_error *err, *err_next;
450 unsigned long flags;
451 struct pci_bus_ops *bus_ops;
452
453 misc_deregister(&aer_inject_device);
454
455 while ((bus_ops = pci_bus_ops_pop())) {
456 pci_bus_set_ops(bus_ops->bus, bus_ops->ops);
457 kfree(bus_ops);
458 }
459
460 spin_lock_irqsave(&inject_lock, flags);
461 list_for_each_entry_safe(err, err_next,
462 &pci_bus_ops_list, list) {
463 list_del(&err->list);
464 kfree(err);
465 }
466 spin_unlock_irqrestore(&inject_lock, flags);
467}
468
469module_init(aer_inject_init);
470module_exit(aer_inject_exit);
471
472MODULE_DESCRIPTION("PCIE AER software error injector");
473MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 32ade5af927e..4770f13b3ca1 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -77,7 +77,7 @@ void pci_no_aer(void)
77 * 77 *
78 * Invoked when Root Port detects AER messages. 78 * Invoked when Root Port detects AER messages.
79 **/ 79 **/
80static irqreturn_t aer_irq(int irq, void *context) 80irqreturn_t aer_irq(int irq, void *context)
81{ 81{
82 unsigned int status, id; 82 unsigned int status, id;
83 struct pcie_device *pdev = (struct pcie_device *)context; 83 struct pcie_device *pdev = (struct pcie_device *)context;
@@ -126,6 +126,7 @@ static irqreturn_t aer_irq(int irq, void *context)
126 126
127 return IRQ_HANDLED; 127 return IRQ_HANDLED;
128} 128}
129EXPORT_SYMBOL_GPL(aer_irq);
129 130
130/** 131/**
131 * aer_alloc_rpc - allocate Root Port data structure 132 * aer_alloc_rpc - allocate Root Port data structure
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index aa14482a4779..bbd7428ca2d0 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -11,6 +11,7 @@
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/pcieport_if.h> 12#include <linux/pcieport_if.h>
13#include <linux/aer.h> 13#include <linux/aer.h>
14#include <linux/interrupt.h>
14 15
15#define AER_NONFATAL 0 16#define AER_NONFATAL 0
16#define AER_FATAL 1 17#define AER_FATAL 1
@@ -56,7 +57,11 @@ struct header_log_regs {
56 unsigned int dw3; 57 unsigned int dw3;
57}; 58};
58 59
60#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
59struct aer_err_info { 61struct aer_err_info {
62 struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
63 int error_dev_num;
64 u16 id;
60 int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */ 65 int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */
61 int flags; 66 int flags;
62 unsigned int status; /* COR/UNCOR Error Status */ 67 unsigned int status; /* COR/UNCOR Error Status */
@@ -120,6 +125,7 @@ extern void aer_delete_rootport(struct aer_rpc *rpc);
120extern int aer_init(struct pcie_device *dev); 125extern int aer_init(struct pcie_device *dev);
121extern void aer_isr(struct work_struct *work); 126extern void aer_isr(struct work_struct *work);
122extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); 127extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
128extern irqreturn_t aer_irq(int irq, void *context);
123 129
124#ifdef CONFIG_ACPI 130#ifdef CONFIG_ACPI
125extern int aer_osc_setup(struct pcie_device *pciedev); 131extern int aer_osc_setup(struct pcie_device *pciedev);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 307452f30035..3d8872704a58 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -26,7 +26,9 @@
26#include "aerdrv.h" 26#include "aerdrv.h"
27 27
28static int forceload; 28static int forceload;
29static int nosourceid;
29module_param(forceload, bool, 0); 30module_param(forceload, bool, 0);
31module_param(nosourceid, bool, 0);
30 32
31int pci_enable_pcie_error_reporting(struct pci_dev *dev) 33int pci_enable_pcie_error_reporting(struct pci_dev *dev)
32{ 34{
@@ -109,19 +111,23 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
109#endif /* 0 */ 111#endif /* 0 */
110 112
111 113
112static void set_device_error_reporting(struct pci_dev *dev, void *data) 114static int set_device_error_reporting(struct pci_dev *dev, void *data)
113{ 115{
114 bool enable = *((bool *)data); 116 bool enable = *((bool *)data);
115 117
116 if (dev->pcie_type != PCIE_RC_PORT && 118 if (dev->pcie_type == PCIE_RC_PORT ||
117 dev->pcie_type != PCIE_SW_UPSTREAM_PORT && 119 dev->pcie_type == PCIE_SW_UPSTREAM_PORT ||
118 dev->pcie_type != PCIE_SW_DOWNSTREAM_PORT) 120 dev->pcie_type == PCIE_SW_DOWNSTREAM_PORT) {
119 return; 121 if (enable)
122 pci_enable_pcie_error_reporting(dev);
123 else
124 pci_disable_pcie_error_reporting(dev);
125 }
120 126
121 if (enable) 127 if (enable)
122 pci_enable_pcie_error_reporting(dev); 128 pcie_set_ecrc_checking(dev);
123 else 129
124 pci_disable_pcie_error_reporting(dev); 130 return 0;
125} 131}
126 132
127/** 133/**
@@ -139,73 +145,148 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
139 pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable); 145 pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
140} 146}
141 147
142static int find_device_iter(struct device *device, void *data) 148static inline int compare_device_id(struct pci_dev *dev,
149 struct aer_err_info *e_info)
143{ 150{
144 struct pci_dev *dev; 151 if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) {
145 u16 id = *(unsigned long *)data; 152 /*
146 u8 secondary, subordinate, d_bus = id >> 8; 153 * Device ID match
154 */
155 return 1;
156 }
147 157
148 if (device->bus == &pci_bus_type) { 158 return 0;
149 dev = to_pci_dev(device); 159}
150 if (id == ((dev->bus->number << 8) | dev->devfn)) { 160
151 /* 161static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
152 * Device ID match 162{
153 */ 163 if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
154 *(unsigned long*)data = (unsigned long)device; 164 e_info->dev[e_info->error_dev_num] = dev;
155 return 1; 165 e_info->error_dev_num++;
156 } 166 return 1;
167 } else
168 return 0;
169}
170
171
172#define PCI_BUS(x) (((x) >> 8) & 0xff)
173
174static int find_device_iter(struct pci_dev *dev, void *data)
175{
176 int pos;
177 u32 status;
178 u32 mask;
179 u16 reg16;
180 int result;
181 struct aer_err_info *e_info = (struct aer_err_info *)data;
182
183 /*
184 * When bus id is equal to 0, it might be a bad id
185 * reported by root port.
186 */
187 if (!nosourceid && (PCI_BUS(e_info->id) != 0)) {
188 result = compare_device_id(dev, e_info);
189 if (result)
190 add_error_device(e_info, dev);
157 191
158 /* 192 /*
159 * If device is P2P, check if it is an upstream? 193 * If there is no multiple error, we stop
194 * or continue based on the id comparing.
160 */ 195 */
161 if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) { 196 if (!(e_info->flags & AER_MULTI_ERROR_VALID_FLAG))
162 pci_read_config_byte(dev, PCI_SECONDARY_BUS, 197 return result;
163 &secondary); 198
164 pci_read_config_byte(dev, PCI_SUBORDINATE_BUS, 199 /*
165 &subordinate); 200 * If there are multiple errors and id does match,
166 if (d_bus >= secondary && d_bus <= subordinate) { 201 * We need continue to search other devices under
167 *(unsigned long*)data = (unsigned long)device; 202 * the root port. Return 0 means that.
168 return 1; 203 */
169 } 204 if (result)
205 return 0;
206 }
207
208 /*
209 * When either
210 * 1) nosourceid==y;
211 * 2) bus id is equal to 0. Some ports might lose the bus
212 * id of error source id;
213 * 3) There are multiple errors and prior id comparing fails;
214 * We check AER status registers to find the initial reporter.
215 */
216 if (atomic_read(&dev->enable_cnt) == 0)
217 return 0;
218 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
219 if (!pos)
220 return 0;
221 /* Check if AER is enabled */
222 pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
223 if (!(reg16 & (
224 PCI_EXP_DEVCTL_CERE |
225 PCI_EXP_DEVCTL_NFERE |
226 PCI_EXP_DEVCTL_FERE |
227 PCI_EXP_DEVCTL_URRE)))
228 return 0;
229 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
230 if (!pos)
231 return 0;
232
233 status = 0;
234 mask = 0;
235 if (e_info->severity == AER_CORRECTABLE) {
236 pci_read_config_dword(dev,
237 pos + PCI_ERR_COR_STATUS,
238 &status);
239 pci_read_config_dword(dev,
240 pos + PCI_ERR_COR_MASK,
241 &mask);
242 if (status & ERR_CORRECTABLE_ERROR_MASK & ~mask) {
243 add_error_device(e_info, dev);
244 goto added;
245 }
246 } else {
247 pci_read_config_dword(dev,
248 pos + PCI_ERR_UNCOR_STATUS,
249 &status);
250 pci_read_config_dword(dev,
251 pos + PCI_ERR_UNCOR_MASK,
252 &mask);
253 if (status & ERR_UNCORRECTABLE_ERROR_MASK & ~mask) {
254 add_error_device(e_info, dev);
255 goto added;
170 } 256 }
171 } 257 }
172 258
173 return 0; 259 return 0;
260
261added:
262 if (e_info->flags & AER_MULTI_ERROR_VALID_FLAG)
263 return 0;
264 else
265 return 1;
174} 266}
175 267
176/** 268/**
177 * find_source_device - search through device hierarchy for source device 269 * find_source_device - search through device hierarchy for source device
178 * @parent: pointer to Root Port pci_dev data structure 270 * @parent: pointer to Root Port pci_dev data structure
179 * @id: device ID of agent who sends an error message to this Root Port 271 * @err_info: including detailed error information such like id
180 * 272 *
181 * Invoked when error is detected at the Root Port. 273 * Invoked when error is detected at the Root Port.
182 */ 274 */
183static struct device* find_source_device(struct pci_dev *parent, u16 id) 275static void find_source_device(struct pci_dev *parent,
276 struct aer_err_info *e_info)
184{ 277{
185 struct pci_dev *dev = parent; 278 struct pci_dev *dev = parent;
186 struct device *device; 279 int result;
187 unsigned long device_addr;
188 int status;
189 280
190 /* Is Root Port an agent that sends error message? */ 281 /* Is Root Port an agent that sends error message? */
191 if (id == ((dev->bus->number << 8) | dev->devfn)) 282 result = find_device_iter(dev, e_info);
192 return &dev->dev; 283 if (result)
193 284 return;
194 do {
195 device_addr = id;
196 if ((status = device_for_each_child(&dev->dev,
197 &device_addr, find_device_iter))) {
198 device = (struct device*)device_addr;
199 dev = to_pci_dev(device);
200 if (id == ((dev->bus->number << 8) | dev->devfn))
201 return device;
202 }
203 }while (status);
204 285
205 return NULL; 286 pci_walk_bus(parent->subordinate, find_device_iter, e_info);
206} 287}
207 288
208static void report_error_detected(struct pci_dev *dev, void *data) 289static int report_error_detected(struct pci_dev *dev, void *data)
209{ 290{
210 pci_ers_result_t vote; 291 pci_ers_result_t vote;
211 struct pci_error_handlers *err_handler; 292 struct pci_error_handlers *err_handler;
@@ -230,16 +311,16 @@ static void report_error_detected(struct pci_dev *dev, void *data)
230 dev->driver ? 311 dev->driver ?
231 "no AER-aware driver" : "no driver"); 312 "no AER-aware driver" : "no driver");
232 } 313 }
233 return; 314 return 0;
234 } 315 }
235 316
236 err_handler = dev->driver->err_handler; 317 err_handler = dev->driver->err_handler;
237 vote = err_handler->error_detected(dev, result_data->state); 318 vote = err_handler->error_detected(dev, result_data->state);
238 result_data->result = merge_result(result_data->result, vote); 319 result_data->result = merge_result(result_data->result, vote);
239 return; 320 return 0;
240} 321}
241 322
242static void report_mmio_enabled(struct pci_dev *dev, void *data) 323static int report_mmio_enabled(struct pci_dev *dev, void *data)
243{ 324{
244 pci_ers_result_t vote; 325 pci_ers_result_t vote;
245 struct pci_error_handlers *err_handler; 326 struct pci_error_handlers *err_handler;
@@ -249,15 +330,15 @@ static void report_mmio_enabled(struct pci_dev *dev, void *data)
249 if (!dev->driver || 330 if (!dev->driver ||
250 !dev->driver->err_handler || 331 !dev->driver->err_handler ||
251 !dev->driver->err_handler->mmio_enabled) 332 !dev->driver->err_handler->mmio_enabled)
252 return; 333 return 0;
253 334
254 err_handler = dev->driver->err_handler; 335 err_handler = dev->driver->err_handler;
255 vote = err_handler->mmio_enabled(dev); 336 vote = err_handler->mmio_enabled(dev);
256 result_data->result = merge_result(result_data->result, vote); 337 result_data->result = merge_result(result_data->result, vote);
257 return; 338 return 0;
258} 339}
259 340
260static void report_slot_reset(struct pci_dev *dev, void *data) 341static int report_slot_reset(struct pci_dev *dev, void *data)
261{ 342{
262 pci_ers_result_t vote; 343 pci_ers_result_t vote;
263 struct pci_error_handlers *err_handler; 344 struct pci_error_handlers *err_handler;
@@ -267,15 +348,15 @@ static void report_slot_reset(struct pci_dev *dev, void *data)
267 if (!dev->driver || 348 if (!dev->driver ||
268 !dev->driver->err_handler || 349 !dev->driver->err_handler ||
269 !dev->driver->err_handler->slot_reset) 350 !dev->driver->err_handler->slot_reset)
270 return; 351 return 0;
271 352
272 err_handler = dev->driver->err_handler; 353 err_handler = dev->driver->err_handler;
273 vote = err_handler->slot_reset(dev); 354 vote = err_handler->slot_reset(dev);
274 result_data->result = merge_result(result_data->result, vote); 355 result_data->result = merge_result(result_data->result, vote);
275 return; 356 return 0;
276} 357}
277 358
278static void report_resume(struct pci_dev *dev, void *data) 359static int report_resume(struct pci_dev *dev, void *data)
279{ 360{
280 struct pci_error_handlers *err_handler; 361 struct pci_error_handlers *err_handler;
281 362
@@ -284,11 +365,11 @@ static void report_resume(struct pci_dev *dev, void *data)
284 if (!dev->driver || 365 if (!dev->driver ||
285 !dev->driver->err_handler || 366 !dev->driver->err_handler ||
286 !dev->driver->err_handler->resume) 367 !dev->driver->err_handler->resume)
287 return; 368 return 0;
288 369
289 err_handler = dev->driver->err_handler; 370 err_handler = dev->driver->err_handler;
290 err_handler->resume(dev); 371 err_handler->resume(dev);
291 return; 372 return 0;
292} 373}
293 374
294/** 375/**
@@ -305,7 +386,7 @@ static void report_resume(struct pci_dev *dev, void *data)
305static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, 386static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
306 enum pci_channel_state state, 387 enum pci_channel_state state,
307 char *error_mesg, 388 char *error_mesg,
308 void (*cb)(struct pci_dev *, void *)) 389 int (*cb)(struct pci_dev *, void *))
309{ 390{
310 struct aer_broadcast_data result_data; 391 struct aer_broadcast_data result_data;
311 392
@@ -497,12 +578,12 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
497 */ 578 */
498static void handle_error_source(struct pcie_device * aerdev, 579static void handle_error_source(struct pcie_device * aerdev,
499 struct pci_dev *dev, 580 struct pci_dev *dev,
500 struct aer_err_info info) 581 struct aer_err_info *info)
501{ 582{
502 pci_ers_result_t status = 0; 583 pci_ers_result_t status = 0;
503 int pos; 584 int pos;
504 585
505 if (info.severity == AER_CORRECTABLE) { 586 if (info->severity == AER_CORRECTABLE) {
506 /* 587 /*
507 * Correctable error does not need software intevention. 588 * Correctable error does not need software intevention.
508 * No need to go through error recovery process. 589 * No need to go through error recovery process.
@@ -510,9 +591,9 @@ static void handle_error_source(struct pcie_device * aerdev,
510 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 591 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
511 if (pos) 592 if (pos)
512 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, 593 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
513 info.status); 594 info->status);
514 } else { 595 } else {
515 status = do_recovery(aerdev, dev, info.severity); 596 status = do_recovery(aerdev, dev, info->severity);
516 if (status == PCI_ERS_RESULT_RECOVERED) { 597 if (status == PCI_ERS_RESULT_RECOVERED) {
517 dev_printk(KERN_DEBUG, &dev->dev, "AER driver " 598 dev_printk(KERN_DEBUG, &dev->dev, "AER driver "
518 "successfully recovered\n"); 599 "successfully recovered\n");
@@ -661,6 +742,28 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
661 return AER_SUCCESS; 742 return AER_SUCCESS;
662} 743}
663 744
745static inline void aer_process_err_devices(struct pcie_device *p_device,
746 struct aer_err_info *e_info)
747{
748 int i;
749
750 if (!e_info->dev[0]) {
751 dev_printk(KERN_DEBUG, &p_device->port->dev,
752 "can't find device of ID%04x\n",
753 e_info->id);
754 }
755
756 for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
757 if (get_device_error_info(e_info->dev[i], e_info) ==
758 AER_SUCCESS) {
759 aer_print_error(e_info->dev[i], e_info);
760 handle_error_source(p_device,
761 e_info->dev[i],
762 e_info);
763 }
764 }
765}
766
664/** 767/**
665 * aer_isr_one_error - consume an error detected by root port 768 * aer_isr_one_error - consume an error detected by root port
666 * @p_device: pointer to error root port service device 769 * @p_device: pointer to error root port service device
@@ -669,10 +772,16 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
669static void aer_isr_one_error(struct pcie_device *p_device, 772static void aer_isr_one_error(struct pcie_device *p_device,
670 struct aer_err_source *e_src) 773 struct aer_err_source *e_src)
671{ 774{
672 struct device *s_device; 775 struct aer_err_info *e_info;
673 struct aer_err_info e_info = {0, 0, 0,};
674 int i; 776 int i;
675 u16 id; 777
778 /* struct aer_err_info might be big, so we allocate it with slab */
779 e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
780 if (e_info == NULL) {
781 dev_printk(KERN_DEBUG, &p_device->port->dev,
782 "Can't allocate mem when processing AER errors\n");
783 return;
784 }
676 785
677 /* 786 /*
678 * There is a possibility that both correctable error and 787 * There is a possibility that both correctable error and
@@ -684,31 +793,26 @@ static void aer_isr_one_error(struct pcie_device *p_device,
684 if (!(e_src->status & i)) 793 if (!(e_src->status & i))
685 continue; 794 continue;
686 795
796 memset(e_info, 0, sizeof(struct aer_err_info));
797
687 /* Init comprehensive error information */ 798 /* Init comprehensive error information */
688 if (i & PCI_ERR_ROOT_COR_RCV) { 799 if (i & PCI_ERR_ROOT_COR_RCV) {
689 id = ERR_COR_ID(e_src->id); 800 e_info->id = ERR_COR_ID(e_src->id);
690 e_info.severity = AER_CORRECTABLE; 801 e_info->severity = AER_CORRECTABLE;
691 } else { 802 } else {
692 id = ERR_UNCOR_ID(e_src->id); 803 e_info->id = ERR_UNCOR_ID(e_src->id);
693 e_info.severity = ((e_src->status >> 6) & 1); 804 e_info->severity = ((e_src->status >> 6) & 1);
694 } 805 }
695 if (e_src->status & 806 if (e_src->status &
696 (PCI_ERR_ROOT_MULTI_COR_RCV | 807 (PCI_ERR_ROOT_MULTI_COR_RCV |
697 PCI_ERR_ROOT_MULTI_UNCOR_RCV)) 808 PCI_ERR_ROOT_MULTI_UNCOR_RCV))
698 e_info.flags |= AER_MULTI_ERROR_VALID_FLAG; 809 e_info->flags |= AER_MULTI_ERROR_VALID_FLAG;
699 if (!(s_device = find_source_device(p_device->port, id))) { 810
700 printk(KERN_DEBUG "%s->can't find device of ID%04x\n", 811 find_source_device(p_device->port, e_info);
701 __func__, id); 812 aer_process_err_devices(p_device, e_info);
702 continue;
703 }
704 if (get_device_error_info(to_pci_dev(s_device), &e_info) ==
705 AER_SUCCESS) {
706 aer_print_error(to_pci_dev(s_device), &e_info);
707 handle_error_source(p_device,
708 to_pci_dev(s_device),
709 e_info);
710 }
711 } 813 }
814
815 kfree(e_info);
712} 816}
713 817
714/** 818/**
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
new file mode 100644
index 000000000000..ece97df4df6d
--- /dev/null
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -0,0 +1,131 @@
1/*
2 * Enables/disables PCIe ECRC checking.
3 *
4 * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
5 * Andrew Patterson <andrew.patterson@hp.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 * 02111-1307, USA.
20 *
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/pci.h>
27#include <linux/pci_regs.h>
28#include <linux/errno.h>
29#include "../../pci.h"
30
31#define ECRC_POLICY_DEFAULT 0 /* ECRC set by BIOS */
32#define ECRC_POLICY_OFF 1 /* ECRC off for performance */
33#define ECRC_POLICY_ON 2 /* ECRC on for data integrity */
34
35static int ecrc_policy = ECRC_POLICY_DEFAULT;
36
37static const char *ecrc_policy_str[] = {
38 [ECRC_POLICY_DEFAULT] = "bios",
39 [ECRC_POLICY_OFF] = "off",
40 [ECRC_POLICY_ON] = "on"
41};
42
43/**
44 * enable_ecrc_checking - enable PCIe ECRC checking for a device
45 * @dev: the PCI device
46 *
47 * Returns 0 on success, or negative on failure.
48 */
49static int enable_ecrc_checking(struct pci_dev *dev)
50{
51 int pos;
52 u32 reg32;
53
54 if (!dev->is_pcie)
55 return -ENODEV;
56
57 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
58 if (!pos)
59 return -ENODEV;
60
61 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
62 if (reg32 & PCI_ERR_CAP_ECRC_GENC)
63 reg32 |= PCI_ERR_CAP_ECRC_GENE;
64 if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
65 reg32 |= PCI_ERR_CAP_ECRC_CHKE;
66 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
67
68 return 0;
69}
70
71/**
72 * disable_ecrc_checking - disables PCIe ECRC checking for a device
73 * @dev: the PCI device
74 *
75 * Returns 0 on success, or negative on failure.
76 */
77static int disable_ecrc_checking(struct pci_dev *dev)
78{
79 int pos;
80 u32 reg32;
81
82 if (!dev->is_pcie)
83 return -ENODEV;
84
85 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
86 if (!pos)
87 return -ENODEV;
88
89 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
90 reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
91 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
92
93 return 0;
94}
95
96/**
97 * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
98 * @dev: the PCI device
99 */
100void pcie_set_ecrc_checking(struct pci_dev *dev)
101{
102 switch (ecrc_policy) {
103 case ECRC_POLICY_DEFAULT:
104 return;
105 case ECRC_POLICY_OFF:
106 disable_ecrc_checking(dev);
107 break;
108 case ECRC_POLICY_ON:
109 enable_ecrc_checking(dev);
110 break;
111 default:
112 return;
113 }
114}
115
116/**
117 * pcie_ecrc_get_policy - parse kernel command-line ecrc option
118 */
119void pcie_ecrc_get_policy(char *str)
120{
121 int i;
122
123 for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++)
124 if (!strncmp(str, ecrc_policy_str[i],
125 strlen(ecrc_policy_str[i])))
126 break;
127 if (i >= ARRAY_SIZE(ecrc_policy_str))
128 return;
129
130 ecrc_policy = i;
131}
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index b0367f168af4..3d27c97e0486 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -26,40 +26,36 @@
26#endif 26#endif
27#define MODULE_PARAM_PREFIX "pcie_aspm." 27#define MODULE_PARAM_PREFIX "pcie_aspm."
28 28
29struct endpoint_state { 29struct aspm_latency {
30 unsigned int l0s_acceptable_latency; 30 u32 l0s; /* L0s latency (nsec) */
31 unsigned int l1_acceptable_latency; 31 u32 l1; /* L1 latency (nsec) */
32}; 32};
33 33
34struct pcie_link_state { 34struct pcie_link_state {
35 struct list_head sibiling; 35 struct pci_dev *pdev; /* Upstream component of the Link */
36 struct pci_dev *pdev; 36 struct pcie_link_state *root; /* pointer to the root port link */
37 bool downstream_has_switch; 37 struct pcie_link_state *parent; /* pointer to the parent Link state */
38 38 struct list_head sibling; /* node in link_list */
39 struct pcie_link_state *parent; 39 struct list_head children; /* list of child link states */
40 struct list_head children; 40 struct list_head link; /* node in parent's children list */
41 struct list_head link;
42 41
43 /* ASPM state */ 42 /* ASPM state */
44 unsigned int support_state; 43 u32 aspm_support:2; /* Supported ASPM state */
45 unsigned int enabled_state; 44 u32 aspm_enabled:2; /* Enabled ASPM state */
46 unsigned int bios_aspm_state; 45 u32 aspm_default:2; /* Default ASPM state by BIOS */
47 /* upstream component */ 46
48 unsigned int l0s_upper_latency; 47 /* Clock PM state */
49 unsigned int l1_upper_latency; 48 u32 clkpm_capable:1; /* Clock PM capable? */
50 /* downstream component */ 49 u32 clkpm_enabled:1; /* Current Clock PM state */
51 unsigned int l0s_down_latency; 50 u32 clkpm_default:1; /* Default Clock PM state by BIOS */
52 unsigned int l1_down_latency;
53 /* Clock PM state*/
54 unsigned int clk_pm_capable;
55 unsigned int clk_pm_enabled;
56 unsigned int bios_clk_state;
57 51
52 /* Latencies */
53 struct aspm_latency latency; /* Exit latency */
58 /* 54 /*
59 * A pcie downstream port only has one slot under it, so at most there 55 * Endpoint acceptable latencies. A pcie downstream port only
60 * are 8 functions 56 * has one slot under it, so at most there are 8 functions.
61 */ 57 */
62 struct endpoint_state endpoints[8]; 58 struct aspm_latency acceptable[8];
63}; 59};
64 60
65static int aspm_disabled, aspm_force; 61static int aspm_disabled, aspm_force;
@@ -78,27 +74,23 @@ static const char *policy_str[] = {
78 74
79#define LINK_RETRAIN_TIMEOUT HZ 75#define LINK_RETRAIN_TIMEOUT HZ
80 76
81static int policy_to_aspm_state(struct pci_dev *pdev) 77static int policy_to_aspm_state(struct pcie_link_state *link)
82{ 78{
83 struct pcie_link_state *link_state = pdev->link_state;
84
85 switch (aspm_policy) { 79 switch (aspm_policy) {
86 case POLICY_PERFORMANCE: 80 case POLICY_PERFORMANCE:
87 /* Disable ASPM and Clock PM */ 81 /* Disable ASPM and Clock PM */
88 return 0; 82 return 0;
89 case POLICY_POWERSAVE: 83 case POLICY_POWERSAVE:
90 /* Enable ASPM L0s/L1 */ 84 /* Enable ASPM L0s/L1 */
91 return PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; 85 return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
92 case POLICY_DEFAULT: 86 case POLICY_DEFAULT:
93 return link_state->bios_aspm_state; 87 return link->aspm_default;
94 } 88 }
95 return 0; 89 return 0;
96} 90}
97 91
98static int policy_to_clkpm_state(struct pci_dev *pdev) 92static int policy_to_clkpm_state(struct pcie_link_state *link)
99{ 93{
100 struct pcie_link_state *link_state = pdev->link_state;
101
102 switch (aspm_policy) { 94 switch (aspm_policy) {
103 case POLICY_PERFORMANCE: 95 case POLICY_PERFORMANCE:
104 /* Disable ASPM and Clock PM */ 96 /* Disable ASPM and Clock PM */
@@ -107,73 +99,78 @@ static int policy_to_clkpm_state(struct pci_dev *pdev)
107 /* Disable Clock PM */ 99 /* Disable Clock PM */
108 return 1; 100 return 1;
109 case POLICY_DEFAULT: 101 case POLICY_DEFAULT:
110 return link_state->bios_clk_state; 102 return link->clkpm_default;
111 } 103 }
112 return 0; 104 return 0;
113} 105}
114 106
115static void pcie_set_clock_pm(struct pci_dev *pdev, int enable) 107static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
116{ 108{
117 struct pci_dev *child_dev;
118 int pos; 109 int pos;
119 u16 reg16; 110 u16 reg16;
120 struct pcie_link_state *link_state = pdev->link_state; 111 struct pci_dev *child;
112 struct pci_bus *linkbus = link->pdev->subordinate;
121 113
122 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 114 list_for_each_entry(child, &linkbus->devices, bus_list) {
123 pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 115 pos = pci_find_capability(child, PCI_CAP_ID_EXP);
124 if (!pos) 116 if (!pos)
125 return; 117 return;
126 pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16); 118 pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
127 if (enable) 119 if (enable)
128 reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN; 120 reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN;
129 else 121 else
130 reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN; 122 reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
131 pci_write_config_word(child_dev, pos + PCI_EXP_LNKCTL, reg16); 123 pci_write_config_word(child, pos + PCI_EXP_LNKCTL, reg16);
132 } 124 }
133 link_state->clk_pm_enabled = !!enable; 125 link->clkpm_enabled = !!enable;
134} 126}
135 127
136static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist) 128static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
137{ 129{
138 int pos; 130 /* Don't enable Clock PM if the link is not Clock PM capable */
131 if (!link->clkpm_capable && enable)
132 return;
133 /* Need nothing if the specified equals to current state */
134 if (link->clkpm_enabled == enable)
135 return;
136 pcie_set_clkpm_nocheck(link, enable);
137}
138
139static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
140{
141 int pos, capable = 1, enabled = 1;
139 u32 reg32; 142 u32 reg32;
140 u16 reg16; 143 u16 reg16;
141 int capable = 1, enabled = 1; 144 struct pci_dev *child;
142 struct pci_dev *child_dev; 145 struct pci_bus *linkbus = link->pdev->subordinate;
143 struct pcie_link_state *link_state = pdev->link_state;
144 146
145 /* All functions should have the same cap and state, take the worst */ 147 /* All functions should have the same cap and state, take the worst */
146 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 148 list_for_each_entry(child, &linkbus->devices, bus_list) {
147 pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 149 pos = pci_find_capability(child, PCI_CAP_ID_EXP);
148 if (!pos) 150 if (!pos)
149 return; 151 return;
150 pci_read_config_dword(child_dev, pos + PCI_EXP_LNKCAP, &reg32); 152 pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, &reg32);
151 if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) { 153 if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
152 capable = 0; 154 capable = 0;
153 enabled = 0; 155 enabled = 0;
154 break; 156 break;
155 } 157 }
156 pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16); 158 pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
157 if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN)) 159 if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
158 enabled = 0; 160 enabled = 0;
159 } 161 }
160 link_state->clk_pm_enabled = enabled; 162 link->clkpm_enabled = enabled;
161 link_state->bios_clk_state = enabled; 163 link->clkpm_default = enabled;
162 if (!blacklist) { 164 link->clkpm_capable = (blacklist) ? 0 : capable;
163 link_state->clk_pm_capable = capable;
164 pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
165 } else {
166 link_state->clk_pm_capable = 0;
167 pcie_set_clock_pm(pdev, 0);
168 }
169} 165}
170 166
171static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev) 167static bool pcie_aspm_downstream_has_switch(struct pcie_link_state *link)
172{ 168{
173 struct pci_dev *child_dev; 169 struct pci_dev *child;
170 struct pci_bus *linkbus = link->pdev->subordinate;
174 171
175 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 172 list_for_each_entry(child, &linkbus->devices, bus_list) {
176 if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) 173 if (child->pcie_type == PCI_EXP_TYPE_UPSTREAM)
177 return true; 174 return true;
178 } 175 }
179 return false; 176 return false;
@@ -184,289 +181,263 @@ static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
184 * could use common clock. If they are, configure them to use the 181 * could use common clock. If they are, configure them to use the
185 * common clock. That will reduce the ASPM state exit latency. 182 * common clock. That will reduce the ASPM state exit latency.
186 */ 183 */
187static void pcie_aspm_configure_common_clock(struct pci_dev *pdev) 184static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
188{ 185{
189 int pos, child_pos, i = 0; 186 int ppos, cpos, same_clock = 1;
190 u16 reg16 = 0; 187 u16 reg16, parent_reg, child_reg[8];
191 struct pci_dev *child_dev;
192 int same_clock = 1;
193 unsigned long start_jiffies; 188 unsigned long start_jiffies;
194 u16 child_regs[8], parent_reg; 189 struct pci_dev *child, *parent = link->pdev;
190 struct pci_bus *linkbus = parent->subordinate;
195 /* 191 /*
196 * all functions of a slot should have the same Slot Clock 192 * All functions of a slot should have the same Slot Clock
197 * Configuration, so just check one function 193 * Configuration, so just check one function
198 * */ 194 */
199 child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev, 195 child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
200 bus_list); 196 BUG_ON(!child->is_pcie);
201 BUG_ON(!child_dev->is_pcie);
202 197
203 /* Check downstream component if bit Slot Clock Configuration is 1 */ 198 /* Check downstream component if bit Slot Clock Configuration is 1 */
204 child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 199 cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
205 pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKSTA, &reg16); 200 pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, &reg16);
206 if (!(reg16 & PCI_EXP_LNKSTA_SLC)) 201 if (!(reg16 & PCI_EXP_LNKSTA_SLC))
207 same_clock = 0; 202 same_clock = 0;
208 203
209 /* Check upstream component if bit Slot Clock Configuration is 1 */ 204 /* Check upstream component if bit Slot Clock Configuration is 1 */
210 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 205 ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
211 pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16); 206 pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
212 if (!(reg16 & PCI_EXP_LNKSTA_SLC)) 207 if (!(reg16 & PCI_EXP_LNKSTA_SLC))
213 same_clock = 0; 208 same_clock = 0;
214 209
215 /* Configure downstream component, all functions */ 210 /* Configure downstream component, all functions */
216 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 211 list_for_each_entry(child, &linkbus->devices, bus_list) {
217 child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 212 cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
218 pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL, 213 pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, &reg16);
219 &reg16); 214 child_reg[PCI_FUNC(child->devfn)] = reg16;
220 child_regs[i] = reg16;
221 if (same_clock) 215 if (same_clock)
222 reg16 |= PCI_EXP_LNKCTL_CCC; 216 reg16 |= PCI_EXP_LNKCTL_CCC;
223 else 217 else
224 reg16 &= ~PCI_EXP_LNKCTL_CCC; 218 reg16 &= ~PCI_EXP_LNKCTL_CCC;
225 pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL, 219 pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, reg16);
226 reg16);
227 i++;
228 } 220 }
229 221
230 /* Configure upstream component */ 222 /* Configure upstream component */
231 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); 223 pci_read_config_word(parent, ppos + PCI_EXP_LNKCTL, &reg16);
232 parent_reg = reg16; 224 parent_reg = reg16;
233 if (same_clock) 225 if (same_clock)
234 reg16 |= PCI_EXP_LNKCTL_CCC; 226 reg16 |= PCI_EXP_LNKCTL_CCC;
235 else 227 else
236 reg16 &= ~PCI_EXP_LNKCTL_CCC; 228 reg16 &= ~PCI_EXP_LNKCTL_CCC;
237 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 229 pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
238 230
239 /* retrain link */ 231 /* Retrain link */
240 reg16 |= PCI_EXP_LNKCTL_RL; 232 reg16 |= PCI_EXP_LNKCTL_RL;
241 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 233 pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
242 234
243 /* Wait for link training end */ 235 /* Wait for link training end. Break out after waiting for timeout */
244 /* break out after waiting for timeout */
245 start_jiffies = jiffies; 236 start_jiffies = jiffies;
246 for (;;) { 237 for (;;) {
247 pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16); 238 pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
248 if (!(reg16 & PCI_EXP_LNKSTA_LT)) 239 if (!(reg16 & PCI_EXP_LNKSTA_LT))
249 break; 240 break;
250 if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) 241 if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
251 break; 242 break;
252 msleep(1); 243 msleep(1);
253 } 244 }
254 /* training failed -> recover */ 245 if (!(reg16 & PCI_EXP_LNKSTA_LT))
255 if (reg16 & PCI_EXP_LNKSTA_LT) { 246 return;
256 dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure" 247
257 " common clock\n"); 248 /* Training failed. Restore common clock configurations */
258 i = 0; 249 dev_printk(KERN_ERR, &parent->dev,
259 list_for_each_entry(child_dev, &pdev->subordinate->devices, 250 "ASPM: Could not configure common clock\n");
260 bus_list) { 251 list_for_each_entry(child, &linkbus->devices, bus_list) {
261 child_pos = pci_find_capability(child_dev, 252 cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
262 PCI_CAP_ID_EXP); 253 pci_write_config_word(child, cpos + PCI_EXP_LNKCTL,
263 pci_write_config_word(child_dev, 254 child_reg[PCI_FUNC(child->devfn)]);
264 child_pos + PCI_EXP_LNKCTL,
265 child_regs[i]);
266 i++;
267 }
268 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, parent_reg);
269 } 255 }
256 pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, parent_reg);
270} 257}
271 258
272/* 259/* Convert L0s latency encoding to ns */
273 * calc_L0S_latency: Convert L0s latency encoding to ns 260static u32 calc_l0s_latency(u32 encoding)
274 */
275static unsigned int calc_L0S_latency(unsigned int latency_encoding, int ac)
276{ 261{
277 unsigned int ns = 64; 262 if (encoding == 0x7)
263 return (5 * 1000); /* > 4us */
264 return (64 << encoding);
265}
278 266
279 if (latency_encoding == 0x7) { 267/* Convert L0s acceptable latency encoding to ns */
280 if (ac) 268static u32 calc_l0s_acceptable(u32 encoding)
281 ns = -1U; 269{
282 else 270 if (encoding == 0x7)
283 ns = 5*1000; /* > 4us */ 271 return -1U;
284 } else 272 return (64 << encoding);
285 ns *= (1 << latency_encoding);
286 return ns;
287} 273}
288 274
289/* 275/* Convert L1 latency encoding to ns */
290 * calc_L1_latency: Convert L1 latency encoding to ns 276static u32 calc_l1_latency(u32 encoding)
291 */
292static unsigned int calc_L1_latency(unsigned int latency_encoding, int ac)
293{ 277{
294 unsigned int ns = 1000; 278 if (encoding == 0x7)
279 return (65 * 1000); /* > 64us */
280 return (1000 << encoding);
281}
295 282
296 if (latency_encoding == 0x7) { 283/* Convert L1 acceptable latency encoding to ns */
297 if (ac) 284static u32 calc_l1_acceptable(u32 encoding)
298 ns = -1U; 285{
299 else 286 if (encoding == 0x7)
300 ns = 65*1000; /* > 64us */ 287 return -1U;
301 } else 288 return (1000 << encoding);
302 ns *= (1 << latency_encoding);
303 return ns;
304} 289}
305 290
306static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state, 291static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state,
307 unsigned int *l0s, unsigned int *l1, unsigned int *enabled) 292 u32 *l0s, u32 *l1, u32 *enabled)
308{ 293{
309 int pos; 294 int pos;
310 u16 reg16; 295 u16 reg16;
311 u32 reg32; 296 u32 reg32, encoding;
312 unsigned int latency;
313 297
298 *l0s = *l1 = *enabled = 0;
314 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 299 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
315 pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32); 300 pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
316 *state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; 301 *state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
317 if (*state != PCIE_LINK_STATE_L0S && 302 if (*state != PCIE_LINK_STATE_L0S &&
318 *state != (PCIE_LINK_STATE_L1|PCIE_LINK_STATE_L0S)) 303 *state != (PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L0S))
319 *state = 0; 304 *state = 0;
320 if (*state == 0) 305 if (*state == 0)
321 return; 306 return;
322 307
323 latency = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; 308 encoding = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
324 *l0s = calc_L0S_latency(latency, 0); 309 *l0s = calc_l0s_latency(encoding);
325 if (*state & PCIE_LINK_STATE_L1) { 310 if (*state & PCIE_LINK_STATE_L1) {
326 latency = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; 311 encoding = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
327 *l1 = calc_L1_latency(latency, 0); 312 *l1 = calc_l1_latency(encoding);
328 } 313 }
329 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); 314 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
330 *enabled = reg16 & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1); 315 *enabled = reg16 & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
331} 316}
332 317
333static void pcie_aspm_cap_init(struct pci_dev *pdev) 318static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
334{ 319{
335 struct pci_dev *child_dev; 320 u32 support, l0s, l1, enabled;
336 u32 state, tmp; 321 struct pci_dev *child, *parent = link->pdev;
337 struct pcie_link_state *link_state = pdev->link_state; 322 struct pci_bus *linkbus = parent->subordinate;
323
324 if (blacklist) {
325 /* Set support state to 0, so we will disable ASPM later */
326 link->aspm_support = 0;
327 link->aspm_default = 0;
328 link->aspm_enabled = PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
329 return;
330 }
331
332 /* Configure common clock before checking latencies */
333 pcie_aspm_configure_common_clock(link);
338 334
339 /* upstream component states */ 335 /* upstream component states */
340 pcie_aspm_get_cap_device(pdev, &link_state->support_state, 336 pcie_aspm_get_cap_device(parent, &support, &l0s, &l1, &enabled);
341 &link_state->l0s_upper_latency, 337 link->aspm_support = support;
342 &link_state->l1_upper_latency, 338 link->latency.l0s = l0s;
343 &link_state->enabled_state); 339 link->latency.l1 = l1;
340 link->aspm_enabled = enabled;
341
344 /* downstream component states, all functions have the same setting */ 342 /* downstream component states, all functions have the same setting */
345 child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev, 343 child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
346 bus_list); 344 pcie_aspm_get_cap_device(child, &support, &l0s, &l1, &enabled);
347 pcie_aspm_get_cap_device(child_dev, &state, 345 link->aspm_support &= support;
348 &link_state->l0s_down_latency, 346 link->latency.l0s = max_t(u32, link->latency.l0s, l0s);
349 &link_state->l1_down_latency, 347 link->latency.l1 = max_t(u32, link->latency.l1, l1);
350 &tmp); 348
351 link_state->support_state &= state; 349 if (!link->aspm_support)
352 if (!link_state->support_state)
353 return; 350 return;
354 link_state->enabled_state &= link_state->support_state; 351
355 link_state->bios_aspm_state = link_state->enabled_state; 352 link->aspm_enabled &= link->aspm_support;
353 link->aspm_default = link->aspm_enabled;
356 354
357 /* ENDPOINT states*/ 355 /* ENDPOINT states*/
358 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 356 list_for_each_entry(child, &linkbus->devices, bus_list) {
359 int pos; 357 int pos;
360 u32 reg32; 358 u32 reg32, encoding;
361 unsigned int latency; 359 struct aspm_latency *acceptable =
362 struct endpoint_state *ep_state = 360 &link->acceptable[PCI_FUNC(child->devfn)];
363 &link_state->endpoints[PCI_FUNC(child_dev->devfn)];
364 361
365 if (child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT && 362 if (child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
366 child_dev->pcie_type != PCI_EXP_TYPE_LEG_END) 363 child->pcie_type != PCI_EXP_TYPE_LEG_END)
367 continue; 364 continue;
368 365
369 pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 366 pos = pci_find_capability(child, PCI_CAP_ID_EXP);
370 pci_read_config_dword(child_dev, pos + PCI_EXP_DEVCAP, &reg32); 367 pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
371 latency = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; 368 encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
372 latency = calc_L0S_latency(latency, 1); 369 acceptable->l0s = calc_l0s_acceptable(encoding);
373 ep_state->l0s_acceptable_latency = latency; 370 if (link->aspm_support & PCIE_LINK_STATE_L1) {
374 if (link_state->support_state & PCIE_LINK_STATE_L1) { 371 encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
375 latency = (reg32 & PCI_EXP_DEVCAP_L1) >> 9; 372 acceptable->l1 = calc_l1_acceptable(encoding);
376 latency = calc_L1_latency(latency, 1);
377 ep_state->l1_acceptable_latency = latency;
378 } 373 }
379 } 374 }
380} 375}
381 376
382static unsigned int __pcie_aspm_check_state_one(struct pci_dev *pdev, 377/**
383 unsigned int state) 378 * __pcie_aspm_check_state_one - check latency for endpoint device.
384{ 379 * @endpoint: pointer to the struct pci_dev of endpoint device
385 struct pci_dev *parent_dev, *tmp_dev; 380 *
386 unsigned int latency, l1_latency = 0; 381 * TBD: The latency from the endpoint to root complex vary per switch's
387 struct pcie_link_state *link_state; 382 * upstream link state above the device. Here we just do a simple check
388 struct endpoint_state *ep_state; 383 * which assumes all links above the device can be in L1 state, that
389 384 * is we just consider the worst case. If switch's upstream link can't
390 parent_dev = pdev->bus->self; 385 * be put into L0S/L1, then our check is too strictly.
391 link_state = parent_dev->link_state; 386 */
392 state &= link_state->support_state; 387static u32 __pcie_aspm_check_state_one(struct pci_dev *endpoint, u32 state)
393 if (state == 0) 388{
394 return 0; 389 u32 l1_switch_latency = 0;
395 ep_state = &link_state->endpoints[PCI_FUNC(pdev->devfn)]; 390 struct aspm_latency *acceptable;
396 391 struct pcie_link_state *link;
397 /* 392
398 * Check latency for endpoint device. 393 link = endpoint->bus->self->link_state;
399 * TBD: The latency from the endpoint to root complex vary per 394 state &= link->aspm_support;
400 * switch's upstream link state above the device. Here we just do a 395 acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
401 * simple check which assumes all links above the device can be in L1 396
402 * state, that is we just consider the worst case. If switch's upstream 397 while (link && state) {
403 * link can't be put into L0S/L1, then our check is too strictly. 398 if ((state & PCIE_LINK_STATE_L0S) &&
404 */ 399 (link->latency.l0s > acceptable->l0s))
405 tmp_dev = pdev; 400 state &= ~PCIE_LINK_STATE_L0S;
406 while (state & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) { 401 if ((state & PCIE_LINK_STATE_L1) &&
407 parent_dev = tmp_dev->bus->self; 402 (link->latency.l1 + l1_switch_latency > acceptable->l1))
408 link_state = parent_dev->link_state; 403 state &= ~PCIE_LINK_STATE_L1;
409 if (state & PCIE_LINK_STATE_L0S) { 404 link = link->parent;
410 latency = max_t(unsigned int, 405 /*
411 link_state->l0s_upper_latency, 406 * Every switch on the path to root complex need 1
412 link_state->l0s_down_latency); 407 * more microsecond for L1. Spec doesn't mention L0s.
413 if (latency > ep_state->l0s_acceptable_latency) 408 */
414 state &= ~PCIE_LINK_STATE_L0S; 409 l1_switch_latency += 1000;
415 }
416 if (state & PCIE_LINK_STATE_L1) {
417 latency = max_t(unsigned int,
418 link_state->l1_upper_latency,
419 link_state->l1_down_latency);
420 if (latency + l1_latency >
421 ep_state->l1_acceptable_latency)
422 state &= ~PCIE_LINK_STATE_L1;
423 }
424 if (!parent_dev->bus->self) /* parent_dev is a root port */
425 break;
426 else {
427 /*
428 * parent_dev is the downstream port of a switch, make
429 * tmp_dev the upstream port of the switch
430 */
431 tmp_dev = parent_dev->bus->self;
432 /*
433 * every switch on the path to root complex need 1 more
434 * microsecond for L1. Spec doesn't mention L0S.
435 */
436 if (state & PCIE_LINK_STATE_L1)
437 l1_latency += 1000;
438 }
439 } 410 }
440 return state; 411 return state;
441} 412}
442 413
443static unsigned int pcie_aspm_check_state(struct pci_dev *pdev, 414static u32 pcie_aspm_check_state(struct pcie_link_state *link, u32 state)
444 unsigned int state)
445{ 415{
446 struct pci_dev *child_dev; 416 pci_power_t power_state;
417 struct pci_dev *child;
418 struct pci_bus *linkbus = link->pdev->subordinate;
447 419
448 /* If no child, ignore the link */ 420 /* If no child, ignore the link */
449 if (list_empty(&pdev->subordinate->devices)) 421 if (list_empty(&linkbus->devices))
450 return state; 422 return state;
451 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 423
452 if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { 424 list_for_each_entry(child, &linkbus->devices, bus_list) {
453 /* 425 /*
454 * If downstream component of a link is pci bridge, we 426 * If downstream component of a link is pci bridge, we
455 * disable ASPM for now for the link 427 * disable ASPM for now for the link
456 * */ 428 */
457 state = 0; 429 if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
458 break; 430 return 0;
459 } 431
460 if ((child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT && 432 if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
461 child_dev->pcie_type != PCI_EXP_TYPE_LEG_END)) 433 child->pcie_type != PCI_EXP_TYPE_LEG_END))
462 continue; 434 continue;
463 /* Device not in D0 doesn't need check latency */ 435 /* Device not in D0 doesn't need check latency */
464 if (child_dev->current_state == PCI_D1 || 436 power_state = child->current_state;
465 child_dev->current_state == PCI_D2 || 437 if (power_state == PCI_D1 || power_state == PCI_D2 ||
466 child_dev->current_state == PCI_D3hot || 438 power_state == PCI_D3hot || power_state == PCI_D3cold)
467 child_dev->current_state == PCI_D3cold)
468 continue; 439 continue;
469 state = __pcie_aspm_check_state_one(child_dev, state); 440 state = __pcie_aspm_check_state_one(child, state);
470 } 441 }
471 return state; 442 return state;
472} 443}
@@ -482,90 +453,71 @@ static void __pcie_aspm_config_one_dev(struct pci_dev *pdev, unsigned int state)
482 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 453 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
483} 454}
484 455
485static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state) 456static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state)
486{ 457{
487 struct pci_dev *child_dev; 458 struct pci_dev *child, *parent = link->pdev;
488 int valid = 1; 459 struct pci_bus *linkbus = parent->subordinate;
489 struct pcie_link_state *link_state = pdev->link_state;
490 460
491 /* If no child, disable the link */ 461 /* If no child, disable the link */
492 if (list_empty(&pdev->subordinate->devices)) 462 if (list_empty(&linkbus->devices))
493 state = 0; 463 state = 0;
494 /* 464 /*
495 * if the downstream component has pci bridge function, don't do ASPM 465 * If the downstream component has pci bridge function, don't
496 * now 466 * do ASPM now.
497 */ 467 */
498 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 468 list_for_each_entry(child, &linkbus->devices, bus_list) {
499 if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { 469 if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
500 valid = 0; 470 return;
501 break;
502 }
503 } 471 }
504 if (!valid)
505 return;
506
507 /* 472 /*
508 * spec 2.0 suggests all functions should be configured the same 473 * Spec 2.0 suggests all functions should be configured the
509 * setting for ASPM. Enabling ASPM L1 should be done in upstream 474 * same setting for ASPM. Enabling ASPM L1 should be done in
510 * component first and then downstream, and vice versa for disabling 475 * upstream component first and then downstream, and vice
511 * ASPM L1. Spec doesn't mention L0S. 476 * versa for disabling ASPM L1. Spec doesn't mention L0S.
512 */ 477 */
513 if (state & PCIE_LINK_STATE_L1) 478 if (state & PCIE_LINK_STATE_L1)
514 __pcie_aspm_config_one_dev(pdev, state); 479 __pcie_aspm_config_one_dev(parent, state);
515 480
516 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) 481 list_for_each_entry(child, &linkbus->devices, bus_list)
517 __pcie_aspm_config_one_dev(child_dev, state); 482 __pcie_aspm_config_one_dev(child, state);
518 483
519 if (!(state & PCIE_LINK_STATE_L1)) 484 if (!(state & PCIE_LINK_STATE_L1))
520 __pcie_aspm_config_one_dev(pdev, state); 485 __pcie_aspm_config_one_dev(parent, state);
521 486
522 link_state->enabled_state = state; 487 link->aspm_enabled = state;
523} 488}
524 489
525static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link) 490/* Check the whole hierarchy, and configure each link in the hierarchy */
491static void __pcie_aspm_configure_link_state(struct pcie_link_state *link,
492 u32 state)
526{ 493{
527 struct pcie_link_state *root_port_link = link; 494 struct pcie_link_state *leaf, *root = link->root;
528 while (root_port_link->parent)
529 root_port_link = root_port_link->parent;
530 return root_port_link;
531}
532 495
533/* check the whole hierarchy, and configure each link in the hierarchy */ 496 state &= (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
534static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
535 unsigned int state)
536{
537 struct pcie_link_state *link_state = pdev->link_state;
538 struct pcie_link_state *root_port_link = get_root_port_link(link_state);
539 struct pcie_link_state *leaf;
540 497
541 state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; 498 /* Check all links who have specific root port link */
542 499 list_for_each_entry(leaf, &link_list, sibling) {
543 /* check all links who have specific root port link */ 500 if (!list_empty(&leaf->children) || (leaf->root != root))
544 list_for_each_entry(leaf, &link_list, sibiling) {
545 if (!list_empty(&leaf->children) ||
546 get_root_port_link(leaf) != root_port_link)
547 continue; 501 continue;
548 state = pcie_aspm_check_state(leaf->pdev, state); 502 state = pcie_aspm_check_state(leaf, state);
549 } 503 }
550 /* check root port link too in case it hasn't children */ 504 /* Check root port link too in case it hasn't children */
551 state = pcie_aspm_check_state(root_port_link->pdev, state); 505 state = pcie_aspm_check_state(root, state);
552 506 if (link->aspm_enabled == state)
553 if (link_state->enabled_state == state)
554 return; 507 return;
555
556 /* 508 /*
557 * we must change the hierarchy. See comments in 509 * We must change the hierarchy. See comments in
558 * __pcie_aspm_config_link for the order 510 * __pcie_aspm_config_link for the order
559 **/ 511 **/
560 if (state & PCIE_LINK_STATE_L1) { 512 if (state & PCIE_LINK_STATE_L1) {
561 list_for_each_entry(leaf, &link_list, sibiling) { 513 list_for_each_entry(leaf, &link_list, sibling) {
562 if (get_root_port_link(leaf) == root_port_link) 514 if (leaf->root == root)
563 __pcie_aspm_config_link(leaf->pdev, state); 515 __pcie_aspm_config_link(leaf, state);
564 } 516 }
565 } else { 517 } else {
566 list_for_each_entry_reverse(leaf, &link_list, sibiling) { 518 list_for_each_entry_reverse(leaf, &link_list, sibling) {
567 if (get_root_port_link(leaf) == root_port_link) 519 if (leaf->root == root)
568 __pcie_aspm_config_link(leaf->pdev, state); 520 __pcie_aspm_config_link(leaf, state);
569 } 521 }
570 } 522 }
571} 523}
@@ -574,45 +526,42 @@ static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
574 * pcie_aspm_configure_link_state: enable/disable PCI express link state 526 * pcie_aspm_configure_link_state: enable/disable PCI express link state
575 * @pdev: the root port or switch downstream port 527 * @pdev: the root port or switch downstream port
576 */ 528 */
577static void pcie_aspm_configure_link_state(struct pci_dev *pdev, 529static void pcie_aspm_configure_link_state(struct pcie_link_state *link,
578 unsigned int state) 530 u32 state)
579{ 531{
580 down_read(&pci_bus_sem); 532 down_read(&pci_bus_sem);
581 mutex_lock(&aspm_lock); 533 mutex_lock(&aspm_lock);
582 __pcie_aspm_configure_link_state(pdev, state); 534 __pcie_aspm_configure_link_state(link, state);
583 mutex_unlock(&aspm_lock); 535 mutex_unlock(&aspm_lock);
584 up_read(&pci_bus_sem); 536 up_read(&pci_bus_sem);
585} 537}
586 538
587static void free_link_state(struct pci_dev *pdev) 539static void free_link_state(struct pcie_link_state *link)
588{ 540{
589 kfree(pdev->link_state); 541 link->pdev->link_state = NULL;
590 pdev->link_state = NULL; 542 kfree(link);
591} 543}
592 544
593static int pcie_aspm_sanity_check(struct pci_dev *pdev) 545static int pcie_aspm_sanity_check(struct pci_dev *pdev)
594{ 546{
595 struct pci_dev *child_dev; 547 struct pci_dev *child;
596 int child_pos; 548 int pos;
597 u32 reg32; 549 u32 reg32;
598
599 /* 550 /*
600 * Some functions in a slot might not all be PCIE functions, very 551 * Some functions in a slot might not all be PCIE functions,
601 * strange. Disable ASPM for the whole slot 552 * very strange. Disable ASPM for the whole slot
602 */ 553 */
603 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 554 list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
604 child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 555 pos = pci_find_capability(child, PCI_CAP_ID_EXP);
605 if (!child_pos) 556 if (!pos)
606 return -EINVAL; 557 return -EINVAL;
607
608 /* 558 /*
609 * Disable ASPM for pre-1.1 PCIe device, we follow MS to use 559 * Disable ASPM for pre-1.1 PCIe device, we follow MS to use
610 * RBER bit to determine if a function is 1.1 version device 560 * RBER bit to determine if a function is 1.1 version device
611 */ 561 */
612 pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP, 562 pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
613 &reg32);
614 if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) { 563 if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
615 dev_printk(KERN_INFO, &child_dev->dev, "disabling ASPM" 564 dev_printk(KERN_INFO, &child->dev, "disabling ASPM"
616 " on pre-1.1 PCIe device. You can enable it" 565 " on pre-1.1 PCIe device. You can enable it"
617 " with 'pcie_aspm=force'\n"); 566 " with 'pcie_aspm=force'\n");
618 return -EINVAL; 567 return -EINVAL;
@@ -621,6 +570,47 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
621 return 0; 570 return 0;
622} 571}
623 572
573static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
574{
575 struct pcie_link_state *link;
576 int blacklist = !!pcie_aspm_sanity_check(pdev);
577
578 link = kzalloc(sizeof(*link), GFP_KERNEL);
579 if (!link)
580 return NULL;
581 INIT_LIST_HEAD(&link->sibling);
582 INIT_LIST_HEAD(&link->children);
583 INIT_LIST_HEAD(&link->link);
584 link->pdev = pdev;
585 if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) {
586 struct pcie_link_state *parent;
587 parent = pdev->bus->parent->self->link_state;
588 if (!parent) {
589 kfree(link);
590 return NULL;
591 }
592 link->parent = parent;
593 list_add(&link->link, &parent->children);
594 }
595 /* Setup a pointer to the root port link */
596 if (!link->parent)
597 link->root = link;
598 else
599 link->root = link->parent->root;
600
601 list_add(&link->sibling, &link_list);
602
603 pdev->link_state = link;
604
605 /* Check ASPM capability */
606 pcie_aspm_cap_init(link, blacklist);
607
608 /* Check Clock PM capability */
609 pcie_clkpm_cap_init(link, blacklist);
610
611 return link;
612}
613
624/* 614/*
625 * pcie_aspm_init_link_state: Initiate PCI express link state. 615 * pcie_aspm_init_link_state: Initiate PCI express link state.
626 * It is called after the pcie and its children devices are scaned. 616 * It is called after the pcie and its children devices are scaned.
@@ -628,75 +618,47 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
628 */ 618 */
629void pcie_aspm_init_link_state(struct pci_dev *pdev) 619void pcie_aspm_init_link_state(struct pci_dev *pdev)
630{ 620{
631 unsigned int state; 621 u32 state;
632 struct pcie_link_state *link_state; 622 struct pcie_link_state *link;
633 int error = 0;
634 int blacklist;
635 623
636 if (aspm_disabled || !pdev->is_pcie || pdev->link_state) 624 if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
637 return; 625 return;
638 if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && 626 if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
639 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) 627 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
628 return;
629
630 /* VIA has a strange chipset, root port is under a bridge */
631 if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
632 pdev->bus->self)
640 return; 633 return;
634
641 down_read(&pci_bus_sem); 635 down_read(&pci_bus_sem);
642 if (list_empty(&pdev->subordinate->devices)) 636 if (list_empty(&pdev->subordinate->devices))
643 goto out; 637 goto out;
644 638
645 blacklist = !!pcie_aspm_sanity_check(pdev);
646
647 mutex_lock(&aspm_lock); 639 mutex_lock(&aspm_lock);
648 640 link = pcie_aspm_setup_link_state(pdev);
649 link_state = kzalloc(sizeof(*link_state), GFP_KERNEL); 641 if (!link)
650 if (!link_state) 642 goto unlock;
651 goto unlock_out; 643 /*
652 644 * Setup initial ASPM state
653 link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev); 645 *
654 INIT_LIST_HEAD(&link_state->children); 646 * If link has switch, delay the link config. The leaf link
655 INIT_LIST_HEAD(&link_state->link); 647 * initialization will config the whole hierarchy. But we must
656 if (pdev->bus->self) {/* this is a switch */ 648 * make sure BIOS doesn't set unsupported link state.
657 struct pcie_link_state *parent_link_state; 649 */
658 650 if (pcie_aspm_downstream_has_switch(link)) {
659 parent_link_state = pdev->bus->parent->self->link_state; 651 state = pcie_aspm_check_state(link, link->aspm_default);
660 if (!parent_link_state) { 652 __pcie_aspm_config_link(link, state);
661 kfree(link_state);
662 goto unlock_out;
663 }
664 list_add(&link_state->link, &parent_link_state->children);
665 link_state->parent = parent_link_state;
666 }
667
668 pdev->link_state = link_state;
669
670 if (!blacklist) {
671 pcie_aspm_configure_common_clock(pdev);
672 pcie_aspm_cap_init(pdev);
673 } else { 653 } else {
674 link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; 654 state = policy_to_aspm_state(link);
675 link_state->bios_aspm_state = 0; 655 __pcie_aspm_configure_link_state(link, state);
676 /* Set support state to 0, so we will disable ASPM later */
677 link_state->support_state = 0;
678 } 656 }
679 657
680 link_state->pdev = pdev; 658 /* Setup initial Clock PM state */
681 list_add(&link_state->sibiling, &link_list); 659 state = (link->clkpm_capable) ? policy_to_clkpm_state(link) : 0;
682 660 pcie_set_clkpm(link, state);
683 if (link_state->downstream_has_switch) { 661unlock:
684 /*
685 * If link has switch, delay the link config. The leaf link
686 * initialization will config the whole hierarchy. but we must
687 * make sure BIOS doesn't set unsupported link state
688 **/
689 state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state);
690 __pcie_aspm_config_link(pdev, state);
691 } else
692 __pcie_aspm_configure_link_state(pdev,
693 policy_to_aspm_state(pdev));
694
695 pcie_check_clock_pm(pdev, blacklist);
696
697unlock_out:
698 if (error)
699 free_link_state(pdev);
700 mutex_unlock(&aspm_lock); 662 mutex_unlock(&aspm_lock);
701out: 663out:
702 up_read(&pci_bus_sem); 664 up_read(&pci_bus_sem);
@@ -725,11 +687,11 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
725 687
726 /* All functions are removed, so just disable ASPM for the link */ 688 /* All functions are removed, so just disable ASPM for the link */
727 __pcie_aspm_config_one_dev(parent, 0); 689 __pcie_aspm_config_one_dev(parent, 0);
728 list_del(&link_state->sibiling); 690 list_del(&link_state->sibling);
729 list_del(&link_state->link); 691 list_del(&link_state->link);
730 /* Clock PM is for endpoint device */ 692 /* Clock PM is for endpoint device */
731 693
732 free_link_state(parent); 694 free_link_state(link_state);
733out: 695out:
734 mutex_unlock(&aspm_lock); 696 mutex_unlock(&aspm_lock);
735 up_read(&pci_bus_sem); 697 up_read(&pci_bus_sem);
@@ -749,7 +711,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
749 * devices changed PM state, we should recheck if latency meets all 711 * devices changed PM state, we should recheck if latency meets all
750 * functions' requirement 712 * functions' requirement
751 */ 713 */
752 pcie_aspm_configure_link_state(pdev, link_state->enabled_state); 714 pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
753} 715}
754 716
755/* 717/*
@@ -772,14 +734,12 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
772 down_read(&pci_bus_sem); 734 down_read(&pci_bus_sem);
773 mutex_lock(&aspm_lock); 735 mutex_lock(&aspm_lock);
774 link_state = parent->link_state; 736 link_state = parent->link_state;
775 link_state->support_state &= 737 link_state->aspm_support &= ~state;
776 ~(state & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1)); 738 __pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
777 if (state & PCIE_LINK_STATE_CLKPM) 739 if (state & PCIE_LINK_STATE_CLKPM) {
778 link_state->clk_pm_capable = 0; 740 link_state->clkpm_capable = 0;
779 741 pcie_set_clkpm(link_state, 0);
780 __pcie_aspm_configure_link_state(parent, link_state->enabled_state); 742 }
781 if (!link_state->clk_pm_capable && link_state->clk_pm_enabled)
782 pcie_set_clock_pm(parent, 0);
783 mutex_unlock(&aspm_lock); 743 mutex_unlock(&aspm_lock);
784 up_read(&pci_bus_sem); 744 up_read(&pci_bus_sem);
785} 745}
@@ -788,7 +748,6 @@ EXPORT_SYMBOL(pci_disable_link_state);
788static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) 748static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
789{ 749{
790 int i; 750 int i;
791 struct pci_dev *pdev;
792 struct pcie_link_state *link_state; 751 struct pcie_link_state *link_state;
793 752
794 for (i = 0; i < ARRAY_SIZE(policy_str); i++) 753 for (i = 0; i < ARRAY_SIZE(policy_str); i++)
@@ -802,14 +761,10 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
802 down_read(&pci_bus_sem); 761 down_read(&pci_bus_sem);
803 mutex_lock(&aspm_lock); 762 mutex_lock(&aspm_lock);
804 aspm_policy = i; 763 aspm_policy = i;
805 list_for_each_entry(link_state, &link_list, sibiling) { 764 list_for_each_entry(link_state, &link_list, sibling) {
806 pdev = link_state->pdev; 765 __pcie_aspm_configure_link_state(link_state,
807 __pcie_aspm_configure_link_state(pdev, 766 policy_to_aspm_state(link_state));
808 policy_to_aspm_state(pdev)); 767 pcie_set_clkpm(link_state, policy_to_clkpm_state(link_state));
809 if (link_state->clk_pm_capable &&
810 link_state->clk_pm_enabled != policy_to_clkpm_state(pdev))
811 pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
812
813 } 768 }
814 mutex_unlock(&aspm_lock); 769 mutex_unlock(&aspm_lock);
815 up_read(&pci_bus_sem); 770 up_read(&pci_bus_sem);
@@ -838,7 +793,7 @@ static ssize_t link_state_show(struct device *dev,
838 struct pci_dev *pci_device = to_pci_dev(dev); 793 struct pci_dev *pci_device = to_pci_dev(dev);
839 struct pcie_link_state *link_state = pci_device->link_state; 794 struct pcie_link_state *link_state = pci_device->link_state;
840 795
841 return sprintf(buf, "%d\n", link_state->enabled_state); 796 return sprintf(buf, "%d\n", link_state->aspm_enabled);
842} 797}
843 798
844static ssize_t link_state_store(struct device *dev, 799static ssize_t link_state_store(struct device *dev,
@@ -846,7 +801,7 @@ static ssize_t link_state_store(struct device *dev,
846 const char *buf, 801 const char *buf,
847 size_t n) 802 size_t n)
848{ 803{
849 struct pci_dev *pci_device = to_pci_dev(dev); 804 struct pci_dev *pdev = to_pci_dev(dev);
850 int state; 805 int state;
851 806
852 if (n < 1) 807 if (n < 1)
@@ -854,7 +809,7 @@ static ssize_t link_state_store(struct device *dev,
854 state = buf[0]-'0'; 809 state = buf[0]-'0';
855 if (state >= 0 && state <= 3) { 810 if (state >= 0 && state <= 3) {
856 /* setup link aspm state */ 811 /* setup link aspm state */
857 pcie_aspm_configure_link_state(pci_device, state); 812 pcie_aspm_configure_link_state(pdev->link_state, state);
858 return n; 813 return n;
859 } 814 }
860 815
@@ -868,7 +823,7 @@ static ssize_t clk_ctl_show(struct device *dev,
868 struct pci_dev *pci_device = to_pci_dev(dev); 823 struct pci_dev *pci_device = to_pci_dev(dev);
869 struct pcie_link_state *link_state = pci_device->link_state; 824 struct pcie_link_state *link_state = pci_device->link_state;
870 825
871 return sprintf(buf, "%d\n", link_state->clk_pm_enabled); 826 return sprintf(buf, "%d\n", link_state->clkpm_enabled);
872} 827}
873 828
874static ssize_t clk_ctl_store(struct device *dev, 829static ssize_t clk_ctl_store(struct device *dev,
@@ -876,7 +831,7 @@ static ssize_t clk_ctl_store(struct device *dev,
876 const char *buf, 831 const char *buf,
877 size_t n) 832 size_t n)
878{ 833{
879 struct pci_dev *pci_device = to_pci_dev(dev); 834 struct pci_dev *pdev = to_pci_dev(dev);
880 int state; 835 int state;
881 836
882 if (n < 1) 837 if (n < 1)
@@ -885,7 +840,7 @@ static ssize_t clk_ctl_store(struct device *dev,
885 840
886 down_read(&pci_bus_sem); 841 down_read(&pci_bus_sem);
887 mutex_lock(&aspm_lock); 842 mutex_lock(&aspm_lock);
888 pcie_set_clock_pm(pci_device, !!state); 843 pcie_set_clkpm_nocheck(pdev->link_state, !!state);
889 mutex_unlock(&aspm_lock); 844 mutex_unlock(&aspm_lock);
890 up_read(&pci_bus_sem); 845 up_read(&pci_bus_sem);
891 846
@@ -904,10 +859,10 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
904 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) 859 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
905 return; 860 return;
906 861
907 if (link_state->support_state) 862 if (link_state->aspm_support)
908 sysfs_add_file_to_group(&pdev->dev.kobj, 863 sysfs_add_file_to_group(&pdev->dev.kobj,
909 &dev_attr_link_state.attr, power_group); 864 &dev_attr_link_state.attr, power_group);
910 if (link_state->clk_pm_capable) 865 if (link_state->clkpm_capable)
911 sysfs_add_file_to_group(&pdev->dev.kobj, 866 sysfs_add_file_to_group(&pdev->dev.kobj,
912 &dev_attr_clk_ctl.attr, power_group); 867 &dev_attr_clk_ctl.attr, power_group);
913} 868}
@@ -920,10 +875,10 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
920 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) 875 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
921 return; 876 return;
922 877
923 if (link_state->support_state) 878 if (link_state->aspm_support)
924 sysfs_remove_file_from_group(&pdev->dev.kobj, 879 sysfs_remove_file_from_group(&pdev->dev.kobj,
925 &dev_attr_link_state.attr, power_group); 880 &dev_attr_link_state.attr, power_group);
926 if (link_state->clk_pm_capable) 881 if (link_state->clkpm_capable)
927 sysfs_remove_file_from_group(&pdev->dev.kobj, 882 sysfs_remove_file_from_group(&pdev->dev.kobj,
928 &dev_attr_clk_ctl.attr, power_group); 883 &dev_attr_clk_ctl.attr, power_group);
929} 884}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f1ae2475ffff..40e75f6a5056 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -193,7 +193,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
193 res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN; 193 res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
194 if (type == pci_bar_io) { 194 if (type == pci_bar_io) {
195 l &= PCI_BASE_ADDRESS_IO_MASK; 195 l &= PCI_BASE_ADDRESS_IO_MASK;
196 mask = PCI_BASE_ADDRESS_IO_MASK & 0xffff; 196 mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT;
197 } else { 197 } else {
198 l &= PCI_BASE_ADDRESS_MEM_MASK; 198 l &= PCI_BASE_ADDRESS_MEM_MASK;
199 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; 199 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
@@ -237,6 +237,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
237 dev_printk(KERN_DEBUG, &dev->dev, 237 dev_printk(KERN_DEBUG, &dev->dev,
238 "reg %x 64bit mmio: %pR\n", pos, res); 238 "reg %x 64bit mmio: %pR\n", pos, res);
239 } 239 }
240
241 res->flags |= IORESOURCE_MEM_64;
240 } else { 242 } else {
241 sz = pci_size(l, sz, mask); 243 sz = pci_size(l, sz, mask);
242 244
@@ -287,7 +289,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
287 struct resource *res; 289 struct resource *res;
288 int i; 290 int i;
289 291
290 if (!child->parent) /* It's a host bus, nothing to read */ 292 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
291 return; 293 return;
292 294
293 if (dev->transparent) { 295 if (dev->transparent) {
@@ -362,7 +364,10 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
362 } 364 }
363 } 365 }
364 if (base <= limit) { 366 if (base <= limit) {
365 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH; 367 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
368 IORESOURCE_MEM | IORESOURCE_PREFETCH;
369 if (res->flags & PCI_PREF_RANGE_TYPE_64)
370 res->flags |= IORESOURCE_MEM_64;
366 res->start = base; 371 res->start = base;
367 res->end = limit + 0xfffff; 372 res->end = limit + 0xfffff;
368 dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n", 373 dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n",
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bd4253f93d5a..56552d74abea 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1133,6 +1133,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
1133 switch (dev->subsystem_device) { 1133 switch (dev->subsystem_device) {
1134 case 0x1751: /* M2N notebook */ 1134 case 0x1751: /* M2N notebook */
1135 case 0x1821: /* M5N notebook */ 1135 case 0x1821: /* M5N notebook */
1136 case 0x1897: /* A6L notebook */
1136 asus_hides_smbus = 1; 1137 asus_hides_smbus = 1;
1137 } 1138 }
1138 else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) 1139 else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
@@ -1163,6 +1164,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
1163 switch (dev->subsystem_device) { 1164 switch (dev->subsystem_device) {
1164 case 0x12bc: /* HP D330L */ 1165 case 0x12bc: /* HP D330L */
1165 case 0x12bd: /* HP D530 */ 1166 case 0x12bd: /* HP D530 */
1167 case 0x006a: /* HP Compaq nx9500 */
1166 asus_hides_smbus = 1; 1168 asus_hides_smbus = 1;
1167 } 1169 }
1168 else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB) 1170 else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
@@ -2016,6 +2018,28 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2016 PCI_DEVICE_ID_NX2_5709S, 2018 PCI_DEVICE_ID_NX2_5709S,
2017 quirk_brcm_570x_limit_vpd); 2019 quirk_brcm_570x_limit_vpd);
2018 2020
2021/* Originally in EDAC sources for i82875P:
2022 * Intel tells BIOS developers to hide device 6 which
2023 * configures the overflow device access containing
2024 * the DRBs - this is where we expose device 6.
2025 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
2026 */
2027static void __devinit quirk_unhide_mch_dev6(struct pci_dev *dev)
2028{
2029 u8 reg;
2030
2031 if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) {
2032 dev_info(&dev->dev, "Enabling MCH 'Overflow' Device\n");
2033 pci_write_config_byte(dev, 0xF4, reg | 0x02);
2034 }
2035}
2036
2037DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
2038 quirk_unhide_mch_dev6);
2039DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
2040 quirk_unhide_mch_dev6);
2041
2042
2019#ifdef CONFIG_PCI_MSI 2043#ifdef CONFIG_PCI_MSI
2020/* Some chipsets do not support MSI. We cannot easily rely on setting 2044/* Some chipsets do not support MSI. We cannot easily rely on setting
2021 * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually 2045 * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 86503c14ce7e..176615e7231f 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -32,8 +32,6 @@ static void pci_stop_dev(struct pci_dev *dev)
32 32
33static void pci_destroy_dev(struct pci_dev *dev) 33static void pci_destroy_dev(struct pci_dev *dev)
34{ 34{
35 pci_stop_dev(dev);
36
37 /* Remove the device from the device lists, and prevent any further 35 /* Remove the device from the device lists, and prevent any further
38 * list accesses from this device */ 36 * list accesses from this device */
39 down_write(&pci_bus_sem); 37 down_write(&pci_bus_sem);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 710d4ea69568..e8cb5051c311 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -29,7 +29,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
29 if (pdev->is_pcie) 29 if (pdev->is_pcie)
30 return NULL; 30 return NULL;
31 while (1) { 31 while (1) {
32 if (!pdev->bus->parent) 32 if (pci_is_root_bus(pdev->bus))
33 break; 33 break;
34 pdev = pdev->bus->self; 34 pdev = pdev->bus->self;
35 /* a p2p bridge */ 35 /* a p2p bridge */
@@ -115,36 +115,6 @@ pci_find_next_bus(const struct pci_bus *from)
115 115
116#ifdef CONFIG_PCI_LEGACY 116#ifdef CONFIG_PCI_LEGACY
117/** 117/**
118 * pci_find_slot - locate PCI device from a given PCI slot
119 * @bus: number of PCI bus on which desired PCI device resides
120 * @devfn: encodes number of PCI slot in which the desired PCI
121 * device resides and the logical device number within that slot
122 * in case of multi-function devices.
123 *
124 * Given a PCI bus and slot/function number, the desired PCI device
125 * is located in system global list of PCI devices. If the device
126 * is found, a pointer to its data structure is returned. If no
127 * device is found, %NULL is returned.
128 *
129 * NOTE: Do not use this function any more; use pci_get_slot() instead, as
130 * the PCI device returned by this function can disappear at any moment in
131 * time.
132 */
133struct pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn)
134{
135 struct pci_dev *dev = NULL;
136
137 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
138 if (dev->bus->number == bus && dev->devfn == devfn) {
139 pci_dev_put(dev);
140 return dev;
141 }
142 }
143 return NULL;
144}
145EXPORT_SYMBOL(pci_find_slot);
146
147/**
148 * pci_find_device - begin or continue searching for a PCI device by vendor/device id 118 * pci_find_device - begin or continue searching for a PCI device by vendor/device id
149 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids 119 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
150 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids 120 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index a00f85471b6e..b636e245445d 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -58,7 +58,6 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus)
58 res = list->res; 58 res = list->res;
59 idx = res - &list->dev->resource[0]; 59 idx = res - &list->dev->resource[0];
60 if (pci_assign_resource(list->dev, idx)) { 60 if (pci_assign_resource(list->dev, idx)) {
61 /* FIXME: get rid of this */
62 res->start = 0; 61 res->start = 0;
63 res->end = 0; 62 res->end = 0;
64 res->flags = 0; 63 res->flags = 0;
@@ -143,6 +142,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
143 struct pci_dev *bridge = bus->self; 142 struct pci_dev *bridge = bus->self;
144 struct pci_bus_region region; 143 struct pci_bus_region region;
145 u32 l, bu, lu, io_upper16; 144 u32 l, bu, lu, io_upper16;
145 int pref_mem64;
146 146
147 if (pci_is_enabled(bridge)) 147 if (pci_is_enabled(bridge))
148 return; 148 return;
@@ -198,16 +198,22 @@ static void pci_setup_bridge(struct pci_bus *bus)
198 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0); 198 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
199 199
200 /* Set up PREF base/limit. */ 200 /* Set up PREF base/limit. */
201 pref_mem64 = 0;
201 bu = lu = 0; 202 bu = lu = 0;
202 pcibios_resource_to_bus(bridge, &region, bus->resource[2]); 203 pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
203 if (bus->resource[2]->flags & IORESOURCE_PREFETCH) { 204 if (bus->resource[2]->flags & IORESOURCE_PREFETCH) {
205 int width = 8;
204 l = (region.start >> 16) & 0xfff0; 206 l = (region.start >> 16) & 0xfff0;
205 l |= region.end & 0xfff00000; 207 l |= region.end & 0xfff00000;
206 bu = upper_32_bits(region.start); 208 if (bus->resource[2]->flags & IORESOURCE_MEM_64) {
207 lu = upper_32_bits(region.end); 209 pref_mem64 = 1;
208 dev_info(&bridge->dev, " PREFETCH window: %#016llx-%#016llx\n", 210 bu = upper_32_bits(region.start);
209 (unsigned long long)region.start, 211 lu = upper_32_bits(region.end);
210 (unsigned long long)region.end); 212 width = 16;
213 }
214 dev_info(&bridge->dev, " PREFETCH window: %#0*llx-%#0*llx\n",
215 width, (unsigned long long)region.start,
216 width, (unsigned long long)region.end);
211 } 217 }
212 else { 218 else {
213 l = 0x0000fff0; 219 l = 0x0000fff0;
@@ -215,9 +221,11 @@ static void pci_setup_bridge(struct pci_bus *bus)
215 } 221 }
216 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); 222 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
217 223
218 /* Set the upper 32 bits of PREF base & limit. */ 224 if (pref_mem64) {
219 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); 225 /* Set the upper 32 bits of PREF base & limit. */
220 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); 226 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
227 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
228 }
221 229
222 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); 230 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
223} 231}
@@ -255,8 +263,25 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
255 pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); 263 pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
256 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); 264 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
257 } 265 }
258 if (pmem) 266 if (pmem) {
259 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; 267 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
268 if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64)
269 b_res[2].flags |= IORESOURCE_MEM_64;
270 }
271
272 /* double check if bridge does support 64 bit pref */
273 if (b_res[2].flags & IORESOURCE_MEM_64) {
274 u32 mem_base_hi, tmp;
275 pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
276 &mem_base_hi);
277 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
278 0xffffffff);
279 pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
280 if (!tmp)
281 b_res[2].flags &= ~IORESOURCE_MEM_64;
282 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
283 mem_base_hi);
284 }
260} 285}
261 286
262/* Helper function for sizing routines: find first available 287/* Helper function for sizing routines: find first available
@@ -336,6 +361,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
336 resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */ 361 resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */
337 int order, max_order; 362 int order, max_order;
338 struct resource *b_res = find_free_bus_resource(bus, type); 363 struct resource *b_res = find_free_bus_resource(bus, type);
364 unsigned int mem64_mask = 0;
339 365
340 if (!b_res) 366 if (!b_res)
341 return 0; 367 return 0;
@@ -344,9 +370,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
344 max_order = 0; 370 max_order = 0;
345 size = 0; 371 size = 0;
346 372
373 mem64_mask = b_res->flags & IORESOURCE_MEM_64;
374 b_res->flags &= ~IORESOURCE_MEM_64;
375
347 list_for_each_entry(dev, &bus->devices, bus_list) { 376 list_for_each_entry(dev, &bus->devices, bus_list) {
348 int i; 377 int i;
349 378
350 for (i = 0; i < PCI_NUM_RESOURCES; i++) { 379 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
351 struct resource *r = &dev->resource[i]; 380 struct resource *r = &dev->resource[i];
352 resource_size_t r_size; 381 resource_size_t r_size;
@@ -372,6 +401,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
372 aligns[order] += align; 401 aligns[order] += align;
373 if (order > max_order) 402 if (order > max_order)
374 max_order = order; 403 max_order = order;
404 mem64_mask &= r->flags & IORESOURCE_MEM_64;
375 } 405 }
376 } 406 }
377 407
@@ -396,6 +426,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
396 b_res->start = min_align; 426 b_res->start = min_align;
397 b_res->end = size + min_align - 1; 427 b_res->end = size + min_align - 1;
398 b_res->flags |= IORESOURCE_STARTALIGN; 428 b_res->flags |= IORESOURCE_STARTALIGN;
429 b_res->flags |= mem64_mask;
399 return 1; 430 return 1;
400} 431}
401 432
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 12403516776a..b711fb7181e2 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -135,23 +135,16 @@ void pci_disable_bridge_window(struct pci_dev *dev)
135} 135}
136#endif /* CONFIG_PCI_QUIRKS */ 136#endif /* CONFIG_PCI_QUIRKS */
137 137
138int pci_assign_resource(struct pci_dev *dev, int resno) 138static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
139 int resno)
139{ 140{
140 struct pci_bus *bus = dev->bus;
141 struct resource *res = dev->resource + resno; 141 struct resource *res = dev->resource + resno;
142 resource_size_t size, min, align; 142 resource_size_t size, min, align;
143 int ret; 143 int ret;
144 144
145 size = resource_size(res); 145 size = resource_size(res);
146 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; 146 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
147
148 align = resource_alignment(res); 147 align = resource_alignment(res);
149 if (!align) {
150 dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
151 "alignment) %pR flags %#lx\n",
152 resno, res, res->flags);
153 return -EINVAL;
154 }
155 148
156 /* First, try exact prefetching match.. */ 149 /* First, try exact prefetching match.. */
157 ret = pci_bus_alloc_resource(bus, res, size, align, min, 150 ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -169,10 +162,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
169 pcibios_align_resource, dev); 162 pcibios_align_resource, dev);
170 } 163 }
171 164
172 if (ret) { 165 if (!ret) {
173 dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
174 resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
175 } else {
176 res->flags &= ~IORESOURCE_STARTALIGN; 166 res->flags &= ~IORESOURCE_STARTALIGN;
177 if (resno < PCI_BRIDGE_RESOURCES) 167 if (resno < PCI_BRIDGE_RESOURCES)
178 pci_update_resource(dev, resno); 168 pci_update_resource(dev, resno);
@@ -181,6 +171,39 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
181 return ret; 171 return ret;
182} 172}
183 173
174int pci_assign_resource(struct pci_dev *dev, int resno)
175{
176 struct resource *res = dev->resource + resno;
177 resource_size_t align;
178 struct pci_bus *bus;
179 int ret;
180
181 align = resource_alignment(res);
182 if (!align) {
183 dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
184 "alignment) %pR flags %#lx\n",
185 resno, res, res->flags);
186 return -EINVAL;
187 }
188
189 bus = dev->bus;
190 while ((ret = __pci_assign_resource(bus, dev, resno))) {
191 if (bus->parent && bus->self->transparent)
192 bus = bus->parent;
193 else
194 bus = NULL;
195 if (bus)
196 continue;
197 break;
198 }
199
200 if (ret)
201 dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
202 resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
203
204 return ret;
205}
206
184#if 0 207#if 0
185int pci_assign_resource_fixed(struct pci_dev *dev, int resno) 208int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
186{ 209{
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index fe95ce20bcbd..eddb0748b0ea 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -307,6 +307,45 @@ void pci_destroy_slot(struct pci_slot *slot)
307} 307}
308EXPORT_SYMBOL_GPL(pci_destroy_slot); 308EXPORT_SYMBOL_GPL(pci_destroy_slot);
309 309
310#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
311#include <linux/pci_hotplug.h>
312/**
313 * pci_hp_create_link - create symbolic link to the hotplug driver module.
314 * @slot: struct pci_slot
315 *
316 * Helper function for pci_hotplug_core.c to create symbolic link to
317 * the hotplug driver module.
318 */
319void pci_hp_create_module_link(struct pci_slot *pci_slot)
320{
321 struct hotplug_slot *slot = pci_slot->hotplug;
322 struct kobject *kobj = NULL;
323 int no_warn;
324
325 if (!slot || !slot->ops)
326 return;
327 kobj = kset_find_obj(module_kset, slot->ops->mod_name);
328 if (!kobj)
329 return;
330 no_warn = sysfs_create_link(&pci_slot->kobj, kobj, "module");
331 kobject_put(kobj);
332}
333EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
334
335/**
336 * pci_hp_remove_link - remove symbolic link to the hotplug driver module.
337 * @slot: struct pci_slot
338 *
339 * Helper function for pci_hotplug_core.c to remove symbolic link to
340 * the hotplug driver module.
341 */
342void pci_hp_remove_module_link(struct pci_slot *pci_slot)
343{
344 sysfs_remove_link(&pci_slot->kobj, "module");
345}
346EXPORT_SYMBOL_GPL(pci_hp_remove_module_link);
347#endif
348
310static int pci_slot_init(void) 349static int pci_slot_init(void)
311{ 350{
312 struct kset *pci_bus_kset; 351 struct kset *pci_bus_kset;
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 09a503e5da6a..be2fd6f91639 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -958,12 +958,12 @@ static void acer_rfkill_update(struct work_struct *ignored)
958 958
959 status = get_u32(&state, ACER_CAP_WIRELESS); 959 status = get_u32(&state, ACER_CAP_WIRELESS);
960 if (ACPI_SUCCESS(status)) 960 if (ACPI_SUCCESS(status))
961 rfkill_set_sw_state(wireless_rfkill, !!state); 961 rfkill_set_sw_state(wireless_rfkill, !state);
962 962
963 if (has_cap(ACER_CAP_BLUETOOTH)) { 963 if (has_cap(ACER_CAP_BLUETOOTH)) {
964 status = get_u32(&state, ACER_CAP_BLUETOOTH); 964 status = get_u32(&state, ACER_CAP_BLUETOOTH);
965 if (ACPI_SUCCESS(status)) 965 if (ACPI_SUCCESS(status))
966 rfkill_set_sw_state(bluetooth_rfkill, !!state); 966 rfkill_set_sw_state(bluetooth_rfkill, !state);
967 } 967 }
968 968
969 schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ)); 969 schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 03bf522bd7ab..8153b3e59189 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -180,6 +180,7 @@ static struct key_entry eeepc_keymap[] = {
180 */ 180 */
181static int eeepc_hotk_add(struct acpi_device *device); 181static int eeepc_hotk_add(struct acpi_device *device);
182static int eeepc_hotk_remove(struct acpi_device *device, int type); 182static int eeepc_hotk_remove(struct acpi_device *device, int type);
183static int eeepc_hotk_resume(struct acpi_device *device);
183 184
184static const struct acpi_device_id eeepc_device_ids[] = { 185static const struct acpi_device_id eeepc_device_ids[] = {
185 {EEEPC_HOTK_HID, 0}, 186 {EEEPC_HOTK_HID, 0},
@@ -194,6 +195,7 @@ static struct acpi_driver eeepc_hotk_driver = {
194 .ops = { 195 .ops = {
195 .add = eeepc_hotk_add, 196 .add = eeepc_hotk_add,
196 .remove = eeepc_hotk_remove, 197 .remove = eeepc_hotk_remove,
198 .resume = eeepc_hotk_resume,
197 }, 199 },
198}; 200};
199 201
@@ -512,15 +514,12 @@ static int notify_brn(void)
512 return -1; 514 return -1;
513} 515}
514 516
515static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) 517static void eeepc_rfkill_hotplug(void)
516{ 518{
517 struct pci_dev *dev; 519 struct pci_dev *dev;
518 struct pci_bus *bus = pci_find_bus(0, 1); 520 struct pci_bus *bus = pci_find_bus(0, 1);
519 bool blocked; 521 bool blocked;
520 522
521 if (event != ACPI_NOTIFY_BUS_CHECK)
522 return;
523
524 if (!bus) { 523 if (!bus) {
525 printk(EEEPC_WARNING "Unable to find PCI bus 1?\n"); 524 printk(EEEPC_WARNING "Unable to find PCI bus 1?\n");
526 return; 525 return;
@@ -551,6 +550,14 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
551 rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, blocked); 550 rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, blocked);
552} 551}
553 552
553static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
554{
555 if (event != ACPI_NOTIFY_BUS_CHECK)
556 return;
557
558 eeepc_rfkill_hotplug();
559}
560
554static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data) 561static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
555{ 562{
556 static struct key_entry *key; 563 static struct key_entry *key;
@@ -675,8 +682,8 @@ static int eeepc_hotk_add(struct acpi_device *device)
675 if (!ehotk->eeepc_wlan_rfkill) 682 if (!ehotk->eeepc_wlan_rfkill)
676 goto wlan_fail; 683 goto wlan_fail;
677 684
678 rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, 685 rfkill_init_sw_state(ehotk->eeepc_wlan_rfkill,
679 get_acpi(CM_ASL_WLAN) != 1); 686 get_acpi(CM_ASL_WLAN) != 1);
680 result = rfkill_register(ehotk->eeepc_wlan_rfkill); 687 result = rfkill_register(ehotk->eeepc_wlan_rfkill);
681 if (result) 688 if (result)
682 goto wlan_fail; 689 goto wlan_fail;
@@ -693,8 +700,8 @@ static int eeepc_hotk_add(struct acpi_device *device)
693 if (!ehotk->eeepc_bluetooth_rfkill) 700 if (!ehotk->eeepc_bluetooth_rfkill)
694 goto bluetooth_fail; 701 goto bluetooth_fail;
695 702
696 rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill, 703 rfkill_init_sw_state(ehotk->eeepc_bluetooth_rfkill,
697 get_acpi(CM_ASL_BLUETOOTH) != 1); 704 get_acpi(CM_ASL_BLUETOOTH) != 1);
698 result = rfkill_register(ehotk->eeepc_bluetooth_rfkill); 705 result = rfkill_register(ehotk->eeepc_bluetooth_rfkill);
699 if (result) 706 if (result)
700 goto bluetooth_fail; 707 goto bluetooth_fail;
@@ -734,6 +741,33 @@ static int eeepc_hotk_remove(struct acpi_device *device, int type)
734 return 0; 741 return 0;
735} 742}
736 743
744static int eeepc_hotk_resume(struct acpi_device *device)
745{
746 if (ehotk->eeepc_wlan_rfkill) {
747 bool wlan;
748
749 /* Workaround - it seems that _PTS disables the wireless
750 without notification or changing the value read by WLAN.
751 Normally this is fine because the correct value is restored
752 from the non-volatile storage on resume, but we need to do
753 it ourself if case suspend is aborted, or we lose wireless.
754 */
755 wlan = get_acpi(CM_ASL_WLAN);
756 set_acpi(CM_ASL_WLAN, wlan);
757
758 rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill,
759 wlan != 1);
760
761 eeepc_rfkill_hotplug();
762 }
763
764 if (ehotk->eeepc_bluetooth_rfkill)
765 rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill,
766 get_acpi(CM_ASL_BLUETOOTH) != 1);
767
768 return 0;
769}
770
737/* 771/*
738 * Hwmon 772 * Hwmon
739 */ 773 */
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 86e958539f46..40d64c03278c 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1163,8 +1163,8 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
1163{ 1163{
1164 struct tpacpi_rfk *atp_rfk; 1164 struct tpacpi_rfk *atp_rfk;
1165 int res; 1165 int res;
1166 bool initial_sw_state = false; 1166 bool sw_state = false;
1167 int initial_sw_status; 1167 int sw_status;
1168 1168
1169 BUG_ON(id >= TPACPI_RFK_SW_MAX || tpacpi_rfkill_switches[id]); 1169 BUG_ON(id >= TPACPI_RFK_SW_MAX || tpacpi_rfkill_switches[id]);
1170 1170
@@ -1185,17 +1185,17 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
1185 atp_rfk->id = id; 1185 atp_rfk->id = id;
1186 atp_rfk->ops = tp_rfkops; 1186 atp_rfk->ops = tp_rfkops;
1187 1187
1188 initial_sw_status = (tp_rfkops->get_status)(); 1188 sw_status = (tp_rfkops->get_status)();
1189 if (initial_sw_status < 0) { 1189 if (sw_status < 0) {
1190 printk(TPACPI_ERR 1190 printk(TPACPI_ERR
1191 "failed to read initial state for %s, error %d\n", 1191 "failed to read initial state for %s, error %d\n",
1192 name, initial_sw_status); 1192 name, sw_status);
1193 } else { 1193 } else {
1194 initial_sw_state = (initial_sw_status == TPACPI_RFK_RADIO_OFF); 1194 sw_state = (sw_status == TPACPI_RFK_RADIO_OFF);
1195 if (set_default) { 1195 if (set_default) {
1196 /* try to keep the initial state, since we ask the 1196 /* try to keep the initial state, since we ask the
1197 * firmware to preserve it across S5 in NVRAM */ 1197 * firmware to preserve it across S5 in NVRAM */
1198 rfkill_set_sw_state(atp_rfk->rfkill, initial_sw_state); 1198 rfkill_init_sw_state(atp_rfk->rfkill, sw_state);
1199 } 1199 }
1200 } 1200 }
1201 rfkill_set_hw_state(atp_rfk->rfkill, tpacpi_rfk_check_hwblock_state()); 1201 rfkill_set_hw_state(atp_rfk->rfkill, tpacpi_rfk_check_hwblock_state());
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e5b84db0aa03..749836668655 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -470,7 +470,7 @@ static int dasd_decrease_state(struct dasd_device *device)
470 */ 470 */
471static void dasd_change_state(struct dasd_device *device) 471static void dasd_change_state(struct dasd_device *device)
472{ 472{
473 int rc; 473 int rc;
474 474
475 if (device->state == device->target) 475 if (device->state == device->target)
476 /* Already where we want to go today... */ 476 /* Already where we want to go today... */
@@ -479,8 +479,10 @@ static void dasd_change_state(struct dasd_device *device)
479 rc = dasd_increase_state(device); 479 rc = dasd_increase_state(device);
480 else 480 else
481 rc = dasd_decrease_state(device); 481 rc = dasd_decrease_state(device);
482 if (rc && rc != -EAGAIN) 482 if (rc == -EAGAIN)
483 device->target = device->state; 483 return;
484 if (rc)
485 device->target = device->state;
484 486
485 if (device->state == device->target) { 487 if (device->state == device->target) {
486 wake_up(&dasd_init_waitq); 488 wake_up(&dasd_init_waitq);
@@ -2503,15 +2505,25 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
2503 if (IS_ERR(device)) 2505 if (IS_ERR(device))
2504 return PTR_ERR(device); 2506 return PTR_ERR(device);
2505 2507
2508 /* allow new IO again */
2509 device->stopped &= ~DASD_STOPPED_PM;
2510 device->stopped &= ~DASD_UNRESUMED_PM;
2511
2506 dasd_schedule_device_bh(device); 2512 dasd_schedule_device_bh(device);
2507 if (device->block) 2513 if (device->block)
2508 dasd_schedule_block_bh(device->block); 2514 dasd_schedule_block_bh(device->block);
2509 2515
2510 if (device->discipline->restore) 2516 if (device->discipline->restore)
2511 rc = device->discipline->restore(device); 2517 rc = device->discipline->restore(device);
2518 if (rc)
2519 /*
2520 * if the resume failed for the DASD we put it in
2521 * an UNRESUMED stop state
2522 */
2523 device->stopped |= DASD_UNRESUMED_PM;
2512 2524
2513 dasd_put_device(device); 2525 dasd_put_device(device);
2514 return rc; 2526 return 0;
2515} 2527}
2516EXPORT_SYMBOL_GPL(dasd_generic_restore_device); 2528EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
2517 2529
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 1c28ec3e4ccb..f8b1f04f26b8 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3243,9 +3243,6 @@ int dasd_eckd_restore_device(struct dasd_device *device)
3243 int is_known, rc; 3243 int is_known, rc;
3244 struct dasd_uid temp_uid; 3244 struct dasd_uid temp_uid;
3245 3245
3246 /* allow new IO again */
3247 device->stopped &= ~DASD_STOPPED_PM;
3248
3249 private = (struct dasd_eckd_private *) device->private; 3246 private = (struct dasd_eckd_private *) device->private;
3250 3247
3251 /* Read Configuration Data */ 3248 /* Read Configuration Data */
@@ -3295,12 +3292,7 @@ int dasd_eckd_restore_device(struct dasd_device *device)
3295 return 0; 3292 return 0;
3296 3293
3297out_err: 3294out_err:
3298 /* 3295 return -1;
3299 * if the resume failed for the DASD we put it in
3300 * an UNRESUMED stop state
3301 */
3302 device->stopped |= DASD_UNRESUMED_PM;
3303 return 0;
3304} 3296}
3305 3297
3306static struct ccw_driver dasd_eckd_driver = { 3298static struct ccw_driver dasd_eckd_driver = {
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 04dc734805c6..21639d6c996f 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -20,10 +20,7 @@
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/reboot.h> 22#include <linux/reboot.h>
23
24#include <linux/slab.h> 23#include <linux/slab.h>
25#include <linux/bootmem.h>
26
27#include <asm/ccwdev.h> 24#include <asm/ccwdev.h>
28#include <asm/cio.h> 25#include <asm/cio.h>
29#include <asm/io.h> 26#include <asm/io.h>
@@ -735,7 +732,7 @@ static int raw3215_pm_stop(struct ccw_device *cdev)
735 unsigned long flags; 732 unsigned long flags;
736 733
737 /* Empty the output buffer, then prevent new I/O. */ 734 /* Empty the output buffer, then prevent new I/O. */
738 raw = cdev->dev.driver_data; 735 raw = dev_get_drvdata(&cdev->dev);
739 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); 736 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
740 raw3215_make_room(raw, RAW3215_BUFFER_SIZE); 737 raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
741 raw->flags |= RAW3215_FROZEN; 738 raw->flags |= RAW3215_FROZEN;
@@ -749,7 +746,7 @@ static int raw3215_pm_start(struct ccw_device *cdev)
749 unsigned long flags; 746 unsigned long flags;
750 747
751 /* Allow I/O again and flush output buffer. */ 748 /* Allow I/O again and flush output buffer. */
752 raw = cdev->dev.driver_data; 749 raw = dev_get_drvdata(&cdev->dev);
753 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); 750 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
754 raw->flags &= ~RAW3215_FROZEN; 751 raw->flags &= ~RAW3215_FROZEN;
755 raw->flags |= RAW3215_FLUSHING; 752 raw->flags |= RAW3215_FLUSHING;
@@ -883,7 +880,7 @@ static int __init con3215_init(void)
883 raw3215_freelist = NULL; 880 raw3215_freelist = NULL;
884 spin_lock_init(&raw3215_freelist_lock); 881 spin_lock_init(&raw3215_freelist_lock);
885 for (i = 0; i < NR_3215_REQ; i++) { 882 for (i = 0; i < NR_3215_REQ; i++) {
886 req = (struct raw3215_req *) alloc_bootmem_low(sizeof(struct raw3215_req)); 883 req = kzalloc(sizeof(struct raw3215_req), GFP_KERNEL | GFP_DMA);
887 req->next = raw3215_freelist; 884 req->next = raw3215_freelist;
888 raw3215_freelist = req; 885 raw3215_freelist = req;
889 } 886 }
@@ -893,10 +890,9 @@ static int __init con3215_init(void)
893 return -ENODEV; 890 return -ENODEV;
894 891
895 raw3215[0] = raw = (struct raw3215_info *) 892 raw3215[0] = raw = (struct raw3215_info *)
896 alloc_bootmem_low(sizeof(struct raw3215_info)); 893 kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA);
897 memset(raw, 0, sizeof(struct raw3215_info)); 894 raw->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
898 raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE); 895 raw->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
899 raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
900 raw->cdev = cdev; 896 raw->cdev = cdev;
901 dev_set_drvdata(&cdev->dev, raw); 897 dev_set_drvdata(&cdev->dev, raw);
902 cdev->handler = raw3215_irq; 898 cdev->handler = raw3215_irq;
@@ -906,9 +902,9 @@ static int __init con3215_init(void)
906 902
907 /* Request the console irq */ 903 /* Request the console irq */
908 if (raw3215_startup(raw) != 0) { 904 if (raw3215_startup(raw) != 0) {
909 free_bootmem((unsigned long) raw->inbuf, RAW3215_INBUF_SIZE); 905 kfree(raw->inbuf);
910 free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE); 906 kfree(raw->buffer);
911 free_bootmem((unsigned long) raw, sizeof(struct raw3215_info)); 907 kfree(raw);
912 raw3215[0] = NULL; 908 raw3215[0] = NULL;
913 return -ENODEV; 909 return -ENODEV;
914 } 910 }
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 44d02e371c04..bb838bdf829d 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -7,7 +7,6 @@
7 * Copyright IBM Corp. 2003, 2009 7 * Copyright IBM Corp. 2003, 2009
8 */ 8 */
9 9
10#include <linux/bootmem.h>
11#include <linux/console.h> 10#include <linux/console.h>
12#include <linux/init.h> 11#include <linux/init.h>
13#include <linux/interrupt.h> 12#include <linux/interrupt.h>
@@ -600,16 +599,14 @@ con3270_init(void)
600 if (IS_ERR(rp)) 599 if (IS_ERR(rp))
601 return PTR_ERR(rp); 600 return PTR_ERR(rp);
602 601
603 condev = (struct con3270 *) alloc_bootmem_low(sizeof(struct con3270)); 602 condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA);
604 memset(condev, 0, sizeof(struct con3270));
605 condev->view.dev = rp; 603 condev->view.dev = rp;
606 604
607 condev->read = raw3270_request_alloc_bootmem(0); 605 condev->read = raw3270_request_alloc(0);
608 condev->read->callback = con3270_read_callback; 606 condev->read->callback = con3270_read_callback;
609 condev->read->callback_data = condev; 607 condev->read->callback_data = condev;
610 condev->write = 608 condev->write = raw3270_request_alloc(CON3270_OUTPUT_BUFFER_SIZE);
611 raw3270_request_alloc_bootmem(CON3270_OUTPUT_BUFFER_SIZE); 609 condev->kreset = raw3270_request_alloc(1);
612 condev->kreset = raw3270_request_alloc_bootmem(1);
613 610
614 INIT_LIST_HEAD(&condev->lines); 611 INIT_LIST_HEAD(&condev->lines);
615 INIT_LIST_HEAD(&condev->update); 612 INIT_LIST_HEAD(&condev->update);
@@ -623,7 +620,7 @@ con3270_init(void)
623 620
624 INIT_LIST_HEAD(&condev->freemem); 621 INIT_LIST_HEAD(&condev->freemem);
625 for (i = 0; i < CON3270_STRING_PAGES; i++) { 622 for (i = 0; i < CON3270_STRING_PAGES; i++) {
626 cbuf = (void *) alloc_bootmem_low_pages(PAGE_SIZE); 623 cbuf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
627 add_string_memory(&condev->freemem, cbuf, PAGE_SIZE); 624 add_string_memory(&condev->freemem, cbuf, PAGE_SIZE);
628 } 625 }
629 condev->cline = alloc_string(&condev->freemem, condev->view.cols); 626 condev->cline = alloc_string(&condev->freemem, condev->view.cols);
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 75a8831eebbc..7892550d7932 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -320,7 +320,7 @@ static int mon_open(struct inode *inode, struct file *filp)
320 goto out_path; 320 goto out_path;
321 } 321 }
322 filp->private_data = monpriv; 322 filp->private_data = monpriv;
323 monreader_device->driver_data = monpriv; 323 dev_set_drvdata(&monreader_device, monpriv);
324 unlock_kernel(); 324 unlock_kernel();
325 return nonseekable_open(inode, filp); 325 return nonseekable_open(inode, filp);
326 326
@@ -463,7 +463,7 @@ static struct miscdevice mon_dev = {
463 *****************************************************************************/ 463 *****************************************************************************/
464static int monreader_freeze(struct device *dev) 464static int monreader_freeze(struct device *dev)
465{ 465{
466 struct mon_private *monpriv = dev->driver_data; 466 struct mon_private *monpriv = dev_get_drvdata(&dev);
467 int rc; 467 int rc;
468 468
469 if (!monpriv) 469 if (!monpriv)
@@ -487,7 +487,7 @@ static int monreader_freeze(struct device *dev)
487 487
488static int monreader_thaw(struct device *dev) 488static int monreader_thaw(struct device *dev)
489{ 489{
490 struct mon_private *monpriv = dev->driver_data; 490 struct mon_private *monpriv = dev_get_drvdata(dev);
491 int rc; 491 int rc;
492 492
493 if (!monpriv) 493 if (!monpriv)
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index acab7b2dfe8a..d6a022f55e92 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -7,7 +7,6 @@
7 * Copyright IBM Corp. 2003, 2009 7 * Copyright IBM Corp. 2003, 2009
8 */ 8 */
9 9
10#include <linux/bootmem.h>
11#include <linux/module.h> 10#include <linux/module.h>
12#include <linux/err.h> 11#include <linux/err.h>
13#include <linux/init.h> 12#include <linux/init.h>
@@ -143,33 +142,6 @@ raw3270_request_alloc(size_t size)
143 return rq; 142 return rq;
144} 143}
145 144
146#ifdef CONFIG_TN3270_CONSOLE
147/*
148 * Allocate a new 3270 ccw request from bootmem. Only works very
149 * early in the boot process. Only con3270.c should be using this.
150 */
151struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size)
152{
153 struct raw3270_request *rq;
154
155 rq = alloc_bootmem_low(sizeof(struct raw3270));
156
157 /* alloc output buffer. */
158 if (size > 0)
159 rq->buffer = alloc_bootmem_low(size);
160 rq->size = size;
161 INIT_LIST_HEAD(&rq->list);
162
163 /*
164 * Setup ccw.
165 */
166 rq->ccw.cda = __pa(rq->buffer);
167 rq->ccw.flags = CCW_FLAG_SLI;
168
169 return rq;
170}
171#endif
172
173/* 145/*
174 * Free 3270 ccw request 146 * Free 3270 ccw request
175 */ 147 */
@@ -846,8 +818,8 @@ struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
846 char *ascebc; 818 char *ascebc;
847 int rc; 819 int rc;
848 820
849 rp = (struct raw3270 *) alloc_bootmem_low(sizeof(struct raw3270)); 821 rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
850 ascebc = (char *) alloc_bootmem(256); 822 ascebc = kzalloc(256, GFP_KERNEL);
851 rc = raw3270_setup_device(cdev, rp, ascebc); 823 rc = raw3270_setup_device(cdev, rp, ascebc);
852 if (rc) 824 if (rc)
853 return ERR_PTR(rc); 825 return ERR_PTR(rc);
@@ -1350,7 +1322,7 @@ static int raw3270_pm_stop(struct ccw_device *cdev)
1350 struct raw3270_view *view; 1322 struct raw3270_view *view;
1351 unsigned long flags; 1323 unsigned long flags;
1352 1324
1353 rp = cdev->dev.driver_data; 1325 rp = dev_get_drvdata(&cdev->dev);
1354 if (!rp) 1326 if (!rp)
1355 return 0; 1327 return 0;
1356 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); 1328 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
@@ -1376,7 +1348,7 @@ static int raw3270_pm_start(struct ccw_device *cdev)
1376 struct raw3270 *rp; 1348 struct raw3270 *rp;
1377 unsigned long flags; 1349 unsigned long flags;
1378 1350
1379 rp = cdev->dev.driver_data; 1351 rp = dev_get_drvdata(&cdev->dev);
1380 if (!rp) 1352 if (!rp)
1381 return 0; 1353 return 0;
1382 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); 1354 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 336811a77672..ad698d30cb3b 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -11,7 +11,6 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/timer.h> 12#include <linux/timer.h>
13#include <linux/jiffies.h> 13#include <linux/jiffies.h>
14#include <linux/bootmem.h>
15#include <linux/termios.h> 14#include <linux/termios.h>
16#include <linux/err.h> 15#include <linux/err.h>
17#include <linux/reboot.h> 16#include <linux/reboot.h>
@@ -110,7 +109,7 @@ static void sclp_console_sync_queue(void)
110 109
111 spin_lock_irqsave(&sclp_con_lock, flags); 110 spin_lock_irqsave(&sclp_con_lock, flags);
112 if (timer_pending(&sclp_con_timer)) 111 if (timer_pending(&sclp_con_timer))
113 del_timer_sync(&sclp_con_timer); 112 del_timer(&sclp_con_timer);
114 while (sclp_con_queue_running) { 113 while (sclp_con_queue_running) {
115 spin_unlock_irqrestore(&sclp_con_lock, flags); 114 spin_unlock_irqrestore(&sclp_con_lock, flags);
116 sclp_sync_wait(); 115 sclp_sync_wait();
@@ -298,8 +297,8 @@ sclp_console_init(void)
298 /* Allocate pages for output buffering */ 297 /* Allocate pages for output buffering */
299 INIT_LIST_HEAD(&sclp_con_pages); 298 INIT_LIST_HEAD(&sclp_con_pages);
300 for (i = 0; i < MAX_CONSOLE_PAGES; i++) { 299 for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
301 page = alloc_bootmem_low_pages(PAGE_SIZE); 300 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
302 list_add_tail((struct list_head *) page, &sclp_con_pages); 301 list_add_tail(page, &sclp_con_pages);
303 } 302 }
304 INIT_LIST_HEAD(&sclp_con_outqueue); 303 INIT_LIST_HEAD(&sclp_con_outqueue);
305 spin_lock_init(&sclp_con_lock); 304 spin_lock_init(&sclp_con_lock);
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 5518e24946aa..178724f2a4c3 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -20,7 +20,6 @@
20#include <linux/major.h> 20#include <linux/major.h>
21#include <linux/console.h> 21#include <linux/console.h>
22#include <linux/kdev_t.h> 22#include <linux/kdev_t.h>
23#include <linux/bootmem.h>
24#include <linux/interrupt.h> 23#include <linux/interrupt.h>
25#include <linux/init.h> 24#include <linux/init.h>
26#include <linux/reboot.h> 25#include <linux/reboot.h>
@@ -601,10 +600,7 @@ static void __init __sclp_vt220_free_pages(void)
601 600
602 list_for_each_safe(page, p, &sclp_vt220_empty) { 601 list_for_each_safe(page, p, &sclp_vt220_empty) {
603 list_del(page); 602 list_del(page);
604 if (slab_is_available()) 603 free_page((unsigned long) page);
605 free_page((unsigned long) page);
606 else
607 free_bootmem((unsigned long) page, PAGE_SIZE);
608 } 604 }
609} 605}
610 606
@@ -640,16 +636,12 @@ static int __init __sclp_vt220_init(int num_pages)
640 sclp_vt220_flush_later = 0; 636 sclp_vt220_flush_later = 0;
641 637
642 /* Allocate pages for output buffering */ 638 /* Allocate pages for output buffering */
639 rc = -ENOMEM;
643 for (i = 0; i < num_pages; i++) { 640 for (i = 0; i < num_pages; i++) {
644 if (slab_is_available()) 641 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
645 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 642 if (!page)
646 else
647 page = alloc_bootmem_low_pages(PAGE_SIZE);
648 if (!page) {
649 rc = -ENOMEM;
650 goto out; 643 goto out;
651 } 644 list_add_tail(page, &sclp_vt220_empty);
652 list_add_tail((struct list_head *) page, &sclp_vt220_empty);
653 } 645 }
654 rc = sclp_register(&sclp_vt220_register); 646 rc = sclp_register(&sclp_vt220_register);
655out: 647out:
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 595aa04cfd01..1d420d947596 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -396,7 +396,7 @@ int tape_generic_pm_suspend(struct ccw_device *cdev)
396{ 396{
397 struct tape_device *device; 397 struct tape_device *device;
398 398
399 device = cdev->dev.driver_data; 399 device = dev_get_drvdata(&cdev->dev);
400 if (!device) { 400 if (!device) {
401 return -ENODEV; 401 return -ENODEV;
402 } 402 }
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 411cfa3c7719..c20a4fe6da51 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -663,7 +663,7 @@ static struct attribute *vmlogrdr_attrs[] = {
663static int vmlogrdr_pm_prepare(struct device *dev) 663static int vmlogrdr_pm_prepare(struct device *dev)
664{ 664{
665 int rc; 665 int rc;
666 struct vmlogrdr_priv_t *priv = dev->driver_data; 666 struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
667 667
668 rc = 0; 668 rc = 0;
669 if (priv) { 669 if (priv) {
@@ -753,7 +753,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
753 dev->bus = &iucv_bus; 753 dev->bus = &iucv_bus;
754 dev->parent = iucv_root; 754 dev->parent = iucv_root;
755 dev->driver = &vmlogrdr_driver; 755 dev->driver = &vmlogrdr_driver;
756 dev->driver_data = priv; 756 dev_set_drvdata(dev, priv);
757 /* 757 /*
758 * The release function could be called after the 758 * The release function could be called after the
759 * module has been unloaded. It's _only_ task is to 759 * module has been unloaded. It's _only_ task is to
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 7d9e67cb6471..31b902e94f7b 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -170,7 +170,7 @@ static void urdev_put(struct urdev *urd)
170 */ 170 */
171static int ur_pm_suspend(struct ccw_device *cdev) 171static int ur_pm_suspend(struct ccw_device *cdev)
172{ 172{
173 struct urdev *urd = cdev->dev.driver_data; 173 struct urdev *urd = dev_get_drvdata(&cdev->dev);
174 174
175 TRACE("ur_pm_suspend: cdev=%p\n", cdev); 175 TRACE("ur_pm_suspend: cdev=%p\n", cdev);
176 if (urd->open_flag) { 176 if (urd->open_flag) {
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 13bcb8114388..b1241f8fae88 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -351,15 +351,6 @@ static inline unsigned long long get_usecs(void)
351 ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) 351 ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
352 352
353/* prototypes for thin interrupt */ 353/* prototypes for thin interrupt */
354void qdio_sync_after_thinint(struct qdio_q *q);
355int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state,
356 int auto_ack);
357void qdio_check_outbound_after_thinint(struct qdio_q *q);
358int qdio_inbound_q_moved(struct qdio_q *q);
359void qdio_kick_handler(struct qdio_q *q);
360void qdio_stop_polling(struct qdio_q *q);
361int qdio_siga_sync_q(struct qdio_q *q);
362
363void qdio_setup_thinint(struct qdio_irq *irq_ptr); 354void qdio_setup_thinint(struct qdio_irq *irq_ptr);
364int qdio_establish_thinint(struct qdio_irq *irq_ptr); 355int qdio_establish_thinint(struct qdio_irq *irq_ptr);
365void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); 356void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
@@ -392,4 +383,6 @@ void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
392int qdio_setup_init(void); 383int qdio_setup_init(void);
393void qdio_setup_exit(void); 384void qdio_setup_exit(void);
394 385
386int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
387 unsigned char *state);
395#endif /* _CIO_QDIO_H */ 388#endif /* _CIO_QDIO_H */
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index e3434b34f86c..b8626d4df116 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -70,9 +70,8 @@ static int qstat_show(struct seq_file *m, void *v)
70 seq_printf(m, "slsb buffer states:\n"); 70 seq_printf(m, "slsb buffer states:\n");
71 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); 71 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
72 72
73 qdio_siga_sync_q(q);
74 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { 73 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
75 get_buf_state(q, i, &state, 0); 74 debug_get_buf_state(q, i, &state);
76 switch (state) { 75 switch (state) {
77 case SLSB_P_INPUT_NOT_INIT: 76 case SLSB_P_INPUT_NOT_INIT:
78 case SLSB_P_OUTPUT_NOT_INIT: 77 case SLSB_P_OUTPUT_NOT_INIT:
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index d79cf5bf0e62..0038750ad945 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -231,8 +231,8 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
231 return i; 231 return i;
232} 232}
233 233
234inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, 234static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
235 unsigned char *state, int auto_ack) 235 unsigned char *state, int auto_ack)
236{ 236{
237 return get_buf_states(q, bufnr, state, 1, auto_ack); 237 return get_buf_states(q, bufnr, state, 1, auto_ack);
238} 238}
@@ -276,7 +276,7 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
276 QDIO_MAX_BUFFERS_PER_Q); 276 QDIO_MAX_BUFFERS_PER_Q);
277} 277}
278 278
279static int qdio_siga_sync(struct qdio_q *q, unsigned int output, 279static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
280 unsigned int input) 280 unsigned int input)
281{ 281{
282 int cc; 282 int cc;
@@ -293,7 +293,7 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
293 return cc; 293 return cc;
294} 294}
295 295
296inline int qdio_siga_sync_q(struct qdio_q *q) 296static inline int qdio_siga_sync_q(struct qdio_q *q)
297{ 297{
298 if (q->is_input_q) 298 if (q->is_input_q)
299 return qdio_siga_sync(q, 0, q->mask); 299 return qdio_siga_sync(q, 0, q->mask);
@@ -358,8 +358,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
358 return cc; 358 return cc;
359} 359}
360 360
361/* called from thinint inbound handler */ 361static inline void qdio_sync_after_thinint(struct qdio_q *q)
362void qdio_sync_after_thinint(struct qdio_q *q)
363{ 362{
364 if (pci_out_supported(q)) { 363 if (pci_out_supported(q)) {
365 if (need_siga_sync_thinint(q)) 364 if (need_siga_sync_thinint(q))
@@ -370,7 +369,14 @@ void qdio_sync_after_thinint(struct qdio_q *q)
370 qdio_siga_sync_q(q); 369 qdio_siga_sync_q(q);
371} 370}
372 371
373inline void qdio_stop_polling(struct qdio_q *q) 372int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
373 unsigned char *state)
374{
375 qdio_siga_sync_q(q);
376 return get_buf_states(q, bufnr, state, 1, 0);
377}
378
379static inline void qdio_stop_polling(struct qdio_q *q)
374{ 380{
375 if (!q->u.in.polling) 381 if (!q->u.in.polling)
376 return; 382 return;
@@ -449,13 +455,6 @@ static inline void inbound_primed(struct qdio_q *q, int count)
449 count--; 455 count--;
450 if (!count) 456 if (!count)
451 return; 457 return;
452
453 /*
454 * Need to change all PRIMED buffers to NOT_INIT, otherwise
455 * we're loosing initiative in the thinint code.
456 */
457 set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT,
458 count);
459} 458}
460 459
461static int get_inbound_buffer_frontier(struct qdio_q *q) 460static int get_inbound_buffer_frontier(struct qdio_q *q)
@@ -470,19 +469,13 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
470 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); 469 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
471 stop = add_buf(q->first_to_check, count); 470 stop = add_buf(q->first_to_check, count);
472 471
473 /*
474 * No siga sync here, as a PCI or we after a thin interrupt
475 * will sync the queues.
476 */
477
478 /* need to set count to 1 for non-qebsm */
479 if (!is_qebsm(q))
480 count = 1;
481
482check_next:
483 if (q->first_to_check == stop) 472 if (q->first_to_check == stop)
484 goto out; 473 goto out;
485 474
475 /*
476 * No siga sync here, as a PCI or we after a thin interrupt
477 * already sync'ed the queues.
478 */
486 count = get_buf_states(q, q->first_to_check, &state, count, 1); 479 count = get_buf_states(q, q->first_to_check, &state, count, 1);
487 if (!count) 480 if (!count)
488 goto out; 481 goto out;
@@ -490,14 +483,9 @@ check_next:
490 switch (state) { 483 switch (state) {
491 case SLSB_P_INPUT_PRIMED: 484 case SLSB_P_INPUT_PRIMED:
492 inbound_primed(q, count); 485 inbound_primed(q, count);
493 /*
494 * No siga-sync needed for non-qebsm here, as the inbound queue
495 * will be synced on the next siga-r, resp.
496 * tiqdio_is_inbound_q_done will do the siga-sync.
497 */
498 q->first_to_check = add_buf(q->first_to_check, count); 486 q->first_to_check = add_buf(q->first_to_check, count);
499 atomic_sub(count, &q->nr_buf_used); 487 atomic_sub(count, &q->nr_buf_used);
500 goto check_next; 488 break;
501 case SLSB_P_INPUT_ERROR: 489 case SLSB_P_INPUT_ERROR:
502 announce_buffer_error(q, count); 490 announce_buffer_error(q, count);
503 /* process the buffer, the upper layer will take care of it */ 491 /* process the buffer, the upper layer will take care of it */
@@ -516,7 +504,7 @@ out:
516 return q->first_to_check; 504 return q->first_to_check;
517} 505}
518 506
519int qdio_inbound_q_moved(struct qdio_q *q) 507static int qdio_inbound_q_moved(struct qdio_q *q)
520{ 508{
521 int bufnr; 509 int bufnr;
522 510
@@ -524,35 +512,32 @@ int qdio_inbound_q_moved(struct qdio_q *q)
524 512
525 if ((bufnr != q->last_move) || q->qdio_error) { 513 if ((bufnr != q->last_move) || q->qdio_error) {
526 q->last_move = bufnr; 514 q->last_move = bufnr;
527 if (!need_siga_sync(q) && !pci_out_supported(q)) 515 if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
528 q->u.in.timestamp = get_usecs(); 516 q->u.in.timestamp = get_usecs();
529
530 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
531 return 1; 517 return 1;
532 } else 518 } else
533 return 0; 519 return 0;
534} 520}
535 521
536static int qdio_inbound_q_done(struct qdio_q *q) 522static inline int qdio_inbound_q_done(struct qdio_q *q)
537{ 523{
538 unsigned char state = 0; 524 unsigned char state = 0;
539 525
540 if (!atomic_read(&q->nr_buf_used)) 526 if (!atomic_read(&q->nr_buf_used))
541 return 1; 527 return 1;
542 528
543 /*
544 * We need that one for synchronization with the adapter, as it
545 * does a kind of PCI avoidance.
546 */
547 qdio_siga_sync_q(q); 529 qdio_siga_sync_q(q);
548
549 get_buf_state(q, q->first_to_check, &state, 0); 530 get_buf_state(q, q->first_to_check, &state, 0);
531
550 if (state == SLSB_P_INPUT_PRIMED) 532 if (state == SLSB_P_INPUT_PRIMED)
551 /* we got something to do */ 533 /* more work coming */
552 return 0; 534 return 0;
553 535
554 /* on VM, we don't poll, so the q is always done here */ 536 if (is_thinint_irq(q->irq_ptr))
555 if (need_siga_sync(q) || pci_out_supported(q)) 537 return 1;
538
539 /* don't poll under z/VM */
540 if (MACHINE_IS_VM)
556 return 1; 541 return 1;
557 542
558 /* 543 /*
@@ -563,14 +548,11 @@ static int qdio_inbound_q_done(struct qdio_q *q)
563 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d", 548 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
564 q->first_to_check); 549 q->first_to_check);
565 return 1; 550 return 1;
566 } else { 551 } else
567 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
568 q->first_to_check);
569 return 0; 552 return 0;
570 }
571} 553}
572 554
573void qdio_kick_handler(struct qdio_q *q) 555static void qdio_kick_handler(struct qdio_q *q)
574{ 556{
575 int start = q->first_to_kick; 557 int start = q->first_to_kick;
576 int end = q->first_to_check; 558 int end = q->first_to_check;
@@ -619,7 +601,6 @@ again:
619 goto again; 601 goto again;
620} 602}
621 603
622/* inbound tasklet */
623void qdio_inbound_processing(unsigned long data) 604void qdio_inbound_processing(unsigned long data)
624{ 605{
625 struct qdio_q *q = (struct qdio_q *)data; 606 struct qdio_q *q = (struct qdio_q *)data;
@@ -642,11 +623,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
642 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); 623 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
643 stop = add_buf(q->first_to_check, count); 624 stop = add_buf(q->first_to_check, count);
644 625
645 /* need to set count to 1 for non-qebsm */
646 if (!is_qebsm(q))
647 count = 1;
648
649check_next:
650 if (q->first_to_check == stop) 626 if (q->first_to_check == stop)
651 return q->first_to_check; 627 return q->first_to_check;
652 628
@@ -661,13 +637,7 @@ check_next:
661 637
662 atomic_sub(count, &q->nr_buf_used); 638 atomic_sub(count, &q->nr_buf_used);
663 q->first_to_check = add_buf(q->first_to_check, count); 639 q->first_to_check = add_buf(q->first_to_check, count);
664 /* 640 break;
665 * We fetch all buffer states at once. get_buf_states may
666 * return count < stop. For QEBSM we do not loop.
667 */
668 if (is_qebsm(q))
669 break;
670 goto check_next;
671 case SLSB_P_OUTPUT_ERROR: 641 case SLSB_P_OUTPUT_ERROR:
672 announce_buffer_error(q, count); 642 announce_buffer_error(q, count);
673 /* process the buffer, the upper layer will take care of it */ 643 /* process the buffer, the upper layer will take care of it */
@@ -797,8 +767,7 @@ void qdio_outbound_timer(unsigned long data)
797 tasklet_schedule(&q->tasklet); 767 tasklet_schedule(&q->tasklet);
798} 768}
799 769
800/* called from thinint inbound tasklet */ 770static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
801void qdio_check_outbound_after_thinint(struct qdio_q *q)
802{ 771{
803 struct qdio_q *out; 772 struct qdio_q *out;
804 int i; 773 int i;
@@ -811,6 +780,46 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q)
811 tasklet_schedule(&out->tasklet); 780 tasklet_schedule(&out->tasklet);
812} 781}
813 782
783static void __tiqdio_inbound_processing(struct qdio_q *q)
784{
785 qdio_perf_stat_inc(&perf_stats.thinint_inbound);
786 qdio_sync_after_thinint(q);
787
788 /*
789 * The interrupt could be caused by a PCI request. Check the
790 * PCI capable outbound queues.
791 */
792 qdio_check_outbound_after_thinint(q);
793
794 if (!qdio_inbound_q_moved(q))
795 return;
796
797 qdio_kick_handler(q);
798
799 if (!qdio_inbound_q_done(q)) {
800 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
801 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
802 tasklet_schedule(&q->tasklet);
803 }
804
805 qdio_stop_polling(q);
806 /*
807 * We need to check again to not lose initiative after
808 * resetting the ACK state.
809 */
810 if (!qdio_inbound_q_done(q)) {
811 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
812 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
813 tasklet_schedule(&q->tasklet);
814 }
815}
816
/* Tasklet entry point; @data is the struct qdio_q this tasklet serves. */
void tiqdio_inbound_processing(unsigned long data)
{
	__tiqdio_inbound_processing((struct qdio_q *)data);
}
822
814static inline void qdio_set_state(struct qdio_irq *irq_ptr, 823static inline void qdio_set_state(struct qdio_irq *irq_ptr,
815 enum qdio_irq_states state) 824 enum qdio_irq_states state)
816{ 825{
@@ -1488,18 +1497,13 @@ out:
1488 * @count: how many buffers to process 1497 * @count: how many buffers to process
1489 */ 1498 */
1490int do_QDIO(struct ccw_device *cdev, unsigned int callflags, 1499int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1491 int q_nr, int bufnr, int count) 1500 int q_nr, unsigned int bufnr, unsigned int count)
1492{ 1501{
1493 struct qdio_irq *irq_ptr; 1502 struct qdio_irq *irq_ptr;
1494 1503
1495 if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || 1504 if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
1496 (count > QDIO_MAX_BUFFERS_PER_Q) ||
1497 (q_nr >= QDIO_MAX_QUEUES_PER_IRQ))
1498 return -EINVAL; 1505 return -EINVAL;
1499 1506
1500 if (!count)
1501 return 0;
1502
1503 irq_ptr = cdev->private->qdio_data; 1507 irq_ptr = cdev->private->qdio_data;
1504 if (!irq_ptr) 1508 if (!irq_ptr)
1505 return -ENODEV; 1509 return -ENODEV;
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index c655d011a78d..981a77ea7ee2 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -43,9 +43,6 @@ struct indicator_t {
43}; 43};
44static struct indicator_t *q_indicators; 44static struct indicator_t *q_indicators;
45 45
46static void tiqdio_tasklet_fn(unsigned long data);
47static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);
48
49static int css_qdio_omit_svs; 46static int css_qdio_omit_svs;
50 47
51static inline unsigned long do_clear_global_summary(void) 48static inline unsigned long do_clear_global_summary(void)
@@ -103,11 +100,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
103 xchg(irq_ptr->dsci, 1); 100 xchg(irq_ptr->dsci, 1);
104} 101}
105 102
106/*
107 * we cannot stop the tiqdio tasklet here since it is for all
108 * thinint qdio devices and it must run as long as there is a
109 * thinint device left
110 */
111void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) 103void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
112{ 104{
113 struct qdio_q *q; 105 struct qdio_q *q;
@@ -126,79 +118,39 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
126 } 118 }
127} 119}
128 120
129static inline int tiqdio_inbound_q_done(struct qdio_q *q)
130{
131 unsigned char state = 0;
132
133 if (!atomic_read(&q->nr_buf_used))
134 return 1;
135
136 qdio_siga_sync_q(q);
137 get_buf_state(q, q->first_to_check, &state, 0);
138
139 if (state == SLSB_P_INPUT_PRIMED)
140 /* more work coming */
141 return 0;
142 return 1;
143}
144
145static inline int shared_ind(struct qdio_irq *irq_ptr) 121static inline int shared_ind(struct qdio_irq *irq_ptr)
146{ 122{
147 return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; 123 return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
148} 124}
149 125
150static void __tiqdio_inbound_processing(struct qdio_q *q) 126/**
127 * tiqdio_thinint_handler - thin interrupt handler for qdio
128 * @ind: pointer to adapter local summary indicator
129 * @drv_data: NULL
130 */
131static void tiqdio_thinint_handler(void *ind, void *drv_data)
151{ 132{
152 qdio_perf_stat_inc(&perf_stats.thinint_inbound); 133 struct qdio_q *q;
153 qdio_sync_after_thinint(q); 134
135 qdio_perf_stat_inc(&perf_stats.thin_int);
154 136
155 /* 137 /*
156 * Maybe we have work on our outbound queues... at least 138 * SVS only when needed: issue SVS to benefit from iqdio interrupt
157 * we have to check the PCI capable queues. 139 * avoidance (SVS clears adapter interrupt suppression overwrite)
158 */ 140 */
159 qdio_check_outbound_after_thinint(q); 141 if (!css_qdio_omit_svs)
160 142 do_clear_global_summary();
161 if (!qdio_inbound_q_moved(q))
162 return;
163
164 qdio_kick_handler(q);
165
166 if (!tiqdio_inbound_q_done(q)) {
167 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
168 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
169 tasklet_schedule(&q->tasklet);
170 }
171 143
172 qdio_stop_polling(q);
173 /* 144 /*
174 * We need to check again to not lose initiative after 145 * reset local summary indicator (tiqdio_alsi) to stop adapter
175 * resetting the ACK state. 146 * interrupts for now
176 */ 147 */
177 if (!tiqdio_inbound_q_done(q)) { 148 xchg((u8 *)ind, 0);
178 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
179 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
180 tasklet_schedule(&q->tasklet);
181 }
182}
183
184void tiqdio_inbound_processing(unsigned long data)
185{
186 struct qdio_q *q = (struct qdio_q *)data;
187
188 __tiqdio_inbound_processing(q);
189}
190
191/* check for work on all inbound thinint queues */
192static void tiqdio_tasklet_fn(unsigned long data)
193{
194 struct qdio_q *q;
195
196 qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
197again:
198 149
199 /* protect tiq_list entries, only changed in activate or shutdown */ 150 /* protect tiq_list entries, only changed in activate or shutdown */
200 rcu_read_lock(); 151 rcu_read_lock();
201 152
153 /* check for work on all inbound thinint queues */
202 list_for_each_entry_rcu(q, &tiq_list, entry) 154 list_for_each_entry_rcu(q, &tiq_list, entry)
203 /* only process queues from changed sets */ 155 /* only process queues from changed sets */
204 if (*q->irq_ptr->dsci) { 156 if (*q->irq_ptr->dsci) {
@@ -226,37 +178,6 @@ again:
226 if (*tiqdio_alsi) 178 if (*tiqdio_alsi)
227 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1); 179 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
228 } 180 }
229
230 /* check for more work */
231 if (*tiqdio_alsi) {
232 xchg(tiqdio_alsi, 0);
233 qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
234 goto again;
235 }
236}
237
238/**
239 * tiqdio_thinint_handler - thin interrupt handler for qdio
240 * @ind: pointer to adapter local summary indicator
241 * @drv_data: NULL
242 */
243static void tiqdio_thinint_handler(void *ind, void *drv_data)
244{
245 qdio_perf_stat_inc(&perf_stats.thin_int);
246
247 /*
248 * SVS only when needed: issue SVS to benefit from iqdio interrupt
249 * avoidance (SVS clears adapter interrupt suppression overwrite)
250 */
251 if (!css_qdio_omit_svs)
252 do_clear_global_summary();
253
254 /*
255 * reset local summary indicator (tiqdio_alsi) to stop adapter
256 * interrupts for now, the tasklet will clean all dsci's
257 */
258 xchg((u8 *)ind, 0);
259 tasklet_hi_schedule(&tiqdio_tasklet);
260} 181}
261 182
262static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) 183static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
@@ -376,5 +297,4 @@ void __exit tiqdio_unregister_thinints(void)
376 s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC); 297 s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
377 isc_unregister(QDIO_AIRQ_ISC); 298 isc_unregister(QDIO_AIRQ_ISC);
378 } 299 }
379 tasklet_kill(&tiqdio_tasklet);
380} 300}
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9c148406b980..727a809636d8 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -54,6 +54,12 @@ static int ap_poll_thread_start(void);
54static void ap_poll_thread_stop(void); 54static void ap_poll_thread_stop(void);
55static void ap_request_timeout(unsigned long); 55static void ap_request_timeout(unsigned long);
56static inline void ap_schedule_poll_timer(void); 56static inline void ap_schedule_poll_timer(void);
57static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
58static int ap_device_remove(struct device *dev);
59static int ap_device_probe(struct device *dev);
60static void ap_interrupt_handler(void *unused1, void *unused2);
61static void ap_reset(struct ap_device *ap_dev);
62static void ap_config_timeout(unsigned long ptr);
57 63
58/* 64/*
59 * Module description. 65 * Module description.
@@ -101,6 +107,10 @@ static struct hrtimer ap_poll_timer;
101 * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/ 107 * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/
102static unsigned long long poll_timeout = 250000; 108static unsigned long long poll_timeout = 250000;
103 109
110/* Suspend flag */
111static int ap_suspend_flag;
112static struct bus_type ap_bus_type;
113
104/** 114/**
105 * ap_using_interrupts() - Returns non-zero if interrupt support is 115 * ap_using_interrupts() - Returns non-zero if interrupt support is
106 * available. 116 * available.
@@ -617,10 +627,79 @@ static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
617 return retval; 627 return retval;
618} 628}
619 629
630static int ap_bus_suspend(struct device *dev, pm_message_t state)
631{
632 struct ap_device *ap_dev = to_ap_dev(dev);
633 unsigned long flags;
634
635 if (!ap_suspend_flag) {
636 ap_suspend_flag = 1;
637
638 /* Disable scanning for devices, thus we do not want to scan
639 * for them after removing.
640 */
641 del_timer_sync(&ap_config_timer);
642 if (ap_work_queue != NULL) {
643 destroy_workqueue(ap_work_queue);
644 ap_work_queue = NULL;
645 }
646 tasklet_disable(&ap_tasklet);
647 }
648 /* Poll on the device until all requests are finished. */
649 do {
650 flags = 0;
651 __ap_poll_device(ap_dev, &flags);
652 } while ((flags & 1) || (flags & 2));
653
654 ap_device_remove(dev);
655 return 0;
656}
657
658static int ap_bus_resume(struct device *dev)
659{
660 int rc = 0;
661 struct ap_device *ap_dev = to_ap_dev(dev);
662
663 if (ap_suspend_flag) {
664 ap_suspend_flag = 0;
665 if (!ap_interrupts_available())
666 ap_interrupt_indicator = NULL;
667 ap_device_probe(dev);
668 ap_reset(ap_dev);
669 setup_timer(&ap_dev->timeout, ap_request_timeout,
670 (unsigned long) ap_dev);
671 ap_scan_bus(NULL);
672 init_timer(&ap_config_timer);
673 ap_config_timer.function = ap_config_timeout;
674 ap_config_timer.data = 0;
675 ap_config_timer.expires = jiffies + ap_config_time * HZ;
676 add_timer(&ap_config_timer);
677 ap_work_queue = create_singlethread_workqueue("kapwork");
678 if (!ap_work_queue)
679 return -ENOMEM;
680 tasklet_enable(&ap_tasklet);
681 if (!ap_using_interrupts())
682 ap_schedule_poll_timer();
683 else
684 tasklet_schedule(&ap_tasklet);
685 if (ap_thread_flag)
686 rc = ap_poll_thread_start();
687 } else {
688 ap_device_probe(dev);
689 ap_reset(ap_dev);
690 setup_timer(&ap_dev->timeout, ap_request_timeout,
691 (unsigned long) ap_dev);
692 }
693
694 return rc;
695}
696
620static struct bus_type ap_bus_type = { 697static struct bus_type ap_bus_type = {
621 .name = "ap", 698 .name = "ap",
622 .match = &ap_bus_match, 699 .match = &ap_bus_match,
623 .uevent = &ap_uevent, 700 .uevent = &ap_uevent,
701 .suspend = ap_bus_suspend,
702 .resume = ap_bus_resume
624}; 703};
625 704
626static int ap_device_probe(struct device *dev) 705static int ap_device_probe(struct device *dev)
@@ -1066,7 +1145,7 @@ ap_config_timeout(unsigned long ptr)
1066 */ 1145 */
1067static inline void ap_schedule_poll_timer(void) 1146static inline void ap_schedule_poll_timer(void)
1068{ 1147{
1069 if (ap_using_interrupts()) 1148 if (ap_using_interrupts() || ap_suspend_flag)
1070 return; 1149 return;
1071 if (hrtimer_is_queued(&ap_poll_timer)) 1150 if (hrtimer_is_queued(&ap_poll_timer))
1072 return; 1151 return;
@@ -1384,6 +1463,8 @@ static int ap_poll_thread(void *data)
1384 1463
1385 set_user_nice(current, 19); 1464 set_user_nice(current, 19);
1386 while (1) { 1465 while (1) {
1466 if (ap_suspend_flag)
1467 return 0;
1387 if (need_resched()) { 1468 if (need_resched()) {
1388 schedule(); 1469 schedule();
1389 continue; 1470 continue;
@@ -1414,7 +1495,7 @@ static int ap_poll_thread_start(void)
1414{ 1495{
1415 int rc; 1496 int rc;
1416 1497
1417 if (ap_using_interrupts()) 1498 if (ap_using_interrupts() || ap_suspend_flag)
1418 return 0; 1499 return 0;
1419 mutex_lock(&ap_poll_thread_mutex); 1500 mutex_lock(&ap_poll_thread_mutex);
1420 if (!ap_poll_kthread) { 1501 if (!ap_poll_kthread) {
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 52574ce797b2..8c36eafcfbfe 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1307,7 +1307,7 @@ static void netiucv_pm_complete(struct device *dev)
1307 */ 1307 */
1308static int netiucv_pm_freeze(struct device *dev) 1308static int netiucv_pm_freeze(struct device *dev)
1309{ 1309{
1310 struct netiucv_priv *priv = dev->driver_data; 1310 struct netiucv_priv *priv = dev_get_drvdata(dev);
1311 struct net_device *ndev = NULL; 1311 struct net_device *ndev = NULL;
1312 int rc = 0; 1312 int rc = 0;
1313 1313
@@ -1331,7 +1331,7 @@ out:
1331 */ 1331 */
1332static int netiucv_pm_restore_thaw(struct device *dev) 1332static int netiucv_pm_restore_thaw(struct device *dev)
1333{ 1333{
1334 struct netiucv_priv *priv = dev->driver_data; 1334 struct netiucv_priv *priv = dev_get_drvdata(dev);
1335 struct net_device *ndev = NULL; 1335 struct net_device *ndev = NULL;
1336 int rc = 0; 1336 int rc = 0;
1337 1337
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 1132c5cae7ab..037c1e0b7c4c 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -1320,6 +1320,16 @@ config SERIAL_SGI_IOC3
1320 If you have an SGI Altix with an IOC3 serial card, 1320 If you have an SGI Altix with an IOC3 serial card,
1321 say Y or M. Otherwise, say N. 1321 say Y or M. Otherwise, say N.
1322 1322
1323config SERIAL_MSM
1324 bool "MSM on-chip serial port support"
1325 depends on ARM && ARCH_MSM
1326 select SERIAL_CORE
1327
1328config SERIAL_MSM_CONSOLE
1329 bool "MSM serial console support"
1330 depends on SERIAL_MSM=y
1331 select SERIAL_CORE_CONSOLE
1332
1323config SERIAL_NETX 1333config SERIAL_NETX
1324 tristate "NetX serial port support" 1334 tristate "NetX serial port support"
1325 depends on ARM && ARCH_NETX 1335 depends on ARM && ARCH_NETX
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 45a8658f54d5..d5a29981c6c4 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_SERIAL_SGI_IOC4) += ioc4_serial.o
71obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o 71obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o
72obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o 72obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
73obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o 73obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
74obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
74obj-$(CONFIG_SERIAL_NETX) += netx-serial.o 75obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
75obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o 76obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o
76obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o 77obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index e2f6b1bfac98..b4a7650af696 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -38,6 +38,10 @@
38#include <asm/cacheflush.h> 38#include <asm/cacheflush.h>
39#endif 39#endif
40 40
41#ifdef CONFIG_SERIAL_BFIN_MODULE
42# undef CONFIG_EARLY_PRINTK
43#endif
44
41/* UART name and device definitions */ 45/* UART name and device definitions */
42#define BFIN_SERIAL_NAME "ttyBF" 46#define BFIN_SERIAL_NAME "ttyBF"
43#define BFIN_SERIAL_MAJOR 204 47#define BFIN_SERIAL_MAJOR 204
@@ -1110,6 +1114,7 @@ static void __init bfin_serial_init_ports(void)
1110 bfin_serial_hw_init(); 1114 bfin_serial_hw_init();
1111 1115
1112 for (i = 0; i < nr_active_ports; i++) { 1116 for (i = 0; i < nr_active_ports; i++) {
1117 spin_lock_init(&bfin_serial_ports[i].port.lock);
1113 bfin_serial_ports[i].port.uartclk = get_sclk(); 1118 bfin_serial_ports[i].port.uartclk = get_sclk();
1114 bfin_serial_ports[i].port.fifosize = BFIN_UART_TX_FIFO_SIZE; 1119 bfin_serial_ports[i].port.fifosize = BFIN_UART_TX_FIFO_SIZE;
1115 bfin_serial_ports[i].port.ops = &bfin_serial_pops; 1120 bfin_serial_ports[i].port.ops = &bfin_serial_pops;
diff --git a/drivers/serial/msm_serial.c b/drivers/serial/msm_serial.c
new file mode 100644
index 000000000000..698048f64f5e
--- /dev/null
+++ b/drivers/serial/msm_serial.c
@@ -0,0 +1,772 @@
1/*
2 * drivers/serial/msm_serial.c - driver for msm7k serial device and console
3 *
4 * Copyright (C) 2007 Google, Inc.
5 * Author: Robert Love <rlove@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#if defined(CONFIG_SERIAL_MSM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
18# define SUPPORT_SYSRQ
19#endif
20
21#include <linux/hrtimer.h>
22#include <linux/module.h>
23#include <linux/io.h>
24#include <linux/ioport.h>
25#include <linux/irq.h>
26#include <linux/init.h>
27#include <linux/console.h>
28#include <linux/tty.h>
29#include <linux/tty_flip.h>
30#include <linux/serial_core.h>
31#include <linux/serial.h>
32#include <linux/clk.h>
33#include <linux/platform_device.h>
34
35#include "msm_serial.h"
36
37struct msm_port {
38 struct uart_port uart;
39 char name[16];
40 struct clk *clk;
41 unsigned int imr;
42};
43
44#define UART_TO_MSM(uart_port) ((struct msm_port *) uart_port)
45
46static inline void msm_write(struct uart_port *port, unsigned int val,
47 unsigned int off)
48{
49 __raw_writel(val, port->membase + off);
50}
51
52static inline unsigned int msm_read(struct uart_port *port, unsigned int off)
53{
54 return __raw_readl(port->membase + off);
55}
56
57static void msm_stop_tx(struct uart_port *port)
58{
59 struct msm_port *msm_port = UART_TO_MSM(port);
60
61 msm_port->imr &= ~UART_IMR_TXLEV;
62 msm_write(port, msm_port->imr, UART_IMR);
63}
64
65static void msm_start_tx(struct uart_port *port)
66{
67 struct msm_port *msm_port = UART_TO_MSM(port);
68
69 msm_port->imr |= UART_IMR_TXLEV;
70 msm_write(port, msm_port->imr, UART_IMR);
71}
72
73static void msm_stop_rx(struct uart_port *port)
74{
75 struct msm_port *msm_port = UART_TO_MSM(port);
76
77 msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
78 msm_write(port, msm_port->imr, UART_IMR);
79}
80
81static void msm_enable_ms(struct uart_port *port)
82{
83 struct msm_port *msm_port = UART_TO_MSM(port);
84
85 msm_port->imr |= UART_IMR_DELTA_CTS;
86 msm_write(port, msm_port->imr, UART_IMR);
87}
88
89static void handle_rx(struct uart_port *port)
90{
91 struct tty_struct *tty = port->info->port.tty;
92 unsigned int sr;
93
94 /*
95 * Handle overrun. My understanding of the hardware is that overrun
96 * is not tied to the RX buffer, so we handle the case out of band.
97 */
98 if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
99 port->icount.overrun++;
100 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
101 msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
102 }
103
104 /* and now the main RX loop */
105 while ((sr = msm_read(port, UART_SR)) & UART_SR_RX_READY) {
106 unsigned int c;
107 char flag = TTY_NORMAL;
108
109 c = msm_read(port, UART_RF);
110
111 if (sr & UART_SR_RX_BREAK) {
112 port->icount.brk++;
113 if (uart_handle_break(port))
114 continue;
115 } else if (sr & UART_SR_PAR_FRAME_ERR) {
116 port->icount.frame++;
117 } else {
118 port->icount.rx++;
119 }
120
121 /* Mask conditions we're ignorning. */
122 sr &= port->read_status_mask;
123
124 if (sr & UART_SR_RX_BREAK) {
125 flag = TTY_BREAK;
126 } else if (sr & UART_SR_PAR_FRAME_ERR) {
127 flag = TTY_FRAME;
128 }
129
130 if (!uart_handle_sysrq_char(port, c))
131 tty_insert_flip_char(tty, c, flag);
132 }
133
134 tty_flip_buffer_push(tty);
135}
136
137static void handle_tx(struct uart_port *port)
138{
139 struct circ_buf *xmit = &port->info->xmit;
140 struct msm_port *msm_port = UART_TO_MSM(port);
141 int sent_tx;
142
143 if (port->x_char) {
144 msm_write(port, port->x_char, UART_TF);
145 port->icount.tx++;
146 port->x_char = 0;
147 }
148
149 while (msm_read(port, UART_SR) & UART_SR_TX_READY) {
150 if (uart_circ_empty(xmit)) {
151 /* disable tx interrupts */
152 msm_port->imr &= ~UART_IMR_TXLEV;
153 msm_write(port, msm_port->imr, UART_IMR);
154 break;
155 }
156
157 msm_write(port, xmit->buf[xmit->tail], UART_TF);
158
159 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
160 port->icount.tx++;
161 sent_tx = 1;
162 }
163
164 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
165 uart_write_wakeup(port);
166}
167
168static void handle_delta_cts(struct uart_port *port)
169{
170 msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
171 port->icount.cts++;
172 wake_up_interruptible(&port->info->delta_msr_wait);
173}
174
175static irqreturn_t msm_irq(int irq, void *dev_id)
176{
177 struct uart_port *port = dev_id;
178 struct msm_port *msm_port = UART_TO_MSM(port);
179 unsigned int misr;
180
181 spin_lock(&port->lock);
182 misr = msm_read(port, UART_MISR);
183 msm_write(port, 0, UART_IMR); /* disable interrupt */
184
185 if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE))
186 handle_rx(port);
187 if (misr & UART_IMR_TXLEV)
188 handle_tx(port);
189 if (misr & UART_IMR_DELTA_CTS)
190 handle_delta_cts(port);
191
192 msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
193 spin_unlock(&port->lock);
194
195 return IRQ_HANDLED;
196}
197
198static unsigned int msm_tx_empty(struct uart_port *port)
199{
200 return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
201}
202
203static unsigned int msm_get_mctrl(struct uart_port *port)
204{
205 return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR | TIOCM_RTS;
206}
207
208static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
209{
210 unsigned int mr;
211
212 mr = msm_read(port, UART_MR1);
213
214 if (!(mctrl & TIOCM_RTS)) {
215 mr &= ~UART_MR1_RX_RDY_CTL;
216 msm_write(port, mr, UART_MR1);
217 msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
218 } else {
219 mr |= UART_MR1_RX_RDY_CTL;
220 msm_write(port, mr, UART_MR1);
221 }
222}
223
224static void msm_break_ctl(struct uart_port *port, int break_ctl)
225{
226 if (break_ctl)
227 msm_write(port, UART_CR_CMD_START_BREAK, UART_CR);
228 else
229 msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
230}
231
232static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
233{
234 unsigned int baud_code, rxstale, watermark;
235
236 switch (baud) {
237 case 300:
238 baud_code = UART_CSR_300;
239 rxstale = 1;
240 break;
241 case 600:
242 baud_code = UART_CSR_600;
243 rxstale = 1;
244 break;
245 case 1200:
246 baud_code = UART_CSR_1200;
247 rxstale = 1;
248 break;
249 case 2400:
250 baud_code = UART_CSR_2400;
251 rxstale = 1;
252 break;
253 case 4800:
254 baud_code = UART_CSR_4800;
255 rxstale = 1;
256 break;
257 case 9600:
258 baud_code = UART_CSR_9600;
259 rxstale = 2;
260 break;
261 case 14400:
262 baud_code = UART_CSR_14400;
263 rxstale = 3;
264 break;
265 case 19200:
266 baud_code = UART_CSR_19200;
267 rxstale = 4;
268 break;
269 case 28800:
270 baud_code = UART_CSR_28800;
271 rxstale = 6;
272 break;
273 case 38400:
274 baud_code = UART_CSR_38400;
275 rxstale = 8;
276 break;
277 case 57600:
278 baud_code = UART_CSR_57600;
279 rxstale = 16;
280 break;
281 case 115200:
282 default:
283 baud_code = UART_CSR_115200;
284 baud = 115200;
285 rxstale = 31;
286 break;
287 }
288
289 msm_write(port, baud_code, UART_CSR);
290
291 /* RX stale watermark */
292 watermark = UART_IPR_STALE_LSB & rxstale;
293 watermark |= UART_IPR_RXSTALE_LAST;
294 watermark |= UART_IPR_STALE_TIMEOUT_MSB & (rxstale << 2);
295 msm_write(port, watermark, UART_IPR);
296
297 /* set RX watermark */
298 watermark = (port->fifosize * 3) / 4;
299 msm_write(port, watermark, UART_RFWR);
300
301 /* set TX watermark */
302 msm_write(port, 10, UART_TFWR);
303
304 return baud;
305}
306
307static void msm_reset(struct uart_port *port)
308{
309 /* reset everything */
310 msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
311 msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
312 msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
313 msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
314 msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
315 msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
316}
317
318static void msm_init_clock(struct uart_port *port)
319{
320 struct msm_port *msm_port = UART_TO_MSM(port);
321
322 clk_enable(msm_port->clk);
323
324 msm_write(port, 0xC0, UART_MREG);
325 msm_write(port, 0xB2, UART_NREG);
326 msm_write(port, 0x7D, UART_DREG);
327 msm_write(port, 0x1C, UART_MNDREG);
328}
329
330static int msm_startup(struct uart_port *port)
331{
332 struct msm_port *msm_port = UART_TO_MSM(port);
333 unsigned int data, rfr_level;
334 int ret;
335
336 snprintf(msm_port->name, sizeof(msm_port->name),
337 "msm_serial%d", port->line);
338
339 ret = request_irq(port->irq, msm_irq, IRQF_TRIGGER_HIGH,
340 msm_port->name, port);
341 if (unlikely(ret))
342 return ret;
343
344 msm_init_clock(port);
345
346 if (likely(port->fifosize > 12))
347 rfr_level = port->fifosize - 12;
348 else
349 rfr_level = port->fifosize;
350
351 /* set automatic RFR level */
352 data = msm_read(port, UART_MR1);
353 data &= ~UART_MR1_AUTO_RFR_LEVEL1;
354 data &= ~UART_MR1_AUTO_RFR_LEVEL0;
355 data |= UART_MR1_AUTO_RFR_LEVEL1 & (rfr_level << 2);
356 data |= UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
357 msm_write(port, data, UART_MR1);
358
359 /* make sure that RXSTALE count is non-zero */
360 data = msm_read(port, UART_IPR);
361 if (unlikely(!data)) {
362 data |= UART_IPR_RXSTALE_LAST;
363 data |= UART_IPR_STALE_LSB;
364 msm_write(port, data, UART_IPR);
365 }
366
367 msm_reset(port);
368
369 msm_write(port, 0x05, UART_CR); /* enable TX & RX */
370
371 /* turn on RX and CTS interrupts */
372 msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
373 UART_IMR_CURRENT_CTS;
374 msm_write(port, msm_port->imr, UART_IMR);
375
376 return 0;
377}
378
379static void msm_shutdown(struct uart_port *port)
380{
381 struct msm_port *msm_port = UART_TO_MSM(port);
382
383 msm_port->imr = 0;
384 msm_write(port, 0, UART_IMR); /* disable interrupts */
385
386 clk_disable(msm_port->clk);
387
388 free_irq(port->irq, port);
389}
390
391static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
392 struct ktermios *old)
393{
394 unsigned long flags;
395 unsigned int baud, mr;
396
397 spin_lock_irqsave(&port->lock, flags);
398
399 /* calculate and set baud rate */
400 baud = uart_get_baud_rate(port, termios, old, 300, 115200);
401 baud = msm_set_baud_rate(port, baud);
402 if (tty_termios_baud_rate(termios))
403 tty_termios_encode_baud_rate(termios, baud, baud);
404
405 /* calculate parity */
406 mr = msm_read(port, UART_MR2);
407 mr &= ~UART_MR2_PARITY_MODE;
408 if (termios->c_cflag & PARENB) {
409 if (termios->c_cflag & PARODD)
410 mr |= UART_MR2_PARITY_MODE_ODD;
411 else if (termios->c_cflag & CMSPAR)
412 mr |= UART_MR2_PARITY_MODE_SPACE;
413 else
414 mr |= UART_MR2_PARITY_MODE_EVEN;
415 }
416
417 /* calculate bits per char */
418 mr &= ~UART_MR2_BITS_PER_CHAR;
419 switch (termios->c_cflag & CSIZE) {
420 case CS5:
421 mr |= UART_MR2_BITS_PER_CHAR_5;
422 break;
423 case CS6:
424 mr |= UART_MR2_BITS_PER_CHAR_6;
425 break;
426 case CS7:
427 mr |= UART_MR2_BITS_PER_CHAR_7;
428 break;
429 case CS8:
430 default:
431 mr |= UART_MR2_BITS_PER_CHAR_8;
432 break;
433 }
434
435 /* calculate stop bits */
436 mr &= ~(UART_MR2_STOP_BIT_LEN_ONE | UART_MR2_STOP_BIT_LEN_TWO);
437 if (termios->c_cflag & CSTOPB)
438 mr |= UART_MR2_STOP_BIT_LEN_TWO;
439 else
440 mr |= UART_MR2_STOP_BIT_LEN_ONE;
441
442 /* set parity, bits per char, and stop bit */
443 msm_write(port, mr, UART_MR2);
444
445 /* calculate and set hardware flow control */
446 mr = msm_read(port, UART_MR1);
447 mr &= ~(UART_MR1_CTS_CTL | UART_MR1_RX_RDY_CTL);
448 if (termios->c_cflag & CRTSCTS) {
449 mr |= UART_MR1_CTS_CTL;
450 mr |= UART_MR1_RX_RDY_CTL;
451 }
452 msm_write(port, mr, UART_MR1);
453
454 /* Configure status bits to ignore based on termio flags. */
455 port->read_status_mask = 0;
456 if (termios->c_iflag & INPCK)
457 port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
458 if (termios->c_iflag & (BRKINT | PARMRK))
459 port->read_status_mask |= UART_SR_RX_BREAK;
460
461 uart_update_timeout(port, termios->c_cflag, baud);
462
463 spin_unlock_irqrestore(&port->lock, flags);
464}
465
/* uart_ops.type: human-readable name of this port type. */
static const char *msm_type(struct uart_port *port)
{
	return "MSM";
}
470
471static void msm_release_port(struct uart_port *port)
472{
473 struct platform_device *pdev = to_platform_device(port->dev);
474 struct resource *resource;
475 resource_size_t size;
476
477 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
478 if (unlikely(!resource))
479 return;
480 size = resource->end - resource->start + 1;
481
482 release_mem_region(port->mapbase, size);
483 iounmap(port->membase);
484 port->membase = NULL;
485}
486
487static int msm_request_port(struct uart_port *port)
488{
489 struct platform_device *pdev = to_platform_device(port->dev);
490 struct resource *resource;
491 resource_size_t size;
492
493 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
494 if (unlikely(!resource))
495 return -ENXIO;
496 size = resource->end - resource->start + 1;
497
498 if (unlikely(!request_mem_region(port->mapbase, size, "msm_serial")))
499 return -EBUSY;
500
501 port->membase = ioremap(port->mapbase, size);
502 if (!port->membase) {
503 release_mem_region(port->mapbase, size);
504 return -EBUSY;
505 }
506
507 return 0;
508}
509
510static void msm_config_port(struct uart_port *port, int flags)
511{
512 if (flags & UART_CONFIG_TYPE) {
513 port->type = PORT_MSM;
514 msm_request_port(port);
515 }
516}
517
518static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
519{
520 if (unlikely(ser->type != PORT_UNKNOWN && ser->type != PORT_MSM))
521 return -EINVAL;
522 if (unlikely(port->irq != ser->irq))
523 return -EINVAL;
524 return 0;
525}
526
527static void msm_power(struct uart_port *port, unsigned int state,
528 unsigned int oldstate)
529{
530 struct msm_port *msm_port = UART_TO_MSM(port);
531
532 switch (state) {
533 case 0:
534 clk_enable(msm_port->clk);
535 break;
536 case 3:
537 clk_disable(msm_port->clk);
538 break;
539 default:
540 printk(KERN_ERR "msm_serial: Unknown PM state %d\n", state);
541 }
542}
543
/* uart_ops vtable wiring the MSM hardware callbacks into the serial core. */
static struct uart_ops msm_uart_pops = {
	.tx_empty = msm_tx_empty,
	.set_mctrl = msm_set_mctrl,
	.get_mctrl = msm_get_mctrl,
	.stop_tx = msm_stop_tx,
	.start_tx = msm_start_tx,
	.stop_rx = msm_stop_rx,
	.enable_ms = msm_enable_ms,
	.break_ctl = msm_break_ctl,
	.startup = msm_startup,
	.shutdown = msm_shutdown,
	.set_termios = msm_set_termios,
	.type = msm_type,
	.release_port = msm_release_port,
	.request_port = msm_request_port,
	.config_port = msm_config_port,
	.verify_port = msm_verify_port,
	.pm = msm_power,	/* clock gating on state 0/3, see msm_power() */
};
563
/*
 * Static table of the three MSM UART ports.  Ports 0 and 1 have
 * 512-byte FIFOs; port 2 has a 64-byte FIFO.  mapbase/irq/uartclk are
 * filled in later by msm_serial_probe() from platform resources.
 */
static struct msm_port msm_uart_ports[] = {
	{
		.uart = {
			.iotype = UPIO_MEM,
			.ops = &msm_uart_pops,
			.flags = UPF_BOOT_AUTOCONF,
			.fifosize = 512,
			.line = 0,
		},
	},
	{
		.uart = {
			.iotype = UPIO_MEM,
			.ops = &msm_uart_pops,
			.flags = UPF_BOOT_AUTOCONF,
			.fifosize = 512,
			.line = 1,
		},
	},
	{
		.uart = {
			.iotype = UPIO_MEM,
			.ops = &msm_uart_pops,
			.flags = UPF_BOOT_AUTOCONF,
			.fifosize = 64,
			.line = 2,
		},
	},
};
593
594#define UART_NR ARRAY_SIZE(msm_uart_ports)
595
/*
 * Map a line number to its uart_port.  No bounds checking here: all
 * callers (probe, console setup/write) validate line < UART_NR first.
 */
static inline struct uart_port *get_port_from_line(unsigned int line)
{
	return &msm_uart_ports[line].uart;
}
600
601#ifdef CONFIG_SERIAL_MSM_CONSOLE
602
603static void msm_console_putchar(struct uart_port *port, int c)
604{
605 while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
606 ;
607 msm_write(port, c, UART_TF);
608}
609
610static void msm_console_write(struct console *co, const char *s,
611 unsigned int count)
612{
613 struct uart_port *port;
614 struct msm_port *msm_port;
615
616 BUG_ON(co->index < 0 || co->index >= UART_NR);
617
618 port = get_port_from_line(co->index);
619 msm_port = UART_TO_MSM(port);
620
621 spin_lock(&port->lock);
622 uart_console_write(port, s, count, msm_console_putchar);
623 spin_unlock(&port->lock);
624}
625
626static int __init msm_console_setup(struct console *co, char *options)
627{
628 struct uart_port *port;
629 int baud, flow, bits, parity;
630
631 if (unlikely(co->index >= UART_NR || co->index < 0))
632 return -ENXIO;
633
634 port = get_port_from_line(co->index);
635
636 if (unlikely(!port->membase))
637 return -ENXIO;
638
639 port->cons = co;
640
641 msm_init_clock(port);
642
643 if (options)
644 uart_parse_options(options, &baud, &parity, &bits, &flow);
645
646 bits = 8;
647 parity = 'n';
648 flow = 'n';
649 msm_write(port, UART_MR2_BITS_PER_CHAR_8 | UART_MR2_STOP_BIT_LEN_ONE,
650 UART_MR2); /* 8N1 */
651
652 if (baud < 300 || baud > 115200)
653 baud = 115200;
654 msm_set_baud_rate(port, baud);
655
656 msm_reset(port);
657
658 printk(KERN_INFO "msm_serial: console setup on port #%d\n", port->line);
659
660 return uart_set_options(port, co, baud, parity, bits, flow);
661}
662
663static struct uart_driver msm_uart_driver;
664
/* Boot console bound to the msm uart_driver declared just above. */
static struct console msm_console = {
	.name = "ttyMSM",
	.write = msm_console_write,
	.device = uart_console_device,
	.setup = msm_console_setup,
	.flags = CON_PRINTBUFFER,	/* replay the log buffer on register */
	.index = -1,			/* -1: pick the port from console= */
	.data = &msm_uart_driver,
};
674
675#define MSM_CONSOLE (&msm_console)
676
677#else
678#define MSM_CONSOLE NULL
679#endif
680
/*
 * The serial-core driver object.  MSM_CONSOLE is &msm_console when
 * CONFIG_SERIAL_MSM_CONSOLE is set, NULL otherwise.
 */
static struct uart_driver msm_uart_driver = {
	.owner = THIS_MODULE,
	.driver_name = "msm_serial",
	.dev_name = "ttyMSM",
	.nr = UART_NR,
	.cons = MSM_CONSOLE,
};
688
689static int __init msm_serial_probe(struct platform_device *pdev)
690{
691 struct msm_port *msm_port;
692 struct resource *resource;
693 struct uart_port *port;
694
695 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
696 return -ENXIO;
697
698 printk(KERN_INFO "msm_serial: detected port #%d\n", pdev->id);
699
700 port = get_port_from_line(pdev->id);
701 port->dev = &pdev->dev;
702 msm_port = UART_TO_MSM(port);
703
704 msm_port->clk = clk_get(&pdev->dev, "uart_clk");
705 if (unlikely(IS_ERR(msm_port->clk)))
706 return PTR_ERR(msm_port->clk);
707 port->uartclk = clk_get_rate(msm_port->clk);
708
709 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
710 if (unlikely(!resource))
711 return -ENXIO;
712 port->mapbase = resource->start;
713
714 port->irq = platform_get_irq(pdev, 0);
715 if (unlikely(port->irq < 0))
716 return -ENXIO;
717
718 platform_set_drvdata(pdev, port);
719
720 return uart_add_one_port(&msm_uart_driver, port);
721}
722
/*
 * Platform remove callback: drop the clock reference taken in probe.
 *
 * NOTE(review): probe stores the uart_port pointer as drvdata, yet it
 * is read back here as a msm_port pointer — this only works if .uart
 * is the first member of struct msm_port (declared outside this view);
 * TODO confirm.  Also appears to be missing a matching
 * uart_remove_one_port() for the uart_add_one_port() in probe — verify
 * against the serial core's expectations before relying on unbind.
 */
static int __devexit msm_serial_remove(struct platform_device *pdev)
{
	struct msm_port *msm_port = platform_get_drvdata(pdev);

	clk_put(msm_port->clk);

	return 0;
}
731
732static struct platform_driver msm_platform_driver = {
733 .probe = msm_serial_probe,
734 .remove = msm_serial_remove,
735 .driver = {
736 .name = "msm_serial",
737 .owner = THIS_MODULE,
738 },
739};
740
741static int __init msm_serial_init(void)
742{
743 int ret;
744
745 ret = uart_register_driver(&msm_uart_driver);
746 if (unlikely(ret))
747 return ret;
748
749 ret = platform_driver_probe(&msm_platform_driver, msm_serial_probe);
750 if (unlikely(ret))
751 uart_unregister_driver(&msm_uart_driver);
752
753 printk(KERN_INFO "msm_serial: driver initialized\n");
754
755 return ret;
756}
757
/*
 * Module exit: tear down in reverse order of init — console first,
 * then the platform driver, then the uart driver.
 */
static void __exit msm_serial_exit(void)
{
#ifdef CONFIG_SERIAL_MSM_CONSOLE
	unregister_console(&msm_console);
#endif
	platform_driver_unregister(&msm_platform_driver);
	uart_unregister_driver(&msm_uart_driver);
}
766
767module_init(msm_serial_init);
768module_exit(msm_serial_exit);
769
770MODULE_AUTHOR("Robert Love <rlove@google.com>");
771MODULE_DESCRIPTION("Driver for msm7x serial device");
772MODULE_LICENSE("GPL");
diff --git a/drivers/serial/msm_serial.h b/drivers/serial/msm_serial.h
new file mode 100644
index 000000000000..689f1fa0e84e
--- /dev/null
+++ b/drivers/serial/msm_serial.h
@@ -0,0 +1,117 @@
/*
 * drivers/serial/msm_serial.h
 *
 * Copyright (C) 2007 Google, Inc.
 * Author: Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef __DRIVERS_SERIAL_MSM_SERIAL_H
#define __DRIVERS_SERIAL_MSM_SERIAL_H

#define UART_MR1			0x0000

#define UART_MR1_AUTO_RFR_LEVEL0	0x3F
#define UART_MR1_AUTO_RFR_LEVEL1	0x3FF00
#define UART_MR1_RX_RDY_CTL		(1 << 7)
#define UART_MR1_CTS_CTL		(1 << 6)

#define UART_MR2			0x0004
#define UART_MR2_ERROR_MODE		(1 << 6)
#define UART_MR2_BITS_PER_CHAR		0x30
#define UART_MR2_BITS_PER_CHAR_5	(0x0 << 4)
#define UART_MR2_BITS_PER_CHAR_6	(0x1 << 4)
#define UART_MR2_BITS_PER_CHAR_7	(0x2 << 4)
#define UART_MR2_BITS_PER_CHAR_8	(0x3 << 4)
#define UART_MR2_STOP_BIT_LEN_ONE	(0x1 << 2)
#define UART_MR2_STOP_BIT_LEN_TWO	(0x3 << 2)
#define UART_MR2_PARITY_MODE_NONE	0x0
#define UART_MR2_PARITY_MODE_ODD	0x1
#define UART_MR2_PARITY_MODE_EVEN	0x2
#define UART_MR2_PARITY_MODE_SPACE	0x3
#define UART_MR2_PARITY_MODE		0x3

/* Clock selector: fixed divisor codes for the standard baud rates. */
#define UART_CSR	0x0008
#define UART_CSR_115200	0xFF
#define UART_CSR_57600	0xEE
#define UART_CSR_38400	0xDD
#define UART_CSR_28800	0xCC
#define UART_CSR_19200	0xBB
#define UART_CSR_14400	0xAA
#define UART_CSR_9600	0x99
#define UART_CSR_4800	0x77
#define UART_CSR_2400	0x55
#define UART_CSR_1200	0x44
#define UART_CSR_600	0x33
#define UART_CSR_300	0x22

#define UART_TF		0x000C

#define UART_CR				0x0010
#define UART_CR_CMD_NULL		(0 << 4)
#define UART_CR_CMD_RESET_RX		(1 << 4)
#define UART_CR_CMD_RESET_TX		(2 << 4)
#define UART_CR_CMD_RESET_ERR		(3 << 4)
#define UART_CR_CMD_RESET_BREAK_INT	(4 << 4)
#define UART_CR_CMD_START_BREAK		(5 << 4)
#define UART_CR_CMD_STOP_BREAK		(6 << 4)
#define UART_CR_CMD_RESET_CTS		(7 << 4)
#define UART_CR_CMD_PACKET_MODE		(9 << 4)
#define UART_CR_CMD_MODE_RESET		(12 << 4)
#define UART_CR_CMD_SET_RFR		(13 << 4)
#define UART_CR_CMD_RESET_RFR		(14 << 4)
/*
 * Fix: the original defined all four enable/disable bits as (1 << 3),
 * making TX enable, RX disable and RX enable writes all hit the TX
 * disable bit.  Per the MSM UART_CR layout these are distinct bits
 * 3..0 (matching the later mainline msm_serial.h definitions).
 */
#define UART_CR_TX_DISABLE		(1 << 3)
#define UART_CR_TX_ENABLE		(1 << 2)
#define UART_CR_RX_DISABLE		(1 << 1)
#define UART_CR_RX_ENABLE		(1 << 0)

#define UART_IMR		0x0014
#define UART_IMR_TXLEV		(1 << 0)
#define UART_IMR_RXSTALE	(1 << 3)
#define UART_IMR_RXLEV		(1 << 4)
#define UART_IMR_DELTA_CTS	(1 << 5)
#define UART_IMR_CURRENT_CTS	(1 << 6)

#define UART_IPR_RXSTALE_LAST		0x20
#define UART_IPR_STALE_LSB		0x1F
#define UART_IPR_STALE_TIMEOUT_MSB	0x3FF80

#define UART_IPR	0x0018
#define UART_TFWR	0x001C
#define UART_RFWR	0x0020
#define UART_HCR	0x0024

#define UART_MREG		0x0028
#define UART_NREG		0x002C
#define UART_DREG		0x0030
#define UART_MNDREG		0x0034
#define UART_IRDA		0x0038
#define UART_MISR_MODE		0x0040
#define UART_MISR_RESET		0x0044
#define UART_MISR_EXPORT	0x0048
#define UART_MISR_VAL		0x004C
#define UART_TEST_CTRL		0x0050

/*
 * Read-side registers.  These offsets intentionally alias the
 * write-side registers above (SR/CSR at 0x0008, RF/TF at 0x000C,
 * MISR/CR at 0x0010, ISR/IMR at 0x0014): the hardware decodes reads
 * and writes at the same offset to different registers.
 */
#define UART_SR			0x0008
#define UART_SR_HUNT_CHAR	(1 << 7)
#define UART_SR_RX_BREAK	(1 << 6)
#define UART_SR_PAR_FRAME_ERR	(1 << 5)
#define UART_SR_OVERRUN		(1 << 4)
#define UART_SR_TX_EMPTY	(1 << 3)
#define UART_SR_TX_READY	(1 << 2)
#define UART_SR_RX_FULL		(1 << 1)
#define UART_SR_RX_READY	(1 << 0)

#define UART_RF		0x000C
#define UART_MISR	0x0010
#define UART_ISR	0x0014

#endif	/* __DRIVERS_SERIAL_MSM_SERIAL_H */
diff --git a/drivers/serial/s3c2400.c b/drivers/serial/s3c2400.c
index 4873f2978bd2..fb00ed5296e6 100644
--- a/drivers/serial/s3c2400.c
+++ b/drivers/serial/s3c2400.c
@@ -78,7 +78,7 @@ static int s3c2400_serial_probe(struct platform_device *dev)
78 78
79static struct platform_driver s3c2400_serial_drv = { 79static struct platform_driver s3c2400_serial_drv = {
80 .probe = s3c2400_serial_probe, 80 .probe = s3c2400_serial_probe,
81 .remove = s3c24xx_serial_remove, 81 .remove = __devexit_p(s3c24xx_serial_remove),
82 .driver = { 82 .driver = {
83 .name = "s3c2400-uart", 83 .name = "s3c2400-uart",
84 .owner = THIS_MODULE, 84 .owner = THIS_MODULE,
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index 87c182ef71b8..b5d7cbcba2ae 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -90,7 +90,7 @@ static int s3c2410_serial_probe(struct platform_device *dev)
90 90
91static struct platform_driver s3c2410_serial_drv = { 91static struct platform_driver s3c2410_serial_drv = {
92 .probe = s3c2410_serial_probe, 92 .probe = s3c2410_serial_probe,
93 .remove = s3c24xx_serial_remove, 93 .remove = __devexit_p(s3c24xx_serial_remove),
94 .driver = { 94 .driver = {
95 .name = "s3c2410-uart", 95 .name = "s3c2410-uart",
96 .owner = THIS_MODULE, 96 .owner = THIS_MODULE,
diff --git a/drivers/serial/s3c2412.c b/drivers/serial/s3c2412.c
index fd017b375568..11dcb90bdfef 100644
--- a/drivers/serial/s3c2412.c
+++ b/drivers/serial/s3c2412.c
@@ -123,7 +123,7 @@ static int s3c2412_serial_probe(struct platform_device *dev)
123 123
124static struct platform_driver s3c2412_serial_drv = { 124static struct platform_driver s3c2412_serial_drv = {
125 .probe = s3c2412_serial_probe, 125 .probe = s3c2412_serial_probe,
126 .remove = s3c24xx_serial_remove, 126 .remove = __devexit_p(s3c24xx_serial_remove),
127 .driver = { 127 .driver = {
128 .name = "s3c2412-uart", 128 .name = "s3c2412-uart",
129 .owner = THIS_MODULE, 129 .owner = THIS_MODULE,
diff --git a/drivers/serial/s3c2440.c b/drivers/serial/s3c2440.c
index 29cbb0afef8e..06c5b0cc47a3 100644
--- a/drivers/serial/s3c2440.c
+++ b/drivers/serial/s3c2440.c
@@ -153,7 +153,7 @@ static int s3c2440_serial_probe(struct platform_device *dev)
153 153
154static struct platform_driver s3c2440_serial_drv = { 154static struct platform_driver s3c2440_serial_drv = {
155 .probe = s3c2440_serial_probe, 155 .probe = s3c2440_serial_probe,
156 .remove = s3c24xx_serial_remove, 156 .remove = __devexit_p(s3c24xx_serial_remove),
157 .driver = { 157 .driver = {
158 .name = "s3c2440-uart", 158 .name = "s3c2440-uart",
159 .owner = THIS_MODULE, 159 .owner = THIS_MODULE,
diff --git a/drivers/serial/s3c24a0.c b/drivers/serial/s3c24a0.c
index ebf2fd3c8f7d..786a067d62ac 100644
--- a/drivers/serial/s3c24a0.c
+++ b/drivers/serial/s3c24a0.c
@@ -94,7 +94,7 @@ static int s3c24a0_serial_probe(struct platform_device *dev)
94 94
95static struct platform_driver s3c24a0_serial_drv = { 95static struct platform_driver s3c24a0_serial_drv = {
96 .probe = s3c24a0_serial_probe, 96 .probe = s3c24a0_serial_probe,
97 .remove = s3c24xx_serial_remove, 97 .remove = __devexit_p(s3c24xx_serial_remove),
98 .driver = { 98 .driver = {
99 .name = "s3c24a0-uart", 99 .name = "s3c24a0-uart",
100 .owner = THIS_MODULE, 100 .owner = THIS_MODULE,
diff --git a/drivers/serial/s3c6400.c b/drivers/serial/s3c6400.c
index 3e3785233682..48f1a3781f0d 100644
--- a/drivers/serial/s3c6400.c
+++ b/drivers/serial/s3c6400.c
@@ -124,7 +124,7 @@ static int s3c6400_serial_probe(struct platform_device *dev)
124 124
125static struct platform_driver s3c6400_serial_drv = { 125static struct platform_driver s3c6400_serial_drv = {
126 .probe = s3c6400_serial_probe, 126 .probe = s3c6400_serial_probe,
127 .remove = s3c24xx_serial_remove, 127 .remove = __devexit_p(s3c24xx_serial_remove),
128 .driver = { 128 .driver = {
129 .name = "s3c6400-uart", 129 .name = "s3c6400-uart",
130 .owner = THIS_MODULE, 130 .owner = THIS_MODULE,
diff --git a/drivers/serial/samsung.c b/drivers/serial/samsung.c
index 93b5d75db126..c8851a0db63a 100644
--- a/drivers/serial/samsung.c
+++ b/drivers/serial/samsung.c
@@ -1174,7 +1174,7 @@ int s3c24xx_serial_probe(struct platform_device *dev,
1174 1174
1175EXPORT_SYMBOL_GPL(s3c24xx_serial_probe); 1175EXPORT_SYMBOL_GPL(s3c24xx_serial_probe);
1176 1176
1177int s3c24xx_serial_remove(struct platform_device *dev) 1177int __devexit s3c24xx_serial_remove(struct platform_device *dev)
1178{ 1178{
1179 struct uart_port *port = s3c24xx_dev_to_port(&dev->dev); 1179 struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
1180 1180
diff --git a/drivers/serial/samsung.h b/drivers/serial/samsung.h
index 7afb94843a08..d3fe315969f6 100644
--- a/drivers/serial/samsung.h
+++ b/drivers/serial/samsung.h
@@ -72,7 +72,7 @@ struct s3c24xx_uart_port {
72extern int s3c24xx_serial_probe(struct platform_device *dev, 72extern int s3c24xx_serial_probe(struct platform_device *dev,
73 struct s3c24xx_uart_info *uart); 73 struct s3c24xx_uart_info *uart);
74 74
75extern int s3c24xx_serial_remove(struct platform_device *dev); 75extern int __devexit s3c24xx_serial_remove(struct platform_device *dev);
76 76
77extern int s3c24xx_serial_initconsole(struct platform_driver *drv, 77extern int s3c24xx_serial_initconsole(struct platform_driver *drv,
78 struct s3c24xx_uart_info *uart); 78 struct s3c24xx_uart_info *uart);
diff --git a/drivers/serial/sb1250-duart.c b/drivers/serial/sb1250-duart.c
index a4fb343a08da..319e8b83f6be 100644
--- a/drivers/serial/sb1250-duart.c
+++ b/drivers/serial/sb1250-duart.c
@@ -204,7 +204,7 @@ static int sbd_receive_drain(struct sbd_port *sport)
204{ 204{
205 int loops = 10000; 205 int loops = 10000;
206 206
207 while (sbd_receive_ready(sport) && loops--) 207 while (sbd_receive_ready(sport) && --loops)
208 read_sbdchn(sport, R_DUART_RX_HOLD); 208 read_sbdchn(sport, R_DUART_RX_HOLD);
209 return loops; 209 return loops;
210} 210}
@@ -218,7 +218,7 @@ static int __maybe_unused sbd_transmit_drain(struct sbd_port *sport)
218{ 218{
219 int loops = 10000; 219 int loops = 10000;
220 220
221 while (!sbd_transmit_ready(sport) && loops--) 221 while (!sbd_transmit_ready(sport) && --loops)
222 udelay(2); 222 udelay(2);
223 return loops; 223 return loops;
224} 224}
@@ -232,7 +232,7 @@ static int sbd_line_drain(struct sbd_port *sport)
232{ 232{
233 int loops = 10000; 233 int loops = 10000;
234 234
235 while (!sbd_transmit_empty(sport) && loops--) 235 while (!sbd_transmit_empty(sport) && --loops)
236 udelay(2); 236 udelay(2);
237 return loops; 237 return loops;
238} 238}
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index a94a2ab4b571..1df5325faab2 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -461,7 +461,7 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
461 break; 461 break;
462 udelay(1); 462 udelay(1);
463 } 463 }
464 if (limit <= 0) 464 if (limit < 0)
465 break; 465 break;
466 page_bytes -= written; 466 page_bytes -= written;
467 ra += written; 467 ra += written;
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
index ac9e5d5f742e..063a313b755c 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/serial/timbuart.c
@@ -33,29 +33,29 @@ struct timbuart_port {
33 struct uart_port port; 33 struct uart_port port;
34 struct tasklet_struct tasklet; 34 struct tasklet_struct tasklet;
35 int usedma; 35 int usedma;
36 u8 last_ier; 36 u32 last_ier;
37 struct platform_device *dev; 37 struct platform_device *dev;
38}; 38};
39 39
40static int baudrates[] = {9600, 19200, 38400, 57600, 115200, 230400, 460800, 40static int baudrates[] = {9600, 19200, 38400, 57600, 115200, 230400, 460800,
41 921600, 1843200, 3250000}; 41 921600, 1843200, 3250000};
42 42
43static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier); 43static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier);
44 44
45static irqreturn_t timbuart_handleinterrupt(int irq, void *devid); 45static irqreturn_t timbuart_handleinterrupt(int irq, void *devid);
46 46
47static void timbuart_stop_rx(struct uart_port *port) 47static void timbuart_stop_rx(struct uart_port *port)
48{ 48{
49 /* spin lock held by upper layer, disable all RX interrupts */ 49 /* spin lock held by upper layer, disable all RX interrupts */
50 u8 ier = ioread8(port->membase + TIMBUART_IER) & ~RXFLAGS; 50 u32 ier = ioread32(port->membase + TIMBUART_IER) & ~RXFLAGS;
51 iowrite8(ier, port->membase + TIMBUART_IER); 51 iowrite32(ier, port->membase + TIMBUART_IER);
52} 52}
53 53
54static void timbuart_stop_tx(struct uart_port *port) 54static void timbuart_stop_tx(struct uart_port *port)
55{ 55{
56 /* spinlock held by upper layer, disable TX interrupt */ 56 /* spinlock held by upper layer, disable TX interrupt */
57 u8 ier = ioread8(port->membase + TIMBUART_IER) & ~TXBAE; 57 u32 ier = ioread32(port->membase + TIMBUART_IER) & ~TXBAE;
58 iowrite8(ier, port->membase + TIMBUART_IER); 58 iowrite32(ier, port->membase + TIMBUART_IER);
59} 59}
60 60
61static void timbuart_start_tx(struct uart_port *port) 61static void timbuart_start_tx(struct uart_port *port)
@@ -72,14 +72,14 @@ static void timbuart_flush_buffer(struct uart_port *port)
72 u8 ctl = ioread8(port->membase + TIMBUART_CTRL) | TIMBUART_CTRL_FLSHTX; 72 u8 ctl = ioread8(port->membase + TIMBUART_CTRL) | TIMBUART_CTRL_FLSHTX;
73 73
74 iowrite8(ctl, port->membase + TIMBUART_CTRL); 74 iowrite8(ctl, port->membase + TIMBUART_CTRL);
75 iowrite8(TXBF, port->membase + TIMBUART_ISR); 75 iowrite32(TXBF, port->membase + TIMBUART_ISR);
76} 76}
77 77
78static void timbuart_rx_chars(struct uart_port *port) 78static void timbuart_rx_chars(struct uart_port *port)
79{ 79{
80 struct tty_struct *tty = port->info->port.tty; 80 struct tty_struct *tty = port->info->port.tty;
81 81
82 while (ioread8(port->membase + TIMBUART_ISR) & RXDP) { 82 while (ioread32(port->membase + TIMBUART_ISR) & RXDP) {
83 u8 ch = ioread8(port->membase + TIMBUART_RXFIFO); 83 u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
84 port->icount.rx++; 84 port->icount.rx++;
85 tty_insert_flip_char(tty, ch, TTY_NORMAL); 85 tty_insert_flip_char(tty, ch, TTY_NORMAL);
@@ -97,7 +97,7 @@ static void timbuart_tx_chars(struct uart_port *port)
97{ 97{
98 struct circ_buf *xmit = &port->info->xmit; 98 struct circ_buf *xmit = &port->info->xmit;
99 99
100 while (!(ioread8(port->membase + TIMBUART_ISR) & TXBF) && 100 while (!(ioread32(port->membase + TIMBUART_ISR) & TXBF) &&
101 !uart_circ_empty(xmit)) { 101 !uart_circ_empty(xmit)) {
102 iowrite8(xmit->buf[xmit->tail], 102 iowrite8(xmit->buf[xmit->tail],
103 port->membase + TIMBUART_TXFIFO); 103 port->membase + TIMBUART_TXFIFO);
@@ -114,7 +114,7 @@ static void timbuart_tx_chars(struct uart_port *port)
114 ioread8(port->membase + TIMBUART_BAUDRATE)); 114 ioread8(port->membase + TIMBUART_BAUDRATE));
115} 115}
116 116
117static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier) 117static void timbuart_handle_tx_port(struct uart_port *port, u32 isr, u32 *ier)
118{ 118{
119 struct timbuart_port *uart = 119 struct timbuart_port *uart =
120 container_of(port, struct timbuart_port, port); 120 container_of(port, struct timbuart_port, port);
@@ -129,7 +129,7 @@ static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier)
129 if (isr & TXFLAGS) { 129 if (isr & TXFLAGS) {
130 timbuart_tx_chars(port); 130 timbuart_tx_chars(port);
131 /* clear all TX interrupts */ 131 /* clear all TX interrupts */
132 iowrite8(TXFLAGS, port->membase + TIMBUART_ISR); 132 iowrite32(TXFLAGS, port->membase + TIMBUART_ISR);
133 133
134 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 134 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
135 uart_write_wakeup(port); 135 uart_write_wakeup(port);
@@ -148,7 +148,7 @@ static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier)
148 dev_dbg(port->dev, "%s - leaving\n", __func__); 148 dev_dbg(port->dev, "%s - leaving\n", __func__);
149} 149}
150 150
151void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier) 151void timbuart_handle_rx_port(struct uart_port *port, u32 isr, u32 *ier)
152{ 152{
153 if (isr & RXFLAGS) { 153 if (isr & RXFLAGS) {
154 /* Some RX status is set */ 154 /* Some RX status is set */
@@ -161,7 +161,7 @@ void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier)
161 timbuart_rx_chars(port); 161 timbuart_rx_chars(port);
162 162
163 /* ack all RX interrupts */ 163 /* ack all RX interrupts */
164 iowrite8(RXFLAGS, port->membase + TIMBUART_ISR); 164 iowrite32(RXFLAGS, port->membase + TIMBUART_ISR);
165 } 165 }
166 166
167 /* always have the RX interrupts enabled */ 167 /* always have the RX interrupts enabled */
@@ -173,11 +173,11 @@ void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier)
173void timbuart_tasklet(unsigned long arg) 173void timbuart_tasklet(unsigned long arg)
174{ 174{
175 struct timbuart_port *uart = (struct timbuart_port *)arg; 175 struct timbuart_port *uart = (struct timbuart_port *)arg;
176 u8 isr, ier = 0; 176 u32 isr, ier = 0;
177 177
178 spin_lock(&uart->port.lock); 178 spin_lock(&uart->port.lock);
179 179
180 isr = ioread8(uart->port.membase + TIMBUART_ISR); 180 isr = ioread32(uart->port.membase + TIMBUART_ISR);
181 dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr); 181 dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
182 182
183 if (!uart->usedma) 183 if (!uart->usedma)
@@ -188,7 +188,7 @@ void timbuart_tasklet(unsigned long arg)
188 if (!uart->usedma) 188 if (!uart->usedma)
189 timbuart_handle_rx_port(&uart->port, isr, &ier); 189 timbuart_handle_rx_port(&uart->port, isr, &ier);
190 190
191 iowrite8(ier, uart->port.membase + TIMBUART_IER); 191 iowrite32(ier, uart->port.membase + TIMBUART_IER);
192 192
193 spin_unlock(&uart->port.lock); 193 spin_unlock(&uart->port.lock);
194 dev_dbg(uart->port.dev, "%s leaving\n", __func__); 194 dev_dbg(uart->port.dev, "%s leaving\n", __func__);
@@ -196,9 +196,9 @@ void timbuart_tasklet(unsigned long arg)
196 196
197static unsigned int timbuart_tx_empty(struct uart_port *port) 197static unsigned int timbuart_tx_empty(struct uart_port *port)
198{ 198{
199 u8 isr = ioread8(port->membase + TIMBUART_ISR); 199 u32 isr = ioread32(port->membase + TIMBUART_ISR);
200 200
201 return (isr & TXBAE) ? TIOCSER_TEMT : 0; 201 return (isr & TXBE) ? TIOCSER_TEMT : 0;
202} 202}
203 203
204static unsigned int timbuart_get_mctrl(struct uart_port *port) 204static unsigned int timbuart_get_mctrl(struct uart_port *port)
@@ -222,13 +222,13 @@ static void timbuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
222 iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL); 222 iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
223} 223}
224 224
225static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier) 225static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier)
226{ 226{
227 unsigned int cts; 227 unsigned int cts;
228 228
229 if (isr & CTS_DELTA) { 229 if (isr & CTS_DELTA) {
230 /* ack */ 230 /* ack */
231 iowrite8(CTS_DELTA, port->membase + TIMBUART_ISR); 231 iowrite32(CTS_DELTA, port->membase + TIMBUART_ISR);
232 cts = timbuart_get_mctrl(port); 232 cts = timbuart_get_mctrl(port);
233 uart_handle_cts_change(port, cts & TIOCM_CTS); 233 uart_handle_cts_change(port, cts & TIOCM_CTS);
234 wake_up_interruptible(&port->info->delta_msr_wait); 234 wake_up_interruptible(&port->info->delta_msr_wait);
@@ -255,9 +255,9 @@ static int timbuart_startup(struct uart_port *port)
255 dev_dbg(port->dev, "%s\n", __func__); 255 dev_dbg(port->dev, "%s\n", __func__);
256 256
257 iowrite8(TIMBUART_CTRL_FLSHRX, port->membase + TIMBUART_CTRL); 257 iowrite8(TIMBUART_CTRL_FLSHRX, port->membase + TIMBUART_CTRL);
258 iowrite8(0xff, port->membase + TIMBUART_ISR); 258 iowrite32(0x1ff, port->membase + TIMBUART_ISR);
259 /* Enable all but TX interrupts */ 259 /* Enable all but TX interrupts */
260 iowrite8(RXBAF | RXBF | RXTT | CTS_DELTA, 260 iowrite32(RXBAF | RXBF | RXTT | CTS_DELTA,
261 port->membase + TIMBUART_IER); 261 port->membase + TIMBUART_IER);
262 262
263 return request_irq(port->irq, timbuart_handleinterrupt, IRQF_SHARED, 263 return request_irq(port->irq, timbuart_handleinterrupt, IRQF_SHARED,
@@ -270,7 +270,7 @@ static void timbuart_shutdown(struct uart_port *port)
270 container_of(port, struct timbuart_port, port); 270 container_of(port, struct timbuart_port, port);
271 dev_dbg(port->dev, "%s\n", __func__); 271 dev_dbg(port->dev, "%s\n", __func__);
272 free_irq(port->irq, uart); 272 free_irq(port->irq, uart);
273 iowrite8(0, port->membase + TIMBUART_IER); 273 iowrite32(0, port->membase + TIMBUART_IER);
274} 274}
275 275
276static int get_bindex(int baud) 276static int get_bindex(int baud)
@@ -359,10 +359,10 @@ static irqreturn_t timbuart_handleinterrupt(int irq, void *devid)
359 struct timbuart_port *uart = (struct timbuart_port *)devid; 359 struct timbuart_port *uart = (struct timbuart_port *)devid;
360 360
361 if (ioread8(uart->port.membase + TIMBUART_IPR)) { 361 if (ioread8(uart->port.membase + TIMBUART_IPR)) {
362 uart->last_ier = ioread8(uart->port.membase + TIMBUART_IER); 362 uart->last_ier = ioread32(uart->port.membase + TIMBUART_IER);
363 363
364 /* disable interrupts, the tasklet enables them again */ 364 /* disable interrupts, the tasklet enables them again */
365 iowrite8(0, uart->port.membase + TIMBUART_IER); 365 iowrite32(0, uart->port.membase + TIMBUART_IER);
366 366
367 /* fire off bottom half */ 367 /* fire off bottom half */
368 tasklet_schedule(&uart->tasklet); 368 tasklet_schedule(&uart->tasklet);
diff --git a/drivers/serial/zs.c b/drivers/serial/zs.c
index 9e6a873f8203..d8c2809b1ab6 100644
--- a/drivers/serial/zs.c
+++ b/drivers/serial/zs.c
@@ -231,7 +231,7 @@ static int zs_receive_drain(struct zs_port *zport)
231{ 231{
232 int loops = 10000; 232 int loops = 10000;
233 233
234 while ((read_zsreg(zport, R0) & Rx_CH_AV) && loops--) 234 while ((read_zsreg(zport, R0) & Rx_CH_AV) && --loops)
235 read_zsdata(zport); 235 read_zsdata(zport);
236 return loops; 236 return loops;
237} 237}
@@ -241,7 +241,7 @@ static int zs_transmit_drain(struct zs_port *zport, int irq)
241 struct zs_scc *scc = zport->scc; 241 struct zs_scc *scc = zport->scc;
242 int loops = 10000; 242 int loops = 10000;
243 243
244 while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && loops--) { 244 while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && --loops) {
245 zs_spin_unlock_cond_irq(&scc->zlock, irq); 245 zs_spin_unlock_cond_irq(&scc->zlock, irq);
246 udelay(2); 246 udelay(2);
247 zs_spin_lock_cond_irq(&scc->zlock, irq); 247 zs_spin_lock_cond_irq(&scc->zlock, irq);
@@ -254,7 +254,7 @@ static int zs_line_drain(struct zs_port *zport, int irq)
254 struct zs_scc *scc = zport->scc; 254 struct zs_scc *scc = zport->scc;
255 int loops = 10000; 255 int loops = 10000;
256 256
257 while (!(read_zsreg(zport, R1) & ALL_SNT) && loops--) { 257 while (!(read_zsreg(zport, R1) & ALL_SNT) && --loops) {
258 zs_spin_unlock_cond_irq(&scc->zlock, irq); 258 zs_spin_unlock_cond_irq(&scc->zlock, irq);
259 udelay(2); 259 udelay(2);
260 zs_spin_lock_cond_irq(&scc->zlock, irq); 260 zs_spin_lock_cond_irq(&scc->zlock, irq);
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index 581232b719fd..90b29b564631 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -284,21 +284,12 @@ static void ProcessModemStatus(struct quatech_port *qt_port,
284 return; 284 return;
285} 285}
286 286
287static void ProcessRxChar(struct usb_serial_port *port, unsigned char Data) 287static void ProcessRxChar(struct tty_struct *tty, struct usb_serial_port *port,
288 unsigned char data)
288{ 289{
289 struct tty_struct *tty;
290 struct urb *urb = port->read_urb; 290 struct urb *urb = port->read_urb;
291 tty = tty_port_tty_get(&port->port); 291 if (urb->actual_length)
292 292 tty_insert_flip_char(tty, data, TTY_NORMAL);
293 /* if we insert more than TTY_FLIPBUF_SIZE characters, we drop them. */
294
295 if (tty && urb->actual_length) {
296 tty_buffer_request_room(tty, 1);
297 tty_insert_flip_string(tty, &Data, 1);
298 /* tty_flip_buffer_push(tty); */
299 }
300
301 return;
302} 293}
303 294
304static void qt_write_bulk_callback(struct urb *urb) 295static void qt_write_bulk_callback(struct urb *urb)
@@ -435,8 +426,10 @@ static void qt_read_bulk_callback(struct urb *urb)
435 case 0xff: 426 case 0xff:
436 dbg("No status sequence. \n"); 427 dbg("No status sequence. \n");
437 428
438 ProcessRxChar(port, data[i]); 429 if (tty) {
439 ProcessRxChar(port, data[i + 1]); 430 ProcessRxChar(tty, port, data[i]);
431 ProcessRxChar(tty, port, data[i + 1]);
432 }
440 i += 2; 433 i += 2;
441 break; 434 break;
442 } 435 }
@@ -444,10 +437,8 @@ static void qt_read_bulk_callback(struct urb *urb)
444 continue; 437 continue;
445 } 438 }
446 439
447 if (tty && urb->actual_length) { 440 if (tty && urb->actual_length)
448 tty_buffer_request_room(tty, 1); 441 tty_insert_flip_char(tty, data[i], TTY_NORMAL);
449 tty_insert_flip_string(tty, (data + i), 1);
450 }
451 442
452 } 443 }
453 tty_flip_buffer_push(tty); 444 tty_flip_buffer_push(tty);