Diffstat (limited to 'drivers')
 drivers/Makefile | 4
 drivers/acpi/Kconfig | 15
 drivers/acpi/Makefile | 1
 drivers/acpi/acpica/Makefile | 2
 drivers/acpi/acpica/acconfig.h | 1
 drivers/acpi/acpica/acevents.h | 17
 drivers/acpi/acpica/acglobal.h | 13
 drivers/acpi/acpica/amlcode.h | 15
 drivers/acpi/acpica/dswload.c | 2
 drivers/acpi/acpica/dswload2.c | 2
 drivers/acpi/acpica/evglock.c | 335
 drivers/acpi/acpica/evmisc.c | 303
 drivers/acpi/acpica/evregion.c | 121
 drivers/acpi/acpica/evrgnini.c | 2
 drivers/acpi/acpica/evxfregn.c | 13
 drivers/acpi/acpica/excreate.c | 3
 drivers/acpi/acpica/nsrepair.c | 13
 drivers/acpi/acpica/utdecode.c | 5
 drivers/acpi/acpica/utmutex.c | 12
 drivers/acpi/bus.c | 2
 drivers/acpi/custom_method.c | 100
 drivers/acpi/debugfs.c | 92
 drivers/acpi/ec.c | 19
 drivers/acpi/internal.h | 3
 drivers/acpi/osl.c | 33
 drivers/acpi/processor_core.c | 12
 drivers/acpi/processor_idle.c | 2
 drivers/acpi/sysfs.c | 8
 drivers/amba/bus.c | 5
 drivers/block/floppy.c | 1
 drivers/block/paride/pcd.c | 1
 drivers/block/virtio_blk.c | 91
 drivers/cdrom/viocd.c | 1
 drivers/char/virtio_console.c | 5
 drivers/cpuidle/governors/menu.c | 4
 drivers/dma/Kconfig | 12
 drivers/dma/TODO | 14
 drivers/dma/at_hdmac.c | 376
 drivers/dma/at_hdmac_regs.h | 30
 drivers/dma/coh901318.c | 2
 drivers/dma/dw_dmac.c | 272
 drivers/dma/dw_dmac_regs.h | 2
 drivers/dma/intel_mid_dma.c | 17
 drivers/dma/ioat/dma_v2.c | 8
 drivers/dma/iop-adma.c | 6
 drivers/dma/mv_xor.c | 6
 drivers/dma/pch_dma.c | 96
 drivers/dma/ppc4xx/adma.c | 8
 drivers/dma/ste_dma40.c | 4
 drivers/gpio/Kconfig | 38
 drivers/gpio/Makefile | 7
 drivers/gpio/gpio-exynos4.c | 365
 drivers/gpio/gpio-nomadik.c | 1069
 drivers/gpio/gpio-omap.c | 2007
 drivers/gpio/gpio-plat-samsung.c | 206
 drivers/gpio/gpio-s5pc100.c | 355
 drivers/gpio/gpio-s5pv210.c | 288
 drivers/gpio/gpio-u300.c | 700
 drivers/gpio/gpiolib.c | 4
 drivers/gpio/langwell_gpio.c | 65
 drivers/gpio/pca953x.c | 249
 drivers/gpio/pch_gpio.c | 2
 drivers/ide/ide-cd.c | 1
 drivers/md/dm-io.c | 27
 drivers/md/dm-kcopyd.c | 168
 drivers/md/dm-log.c | 3
 drivers/md/dm-mpath.c | 2
 drivers/md/dm-raid1.c | 10
 drivers/md/dm-snap-persistent.c | 13
 drivers/md/dm-snap.c | 10
 drivers/md/dm-table.c | 23
 drivers/mfd/Kconfig | 2
 drivers/mfd/db8500-prcmu.c | 3
 drivers/misc/kgdbts.c | 5
 drivers/mmc/host/mmci.c | 25
 drivers/mtd/Kconfig | 18
 drivers/mtd/Makefile | 3
 drivers/mtd/chips/cfi_cmdset_0001.c | 10
 drivers/mtd/chips/cfi_cmdset_0002.c | 10
 drivers/mtd/chips/cfi_cmdset_0020.c | 1
 drivers/mtd/devices/block2mtd.c | 4
 drivers/mtd/devices/doc2000.c | 4
 drivers/mtd/devices/doc2001.c | 4
 drivers/mtd/devices/doc2001plus.c | 4
 drivers/mtd/devices/lart.c | 9
 drivers/mtd/devices/m25p80.c | 109
 drivers/mtd/devices/ms02-nv.c | 4
 drivers/mtd/devices/mtd_dataflash.c | 45
 drivers/mtd/devices/mtdram.c | 5
 drivers/mtd/devices/phram.c | 4
 drivers/mtd/devices/pmc551.c | 6
 drivers/mtd/devices/slram.c | 4
 drivers/mtd/devices/sst25l.c | 68
 drivers/mtd/lpddr/lpddr_cmds.c | 8
 drivers/mtd/maps/Kconfig | 23
 drivers/mtd/maps/amd76xrom.c | 4
 drivers/mtd/maps/autcpu12-nvram.c | 4
 drivers/mtd/maps/bcm963xx-flash.c | 6
 drivers/mtd/maps/bfin-async-flash.c | 21
 drivers/mtd/maps/cdb89712.c | 12
 drivers/mtd/maps/ceiva.c | 6
 drivers/mtd/maps/cfi_flagadm.c | 4
 drivers/mtd/maps/ck804xrom.c | 4
 drivers/mtd/maps/dbox2-flash.c | 4
 drivers/mtd/maps/dc21285.c | 20
 drivers/mtd/maps/dilnetpc.c | 9
 drivers/mtd/maps/dmv182.c | 4
 drivers/mtd/maps/edb7312.c | 26
 drivers/mtd/maps/esb2rom.c | 4
 drivers/mtd/maps/fortunet.c | 7
 drivers/mtd/maps/gpio-addr-flash.c | 27
 drivers/mtd/maps/h720x-flash.c | 6
 drivers/mtd/maps/ichxrom.c | 4
 drivers/mtd/maps/impa7.c | 22
 drivers/mtd/maps/intel_vr_nor.c | 19
 drivers/mtd/maps/ixp2000.c | 4
 drivers/mtd/maps/ixp4xx.c | 16
 drivers/mtd/maps/l440gx.c | 4
 drivers/mtd/maps/latch-addr-flash.c | 45
 drivers/mtd/maps/mbx860.c | 6
 drivers/mtd/maps/netsc520.c | 4
 drivers/mtd/maps/nettel.c | 12
 drivers/mtd/maps/octagon-5066.c | 4
 drivers/mtd/maps/pci.c | 4
 drivers/mtd/maps/pcmciamtd.c | 4
 drivers/mtd/maps/physmap.c | 34
 drivers/mtd/maps/physmap_of.c | 30
 drivers/mtd/maps/plat-ram.c | 24
 drivers/mtd/maps/pmcmsp-flash.c | 6
 drivers/mtd/maps/pxa2xx-flash.c | 18
 drivers/mtd/maps/rbtx4939-flash.c | 24
 drivers/mtd/maps/rpxlite.c | 4
 drivers/mtd/maps/sa1100-flash.c | 21
 drivers/mtd/maps/sbc_gxx.c | 4
 drivers/mtd/maps/sc520cdp.c | 8
 drivers/mtd/maps/scb2_flash.c | 6
 drivers/mtd/maps/scx200_docflash.c | 16
 drivers/mtd/maps/solutionengine.c | 12
 drivers/mtd/maps/sun_uflash.c | 4
 drivers/mtd/maps/tqm8xxl.c | 20
 drivers/mtd/maps/ts5500_flash.c | 4
 drivers/mtd/maps/tsunami_flash.c | 4
 drivers/mtd/maps/uclinux.c | 12
 drivers/mtd/maps/vmax301.c | 4
 drivers/mtd/maps/vmu-flash.c | 4
 drivers/mtd/maps/wr_sbc82xx_flash.c | 15
 drivers/mtd/mtd_blkdevs.c | 24
 drivers/mtd/mtdchar.c | 55
 drivers/mtd/mtdconcat.c | 4
 drivers/mtd/mtdcore.c | 167
 drivers/mtd/mtdcore.h | 6
 drivers/mtd/mtdpart.c | 9
 drivers/mtd/mtdswap.c | 8
 drivers/mtd/nand/Kconfig | 5
 drivers/mtd/nand/alauda.c | 4
 drivers/mtd/nand/ams-delta.c | 4
 drivers/mtd/nand/atmel_nand.c | 13
 drivers/mtd/nand/au1550nd.c | 3
 drivers/mtd/nand/autcpu12.c | 16
 drivers/mtd/nand/bcm_umi_nand.c | 4
 drivers/mtd/nand/bf5xx_nand.c | 7
 drivers/mtd/nand/cafe_nand.c | 11
 drivers/mtd/nand/cmx270_nand.c | 2
 drivers/mtd/nand/cs553x_nand.c | 19
 drivers/mtd/nand/davinci_nand.c | 51
 drivers/mtd/nand/denali.c | 247
 drivers/mtd/nand/denali.h | 373
 drivers/mtd/nand/diskonchip.c | 18
 drivers/mtd/nand/edb7312.c | 9
 drivers/mtd/nand/fsl_elbc_nand.c | 12
 drivers/mtd/nand/fsl_upm.c | 12
 drivers/mtd/nand/fsmc_nand.c | 25
 drivers/mtd/nand/gpio.c | 4
 drivers/mtd/nand/h1910.c | 5
 drivers/mtd/nand/jz4740_nand.c | 10
 drivers/mtd/nand/mpc5121_nfc.c | 12
 drivers/mtd/nand/mxc_nand.c | 64
 drivers/mtd/nand/nand_base.c | 18
 drivers/mtd/nand/nand_bbt.c | 27
 drivers/mtd/nand/nandsim.c | 4
 drivers/mtd/nand/ndfc.c | 65
 drivers/mtd/nand/nomadik_nand.c | 7
 drivers/mtd/nand/nuc900_nand.c | 4
 drivers/mtd/nand/omap2.c | 32
 drivers/mtd/nand/orion_nand.c | 14
 drivers/mtd/nand/pasemi_nand.c | 2
 drivers/mtd/nand/plat_nand.c | 12
 drivers/mtd/nand/ppchameleonevb.c | 15
 drivers/mtd/nand/pxa3xx_nand.c | 13
 drivers/mtd/nand/rtc_from4.c | 3
 drivers/mtd/nand/s3c2410.c | 75
 drivers/mtd/nand/sh_flctl.c | 2
 drivers/mtd/nand/sharpsl.c | 12
 drivers/mtd/nand/sm_common.c | 2
 drivers/mtd/nand/socrates_nand.c | 16
 drivers/mtd/nand/spia.c | 2
 drivers/mtd/nand/tmio_nand.c | 10
 drivers/mtd/nand/txx9ndfmc.c | 14
 drivers/mtd/onenand/Kconfig | 1
 drivers/mtd/onenand/generic.c | 16
 drivers/mtd/onenand/omap2.c | 10
 drivers/mtd/onenand/onenand_base.c | 54
 drivers/mtd/onenand/onenand_sim.c | 3
 drivers/mtd/onenand/samsung.c | 12
 drivers/mtd/ubi/gluebi.c | 6
 drivers/net/sfc/mtd.c | 6
 drivers/net/virtio_net.c | 2
 drivers/of/fdt.c | 8
 drivers/oprofile/event_buffer.h | 2
 drivers/oprofile/oprof.c | 2
 drivers/pci/dmar.c | 7
 drivers/pci/intel-iommu.c | 240
 drivers/pci/iova.c | 12
 drivers/pci/pci-acpi.c | 2
 drivers/platform/x86/Kconfig | 11
 drivers/platform/x86/Makefile | 3
 drivers/platform/x86/acer-wmi.c | 184
 drivers/platform/x86/acerhdf.c | 4
 drivers/platform/x86/asus-laptop.c | 34
 drivers/platform/x86/asus-wmi.c | 22
 drivers/platform/x86/asus_acpi.c | 77
 drivers/platform/x86/compal-laptop.c | 36
 drivers/platform/x86/dell-laptop.c | 12
 drivers/platform/x86/dell-wmi-aio.c | 3
 drivers/platform/x86/dell-wmi.c | 17
 drivers/platform/x86/eeepc-laptop.c | 21
 drivers/platform/x86/eeepc-wmi.c | 14
 drivers/platform/x86/fujitsu-laptop.c | 39
 drivers/platform/x86/hdaps.c | 19
 drivers/platform/x86/hp-wmi.c | 43
 drivers/platform/x86/ibm_rtl.c | 23
 drivers/platform/x86/ideapad-laptop.c | 2
 drivers/platform/x86/intel_menlow.c | 5
 drivers/platform/x86/intel_mid_powerbtn.c | 72
 drivers/platform/x86/intel_mid_thermal.c | 607
 drivers/platform/x86/intel_oaktrail.c | 396
 drivers/platform/x86/intel_pmic_gpio.c | 14
 drivers/platform/x86/msi-laptop.c | 23
 drivers/platform/x86/msi-wmi.c | 45
 drivers/platform/x86/sony-laptop.c | 106
 drivers/platform/x86/tc1100-wmi.c | 7
 drivers/platform/x86/thinkpad_acpi.c | 490
 drivers/platform/x86/topstar-laptop.c | 2
 drivers/platform/x86/toshiba_acpi.c | 59
 drivers/platform/x86/toshiba_bluetooth.c | 11
 drivers/platform/x86/wmi.c | 10
 drivers/platform/x86/xo15-ebook.c | 5
 drivers/scsi/aic94xx/aic94xx_init.c | 2
 drivers/scsi/bfa/bfa_ioc.c | 4
 drivers/scsi/bfa/bfa_ioc.h | 1
 drivers/scsi/bfa/bfa_ioc_cb.c | 11
 drivers/scsi/bfa/bfa_ioc_ct.c | 26
 drivers/scsi/bnx2i/bnx2i.h | 16
 drivers/scsi/bnx2i/bnx2i_hwi.c | 27
 drivers/scsi/bnx2i/bnx2i_init.c | 2
 drivers/scsi/bnx2i/bnx2i_iscsi.c | 13
 drivers/scsi/fcoe/fcoe.c | 58
 drivers/scsi/fcoe/fcoe.h | 10
 drivers/scsi/fcoe/fcoe_ctlr.c | 133
 drivers/scsi/fcoe/fcoe_transport.c | 40
 drivers/scsi/ipr.c | 12
 drivers/scsi/libfc/fc_disc.c | 1
 drivers/scsi/libfc/fc_exch.c | 2
 drivers/scsi/libfc/fc_fcp.c | 16
 drivers/scsi/libfc/fc_libfc.h | 1
 drivers/scsi/libsas/sas_ata.c | 60
 drivers/scsi/libsas/sas_internal.h | 2
 drivers/scsi/libsas/sas_phy.c | 4
 drivers/scsi/libsas/sas_port.c | 21
 drivers/scsi/libsas/sas_scsi_host.c | 14
 drivers/scsi/lpfc/lpfc.h | 43
 drivers/scsi/lpfc/lpfc_attr.c | 312
 drivers/scsi/lpfc/lpfc_bsg.c | 2111
 drivers/scsi/lpfc/lpfc_bsg.h | 87
 drivers/scsi/lpfc/lpfc_crtn.h | 10
 drivers/scsi/lpfc/lpfc_ct.c | 2
 drivers/scsi/lpfc/lpfc_debugfs.c | 10
 drivers/scsi/lpfc/lpfc_els.c | 28
 drivers/scsi/lpfc/lpfc_hbadisc.c | 54
 drivers/scsi/lpfc/lpfc_hw.h | 8
 drivers/scsi/lpfc/lpfc_hw4.h | 501
 drivers/scsi/lpfc/lpfc_init.c | 545
 drivers/scsi/lpfc/lpfc_mbox.c | 166
 drivers/scsi/lpfc/lpfc_mem.c | 13
 drivers/scsi/lpfc/lpfc_nportdisc.c | 28
 drivers/scsi/lpfc/lpfc_scsi.c | 56
 drivers/scsi/lpfc/lpfc_sli.c | 1659
 drivers/scsi/lpfc/lpfc_sli.h | 1
 drivers/scsi/lpfc/lpfc_sli4.h | 33
 drivers/scsi/lpfc/lpfc_vport.c | 2
 drivers/scsi/megaraid/megaraid_sas.h | 10
 drivers/scsi/megaraid/megaraid_sas_base.c | 93
 drivers/scsi/megaraid/megaraid_sas_fusion.c | 83
 drivers/scsi/mpt2sas/mpt2sas_base.h | 4
 drivers/scsi/mpt2sas/mpt2sas_scsih.c | 102
 drivers/scsi/osst.c | 6
 drivers/scsi/qla4xxx/Makefile | 2
 drivers/scsi/qla4xxx/ql4_attr.c | 69
 drivers/scsi/qla4xxx/ql4_def.h | 11
 drivers/scsi/qla4xxx/ql4_fw.h | 23
 drivers/scsi/qla4xxx/ql4_glbl.h | 3
 drivers/scsi/qla4xxx/ql4_init.c | 2
 drivers/scsi/qla4xxx/ql4_isr.c | 22
 drivers/scsi/qla4xxx/ql4_mbx.c | 77
 drivers/scsi/qla4xxx/ql4_nx.c | 19
 drivers/scsi/qla4xxx/ql4_os.c | 68
 drivers/scsi/qla4xxx/ql4_version.h | 2
 drivers/scsi/scsi_error.c | 87
 drivers/scsi/scsi_proc.c | 5
 drivers/scsi/scsi_trace.c | 4
 drivers/scsi/sd.c | 82
 drivers/scsi/ultrastor.c | 2
 drivers/spi/Kconfig | 9
 drivers/spi/Makefile | 1
 drivers/spi/spi_bfin_sport.c | 952
 drivers/spi/tle62x0.c | 3
 drivers/target/loopback/tcm_loop.c | 25
 drivers/target/target_core_configfs.c | 4
 drivers/target/target_core_device.c | 29
 drivers/target/target_core_pscsi.c | 4
 drivers/target/target_core_tmr.c | 7
 drivers/target/target_core_transport.c | 68
 drivers/target/tcm_fc/tfc_cmd.c | 20
 drivers/target/tcm_fc/tfc_conf.c | 8
 drivers/thermal/thermal_sys.c | 10
 drivers/tty/serial/atmel_serial.c | 2
 drivers/usb/host/ehci-pci.c | 39
 drivers/usb/host/pci-quirks.c | 63
 drivers/usb/host/pci-quirks.h | 2
 drivers/usb/host/xhci-pci.c | 26
 drivers/usb/host/xhci-ring.c | 89
 drivers/usb/host/xhci.c | 240
 drivers/usb/host/xhci.h | 22
 drivers/vhost/net.c | 12
 drivers/vhost/test.c | 6
 drivers/vhost/vhost.c | 138
 drivers/vhost/vhost.h | 21
 drivers/virtio/virtio_balloon.c | 21
 drivers/virtio/virtio_ring.c | 53
339 files changed, 16896 insertions, 4941 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 6b17f5864340..09f3232bcdcd 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -17,6 +17,9 @@ obj-$(CONFIG_SFI) += sfi/
 # was used and do nothing if so
 obj-$(CONFIG_PNP) += pnp/
 obj-$(CONFIG_ARM_AMBA) += amba/
+# Many drivers will want to use DMA so this has to be made available
+# really early.
+obj-$(CONFIG_DMA_ENGINE) += dma/
 
 obj-$(CONFIG_VIRTIO) += virtio/
 obj-$(CONFIG_XEN) += xen/
@@ -92,7 +95,6 @@ obj-$(CONFIG_EISA) += eisa/
 obj-y += lguest/
 obj-$(CONFIG_CPU_FREQ) += cpufreq/
 obj-$(CONFIG_CPU_IDLE) += cpuidle/
-obj-$(CONFIG_DMA_ENGINE) += dma/
 obj-$(CONFIG_MMC) += mmc/
 obj-$(CONFIG_MEMSTICK) += memstick/
 obj-y += leds/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index bc2218db5ba9..de0e3df76776 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -369,6 +369,21 @@ config ACPI_HED
          which is used to report some hardware errors notified via
          SCI, mainly the corrected errors.
 
+config ACPI_CUSTOM_METHOD
+       tristate "Allow ACPI methods to be inserted/replaced at run time"
+       depends on DEBUG_FS
+       default n
+       help
+         This debug facility allows ACPI AML methods to be inserted and/or
+         replaced without rebooting the system. For details refer to:
+         Documentation/acpi/method-customizing.txt.
+
+         NOTE: This option is security sensitive, because it allows arbitrary
+         kernel memory to be written to by root (uid=0) users, allowing them
+         to bypass certain security measures (e.g. if root is not allowed to
+         load additional kernel modules after boot, this feature may be used
+         to override that restriction).
+
 source "drivers/acpi/apei/Kconfig"
 
 endif # ACPI
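
For context, the facility enabled above is a single debugfs file that accepts one complete AML table per install. A minimal userspace sketch of driving it (a sketch, not part of this commit: it assumes the file appears at /sys/kernel/debug/acpi/custom_method as described in Documentation/acpi/method-customizing.txt, and "method.aml" is a placeholder for a table compiled with iasl):

#include <fcntl.h>
#include <unistd.h>

/* Push a compiled AML table into the kernel via the debugfs file. */
int main(void)
{
    char buf[4096];
    ssize_t n;
    int in = open("method.aml", O_RDONLY);
    int out = open("/sys/kernel/debug/acpi/custom_method", O_WRONLY);

    if (in < 0 || out < 0)
        return 1;

    /* cm_write() reassembles partial writes, so chunking is fine */
    while ((n = read(in, buf, sizeof(buf))) > 0) {
        if (write(out, buf, n) != n)
            return 1;
    }

    close(out);
    close(in);
    return 0;
}

A plain "cat method.aml > /sys/kernel/debug/acpi/custom_method" does the same job; the only requirement imposed by the write handler is that the first chunk carry a full struct acpi_table_header so the total length can be read from it.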
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index b66fbb2fc85f..ecb26b4f29a0 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_ACPI_SBS) += sbshc.o
 obj-$(CONFIG_ACPI_SBS) += sbs.o
 obj-$(CONFIG_ACPI_HED) += hed.o
 obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
+obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
 
 # processor has its own "processor." module_param namespace
 processor-y := processor_driver.o processor_throttling.o
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index a1224712fd0c..301bd2d388ad 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -14,7 +14,7 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
 
 acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
        evmisc.o evrgnini.o evxface.o evxfregn.o \
-       evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o
+       evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o evglock.o
 
 acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
        exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index ab87396c2c07..bc533dde16c4 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -187,7 +187,6 @@
 
 /* Operation regions */
 
-#define ACPI_NUM_PREDEFINED_REGIONS     9
 #define ACPI_USER_REGION_BEGIN          0x80
 
 /* Maximum space_ids for Operation Regions */
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 41d247daf461..bea3b4899183 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -58,12 +58,6 @@ u32 acpi_ev_fixed_event_detect(void);
  */
 u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node);
 
-acpi_status acpi_ev_acquire_global_lock(u16 timeout);
-
-acpi_status acpi_ev_release_global_lock(void);
-
-acpi_status acpi_ev_init_global_lock_handler(void);
-
 u32 acpi_ev_get_gpe_number_index(u32 gpe_number);
 
 acpi_status
@@ -71,6 +65,17 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
                             u32 notify_value);
 
 /*
+ * evglock - Global Lock support
+ */
+acpi_status acpi_ev_init_global_lock_handler(void);
+
+acpi_status acpi_ev_acquire_global_lock(u16 timeout);
+
+acpi_status acpi_ev_release_global_lock(void);
+
+acpi_status acpi_ev_remove_global_lock_handler(void);
+
+/*
  * evgpe - Low-level GPE support
  */
 u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index d69750b83b36..73863d86f022 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -214,24 +214,23 @@ ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
 
 /*
  * Global lock mutex is an actual AML mutex object
- * Global lock semaphore works in conjunction with the HW global lock
+ * Global lock semaphore works in conjunction with the actual global lock
+ * Global lock spinlock is used for "pending" handshake
  */
 ACPI_EXTERN union acpi_operand_object *acpi_gbl_global_lock_mutex;
 ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
+ACPI_EXTERN acpi_spinlock acpi_gbl_global_lock_pending_lock;
 ACPI_EXTERN u16 acpi_gbl_global_lock_handle;
 ACPI_EXTERN u8 acpi_gbl_global_lock_acquired;
 ACPI_EXTERN u8 acpi_gbl_global_lock_present;
+ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
 
 /*
  * Spinlocks are used for interfaces that can be possibly called at
  * interrupt level
  */
-ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock;      /* For GPE data structs and registers */
-ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
-ACPI_EXTERN spinlock_t _acpi_ev_global_lock_pending_lock; /* For global lock */
-#define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock
-#define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock
-#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
+ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock;    /* For GPE data structs and registers */
+ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock;       /* For ACPI H/W except GPE registers */
 
 /*****************************************************************************
 *
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index f4f0998d3967..1077f17859ed 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -394,21 +394,6 @@
 #define AML_CLASS_METHOD_CALL       0x09
 #define AML_CLASS_UNKNOWN           0x0A
 
-/* Predefined Operation Region space_iDs */
-
-typedef enum {
-    REGION_MEMORY = 0,
-    REGION_IO,
-    REGION_PCI_CONFIG,
-    REGION_EC,
-    REGION_SMBUS,
-    REGION_CMOS,
-    REGION_PCI_BAR,
-    REGION_IPMI,
-    REGION_DATA_TABLE,      /* Internal use only */
-    REGION_FIXED_HW = 0x7F
-} AML_REGION_TYPES;
-
 /* Comparison operation codes for match_op operator */
 
 typedef enum {
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 23a3b1ab20c1..324acec1179a 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -450,7 +450,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
            status =
                acpi_ex_create_region(op->named.data,
                                      op->named.length,
-                                     REGION_DATA_TABLE,
+                                     ACPI_ADR_SPACE_DATA_TABLE,
                                      walk_state);
            if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 4be4e921dfe1..976318138c56 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -562,7 +562,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
                    ((op->common.value.arg)->common.value.
                     integer);
            } else {
-               region_space = REGION_DATA_TABLE;
+               region_space = ACPI_ADR_SPACE_DATA_TABLE;
            }
 
            /*
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
new file mode 100644
index 000000000000..56a562a1e5d7
--- /dev/null
+++ b/drivers/acpi/acpica/evglock.c
@@ -0,0 +1,335 @@
+/******************************************************************************
+ *
+ * Module Name: evglock - Global Lock support
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acinterp.h"
+
+#define _COMPONENT          ACPI_EVENTS
+ACPI_MODULE_NAME("evglock")
+
+/* Local prototypes */
+static u32 acpi_ev_global_lock_handler(void *context);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_init_global_lock_handler
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install a handler for the global lock release event
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_init_global_lock_handler(void)
+{
+    acpi_status status;
+
+    ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
+
+    /* Attempt installation of the global lock handler */
+
+    status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
+                                              acpi_ev_global_lock_handler,
+                                              NULL);
+
+    /*
+     * If the global lock does not exist on this platform, the attempt to
+     * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
+     * Map to AE_OK, but mark global lock as not present. Any attempt to
+     * actually use the global lock will be flagged with an error.
+     */
+    acpi_gbl_global_lock_present = FALSE;
+    if (status == AE_NO_HARDWARE_RESPONSE) {
+        ACPI_ERROR((AE_INFO,
+                    "No response from Global Lock hardware, disabling lock"));
+
+        return_ACPI_STATUS(AE_OK);
+    }
+
+    status = acpi_os_create_lock(&acpi_gbl_global_lock_pending_lock);
+    if (ACPI_FAILURE(status)) {
+        return_ACPI_STATUS(status);
+    }
+
+    acpi_gbl_global_lock_pending = FALSE;
+    acpi_gbl_global_lock_present = TRUE;
+    return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_remove_global_lock_handler
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove the handler for the Global Lock
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_remove_global_lock_handler(void)
+{
+    acpi_status status;
+
+    ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
+
+    acpi_gbl_global_lock_present = FALSE;
+    status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
+                                             acpi_ev_global_lock_handler);
+
+    return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_global_lock_handler
+ *
+ * PARAMETERS:  Context - From thread interface, not used
+ *
+ * RETURN:      ACPI_INTERRUPT_HANDLED
+ *
+ * DESCRIPTION: Invoked directly from the SCI handler when a global lock
+ *              release interrupt occurs. If there is actually a pending
+ *              request for the lock, signal the waiting thread.
+ *
+ ******************************************************************************/
+
+static u32 acpi_ev_global_lock_handler(void *context)
+{
+    acpi_status status;
+    acpi_cpu_flags flags;
+
+    flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
+
+    /*
+     * If a request for the global lock is not actually pending,
+     * we are done. This handles "spurious" global lock interrupts
+     * which are possible (and have been seen) with bad BIOSs.
+     */
+    if (!acpi_gbl_global_lock_pending) {
+        goto cleanup_and_exit;
+    }
+
+    /*
+     * Send a unit to the global lock semaphore. The actual acquisition
+     * of the global lock will be performed by the waiting thread.
+     */
+    status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
+    if (ACPI_FAILURE(status)) {
+        ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
+    }
+
+    acpi_gbl_global_lock_pending = FALSE;
+
+      cleanup_and_exit:
+
+    acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
+    return (ACPI_INTERRUPT_HANDLED);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_acquire_global_lock
+ *
+ * PARAMETERS:  Timeout - Max time to wait for the lock, in millisec.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Attempt to gain ownership of the Global Lock.
+ *
+ * MUTEX:       Interpreter must be locked
+ *
+ * Note: The original implementation allowed multiple threads to "acquire" the
+ * Global Lock, and the OS would hold the lock until the last thread had
+ * released it. However, this could potentially starve the BIOS out of the
+ * lock, especially in the case where there is a tight handshake between the
+ * Embedded Controller driver and the BIOS. Therefore, this implementation
+ * allows only one thread to acquire the HW Global Lock at a time, and makes
+ * the global lock appear as a standard mutex on the OS side.
+ *
+ *****************************************************************************/
+
+acpi_status acpi_ev_acquire_global_lock(u16 timeout)
+{
+    acpi_cpu_flags flags;
+    acpi_status status;
+    u8 acquired = FALSE;
+
+    ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
+
+    /*
+     * Only one thread can acquire the GL at a time, the global_lock_mutex
+     * enforces this. This interface releases the interpreter if we must wait.
+     */
+    status =
+        acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex->mutex.
+                                  os_mutex, timeout);
+    if (ACPI_FAILURE(status)) {
+        return_ACPI_STATUS(status);
+    }
+
+    /*
+     * Update the global lock handle and check for wraparound. The handle is
+     * only used for the external global lock interfaces, but it is updated
+     * here to properly handle the case where a single thread may acquire the
+     * lock via both the AML and the acpi_acquire_global_lock interfaces. The
+     * handle is therefore updated on the first acquire from a given thread
+     * regardless of where the acquisition request originated.
+     */
+    acpi_gbl_global_lock_handle++;
+    if (acpi_gbl_global_lock_handle == 0) {
+        acpi_gbl_global_lock_handle = 1;
+    }
+
+    /*
+     * Make sure that a global lock actually exists. If not, just
+     * treat the lock as a standard mutex.
+     */
+    if (!acpi_gbl_global_lock_present) {
+        acpi_gbl_global_lock_acquired = TRUE;
+        return_ACPI_STATUS(AE_OK);
+    }
+
+    flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
+
+    do {
+
+        /* Attempt to acquire the actual hardware lock */
+
+        ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
+        if (acquired) {
+            acpi_gbl_global_lock_acquired = TRUE;
+            ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+                              "Acquired hardware Global Lock\n"));
+            break;
+        }
+
+        /*
+         * Did not get the lock. The pending bit was set above, and
+         * we must now wait until we receive the global lock
+         * released interrupt.
+         */
+        acpi_gbl_global_lock_pending = TRUE;
+        acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
+
+        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+                          "Waiting for hardware Global Lock\n"));
+
+        /*
+         * Wait for handshake with the global lock interrupt handler.
+         * This interface releases the interpreter if we must wait.
+         */
+        status =
+            acpi_ex_system_wait_semaphore
+            (acpi_gbl_global_lock_semaphore, ACPI_WAIT_FOREVER);
+
+        flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
+
+    } while (ACPI_SUCCESS(status));
+
+    acpi_gbl_global_lock_pending = FALSE;
+    acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
+
+    return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_release_global_lock
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Releases ownership of the Global Lock.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_release_global_lock(void)
+{
+    u8 pending = FALSE;
+    acpi_status status = AE_OK;
+
+    ACPI_FUNCTION_TRACE(ev_release_global_lock);
+
+    /* Lock must be already acquired */
+
+    if (!acpi_gbl_global_lock_acquired) {
+        ACPI_WARNING((AE_INFO,
+                      "Cannot release the ACPI Global Lock, it has not been acquired"));
+        return_ACPI_STATUS(AE_NOT_ACQUIRED);
+    }
+
+    if (acpi_gbl_global_lock_present) {
+
+        /* Allow any thread to release the lock */
+
+        ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending);
+
+        /*
+         * If the pending bit was set, we must write GBL_RLS to the control
+         * register
+         */
+        if (pending) {
+            status =
+                acpi_write_bit_register
+                (ACPI_BITREG_GLOBAL_LOCK_RELEASE,
+                 ACPI_ENABLE_EVENT);
+        }
+
+        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+                          "Released hardware Global Lock\n"));
+    }
+
+    acpi_gbl_global_lock_acquired = FALSE;
+
+    /* Release the local GL mutex */
+
+    acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
+    return_ACPI_STATUS(status);
+}
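
A note on the design above: the OS-side mutex guarantees a single waiter, and acpi_gbl_global_lock_pending is only read or written under acpi_gbl_global_lock_pending_lock, which is what lets the interrupt handler dismiss spurious release interrupts. A minimal sketch of how interpreter-side code is expected to use this pair of interfaces (a hypothetical caller, for illustration only and not part of this commit; both calls drop the interpreter lock internally while blocking):

static acpi_status example_with_global_lock(void)
{
    acpi_status status;

    /* Wait up to 100 ms for both the OS mutex and the hardware lock */
    status = acpi_ev_acquire_global_lock(100);
    if (ACPI_FAILURE(status)) {
        return (status);
    }

    /* ... access hardware shared with the firmware (e.g. EC registers) ... */

    return (acpi_ev_release_global_lock());
}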
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 7dc80946f7bd..d0b331844427 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -45,7 +45,6 @@
 #include "accommon.h"
 #include "acevents.h"
 #include "acnamesp.h"
-#include "acinterp.h"
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evmisc")
@@ -53,10 +52,6 @@ ACPI_MODULE_NAME("evmisc")
 /* Local prototypes */
 static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
 
-static u32 acpi_ev_global_lock_handler(void *context);
-
-static acpi_status acpi_ev_remove_global_lock_handler(void);
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_is_notify_object
@@ -275,304 +270,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
     acpi_ut_delete_generic_state(notify_info);
 }
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_global_lock_handler
- *
- * PARAMETERS:  Context - From thread interface, not used
- *
- * RETURN:      ACPI_INTERRUPT_HANDLED
- *
- * DESCRIPTION: Invoked directly from the SCI handler when a global lock
- *              release interrupt occurs. If there's a thread waiting for
- *              the global lock, signal it.
- *
- * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
- * this is not possible for some reason, a separate thread will have to be
- * scheduled to do this.
- *
- ******************************************************************************/
-static u8 acpi_ev_global_lock_pending;
-
-static u32 acpi_ev_global_lock_handler(void *context)
-{
-    acpi_status status;
-    acpi_cpu_flags flags;
-
-    flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
-
-    if (!acpi_ev_global_lock_pending) {
-        goto out;
-    }
-
-    /* Send a unit to the semaphore */
-
-    status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
-    if (ACPI_FAILURE(status)) {
-        ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
-    }
-
-    acpi_ev_global_lock_pending = FALSE;
-
- out:
-    acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
-
-    return (ACPI_INTERRUPT_HANDLED);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_init_global_lock_handler
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Install a handler for the global lock release event
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_init_global_lock_handler(void)
-{
-    acpi_status status;
-
-    ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
-
-    /* Attempt installation of the global lock handler */
-
-    status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
-                                              acpi_ev_global_lock_handler,
-                                              NULL);
-
-    /*
-     * If the global lock does not exist on this platform, the attempt to
-     * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
-     * Map to AE_OK, but mark global lock as not present. Any attempt to
-     * actually use the global lock will be flagged with an error.
-     */
-    if (status == AE_NO_HARDWARE_RESPONSE) {
-        ACPI_ERROR((AE_INFO,
-                    "No response from Global Lock hardware, disabling lock"));
-
-        acpi_gbl_global_lock_present = FALSE;
-        return_ACPI_STATUS(AE_OK);
-    }
-
-    acpi_gbl_global_lock_present = TRUE;
-    return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_remove_global_lock_handler
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove the handler for the Global Lock
- *
- ******************************************************************************/
-
-static acpi_status acpi_ev_remove_global_lock_handler(void)
-{
-    acpi_status status;
-
-    ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
-
-    acpi_gbl_global_lock_present = FALSE;
-    status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
-                                             acpi_ev_global_lock_handler);
-
-    return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_ev_acquire_global_lock
- *
- * PARAMETERS:  Timeout - Max time to wait for the lock, in millisec.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Attempt to gain ownership of the Global Lock.
- *
- * MUTEX:       Interpreter must be locked
- *
- * Note: The original implementation allowed multiple threads to "acquire" the
- * Global Lock, and the OS would hold the lock until the last thread had
- * released it. However, this could potentially starve the BIOS out of the
- * lock, especially in the case where there is a tight handshake between the
- * Embedded Controller driver and the BIOS. Therefore, this implementation
- * allows only one thread to acquire the HW Global Lock at a time, and makes
- * the global lock appear as a standard mutex on the OS side.
- *
- *****************************************************************************/
-static acpi_thread_id acpi_ev_global_lock_thread_id;
-static int acpi_ev_global_lock_acquired;
-
-acpi_status acpi_ev_acquire_global_lock(u16 timeout)
-{
-    acpi_cpu_flags flags;
-    acpi_status status = AE_OK;
-    u8 acquired = FALSE;
-
-    ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
-
-    /*
-     * Only one thread can acquire the GL at a time, the global_lock_mutex
-     * enforces this. This interface releases the interpreter if we must wait.
-     */
-    status = acpi_ex_system_wait_mutex(
-                acpi_gbl_global_lock_mutex->mutex.os_mutex, 0);
-    if (status == AE_TIME) {
-        if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) {
-            acpi_ev_global_lock_acquired++;
-            return AE_OK;
-        }
-    }
-
-    if (ACPI_FAILURE(status)) {
-        status = acpi_ex_system_wait_mutex(
-                    acpi_gbl_global_lock_mutex->mutex.os_mutex,
-                    timeout);
-    }
-    if (ACPI_FAILURE(status)) {
-        return_ACPI_STATUS(status);
-    }
-
-    acpi_ev_global_lock_thread_id = acpi_os_get_thread_id();
-    acpi_ev_global_lock_acquired++;
-
-    /*
-     * Update the global lock handle and check for wraparound. The handle is
-     * only used for the external global lock interfaces, but it is updated
-     * here to properly handle the case where a single thread may acquire the
-     * lock via both the AML and the acpi_acquire_global_lock interfaces. The
-     * handle is therefore updated on the first acquire from a given thread
-     * regardless of where the acquisition request originated.
-     */
-    acpi_gbl_global_lock_handle++;
-    if (acpi_gbl_global_lock_handle == 0) {
-        acpi_gbl_global_lock_handle = 1;
-    }
-
-    /*
-     * Make sure that a global lock actually exists. If not, just treat the
-     * lock as a standard mutex.
-     */
-    if (!acpi_gbl_global_lock_present) {
-        acpi_gbl_global_lock_acquired = TRUE;
-        return_ACPI_STATUS(AE_OK);
-    }
-
-    flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
-
-    do {
-
-        /* Attempt to acquire the actual hardware lock */
-
-        ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
-        if (acquired) {
-            acpi_gbl_global_lock_acquired = TRUE;
-
-            ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-                              "Acquired hardware Global Lock\n"));
-            break;
-        }
-
-        acpi_ev_global_lock_pending = TRUE;
-
-        acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
-
-        /*
-         * Did not get the lock. The pending bit was set above, and we
-         * must wait until we get the global lock released interrupt.
-         */
-        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-                          "Waiting for hardware Global Lock\n"));
-
-        /*
-         * Wait for handshake with the global lock interrupt handler.
-         * This interface releases the interpreter if we must wait.
-         */
-        status = acpi_ex_system_wait_semaphore(
-                    acpi_gbl_global_lock_semaphore,
-                    ACPI_WAIT_FOREVER);
-
-        flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
-
-    } while (ACPI_SUCCESS(status));
-
-    acpi_ev_global_lock_pending = FALSE;
-
-    acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
-
-    return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_release_global_lock
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Releases ownership of the Global Lock.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_release_global_lock(void)
-{
-    u8 pending = FALSE;
-    acpi_status status = AE_OK;
-
-    ACPI_FUNCTION_TRACE(ev_release_global_lock);
-
-    /* Lock must be already acquired */
-
-    if (!acpi_gbl_global_lock_acquired) {
-        ACPI_WARNING((AE_INFO,
-                      "Cannot release the ACPI Global Lock, it has not been acquired"));
-        return_ACPI_STATUS(AE_NOT_ACQUIRED);
-    }
-
-    acpi_ev_global_lock_acquired--;
-    if (acpi_ev_global_lock_acquired > 0) {
-        return AE_OK;
-    }
-
-    if (acpi_gbl_global_lock_present) {
-
-        /* Allow any thread to release the lock */
-
-        ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending);
-
-        /*
-         * If the pending bit was set, we must write GBL_RLS to the control
-         * register
-         */
-        if (pending) {
-            status =
-                acpi_write_bit_register
-                (ACPI_BITREG_GLOBAL_LOCK_RELEASE,
-                 ACPI_ENABLE_EVENT);
-        }
-
-        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-                          "Released hardware Global Lock\n"));
-    }
-
-    acpi_gbl_global_lock_acquired = FALSE;
-
-    /* Release the local GL mutex */
-    acpi_ev_global_lock_thread_id = 0;
-    acpi_ev_global_lock_acquired = 0;
-    acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
-    return_ACPI_STATUS(status);
-}
-
 /******************************************************************************
  *
  * FUNCTION:    acpi_ev_terminate
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index bea7223d7a71..f0edf5c43c03 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -55,6 +55,8 @@ static u8
 acpi_ev_has_default_handler(struct acpi_namespace_node *node,
                             acpi_adr_space_type space_id);
 
+static void acpi_ev_orphan_ec_reg_method(void);
+
 static acpi_status
 acpi_ev_reg_run(acpi_handle obj_handle,
                 u32 level, void *context, void **return_value);
@@ -561,7 +563,9 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
 
         /* Now stop region accesses by executing the _REG method */
 
-        status = acpi_ev_execute_reg_method(region_obj, 0);
+        status =
+            acpi_ev_execute_reg_method(region_obj,
+                                       ACPI_REG_DISCONNECT);
         if (ACPI_FAILURE(status)) {
             ACPI_EXCEPTION((AE_INFO, status,
                             "from region _REG, [%s]",
@@ -1062,6 +1066,12 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
                                 ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
                                 NULL, &space_id, NULL);
 
+    /* Special case for EC: handle "orphan" _REG methods with no region */
+
+    if (space_id == ACPI_ADR_SPACE_EC) {
+        acpi_ev_orphan_ec_reg_method();
+    }
+
     return_ACPI_STATUS(status);
 }
 
@@ -1120,6 +1130,113 @@ acpi_ev_reg_run(acpi_handle obj_handle,
         return (AE_OK);
     }
 
-    status = acpi_ev_execute_reg_method(obj_desc, 1);
+    status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT);
     return (status);
 }
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_orphan_ec_reg_method
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Execute an "orphan" _REG method that appears under the EC
+ *              device. This is a _REG method that has no corresponding region
+ *              within the EC device scope. The orphan _REG method appears to
+ *              have been enabled by the description of the ECDT in the ACPI
+ *              specification: "The availability of the region space can be
+ *              detected by providing a _REG method object underneath the
+ *              Embedded Controller device."
+ *
+ *              To quickly access the EC device, we use the EC_ID that appears
+ *              within the ECDT. Otherwise, we would need to perform a time-
+ *              consuming namespace walk, executing _HID methods to find the
+ *              EC device.
+ *
+ ******************************************************************************/
+
+static void acpi_ev_orphan_ec_reg_method(void)
+{
+    struct acpi_table_ecdt *table;
+    acpi_status status;
+    struct acpi_object_list args;
+    union acpi_object objects[2];
+    struct acpi_namespace_node *ec_device_node;
+    struct acpi_namespace_node *reg_method;
+    struct acpi_namespace_node *next_node;
+
+    ACPI_FUNCTION_TRACE(ev_orphan_ec_reg_method);
+
+    /* Get the ECDT (if present in system) */
+
+    status = acpi_get_table(ACPI_SIG_ECDT, 0,
+                            ACPI_CAST_INDIRECT_PTR(struct acpi_table_header,
+                                                   &table));
+    if (ACPI_FAILURE(status)) {
+        return_VOID;
+    }
+
+    /* We need a valid EC_ID string */
+
+    if (!(*table->id)) {
+        return_VOID;
+    }
+
+    /* Namespace is currently locked, must release */
+
+    (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+    /* Get a handle to the EC device referenced in the ECDT */
+
+    status = acpi_get_handle(NULL,
+                             ACPI_CAST_PTR(char, table->id),
+                             ACPI_CAST_PTR(acpi_handle, &ec_device_node));
+    if (ACPI_FAILURE(status)) {
+        goto exit;
+    }
+
+    /* Get a handle to a _REG method immediately under the EC device */
+
+    status = acpi_get_handle(ec_device_node,
+                             METHOD_NAME__REG, ACPI_CAST_PTR(acpi_handle,
+                                                             &reg_method));
+    if (ACPI_FAILURE(status)) {
+        goto exit;
+    }
+
+    /*
+     * Execute the _REG method only if there is no Operation Region in
+     * this scope with the Embedded Controller space ID. Otherwise, it
+     * will already have been executed. Note, this allows for Regions
+     * with other space IDs to be present; but the code below will then
+     * execute the _REG method with the EC space ID argument.
+     */
+    next_node = acpi_ns_get_next_node(ec_device_node, NULL);
+    while (next_node) {
+        if ((next_node->type == ACPI_TYPE_REGION) &&
+            (next_node->object) &&
+            (next_node->object->region.space_id == ACPI_ADR_SPACE_EC)) {
+            goto exit;          /* Do not execute _REG */
+        }
+        next_node = acpi_ns_get_next_node(ec_device_node, next_node);
+    }
+
+    /* Evaluate the _REG(EC,Connect) method */
+
+    args.count = 2;
+    args.pointer = objects;
+    objects[0].type = ACPI_TYPE_INTEGER;
+    objects[0].integer.value = ACPI_ADR_SPACE_EC;
+    objects[1].type = ACPI_TYPE_INTEGER;
+    objects[1].integer.value = ACPI_REG_CONNECT;
+
+    status = acpi_evaluate_object(reg_method, NULL, &args, NULL);
+
+      exit:
+    /* We ignore all errors from above, don't care */
+
+    status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+    return_VOID;
+}
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 9659cee6093e..55a5d35ef34a 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -637,7 +637,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
 
             status =
                 acpi_ev_execute_reg_method
-                (region_obj, 1);
+                (region_obj, ACPI_REG_CONNECT);
 
             if (acpi_ns_locked) {
                 status =
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index c85c8c45599d..00cd95692a91 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -130,20 +130,21 @@ acpi_install_address_space_handler(acpi_handle device,
     case ACPI_ADR_SPACE_PCI_CONFIG:
     case ACPI_ADR_SPACE_DATA_TABLE:
 
-        if (acpi_gbl_reg_methods_executed) {
+        if (!acpi_gbl_reg_methods_executed) {
 
-            /* Run all _REG methods for this address space */
-
-            status = acpi_ev_execute_reg_methods(node, space_id);
+            /* We will defer execution of the _REG methods for this space */
+            goto unlock_and_exit;
         }
         break;
 
     default:
-
-        status = acpi_ev_execute_reg_methods(node, space_id);
         break;
     }
 
+    /* Run all _REG methods for this address space */
+
+    status = acpi_ev_execute_reg_methods(node, space_id);
+
 unlock_and_exit:
     (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
     return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index e7b372d17667..110711afada8 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -305,7 +305,8 @@ acpi_ex_create_region(u8 * aml_start,
      * range
      */
     if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
-        (region_space < ACPI_USER_REGION_BEGIN)) {
+        (region_space < ACPI_USER_REGION_BEGIN) &&
+        (region_space != ACPI_ADR_SPACE_DATA_TABLE)) {
         ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
                     region_space));
         return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 1d76ac85b5e7..ac7b854b0bd7 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -74,7 +74,6 @@ ACPI_MODULE_NAME("nsrepair")
  *
  * Additional possible repairs:
  *
- * Optional/unnecessary NULL package elements removed
  * Required package elements that are NULL replaced by Integer/String/Buffer
  * Incorrect standalone package wrapped with required outer package
  *
@@ -623,16 +622,12 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
     ACPI_FUNCTION_NAME(ns_remove_null_elements);
 
     /*
-     * PTYPE1 packages contain no subpackages.
-     * PTYPE2 packages contain a variable number of sub-packages. We can
-     * safely remove all NULL elements from the PTYPE2 packages.
+     * We can safely remove all NULL elements from these package types:
+     * PTYPE1_VAR packages contain a variable number of simple data types.
+     * PTYPE2 packages contain a variable number of sub-packages.
      */
     switch (package_type) {
-    case ACPI_PTYPE1_FIXED:
     case ACPI_PTYPE1_VAR:
-    case ACPI_PTYPE1_OPTION:
-        return;
-
     case ACPI_PTYPE2:
     case ACPI_PTYPE2_COUNT:
     case ACPI_PTYPE2_PKG_COUNT:
@@ -642,6 +637,8 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
         break;
 
     default:
+    case ACPI_PTYPE1_FIXED:
+    case ACPI_PTYPE1_OPTION:
         return;
     }
 
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 136a814cec69..97cb36f85ce9 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -170,8 +170,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
     "SMBus",
     "SystemCMOS",
     "PCIBARTarget",
-    "IPMI",
-    "DataTable"
+    "IPMI"
 };
 
 char *acpi_ut_get_region_name(u8 space_id)
@@ -179,6 +178,8 @@ char *acpi_ut_get_region_name(u8 space_id)
 
     if (space_id >= ACPI_USER_REGION_BEGIN) {
         return ("UserDefinedRegion");
+    } else if (space_id == ACPI_ADR_SPACE_DATA_TABLE) {
+        return ("DataTable");
     } else if (space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
         return ("FunctionalFixedHW");
     } else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index a946c689f03b..7d797e2baecd 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -83,9 +83,15 @@ acpi_status acpi_ut_mutex_initialize(void)

 	/* Create the spinlocks for use at interrupt level */

-	spin_lock_init(acpi_gbl_gpe_lock);
-	spin_lock_init(acpi_gbl_hardware_lock);
-	spin_lock_init(acpi_ev_global_lock_pending_lock);
+	status = acpi_os_create_lock (&acpi_gbl_gpe_lock);
+	if (ACPI_FAILURE (status)) {
+		return_ACPI_STATUS (status);
+	}
+
+	status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
+	if (ACPI_FAILURE (status)) {
+		return_ACPI_STATUS (status);
+	}

 	/* Mutex for _OSI support */
 	status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 9749980ca6ca..d1e06c182cdb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -227,7 +227,7 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
 	acpi_status status = AE_OK;
 	char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };

-	if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
+	if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
 		return -EINVAL;

 	/* Make sure this is a valid target state */
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
new file mode 100644
index 000000000000..5d42c2414ae5
--- /dev/null
+++ b/drivers/acpi/custom_method.c
@@ -0,0 +1,100 @@
+/*
+ * custom_method.c - debugfs interface for customizing ACPI control methods
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <acpi/acpi_drivers.h>
+
+#include "internal.h"
+
+#define _COMPONENT		ACPI_SYSTEM_COMPONENT
+ACPI_MODULE_NAME("custom_method");
+MODULE_LICENSE("GPL");
+
+static struct dentry *cm_dentry;
+
+/* /sys/kernel/debug/acpi/custom_method */
+
+static ssize_t cm_write(struct file *file, const char __user * user_buf,
+			size_t count, loff_t *ppos)
+{
+	static char *buf;
+	static u32 max_size;
+	static u32 uncopied_bytes;
+
+	struct acpi_table_header table;
+	acpi_status status;
+
+	if (!(*ppos)) {
+		/* parse the table header to get the table length */
+		if (count <= sizeof(struct acpi_table_header))
+			return -EINVAL;
+		if (copy_from_user(&table, user_buf,
+				   sizeof(struct acpi_table_header)))
+			return -EFAULT;
+		uncopied_bytes = max_size = table.length;
+		buf = kzalloc(max_size, GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+	}
+
+	if (buf == NULL)
+		return -EINVAL;
+
+	if ((*ppos > max_size) ||
+	    (*ppos + count > max_size) ||
+	    (*ppos + count < count) ||
+	    (count > uncopied_bytes))
+		return -EINVAL;
+
+	if (copy_from_user(buf + (*ppos), user_buf, count)) {
+		kfree(buf);
+		buf = NULL;
+		return -EFAULT;
+	}
+
+	uncopied_bytes -= count;
+	*ppos += count;
+
+	if (!uncopied_bytes) {
+		status = acpi_install_method(buf);
+		kfree(buf);
+		buf = NULL;
+		if (ACPI_FAILURE(status))
+			return -EINVAL;
+		add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+	}
+
+	return count;
+}
+
+static const struct file_operations cm_fops = {
+	.write = cm_write,
+	.llseek = default_llseek,
+};
+
+static int __init acpi_custom_method_init(void)
+{
+	if (acpi_debugfs_dir == NULL)
+		return -ENOENT;
+
+	cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
+					acpi_debugfs_dir, NULL, &cm_fops);
+	if (cm_dentry == NULL)
+		return -ENODEV;
+
+	return 0;
+}
+
+static void __exit acpi_custom_method_exit(void)
+{
+	if (cm_dentry)
+		debugfs_remove(cm_dentry);
+}
+
+module_init(acpi_custom_method_init);
+module_exit(acpi_custom_method_exit);
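
The cm_write() handler above streams a complete ACPI table image: the first write must cover at least struct acpi_table_header so the kernel can size its buffer from table.length, and acpi_install_method() runs only once the last byte has arrived. A minimal user-space sketch of that usage follows; it assumes a hypothetical method.aml produced by an AML compiler such as iasl, and is illustrative rather than part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int in = open("method.aml", O_RDONLY);
	int out = open("/sys/kernel/debug/acpi/custom_method", O_WRONLY);

	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}
	/* cm_write() sizes its buffer from the table header in the first chunk */
	while ((n = read(in, buf, sizeof(buf))) > 0) {
		if (write(out, buf, n) != n) {
			perror("write");
			return 1;
		}
	}
	close(out);
	close(in);
	return 0;
}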
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 384f7abcff77..182a9fc36355 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -3,100 +3,16 @@
  */

 #include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/uaccess.h>
 #include <linux/debugfs.h>
 #include <acpi/acpi_drivers.h>

 #define _COMPONENT		ACPI_SYSTEM_COMPONENT
 ACPI_MODULE_NAME("debugfs");

+struct dentry *acpi_debugfs_dir;
+EXPORT_SYMBOL_GPL(acpi_debugfs_dir);

-/* /sys/modules/acpi/parameters/aml_debug_output */
-
-module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
-		   bool, 0644);
-MODULE_PARM_DESC(aml_debug_output,
-		 "To enable/disable the ACPI Debug Object output.");
-
-/* /sys/kernel/debug/acpi/custom_method */
-
-static ssize_t cm_write(struct file *file, const char __user * user_buf,
-			size_t count, loff_t *ppos)
+void __init acpi_debugfs_init(void)
 {
-	static char *buf;
-	static u32 max_size;
-	static u32 uncopied_bytes;
-
-	struct acpi_table_header table;
-	acpi_status status;
-
-	if (!(*ppos)) {
-		/* parse the table header to get the table length */
-		if (count <= sizeof(struct acpi_table_header))
-			return -EINVAL;
-		if (copy_from_user(&table, user_buf,
-				   sizeof(struct acpi_table_header)))
-			return -EFAULT;
-		uncopied_bytes = max_size = table.length;
-		buf = kzalloc(max_size, GFP_KERNEL);
-		if (!buf)
-			return -ENOMEM;
-	}
-
-	if (buf == NULL)
-		return -EINVAL;
-
-	if ((*ppos > max_size) ||
-	    (*ppos + count > max_size) ||
-	    (*ppos + count < count) ||
-	    (count > uncopied_bytes))
-		return -EINVAL;
-
-	if (copy_from_user(buf + (*ppos), user_buf, count)) {
-		kfree(buf);
-		buf = NULL;
-		return -EFAULT;
-	}
-
-	uncopied_bytes -= count;
-	*ppos += count;
-
-	if (!uncopied_bytes) {
-		status = acpi_install_method(buf);
-		kfree(buf);
-		buf = NULL;
-		if (ACPI_FAILURE(status))
-			return -EINVAL;
-		add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
-	}
-
-	return count;
-}
-
-static const struct file_operations cm_fops = {
-	.write = cm_write,
-	.llseek = default_llseek,
-};
-
-int __init acpi_debugfs_init(void)
-{
-	struct dentry *acpi_dir, *cm_dentry;
-
-	acpi_dir = debugfs_create_dir("acpi", NULL);
-	if (!acpi_dir)
-		goto err;
-
-	cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
-					acpi_dir, NULL, &cm_fops);
-	if (!cm_dentry)
-		goto err;
-
-	return 0;
-
-err:
-	if (acpi_dir)
-		debugfs_remove(acpi_dir);
-	return -EINVAL;
+	acpi_debugfs_dir = debugfs_create_dir("acpi", NULL);
 }
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index fa848c4116a8..b19a18dd994f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -69,7 +69,6 @@ enum ec_command {

 #define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
 #define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
-#define ACPI_EC_CDELAY		10	/* Wait 10us before polling EC */
 #define ACPI_EC_MSI_UDELAY	550	/* Wait 550us for MSI EC */

 #define ACPI_EC_STORM_THRESHOLD 8	/* number of false interrupts
@@ -433,8 +432,7 @@ EXPORT_SYMBOL(ec_write);

 int ec_transaction(u8 command,
 		   const u8 * wdata, unsigned wdata_len,
-		   u8 * rdata, unsigned rdata_len,
-		   int force_poll)
+		   u8 * rdata, unsigned rdata_len)
 {
 	struct transaction t = {.command = command,
 				.wdata = wdata, .rdata = rdata,
@@ -592,8 +590,6 @@ static void acpi_ec_gpe_query(void *ec_cxt)
 	mutex_unlock(&ec->lock);
 }

-static void acpi_ec_gpe_query(void *ec_cxt);
-
 static int ec_check_sci(struct acpi_ec *ec, u8 state)
 {
 	if (state & ACPI_EC_FLAG_SCI) {
@@ -808,8 +804,6 @@ static int acpi_ec_add(struct acpi_device *device)
 		return -EINVAL;
 	}

-	ec->handle = device->handle;
-
 	/* Find and register all query methods */
 	acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
 			    acpi_ec_register_query_methods, NULL, ec, NULL);
@@ -938,8 +932,19 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
 	ec_flag_msi, "MSI hardware", {
 	DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL},
 	{
+	ec_flag_msi, "Quanta hardware", {
+	DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
+	DMI_MATCH(DMI_PRODUCT_NAME, "TW8/SW8/DW8"),}, NULL},
+	{
+	ec_flag_msi, "Quanta hardware", {
+	DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
+	DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL},
+	{
 	ec_validate_ecdt, "ASUS hardware", {
 	DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
+	{
+	ec_validate_ecdt, "ASUS hardware", {
+	DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
 	{},
 };

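With the unused force_poll parameter dropped, external callers of ec_transaction() now pass five arguments. A hedged caller-side sketch, using a hypothetical command byte and EC register address purely for illustration:

#include <linux/acpi.h>

static int my_read_ec_register(u8 command, u8 *out)
{
	u8 addr = 0x10;		/* hypothetical EC register address */

	/* note: no trailing force_poll argument any more */
	return ec_transaction(command, &addr, 1, out, 1);
}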
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 4bfb759deb10..ca75b9ce0489 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -28,9 +28,10 @@ int acpi_scan_init(void);
 int acpi_sysfs_init(void);

 #ifdef CONFIG_DEBUG_FS
+extern struct dentry *acpi_debugfs_dir;
 int acpi_debugfs_init(void);
 #else
-static inline int acpi_debugfs_init(void) { return 0; }
+static inline void acpi_debugfs_init(void) { return; }
 #endif

 /* --------------------------------------------------------------------------
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 45ad4ffef533..52ca9649d769 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -902,14 +902,6 @@ void acpi_os_wait_events_complete(void *context)

 EXPORT_SYMBOL(acpi_os_wait_events_complete);

-/*
- * Deallocate the memory for a spinlock.
- */
-void acpi_os_delete_lock(acpi_spinlock handle)
-{
-	return;
-}
-
 acpi_status
 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
 {
@@ -1341,6 +1333,31 @@ int acpi_resources_are_enforced(void)
 EXPORT_SYMBOL(acpi_resources_are_enforced);

 /*
+ * Create and initialize a spinlock.
+ */
+acpi_status
+acpi_os_create_lock(acpi_spinlock *out_handle)
+{
+	spinlock_t *lock;
+
+	lock = ACPI_ALLOCATE(sizeof(spinlock_t));
+	if (!lock)
+		return AE_NO_MEMORY;
+	spin_lock_init(lock);
+	*out_handle = lock;
+
+	return AE_OK;
+}
+
+/*
+ * Deallocate the memory for a spinlock.
+ */
+void acpi_os_delete_lock(acpi_spinlock handle)
+{
+	ACPI_FREE(handle);
+}
+
+/*
  * Acquire a spinlock.
  *
  * handle is a pointer to the spinlock_t.
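Because the OSL now allocates spinlocks dynamically, every acpi_os_create_lock() must be paired with acpi_os_delete_lock(), and creation can fail with AE_NO_MEMORY. A minimal sketch of the expected pairing, using a hypothetical lock rather than one from this patch:

#include <acpi/acpi.h>

static acpi_spinlock my_lock;	/* hypothetical, for illustration only */

static acpi_status my_subsystem_init(void)
{
	acpi_status status;

	status = acpi_os_create_lock(&my_lock);	/* allocates and spin_lock_init()s */
	if (ACPI_FAILURE(status))
		return status;
	return AE_OK;
}

static void my_subsystem_exit(void)
{
	acpi_os_delete_lock(my_lock);		/* frees the allocation */
}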
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 25bf17da69fd..02d2a4c9084d 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -37,7 +37,6 @@ static struct dmi_system_id __initdata processor_idle_dmi_table[] = {
 	{},
 };

-#ifdef CONFIG_SMP
 static int map_lapic_id(struct acpi_subtable_header *entry,
 			u32 acpi_id, int *apic_id)
 {
@@ -165,7 +164,9 @@ exit:

 int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
 {
+#ifdef CONFIG_SMP
 	int i;
+#endif
 	int apic_id = -1;

 	apic_id = map_mat_entry(handle, type, acpi_id);
@@ -174,14 +175,19 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
 	if (apic_id == -1)
 		return apic_id;

+#ifdef CONFIG_SMP
 	for_each_possible_cpu(i) {
 		if (cpu_physical_id(i) == apic_id)
 			return i;
 	}
+#else
+	/* In UP kernel, only processor 0 is valid */
+	if (apic_id == 0)
+		return apic_id;
+#endif
 	return -1;
 }
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
-#endif

 static bool __init processor_physically_present(acpi_handle handle)
 {
@@ -217,7 +223,7 @@ static bool __init processor_physically_present(acpi_handle handle)
 	type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
 	cpuid = acpi_get_cpuid(handle, type, acpi_id);

-	if ((cpuid == -1) && (num_possible_cpus() > 1))
+	if (cpuid == -1)
 		return false;

 	return true;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d615b7d69bca..431ab11c8c1b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -161,7 +161,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
 		return;

-	if (c1e_detected)
+	if (amd_e400_c1e_detected)
 		type = ACPI_STATE_C1;

 	/*
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 61891e75583d..77255f250dbb 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -220,6 +220,14 @@ module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
 		  NULL, 0644);
 #endif /* CONFIG_ACPI_DEBUG */

+
+/* /sys/modules/acpi/parameters/aml_debug_output */
+
+module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
+		   bool, 0644);
+MODULE_PARM_DESC(aml_debug_output,
+		 "To enable/disable the ACPI Debug Object output.");
+
 /* /sys/module/acpi/parameters/acpica_version */
 static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
 {
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 7025593a58c8..d74926e0939e 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -603,6 +603,10 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
 	if (ret)
 		goto err_out;

+	/* Hard-coded primecell ID instead of plug-n-play */
+	if (dev->periphid != 0)
+		goto skip_probe;
+
 	/*
 	 * Dynamically calculate the size of the resource
 	 * and use this for iomap
@@ -643,6 +647,7 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
 	if (ret)
 		goto err_release;

+ skip_probe:
 	ret = device_add(&dev->dev);
 	if (ret)
 		goto err_release;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index db8f88586c8d..98de8f418676 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1038,6 +1038,7 @@ static void floppy_disable_hlt(void)
 {
 	unsigned long flags;

+	WARN_ONCE(1, "floppy_disable_hlt() scheduled for removal in 2012");
 	spin_lock_irqsave(&floppy_hlt_lock, flags);
 	if (!hlt_disabled) {
 		hlt_disabled = 1;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index a0aabd904a51..46b8136c31bb 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -321,7 +321,6 @@ static void pcd_init_units(void)
 		strcpy(disk->disk_name, cd->name);	/* umm... */
 		disk->fops = &pcd_bdops;
 		disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
-		disk->events = DISK_EVENT_MEDIA_CHANGE;
 	}
 }

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6ecf89cdf006..079c08808d8a 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -6,10 +6,13 @@
 #include <linux/virtio.h>
 #include <linux/virtio_blk.h>
 #include <linux/scatterlist.h>
+#include <linux/string_helpers.h>
+#include <scsi/scsi_cmnd.h>

 #define PART_BITS 4

 static int major, index;
+struct workqueue_struct *virtblk_wq;

 struct virtio_blk
 {
@@ -26,6 +29,9 @@ struct virtio_blk

 	mempool_t *pool;

+	/* Process context for config space updates */
+	struct work_struct config_work;
+
 	/* What host tells us, plus 2 for header & tailer. */
 	unsigned int sg_elems;

@@ -141,7 +147,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);

 	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
-		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
+		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
 		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
 			   sizeof(vbr->in_hdr));
 	}
@@ -291,6 +297,46 @@ static ssize_t virtblk_serial_show(struct device *dev,
 }
 DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);

+static void virtblk_config_changed_work(struct work_struct *work)
+{
+	struct virtio_blk *vblk =
+		container_of(work, struct virtio_blk, config_work);
+	struct virtio_device *vdev = vblk->vdev;
+	struct request_queue *q = vblk->disk->queue;
+	char cap_str_2[10], cap_str_10[10];
+	u64 capacity, size;
+
+	/* Host must always specify the capacity. */
+	vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
+			  &capacity, sizeof(capacity));
+
+	/* If capacity is too big, truncate with warning. */
+	if ((sector_t)capacity != capacity) {
+		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
+			 (unsigned long long)capacity);
+		capacity = (sector_t)-1;
+	}
+
+	size = capacity * queue_logical_block_size(q);
+	string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
+	string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
+
+	dev_notice(&vdev->dev,
+		   "new size: %llu %d-byte logical blocks (%s/%s)\n",
+		   (unsigned long long)capacity,
+		   queue_logical_block_size(q),
+		   cap_str_10, cap_str_2);
+
+	set_capacity(vblk->disk, capacity);
+}
+
+static void virtblk_config_changed(struct virtio_device *vdev)
+{
+	struct virtio_blk *vblk = vdev->priv;
+
+	queue_work(virtblk_wq, &vblk->config_work);
+}
+
 static int __devinit virtblk_probe(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk;
@@ -327,6 +373,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	vblk->vdev = vdev;
 	vblk->sg_elems = sg_elems;
 	sg_init_table(vblk->sg, vblk->sg_elems);
+	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

 	/* We expect one virtqueue, for output. */
 	vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
@@ -477,6 +524,8 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk = vdev->priv;

+	flush_work(&vblk->config_work);
+
 	/* Nothing should be pending. */
 	BUG_ON(!list_empty(&vblk->reqs));

@@ -508,27 +557,47 @@ static unsigned int features[] = {
  * Use __refdata to avoid this warning.
  */
 static struct virtio_driver __refdata virtio_blk = {
 	.feature_table		= features,
 	.feature_table_size	= ARRAY_SIZE(features),
 	.driver.name		= KBUILD_MODNAME,
 	.driver.owner		= THIS_MODULE,
 	.id_table		= id_table,
 	.probe			= virtblk_probe,
 	.remove			= __devexit_p(virtblk_remove),
+	.config_changed		= virtblk_config_changed,
 };

 static int __init init(void)
 {
+	int error;
+
+	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
+	if (!virtblk_wq)
+		return -ENOMEM;
+
 	major = register_blkdev(0, "virtblk");
-	if (major < 0)
-		return major;
-	return register_virtio_driver(&virtio_blk);
+	if (major < 0) {
+		error = major;
+		goto out_destroy_workqueue;
+	}
+
+	error = register_virtio_driver(&virtio_blk);
+	if (error)
+		goto out_unregister_blkdev;
+	return 0;
+
+out_unregister_blkdev:
+	unregister_blkdev(major, "virtblk");
+out_destroy_workqueue:
+	destroy_workqueue(virtblk_wq);
+	return error;
 }

 static void __exit fini(void)
 {
 	unregister_blkdev(major, "virtblk");
 	unregister_virtio_driver(&virtio_blk);
+	destroy_workqueue(virtblk_wq);
 }
 module_init(init);
 module_exit(fini);
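
The virtio_blk changes follow a general pattern: the config_changed callback arrives in atomic context, so the driver only queues work there and performs the potentially sleeping config-space reads and block-layer updates from process context. A stripped-down sketch of the same pattern for a hypothetical virtio driver, using the system workqueue instead of the dedicated virtblk_wq:

#include <linux/workqueue.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

struct my_vdev_priv {
	struct virtio_device *vdev;
	struct work_struct config_work;
};

static void my_config_work(struct work_struct *work)
{
	struct my_vdev_priv *p =
		container_of(work, struct my_vdev_priv, config_work);
	u64 capacity;

	/* process context: config-space access and sleeping locks are fine */
	p->vdev->config->get(p->vdev, 0, &capacity, sizeof(capacity));
}

static void my_config_changed(struct virtio_device *vdev)
{
	struct my_vdev_priv *p = vdev->priv;

	schedule_work(&p->config_work);	/* defer; never sleep here */
}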
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index ae15a4ddaa9b..7878da89d29e 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -627,7 +627,6 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	gendisk->fops = &viocd_fops;
 	gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
 			 GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
-	gendisk->events = DISK_EVENT_MEDIA_CHANGE;
 	set_capacity(gendisk, 0);
 	gendisk->private_data = d;
 	d->viocd_disk = gendisk;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 838568a7dbf5..fb68b1295373 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1677,17 +1677,12 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
 	portdev->config.max_nr_ports = 1;
 	if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
 		multiport = true;
-		vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT;
-
 		vdev->config->get(vdev, offsetof(struct virtio_console_config,
 						 max_nr_ports),
 				  &portdev->config.max_nr_ports,
 				  sizeof(portdev->config.max_nr_ports));
 	}

-	/* Let the Host know we support multiple ports.*/
-	vdev->config->finalize_features(vdev);
-
 	err = init_vqs(portdev);
 	if (err < 0) {
 		dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f508690eb958..c47f3d09c1ee 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -237,6 +237,7 @@ static int menu_select(struct cpuidle_device *dev)
 	unsigned int power_usage = -1;
 	int i;
 	int multiplier;
+	struct timespec t;

 	if (data->needs_update) {
 		menu_update(dev);
@@ -251,8 +252,9 @@ static int menu_select(struct cpuidle_device *dev)
 		return 0;

 	/* determine the expected residency time, round up */
+	t = ktime_to_timespec(tick_nohz_get_sleep_length());
 	data->expected_us =
-	    DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000);
+		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;


 	data->bucket = which_bucket(data->expected_us);
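
The governor change replaces the u32 nanosecond cast with timespec arithmetic because the old expression truncated the predicted sleep length to 32 bits before the divide, so anything beyond roughly 4.29 s (2^32 ns) wrapped around. A standalone, runnable demonstration of the two computations; the 10 s value is arbitrary:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sleep_ns = 10000000000ULL;		/* a 10 s NOHZ sleep */
	uint32_t old_us = (uint32_t)sleep_ns / 1000;	/* truncates to 32 bits first */
	uint64_t new_us = (sleep_ns / 1000000000ULL) * 1000000ULL
			+ (sleep_ns % 1000000000ULL) / 1000ULL;

	printf("old expected_us = %u\n", old_us);	/* 1410065: wrapped */
	printf("new expected_us = %llu\n",
	       (unsigned long long)new_us);		/* 10000000: correct */
	return 0;
}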
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a572600e44eb..25cf327cd1cb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -200,16 +200,18 @@ config PL330_DMA
 	  platform_data for a dma-pl330 device.

 config PCH_DMA
-	tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support"
+	tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
 	help
 	  Enable support for Intel EG20T PCH DMA engine.

-	  This driver also can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/
-	  Output Hub) which is for IVI(In-Vehicle Infotainment) use.
-	  ML7213 is companion chip for Intel Atom E6xx series.
-	  ML7213 is completely compatible for Intel EG20T PCH.
+	  This driver can also be used for the OKI SEMICONDUCTOR IOH (Input/
+	  Output Hub) variants ML7213 and ML7223. The ML7213 IOH is for
+	  IVI (In-Vehicle Infotainment) use and the ML7223 IOH is for
+	  MP (Media Phone) use. Both are companion chips for the Intel Atom
+	  E6xx series and are fully compatible with the Intel EG20T PCH.

 config IMX_SDMA
 	tristate "i.MX SDMA support"
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
new file mode 100644
index 000000000000..a4af8589330c
--- /dev/null
+++ b/drivers/dma/TODO
@@ -0,0 +1,14 @@
+TODO for slave dma
+
+1. Move remaining drivers to use new slave interface
+2. Remove old slave pointer mechanism
+3. Make issue_pending start the transaction in the drivers below:
+	- mpc512x_dma
+	- imx-dma
+	- imx-sdma
+	- mxs-dma.c
+	- dw_dmac
+	- intel_mid_dma
+	- ste_dma40
+4. Check other subsystems for dma drivers and merge/move to dmaengine
+5. Remove dma_slave_config's dma direction.
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 235f53bf494e..36144f88d718 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -37,8 +37,8 @@

 #define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
 #define	ATC_DEFAULT_CTRLA	(0)
-#define	ATC_DEFAULT_CTRLB	(ATC_SIF(0) \
-				|ATC_DIF(1))
+#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
+				|ATC_DIF(AT_DMA_MEM_IF))

 /*
  * Initial number of descriptors to allocate for each channel. This could
@@ -165,6 +165,29 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
 }

 /**
+ * atc_desc_chain - build chain adding a descriptor
+ * @first: address of first descriptor of the chain
+ * @prev: address of previous descriptor of the chain
+ * @desc: descriptor to queue
+ *
+ * Called from prep_* functions
+ */
+static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
+			   struct at_desc *desc)
+{
+	if (!(*first)) {
+		*first = desc;
+	} else {
+		/* inform the HW lli about chaining */
+		(*prev)->lli.dscr = desc->txd.phys;
+		/* insert the link descriptor to the LD ring */
+		list_add_tail(&desc->desc_node,
+			      &(*first)->tx_list);
+	}
+	*prev = desc;
+}
+
+/**
  * atc_assign_cookie - compute and assign new cookie
  * @atchan: channel we work on
  * @desc: descriptor to assign cookie for
@@ -237,16 +260,12 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
 static void
 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 {
-	dma_async_tx_callback		callback;
-	void				*param;
 	struct dma_async_tx_descriptor	*txd = &desc->txd;

 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"descriptor %u complete\n", txd->cookie);

 	atchan->completed_cookie = txd->cookie;
-	callback = txd->callback;
-	param = txd->callback_param;

 	/* move children to free_list */
 	list_splice_init(&desc->tx_list, &atchan->free_list);
@@ -278,12 +297,19 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 		}
 	}

-	/*
-	 * The API requires that no submissions are done from a
-	 * callback, so we don't need to drop the lock here
-	 */
-	if (callback)
-		callback(param);
+	/* for cyclic transfers,
+	 * no need to replay callback function while stopping */
+	if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+		dma_async_tx_callback	callback = txd->callback;
+		void			*param = txd->callback_param;
+
+		/*
+		 * The API requires that no submissions are done from a
+		 * callback, so we don't need to drop the lock here
+		 */
+		if (callback)
+			callback(param);
+	}

 	dma_run_dependencies(txd);
 }
@@ -419,6 +445,26 @@ static void atc_handle_error(struct at_dma_chan *atchan)
 	atc_chain_complete(atchan, bad_desc);
 }

+/**
+ * atc_handle_cyclic - at the end of a period, run callback function
+ * @atchan: channel used for cyclic operations
+ *
+ * Called with atchan->lock held and bh disabled
+ */
+static void atc_handle_cyclic(struct at_dma_chan *atchan)
+{
+	struct at_desc			*first = atc_first_active(atchan);
+	struct dma_async_tx_descriptor	*txd = &first->txd;
+	dma_async_tx_callback		callback = txd->callback;
+	void				*param = txd->callback_param;
+
+	dev_vdbg(chan2dev(&atchan->chan_common),
+			"new cyclic period llp 0x%08x\n",
+			channel_readl(atchan, DSCR));
+
+	if (callback)
+		callback(param);
+}

 /*--  IRQ & Tasklet  ---------------------------------------------------*/

@@ -426,16 +472,11 @@ static void atc_tasklet(unsigned long data)
 {
 	struct at_dma_chan *atchan = (struct at_dma_chan *)data;

-	/* Channel cannot be enabled here */
-	if (atc_chan_is_enabled(atchan)) {
-		dev_err(chan2dev(&atchan->chan_common),
-			"BUG: channel enabled in tasklet\n");
-		return;
-	}
-
 	spin_lock(&atchan->lock);
-	if (test_and_clear_bit(0, &atchan->error_status))
+	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
 		atc_handle_error(atchan);
+	else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+		atc_handle_cyclic(atchan);
 	else
 		atc_advance_work(atchan);

@@ -464,12 +505,13 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)

 	for (i = 0; i < atdma->dma_common.chancnt; i++) {
 		atchan = &atdma->chan[i];
-		if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
+		if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
 			if (pending & AT_DMA_ERR(i)) {
 				/* Disable channel on AHB error */
-				dma_writel(atdma, CHDR, atchan->mask);
+				dma_writel(atdma, CHDR,
+					AT_DMA_RES(i) | atchan->mask);
 				/* Give information to tasklet */
-				set_bit(0, &atchan->error_status);
+				set_bit(ATC_IS_ERROR, &atchan->status);
 			}
 			tasklet_schedule(&atchan->tasklet);
 			ret = IRQ_HANDLED;
@@ -549,7 +591,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	}

 	ctrla =   ATC_DEFAULT_CTRLA;
-	ctrlb =   ATC_DEFAULT_CTRLB
+	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
 		| ATC_SRC_ADDR_MODE_INCR
 		| ATC_DST_ADDR_MODE_INCR
 		| ATC_FC_MEM2MEM;
@@ -584,16 +626,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,

 		desc->txd.cookie = 0;

-		if (!first) {
-			first = desc;
-		} else {
-			/* inform the HW lli about chaining */
-			prev->lli.dscr = desc->txd.phys;
-			/* insert the link descriptor to the LD ring */
-			list_add_tail(&desc->desc_node,
-					&first->tx_list);
-		}
-		prev = desc;
+		atc_desc_chain(&first, &prev, desc);
 	}

 	/* First descriptor of the chain embedds additional information */
@@ -639,7 +672,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct scatterlist	*sg;
 	size_t			total_len = 0;

-	dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
+	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
+			sg_len,
 			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
 			flags);

@@ -651,14 +685,15 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	reg_width = atslave->reg_width;

 	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
-	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;
+	ctrlb = ATC_IEN;

 	switch (direction) {
 	case DMA_TO_DEVICE:
 		ctrla |=  ATC_DST_WIDTH(reg_width);
 		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
 			| ATC_SRC_ADDR_MODE_INCR
-			| ATC_FC_MEM2PER;
+			| ATC_FC_MEM2PER
+			| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
 		reg = atslave->tx_reg;
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct at_desc	*desc;
@@ -682,16 +717,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 					| len >> mem_width;
 			desc->lli.ctrlb = ctrlb;

-			if (!first) {
-				first = desc;
-			} else {
-				/* inform the HW lli about chaining */
-				prev->lli.dscr = desc->txd.phys;
-				/* insert the link descriptor to the LD ring */
-				list_add_tail(&desc->desc_node,
-						&first->tx_list);
-			}
-			prev = desc;
+			atc_desc_chain(&first, &prev, desc);
 			total_len += len;
 		}
 		break;
@@ -699,7 +725,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		ctrla |=  ATC_SRC_WIDTH(reg_width);
 		ctrlb |=  ATC_DST_ADDR_MODE_INCR
 			| ATC_SRC_ADDR_MODE_FIXED
-			| ATC_FC_PER2MEM;
+			| ATC_FC_PER2MEM
+			| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);

 		reg = atslave->rx_reg;
 		for_each_sg(sgl, sg, sg_len, i) {
@@ -724,16 +751,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 					| len >> reg_width;
 			desc->lli.ctrlb = ctrlb;

-			if (!first) {
-				first = desc;
-			} else {
-				/* inform the HW lli about chaining */
-				prev->lli.dscr = desc->txd.phys;
-				/* insert the link descriptor to the LD ring */
-				list_add_tail(&desc->desc_node,
-						&first->tx_list);
-			}
-			prev = desc;
+			atc_desc_chain(&first, &prev, desc);
 			total_len += len;
 		}
 		break;
@@ -759,41 +777,211 @@ err_desc_get:
 	return NULL;
 }

+/**
+ * atc_dma_cyclic_check_values
+ * Check for too big/unaligned periods and unaligned DMA buffer
+ */
+static int
+atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
+		size_t period_len, enum dma_data_direction direction)
+{
+	if (period_len > (ATC_BTSIZE_MAX << reg_width))
+		goto err_out;
+	if (unlikely(period_len & ((1 << reg_width) - 1)))
+		goto err_out;
+	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+		goto err_out;
+	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+		goto err_out;
+
+	return 0;
+
+err_out:
+	return -EINVAL;
+}
+
+/**
+ * atc_dma_cyclic_fill_desc - Fill one period descriptor
+ */
+static int
+atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
+		unsigned int period_index, dma_addr_t buf_addr,
+		size_t period_len, enum dma_data_direction direction)
+{
+	u32		ctrla;
+	unsigned int	reg_width = atslave->reg_width;
+
+	/* prepare common CTRLA value */
+	ctrla =   ATC_DEFAULT_CTRLA | atslave->ctrla
+		| ATC_DST_WIDTH(reg_width)
+		| ATC_SRC_WIDTH(reg_width)
+		| period_len >> reg_width;
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		desc->lli.saddr = buf_addr + (period_len * period_index);
+		desc->lli.daddr = atslave->tx_reg;
+		desc->lli.ctrla = ctrla;
+		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
+				| ATC_SRC_ADDR_MODE_INCR
+				| ATC_FC_MEM2PER
+				| ATC_SIF(AT_DMA_MEM_IF)
+				| ATC_DIF(AT_DMA_PER_IF);
+		break;
+
+	case DMA_FROM_DEVICE:
+		desc->lli.saddr = atslave->rx_reg;
+		desc->lli.daddr = buf_addr + (period_len * period_index);
+		desc->lli.ctrla = ctrla;
+		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
+				| ATC_SRC_ADDR_MODE_FIXED
+				| ATC_FC_PER2MEM
+				| ATC_SIF(AT_DMA_PER_IF)
+				| ATC_DIF(AT_DMA_MEM_IF);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+		size_t period_len, enum dma_data_direction direction)
+{
+	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	struct at_dma_slave	*atslave = chan->private;
+	struct at_desc		*first = NULL;
+	struct at_desc		*prev = NULL;
+	unsigned long		was_cyclic;
+	unsigned int		periods = buf_len / period_len;
+	unsigned int		i;
+
+	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
+			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+			buf_addr,
+			periods, buf_len, period_len);
+
+	if (unlikely(!atslave || !buf_len || !period_len)) {
+		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
+		return NULL;
+	}
+
+	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
+	if (was_cyclic) {
+		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
+		return NULL;
+	}
+
+	/* Check for too big/unaligned periods and unaligned DMA buffer */
+	if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
+					period_len, direction))
+		goto err_out;
+
+	/* build cyclic linked list */
+	for (i = 0; i < periods; i++) {
+		struct at_desc	*desc;
+
+		desc = atc_desc_get(atchan);
+		if (!desc)
+			goto err_desc_get;
+
+		if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
+						period_len, direction))
+			goto err_desc_get;
+
+		atc_desc_chain(&first, &prev, desc);
+	}
+
+	/* let's make a cyclic list */
+	prev->lli.dscr = first->txd.phys;
+
+	/* First descriptor of the chain embeds additional information */
+	first->txd.cookie = -EBUSY;
+	first->len = buf_len;
+
+	return &first->txd;
+
+err_desc_get:
+	dev_err(chan2dev(chan), "not enough descriptors available\n");
+	atc_desc_put(atchan, first);
+err_out:
+	clear_bit(ATC_IS_CYCLIC, &atchan->status);
+	return NULL;
+}
+
+
 static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		       unsigned long arg)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	struct at_dma		*atdma = to_at_dma(chan->device);
-	struct at_desc		*desc, *_desc;
+	int			chan_id = atchan->chan_common.chan_id;
+
 	LIST_HEAD(list);

-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
+	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

-	/*
-	 * This is only called when something went wrong elsewhere, so
-	 * we don't really care about the data. Just disable the
-	 * channel. We still have to poll the channel enable bit due
-	 * to AHB/HSB limitations.
-	 */
-	spin_lock_bh(&atchan->lock);
+	if (cmd == DMA_PAUSE) {
+		spin_lock_bh(&atchan->lock);

-	dma_writel(atdma, CHDR, atchan->mask);
+		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
+		set_bit(ATC_IS_PAUSED, &atchan->status);

-	/* confirm that this channel is disabled */
-	while (dma_readl(atdma, CHSR) & atchan->mask)
-		cpu_relax();
+		spin_unlock_bh(&atchan->lock);
+	} else if (cmd == DMA_RESUME) {
+		if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+			return 0;

-	/* active_list entries will end up before queued entries */
-	list_splice_init(&atchan->queue, &list);
-	list_splice_init(&atchan->active_list, &list);
+		spin_lock_bh(&atchan->lock);

-	/* Flush all pending and queued descriptors */
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		atc_chain_complete(atchan, desc);
+		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
+		clear_bit(ATC_IS_PAUSED, &atchan->status);

-	spin_unlock_bh(&atchan->lock);
+		spin_unlock_bh(&atchan->lock);
+	} else if (cmd == DMA_TERMINATE_ALL) {
+		struct at_desc	*desc, *_desc;
+		/*
+		 * This is only called when something went wrong elsewhere, so
+		 * we don't really care about the data. Just disable the
+		 * channel. We still have to poll the channel enable bit due
+		 * to AHB/HSB limitations.
+		 */
+		spin_lock_bh(&atchan->lock);
+
+		/* disabling channel: must also remove suspend state */
+		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
+
+		/* confirm that this channel is disabled */
+		while (dma_readl(atdma, CHSR) & atchan->mask)
+			cpu_relax();
+
+		/* active_list entries will end up before queued entries */
+		list_splice_init(&atchan->queue, &list);
+		list_splice_init(&atchan->active_list, &list);
+
+		/* Flush all pending and queued descriptors */
+		list_for_each_entry_safe(desc, _desc, &list, desc_node)
+			atc_chain_complete(atchan, desc);
+
+		clear_bit(ATC_IS_PAUSED, &atchan->status);
+		/* if channel dedicated to cyclic operations, free it */
+		clear_bit(ATC_IS_CYCLIC, &atchan->status);
+
+		spin_unlock_bh(&atchan->lock);
+	} else {
+		return -ENXIO;
+	}

 	return 0;
 }
@@ -835,9 +1023,17 @@ atc_tx_status(struct dma_chan *chan,

 	spin_unlock_bh(&atchan->lock);

-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-	dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
-		 cookie, last_complete ? last_complete : 0,
+	if (ret != DMA_SUCCESS)
+		dma_set_tx_state(txstate, last_complete, last_used,
+			atc_first_active(atchan)->len);
+	else
+		dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+	if (test_bit(ATC_IS_PAUSED, &atchan->status))
+		ret = DMA_PAUSED;
+
+	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
+		 ret, cookie, last_complete ? last_complete : 0,
 		 last_used ? last_used : 0);

 	return ret;
@@ -853,6 +1049,10 @@ static void atc_issue_pending(struct dma_chan *chan)

 	dev_vdbg(chan2dev(chan), "issue_pending\n");

+	/* Not needed for cyclic transfers */
+	if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+		return;
+
 	spin_lock_bh(&atchan->lock);
 	if (!atc_chan_is_enabled(atchan)) {
 		atc_advance_work(atchan);
@@ -959,6 +1159,7 @@ static void atc_free_chan_resources(struct dma_chan *chan)
 	}
 	list_splice_init(&atchan->free_list, &list);
 	atchan->descs_allocated = 0;
+	atchan->status = 0;

 	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
 }
@@ -1092,10 +1293,15 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

-	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
+	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
+
+	if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
+		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
+
+	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
+	    dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_control = atc_control;
-	}

 	dma_writel(atdma, EN, AT_DMA_ENABLE);

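With DMA_CYCLIC wired up in probe, a peripheral driver (audio, for instance) can ask this controller for a looping transfer whose callback fires once per period from atc_handle_cyclic(). A hedged consumer-side sketch against the dmaengine API of this era; chan, buf_phys, and both lengths are assumed to come from the caller:

#include <linux/dmaengine.h>

static int my_start_cyclic_tx(struct dma_chan *chan, dma_addr_t buf_phys,
			      size_t buf_len, size_t period_len,
			      dma_async_tx_callback period_cb, void *cb_arg)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = chan->device->device_prep_dma_cyclic(chan, buf_phys, buf_len,
						    period_len, DMA_TO_DEVICE);
	if (!desc)
		return -EBUSY;	/* channel already cyclic, or bad alignment */

	desc->callback = period_cb;	/* invoked once per elapsed period */
	desc->callback_param = cb_arg;
	cookie = desc->tx_submit(desc);
	return dma_submit_error(cookie) ? -EIO : 0;
}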
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 495457e3dc4b..087dbf1dd39c 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -103,6 +103,10 @@
 /* Bitfields in CTRLB */
 #define	ATC_SIF(i)		(0x3 & (i))	/* Src tx done via AHB-Lite Interface i */
 #define	ATC_DIF(i)		((0x3 & (i)) <<  4)	/* Dst tx done via AHB-Lite Interface i */
+				  /* Specify AHB interfaces */
+#define AT_DMA_MEM_IF		0 /* interface 0 as memory interface */
+#define AT_DMA_PER_IF		1 /* interface 1 as peripheral interface */
+
 #define	ATC_SRC_PIP		(0x1 <<  8)	/* Source Picture-in-Picture enabled */
 #define	ATC_DST_PIP		(0x1 << 12)	/* Destination Picture-in-Picture enabled */
 #define	ATC_SRC_DSCR_DIS	(0x1 << 16)	/* Src Descriptor fetch disable */
@@ -181,12 +185,23 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)
 /*--  Channels  --------------------------------------------------------*/

 /**
+ * atc_status - information bits stored in channel status flag
+ *
+ * Manipulated with atomic operations.
+ */
+enum atc_status {
+	ATC_IS_ERROR = 0,
+	ATC_IS_PAUSED = 1,
+	ATC_IS_CYCLIC = 24,
+};
+
+/**
  * struct at_dma_chan - internal representation of an Atmel HDMAC channel
  * @chan_common: common dmaengine channel object members
  * @device: parent device
  * @ch_regs: memory mapped register base
  * @mask: channel index in a mask
- * @error_status: transmit error status information from irq handler
+ * @status: transmit status information from irq/prep* functions
  *  to tasklet (use atomic operations)
  * @tasklet: bottom half to finish transaction work
  * @lock: serializes enqueue/dequeue operations to descriptors lists
@@ -201,7 +216,7 @@ struct at_dma_chan {
 	struct at_dma		*device;
 	void __iomem		*ch_regs;
 	u8			mask;
-	unsigned long		error_status;
+	unsigned long		status;
 	struct tasklet_struct	tasklet;

 	spinlock_t		lock;
@@ -309,8 +324,8 @@ static void atc_setup_irq(struct at_dma_chan *atchan, int on)
 	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);
 	u32		ebci;

-	/* enable interrupts on buffer chain completion & error */
-	ebci = AT_DMA_CBTC(atchan->chan_common.chan_id)
+	/* enable interrupts on buffer transfer completion & error */
+	ebci = AT_DMA_BTC(atchan->chan_common.chan_id)
 		| AT_DMA_ERR(atchan->chan_common.chan_id);
 	if (on)
 		dma_writel(atdma, EBCIER, ebci);
@@ -347,7 +362,12 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
  */
 static void set_desc_eol(struct at_desc *desc)
 {
-	desc->lli.ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
+	u32 ctrlb = desc->lli.ctrlb;
+
+	ctrlb &=  ~ATC_IEN;
+	ctrlb |=  ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
+
+	desc->lli.ctrlb = ctrlb;
 	desc->lli.dscr = 0;
 }

diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index f48e54006518..af8c0b5ed70f 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1610,7 +1610,7 @@ int __init coh901318_init(void)
 {
 	return platform_driver_probe(&coh901318_driver, coh901318_probe);
 }
-arch_initcall(coh901318_init);
+subsys_initcall(coh901318_init);

 void __exit coh901318_exit(void)
 {
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 2a2e2fa00e91..4d180ca9a1d8 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -3,6 +3,7 @@
  * AVR32 systems.)
  *
  * Copyright (C) 2007-2008 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -93,8 +94,9 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 	struct dw_desc *desc, *_desc;
 	struct dw_desc *ret = NULL;
 	unsigned int i = 0;
+	unsigned long flags;

-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
 		if (async_tx_test_ack(&desc->txd)) {
 			list_del(&desc->desc_node);
@@ -104,7 +106,7 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
 		i++;
 	}
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);

 	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

@@ -130,12 +132,14 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
  */
 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 {
+	unsigned long flags;
+
 	if (desc) {
 		struct dw_desc *child;

 		dwc_sync_desc_for_cpu(dwc, desc);

-		spin_lock_bh(&dwc->lock);
+		spin_lock_irqsave(&dwc->lock, flags);
 		list_for_each_entry(child, &desc->tx_list, desc_node)
 			dev_vdbg(chan2dev(&dwc->chan),
 				 "moving child desc %p to freelist\n",
@@ -143,7 +147,7 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
143 list_splice_init(&desc->tx_list, &dwc->free_list); 147 list_splice_init(&desc->tx_list, &dwc->free_list);
144 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); 148 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
145 list_add(&desc->desc_node, &dwc->free_list); 149 list_add(&desc->desc_node, &dwc->free_list);
146 spin_unlock_bh(&dwc->lock); 150 spin_unlock_irqrestore(&dwc->lock, flags);
147 } 151 }
148} 152}
149 153
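
The recurring change in this file swaps spin_lock_bh() on dwc->lock for spin_lock_irqsave(): the _bh variant only masks softirqs, so the lock was unsafe for callers running in hard-irq context or with interrupts already disabled. The canonical pattern, as a standalone sketch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);

static void my_critical_section(void)
{
	unsigned long flags;

	/*
	 * Saves the current interrupt state in 'flags' and disables
	 * local interrupts, so this is safe whether the caller runs
	 * in process, softirq or hard-irq context.
	 */
	spin_lock_irqsave(&my_lock, flags);
	/* ... touch data shared with the interrupt handler ... */
	spin_unlock_irqrestore(&my_lock, flags);
}
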
@@ -195,18 +199,23 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
195/*----------------------------------------------------------------------*/ 199/*----------------------------------------------------------------------*/
196 200
197static void 201static void
198dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) 202dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
203 bool callback_required)
199{ 204{
200 dma_async_tx_callback callback; 205 dma_async_tx_callback callback = NULL;
201 void *param; 206 void *param = NULL;
202 struct dma_async_tx_descriptor *txd = &desc->txd; 207 struct dma_async_tx_descriptor *txd = &desc->txd;
203 struct dw_desc *child; 208 struct dw_desc *child;
209 unsigned long flags;
204 210
205 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); 211 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
206 212
213 spin_lock_irqsave(&dwc->lock, flags);
207 dwc->completed = txd->cookie; 214 dwc->completed = txd->cookie;
208 callback = txd->callback; 215 if (callback_required) {
209 param = txd->callback_param; 216 callback = txd->callback;
217 param = txd->callback_param;
218 }
210 219
211 dwc_sync_desc_for_cpu(dwc, desc); 220 dwc_sync_desc_for_cpu(dwc, desc);
212 221
@@ -238,11 +247,9 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
238 } 247 }
239 } 248 }
240 249
241 /* 250 spin_unlock_irqrestore(&dwc->lock, flags);
242 * The API requires that no submissions are done from a 251
243 * callback, so we don't need to drop the lock here 252 if (callback_required && callback)
244 */
245 if (callback)
246 callback(param); 253 callback(param);
247} 254}
248 255
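
Note how dwc_descriptor_complete() now snapshots the callback under the lock but invokes it only after unlocking; the deleted comment relied on a rule (callbacks never submit new work) that no longer holds. The shape of that pattern, with a hypothetical my_desc type:

#include <linux/dmaengine.h>
#include <linux/spinlock.h>

struct my_desc {			/* hypothetical descriptor */
	dma_async_tx_callback callback;
	void *callback_param;
};

static void my_complete(spinlock_t *lock, struct my_desc *desc)
{
	dma_async_tx_callback cb;
	void *param;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	cb = desc->callback;		/* snapshot under the lock */
	param = desc->callback_param;
	/* ... update cookies, move desc to the free list ... */
	spin_unlock_irqrestore(lock, flags);

	/*
	 * Invoke the callback only after unlocking: it is allowed to
	 * prepare and submit new descriptors, which would re-take the
	 * channel lock and deadlock if it were still held.
	 */
	if (cb)
		cb(param);
}
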
@@ -250,7 +257,9 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
250{ 257{
251 struct dw_desc *desc, *_desc; 258 struct dw_desc *desc, *_desc;
252 LIST_HEAD(list); 259 LIST_HEAD(list);
260 unsigned long flags;
253 261
262 spin_lock_irqsave(&dwc->lock, flags);
254 if (dma_readl(dw, CH_EN) & dwc->mask) { 263 if (dma_readl(dw, CH_EN) & dwc->mask) {
255 dev_err(chan2dev(&dwc->chan), 264 dev_err(chan2dev(&dwc->chan),
256 "BUG: XFER bit set, but channel not idle!\n"); 265 "BUG: XFER bit set, but channel not idle!\n");
@@ -271,8 +280,10 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
271 dwc_dostart(dwc, dwc_first_active(dwc)); 280 dwc_dostart(dwc, dwc_first_active(dwc));
272 } 281 }
273 282
283 spin_unlock_irqrestore(&dwc->lock, flags);
284
274 list_for_each_entry_safe(desc, _desc, &list, desc_node) 285 list_for_each_entry_safe(desc, _desc, &list, desc_node)
275 dwc_descriptor_complete(dwc, desc); 286 dwc_descriptor_complete(dwc, desc, true);
276} 287}
277 288
278static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) 289static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -281,7 +292,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
281 struct dw_desc *desc, *_desc; 292 struct dw_desc *desc, *_desc;
282 struct dw_desc *child; 293 struct dw_desc *child;
283 u32 status_xfer; 294 u32 status_xfer;
295 unsigned long flags;
284 296
297 spin_lock_irqsave(&dwc->lock, flags);
285 /* 298 /*
286 * Clear block interrupt flag before scanning so that we don't 299 * Clear block interrupt flag before scanning so that we don't
287 * miss any, and read LLP before RAW_XFER to ensure it is 300 * miss any, and read LLP before RAW_XFER to ensure it is
@@ -294,30 +307,47 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
294 if (status_xfer & dwc->mask) { 307 if (status_xfer & dwc->mask) {
295 /* Everything we've submitted is done */ 308 /* Everything we've submitted is done */
296 dma_writel(dw, CLEAR.XFER, dwc->mask); 309 dma_writel(dw, CLEAR.XFER, dwc->mask);
310 spin_unlock_irqrestore(&dwc->lock, flags);
311
297 dwc_complete_all(dw, dwc); 312 dwc_complete_all(dw, dwc);
298 return; 313 return;
299 } 314 }
300 315
301 if (list_empty(&dwc->active_list)) 316 if (list_empty(&dwc->active_list)) {
317 spin_unlock_irqrestore(&dwc->lock, flags);
302 return; 318 return;
319 }
303 320
304 dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); 321 dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
305 322
306 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { 323 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
307 if (desc->lli.llp == llp) 324 /* check first descriptors addr */
325 if (desc->txd.phys == llp) {
326 spin_unlock_irqrestore(&dwc->lock, flags);
327 return;
328 }
329
330 /* check first descriptors llp */
331 if (desc->lli.llp == llp) {
308 /* This one is currently in progress */ 332 /* This one is currently in progress */
333 spin_unlock_irqrestore(&dwc->lock, flags);
309 return; 334 return;
335 }
310 336
311 list_for_each_entry(child, &desc->tx_list, desc_node) 337 list_for_each_entry(child, &desc->tx_list, desc_node)
312 if (child->lli.llp == llp) 338 if (child->lli.llp == llp) {
313 /* Currently in progress */ 339 /* Currently in progress */
340 spin_unlock_irqrestore(&dwc->lock, flags);
314 return; 341 return;
342 }
315 343
316 /* 344 /*
317 * No descriptors so far seem to be in progress, i.e. 345 * No descriptors so far seem to be in progress, i.e.
318 * this one must be done. 346 * this one must be done.
319 */ 347 */
320 dwc_descriptor_complete(dwc, desc); 348 spin_unlock_irqrestore(&dwc->lock, flags);
349 dwc_descriptor_complete(dwc, desc, true);
350 spin_lock_irqsave(&dwc->lock, flags);
321 } 351 }
322 352
323 dev_err(chan2dev(&dwc->chan), 353 dev_err(chan2dev(&dwc->chan),
@@ -332,6 +362,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
332 list_move(dwc->queue.next, &dwc->active_list); 362 list_move(dwc->queue.next, &dwc->active_list);
333 dwc_dostart(dwc, dwc_first_active(dwc)); 363 dwc_dostart(dwc, dwc_first_active(dwc));
334 } 364 }
365 spin_unlock_irqrestore(&dwc->lock, flags);
335} 366}
336 367
337static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) 368static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
@@ -346,9 +377,12 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
346{ 377{
347 struct dw_desc *bad_desc; 378 struct dw_desc *bad_desc;
348 struct dw_desc *child; 379 struct dw_desc *child;
380 unsigned long flags;
349 381
350 dwc_scan_descriptors(dw, dwc); 382 dwc_scan_descriptors(dw, dwc);
351 383
384 spin_lock_irqsave(&dwc->lock, flags);
385
352 /* 386 /*
353 * The descriptor currently at the head of the active list is 387 * The descriptor currently at the head of the active list is
354 * borked. Since we don't have any way to report errors, we'll 388 * borked. Since we don't have any way to report errors, we'll
@@ -378,8 +412,10 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
378 list_for_each_entry(child, &bad_desc->tx_list, desc_node) 412 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
379 dwc_dump_lli(dwc, &child->lli); 413 dwc_dump_lli(dwc, &child->lli);
380 414
415 spin_unlock_irqrestore(&dwc->lock, flags);
416
381 /* Pretend the descriptor completed successfully */ 417 /* Pretend the descriptor completed successfully */
382 dwc_descriptor_complete(dwc, bad_desc); 418 dwc_descriptor_complete(dwc, bad_desc, true);
383} 419}
384 420
385/* --------------------- Cyclic DMA API extensions -------------------- */ 421/* --------------------- Cyclic DMA API extensions -------------------- */
@@ -402,6 +438,8 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
402static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, 438static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
403 u32 status_block, u32 status_err, u32 status_xfer) 439 u32 status_block, u32 status_err, u32 status_xfer)
404{ 440{
441 unsigned long flags;
442
405 if (status_block & dwc->mask) { 443 if (status_block & dwc->mask) {
406 void (*callback)(void *param); 444 void (*callback)(void *param);
407 void *callback_param; 445 void *callback_param;
@@ -412,11 +450,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
412 450
413 callback = dwc->cdesc->period_callback; 451 callback = dwc->cdesc->period_callback;
414 callback_param = dwc->cdesc->period_callback_param; 452 callback_param = dwc->cdesc->period_callback_param;
415 if (callback) { 453
416 spin_unlock(&dwc->lock); 454 if (callback)
417 callback(callback_param); 455 callback(callback_param);
418 spin_lock(&dwc->lock);
419 }
420 } 456 }
421 457
422 /* 458 /*
@@ -430,6 +466,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
430 dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " 466 dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
431 "interrupt, stopping DMA transfer\n", 467 "interrupt, stopping DMA transfer\n",
432 status_xfer ? "xfer" : "error"); 468 status_xfer ? "xfer" : "error");
469
470 spin_lock_irqsave(&dwc->lock, flags);
471
433 dev_err(chan2dev(&dwc->chan), 472 dev_err(chan2dev(&dwc->chan),
434 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 473 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
435 channel_readl(dwc, SAR), 474 channel_readl(dwc, SAR),
@@ -453,6 +492,8 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
453 492
454 for (i = 0; i < dwc->cdesc->periods; i++) 493 for (i = 0; i < dwc->cdesc->periods; i++)
455 dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); 494 dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
495
496 spin_unlock_irqrestore(&dwc->lock, flags);
456 } 497 }
457} 498}
458 499
@@ -476,7 +517,6 @@ static void dw_dma_tasklet(unsigned long data)
476 517
477 for (i = 0; i < dw->dma.chancnt; i++) { 518 for (i = 0; i < dw->dma.chancnt; i++) {
478 dwc = &dw->chan[i]; 519 dwc = &dw->chan[i];
479 spin_lock(&dwc->lock);
480 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) 520 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
481 dwc_handle_cyclic(dw, dwc, status_block, status_err, 521 dwc_handle_cyclic(dw, dwc, status_block, status_err,
482 status_xfer); 522 status_xfer);
@@ -484,7 +524,6 @@ static void dw_dma_tasklet(unsigned long data)
484 dwc_handle_error(dw, dwc); 524 dwc_handle_error(dw, dwc);
485 else if ((status_block | status_xfer) & (1 << i)) 525 else if ((status_block | status_xfer) & (1 << i))
486 dwc_scan_descriptors(dw, dwc); 526 dwc_scan_descriptors(dw, dwc);
487 spin_unlock(&dwc->lock);
488 } 527 }
489 528
490 /* 529 /*
@@ -539,8 +578,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
539 struct dw_desc *desc = txd_to_dw_desc(tx); 578 struct dw_desc *desc = txd_to_dw_desc(tx);
540 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); 579 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
541 dma_cookie_t cookie; 580 dma_cookie_t cookie;
581 unsigned long flags;
542 582
543 spin_lock_bh(&dwc->lock); 583 spin_lock_irqsave(&dwc->lock, flags);
544 cookie = dwc_assign_cookie(dwc, desc); 584 cookie = dwc_assign_cookie(dwc, desc);
545 585
546 /* 586 /*
@@ -560,7 +600,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
560 list_add_tail(&desc->desc_node, &dwc->queue); 600 list_add_tail(&desc->desc_node, &dwc->queue);
561 } 601 }
562 602
563 spin_unlock_bh(&dwc->lock); 603 spin_unlock_irqrestore(&dwc->lock, flags);
564 604
565 return cookie; 605 return cookie;
566} 606}
@@ -689,9 +729,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
689 reg = dws->tx_reg; 729 reg = dws->tx_reg;
690 for_each_sg(sgl, sg, sg_len, i) { 730 for_each_sg(sgl, sg, sg_len, i) {
691 struct dw_desc *desc; 731 struct dw_desc *desc;
692 u32 len; 732 u32 len, dlen, mem;
693 u32 mem; 733
734 mem = sg_phys(sg);
735 len = sg_dma_len(sg);
736 mem_width = 2;
737 if (unlikely(mem & 3 || len & 3))
738 mem_width = 0;
694 739
740slave_sg_todev_fill_desc:
695 desc = dwc_desc_get(dwc); 741 desc = dwc_desc_get(dwc);
696 if (!desc) { 742 if (!desc) {
697 dev_err(chan2dev(chan), 743 dev_err(chan2dev(chan),
@@ -699,16 +745,19 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
699 goto err_desc_get; 745 goto err_desc_get;
700 } 746 }
701 747
702 mem = sg_phys(sg);
703 len = sg_dma_len(sg);
704 mem_width = 2;
705 if (unlikely(mem & 3 || len & 3))
706 mem_width = 0;
707
708 desc->lli.sar = mem; 748 desc->lli.sar = mem;
709 desc->lli.dar = reg; 749 desc->lli.dar = reg;
710 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); 750 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
711 desc->lli.ctlhi = len >> mem_width; 751 if ((len >> mem_width) > DWC_MAX_COUNT) {
752 dlen = DWC_MAX_COUNT << mem_width;
753 mem += dlen;
754 len -= dlen;
755 } else {
756 dlen = len;
757 len = 0;
758 }
759
760 desc->lli.ctlhi = dlen >> mem_width;
712 761
713 if (!first) { 762 if (!first) {
714 first = desc; 763 first = desc;
@@ -722,7 +771,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
722 &first->tx_list); 771 &first->tx_list);
723 } 772 }
724 prev = desc; 773 prev = desc;
725 total_len += len; 774 total_len += dlen;
775
776 if (len)
777 goto slave_sg_todev_fill_desc;
726 } 778 }
727 break; 779 break;
728 case DMA_FROM_DEVICE: 780 case DMA_FROM_DEVICE:
@@ -735,15 +787,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
735 reg = dws->rx_reg; 787 reg = dws->rx_reg;
736 for_each_sg(sgl, sg, sg_len, i) { 788 for_each_sg(sgl, sg, sg_len, i) {
737 struct dw_desc *desc; 789 struct dw_desc *desc;
738 u32 len; 790 u32 len, dlen, mem;
739 u32 mem;
740
741 desc = dwc_desc_get(dwc);
742 if (!desc) {
743 dev_err(chan2dev(chan),
744 "not enough descriptors available\n");
745 goto err_desc_get;
746 }
747 791
748 mem = sg_phys(sg); 792 mem = sg_phys(sg);
749 len = sg_dma_len(sg); 793 len = sg_dma_len(sg);
@@ -751,10 +795,26 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
751 if (unlikely(mem & 3 || len & 3)) 795 if (unlikely(mem & 3 || len & 3))
752 mem_width = 0; 796 mem_width = 0;
753 797
798slave_sg_fromdev_fill_desc:
799 desc = dwc_desc_get(dwc);
800 if (!desc) {
801 dev_err(chan2dev(chan),
802 "not enough descriptors available\n");
803 goto err_desc_get;
804 }
805
754 desc->lli.sar = reg; 806 desc->lli.sar = reg;
755 desc->lli.dar = mem; 807 desc->lli.dar = mem;
756 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); 808 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
757 desc->lli.ctlhi = len >> reg_width; 809 if ((len >> reg_width) > DWC_MAX_COUNT) {
810 dlen = DWC_MAX_COUNT << reg_width;
811 mem += dlen;
812 len -= dlen;
813 } else {
814 dlen = len;
815 len = 0;
816 }
817 desc->lli.ctlhi = dlen >> reg_width;
758 818
759 if (!first) { 819 if (!first) {
760 first = desc; 820 first = desc;
@@ -768,7 +828,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
768 &first->tx_list); 828 &first->tx_list);
769 } 829 }
770 prev = desc; 830 prev = desc;
771 total_len += len; 831 total_len += dlen;
832
833 if (len)
834 goto slave_sg_fromdev_fill_desc;
772 } 835 }
773 break; 836 break;
774 default: 837 default:
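
Both slave_sg loops above now split a scatterlist entry whose element count exceeds DWC_MAX_COUNT across several descriptors (the slave_sg_*_fill_desc labels) instead of programming a truncated length. The chunking arithmetic as a standalone sketch, with MY_MAX_COUNT standing in for the hardware limit:

#include <linux/types.h>

#define MY_MAX_COUNT	2048	/* assumed per-descriptor element limit */

/*
 * Split one scatterlist run into descriptor-sized chunks; 'width' is
 * log2 of the element size, as mem_width/reg_width are above.
 */
static void my_fill_chunks(u32 mem, u32 len, unsigned int width)
{
	while (len) {
		u32 dlen;

		if ((len >> width) > MY_MAX_COUNT)
			dlen = MY_MAX_COUNT << width; /* one full LLI */
		else
			dlen = len;	/* the remainder fits */

		/* program one LLI here: addr = mem, count = dlen >> width */

		mem += dlen;
		len -= dlen;
	}
}
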
@@ -799,34 +862,51 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
799 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 862 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
800 struct dw_dma *dw = to_dw_dma(chan->device); 863 struct dw_dma *dw = to_dw_dma(chan->device);
801 struct dw_desc *desc, *_desc; 864 struct dw_desc *desc, *_desc;
865 unsigned long flags;
866 u32 cfglo;
802 LIST_HEAD(list); 867 LIST_HEAD(list);
803 868
804 /* Only supports DMA_TERMINATE_ALL */ 869 if (cmd == DMA_PAUSE) {
805 if (cmd != DMA_TERMINATE_ALL) 870 spin_lock_irqsave(&dwc->lock, flags);
806 return -ENXIO;
807 871
808 /* 872 cfglo = channel_readl(dwc, CFG_LO);
809 * This is only called when something went wrong elsewhere, so 873 channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
810 * we don't really care about the data. Just disable the 874 while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
811 * channel. We still have to poll the channel enable bit due 875 cpu_relax();
812 * to AHB/HSB limitations.
813 */
814 spin_lock_bh(&dwc->lock);
815 876
816 channel_clear_bit(dw, CH_EN, dwc->mask); 877 dwc->paused = true;
878 spin_unlock_irqrestore(&dwc->lock, flags);
879 } else if (cmd == DMA_RESUME) {
880 if (!dwc->paused)
881 return 0;
817 882
818 while (dma_readl(dw, CH_EN) & dwc->mask) 883 spin_lock_irqsave(&dwc->lock, flags);
819 cpu_relax();
820 884
821 /* active_list entries will end up before queued entries */ 885 cfglo = channel_readl(dwc, CFG_LO);
822 list_splice_init(&dwc->queue, &list); 886 channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
823 list_splice_init(&dwc->active_list, &list); 887 dwc->paused = false;
824 888
825 spin_unlock_bh(&dwc->lock); 889 spin_unlock_irqrestore(&dwc->lock, flags);
890 } else if (cmd == DMA_TERMINATE_ALL) {
891 spin_lock_irqsave(&dwc->lock, flags);
826 892
827 /* Flush all pending and queued descriptors */ 893 channel_clear_bit(dw, CH_EN, dwc->mask);
828 list_for_each_entry_safe(desc, _desc, &list, desc_node) 894 while (dma_readl(dw, CH_EN) & dwc->mask)
829 dwc_descriptor_complete(dwc, desc); 895 cpu_relax();
896
897 dwc->paused = false;
898
899 /* active_list entries will end up before queued entries */
900 list_splice_init(&dwc->queue, &list);
901 list_splice_init(&dwc->active_list, &list);
902
903 spin_unlock_irqrestore(&dwc->lock, flags);
904
905 /* Flush all pending and queued descriptors */
906 list_for_each_entry_safe(desc, _desc, &list, desc_node)
907 dwc_descriptor_complete(dwc, desc, false);
908 } else
909 return -ENXIO;
830 910
831 return 0; 911 return 0;
832} 912}
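
With this rewrite dwc_control() gains DMA_PAUSE/DMA_RESUME support alongside DMA_TERMINATE_ALL: pause sets CH_SUSP and spins until the FIFO is empty, resume clears it. From a client's point of view, a sketch against the dmaengine API of this era:

#include <linux/dmaengine.h>

/* 'chan' is assumed to be a slave channel from dma_request_channel(). */
static int my_pause_resume(struct dma_chan *chan)
{
	int ret;

	/* Suspend the channel; dwc_control() above waits for the
	 * FIFO to drain before declaring the channel paused. */
	ret = chan->device->device_control(chan, DMA_PAUSE, 0);
	if (ret)
		return ret;

	/* ... reconfigure the peripheral, service an error, etc ... */

	/* Resume where the transfer stopped. */
	return chan->device->device_control(chan, DMA_RESUME, 0);
}
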
@@ -846,9 +926,7 @@ dwc_tx_status(struct dma_chan *chan,
846 926
847 ret = dma_async_is_complete(cookie, last_complete, last_used); 927 ret = dma_async_is_complete(cookie, last_complete, last_used);
848 if (ret != DMA_SUCCESS) { 928 if (ret != DMA_SUCCESS) {
849 spin_lock_bh(&dwc->lock);
850 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 929 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
851 spin_unlock_bh(&dwc->lock);
852 930
853 last_complete = dwc->completed; 931 last_complete = dwc->completed;
854 last_used = chan->cookie; 932 last_used = chan->cookie;
@@ -856,7 +934,14 @@ dwc_tx_status(struct dma_chan *chan,
856 ret = dma_async_is_complete(cookie, last_complete, last_used); 934 ret = dma_async_is_complete(cookie, last_complete, last_used);
857 } 935 }
858 936
859 dma_set_tx_state(txstate, last_complete, last_used, 0); 937 if (ret != DMA_SUCCESS)
938 dma_set_tx_state(txstate, last_complete, last_used,
939 dwc_first_active(dwc)->len);
940 else
941 dma_set_tx_state(txstate, last_complete, last_used, 0);
942
943 if (dwc->paused)
944 return DMA_PAUSED;
860 945
861 return ret; 946 return ret;
862} 947}
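
dwc_tx_status() now reports a residue for in-flight transfers (coarsely: the full length of the first active descriptor) and returns DMA_PAUSED for a suspended channel. A caller sees this through the channel's device_tx_status hook, e.g.:

#include <linux/dmaengine.h>
#include <linux/printk.h>

static void my_check_progress(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (status == DMA_PAUSED)
		pr_info("channel suspended by DMA_PAUSE\n");
	else if (status != DMA_SUCCESS)
		/* coarse estimate: length of the first active descriptor */
		pr_info("in flight, residue %u bytes\n", state.residue);
}
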
@@ -865,10 +950,8 @@ static void dwc_issue_pending(struct dma_chan *chan)
865{ 950{
866 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 951 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
867 952
868 spin_lock_bh(&dwc->lock);
869 if (!list_empty(&dwc->queue)) 953 if (!list_empty(&dwc->queue))
870 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 954 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
871 spin_unlock_bh(&dwc->lock);
872} 955}
873 956
874static int dwc_alloc_chan_resources(struct dma_chan *chan) 957static int dwc_alloc_chan_resources(struct dma_chan *chan)
@@ -880,6 +963,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
880 int i; 963 int i;
881 u32 cfghi; 964 u32 cfghi;
882 u32 cfglo; 965 u32 cfglo;
966 unsigned long flags;
883 967
884 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); 968 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
885 969
@@ -917,16 +1001,16 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
917 * doesn't mean what you think it means), and status writeback. 1001 * doesn't mean what you think it means), and status writeback.
918 */ 1002 */
919 1003
920 spin_lock_bh(&dwc->lock); 1004 spin_lock_irqsave(&dwc->lock, flags);
921 i = dwc->descs_allocated; 1005 i = dwc->descs_allocated;
922 while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { 1006 while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
923 spin_unlock_bh(&dwc->lock); 1007 spin_unlock_irqrestore(&dwc->lock, flags);
924 1008
925 desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); 1009 desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
926 if (!desc) { 1010 if (!desc) {
927 dev_info(chan2dev(chan), 1011 dev_info(chan2dev(chan),
928 "only allocated %d descriptors\n", i); 1012 "only allocated %d descriptors\n", i);
929 spin_lock_bh(&dwc->lock); 1013 spin_lock_irqsave(&dwc->lock, flags);
930 break; 1014 break;
931 } 1015 }
932 1016
@@ -938,7 +1022,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
938 sizeof(desc->lli), DMA_TO_DEVICE); 1022 sizeof(desc->lli), DMA_TO_DEVICE);
939 dwc_desc_put(dwc, desc); 1023 dwc_desc_put(dwc, desc);
940 1024
941 spin_lock_bh(&dwc->lock); 1025 spin_lock_irqsave(&dwc->lock, flags);
942 i = ++dwc->descs_allocated; 1026 i = ++dwc->descs_allocated;
943 } 1027 }
944 1028
@@ -947,7 +1031,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
947 channel_set_bit(dw, MASK.BLOCK, dwc->mask); 1031 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
948 channel_set_bit(dw, MASK.ERROR, dwc->mask); 1032 channel_set_bit(dw, MASK.ERROR, dwc->mask);
949 1033
950 spin_unlock_bh(&dwc->lock); 1034 spin_unlock_irqrestore(&dwc->lock, flags);
951 1035
952 dev_dbg(chan2dev(chan), 1036 dev_dbg(chan2dev(chan),
953 "alloc_chan_resources allocated %d descriptors\n", i); 1037 "alloc_chan_resources allocated %d descriptors\n", i);
@@ -960,6 +1044,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
960 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1044 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
961 struct dw_dma *dw = to_dw_dma(chan->device); 1045 struct dw_dma *dw = to_dw_dma(chan->device);
962 struct dw_desc *desc, *_desc; 1046 struct dw_desc *desc, *_desc;
1047 unsigned long flags;
963 LIST_HEAD(list); 1048 LIST_HEAD(list);
964 1049
965 dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", 1050 dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
@@ -970,7 +1055,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
970 BUG_ON(!list_empty(&dwc->queue)); 1055 BUG_ON(!list_empty(&dwc->queue));
971 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); 1056 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
972 1057
973 spin_lock_bh(&dwc->lock); 1058 spin_lock_irqsave(&dwc->lock, flags);
974 list_splice_init(&dwc->free_list, &list); 1059 list_splice_init(&dwc->free_list, &list);
975 dwc->descs_allocated = 0; 1060 dwc->descs_allocated = 0;
976 1061
@@ -979,7 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
979 channel_clear_bit(dw, MASK.BLOCK, dwc->mask); 1064 channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
980 channel_clear_bit(dw, MASK.ERROR, dwc->mask); 1065 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
981 1066
982 spin_unlock_bh(&dwc->lock); 1067 spin_unlock_irqrestore(&dwc->lock, flags);
983 1068
984 list_for_each_entry_safe(desc, _desc, &list, desc_node) { 1069 list_for_each_entry_safe(desc, _desc, &list, desc_node) {
985 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); 1070 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
@@ -1004,13 +1089,14 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
1004{ 1089{
1005 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1090 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1006 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 1091 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
1092 unsigned long flags;
1007 1093
1008 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { 1094 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
1009 dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); 1095 dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
1010 return -ENODEV; 1096 return -ENODEV;
1011 } 1097 }
1012 1098
1013 spin_lock(&dwc->lock); 1099 spin_lock_irqsave(&dwc->lock, flags);
1014 1100
1015 /* assert channel is idle */ 1101 /* assert channel is idle */
1016 if (dma_readl(dw, CH_EN) & dwc->mask) { 1102 if (dma_readl(dw, CH_EN) & dwc->mask) {
@@ -1023,7 +1109,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
1023 channel_readl(dwc, LLP), 1109 channel_readl(dwc, LLP),
1024 channel_readl(dwc, CTL_HI), 1110 channel_readl(dwc, CTL_HI),
1025 channel_readl(dwc, CTL_LO)); 1111 channel_readl(dwc, CTL_LO));
1026 spin_unlock(&dwc->lock); 1112 spin_unlock_irqrestore(&dwc->lock, flags);
1027 return -EBUSY; 1113 return -EBUSY;
1028 } 1114 }
1029 1115
@@ -1038,7 +1124,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
1038 1124
1039 channel_set_bit(dw, CH_EN, dwc->mask); 1125 channel_set_bit(dw, CH_EN, dwc->mask);
1040 1126
1041 spin_unlock(&dwc->lock); 1127 spin_unlock_irqrestore(&dwc->lock, flags);
1042 1128
1043 return 0; 1129 return 0;
1044} 1130}
@@ -1054,14 +1140,15 @@ void dw_dma_cyclic_stop(struct dma_chan *chan)
1054{ 1140{
1055 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1141 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1056 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 1142 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
1143 unsigned long flags;
1057 1144
1058 spin_lock(&dwc->lock); 1145 spin_lock_irqsave(&dwc->lock, flags);
1059 1146
1060 channel_clear_bit(dw, CH_EN, dwc->mask); 1147 channel_clear_bit(dw, CH_EN, dwc->mask);
1061 while (dma_readl(dw, CH_EN) & dwc->mask) 1148 while (dma_readl(dw, CH_EN) & dwc->mask)
1062 cpu_relax(); 1149 cpu_relax();
1063 1150
1064 spin_unlock(&dwc->lock); 1151 spin_unlock_irqrestore(&dwc->lock, flags);
1065} 1152}
1066EXPORT_SYMBOL(dw_dma_cyclic_stop); 1153EXPORT_SYMBOL(dw_dma_cyclic_stop);
1067 1154
@@ -1090,17 +1177,18 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1090 unsigned int reg_width; 1177 unsigned int reg_width;
1091 unsigned int periods; 1178 unsigned int periods;
1092 unsigned int i; 1179 unsigned int i;
1180 unsigned long flags;
1093 1181
1094 spin_lock_bh(&dwc->lock); 1182 spin_lock_irqsave(&dwc->lock, flags);
1095 if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { 1183 if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
1096 spin_unlock_bh(&dwc->lock); 1184 spin_unlock_irqrestore(&dwc->lock, flags);
1097 dev_dbg(chan2dev(&dwc->chan), 1185 dev_dbg(chan2dev(&dwc->chan),
1098 "queue and/or active list are not empty\n"); 1186 "queue and/or active list are not empty\n");
1099 return ERR_PTR(-EBUSY); 1187 return ERR_PTR(-EBUSY);
1100 } 1188 }
1101 1189
1102 was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); 1190 was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1103 spin_unlock_bh(&dwc->lock); 1191 spin_unlock_irqrestore(&dwc->lock, flags);
1104 if (was_cyclic) { 1192 if (was_cyclic) {
1105 dev_dbg(chan2dev(&dwc->chan), 1193 dev_dbg(chan2dev(&dwc->chan),
1106 "channel already prepared for cyclic DMA\n"); 1194 "channel already prepared for cyclic DMA\n");
@@ -1214,13 +1302,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
1214 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 1302 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
1215 struct dw_cyclic_desc *cdesc = dwc->cdesc; 1303 struct dw_cyclic_desc *cdesc = dwc->cdesc;
1216 int i; 1304 int i;
1305 unsigned long flags;
1217 1306
1218 dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); 1307 dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
1219 1308
1220 if (!cdesc) 1309 if (!cdesc)
1221 return; 1310 return;
1222 1311
1223 spin_lock_bh(&dwc->lock); 1312 spin_lock_irqsave(&dwc->lock, flags);
1224 1313
1225 channel_clear_bit(dw, CH_EN, dwc->mask); 1314 channel_clear_bit(dw, CH_EN, dwc->mask);
1226 while (dma_readl(dw, CH_EN) & dwc->mask) 1315 while (dma_readl(dw, CH_EN) & dwc->mask)
@@ -1230,7 +1319,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
1230 dma_writel(dw, CLEAR.ERROR, dwc->mask); 1319 dma_writel(dw, CLEAR.ERROR, dwc->mask);
1231 dma_writel(dw, CLEAR.XFER, dwc->mask); 1320 dma_writel(dw, CLEAR.XFER, dwc->mask);
1232 1321
1233 spin_unlock_bh(&dwc->lock); 1322 spin_unlock_irqrestore(&dwc->lock, flags);
1234 1323
1235 for (i = 0; i < cdesc->periods; i++) 1324 for (i = 0; i < cdesc->periods; i++)
1236 dwc_desc_put(dwc, cdesc->desc[i]); 1325 dwc_desc_put(dwc, cdesc->desc[i]);
@@ -1487,3 +1576,4 @@ module_exit(dw_exit);
1487MODULE_LICENSE("GPL v2"); 1576MODULE_LICENSE("GPL v2");
1488MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); 1577MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
1489MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 1578MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1579MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 720f821527f8..c3419518d701 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -2,6 +2,7 @@
2 * Driver for the Synopsys DesignWare AHB DMA Controller 2 * Driver for the Synopsys DesignWare AHB DMA Controller
3 * 3 *
4 * Copyright (C) 2005-2007 Atmel Corporation 4 * Copyright (C) 2005-2007 Atmel Corporation
5 * Copyright (C) 2010-2011 ST Microelectronics
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -138,6 +139,7 @@ struct dw_dma_chan {
138 void __iomem *ch_regs; 139 void __iomem *ch_regs;
139 u8 mask; 140 u8 mask;
140 u8 priority; 141 u8 priority;
142 bool paused;
141 143
142 spinlock_t lock; 144 spinlock_t lock;
143 145
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 3d4ec38b9b62..f653517ef744 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1292,8 +1292,7 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
1292 if (err) 1292 if (err)
1293 goto err_dma; 1293 goto err_dma;
1294 1294
1295 pm_runtime_set_active(&pdev->dev); 1295 pm_runtime_put_noidle(&pdev->dev);
1296 pm_runtime_enable(&pdev->dev);
1297 pm_runtime_allow(&pdev->dev); 1296 pm_runtime_allow(&pdev->dev);
1298 return 0; 1297 return 0;
1299 1298
@@ -1322,6 +1321,9 @@ err_enable_device:
1322static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) 1321static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
1323{ 1322{
1324 struct middma_device *device = pci_get_drvdata(pdev); 1323 struct middma_device *device = pci_get_drvdata(pdev);
1324
1325 pm_runtime_get_noresume(&pdev->dev);
1326 pm_runtime_forbid(&pdev->dev);
1325 middma_shutdown(pdev); 1327 middma_shutdown(pdev);
1326 pci_dev_put(pdev); 1328 pci_dev_put(pdev);
1327 kfree(device); 1329 kfree(device);
@@ -1385,13 +1387,20 @@ int dma_resume(struct pci_dev *pci)
1385static int dma_runtime_suspend(struct device *dev) 1387static int dma_runtime_suspend(struct device *dev)
1386{ 1388{
1387 struct pci_dev *pci_dev = to_pci_dev(dev); 1389 struct pci_dev *pci_dev = to_pci_dev(dev);
1388 return dma_suspend(pci_dev, PMSG_SUSPEND); 1390 struct middma_device *device = pci_get_drvdata(pci_dev);
1391
1392 device->state = SUSPENDED;
1393 return 0;
1389} 1394}
1390 1395
1391static int dma_runtime_resume(struct device *dev) 1396static int dma_runtime_resume(struct device *dev)
1392{ 1397{
1393 struct pci_dev *pci_dev = to_pci_dev(dev); 1398 struct pci_dev *pci_dev = to_pci_dev(dev);
1394 return dma_resume(pci_dev); 1399 struct middma_device *device = pci_get_drvdata(pci_dev);
1400
1401 device->state = RUNNING;
1402 iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
1403 return 0;
1395} 1404}
1396 1405
1397static int dma_runtime_idle(struct device *dev) 1406static int dma_runtime_idle(struct device *dev)
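
The probe/remove changes above rebalance the runtime-PM usage count: the PCI core hands the device over active with a usage reference held, so probe() drops that reference and remove() takes it back before teardown. A generic sketch of the pairing, with placeholder names:

#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* ... controller setup elided ... */

	/* Drop the reference held by the PCI core and allow runtime
	 * PM, so the idle device can now autosuspend. */
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);
	return 0;
}

static void my_remove(struct pci_dev *pdev)
{
	/* Mirror probe(): pin the device awake and disallow runtime
	 * PM before tearing the hardware down. */
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_forbid(&pdev->dev);

	/* ... controller teardown elided ... */
}
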
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index f4a51d4d0349..5d65f8377971 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -508,6 +508,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
508 struct ioat_ring_ent **ring; 508 struct ioat_ring_ent **ring;
509 u64 status; 509 u64 status;
510 int order; 510 int order;
511 int i = 0;
511 512
512 /* have we already been set up? */ 513 /* have we already been set up? */
513 if (ioat->ring) 514 if (ioat->ring)
@@ -548,8 +549,11 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
548 ioat2_start_null_desc(ioat); 549 ioat2_start_null_desc(ioat);
549 550
550 /* check that we got off the ground */ 551 /* check that we got off the ground */
551 udelay(5); 552 do {
552 status = ioat_chansts(chan); 553 udelay(1);
554 status = ioat_chansts(chan);
555 } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
556
553 if (is_ioat_active(status) || is_ioat_idle(status)) { 557 if (is_ioat_active(status) || is_ioat_idle(status)) {
554 set_bit(IOAT_RUN, &chan->state); 558 set_bit(IOAT_RUN, &chan->state);
555 return 1 << ioat->alloc_order; 559 return 1 << ioat->alloc_order;
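
Instead of a single udelay(5), the allocation path now polls the channel status for up to roughly 20 µs, since some hardware takes longer than 5 µs to report active/idle, while the fast case still exits after the first microsecond. The idiom reduced to a sketch (my_chan_ready() stands in for the status-register read):

#include <linux/delay.h>
#include <linux/types.h>

static bool my_wait_ready(bool (*my_chan_ready)(void))
{
	int i = 0;

	do {
		udelay(1);		/* short poll, cheap fast path */
		if (my_chan_ready())
			return true;
	} while (i++ < 20);		/* bounded at roughly 20 us */

	return false;			/* caller reports the failure */
}
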
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index c6b01f535b29..e03f811a83dd 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -619,7 +619,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
619 619
620 if (unlikely(!len)) 620 if (unlikely(!len))
621 return NULL; 621 return NULL;
622 BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); 622 BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
623 623
624 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", 624 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
625 __func__, len); 625 __func__, len);
@@ -652,7 +652,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
652 652
653 if (unlikely(!len)) 653 if (unlikely(!len))
654 return NULL; 654 return NULL;
655 BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); 655 BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
656 656
657 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", 657 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
658 __func__, len); 658 __func__, len);
@@ -686,7 +686,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
686 686
687 if (unlikely(!len)) 687 if (unlikely(!len))
688 return NULL; 688 return NULL;
689 BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT)); 689 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
690 690
691 dev_dbg(iop_chan->device->common.dev, 691 dev_dbg(iop_chan->device->common.dev,
692 "%s src_cnt: %d len: %u flags: %lx\n", 692 "%s src_cnt: %d len: %u flags: %lx\n",
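
These hunks (and the matching ones in mv_xor and ppc4xx/adma below) are pure cleanups: BUG_ON() already wraps its condition in unlikely(), so the nested annotation hinted the same branch twice for no gain. The generic definition is essentially:

#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
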
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index a25f5f61e0e0..954e334e01bb 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -671,7 +671,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
671 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) 671 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
672 return NULL; 672 return NULL;
673 673
674 BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); 674 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
675 675
676 spin_lock_bh(&mv_chan->lock); 676 spin_lock_bh(&mv_chan->lock);
677 slot_cnt = mv_chan_memcpy_slot_count(len); 677 slot_cnt = mv_chan_memcpy_slot_count(len);
@@ -710,7 +710,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
710 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) 710 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
711 return NULL; 711 return NULL;
712 712
713 BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); 713 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
714 714
715 spin_lock_bh(&mv_chan->lock); 715 spin_lock_bh(&mv_chan->lock);
716 slot_cnt = mv_chan_memset_slot_count(len); 716 slot_cnt = mv_chan_memset_slot_count(len);
@@ -744,7 +744,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
744 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) 744 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
745 return NULL; 745 return NULL;
746 746
747 BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); 747 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
748 748
749 dev_dbg(mv_chan->device->common.dev, 749 dev_dbg(mv_chan->device->common.dev,
750 "%s src_cnt: %d len: dest %x %u flags: %ld\n", 750 "%s src_cnt: %d len: dest %x %u flags: %ld\n",
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 8d8fef1480a9..ff5b38f9d45b 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -77,10 +77,10 @@ struct pch_dma_regs {
77 u32 dma_ctl0; 77 u32 dma_ctl0;
78 u32 dma_ctl1; 78 u32 dma_ctl1;
79 u32 dma_ctl2; 79 u32 dma_ctl2;
80 u32 reserved1; 80 u32 dma_ctl3;
81 u32 dma_sts0; 81 u32 dma_sts0;
82 u32 dma_sts1; 82 u32 dma_sts1;
83 u32 reserved2; 83 u32 dma_sts2;
84 u32 reserved3; 84 u32 reserved3;
85 struct pch_dma_desc_regs desc[MAX_CHAN_NR]; 85 struct pch_dma_desc_regs desc[MAX_CHAN_NR];
86}; 86};
@@ -130,6 +130,7 @@ struct pch_dma {
130#define PCH_DMA_CTL0 0x00 130#define PCH_DMA_CTL0 0x00
131#define PCH_DMA_CTL1 0x04 131#define PCH_DMA_CTL1 0x04
132#define PCH_DMA_CTL2 0x08 132#define PCH_DMA_CTL2 0x08
133#define PCH_DMA_CTL3 0x0C
133#define PCH_DMA_STS0 0x10 134#define PCH_DMA_STS0 0x10
134#define PCH_DMA_STS1 0x14 135#define PCH_DMA_STS1 0x14
135 136
@@ -138,7 +139,8 @@ struct pch_dma {
138#define dma_writel(pd, name, val) \ 139#define dma_writel(pd, name, val) \
139 writel((val), (pd)->membase + PCH_DMA_##name) 140 writel((val), (pd)->membase + PCH_DMA_##name)
140 141
141static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd) 142static inline
143struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
142{ 144{
143 return container_of(txd, struct pch_dma_desc, txd); 145 return container_of(txd, struct pch_dma_desc, txd);
144} 146}
@@ -163,13 +165,15 @@ static inline struct device *chan2parent(struct dma_chan *chan)
163 return chan->dev->device.parent; 165 return chan->dev->device.parent;
164} 166}
165 167
166static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan) 168static inline
169struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
167{ 170{
168 return list_first_entry(&pd_chan->active_list, 171 return list_first_entry(&pd_chan->active_list,
169 struct pch_dma_desc, desc_node); 172 struct pch_dma_desc, desc_node);
170} 173}
171 174
172static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan) 175static inline
176struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
173{ 177{
174 return list_first_entry(&pd_chan->queue, 178 return list_first_entry(&pd_chan->queue,
175 struct pch_dma_desc, desc_node); 179 struct pch_dma_desc, desc_node);
@@ -199,16 +203,30 @@ static void pdc_set_dir(struct dma_chan *chan)
199 struct pch_dma *pd = to_pd(chan->device); 203 struct pch_dma *pd = to_pd(chan->device);
200 u32 val; 204 u32 val;
201 205
202 val = dma_readl(pd, CTL0); 206 if (chan->chan_id < 8) {
207 val = dma_readl(pd, CTL0);
203 208
204 if (pd_chan->dir == DMA_TO_DEVICE) 209 if (pd_chan->dir == DMA_TO_DEVICE)
205 val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + 210 val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
206 DMA_CTL0_DIR_SHIFT_BITS); 211 DMA_CTL0_DIR_SHIFT_BITS);
207 else 212 else
208 val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + 213 val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
209 DMA_CTL0_DIR_SHIFT_BITS)); 214 DMA_CTL0_DIR_SHIFT_BITS));
215
216 dma_writel(pd, CTL0, val);
217 } else {
 218		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11-->3 */
219 val = dma_readl(pd, CTL3);
210 220
211 dma_writel(pd, CTL0, val); 221 if (pd_chan->dir == DMA_TO_DEVICE)
222 val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
223 DMA_CTL0_DIR_SHIFT_BITS);
224 else
225 val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
226 DMA_CTL0_DIR_SHIFT_BITS));
227
228 dma_writel(pd, CTL3, val);
229 }
212 230
213 dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n", 231 dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
214 chan->chan_id, val); 232 chan->chan_id, val);
@@ -219,13 +237,26 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode)
219 struct pch_dma *pd = to_pd(chan->device); 237 struct pch_dma *pd = to_pd(chan->device);
220 u32 val; 238 u32 val;
221 239
222 val = dma_readl(pd, CTL0); 240 if (chan->chan_id < 8) {
241 val = dma_readl(pd, CTL0);
242
243 val &= ~(DMA_CTL0_MODE_MASK_BITS <<
244 (DMA_CTL0_BITS_PER_CH * chan->chan_id));
245 val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
223 246
224 val &= ~(DMA_CTL0_MODE_MASK_BITS << 247 dma_writel(pd, CTL0, val);
225 (DMA_CTL0_BITS_PER_CH * chan->chan_id)); 248 } else {
 226	val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);	 249		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11-->3 */
250
251 val = dma_readl(pd, CTL3);
252
253 val &= ~(DMA_CTL0_MODE_MASK_BITS <<
254 (DMA_CTL0_BITS_PER_CH * ch));
255 val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
227 256
228 dma_writel(pd, CTL0, val); 257 dma_writel(pd, CTL3, val);
258
259 }
229 260
230 dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", 261 dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
231 chan->chan_id, val); 262 chan->chan_id, val);
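
Channels 8-11 live in a second control register (CTL3) with the same per-channel field layout as CTL0, so pdc_set_dir() and pdc_set_mode() now pick the register and rebase the channel index before the shared bit arithmetic. That selection, factored out as a sketch with illustrative names rather than the pch_dma API:

#include <linux/types.h>

#define MY_BITS_PER_CH	4	/* assumed per-channel field width */

/*
 * Channels 0-7 are described in CTL0, channels 8-11 in CTL3 with the
 * same layout; only the register choice and a rebased index differ.
 */
static void my_ctl_slot(int chan_id, bool *use_ctl3, unsigned int *shift)
{
	int ch = chan_id;

	*use_ctl3 = chan_id >= 8;
	if (*use_ctl3)
		ch -= 8;	/* ch8 --> 0, ch9 --> 1, ... ch11 --> 3 */

	*shift = MY_BITS_PER_CH * ch;
}
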
@@ -251,9 +282,6 @@ static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
251 282
252static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) 283static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
253{ 284{
254 struct pch_dma *pd = to_pd(pd_chan->chan.device);
255 u32 val;
256
257 if (!pdc_is_idle(pd_chan)) { 285 if (!pdc_is_idle(pd_chan)) {
258 dev_err(chan2dev(&pd_chan->chan), 286 dev_err(chan2dev(&pd_chan->chan),
259 "BUG: Attempt to start non-idle channel\n"); 287 "BUG: Attempt to start non-idle channel\n");
@@ -279,10 +307,6 @@ static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
279 channel_writel(pd_chan, NEXT, desc->txd.phys); 307 channel_writel(pd_chan, NEXT, desc->txd.phys);
280 pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG); 308 pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
281 } 309 }
282
283 val = dma_readl(pd, CTL2);
284 val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id);
285 dma_writel(pd, CTL2, val);
286} 310}
287 311
288static void pdc_chain_complete(struct pch_dma_chan *pd_chan, 312static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
@@ -403,7 +427,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
403{ 427{
404 struct pch_dma_desc *desc, *_d; 428 struct pch_dma_desc *desc, *_d;
405 struct pch_dma_desc *ret = NULL; 429 struct pch_dma_desc *ret = NULL;
406 int i; 430 int i = 0;
407 431
408 spin_lock(&pd_chan->lock); 432 spin_lock(&pd_chan->lock);
409 list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { 433 list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
@@ -478,7 +502,6 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
478 spin_unlock_bh(&pd_chan->lock); 502 spin_unlock_bh(&pd_chan->lock);
479 503
480 pdc_enable_irq(chan, 1); 504 pdc_enable_irq(chan, 1);
481 pdc_set_dir(chan);
482 505
483 return pd_chan->descs_allocated; 506 return pd_chan->descs_allocated;
484} 507}
@@ -561,6 +584,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
561 else 584 else
562 return NULL; 585 return NULL;
563 586
587 pd_chan->dir = direction;
588 pdc_set_dir(chan);
589
564 for_each_sg(sgl, sg, sg_len, i) { 590 for_each_sg(sgl, sg, sg_len, i) {
565 desc = pdc_desc_get(pd_chan); 591 desc = pdc_desc_get(pd_chan);
566 592
@@ -703,6 +729,7 @@ static void pch_dma_save_regs(struct pch_dma *pd)
703 pd->regs.dma_ctl0 = dma_readl(pd, CTL0); 729 pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
704 pd->regs.dma_ctl1 = dma_readl(pd, CTL1); 730 pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
705 pd->regs.dma_ctl2 = dma_readl(pd, CTL2); 731 pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
732 pd->regs.dma_ctl3 = dma_readl(pd, CTL3);
706 733
707 list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { 734 list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
708 pd_chan = to_pd_chan(chan); 735 pd_chan = to_pd_chan(chan);
@@ -725,6 +752,7 @@ static void pch_dma_restore_regs(struct pch_dma *pd)
725 dma_writel(pd, CTL0, pd->regs.dma_ctl0); 752 dma_writel(pd, CTL0, pd->regs.dma_ctl0);
726 dma_writel(pd, CTL1, pd->regs.dma_ctl1); 753 dma_writel(pd, CTL1, pd->regs.dma_ctl1);
727 dma_writel(pd, CTL2, pd->regs.dma_ctl2); 754 dma_writel(pd, CTL2, pd->regs.dma_ctl2);
755 dma_writel(pd, CTL3, pd->regs.dma_ctl3);
728 756
729 list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { 757 list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
730 pd_chan = to_pd_chan(chan); 758 pd_chan = to_pd_chan(chan);
@@ -850,8 +878,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
850 878
851 pd_chan->membase = &regs->desc[i]; 879 pd_chan->membase = &regs->desc[i];
852 880
853 pd_chan->dir = (i % 2) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
854
855 spin_lock_init(&pd_chan->lock); 881 spin_lock_init(&pd_chan->lock);
856 882
857 INIT_LIST_HEAD(&pd_chan->active_list); 883 INIT_LIST_HEAD(&pd_chan->active_list);
@@ -929,13 +955,23 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
929#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026 955#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026
930#define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B 956#define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B
931#define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034 957#define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034
958#define PCI_DEVICE_ID_ML7213_DMA4_12CH 0x8032
959#define PCI_DEVICE_ID_ML7223_DMA1_4CH 0x800B
960#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E
961#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017
962#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B
932 963
933static const struct pci_device_id pch_dma_id_table[] = { 964DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
934 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, 965 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
935 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 }, 966 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
936 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */ 967 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
937 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */ 968 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
938 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */ 969 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
970 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */
971 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */
972 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
973 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
974 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
939 { 0, }, 975 { 0, },
940}; 976};
941 977
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 3b0247e74cc4..fc457a7e8832 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -2313,7 +2313,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
2313 if (unlikely(!len)) 2313 if (unlikely(!len))
2314 return NULL; 2314 return NULL;
2315 2315
2316 BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT)); 2316 BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
2317 2317
2318 spin_lock_bh(&ppc440spe_chan->lock); 2318 spin_lock_bh(&ppc440spe_chan->lock);
2319 2319
@@ -2354,7 +2354,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
2354 if (unlikely(!len)) 2354 if (unlikely(!len))
2355 return NULL; 2355 return NULL;
2356 2356
2357 BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT)); 2357 BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
2358 2358
2359 spin_lock_bh(&ppc440spe_chan->lock); 2359 spin_lock_bh(&ppc440spe_chan->lock);
2360 2360
@@ -2397,7 +2397,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
2397 dma_dest, dma_src, src_cnt)); 2397 dma_dest, dma_src, src_cnt));
2398 if (unlikely(!len)) 2398 if (unlikely(!len))
2399 return NULL; 2399 return NULL;
2400 BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT)); 2400 BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
2401 2401
2402 dev_dbg(ppc440spe_chan->device->common.dev, 2402 dev_dbg(ppc440spe_chan->device->common.dev,
2403 "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n", 2403 "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
@@ -2887,7 +2887,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
2887 ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id, 2887 ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
2888 dst, src, src_cnt)); 2888 dst, src, src_cnt));
2889 BUG_ON(!len); 2889 BUG_ON(!len);
2890 BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT)); 2890 BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
2891 BUG_ON(!src_cnt); 2891 BUG_ON(!src_cnt);
2892 2892
2893 if (src_cnt == 1 && dst[1] == src[0]) { 2893 if (src_cnt == 1 && dst[1] == src[0]) {
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 94ee15dd3aed..8f222d4db7de 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1829,7 +1829,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
1829{ 1829{
1830 struct stedma40_platform_data *plat = chan->base->plat_data; 1830 struct stedma40_platform_data *plat = chan->base->plat_data;
1831 struct stedma40_chan_cfg *cfg = &chan->dma_cfg; 1831 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1832 dma_addr_t addr; 1832 dma_addr_t addr = 0;
1833 1833
1834 if (chan->runtime_addr) 1834 if (chan->runtime_addr)
1835 return chan->runtime_addr; 1835 return chan->runtime_addr;
@@ -2962,4 +2962,4 @@ static int __init stedma40_init(void)
2962{ 2962{
2963 return platform_driver_probe(&d40_driver, d40_probe); 2963 return platform_driver_probe(&d40_driver, d40_probe);
2964} 2964}
2965arch_initcall(stedma40_init); 2965subsys_initcall(stedma40_init);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d21364603755..4a7f63143455 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -86,6 +86,34 @@ config GPIO_IT8761E
86 help 86 help
87 Say yes here to support GPIO functionality of IT8761E super I/O chip. 87 Say yes here to support GPIO functionality of IT8761E super I/O chip.
88 88
89config GPIO_EXYNOS4
90 bool "Samsung Exynos4 GPIO library support"
91 default y if CPU_EXYNOS4210
92 depends on ARM
93 help
 94	  Say yes here to support the GPIO library for Samsung Exynos4 series SoCs.
95
96config GPIO_PLAT_SAMSUNG
97 bool "Samsung SoCs GPIO library support"
98 default y if SAMSUNG_GPIOLIB_4BIT
99 depends on ARM
100 help
 101	  Say yes here to support the GPIO library for Samsung SoCs.
102
103config GPIO_S5PC100
104 bool "Samsung S5PC100 GPIO library support"
105 default y if CPU_S5PC100
106 depends on ARM
107 help
 108	  Say yes here to support the GPIO library for Samsung S5PC100 SoCs.
109
110config GPIO_S5PV210
111 bool "Samsung S5PV210/S5PC110 GPIO library support"
112 default y if CPU_S5PV210
113 depends on ARM
114 help
 115	  Say yes here to support the GPIO library for Samsung S5PV210/S5PC110 SoCs.
116
89config GPIO_PL061 117config GPIO_PL061
90 bool "PrimeCell PL061 GPIO support" 118 bool "PrimeCell PL061 GPIO support"
91 depends on ARM_AMBA 119 depends on ARM_AMBA
@@ -303,7 +331,7 @@ comment "PCI GPIO expanders:"
303 331
304config GPIO_CS5535 332config GPIO_CS5535
305 tristate "AMD CS5535/CS5536 GPIO support" 333 tristate "AMD CS5535/CS5536 GPIO support"
306 depends on PCI && X86 && !CS5535_GPIO 334 depends on PCI && X86 && !CS5535_GPIO && MFD_CS5535
307 help 335 help
308 The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that 336 The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
309 can be used for quite a number of things. The CS5535/6 is found on 337 can be used for quite a number of things. The CS5535/6 is found on
@@ -334,13 +362,19 @@ config GPIO_LANGWELL
334 Say Y here to support Intel Langwell/Penwell GPIO. 362 Say Y here to support Intel Langwell/Penwell GPIO.
335 363
336config GPIO_PCH 364config GPIO_PCH
337 tristate "PCH GPIO of Intel Topcliff" 365 tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GPIO"
338 depends on PCI && X86 366 depends on PCI && X86
339 help 367 help
340 This driver is for PCH(Platform controller Hub) GPIO of Intel Topcliff 368 This driver is for PCH(Platform controller Hub) GPIO of Intel Topcliff
341 which is an IOH(Input/Output Hub) for x86 embedded processor. 369 which is an IOH(Input/Output Hub) for x86 embedded processor.
342 This driver can access PCH GPIO device. 370 This driver can access PCH GPIO device.
343 371
 372	  This driver can also be used for the OKI SEMICONDUCTOR ML7223
 373	  IOH (Input/Output Hub). The ML7223 IOH is intended for
 374	  MP (Media Phone) use.
 375	  The ML7223 is a companion chip for the Intel Atom E6xx series
 376	  and is fully compatible with the Intel EG20T PCH.
377
344config GPIO_ML_IOH 378config GPIO_ML_IOH
345 tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support" 379 tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
346 depends on PCI 380 depends on PCI
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 6a3387acc0e5..b605f8ec6fbe 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -8,6 +8,10 @@ obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
8obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o 8obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
9obj-$(CONFIG_GPIO_BASIC_MMIO_CORE) += basic_mmio_gpio.o 9obj-$(CONFIG_GPIO_BASIC_MMIO_CORE) += basic_mmio_gpio.o
10obj-$(CONFIG_GPIO_BASIC_MMIO) += basic_mmio_gpio.o 10obj-$(CONFIG_GPIO_BASIC_MMIO) += basic_mmio_gpio.o
11obj-$(CONFIG_GPIO_EXYNOS4) += gpio-exynos4.o
12obj-$(CONFIG_GPIO_PLAT_SAMSUNG) += gpio-plat-samsung.o
13obj-$(CONFIG_GPIO_S5PC100) += gpio-s5pc100.o
14obj-$(CONFIG_GPIO_S5PV210) += gpio-s5pv210.o
11obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o 15obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
12obj-$(CONFIG_GPIO_MAX730X) += max730x.o 16obj-$(CONFIG_GPIO_MAX730X) += max730x.o
13obj-$(CONFIG_GPIO_MAX7300) += max7300.o 17obj-$(CONFIG_GPIO_MAX7300) += max7300.o
@@ -16,6 +20,7 @@ obj-$(CONFIG_GPIO_MAX732X) += max732x.o
16obj-$(CONFIG_GPIO_MC33880) += mc33880.o 20obj-$(CONFIG_GPIO_MC33880) += mc33880.o
17obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o 21obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
18obj-$(CONFIG_GPIO_74X164) += 74x164.o 22obj-$(CONFIG_GPIO_74X164) += 74x164.o
23obj-$(CONFIG_ARCH_OMAP) += gpio-omap.o
19obj-$(CONFIG_GPIO_PCA953X) += pca953x.o 24obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
20obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o 25obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
21obj-$(CONFIG_GPIO_PCH) += pch_gpio.o 26obj-$(CONFIG_GPIO_PCH) += pch_gpio.o
@@ -34,6 +39,8 @@ obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
34obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o 39obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o
35obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o 40obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o
36obj-$(CONFIG_GPIO_SCH) += sch_gpio.o 41obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
42obj-$(CONFIG_MACH_U300) += gpio-u300.o
43obj-$(CONFIG_PLAT_NOMADIK) += gpio-nomadik.o
37obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o 44obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o
38obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o 45obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
39obj-$(CONFIG_GPIO_SX150X) += sx150x.o 46obj-$(CONFIG_GPIO_SX150X) += sx150x.o
diff --git a/drivers/gpio/gpio-exynos4.c b/drivers/gpio/gpio-exynos4.c
new file mode 100644
index 000000000000..d54ca6adb660
--- /dev/null
+++ b/drivers/gpio/gpio-exynos4.c
@@ -0,0 +1,365 @@
1/* linux/arch/arm/mach-exynos4/gpiolib.c
2 *
3 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * EXYNOS4 - GPIOlib support
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#include <linux/kernel.h>
14#include <linux/irq.h>
15#include <linux/io.h>
16#include <linux/gpio.h>
17
18#include <mach/map.h>
19
20#include <plat/gpio-core.h>
21#include <plat/gpio-cfg.h>
22#include <plat/gpio-cfg-helpers.h>
23
24static struct s3c_gpio_cfg gpio_cfg = {
25 .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
26 .set_pull = s3c_gpio_setpull_updown,
27 .get_pull = s3c_gpio_getpull_updown,
28};
29
30static struct s3c_gpio_cfg gpio_cfg_noint = {
31 .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
32 .set_pull = s3c_gpio_setpull_updown,
33 .get_pull = s3c_gpio_getpull_updown,
34};
35
36/*
37 * Following are the gpio banks in EXYNOS4.
38 *
39 * When the 'config' member is left NULL, it is initialized to the
40 * default structure gpio_cfg in the init function below.
41 *
42 * The 'base' member is also initialized in the init function below.
43 * Note: the initialization of the 'base' member of the s3c_gpio_chip
44 * structure depends on the banks being listed in order here.
45 */
46static struct s3c_gpio_chip exynos4_gpio_part1_4bit[] = {
47 {
48 .chip = {
49 .base = EXYNOS4_GPA0(0),
50 .ngpio = EXYNOS4_GPIO_A0_NR,
51 .label = "GPA0",
52 },
53 }, {
54 .chip = {
55 .base = EXYNOS4_GPA1(0),
56 .ngpio = EXYNOS4_GPIO_A1_NR,
57 .label = "GPA1",
58 },
59 }, {
60 .chip = {
61 .base = EXYNOS4_GPB(0),
62 .ngpio = EXYNOS4_GPIO_B_NR,
63 .label = "GPB",
64 },
65 }, {
66 .chip = {
67 .base = EXYNOS4_GPC0(0),
68 .ngpio = EXYNOS4_GPIO_C0_NR,
69 .label = "GPC0",
70 },
71 }, {
72 .chip = {
73 .base = EXYNOS4_GPC1(0),
74 .ngpio = EXYNOS4_GPIO_C1_NR,
75 .label = "GPC1",
76 },
77 }, {
78 .chip = {
79 .base = EXYNOS4_GPD0(0),
80 .ngpio = EXYNOS4_GPIO_D0_NR,
81 .label = "GPD0",
82 },
83 }, {
84 .chip = {
85 .base = EXYNOS4_GPD1(0),
86 .ngpio = EXYNOS4_GPIO_D1_NR,
87 .label = "GPD1",
88 },
89 }, {
90 .chip = {
91 .base = EXYNOS4_GPE0(0),
92 .ngpio = EXYNOS4_GPIO_E0_NR,
93 .label = "GPE0",
94 },
95 }, {
96 .chip = {
97 .base = EXYNOS4_GPE1(0),
98 .ngpio = EXYNOS4_GPIO_E1_NR,
99 .label = "GPE1",
100 },
101 }, {
102 .chip = {
103 .base = EXYNOS4_GPE2(0),
104 .ngpio = EXYNOS4_GPIO_E2_NR,
105 .label = "GPE2",
106 },
107 }, {
108 .chip = {
109 .base = EXYNOS4_GPE3(0),
110 .ngpio = EXYNOS4_GPIO_E3_NR,
111 .label = "GPE3",
112 },
113 }, {
114 .chip = {
115 .base = EXYNOS4_GPE4(0),
116 .ngpio = EXYNOS4_GPIO_E4_NR,
117 .label = "GPE4",
118 },
119 }, {
120 .chip = {
121 .base = EXYNOS4_GPF0(0),
122 .ngpio = EXYNOS4_GPIO_F0_NR,
123 .label = "GPF0",
124 },
125 }, {
126 .chip = {
127 .base = EXYNOS4_GPF1(0),
128 .ngpio = EXYNOS4_GPIO_F1_NR,
129 .label = "GPF1",
130 },
131 }, {
132 .chip = {
133 .base = EXYNOS4_GPF2(0),
134 .ngpio = EXYNOS4_GPIO_F2_NR,
135 .label = "GPF2",
136 },
137 }, {
138 .chip = {
139 .base = EXYNOS4_GPF3(0),
140 .ngpio = EXYNOS4_GPIO_F3_NR,
141 .label = "GPF3",
142 },
143 },
144};
145
146static struct s3c_gpio_chip exynos4_gpio_part2_4bit[] = {
147 {
148 .chip = {
149 .base = EXYNOS4_GPJ0(0),
150 .ngpio = EXYNOS4_GPIO_J0_NR,
151 .label = "GPJ0",
152 },
153 }, {
154 .chip = {
155 .base = EXYNOS4_GPJ1(0),
156 .ngpio = EXYNOS4_GPIO_J1_NR,
157 .label = "GPJ1",
158 },
159 }, {
160 .chip = {
161 .base = EXYNOS4_GPK0(0),
162 .ngpio = EXYNOS4_GPIO_K0_NR,
163 .label = "GPK0",
164 },
165 }, {
166 .chip = {
167 .base = EXYNOS4_GPK1(0),
168 .ngpio = EXYNOS4_GPIO_K1_NR,
169 .label = "GPK1",
170 },
171 }, {
172 .chip = {
173 .base = EXYNOS4_GPK2(0),
174 .ngpio = EXYNOS4_GPIO_K2_NR,
175 .label = "GPK2",
176 },
177 }, {
178 .chip = {
179 .base = EXYNOS4_GPK3(0),
180 .ngpio = EXYNOS4_GPIO_K3_NR,
181 .label = "GPK3",
182 },
183 }, {
184 .chip = {
185 .base = EXYNOS4_GPL0(0),
186 .ngpio = EXYNOS4_GPIO_L0_NR,
187 .label = "GPL0",
188 },
189 }, {
190 .chip = {
191 .base = EXYNOS4_GPL1(0),
192 .ngpio = EXYNOS4_GPIO_L1_NR,
193 .label = "GPL1",
194 },
195 }, {
196 .chip = {
197 .base = EXYNOS4_GPL2(0),
198 .ngpio = EXYNOS4_GPIO_L2_NR,
199 .label = "GPL2",
200 },
201 }, {
202 .config = &gpio_cfg_noint,
203 .chip = {
204 .base = EXYNOS4_GPY0(0),
205 .ngpio = EXYNOS4_GPIO_Y0_NR,
206 .label = "GPY0",
207 },
208 }, {
209 .config = &gpio_cfg_noint,
210 .chip = {
211 .base = EXYNOS4_GPY1(0),
212 .ngpio = EXYNOS4_GPIO_Y1_NR,
213 .label = "GPY1",
214 },
215 }, {
216 .config = &gpio_cfg_noint,
217 .chip = {
218 .base = EXYNOS4_GPY2(0),
219 .ngpio = EXYNOS4_GPIO_Y2_NR,
220 .label = "GPY2",
221 },
222 }, {
223 .config = &gpio_cfg_noint,
224 .chip = {
225 .base = EXYNOS4_GPY3(0),
226 .ngpio = EXYNOS4_GPIO_Y3_NR,
227 .label = "GPY3",
228 },
229 }, {
230 .config = &gpio_cfg_noint,
231 .chip = {
232 .base = EXYNOS4_GPY4(0),
233 .ngpio = EXYNOS4_GPIO_Y4_NR,
234 .label = "GPY4",
235 },
236 }, {
237 .config = &gpio_cfg_noint,
238 .chip = {
239 .base = EXYNOS4_GPY5(0),
240 .ngpio = EXYNOS4_GPIO_Y5_NR,
241 .label = "GPY5",
242 },
243 }, {
244 .config = &gpio_cfg_noint,
245 .chip = {
246 .base = EXYNOS4_GPY6(0),
247 .ngpio = EXYNOS4_GPIO_Y6_NR,
248 .label = "GPY6",
249 },
250 }, {
251 .base = (S5P_VA_GPIO2 + 0xC00),
252 .config = &gpio_cfg_noint,
253 .irq_base = IRQ_EINT(0),
254 .chip = {
255 .base = EXYNOS4_GPX0(0),
256 .ngpio = EXYNOS4_GPIO_X0_NR,
257 .label = "GPX0",
258 .to_irq = samsung_gpiolib_to_irq,
259 },
260 }, {
261 .base = (S5P_VA_GPIO2 + 0xC20),
262 .config = &gpio_cfg_noint,
263 .irq_base = IRQ_EINT(8),
264 .chip = {
265 .base = EXYNOS4_GPX1(0),
266 .ngpio = EXYNOS4_GPIO_X1_NR,
267 .label = "GPX1",
268 .to_irq = samsung_gpiolib_to_irq,
269 },
270 }, {
271 .base = (S5P_VA_GPIO2 + 0xC40),
272 .config = &gpio_cfg_noint,
273 .irq_base = IRQ_EINT(16),
274 .chip = {
275 .base = EXYNOS4_GPX2(0),
276 .ngpio = EXYNOS4_GPIO_X2_NR,
277 .label = "GPX2",
278 .to_irq = samsung_gpiolib_to_irq,
279 },
280 }, {
281 .base = (S5P_VA_GPIO2 + 0xC60),
282 .config = &gpio_cfg_noint,
283 .irq_base = IRQ_EINT(24),
284 .chip = {
285 .base = EXYNOS4_GPX3(0),
286 .ngpio = EXYNOS4_GPIO_X3_NR,
287 .label = "GPX3",
288 .to_irq = samsung_gpiolib_to_irq,
289 },
290 },
291};
292
293static struct s3c_gpio_chip exynos4_gpio_part3_4bit[] = {
294 {
295 .chip = {
296 .base = EXYNOS4_GPZ(0),
297 .ngpio = EXYNOS4_GPIO_Z_NR,
298 .label = "GPZ",
299 },
300 },
301};
302
303static __init int exynos4_gpiolib_init(void)
304{
305 struct s3c_gpio_chip *chip;
306 int i;
307 int group = 0;
308 int nr_chips;
309
310 /* GPIO part 1 */
311
312 chip = exynos4_gpio_part1_4bit;
313 nr_chips = ARRAY_SIZE(exynos4_gpio_part1_4bit);
314
315 for (i = 0; i < nr_chips; i++, chip++) {
316 if (chip->config == NULL) {
317 chip->config = &gpio_cfg;
318 /* Assign the GPIO interrupt group */
319 chip->group = group++;
320 }
321 if (chip->base == NULL)
322 chip->base = S5P_VA_GPIO1 + (i) * 0x20;
323 }
324
325 samsung_gpiolib_add_4bit_chips(exynos4_gpio_part1_4bit, nr_chips);
326
327 /* GPIO part 2 */
328
329 chip = exynos4_gpio_part2_4bit;
330 nr_chips = ARRAY_SIZE(exynos4_gpio_part2_4bit);
331
332 for (i = 0; i < nr_chips; i++, chip++) {
333 if (chip->config == NULL) {
334 chip->config = &gpio_cfg;
335 /* Assign the GPIO interrupt group */
336 chip->group = group++;
337 }
338 if (chip->base == NULL)
339 chip->base = S5P_VA_GPIO2 + (i) * 0x20;
340 }
341
342 samsung_gpiolib_add_4bit_chips(exynos4_gpio_part2_4bit, nr_chips);
343
344 /* GPIO part 3 */
345
346 chip = exynos4_gpio_part3_4bit;
347 nr_chips = ARRAY_SIZE(exynos4_gpio_part3_4bit);
348
349 for (i = 0; i < nr_chips; i++, chip++) {
350 if (chip->config == NULL) {
351 chip->config = &gpio_cfg;
352 /* Assign the GPIO interrupt group */
353 chip->group = group++;
354 }
355 if (chip->base == NULL)
356 chip->base = S5P_VA_GPIO3 + (i) * 0x20;
357 }
358
359 samsung_gpiolib_add_4bit_chips(exynos4_gpio_part3_4bit, nr_chips);
360 s5p_register_gpioint_bank(IRQ_GPIO_XA, 0, IRQ_GPIO1_NR_GROUPS);
361 s5p_register_gpioint_bank(IRQ_GPIO_XB, IRQ_GPIO1_NR_GROUPS, IRQ_GPIO2_NR_GROUPS);
362
363 return 0;
364}
365core_initcall(exynos4_gpiolib_init);
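For reference, the init loop above hands every bank that lacks an explicit .base a consecutive 0x20-byte register window starting at the part's virtual base. A minimal standalone sketch of that address arithmetic, with a placeholder base value (the real S5P_VA_GPIO1 mapping is SoC-specific):

	#include <stdio.h>

	#define S5P_VA_GPIO1	0xf8000000UL	/* placeholder virtual base */
	#define NR_PART1_BANKS	16		/* matches exynos4_gpio_part1_4bit */

	int main(void)
	{
		int i;

		/* same computation as the init loop: bank i -> base + i * 0x20 */
		for (i = 0; i < NR_PART1_BANKS; i++)
			printf("bank %2d -> %#lx\n", i, S5P_VA_GPIO1 + i * 0x20);
		return 0;
	}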
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
new file mode 100644
index 000000000000..4961ef9bc153
--- /dev/null
+++ b/drivers/gpio/gpio-nomadik.c
@@ -0,0 +1,1069 @@
1/*
2 * Generic GPIO driver for logic cells found in the Nomadik SoC
3 *
4 * Copyright (C) 2008,2009 STMicroelectronics
5 * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
6 * Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/device.h>
16#include <linux/platform_device.h>
17#include <linux/io.h>
18#include <linux/clk.h>
19#include <linux/err.h>
20#include <linux/gpio.h>
21#include <linux/spinlock.h>
22#include <linux/interrupt.h>
23#include <linux/irq.h>
24#include <linux/slab.h>
25
26#include <asm/mach/irq.h>
27
28#include <plat/pincfg.h>
29#include <mach/hardware.h>
30#include <mach/gpio.h>
31
32/*
33 * The GPIO module in the Nomadik family of Systems-on-Chip is an
34 * AMBA device, managing 32 pins and alternate functions. The logic block
35 * is currently used in the Nomadik and ux500.
36 *
37 * Symbols in this file are called "nmk_gpio" for "nomadik gpio"
38 */
39
40#define NMK_GPIO_PER_CHIP 32
41
42struct nmk_gpio_chip {
43 struct gpio_chip chip;
44 void __iomem *addr;
45 struct clk *clk;
46 unsigned int bank;
47 unsigned int parent_irq;
48 int secondary_parent_irq;
49 u32 (*get_secondary_status)(unsigned int bank);
50 void (*set_ioforce)(bool enable);
51 spinlock_t lock;
52 /* Keep track of configured edges */
53 u32 edge_rising;
54 u32 edge_falling;
55 u32 real_wake;
56 u32 rwimsc;
57 u32 fwimsc;
58 u32 slpm;
59 u32 enabled;
60 u32 pull_up;
61};
62
63static struct nmk_gpio_chip *
64nmk_gpio_chips[DIV_ROUND_UP(ARCH_NR_GPIOS, NMK_GPIO_PER_CHIP)];
65
66static DEFINE_SPINLOCK(nmk_gpio_slpm_lock);
67
68#define NUM_BANKS ARRAY_SIZE(nmk_gpio_chips)
69
70static void __nmk_gpio_set_mode(struct nmk_gpio_chip *nmk_chip,
71 unsigned offset, int gpio_mode)
72{
73 u32 bit = 1 << offset;
74 u32 afunc, bfunc;
75
76 afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & ~bit;
77 bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & ~bit;
78 if (gpio_mode & NMK_GPIO_ALT_A)
79 afunc |= bit;
80 if (gpio_mode & NMK_GPIO_ALT_B)
81 bfunc |= bit;
82 writel(afunc, nmk_chip->addr + NMK_GPIO_AFSLA);
83 writel(bfunc, nmk_chip->addr + NMK_GPIO_AFSLB);
84}
85
86static void __nmk_gpio_set_slpm(struct nmk_gpio_chip *nmk_chip,
87 unsigned offset, enum nmk_gpio_slpm mode)
88{
89 u32 bit = 1 << offset;
90 u32 slpm;
91
92 slpm = readl(nmk_chip->addr + NMK_GPIO_SLPC);
93 if (mode == NMK_GPIO_SLPM_NOCHANGE)
94 slpm |= bit;
95 else
96 slpm &= ~bit;
97 writel(slpm, nmk_chip->addr + NMK_GPIO_SLPC);
98}
99
100static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip,
101 unsigned offset, enum nmk_gpio_pull pull)
102{
103 u32 bit = 1 << offset;
104 u32 pdis;
105
106 pdis = readl(nmk_chip->addr + NMK_GPIO_PDIS);
107 if (pull == NMK_GPIO_PULL_NONE) {
108 pdis |= bit;
109 nmk_chip->pull_up &= ~bit;
110 } else {
111 pdis &= ~bit;
112 }
113
114 writel(pdis, nmk_chip->addr + NMK_GPIO_PDIS);
115
116 if (pull == NMK_GPIO_PULL_UP) {
117 nmk_chip->pull_up |= bit;
118 writel(bit, nmk_chip->addr + NMK_GPIO_DATS);
119 } else if (pull == NMK_GPIO_PULL_DOWN) {
120 nmk_chip->pull_up &= ~bit;
121 writel(bit, nmk_chip->addr + NMK_GPIO_DATC);
122 }
123}
124
125static void __nmk_gpio_make_input(struct nmk_gpio_chip *nmk_chip,
126 unsigned offset)
127{
128 writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC);
129}
130
131static void __nmk_gpio_set_output(struct nmk_gpio_chip *nmk_chip,
132 unsigned offset, int val)
133{
134 if (val)
135 writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATS);
136 else
137 writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATC);
138}
139
140static void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip,
141 unsigned offset, int val)
142{
143 writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRS);
144 __nmk_gpio_set_output(nmk_chip, offset, val);
145}
146
147static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip,
148 unsigned offset, int gpio_mode,
149 bool glitch)
150{
151 u32 rwimsc = readl(nmk_chip->addr + NMK_GPIO_RWIMSC);
152 u32 fwimsc = readl(nmk_chip->addr + NMK_GPIO_FWIMSC);
153
154 if (glitch && nmk_chip->set_ioforce) {
155 u32 bit = BIT(offset);
156
157 /* Prevent spurious wakeups */
158 writel(rwimsc & ~bit, nmk_chip->addr + NMK_GPIO_RWIMSC);
159 writel(fwimsc & ~bit, nmk_chip->addr + NMK_GPIO_FWIMSC);
160
161 nmk_chip->set_ioforce(true);
162 }
163
164 __nmk_gpio_set_mode(nmk_chip, offset, gpio_mode);
165
166 if (glitch && nmk_chip->set_ioforce) {
167 nmk_chip->set_ioforce(false);
168
169 writel(rwimsc, nmk_chip->addr + NMK_GPIO_RWIMSC);
170 writel(fwimsc, nmk_chip->addr + NMK_GPIO_FWIMSC);
171 }
172}
173
174static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
175 pin_cfg_t cfg, bool sleep, unsigned int *slpmregs)
176{
177 static const char *afnames[] = {
178 [NMK_GPIO_ALT_GPIO] = "GPIO",
179 [NMK_GPIO_ALT_A] = "A",
180 [NMK_GPIO_ALT_B] = "B",
181 [NMK_GPIO_ALT_C] = "C"
182 };
183 static const char *pullnames[] = {
184 [NMK_GPIO_PULL_NONE] = "none",
185 [NMK_GPIO_PULL_UP] = "up",
186 [NMK_GPIO_PULL_DOWN] = "down",
187 [3] /* illegal */ = "??"
188 };
189 static const char *slpmnames[] = {
190 [NMK_GPIO_SLPM_INPUT] = "input/wakeup",
191 [NMK_GPIO_SLPM_NOCHANGE] = "no-change/no-wakeup",
192 };
193
194 int pin = PIN_NUM(cfg);
195 int pull = PIN_PULL(cfg);
196 int af = PIN_ALT(cfg);
197 int slpm = PIN_SLPM(cfg);
198 int output = PIN_DIR(cfg);
199 int val = PIN_VAL(cfg);
200 bool glitch = af == NMK_GPIO_ALT_C;
201
202 dev_dbg(nmk_chip->chip.dev, "pin %d [%#lx]: af %s, pull %s, slpm %s (%s%s)\n",
203 pin, cfg, afnames[af], pullnames[pull], slpmnames[slpm],
204 output ? "output " : "input",
205 output ? (val ? "high" : "low") : "");
206
207 if (sleep) {
208 int slpm_pull = PIN_SLPM_PULL(cfg);
209 int slpm_output = PIN_SLPM_DIR(cfg);
210 int slpm_val = PIN_SLPM_VAL(cfg);
211
212 af = NMK_GPIO_ALT_GPIO;
213
214 /*
215 * The SLPM_* values are normal values + 1 to allow zero to
216 * mean "same as normal".
217 */
218 if (slpm_pull)
219 pull = slpm_pull - 1;
220 if (slpm_output)
221 output = slpm_output - 1;
222 if (slpm_val)
223 val = slpm_val - 1;
224
225 dev_dbg(nmk_chip->chip.dev, "pin %d: sleep pull %s, dir %s, val %s\n",
226 pin,
227 slpm_pull ? pullnames[pull] : "same",
228 slpm_output ? (output ? "output" : "input") : "same",
229 slpm_val ? (val ? "high" : "low") : "same");
230 }
231
232 if (output)
233 __nmk_gpio_make_output(nmk_chip, offset, val);
234 else {
235 __nmk_gpio_make_input(nmk_chip, offset);
236 __nmk_gpio_set_pull(nmk_chip, offset, pull);
237 }
238
239 /*
240 * If we've backed up the SLPM registers (glitch workaround), modify
241 * the backups since they will be restored.
242 */
243 if (slpmregs) {
244 if (slpm == NMK_GPIO_SLPM_NOCHANGE)
245 slpmregs[nmk_chip->bank] |= BIT(offset);
246 else
247 slpmregs[nmk_chip->bank] &= ~BIT(offset);
248 } else
249 __nmk_gpio_set_slpm(nmk_chip, offset, slpm);
250
251 __nmk_gpio_set_mode_safe(nmk_chip, offset, af, glitch);
252}
253
254/*
255 * Safe sequence used to switch IOs between GPIO and Alternate-C mode:
256 * - Save SLPM registers
257 * - Set SLPM=0 for the IOs you want to switch and others to 1
258 * - Configure the GPIO registers for the IOs that are being switched
259 * - Set IOFORCE=1
260 * - Modify the AFLSA/B registers for the IOs that are being switched
261 * - Set IOFORCE=0
262 * - Restore SLPM registers
263 * - Any spurious wake up event during switch sequence to be ignored and
264 * cleared
265 */
266static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
267{
268 int i;
269
270 for (i = 0; i < NUM_BANKS; i++) {
271 struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
272 unsigned int temp = slpm[i];
273
274 if (!chip)
275 break;
276
277 slpm[i] = readl(chip->addr + NMK_GPIO_SLPC);
278 writel(temp, chip->addr + NMK_GPIO_SLPC);
279 }
280}
281
282static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm)
283{
284 int i;
285
286 for (i = 0; i < NUM_BANKS; i++) {
287 struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
288
289 if (!chip)
290 break;
291
292 writel(slpm[i], chip->addr + NMK_GPIO_SLPC);
293 }
294}
295
296static int __nmk_config_pins(pin_cfg_t *cfgs, int num, bool sleep)
297{
298 static unsigned int slpm[NUM_BANKS];
299 unsigned long flags;
300 bool glitch = false;
301 int ret = 0;
302 int i;
303
304 for (i = 0; i < num; i++) {
305 if (PIN_ALT(cfgs[i]) == NMK_GPIO_ALT_C) {
306 glitch = true;
307 break;
308 }
309 }
310
311 spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
312
313 if (glitch) {
314 memset(slpm, 0xff, sizeof(slpm));
315
316 for (i = 0; i < num; i++) {
317 int pin = PIN_NUM(cfgs[i]);
318 int offset = pin % NMK_GPIO_PER_CHIP;
319
320 if (PIN_ALT(cfgs[i]) == NMK_GPIO_ALT_C)
321 slpm[pin / NMK_GPIO_PER_CHIP] &= ~BIT(offset);
322 }
323
324 nmk_gpio_glitch_slpm_init(slpm);
325 }
326
327 for (i = 0; i < num; i++) {
328 struct nmk_gpio_chip *nmk_chip;
329 int pin = PIN_NUM(cfgs[i]);
330
331 nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(pin));
332 if (!nmk_chip) {
333 ret = -EINVAL;
334 break;
335 }
336
337 spin_lock(&nmk_chip->lock);
338 __nmk_config_pin(nmk_chip, pin - nmk_chip->chip.base,
339 cfgs[i], sleep, glitch ? slpm : NULL);
340 spin_unlock(&nmk_chip->lock);
341 }
342
343 if (glitch)
344 nmk_gpio_glitch_slpm_restore(slpm);
345
346 spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
347
348 return ret;
349}
350
351/**
352 * nmk_config_pin - configure a pin's mux attributes
353 * @cfg: pin configuration
354 *
355 * Configures a pin's mode (alternate function or GPIO), its pull up status,
356 * and its sleep mode based on the specified configuration. The @cfg is
357 * usually one of the SoC specific macros defined in mach/<soc>-pins.h. These
358 * are constructed using, and can be further enhanced with, the macros in
359 * plat/pincfg.h.
360 *
361 * If a pin's mode is set to GPIO, it is configured as an input to avoid
362 * side-effects. The gpio can be manipulated later using standard GPIO API
363 * calls.
364 */
365int nmk_config_pin(pin_cfg_t cfg, bool sleep)
366{
367 return __nmk_config_pins(&cfg, 1, sleep);
368}
369EXPORT_SYMBOL(nmk_config_pin);
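A usage sketch: board code typically builds a pin_cfg_t from the macros in plat/pincfg.h and calls this at init time. PIN_CFG_INPUT() and the pin number below are illustrative assumptions, not taken from this patch:

	/* Hedged sketch: configure one pin as a pulled-up GPIO input at board
	 * init. PIN_CFG_INPUT() is assumed to exist in plat/pincfg.h; pin 42
	 * is arbitrary.
	 */
	static int __init board_pins_init(void)
	{
		pin_cfg_t cfg = PIN_CFG_INPUT(42, GPIO, PULLUP); /* assumed macro */

		return nmk_config_pin(cfg, false);	/* normal (non-sleep) config */
	}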
370
371/**
372 * nmk_config_pins - configure several pins at once
373 * @cfgs: array of pin configurations
374 * @num: number of elements in the array
375 *
376 * Configures several pins using nmk_config_pin(). Refer to that function for
377 * further information.
378 */
379int nmk_config_pins(pin_cfg_t *cfgs, int num)
380{
381 return __nmk_config_pins(cfgs, num, false);
382}
383EXPORT_SYMBOL(nmk_config_pins);
384
385int nmk_config_pins_sleep(pin_cfg_t *cfgs, int num)
386{
387 return __nmk_config_pins(cfgs, num, true);
388}
389EXPORT_SYMBOL(nmk_config_pins_sleep);
390
391/**
392 * nmk_gpio_set_slpm() - configure the sleep mode of a pin
393 * @gpio: pin number
394 * @mode: NMK_GPIO_SLPM_INPUT or NMK_GPIO_SLPM_NOCHANGE
395 *
396 * Sets the sleep mode of a pin. If @mode is NMK_GPIO_SLPM_INPUT, the pin is
397 * changed to an input (with pullup/down enabled) in sleep and deep sleep. If
398 * @mode is NMK_GPIO_SLPM_NOCHANGE, the pin remains in the state it was
399 * configured even when in sleep and deep sleep.
400 *
401 * On DB8500v2 onwards, this setting loses the previous meaning and instead
402 * indicates if wakeup detection is enabled on the pin. Note that
403 * enable_irq_wake() will automatically enable wakeup detection.
404 */
405int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode)
406{
407 struct nmk_gpio_chip *nmk_chip;
408 unsigned long flags;
409
410 nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
411 if (!nmk_chip)
412 return -EINVAL;
413
414 spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
415 spin_lock(&nmk_chip->lock);
416
417 __nmk_gpio_set_slpm(nmk_chip, gpio - nmk_chip->chip.base, mode);
418
419 spin_unlock(&nmk_chip->lock);
420 spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
421
422 return 0;
423}
424
425/**
426 * nmk_gpio_set_pull() - enable/disable pull up/down on a gpio
427 * @gpio: pin number
428 * @pull: one of NMK_GPIO_PULL_DOWN, NMK_GPIO_PULL_UP, and NMK_GPIO_PULL_NONE
429 *
430 * Enables/disables pull up/down on a specified pin. This only takes effect if
431 * the pin is configured as an input (either explicitly or by the alternate
432 * function).
433 *
434 * NOTE: If enabling the pull up/down, the caller must ensure that the GPIO is
435 * configured as an input. Otherwise, due to the way the controller registers
436 * work, this function will change the value output on the pin.
437 */
438int nmk_gpio_set_pull(int gpio, enum nmk_gpio_pull pull)
439{
440 struct nmk_gpio_chip *nmk_chip;
441 unsigned long flags;
442
443 nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
444 if (!nmk_chip)
445 return -EINVAL;
446
447 spin_lock_irqsave(&nmk_chip->lock, flags);
448 __nmk_gpio_set_pull(nmk_chip, gpio - nmk_chip->chip.base, pull);
449 spin_unlock_irqrestore(&nmk_chip->lock, flags);
450
451 return 0;
452}
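A short usage sketch honoring the NOTE above: switch the line to input before enabling the pull, so the DATS/DATC side effect in __nmk_gpio_set_pull() cannot drive the pin. The calls below are standard gpiolib and this driver's API; error handling is elided:

	static void example_enable_pullup(int gpio)
	{
		gpio_direction_input(gpio);	/* input first, per the NOTE */
		nmk_gpio_set_pull(gpio, NMK_GPIO_PULL_UP);
	}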
453
454/* Mode functions */
455/**
456 * nmk_gpio_set_mode() - set the mux mode of a gpio pin
457 * @gpio: pin number
458 * @gpio_mode: one of NMK_GPIO_ALT_GPIO, NMK_GPIO_ALT_A,
459 * NMK_GPIO_ALT_B, and NMK_GPIO_ALT_C
460 *
461 * Sets the mode of the specified pin to one of the alternate functions or
462 * plain GPIO.
463 */
464int nmk_gpio_set_mode(int gpio, int gpio_mode)
465{
466 struct nmk_gpio_chip *nmk_chip;
467 unsigned long flags;
468
469 nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
470 if (!nmk_chip)
471 return -EINVAL;
472
473 spin_lock_irqsave(&nmk_chip->lock, flags);
474 __nmk_gpio_set_mode(nmk_chip, gpio - nmk_chip->chip.base, gpio_mode);
475 spin_unlock_irqrestore(&nmk_chip->lock, flags);
476
477 return 0;
478}
479EXPORT_SYMBOL(nmk_gpio_set_mode);
480
481int nmk_gpio_get_mode(int gpio)
482{
483 struct nmk_gpio_chip *nmk_chip;
484 u32 afunc, bfunc, bit;
485
486 nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
487 if (!nmk_chip)
488 return -EINVAL;
489
490 bit = 1 << (gpio - nmk_chip->chip.base);
491
492 afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & bit;
493 bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & bit;
494
495 return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0);
496}
497EXPORT_SYMBOL(nmk_gpio_get_mode);
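Since the return value ORs the two select bits, a pin with both its AFSLA and AFSLB bits set reads back as NMK_GPIO_ALT_A | NMK_GPIO_ALT_B, matching how __nmk_gpio_set_mode() encodes alternate function C. A hedged decode sketch (this file does not show the NMK_GPIO_ALT_C definition; the OR relationship is an assumption):

	static bool pin_is_alt_c(int gpio)
	{
		/* assumed: NMK_GPIO_ALT_C == (NMK_GPIO_ALT_A | NMK_GPIO_ALT_B) */
		return nmk_gpio_get_mode(gpio) ==
			(NMK_GPIO_ALT_A | NMK_GPIO_ALT_B);
	}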
498
499
500/* IRQ functions */
501static inline int nmk_gpio_get_bitmask(int gpio)
502{
503 return 1 << (gpio % 32);
504}
505
506static void nmk_gpio_irq_ack(struct irq_data *d)
507{
508 int gpio;
509 struct nmk_gpio_chip *nmk_chip;
510
511 gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
512 nmk_chip = irq_data_get_irq_chip_data(d);
513 if (!nmk_chip)
514 return;
515 writel(nmk_gpio_get_bitmask(gpio), nmk_chip->addr + NMK_GPIO_IC);
516}
517
518enum nmk_gpio_irq_type {
519 NORMAL,
520 WAKE,
521};
522
523static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
524 int gpio, enum nmk_gpio_irq_type which,
525 bool enable)
526{
527 u32 rimsc = which == WAKE ? NMK_GPIO_RWIMSC : NMK_GPIO_RIMSC;
528 u32 fimsc = which == WAKE ? NMK_GPIO_FWIMSC : NMK_GPIO_FIMSC;
529 u32 bitmask = nmk_gpio_get_bitmask(gpio);
530 u32 reg;
531
532 /* we must individually set/clear the two edges */
533 if (nmk_chip->edge_rising & bitmask) {
534 reg = readl(nmk_chip->addr + rimsc);
535 if (enable)
536 reg |= bitmask;
537 else
538 reg &= ~bitmask;
539 writel(reg, nmk_chip->addr + rimsc);
540 }
541 if (nmk_chip->edge_falling & bitmask) {
542 reg = readl(nmk_chip->addr + fimsc);
543 if (enable)
544 reg |= bitmask;
545 else
546 reg &= ~bitmask;
547 writel(reg, nmk_chip->addr + fimsc);
548 }
549}
550
551static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
552 int gpio, bool on)
553{
554 __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on);
555}
556
557static int nmk_gpio_irq_maskunmask(struct irq_data *d, bool enable)
558{
559 int gpio;
560 struct nmk_gpio_chip *nmk_chip;
561 unsigned long flags;
562 u32 bitmask;
563
564 gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
565 nmk_chip = irq_data_get_irq_chip_data(d);
566 bitmask = nmk_gpio_get_bitmask(gpio);
567 if (!nmk_chip)
568 return -EINVAL;
569
570 if (enable)
571 nmk_chip->enabled |= bitmask;
572 else
573 nmk_chip->enabled &= ~bitmask;
574
575 spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
576 spin_lock(&nmk_chip->lock);
577
578 __nmk_gpio_irq_modify(nmk_chip, gpio, NORMAL, enable);
579
580 if (!(nmk_chip->real_wake & bitmask))
581 __nmk_gpio_set_wake(nmk_chip, gpio, enable);
582
583 spin_unlock(&nmk_chip->lock);
584 spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
585
586 return 0;
587}
588
589static void nmk_gpio_irq_mask(struct irq_data *d)
590{
591 nmk_gpio_irq_maskunmask(d, false);
592}
593
594static void nmk_gpio_irq_unmask(struct irq_data *d)
595{
596 nmk_gpio_irq_maskunmask(d, true);
597}
598
599static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
600{
601 struct nmk_gpio_chip *nmk_chip;
602 unsigned long flags;
603 u32 bitmask;
604 int gpio;
605
606 gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
607 nmk_chip = irq_data_get_irq_chip_data(d);
608 if (!nmk_chip)
609 return -EINVAL;
610 bitmask = nmk_gpio_get_bitmask(gpio);
611
612 spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
613 spin_lock(&nmk_chip->lock);
614
615 if (!(nmk_chip->enabled & bitmask))
616 __nmk_gpio_set_wake(nmk_chip, gpio, on);
617
618 if (on)
619 nmk_chip->real_wake |= bitmask;
620 else
621 nmk_chip->real_wake &= ~bitmask;
622
623 spin_unlock(&nmk_chip->lock);
624 spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
625
626 return 0;
627}
628
629static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
630{
631 bool enabled, wake = irqd_is_wakeup_set(d);
632 int gpio;
633 struct nmk_gpio_chip *nmk_chip;
634 unsigned long flags;
635 u32 bitmask;
636
637 gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
638 nmk_chip = irq_data_get_irq_chip_data(d);
639 bitmask = nmk_gpio_get_bitmask(gpio);
640 if (!nmk_chip)
641 return -EINVAL;
642
643 if (type & IRQ_TYPE_LEVEL_HIGH)
644 return -EINVAL;
645 if (type & IRQ_TYPE_LEVEL_LOW)
646 return -EINVAL;
647
648 enabled = nmk_chip->enabled & bitmask;
649
650 spin_lock_irqsave(&nmk_chip->lock, flags);
651
652 if (enabled)
653 __nmk_gpio_irq_modify(nmk_chip, gpio, NORMAL, false);
654
655 if (enabled || wake)
656 __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, false);
657
658 nmk_chip->edge_rising &= ~bitmask;
659 if (type & IRQ_TYPE_EDGE_RISING)
660 nmk_chip->edge_rising |= bitmask;
661
662 nmk_chip->edge_falling &= ~bitmask;
663 if (type & IRQ_TYPE_EDGE_FALLING)
664 nmk_chip->edge_falling |= bitmask;
665
666 if (enabled)
667 __nmk_gpio_irq_modify(nmk_chip, gpio, NORMAL, true);
668
669 if (enabled || wake)
670 __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, true);
671
672 spin_unlock_irqrestore(&nmk_chip->lock, flags);
673
674 return 0;
675}
676
677static struct irq_chip nmk_gpio_irq_chip = {
678 .name = "Nomadik-GPIO",
679 .irq_ack = nmk_gpio_irq_ack,
680 .irq_mask = nmk_gpio_irq_mask,
681 .irq_unmask = nmk_gpio_irq_unmask,
682 .irq_set_type = nmk_gpio_irq_set_type,
683 .irq_set_wake = nmk_gpio_irq_set_wake,
684};
685
686static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
687 u32 status)
688{
689 struct nmk_gpio_chip *nmk_chip;
690 struct irq_chip *host_chip = irq_get_chip(irq);
691 unsigned int first_irq;
692
693 chained_irq_enter(host_chip, desc);
694
695 nmk_chip = irq_get_handler_data(irq);
696 first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
697 while (status) {
698 int bit = __ffs(status);
699
700 generic_handle_irq(first_irq + bit);
701 status &= ~BIT(bit);
702 }
703
704 chained_irq_exit(host_chip, desc);
705}
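A worked trace of the dispatch loop above, for a hypothetical pending mask:

	/* status = 0x0a: bits 1 and 3 pending.
	 * __ffs(0x0a) == 1 -> generic_handle_irq(first_irq + 1);
	 *   status &= ~BIT(1) leaves 0x08.
	 * __ffs(0x08) == 3 -> generic_handle_irq(first_irq + 3);
	 *   status &= ~BIT(3) leaves 0, and the loop exits.
	 */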
706
707static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
708{
709 struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq);
710 u32 status = readl(nmk_chip->addr + NMK_GPIO_IS);
711
712 __nmk_gpio_irq_handler(irq, desc, status);
713}
714
715static void nmk_gpio_secondary_irq_handler(unsigned int irq,
716 struct irq_desc *desc)
717{
718 struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq);
719 u32 status = nmk_chip->get_secondary_status(nmk_chip->bank);
720
721 __nmk_gpio_irq_handler(irq, desc, status);
722}
723
724static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip)
725{
726 unsigned int first_irq;
727 int i;
728
729 first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
730 for (i = first_irq; i < first_irq + nmk_chip->chip.ngpio; i++) {
731 irq_set_chip_and_handler(i, &nmk_gpio_irq_chip,
732 handle_edge_irq);
733 set_irq_flags(i, IRQF_VALID);
734 irq_set_chip_data(i, nmk_chip);
735 irq_set_irq_type(i, IRQ_TYPE_EDGE_FALLING);
736 }
737
738 irq_set_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler);
739 irq_set_handler_data(nmk_chip->parent_irq, nmk_chip);
740
741 if (nmk_chip->secondary_parent_irq >= 0) {
742 irq_set_chained_handler(nmk_chip->secondary_parent_irq,
743 nmk_gpio_secondary_irq_handler);
744 irq_set_handler_data(nmk_chip->secondary_parent_irq, nmk_chip);
745 }
746
747 return 0;
748}
749
750/* I/O Functions */
751static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset)
752{
753 struct nmk_gpio_chip *nmk_chip =
754 container_of(chip, struct nmk_gpio_chip, chip);
755
756 writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC);
757 return 0;
758}
759
760static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned offset)
761{
762 struct nmk_gpio_chip *nmk_chip =
763 container_of(chip, struct nmk_gpio_chip, chip);
764 u32 bit = 1 << offset;
765
766 return (readl(nmk_chip->addr + NMK_GPIO_DAT) & bit) != 0;
767}
768
769static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned offset,
770 int val)
771{
772 struct nmk_gpio_chip *nmk_chip =
773 container_of(chip, struct nmk_gpio_chip, chip);
774
775 __nmk_gpio_set_output(nmk_chip, offset, val);
776}
777
778static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned offset,
779 int val)
780{
781 struct nmk_gpio_chip *nmk_chip =
782 container_of(chip, struct nmk_gpio_chip, chip);
783
784 __nmk_gpio_make_output(nmk_chip, offset, val);
785
786 return 0;
787}
788
789static int nmk_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
790{
791 struct nmk_gpio_chip *nmk_chip =
792 container_of(chip, struct nmk_gpio_chip, chip);
793
794 return NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base) + offset;
795}
796
797#ifdef CONFIG_DEBUG_FS
798
799#include <linux/seq_file.h>
800
801static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
802{
803 int mode;
804 unsigned i;
805 unsigned gpio = chip->base;
806 int is_out;
807 struct nmk_gpio_chip *nmk_chip =
808 container_of(chip, struct nmk_gpio_chip, chip);
809 const char *modes[] = {
810 [NMK_GPIO_ALT_GPIO] = "gpio",
811 [NMK_GPIO_ALT_A] = "altA",
812 [NMK_GPIO_ALT_B] = "altB",
813 [NMK_GPIO_ALT_C] = "altC",
814 };
815
816 for (i = 0; i < chip->ngpio; i++, gpio++) {
817 const char *label = gpiochip_is_requested(chip, i);
818 bool pull;
819 u32 bit = 1 << i;
820
821 is_out = readl(nmk_chip->addr + NMK_GPIO_DIR) & bit;
822 pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & bit);
823 mode = nmk_gpio_get_mode(gpio);
824 seq_printf(s, " gpio-%-3d (%-20.20s) %s %s %s %s",
825 gpio, label ?: "(none)",
826 is_out ? "out" : "in ",
827 chip->get
828 ? (chip->get(chip, i) ? "hi" : "lo")
829 : "? ",
830 (mode < 0) ? "unknown" : modes[mode],
831 pull ? "pull" : "none");
832
833 if (label && !is_out) {
834 int irq = gpio_to_irq(gpio);
835 struct irq_desc *desc = irq_to_desc(irq);
836
837 /* This races with request_irq(), set_irq_type(),
838 * and set_irq_wake() ... but those are "rare".
839 */
840 if (irq >= 0 && desc->action) {
841 char *trigger;
842 u32 bitmask = nmk_gpio_get_bitmask(gpio);
843
844 if (nmk_chip->edge_rising & bitmask)
845 trigger = "edge-rising";
846 else if (nmk_chip->edge_falling & bitmask)
847 trigger = "edge-falling";
848 else
849 trigger = "edge-undefined";
850
851 seq_printf(s, " irq-%d %s%s",
852 irq, trigger,
853 irqd_is_wakeup_set(&desc->irq_data)
854 ? " wakeup" : "");
855 }
856 }
857
858 seq_printf(s, "\n");
859 }
860}
861
862#else
863#define nmk_gpio_dbg_show NULL
864#endif
865
866/* This structure is replicated for each GPIO block allocated at probe time */
867static struct gpio_chip nmk_gpio_template = {
868 .direction_input = nmk_gpio_make_input,
869 .get = nmk_gpio_get_input,
870 .direction_output = nmk_gpio_make_output,
871 .set = nmk_gpio_set_output,
872 .to_irq = nmk_gpio_to_irq,
873 .dbg_show = nmk_gpio_dbg_show,
874 .can_sleep = 0,
875};
876
877/*
878 * Called from the suspend/resume path to only keep the real wakeup interrupts
879 * (those that have had set_irq_wake() called on them) as wakeup interrupts,
880 * and not the rest of the interrupts which we needed to have as wakeups for
881 * cpuidle.
882 *
883 * PM ops are not used since this needs to be done at the end, after all the
884 * other drivers are done with their suspend callbacks.
885 */
886void nmk_gpio_wakeups_suspend(void)
887{
888 int i;
889
890 for (i = 0; i < NUM_BANKS; i++) {
891 struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
892
893 if (!chip)
894 break;
895
896 chip->rwimsc = readl(chip->addr + NMK_GPIO_RWIMSC);
897 chip->fwimsc = readl(chip->addr + NMK_GPIO_FWIMSC);
898
899 writel(chip->rwimsc & chip->real_wake,
900 chip->addr + NMK_GPIO_RWIMSC);
901 writel(chip->fwimsc & chip->real_wake,
902 chip->addr + NMK_GPIO_FWIMSC);
903
904 if (cpu_is_u8500v2()) {
905 chip->slpm = readl(chip->addr + NMK_GPIO_SLPC);
906
907 /* 0 -> wakeup enable */
908 writel(~chip->real_wake, chip->addr + NMK_GPIO_SLPC);
909 }
910 }
911}
912
913void nmk_gpio_wakeups_resume(void)
914{
915 int i;
916
917 for (i = 0; i < NUM_BANKS; i++) {
918 struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
919
920 if (!chip)
921 break;
922
923 writel(chip->rwimsc, chip->addr + NMK_GPIO_RWIMSC);
924 writel(chip->fwimsc, chip->addr + NMK_GPIO_FWIMSC);
925
926 if (cpu_is_u8500v2())
927 writel(chip->slpm, chip->addr + NMK_GPIO_SLPC);
928 }
929}
930
931/*
932 * Read the pull up/pull down status.
933 * A bit set in 'pull_up' means that pull up
934 * is selected if pull is enabled in the PDIS register.
935 * Note: only pull up/down set via this driver can
936 * be detected due to HW limitations.
937 */
938void nmk_gpio_read_pull(int gpio_bank, u32 *pull_up)
939{
940 if (gpio_bank < NUM_BANKS) {
941 struct nmk_gpio_chip *chip = nmk_gpio_chips[gpio_bank];
942
943 if (!chip)
944 return;
945
946 *pull_up = chip->pull_up;
947 }
948}
949
950static int __devinit nmk_gpio_probe(struct platform_device *dev)
951{
952 struct nmk_gpio_platform_data *pdata = dev->dev.platform_data;
953 struct nmk_gpio_chip *nmk_chip;
954 struct gpio_chip *chip;
955 struct resource *res;
956 struct clk *clk;
957 int secondary_irq;
958 int irq;
959 int ret;
960
961 if (!pdata)
962 return -ENODEV;
963
964 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
965 if (!res) {
966 ret = -ENOENT;
967 goto out;
968 }
969
970 irq = platform_get_irq(dev, 0);
971 if (irq < 0) {
972 ret = irq;
973 goto out;
974 }
975
976 secondary_irq = platform_get_irq(dev, 1);
977 if (secondary_irq >= 0 && !pdata->get_secondary_status) {
978 ret = -EINVAL;
979 goto out;
980 }
981
982 if (request_mem_region(res->start, resource_size(res),
983 dev_name(&dev->dev)) == NULL) {
984 ret = -EBUSY;
985 goto out;
986 }
987
988 clk = clk_get(&dev->dev, NULL);
989 if (IS_ERR(clk)) {
990 ret = PTR_ERR(clk);
991 goto out_release;
992 }
993
994 clk_enable(clk);
995
996 nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
997 if (!nmk_chip) {
998 ret = -ENOMEM;
999 goto out_clk;
1000 }
1001 /*
1002	 * The virt address in nmk_chip->addr is in the Nomadik register space,
1003	 * so we can simply convert the resource address without remapping.
1004 */
1005 nmk_chip->bank = dev->id;
1006 nmk_chip->clk = clk;
1007 nmk_chip->addr = io_p2v(res->start);
1008 nmk_chip->chip = nmk_gpio_template;
1009 nmk_chip->parent_irq = irq;
1010 nmk_chip->secondary_parent_irq = secondary_irq;
1011 nmk_chip->get_secondary_status = pdata->get_secondary_status;
1012 nmk_chip->set_ioforce = pdata->set_ioforce;
1013 spin_lock_init(&nmk_chip->lock);
1014
1015 chip = &nmk_chip->chip;
1016 chip->base = pdata->first_gpio;
1017 chip->ngpio = pdata->num_gpio;
1018 chip->label = pdata->name ?: dev_name(&dev->dev);
1019 chip->dev = &dev->dev;
1020 chip->owner = THIS_MODULE;
1021
1022 ret = gpiochip_add(&nmk_chip->chip);
1023 if (ret)
1024 goto out_free;
1025
1026 BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
1027
1028 nmk_gpio_chips[nmk_chip->bank] = nmk_chip;
1029 platform_set_drvdata(dev, nmk_chip);
1030
1031 nmk_gpio_init_irq(nmk_chip);
1032
1033 dev_info(&dev->dev, "Bits %i-%i at address %p\n",
1034 nmk_chip->chip.base, nmk_chip->chip.base+31, nmk_chip->addr);
1035 return 0;
1036
1037out_free:
1038 kfree(nmk_chip);
1039out_clk:
1040 clk_disable(clk);
1041 clk_put(clk);
1042out_release:
1043 release_mem_region(res->start, resource_size(res));
1044out:
1045 dev_err(&dev->dev, "Failure %i for GPIO %i-%i\n", ret,
1046 pdata->first_gpio, pdata->first_gpio+31);
1047 return ret;
1048}
1049
1050static struct platform_driver nmk_gpio_driver = {
1051 .driver = {
1052 .owner = THIS_MODULE,
1053 .name = "gpio",
1054 },
1055 .probe = nmk_gpio_probe,
1056};
1057
1058static int __init nmk_gpio_init(void)
1059{
1060 return platform_driver_register(&nmk_gpio_driver);
1061}
1062
1063core_initcall(nmk_gpio_init);
1064
1065MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini");
1066MODULE_DESCRIPTION("Nomadik GPIO Driver");
1067MODULE_LICENSE("GPL");
1068
1069
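Throughout this driver a global GPIO number splits into a bank index and a per-bank bit, as in nmk_gpio_get_bitmask() above. A self-contained restatement of that math (NMK_GPIO_PER_CHIP repeated here only for illustration):

	#define NMK_GPIO_PER_CHIP 32

	static inline int gpio_to_bank(int gpio)
	{
		return gpio / NMK_GPIO_PER_CHIP;	/* index into nmk_gpio_chips[] */
	}

	static inline unsigned gpio_to_bit(int gpio)
	{
		return 1U << (gpio % NMK_GPIO_PER_CHIP);	/* bit within the bank */
	}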
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
new file mode 100644
index 000000000000..6c51191da567
--- /dev/null
+++ b/drivers/gpio/gpio-omap.c
@@ -0,0 +1,2007 @@
1/*
2 * Support functions for OMAP GPIO
3 *
4 * Copyright (C) 2003-2005 Nokia Corporation
5 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
6 *
7 * Copyright (C) 2009 Texas Instruments
8 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/init.h>
16#include <linux/module.h>
17#include <linux/interrupt.h>
18#include <linux/syscore_ops.h>
19#include <linux/err.h>
20#include <linux/clk.h>
21#include <linux/io.h>
22#include <linux/slab.h>
23#include <linux/pm_runtime.h>
24
25#include <mach/hardware.h>
26#include <asm/irq.h>
27#include <mach/irqs.h>
28#include <mach/gpio.h>
29#include <asm/mach/irq.h>
30
31struct gpio_bank {
32 unsigned long pbase;
33 void __iomem *base;
34 u16 irq;
35 u16 virtual_irq_start;
36 int method;
37#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
38 u32 suspend_wakeup;
39 u32 saved_wakeup;
40#endif
41 u32 non_wakeup_gpios;
42 u32 enabled_non_wakeup_gpios;
43
44 u32 saved_datain;
45 u32 saved_fallingdetect;
46 u32 saved_risingdetect;
47 u32 level_mask;
48 u32 toggle_mask;
49 spinlock_t lock;
50 struct gpio_chip chip;
51 struct clk *dbck;
52 u32 mod_usage;
53 u32 dbck_enable_mask;
54 struct device *dev;
55 bool dbck_flag;
56 int stride;
57};
58
59#ifdef CONFIG_ARCH_OMAP3
60struct omap3_gpio_regs {
61 u32 irqenable1;
62 u32 irqenable2;
63 u32 wake_en;
64 u32 ctrl;
65 u32 oe;
66 u32 leveldetect0;
67 u32 leveldetect1;
68 u32 risingdetect;
69 u32 fallingdetect;
70 u32 dataout;
71};
72
73static struct omap3_gpio_regs gpio_context[OMAP34XX_NR_GPIOS];
74#endif
75
76/*
77 * TODO: Clean up gpio_bank usage, as it holds information
78 * related to all instances of the device
79 */
80static struct gpio_bank *gpio_bank;
81
82static int bank_width;
83
84/* TODO: Analyze removing gpio_bank_count usage from driver code */
85int gpio_bank_count;
86
87static inline struct gpio_bank *get_gpio_bank(int gpio)
88{
89 if (cpu_is_omap15xx()) {
90 if (OMAP_GPIO_IS_MPUIO(gpio))
91 return &gpio_bank[0];
92 return &gpio_bank[1];
93 }
94 if (cpu_is_omap16xx()) {
95 if (OMAP_GPIO_IS_MPUIO(gpio))
96 return &gpio_bank[0];
97 return &gpio_bank[1 + (gpio >> 4)];
98 }
99 if (cpu_is_omap7xx()) {
100 if (OMAP_GPIO_IS_MPUIO(gpio))
101 return &gpio_bank[0];
102 return &gpio_bank[1 + (gpio >> 5)];
103 }
104 if (cpu_is_omap24xx())
105 return &gpio_bank[gpio >> 5];
106 if (cpu_is_omap34xx() || cpu_is_omap44xx())
107 return &gpio_bank[gpio >> 5];
108 BUG();
109 return NULL;
110}
111
112static inline int get_gpio_index(int gpio)
113{
114 if (cpu_is_omap7xx())
115 return gpio & 0x1f;
116 if (cpu_is_omap24xx())
117 return gpio & 0x1f;
118 if (cpu_is_omap34xx() || cpu_is_omap44xx())
119 return gpio & 0x1f;
120 return gpio & 0x0f;
121}
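A worked example of the two lookups above: on an omap24xx/34xx/44xx part, global GPIO 37 maps to gpio_bank[37 >> 5] == gpio_bank[1], at bit index 37 & 0x1f == 5 within that bank. A hedged sketch of the same math:

	static void example_lookup(void)
	{
		int gpio = 37;			/* arbitrary */
		int bank_nr = gpio >> 5;	/* == 1: 32 lines per bank */
		int index = gpio & 0x1f;	/* == 5: bit within the bank */

		(void)bank_nr;
		(void)index;
	}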
122
123static inline int gpio_valid(int gpio)
124{
125 if (gpio < 0)
126 return -1;
127 if (cpu_class_is_omap1() && OMAP_GPIO_IS_MPUIO(gpio)) {
128 if (gpio >= OMAP_MAX_GPIO_LINES + 16)
129 return -1;
130 return 0;
131 }
132 if (cpu_is_omap15xx() && gpio < 16)
133 return 0;
134 if ((cpu_is_omap16xx()) && gpio < 64)
135 return 0;
136 if (cpu_is_omap7xx() && gpio < 192)
137 return 0;
138 if (cpu_is_omap2420() && gpio < 128)
139 return 0;
140 if (cpu_is_omap2430() && gpio < 160)
141 return 0;
142 if ((cpu_is_omap34xx() || cpu_is_omap44xx()) && gpio < 192)
143 return 0;
144 return -1;
145}
146
147static int check_gpio(int gpio)
148{
149 if (unlikely(gpio_valid(gpio) < 0)) {
150 printk(KERN_ERR "omap-gpio: invalid GPIO %d\n", gpio);
151 dump_stack();
152 return -1;
153 }
154 return 0;
155}
156
157static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
158{
159 void __iomem *reg = bank->base;
160 u32 l;
161
162 switch (bank->method) {
163#ifdef CONFIG_ARCH_OMAP1
164 case METHOD_MPUIO:
165 reg += OMAP_MPUIO_IO_CNTL / bank->stride;
166 break;
167#endif
168#ifdef CONFIG_ARCH_OMAP15XX
169 case METHOD_GPIO_1510:
170 reg += OMAP1510_GPIO_DIR_CONTROL;
171 break;
172#endif
173#ifdef CONFIG_ARCH_OMAP16XX
174 case METHOD_GPIO_1610:
175 reg += OMAP1610_GPIO_DIRECTION;
176 break;
177#endif
178#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
179 case METHOD_GPIO_7XX:
180 reg += OMAP7XX_GPIO_DIR_CONTROL;
181 break;
182#endif
183#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
184 case METHOD_GPIO_24XX:
185 reg += OMAP24XX_GPIO_OE;
186 break;
187#endif
188#if defined(CONFIG_ARCH_OMAP4)
189 case METHOD_GPIO_44XX:
190 reg += OMAP4_GPIO_OE;
191 break;
192#endif
193 default:
194 WARN_ON(1);
195 return;
196 }
197 l = __raw_readl(reg);
198 if (is_input)
199 l |= 1 << gpio;
200 else
201 l &= ~(1 << gpio);
202 __raw_writel(l, reg);
203}
204
205static void _set_gpio_dataout(struct gpio_bank *bank, int gpio, int enable)
206{
207 void __iomem *reg = bank->base;
208 u32 l = 0;
209
210 switch (bank->method) {
211#ifdef CONFIG_ARCH_OMAP1
212 case METHOD_MPUIO:
213 reg += OMAP_MPUIO_OUTPUT / bank->stride;
214 l = __raw_readl(reg);
215 if (enable)
216 l |= 1 << gpio;
217 else
218 l &= ~(1 << gpio);
219 break;
220#endif
221#ifdef CONFIG_ARCH_OMAP15XX
222 case METHOD_GPIO_1510:
223 reg += OMAP1510_GPIO_DATA_OUTPUT;
224 l = __raw_readl(reg);
225 if (enable)
226 l |= 1 << gpio;
227 else
228 l &= ~(1 << gpio);
229 break;
230#endif
231#ifdef CONFIG_ARCH_OMAP16XX
232 case METHOD_GPIO_1610:
233 if (enable)
234 reg += OMAP1610_GPIO_SET_DATAOUT;
235 else
236 reg += OMAP1610_GPIO_CLEAR_DATAOUT;
237 l = 1 << gpio;
238 break;
239#endif
240#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
241 case METHOD_GPIO_7XX:
242 reg += OMAP7XX_GPIO_DATA_OUTPUT;
243 l = __raw_readl(reg);
244 if (enable)
245 l |= 1 << gpio;
246 else
247 l &= ~(1 << gpio);
248 break;
249#endif
250#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
251 case METHOD_GPIO_24XX:
252 if (enable)
253 reg += OMAP24XX_GPIO_SETDATAOUT;
254 else
255 reg += OMAP24XX_GPIO_CLEARDATAOUT;
256 l = 1 << gpio;
257 break;
258#endif
259#ifdef CONFIG_ARCH_OMAP4
260 case METHOD_GPIO_44XX:
261 if (enable)
262 reg += OMAP4_GPIO_SETDATAOUT;
263 else
264 reg += OMAP4_GPIO_CLEARDATAOUT;
265 l = 1 << gpio;
266 break;
267#endif
268 default:
269 WARN_ON(1);
270 return;
271 }
272 __raw_writel(l, reg);
273}
274
275static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
276{
277 void __iomem *reg;
278
279 if (check_gpio(gpio) < 0)
280 return -EINVAL;
281 reg = bank->base;
282 switch (bank->method) {
283#ifdef CONFIG_ARCH_OMAP1
284 case METHOD_MPUIO:
285 reg += OMAP_MPUIO_INPUT_LATCH / bank->stride;
286 break;
287#endif
288#ifdef CONFIG_ARCH_OMAP15XX
289 case METHOD_GPIO_1510:
290 reg += OMAP1510_GPIO_DATA_INPUT;
291 break;
292#endif
293#ifdef CONFIG_ARCH_OMAP16XX
294 case METHOD_GPIO_1610:
295 reg += OMAP1610_GPIO_DATAIN;
296 break;
297#endif
298#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
299 case METHOD_GPIO_7XX:
300 reg += OMAP7XX_GPIO_DATA_INPUT;
301 break;
302#endif
303#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
304 case METHOD_GPIO_24XX:
305 reg += OMAP24XX_GPIO_DATAIN;
306 break;
307#endif
308#ifdef CONFIG_ARCH_OMAP4
309 case METHOD_GPIO_44XX:
310 reg += OMAP4_GPIO_DATAIN;
311 break;
312#endif
313 default:
314 return -EINVAL;
315 }
316 return (__raw_readl(reg)
317 & (1 << get_gpio_index(gpio))) != 0;
318}
319
320static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
321{
322 void __iomem *reg;
323
324 if (check_gpio(gpio) < 0)
325 return -EINVAL;
326 reg = bank->base;
327
328 switch (bank->method) {
329#ifdef CONFIG_ARCH_OMAP1
330 case METHOD_MPUIO:
331 reg += OMAP_MPUIO_OUTPUT / bank->stride;
332 break;
333#endif
334#ifdef CONFIG_ARCH_OMAP15XX
335 case METHOD_GPIO_1510:
336 reg += OMAP1510_GPIO_DATA_OUTPUT;
337 break;
338#endif
339#ifdef CONFIG_ARCH_OMAP16XX
340 case METHOD_GPIO_1610:
341 reg += OMAP1610_GPIO_DATAOUT;
342 break;
343#endif
344#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
345 case METHOD_GPIO_7XX:
346 reg += OMAP7XX_GPIO_DATA_OUTPUT;
347 break;
348#endif
349#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
350 case METHOD_GPIO_24XX:
351 reg += OMAP24XX_GPIO_DATAOUT;
352 break;
353#endif
354#ifdef CONFIG_ARCH_OMAP4
355 case METHOD_GPIO_44XX:
356 reg += OMAP4_GPIO_DATAOUT;
357 break;
358#endif
359 default:
360 return -EINVAL;
361 }
362
363 return (__raw_readl(reg) & (1 << get_gpio_index(gpio))) != 0;
364}
365
366#define MOD_REG_BIT(reg, bit_mask, set) \
367do { \
368 int l = __raw_readl(base + reg); \
369 if (set) l |= bit_mask; \
370 else l &= ~bit_mask; \
371 __raw_writel(l, base + reg); \
372} while (0)
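Written out long-hand, one invocation of the helper above is a plain read-modify-write on a register reachable from the local 'base'. For example, MOD_REG_BIT(OMAP4_GPIO_RISINGDETECT, gpio_bit, set) in set_24xx_gpio_triggering() below expands to roughly:

	{
		int l = __raw_readl(base + OMAP4_GPIO_RISINGDETECT);

		if (set)		/* here: trigger & IRQ_TYPE_EDGE_RISING */
			l |= gpio_bit;
		else
			l &= ~gpio_bit;
		__raw_writel(l, base + OMAP4_GPIO_RISINGDETECT);
	}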
373
374/**
375 * _set_gpio_debounce - low level gpio debounce time
376 * @bank: the gpio bank we're acting upon
377 * @gpio: the gpio number on this @gpio
378 * @debounce: debounce time to use
379 *
380 * OMAP's debounce time is in 31us steps so we need
381 * to convert and round up to the closest unit.
382 */
383static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
384 unsigned debounce)
385{
386 void __iomem *reg = bank->base;
387 u32 val;
388 u32 l;
389
390 if (!bank->dbck_flag)
391 return;
392
393 if (debounce < 32)
394 debounce = 0x01;
395 else if (debounce > 7936)
396 debounce = 0xff;
397 else
398 debounce = (debounce / 0x1f) - 1;
399
400 l = 1 << get_gpio_index(gpio);
401
402 if (bank->method == METHOD_GPIO_44XX)
403 reg += OMAP4_GPIO_DEBOUNCINGTIME;
404 else
405 reg += OMAP24XX_GPIO_DEBOUNCE_VAL;
406
407 __raw_writel(debounce, reg);
408
409 reg = bank->base;
410 if (bank->method == METHOD_GPIO_44XX)
411 reg += OMAP4_GPIO_DEBOUNCENABLE;
412 else
413 reg += OMAP24XX_GPIO_DEBOUNCE_EN;
414
415 val = __raw_readl(reg);
416
417 if (debounce) {
418 val |= l;
419 clk_enable(bank->dbck);
420 } else {
421 val &= ~l;
422 clk_disable(bank->dbck);
423 }
424 bank->dbck_enable_mask = val;
425
426 __raw_writel(val, reg);
427}
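To make the 31 us quantization above concrete: the register value encodes roughly (value + 1) debounce periods of ~31 us each, so a request of 1000 us becomes (1000 / 0x1f) - 1 == 31, i.e. 32 periods (~992 us). A hedged restatement of just the conversion:

	static unsigned debounce_usec_to_reg(unsigned usec)
	{
		if (usec < 32)
			return 0x01;		/* clamp to the minimum */
		if (usec > 7936)
			return 0xff;		/* clamp to the maximum, 256 periods */
		return (usec / 0x1f) - 1;	/* e.g. 1000 us -> 31 */
	}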
428
429#ifdef CONFIG_ARCH_OMAP2PLUS
430static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
431 int trigger)
432{
433 void __iomem *base = bank->base;
434 u32 gpio_bit = 1 << gpio;
435 u32 val;
436
437 if (cpu_is_omap44xx()) {
438 MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit,
439 trigger & IRQ_TYPE_LEVEL_LOW);
440 MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT1, gpio_bit,
441 trigger & IRQ_TYPE_LEVEL_HIGH);
442 MOD_REG_BIT(OMAP4_GPIO_RISINGDETECT, gpio_bit,
443 trigger & IRQ_TYPE_EDGE_RISING);
444 MOD_REG_BIT(OMAP4_GPIO_FALLINGDETECT, gpio_bit,
445 trigger & IRQ_TYPE_EDGE_FALLING);
446 } else {
447 MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
448 trigger & IRQ_TYPE_LEVEL_LOW);
449 MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
450 trigger & IRQ_TYPE_LEVEL_HIGH);
451 MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
452 trigger & IRQ_TYPE_EDGE_RISING);
453 MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
454 trigger & IRQ_TYPE_EDGE_FALLING);
455 }
456 if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
457 if (cpu_is_omap44xx()) {
458 if (trigger != 0)
459 __raw_writel(1 << gpio, bank->base+
460 OMAP4_GPIO_IRQWAKEN0);
461 else {
462 val = __raw_readl(bank->base +
463 OMAP4_GPIO_IRQWAKEN0);
464 __raw_writel(val & (~(1 << gpio)), bank->base +
465 OMAP4_GPIO_IRQWAKEN0);
466 }
467 } else {
468 /*
469 * GPIO wakeup request can only be generated on edge
470 * transitions
471 */
472 if (trigger & IRQ_TYPE_EDGE_BOTH)
473 __raw_writel(1 << gpio, bank->base
474 + OMAP24XX_GPIO_SETWKUENA);
475 else
476 __raw_writel(1 << gpio, bank->base
477 + OMAP24XX_GPIO_CLEARWKUENA);
478 }
479 }
480 /* This part needs to be executed always for OMAP34xx */
481 if (cpu_is_omap34xx() || (bank->non_wakeup_gpios & gpio_bit)) {
482 /*
483 * Log the edge gpio and manually trigger the IRQ
484 * after resume if the input level changes
485 * to avoid irq lost during PER RET/OFF mode
486 * Applies for omap2 non-wakeup gpio and all omap3 gpios
487 */
488 if (trigger & IRQ_TYPE_EDGE_BOTH)
489 bank->enabled_non_wakeup_gpios |= gpio_bit;
490 else
491 bank->enabled_non_wakeup_gpios &= ~gpio_bit;
492 }
493
494 if (cpu_is_omap44xx()) {
495 bank->level_mask =
496 __raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT0) |
497 __raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT1);
498 } else {
499 bank->level_mask =
500 __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0) |
501 __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
502 }
503}
504#endif
505
506#ifdef CONFIG_ARCH_OMAP1
507/*
508 * This only applies to chips that can't do both rising and falling edge
509 * detection at once. For all other chips, this function is a noop.
510 */
511static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
512{
513 void __iomem *reg = bank->base;
514 u32 l = 0;
515
516 switch (bank->method) {
517 case METHOD_MPUIO:
518 reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
519 break;
520#ifdef CONFIG_ARCH_OMAP15XX
521 case METHOD_GPIO_1510:
522 reg += OMAP1510_GPIO_INT_CONTROL;
523 break;
524#endif
525#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
526 case METHOD_GPIO_7XX:
527 reg += OMAP7XX_GPIO_INT_CONTROL;
528 break;
529#endif
530 default:
531 return;
532 }
533
534 l = __raw_readl(reg);
535 if ((l >> gpio) & 1)
536 l &= ~(1 << gpio);
537 else
538 l |= 1 << gpio;
539
540 __raw_writel(l, reg);
541}
542#endif
543
544static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
545{
546 void __iomem *reg = bank->base;
547 u32 l = 0;
548
549 switch (bank->method) {
550#ifdef CONFIG_ARCH_OMAP1
551 case METHOD_MPUIO:
552 reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
553 l = __raw_readl(reg);
554 if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
555 bank->toggle_mask |= 1 << gpio;
556 if (trigger & IRQ_TYPE_EDGE_RISING)
557 l |= 1 << gpio;
558 else if (trigger & IRQ_TYPE_EDGE_FALLING)
559 l &= ~(1 << gpio);
560 else
561 goto bad;
562 break;
563#endif
564#ifdef CONFIG_ARCH_OMAP15XX
565 case METHOD_GPIO_1510:
566 reg += OMAP1510_GPIO_INT_CONTROL;
567 l = __raw_readl(reg);
568 if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
569 bank->toggle_mask |= 1 << gpio;
570 if (trigger & IRQ_TYPE_EDGE_RISING)
571 l |= 1 << gpio;
572 else if (trigger & IRQ_TYPE_EDGE_FALLING)
573 l &= ~(1 << gpio);
574 else
575 goto bad;
576 break;
577#endif
578#ifdef CONFIG_ARCH_OMAP16XX
579 case METHOD_GPIO_1610:
580 if (gpio & 0x08)
581 reg += OMAP1610_GPIO_EDGE_CTRL2;
582 else
583 reg += OMAP1610_GPIO_EDGE_CTRL1;
584 gpio &= 0x07;
585 l = __raw_readl(reg);
586 l &= ~(3 << (gpio << 1));
587 if (trigger & IRQ_TYPE_EDGE_RISING)
588 l |= 2 << (gpio << 1);
589 if (trigger & IRQ_TYPE_EDGE_FALLING)
590 l |= 1 << (gpio << 1);
591 if (trigger)
592 /* Enable wake-up during idle for dynamic tick */
593 __raw_writel(1 << gpio, bank->base + OMAP1610_GPIO_SET_WAKEUPENA);
594 else
595 __raw_writel(1 << gpio, bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA);
596 break;
597#endif
598#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
599 case METHOD_GPIO_7XX:
600 reg += OMAP7XX_GPIO_INT_CONTROL;
601 l = __raw_readl(reg);
602 if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
603 bank->toggle_mask |= 1 << gpio;
604 if (trigger & IRQ_TYPE_EDGE_RISING)
605 l |= 1 << gpio;
606 else if (trigger & IRQ_TYPE_EDGE_FALLING)
607 l &= ~(1 << gpio);
608 else
609 goto bad;
610 break;
611#endif
612#ifdef CONFIG_ARCH_OMAP2PLUS
613 case METHOD_GPIO_24XX:
614 case METHOD_GPIO_44XX:
615 set_24xx_gpio_triggering(bank, gpio, trigger);
616 return 0;
617#endif
618 default:
619 goto bad;
620 }
621 __raw_writel(l, reg);
622 return 0;
623bad:
624 return -EINVAL;
625}
626
627static int gpio_irq_type(struct irq_data *d, unsigned type)
628{
629 struct gpio_bank *bank;
630 unsigned gpio;
631 int retval;
632 unsigned long flags;
633
634 if (!cpu_class_is_omap2() && d->irq > IH_MPUIO_BASE)
635 gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
636 else
637 gpio = d->irq - IH_GPIO_BASE;
638
639 if (check_gpio(gpio) < 0)
640 return -EINVAL;
641
642 if (type & ~IRQ_TYPE_SENSE_MASK)
643 return -EINVAL;
644
645	/* OMAP1 allows only edge triggering */
646 if (!cpu_class_is_omap2()
647 && (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
648 return -EINVAL;
649
650 bank = irq_data_get_irq_chip_data(d);
651 spin_lock_irqsave(&bank->lock, flags);
652 retval = _set_gpio_triggering(bank, get_gpio_index(gpio), type);
653 spin_unlock_irqrestore(&bank->lock, flags);
654
655 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
656 __irq_set_handler_locked(d->irq, handle_level_irq);
657 else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
658 __irq_set_handler_locked(d->irq, handle_edge_irq);
659
660 return retval;
661}
662
663static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
664{
665 void __iomem *reg = bank->base;
666
667 switch (bank->method) {
668#ifdef CONFIG_ARCH_OMAP1
669 case METHOD_MPUIO:
670 /* MPUIO irqstatus is reset by reading the status register,
671 * so do nothing here */
672 return;
673#endif
674#ifdef CONFIG_ARCH_OMAP15XX
675 case METHOD_GPIO_1510:
676 reg += OMAP1510_GPIO_INT_STATUS;
677 break;
678#endif
679#ifdef CONFIG_ARCH_OMAP16XX
680 case METHOD_GPIO_1610:
681 reg += OMAP1610_GPIO_IRQSTATUS1;
682 break;
683#endif
684#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
685 case METHOD_GPIO_7XX:
686 reg += OMAP7XX_GPIO_INT_STATUS;
687 break;
688#endif
689#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
690 case METHOD_GPIO_24XX:
691 reg += OMAP24XX_GPIO_IRQSTATUS1;
692 break;
693#endif
694#if defined(CONFIG_ARCH_OMAP4)
695 case METHOD_GPIO_44XX:
696 reg += OMAP4_GPIO_IRQSTATUS0;
697 break;
698#endif
699 default:
700 WARN_ON(1);
701 return;
702 }
703 __raw_writel(gpio_mask, reg);
704
705 /* Workaround for clearing DSP GPIO interrupts to allow retention */
706 if (cpu_is_omap24xx() || cpu_is_omap34xx())
707 reg = bank->base + OMAP24XX_GPIO_IRQSTATUS2;
708 else if (cpu_is_omap44xx())
709 reg = bank->base + OMAP4_GPIO_IRQSTATUS1;
710
711 if (cpu_is_omap24xx() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
712 __raw_writel(gpio_mask, reg);
713
714 /* Flush posted write for the irq status to avoid spurious interrupts */
715 __raw_readl(reg);
716 }
717}
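The read-back at the end of the function above is the usual posted-write flush idiom, shown here in isolation:

	/* Hedged sketch: a dummy read from the just-written register forces
	 * the interconnect to complete the write before execution continues,
	 * which is what avoids the spurious interrupts mentioned above.
	 */
	__raw_writel(gpio_mask, reg);
	(void)__raw_readl(reg);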
718
719static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
720{
721 _clear_gpio_irqbank(bank, 1 << get_gpio_index(gpio));
722}
723
724static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
725{
726 void __iomem *reg = bank->base;
727 int inv = 0;
728 u32 l;
729 u32 mask;
730
731 switch (bank->method) {
732#ifdef CONFIG_ARCH_OMAP1
733 case METHOD_MPUIO:
734 reg += OMAP_MPUIO_GPIO_MASKIT / bank->stride;
735 mask = 0xffff;
736 inv = 1;
737 break;
738#endif
739#ifdef CONFIG_ARCH_OMAP15XX
740 case METHOD_GPIO_1510:
741 reg += OMAP1510_GPIO_INT_MASK;
742 mask = 0xffff;
743 inv = 1;
744 break;
745#endif
746#ifdef CONFIG_ARCH_OMAP16XX
747 case METHOD_GPIO_1610:
748 reg += OMAP1610_GPIO_IRQENABLE1;
749 mask = 0xffff;
750 break;
751#endif
752#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
753 case METHOD_GPIO_7XX:
754 reg += OMAP7XX_GPIO_INT_MASK;
755 mask = 0xffffffff;
756 inv = 1;
757 break;
758#endif
759#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
760 case METHOD_GPIO_24XX:
761 reg += OMAP24XX_GPIO_IRQENABLE1;
762 mask = 0xffffffff;
763 break;
764#endif
765#if defined(CONFIG_ARCH_OMAP4)
766 case METHOD_GPIO_44XX:
767 reg += OMAP4_GPIO_IRQSTATUSSET0;
768 mask = 0xffffffff;
769 break;
770#endif
771 default:
772 WARN_ON(1);
773 return 0;
774 }
775
776 l = __raw_readl(reg);
777 if (inv)
778 l = ~l;
779 l &= mask;
780 return l;
781}
782
783static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask, int enable)
784{
785 void __iomem *reg = bank->base;
786 u32 l;
787
788 switch (bank->method) {
789#ifdef CONFIG_ARCH_OMAP1
790 case METHOD_MPUIO:
791 reg += OMAP_MPUIO_GPIO_MASKIT / bank->stride;
792 l = __raw_readl(reg);
793 if (enable)
794 l &= ~(gpio_mask);
795 else
796 l |= gpio_mask;
797 break;
798#endif
799#ifdef CONFIG_ARCH_OMAP15XX
800 case METHOD_GPIO_1510:
801 reg += OMAP1510_GPIO_INT_MASK;
802 l = __raw_readl(reg);
803 if (enable)
804 l &= ~(gpio_mask);
805 else
806 l |= gpio_mask;
807 break;
808#endif
809#ifdef CONFIG_ARCH_OMAP16XX
810 case METHOD_GPIO_1610:
811 if (enable)
812 reg += OMAP1610_GPIO_SET_IRQENABLE1;
813 else
814 reg += OMAP1610_GPIO_CLEAR_IRQENABLE1;
815 l = gpio_mask;
816 break;
817#endif
818#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
819 case METHOD_GPIO_7XX:
820 reg += OMAP7XX_GPIO_INT_MASK;
821 l = __raw_readl(reg);
822 if (enable)
823 l &= ~(gpio_mask);
824 else
825 l |= gpio_mask;
826 break;
827#endif
828#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
829 case METHOD_GPIO_24XX:
830 if (enable)
831 reg += OMAP24XX_GPIO_SETIRQENABLE1;
832 else
833 reg += OMAP24XX_GPIO_CLEARIRQENABLE1;
834 l = gpio_mask;
835 break;
836#endif
837#ifdef CONFIG_ARCH_OMAP4
838 case METHOD_GPIO_44XX:
839 if (enable)
840 reg += OMAP4_GPIO_IRQSTATUSSET0;
841 else
842 reg += OMAP4_GPIO_IRQSTATUSCLR0;
843 l = gpio_mask;
844 break;
845#endif
846 default:
847 WARN_ON(1);
848 return;
849 }
850 __raw_writel(l, reg);
851}
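/*
 * Note (summarizing the switch above): the newer banks (1610, 24xx,
 * 44xx) provide dedicated SET/CLEAR enable registers, so a single
 * write of gpio_mask is enough; the older banks only have a plain
 * mask register and therefore need a read-modify-write.
 */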
852
853static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
854{
855 _enable_gpio_irqbank(bank, 1 << get_gpio_index(gpio), enable);
856}
857
858/*
859 * Note that ENAWAKEUP needs to be enabled in the GPIO_SYSCONFIG register.
860 * 1510 does not seem to have a wake-up register. If JTAG is connected
861 * to the target, the system will always wake up on GPIO events. While
862 * the system is running, all registered GPIO interrupts need to have
863 * wake-up enabled. When the system is suspended, only selected GPIO
864 * interrupts need to have wake-up enabled.
865 */
866static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
867{
868 unsigned long uninitialized_var(flags);
869
870 switch (bank->method) {
871#ifdef CONFIG_ARCH_OMAP16XX
872 case METHOD_MPUIO:
873 case METHOD_GPIO_1610:
874 spin_lock_irqsave(&bank->lock, flags);
875 if (enable)
876 bank->suspend_wakeup |= (1 << gpio);
877 else
878 bank->suspend_wakeup &= ~(1 << gpio);
879 spin_unlock_irqrestore(&bank->lock, flags);
880 return 0;
881#endif
882#ifdef CONFIG_ARCH_OMAP2PLUS
883 case METHOD_GPIO_24XX:
884 case METHOD_GPIO_44XX:
885 if (bank->non_wakeup_gpios & (1 << gpio)) {
886 printk(KERN_ERR "Unable to modify wakeup on "
887 "non-wakeup GPIO%d\n",
888 (bank - gpio_bank) * 32 + gpio);
889 return -EINVAL;
890 }
891 spin_lock_irqsave(&bank->lock, flags);
892 if (enable)
893 bank->suspend_wakeup |= (1 << gpio);
894 else
895 bank->suspend_wakeup &= ~(1 << gpio);
896 spin_unlock_irqrestore(&bank->lock, flags);
897 return 0;
898#endif
899 default:
900 printk(KERN_ERR "Can't enable GPIO wakeup for method %i\n",
901 bank->method);
902 return -EINVAL;
903 }
904}
905
906static void _reset_gpio(struct gpio_bank *bank, int gpio)
907{
908 _set_gpio_direction(bank, get_gpio_index(gpio), 1);
909 _set_gpio_irqenable(bank, gpio, 0);
910 _clear_gpio_irqstatus(bank, gpio);
911 _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
912}
913
914/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
915static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
916{
917 unsigned int gpio = d->irq - IH_GPIO_BASE;
918 struct gpio_bank *bank;
919 int retval;
920
921 if (check_gpio(gpio) < 0)
922 return -ENODEV;
923 bank = irq_data_get_irq_chip_data(d);
924 retval = _set_gpio_wakeup(bank, get_gpio_index(gpio), enable);
925
926 return retval;
927}
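/*
 * Usage sketch from a client driver's point of view (the GPIO number,
 * ISR and function names here are hypothetical, not part of this
 * file): a driver that wants its GPIO to wake the system requests the
 * GPIO IRQ as usual and then calls enable_irq_wake(), which reaches
 * gpio_wake_enable() above via the irq_chip's .irq_set_wake hook.
 */
#if 0
static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_wake_setup(void)
{
	int irq = gpio_to_irq(EXAMPLE_WAKE_GPIO);	/* hypothetical GPIO */
	int ret;

	ret = request_irq(irq, example_isr, IRQF_TRIGGER_FALLING,
			  "example-wake", NULL);
	if (ret)
		return ret;

	/* mark this GPIO interrupt as a system wake-up source */
	return enable_irq_wake(irq);
}
#endif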
928
929static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
930{
931 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
932 unsigned long flags;
933
934 spin_lock_irqsave(&bank->lock, flags);
935
936 /* Set trigger to none. You need to enable the desired trigger with
937 * request_irq() or set_irq_type().
938 */
939 _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
940
941#ifdef CONFIG_ARCH_OMAP15XX
942 if (bank->method == METHOD_GPIO_1510) {
943 void __iomem *reg;
944
945 /* Claim the pin for MPU */
946 reg = bank->base + OMAP1510_GPIO_PIN_CONTROL;
947 __raw_writel(__raw_readl(reg) | (1 << offset), reg);
948 }
949#endif
950 if (!cpu_class_is_omap1()) {
951 if (!bank->mod_usage) {
952 void __iomem *reg = bank->base;
953 u32 ctrl;
954
955 if (cpu_is_omap24xx() || cpu_is_omap34xx())
956 reg += OMAP24XX_GPIO_CTRL;
957 else if (cpu_is_omap44xx())
958 reg += OMAP4_GPIO_CTRL;
959 ctrl = __raw_readl(reg);
960 /* Module is enabled, clocks are not gated */
961 ctrl &= 0xFFFFFFFE;
962 __raw_writel(ctrl, reg);
963 }
964 bank->mod_usage |= 1 << offset;
965 }
966 spin_unlock_irqrestore(&bank->lock, flags);
967
968 return 0;
969}
970
971static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
972{
973 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
974 unsigned long flags;
975
976 spin_lock_irqsave(&bank->lock, flags);
977#ifdef CONFIG_ARCH_OMAP16XX
978 if (bank->method == METHOD_GPIO_1610) {
979 /* Disable wake-up during idle for dynamic tick */
980 void __iomem *reg = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
981 __raw_writel(1 << offset, reg);
982 }
983#endif
984#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
985 if (bank->method == METHOD_GPIO_24XX) {
986 /* Disable wake-up during idle for dynamic tick */
987 void __iomem *reg = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
988 __raw_writel(1 << offset, reg);
989 }
990#endif
991#ifdef CONFIG_ARCH_OMAP4
992 if (bank->method == METHOD_GPIO_44XX) {
993 /* Disable wake-up during idle for dynamic tick */
994 void __iomem *reg = bank->base + OMAP4_GPIO_IRQWAKEN0;
995 __raw_writel(1 << offset, reg);
996 }
997#endif
998 if (!cpu_class_is_omap1()) {
999 bank->mod_usage &= ~(1 << offset);
1000 if (!bank->mod_usage) {
1001 void __iomem *reg = bank->base;
1002 u32 ctrl;
1003
1004 if (cpu_is_omap24xx() || cpu_is_omap34xx())
1005 reg += OMAP24XX_GPIO_CTRL;
1006 else if (cpu_is_omap44xx())
1007 reg += OMAP4_GPIO_CTRL;
1008 ctrl = __raw_readl(reg);
1009 /* Module is disabled, clocks are gated */
1010 ctrl |= 1;
1011 __raw_writel(ctrl, reg);
1012 }
1013 }
1014 _reset_gpio(bank, bank->chip.base + offset);
1015 spin_unlock_irqrestore(&bank->lock, flags);
1016}
1017
1018/*
1019 * We need to unmask the GPIO bank interrupt as soon as possible to
1020 * avoid missing GPIO interrupts for other lines in the bank.
1021 * Then we need to mask-read-clear-unmask the triggered GPIO lines
1022 * in the bank to avoid missing nested interrupts for a GPIO line.
1023 * If we wait to unmask individual GPIO lines in the bank after the
1024 * line's interrupt handler has been run, we may miss some nested
1025 * interrupts.
1026 */
1027static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
1028{
1029 void __iomem *isr_reg = NULL;
1030 u32 isr;
1031 unsigned int gpio_irq, gpio_index;
1032 struct gpio_bank *bank;
1033 u32 retrigger = 0;
1034 int unmasked = 0;
1035 struct irq_chip *chip = irq_desc_get_chip(desc);
1036
1037 chained_irq_enter(chip, desc);
1038
1039 bank = irq_get_handler_data(irq);
1040#ifdef CONFIG_ARCH_OMAP1
1041 if (bank->method == METHOD_MPUIO)
1042 isr_reg = bank->base +
1043 OMAP_MPUIO_GPIO_INT / bank->stride;
1044#endif
1045#ifdef CONFIG_ARCH_OMAP15XX
1046 if (bank->method == METHOD_GPIO_1510)
1047 isr_reg = bank->base + OMAP1510_GPIO_INT_STATUS;
1048#endif
1049#if defined(CONFIG_ARCH_OMAP16XX)
1050 if (bank->method == METHOD_GPIO_1610)
1051 isr_reg = bank->base + OMAP1610_GPIO_IRQSTATUS1;
1052#endif
1053#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
1054 if (bank->method == METHOD_GPIO_7XX)
1055 isr_reg = bank->base + OMAP7XX_GPIO_INT_STATUS;
1056#endif
1057#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
1058 if (bank->method == METHOD_GPIO_24XX)
1059 isr_reg = bank->base + OMAP24XX_GPIO_IRQSTATUS1;
1060#endif
1061#if defined(CONFIG_ARCH_OMAP4)
1062 if (bank->method == METHOD_GPIO_44XX)
1063 isr_reg = bank->base + OMAP4_GPIO_IRQSTATUS0;
1064#endif
1065
1066 if (WARN_ON(!isr_reg))
1067 goto exit;
1068
1069	while (1) {
1070 u32 isr_saved, level_mask = 0;
1071 u32 enabled;
1072
1073 enabled = _get_gpio_irqbank_mask(bank);
1074 isr_saved = isr = __raw_readl(isr_reg) & enabled;
1075
1076 if (cpu_is_omap15xx() && (bank->method == METHOD_MPUIO))
1077 isr &= 0x0000ffff;
1078
1079 if (cpu_class_is_omap2()) {
1080 level_mask = bank->level_mask & enabled;
1081 }
1082
1083		/* Clear edge-sensitive interrupts before the handler(s) are
1084		called so that we don't miss any interrupts that occur while
1085		they are executing. */
1086 _enable_gpio_irqbank(bank, isr_saved & ~level_mask, 0);
1087 _clear_gpio_irqbank(bank, isr_saved & ~level_mask);
1088 _enable_gpio_irqbank(bank, isr_saved & ~level_mask, 1);
1089
1090		/* If only edge-sensitive GPIO pin interrupts are configured,
1091		we can unmask the GPIO bank interrupt immediately. */
1092 if (!level_mask && !unmasked) {
1093 unmasked = 1;
1094 chained_irq_exit(chip, desc);
1095 }
1096
1097 isr |= retrigger;
1098 retrigger = 0;
1099 if (!isr)
1100 break;
1101
1102 gpio_irq = bank->virtual_irq_start;
1103 for (; isr != 0; isr >>= 1, gpio_irq++) {
1104 gpio_index = get_gpio_index(irq_to_gpio(gpio_irq));
1105
1106 if (!(isr & 1))
1107 continue;
1108
1109#ifdef CONFIG_ARCH_OMAP1
1110 /*
1111 * Some chips can't respond to both rising and falling
1112 * at the same time. If this irq was requested with
1113 * both flags, we need to flip the ICR data for the IRQ
1114 * to respond to the IRQ for the opposite direction.
1115 * This will be indicated in the bank toggle_mask.
1116 */
1117 if (bank->toggle_mask & (1 << gpio_index))
1118 _toggle_gpio_edge_triggering(bank, gpio_index);
1119#endif
1120
1121 generic_handle_irq(gpio_irq);
1122 }
1123 }
1124	/* If the bank has any level-sensitive GPIO pin interrupts
1125	configured, we must unmask the bank interrupt only after the
1126	handler(s) have executed, in order to avoid a spurious bank
1127	interrupt. */
1128exit:
1129 if (!unmasked)
1130 chained_irq_exit(chip, desc);
1131}
1132
1133static void gpio_irq_shutdown(struct irq_data *d)
1134{
1135 unsigned int gpio = d->irq - IH_GPIO_BASE;
1136 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
1137
1138 _reset_gpio(bank, gpio);
1139}
1140
1141static void gpio_ack_irq(struct irq_data *d)
1142{
1143 unsigned int gpio = d->irq - IH_GPIO_BASE;
1144 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
1145
1146 _clear_gpio_irqstatus(bank, gpio);
1147}
1148
1149static void gpio_mask_irq(struct irq_data *d)
1150{
1151 unsigned int gpio = d->irq - IH_GPIO_BASE;
1152 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
1153
1154 _set_gpio_irqenable(bank, gpio, 0);
1155 _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
1156}
1157
1158static void gpio_unmask_irq(struct irq_data *d)
1159{
1160 unsigned int gpio = d->irq - IH_GPIO_BASE;
1161 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
1162 unsigned int irq_mask = 1 << get_gpio_index(gpio);
1163 u32 trigger = irqd_get_trigger_type(d);
1164
1165 if (trigger)
1166 _set_gpio_triggering(bank, get_gpio_index(gpio), trigger);
1167
1168 /* For level-triggered GPIOs, the clearing must be done after
1169 * the HW source is cleared, thus after the handler has run */
1170 if (bank->level_mask & irq_mask) {
1171 _set_gpio_irqenable(bank, gpio, 0);
1172 _clear_gpio_irqstatus(bank, gpio);
1173 }
1174
1175 _set_gpio_irqenable(bank, gpio, 1);
1176}
1177
1178static struct irq_chip gpio_irq_chip = {
1179 .name = "GPIO",
1180 .irq_shutdown = gpio_irq_shutdown,
1181 .irq_ack = gpio_ack_irq,
1182 .irq_mask = gpio_mask_irq,
1183 .irq_unmask = gpio_unmask_irq,
1184 .irq_set_type = gpio_irq_type,
1185 .irq_set_wake = gpio_wake_enable,
1186};
1187
1188/*---------------------------------------------------------------------*/
1189
1190#ifdef CONFIG_ARCH_OMAP1
1191
1192/* MPUIO uses the always-on 32k clock */
1193
1194static void mpuio_ack_irq(struct irq_data *d)
1195{
1196 /* The ISR is reset automatically, so do nothing here. */
1197}
1198
1199static void mpuio_mask_irq(struct irq_data *d)
1200{
1201 unsigned int gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
1202 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
1203
1204 _set_gpio_irqenable(bank, gpio, 0);
1205}
1206
1207static void mpuio_unmask_irq(struct irq_data *d)
1208{
1209 unsigned int gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
1210 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
1211
1212 _set_gpio_irqenable(bank, gpio, 1);
1213}
1214
1215static struct irq_chip mpuio_irq_chip = {
1216 .name = "MPUIO",
1217 .irq_ack = mpuio_ack_irq,
1218 .irq_mask = mpuio_mask_irq,
1219 .irq_unmask = mpuio_unmask_irq,
1220 .irq_set_type = gpio_irq_type,
1221#ifdef CONFIG_ARCH_OMAP16XX
1222 /* REVISIT: assuming only 16xx supports MPUIO wake events */
1223 .irq_set_wake = gpio_wake_enable,
1224#endif
1225};
1226
1227
1228#define bank_is_mpuio(bank) ((bank)->method == METHOD_MPUIO)
1229
1230
1231#ifdef CONFIG_ARCH_OMAP16XX
1232
1233#include <linux/platform_device.h>
1234
1235static int omap_mpuio_suspend_noirq(struct device *dev)
1236{
1237 struct platform_device *pdev = to_platform_device(dev);
1238 struct gpio_bank *bank = platform_get_drvdata(pdev);
1239 void __iomem *mask_reg = bank->base +
1240 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
1241 unsigned long flags;
1242
1243 spin_lock_irqsave(&bank->lock, flags);
1244 bank->saved_wakeup = __raw_readl(mask_reg);
1245 __raw_writel(0xffff & ~bank->suspend_wakeup, mask_reg);
1246 spin_unlock_irqrestore(&bank->lock, flags);
1247
1248 return 0;
1249}
1250
1251static int omap_mpuio_resume_noirq(struct device *dev)
1252{
1253 struct platform_device *pdev = to_platform_device(dev);
1254 struct gpio_bank *bank = platform_get_drvdata(pdev);
1255 void __iomem *mask_reg = bank->base +
1256 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
1257 unsigned long flags;
1258
1259 spin_lock_irqsave(&bank->lock, flags);
1260 __raw_writel(bank->saved_wakeup, mask_reg);
1261 spin_unlock_irqrestore(&bank->lock, flags);
1262
1263 return 0;
1264}
1265
1266static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
1267 .suspend_noirq = omap_mpuio_suspend_noirq,
1268 .resume_noirq = omap_mpuio_resume_noirq,
1269};
1270
1271/* use platform_driver for this. */
1272static struct platform_driver omap_mpuio_driver = {
1273 .driver = {
1274 .name = "mpuio",
1275 .pm = &omap_mpuio_dev_pm_ops,
1276 },
1277};
1278
1279static struct platform_device omap_mpuio_device = {
1280 .name = "mpuio",
1281 .id = -1,
1282 .dev = {
1283 .driver = &omap_mpuio_driver.driver,
1284 }
1285 /* could list the /proc/iomem resources */
1286};
1287
1288static inline void mpuio_init(void)
1289{
1290 struct gpio_bank *bank = get_gpio_bank(OMAP_MPUIO(0));
1291 platform_set_drvdata(&omap_mpuio_device, bank);
1292
1293 if (platform_driver_register(&omap_mpuio_driver) == 0)
1294 (void) platform_device_register(&omap_mpuio_device);
1295}
1296
1297#else
1298static inline void mpuio_init(void) {}
1299#endif /* 16xx */
1300
1301#else
1302
1303extern struct irq_chip mpuio_irq_chip;
1304
1305#define bank_is_mpuio(bank) 0
1306static inline void mpuio_init(void) {}
1307
1308#endif
1309
1310/*---------------------------------------------------------------------*/
1311
1312/* REVISIT these are stupid implementations! replace by ones that
1313 * don't switch on METHOD_* and which mostly avoid spinlocks
1314 */
1315
1316static int gpio_input(struct gpio_chip *chip, unsigned offset)
1317{
1318 struct gpio_bank *bank;
1319 unsigned long flags;
1320
1321 bank = container_of(chip, struct gpio_bank, chip);
1322 spin_lock_irqsave(&bank->lock, flags);
1323 _set_gpio_direction(bank, offset, 1);
1324 spin_unlock_irqrestore(&bank->lock, flags);
1325 return 0;
1326}
1327
1328static int gpio_is_input(struct gpio_bank *bank, int mask)
1329{
1330 void __iomem *reg = bank->base;
1331
1332 switch (bank->method) {
1333 case METHOD_MPUIO:
1334 reg += OMAP_MPUIO_IO_CNTL / bank->stride;
1335 break;
1336 case METHOD_GPIO_1510:
1337 reg += OMAP1510_GPIO_DIR_CONTROL;
1338 break;
1339 case METHOD_GPIO_1610:
1340 reg += OMAP1610_GPIO_DIRECTION;
1341 break;
1342 case METHOD_GPIO_7XX:
1343 reg += OMAP7XX_GPIO_DIR_CONTROL;
1344 break;
1345 case METHOD_GPIO_24XX:
1346 reg += OMAP24XX_GPIO_OE;
1347 break;
1348 case METHOD_GPIO_44XX:
1349 reg += OMAP4_GPIO_OE;
1350 break;
1351 default:
1352 WARN_ONCE(1, "gpio_is_input: incorrect OMAP GPIO method");
1353 return -EINVAL;
1354 }
1355 return __raw_readl(reg) & mask;
1356}
1357
1358static int gpio_get(struct gpio_chip *chip, unsigned offset)
1359{
1360 struct gpio_bank *bank;
1361 void __iomem *reg;
1362 int gpio;
1363 u32 mask;
1364
1365 gpio = chip->base + offset;
1366 bank = get_gpio_bank(gpio);
1367 reg = bank->base;
1368 mask = 1 << get_gpio_index(gpio);
1369
1370 if (gpio_is_input(bank, mask))
1371 return _get_gpio_datain(bank, gpio);
1372 else
1373 return _get_gpio_dataout(bank, gpio);
1374}
1375
1376static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
1377{
1378 struct gpio_bank *bank;
1379 unsigned long flags;
1380
1381 bank = container_of(chip, struct gpio_bank, chip);
1382 spin_lock_irqsave(&bank->lock, flags);
1383 _set_gpio_dataout(bank, offset, value);
1384 _set_gpio_direction(bank, offset, 0);
1385 spin_unlock_irqrestore(&bank->lock, flags);
1386 return 0;
1387}
1388
1389static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
1390 unsigned debounce)
1391{
1392 struct gpio_bank *bank;
1393 unsigned long flags;
1394
1395 bank = container_of(chip, struct gpio_bank, chip);
1396
1397 if (!bank->dbck) {
1398 bank->dbck = clk_get(bank->dev, "dbclk");
1399 if (IS_ERR(bank->dbck))
1400 dev_err(bank->dev, "Could not get gpio dbck\n");
1401 }
1402
1403 spin_lock_irqsave(&bank->lock, flags);
1404 _set_gpio_debounce(bank, offset, debounce);
1405 spin_unlock_irqrestore(&bank->lock, flags);
1406
1407 return 0;
1408}
1409
1410static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
1411{
1412 struct gpio_bank *bank;
1413 unsigned long flags;
1414
1415 bank = container_of(chip, struct gpio_bank, chip);
1416 spin_lock_irqsave(&bank->lock, flags);
1417 _set_gpio_dataout(bank, offset, value);
1418 spin_unlock_irqrestore(&bank->lock, flags);
1419}
1420
1421static int gpio_2irq(struct gpio_chip *chip, unsigned offset)
1422{
1423 struct gpio_bank *bank;
1424
1425 bank = container_of(chip, struct gpio_bank, chip);
1426 return bank->virtual_irq_start + offset;
1427}
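/*
 * Mapping sketch (hypothetical numbers): for a bank whose
 * virtual_irq_start is 160, gpio_to_irq() on offset 5 of that bank
 * returns IRQ 165; the chained handler above walks the inverse
 * direction with irq_to_gpio() when demultiplexing the bank's ISR.
 */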
1428
1429/*---------------------------------------------------------------------*/
1430
1431static void __init omap_gpio_show_rev(struct gpio_bank *bank)
1432{
1433 u32 rev;
1434
1435	if (cpu_is_omap16xx() && (bank->method == METHOD_MPUIO))
1436 rev = __raw_readw(bank->base + OMAP1610_GPIO_REVISION);
1437 else if (cpu_is_omap24xx() || cpu_is_omap34xx())
1438 rev = __raw_readl(bank->base + OMAP24XX_GPIO_REVISION);
1439 else if (cpu_is_omap44xx())
1440 rev = __raw_readl(bank->base + OMAP4_GPIO_REVISION);
1441 else
1442 return;
1443
1444 printk(KERN_INFO "OMAP GPIO hardware version %d.%d\n",
1445 (rev >> 4) & 0x0f, rev & 0x0f);
1446}
1447
1448/* This lock class tells lockdep that GPIO irqs are in a different
1449 * category than their parents, so it won't report false recursion.
1450 */
1451static struct lock_class_key gpio_lock_class;
1452
1453static inline int init_gpio_info(struct platform_device *pdev)
1454{
1455 /* TODO: Analyze removing gpio_bank_count usage from driver code */
1456 gpio_bank = kzalloc(gpio_bank_count * sizeof(struct gpio_bank),
1457 GFP_KERNEL);
1458 if (!gpio_bank) {
1459 dev_err(&pdev->dev, "Memory alloc failed for gpio_bank\n");
1460 return -ENOMEM;
1461 }
1462 return 0;
1463}
1464
1465/* TODO: Cleanup cpu_is_* checks */
1466static void omap_gpio_mod_init(struct gpio_bank *bank, int id)
1467{
1468 if (cpu_class_is_omap2()) {
1469 if (cpu_is_omap44xx()) {
1470 __raw_writel(0xffffffff, bank->base +
1471 OMAP4_GPIO_IRQSTATUSCLR0);
1472 __raw_writel(0x00000000, bank->base +
1473 OMAP4_GPIO_DEBOUNCENABLE);
1474 /* Initialize interface clk ungated, module enabled */
1475 __raw_writel(0, bank->base + OMAP4_GPIO_CTRL);
1476 } else if (cpu_is_omap34xx()) {
1477 __raw_writel(0x00000000, bank->base +
1478 OMAP24XX_GPIO_IRQENABLE1);
1479 __raw_writel(0xffffffff, bank->base +
1480 OMAP24XX_GPIO_IRQSTATUS1);
1481 __raw_writel(0x00000000, bank->base +
1482 OMAP24XX_GPIO_DEBOUNCE_EN);
1483
1484 /* Initialize interface clk ungated, module enabled */
1485 __raw_writel(0, bank->base + OMAP24XX_GPIO_CTRL);
1486 } else if (cpu_is_omap24xx()) {
1487 static const u32 non_wakeup_gpios[] = {
1488 0xe203ffc0, 0x08700040
1489 };
1490 if (id < ARRAY_SIZE(non_wakeup_gpios))
1491 bank->non_wakeup_gpios = non_wakeup_gpios[id];
1492 }
1493 } else if (cpu_class_is_omap1()) {
1494 if (bank_is_mpuio(bank))
1495 __raw_writew(0xffff, bank->base +
1496 OMAP_MPUIO_GPIO_MASKIT / bank->stride);
1497 if (cpu_is_omap15xx() && bank->method == METHOD_GPIO_1510) {
1498 __raw_writew(0xffff, bank->base
1499 + OMAP1510_GPIO_INT_MASK);
1500 __raw_writew(0x0000, bank->base
1501 + OMAP1510_GPIO_INT_STATUS);
1502 }
1503 if (cpu_is_omap16xx() && bank->method == METHOD_GPIO_1610) {
1504 __raw_writew(0x0000, bank->base
1505 + OMAP1610_GPIO_IRQENABLE1);
1506 __raw_writew(0xffff, bank->base
1507 + OMAP1610_GPIO_IRQSTATUS1);
1508 __raw_writew(0x0014, bank->base
1509 + OMAP1610_GPIO_SYSCONFIG);
1510
1511 /*
1512 * Enable system clock for GPIO module.
1513 * The CAM_CLK_CTRL *is* really the right place.
1514 */
1515 omap_writel(omap_readl(ULPD_CAM_CLK_CTRL) | 0x04,
1516 ULPD_CAM_CLK_CTRL);
1517 }
1518 if (cpu_is_omap7xx() && bank->method == METHOD_GPIO_7XX) {
1519 __raw_writel(0xffffffff, bank->base
1520 + OMAP7XX_GPIO_INT_MASK);
1521 __raw_writel(0x00000000, bank->base
1522 + OMAP7XX_GPIO_INT_STATUS);
1523 }
1524 }
1525}
1526
1527static void __init omap_gpio_chip_init(struct gpio_bank *bank)
1528{
1529 int j;
1530 static int gpio;
1531
1532 bank->mod_usage = 0;
1533 /*
1534 * REVISIT eventually switch from OMAP-specific gpio structs
1535 * over to the generic ones
1536 */
1537 bank->chip.request = omap_gpio_request;
1538 bank->chip.free = omap_gpio_free;
1539 bank->chip.direction_input = gpio_input;
1540 bank->chip.get = gpio_get;
1541 bank->chip.direction_output = gpio_output;
1542 bank->chip.set_debounce = gpio_debounce;
1543 bank->chip.set = gpio_set;
1544 bank->chip.to_irq = gpio_2irq;
1545 if (bank_is_mpuio(bank)) {
1546 bank->chip.label = "mpuio";
1547#ifdef CONFIG_ARCH_OMAP16XX
1548 bank->chip.dev = &omap_mpuio_device.dev;
1549#endif
1550 bank->chip.base = OMAP_MPUIO(0);
1551 } else {
1552 bank->chip.label = "gpio";
1553 bank->chip.base = gpio;
1554 gpio += bank_width;
1555 }
1556 bank->chip.ngpio = bank_width;
1557
1558 gpiochip_add(&bank->chip);
1559
1560 for (j = bank->virtual_irq_start;
1561 j < bank->virtual_irq_start + bank_width; j++) {
1562 irq_set_lockdep_class(j, &gpio_lock_class);
1563 irq_set_chip_data(j, bank);
1564 if (bank_is_mpuio(bank))
1565 irq_set_chip(j, &mpuio_irq_chip);
1566 else
1567 irq_set_chip(j, &gpio_irq_chip);
1568 irq_set_handler(j, handle_simple_irq);
1569 set_irq_flags(j, IRQF_VALID);
1570 }
1571 irq_set_chained_handler(bank->irq, gpio_irq_handler);
1572 irq_set_handler_data(bank->irq, bank);
1573}
1574
1575static int __devinit omap_gpio_probe(struct platform_device *pdev)
1576{
1577 static int gpio_init_done;
1578 struct omap_gpio_platform_data *pdata;
1579 struct resource *res;
1580 int id;
1581 struct gpio_bank *bank;
1582
1583 if (!pdev->dev.platform_data)
1584 return -EINVAL;
1585
1586 pdata = pdev->dev.platform_data;
1587
1588 if (!gpio_init_done) {
1589 int ret;
1590
1591 ret = init_gpio_info(pdev);
1592 if (ret)
1593 return ret;
1594 }
1595
1596 id = pdev->id;
1597 bank = &gpio_bank[id];
1598
1599 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1600 if (unlikely(!res)) {
1601 dev_err(&pdev->dev, "GPIO Bank %i Invalid IRQ resource\n", id);
1602 return -ENODEV;
1603 }
1604
1605 bank->irq = res->start;
1606 bank->virtual_irq_start = pdata->virtual_irq_start;
1607 bank->method = pdata->bank_type;
1608 bank->dev = &pdev->dev;
1609 bank->dbck_flag = pdata->dbck_flag;
1610 bank->stride = pdata->bank_stride;
1611 bank_width = pdata->bank_width;
1612
1613 spin_lock_init(&bank->lock);
1614
1615 /* Static mapping, never released */
1616 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1617 if (unlikely(!res)) {
1618 dev_err(&pdev->dev, "GPIO Bank %i Invalid mem resource\n", id);
1619 return -ENODEV;
1620 }
1621
1622 bank->base = ioremap(res->start, resource_size(res));
1623 if (!bank->base) {
1624 dev_err(&pdev->dev, "Could not ioremap gpio bank%i\n", id);
1625 return -ENOMEM;
1626 }
1627
1628 pm_runtime_enable(bank->dev);
1629 pm_runtime_get_sync(bank->dev);
1630
1631 omap_gpio_mod_init(bank, id);
1632 omap_gpio_chip_init(bank);
1633 omap_gpio_show_rev(bank);
1634
1635 if (!gpio_init_done)
1636 gpio_init_done = 1;
1637
1638 return 0;
1639}
1640
1641#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
1642static int omap_gpio_suspend(void)
1643{
1644 int i;
1645
1646 if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
1647 return 0;
1648
1649 for (i = 0; i < gpio_bank_count; i++) {
1650 struct gpio_bank *bank = &gpio_bank[i];
1651 void __iomem *wake_status;
1652 void __iomem *wake_clear;
1653 void __iomem *wake_set;
1654 unsigned long flags;
1655
1656 switch (bank->method) {
1657#ifdef CONFIG_ARCH_OMAP16XX
1658 case METHOD_GPIO_1610:
1659 wake_status = bank->base + OMAP1610_GPIO_WAKEUPENABLE;
1660 wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
1661 wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
1662 break;
1663#endif
1664#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
1665 case METHOD_GPIO_24XX:
1666 wake_status = bank->base + OMAP24XX_GPIO_WAKE_EN;
1667 wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
1668 wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
1669 break;
1670#endif
1671#ifdef CONFIG_ARCH_OMAP4
1672 case METHOD_GPIO_44XX:
1673 wake_status = bank->base + OMAP4_GPIO_IRQWAKEN0;
1674 wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
1675 wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
1676 break;
1677#endif
1678 default:
1679 continue;
1680 }
1681
1682 spin_lock_irqsave(&bank->lock, flags);
1683 bank->saved_wakeup = __raw_readl(wake_status);
1684 __raw_writel(0xffffffff, wake_clear);
1685 __raw_writel(bank->suspend_wakeup, wake_set);
1686 spin_unlock_irqrestore(&bank->lock, flags);
1687 }
1688
1689 return 0;
1690}
1691
1692static void omap_gpio_resume(void)
1693{
1694 int i;
1695
1696 if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
1697 return;
1698
1699 for (i = 0; i < gpio_bank_count; i++) {
1700 struct gpio_bank *bank = &gpio_bank[i];
1701 void __iomem *wake_clear;
1702 void __iomem *wake_set;
1703 unsigned long flags;
1704
1705 switch (bank->method) {
1706#ifdef CONFIG_ARCH_OMAP16XX
1707 case METHOD_GPIO_1610:
1708 wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
1709 wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
1710 break;
1711#endif
1712#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
1713 case METHOD_GPIO_24XX:
1714 wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
1715 wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
1716 break;
1717#endif
1718#ifdef CONFIG_ARCH_OMAP4
1719 case METHOD_GPIO_44XX:
1720 wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
1721 wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
1722 break;
1723#endif
1724 default:
1725 continue;
1726 }
1727
1728 spin_lock_irqsave(&bank->lock, flags);
1729 __raw_writel(0xffffffff, wake_clear);
1730 __raw_writel(bank->saved_wakeup, wake_set);
1731 spin_unlock_irqrestore(&bank->lock, flags);
1732 }
1733}
1734
1735static struct syscore_ops omap_gpio_syscore_ops = {
1736 .suspend = omap_gpio_suspend,
1737 .resume = omap_gpio_resume,
1738};
1739
1740#endif
1741
1742#ifdef CONFIG_ARCH_OMAP2PLUS
1743
1744static int workaround_enabled;
1745
1746void omap2_gpio_prepare_for_idle(int off_mode)
1747{
1748 int i, c = 0;
1749 int min = 0;
1750
1751 if (cpu_is_omap34xx())
1752 min = 1;
1753
1754 for (i = min; i < gpio_bank_count; i++) {
1755 struct gpio_bank *bank = &gpio_bank[i];
1756 u32 l1 = 0, l2 = 0;
1757 int j;
1758
1759 for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
1760 clk_disable(bank->dbck);
1761
1762 if (!off_mode)
1763 continue;
1764
1765 /* If going to OFF, remove triggering for all
1766 * non-wakeup GPIOs. Otherwise spurious IRQs will be
1767 * generated. See OMAP2420 Errata item 1.101. */
1768 if (!(bank->enabled_non_wakeup_gpios))
1769 continue;
1770
1771 if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
1772 bank->saved_datain = __raw_readl(bank->base +
1773 OMAP24XX_GPIO_DATAIN);
1774 l1 = __raw_readl(bank->base +
1775 OMAP24XX_GPIO_FALLINGDETECT);
1776 l2 = __raw_readl(bank->base +
1777 OMAP24XX_GPIO_RISINGDETECT);
1778 }
1779
1780 if (cpu_is_omap44xx()) {
1781 bank->saved_datain = __raw_readl(bank->base +
1782 OMAP4_GPIO_DATAIN);
1783 l1 = __raw_readl(bank->base +
1784 OMAP4_GPIO_FALLINGDETECT);
1785 l2 = __raw_readl(bank->base +
1786 OMAP4_GPIO_RISINGDETECT);
1787 }
1788
1789 bank->saved_fallingdetect = l1;
1790 bank->saved_risingdetect = l2;
1791 l1 &= ~bank->enabled_non_wakeup_gpios;
1792 l2 &= ~bank->enabled_non_wakeup_gpios;
1793
1794 if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
1795 __raw_writel(l1, bank->base +
1796 OMAP24XX_GPIO_FALLINGDETECT);
1797 __raw_writel(l2, bank->base +
1798 OMAP24XX_GPIO_RISINGDETECT);
1799 }
1800
1801 if (cpu_is_omap44xx()) {
1802 __raw_writel(l1, bank->base + OMAP4_GPIO_FALLINGDETECT);
1803 __raw_writel(l2, bank->base + OMAP4_GPIO_RISINGDETECT);
1804 }
1805
1806 c++;
1807 }
1808 if (!c) {
1809 workaround_enabled = 0;
1810 return;
1811 }
1812 workaround_enabled = 1;
1813}
1814
1815void omap2_gpio_resume_after_idle(void)
1816{
1817 int i;
1818 int min = 0;
1819
1820 if (cpu_is_omap34xx())
1821 min = 1;
1822 for (i = min; i < gpio_bank_count; i++) {
1823 struct gpio_bank *bank = &gpio_bank[i];
1824 u32 l = 0, gen, gen0, gen1;
1825 int j;
1826
1827 for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
1828 clk_enable(bank->dbck);
1829
1830 if (!workaround_enabled)
1831 continue;
1832
1833 if (!(bank->enabled_non_wakeup_gpios))
1834 continue;
1835
1836 if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
1837 __raw_writel(bank->saved_fallingdetect,
1838 bank->base + OMAP24XX_GPIO_FALLINGDETECT);
1839 __raw_writel(bank->saved_risingdetect,
1840 bank->base + OMAP24XX_GPIO_RISINGDETECT);
1841 l = __raw_readl(bank->base + OMAP24XX_GPIO_DATAIN);
1842 }
1843
1844 if (cpu_is_omap44xx()) {
1845 __raw_writel(bank->saved_fallingdetect,
1846 bank->base + OMAP4_GPIO_FALLINGDETECT);
1847 __raw_writel(bank->saved_risingdetect,
1848 bank->base + OMAP4_GPIO_RISINGDETECT);
1849 l = __raw_readl(bank->base + OMAP4_GPIO_DATAIN);
1850 }
1851
1852 /* Check if any of the non-wakeup interrupt GPIOs have changed
1853 * state. If so, generate an IRQ by software. This is
1854 * horribly racy, but it's the best we can do to work around
1855 * this silicon bug. */
1856 l ^= bank->saved_datain;
1857 l &= bank->enabled_non_wakeup_gpios;
1858
1859 /*
1860 * No need to generate IRQs for the rising edge for gpio IRQs
1861 * configured with falling edge only; and vice versa.
1862 */
1863 gen0 = l & bank->saved_fallingdetect;
1864 gen0 &= bank->saved_datain;
1865
1866 gen1 = l & bank->saved_risingdetect;
1867 gen1 &= ~(bank->saved_datain);
1868
1869 /* FIXME: Consider GPIO IRQs with level detections properly! */
1870 gen = l & (~(bank->saved_fallingdetect) &
1871 ~(bank->saved_risingdetect));
1872		/* Consider all GPIO IRQs that need to be updated */
1873 gen |= gen0 | gen1;
1874
1875 if (gen) {
1876 u32 old0, old1;
1877
1878 if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
1879 old0 = __raw_readl(bank->base +
1880 OMAP24XX_GPIO_LEVELDETECT0);
1881 old1 = __raw_readl(bank->base +
1882 OMAP24XX_GPIO_LEVELDETECT1);
1883 __raw_writel(old0 | gen, bank->base +
1884 OMAP24XX_GPIO_LEVELDETECT0);
1885 __raw_writel(old1 | gen, bank->base +
1886 OMAP24XX_GPIO_LEVELDETECT1);
1887 __raw_writel(old0, bank->base +
1888 OMAP24XX_GPIO_LEVELDETECT0);
1889 __raw_writel(old1, bank->base +
1890 OMAP24XX_GPIO_LEVELDETECT1);
1891 }
1892
1893 if (cpu_is_omap44xx()) {
1894 old0 = __raw_readl(bank->base +
1895 OMAP4_GPIO_LEVELDETECT0);
1896 old1 = __raw_readl(bank->base +
1897 OMAP4_GPIO_LEVELDETECT1);
1898 __raw_writel(old0 | l, bank->base +
1899 OMAP4_GPIO_LEVELDETECT0);
1900 __raw_writel(old1 | l, bank->base +
1901 OMAP4_GPIO_LEVELDETECT1);
1902 __raw_writel(old0, bank->base +
1903 OMAP4_GPIO_LEVELDETECT0);
1904 __raw_writel(old1, bank->base +
1905 OMAP4_GPIO_LEVELDETECT1);
1906 }
1907 }
1908 }
1909
1910}
1911
1912#endif
1913
1914#ifdef CONFIG_ARCH_OMAP3
1915/* save the registers of banks 2-6 */
1916void omap_gpio_save_context(void)
1917{
1918 int i;
1919
1920	/* saving banks 2-6 only, since GPIO1 is in the WKUP domain */
1921 for (i = 1; i < gpio_bank_count; i++) {
1922 struct gpio_bank *bank = &gpio_bank[i];
1923 gpio_context[i].irqenable1 =
1924 __raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE1);
1925 gpio_context[i].irqenable2 =
1926 __raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE2);
1927 gpio_context[i].wake_en =
1928 __raw_readl(bank->base + OMAP24XX_GPIO_WAKE_EN);
1929 gpio_context[i].ctrl =
1930 __raw_readl(bank->base + OMAP24XX_GPIO_CTRL);
1931 gpio_context[i].oe =
1932 __raw_readl(bank->base + OMAP24XX_GPIO_OE);
1933 gpio_context[i].leveldetect0 =
1934 __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0);
1935 gpio_context[i].leveldetect1 =
1936 __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
1937 gpio_context[i].risingdetect =
1938 __raw_readl(bank->base + OMAP24XX_GPIO_RISINGDETECT);
1939 gpio_context[i].fallingdetect =
1940 __raw_readl(bank->base + OMAP24XX_GPIO_FALLINGDETECT);
1941 gpio_context[i].dataout =
1942 __raw_readl(bank->base + OMAP24XX_GPIO_DATAOUT);
1943 }
1944}
1945
1946/* restore the required registers of banks 2-6 */
1947void omap_gpio_restore_context(void)
1948{
1949 int i;
1950
1951 for (i = 1; i < gpio_bank_count; i++) {
1952 struct gpio_bank *bank = &gpio_bank[i];
1953 __raw_writel(gpio_context[i].irqenable1,
1954 bank->base + OMAP24XX_GPIO_IRQENABLE1);
1955 __raw_writel(gpio_context[i].irqenable2,
1956 bank->base + OMAP24XX_GPIO_IRQENABLE2);
1957 __raw_writel(gpio_context[i].wake_en,
1958 bank->base + OMAP24XX_GPIO_WAKE_EN);
1959 __raw_writel(gpio_context[i].ctrl,
1960 bank->base + OMAP24XX_GPIO_CTRL);
1961 __raw_writel(gpio_context[i].oe,
1962 bank->base + OMAP24XX_GPIO_OE);
1963 __raw_writel(gpio_context[i].leveldetect0,
1964 bank->base + OMAP24XX_GPIO_LEVELDETECT0);
1965 __raw_writel(gpio_context[i].leveldetect1,
1966 bank->base + OMAP24XX_GPIO_LEVELDETECT1);
1967 __raw_writel(gpio_context[i].risingdetect,
1968 bank->base + OMAP24XX_GPIO_RISINGDETECT);
1969 __raw_writel(gpio_context[i].fallingdetect,
1970 bank->base + OMAP24XX_GPIO_FALLINGDETECT);
1971 __raw_writel(gpio_context[i].dataout,
1972 bank->base + OMAP24XX_GPIO_DATAOUT);
1973 }
1974}
1975#endif
1976
1977static struct platform_driver omap_gpio_driver = {
1978 .probe = omap_gpio_probe,
1979 .driver = {
1980 .name = "omap_gpio",
1981 },
1982};
1983
1984/*
1985 * GPIO driver registration needs to happen before machine_init
1986 * functions access the GPIO APIs.
1987 * Hence omap_gpio_drv_reg() is a postcore_initcall.
1988 */
1989static int __init omap_gpio_drv_reg(void)
1990{
1991 return platform_driver_register(&omap_gpio_driver);
1992}
1993postcore_initcall(omap_gpio_drv_reg);
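/*
 * Initcall-ordering sketch (general kernel behavior, not specific to
 * this file): postcore_initcall() runs after core_initcall() and
 * before arch_initcall(), which is when board/machine init code
 * typically runs, so this platform driver is already registered by
 * the time board files start requesting GPIOs.
 */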
1994
1995static int __init omap_gpio_sysinit(void)
1996{
1997 mpuio_init();
1998
1999#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
2000 if (cpu_is_omap16xx() || cpu_class_is_omap2())
2001 register_syscore_ops(&omap_gpio_syscore_ops);
2002#endif
2003
2004 return 0;
2005}
2006
2007arch_initcall(omap_gpio_sysinit);
diff --git a/drivers/gpio/gpio-plat-samsung.c b/drivers/gpio/gpio-plat-samsung.c
new file mode 100644
index 000000000000..ea37c0461788
--- /dev/null
+++ b/drivers/gpio/gpio-plat-samsung.c
@@ -0,0 +1,206 @@
1/* arch/arm/plat-samsung/gpiolib.c
2 *
3 * Copyright 2008 Openmoko, Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
9 * http://www.samsung.com/
10 *
11 * SAMSUNG - GPIOlib support
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17
18#include <linux/kernel.h>
19#include <linux/irq.h>
20#include <linux/io.h>
21#include <linux/gpio.h>
22#include <plat/gpio-core.h>
23#include <plat/gpio-cfg.h>
24#include <plat/gpio-cfg-helpers.h>
25
26#ifndef DEBUG_GPIO
27#define gpio_dbg(x...) do { } while (0)
28#else
29#define gpio_dbg(x...) printk(KERN_DEBUG x)
30#endif
31
32/* The samsung_gpiolib_4bit routines control the gpio banks where
33 * the gpio configuration register (GPxCON) has 4 bits per GPIO, as in the
34 * following example:
35 *
36 * base + 0x00: Control register, 4 bits per gpio
37 * gpio n: 4 bits starting at (4*n)
38 * 0000 = input, 0001 = output, others mean special-function
39 * base + 0x04: Data register, 1 bit per gpio
40 * bit n: data bit n
41 *
42 * Note, since the data register is one bit per gpio and is at base + 0x4
43 * we can use s3c_gpiolib_get and s3c_gpiolib_set to change the state of
44 * the output.
45*/
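/*
 * Worked example of the layout above (a sketch, assuming the usual
 * definition con_4bit_shift(off) == (off) * 4): for GPIO 3 the CON
 * field is bits 15:12, so configuring it as an output is:
 *
 *	con &= ~(0xf << con_4bit_shift(3));	clear bits 15:12
 *	con |= 0x1 << con_4bit_shift(3);	0001 = output
 */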
46
47static int samsung_gpiolib_4bit_input(struct gpio_chip *chip,
48 unsigned int offset)
49{
50 struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
51 void __iomem *base = ourchip->base;
52 unsigned long con;
53
54 con = __raw_readl(base + GPIOCON_OFF);
55 con &= ~(0xf << con_4bit_shift(offset));
56 __raw_writel(con, base + GPIOCON_OFF);
57
58 gpio_dbg("%s: %p: CON now %08lx\n", __func__, base, con);
59
60 return 0;
61}
62
63static int samsung_gpiolib_4bit_output(struct gpio_chip *chip,
64 unsigned int offset, int value)
65{
66 struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
67 void __iomem *base = ourchip->base;
68 unsigned long con;
69 unsigned long dat;
70
71 con = __raw_readl(base + GPIOCON_OFF);
72 con &= ~(0xf << con_4bit_shift(offset));
73 con |= 0x1 << con_4bit_shift(offset);
74
75 dat = __raw_readl(base + GPIODAT_OFF);
76
77 if (value)
78 dat |= 1 << offset;
79 else
80 dat &= ~(1 << offset);
81
82 __raw_writel(dat, base + GPIODAT_OFF);
83 __raw_writel(con, base + GPIOCON_OFF);
84 __raw_writel(dat, base + GPIODAT_OFF);
85
86 gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
87
88 return 0;
89}
90
91/* The next set of routines are for the case where the GPIO configuration
92 * registers are 4 bits per GPIO but there is more than one register (the
93 * bank has more than 8 GPIOs).
94 *
95 * This case is similar to the 4 bit case, but the registers are as
96 * follows:
97 *
98 * base + 0x00: Control register, 4 bits per gpio (lower 8 GPIOs)
99 * gpio n: 4 bits starting at (4*n)
100 * 0000 = input, 0001 = output, others mean special-function
101 * base + 0x04: Control register, 4 bits per gpio (up to 8 additional GPIOs)
102 * gpio n: 4 bits starting at (4*n)
103 * 0000 = input, 0001 = output, others mean special-function
104 * base + 0x08: Data register, 1 bit per gpio
105 * bit n: data bit n
106 *
107 * To allow us to use the s3c_gpiolib_get and s3c_gpiolib_set routines we
108 * store the 'base + 0x4' address so that these routines see the data
109 * register at ourchip->base + 0x04.
110 */
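/*
 * Worked example of the two-register case (a sketch; the stored base
 * is base + 0x04 as described above): for GPIO 10, offset > 7, so the
 * field lives in the second control register at bits 11:8 (offset 10
 * becomes 2 after the -= 8); for GPIO 5, regcon is moved back 4 bytes
 * to the first control register and the field is bits 23:20.
 */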
111
112static int samsung_gpiolib_4bit2_input(struct gpio_chip *chip,
113 unsigned int offset)
114{
115 struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
116 void __iomem *base = ourchip->base;
117 void __iomem *regcon = base;
118 unsigned long con;
119
120 if (offset > 7)
121 offset -= 8;
122 else
123 regcon -= 4;
124
125 con = __raw_readl(regcon);
126 con &= ~(0xf << con_4bit_shift(offset));
127 __raw_writel(con, regcon);
128
129 gpio_dbg("%s: %p: CON %08lx\n", __func__, base, con);
130
131 return 0;
132}
133
134static int samsung_gpiolib_4bit2_output(struct gpio_chip *chip,
135 unsigned int offset, int value)
136{
137 struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
138 void __iomem *base = ourchip->base;
139 void __iomem *regcon = base;
140 unsigned long con;
141 unsigned long dat;
142 unsigned con_offset = offset;
143
144 if (con_offset > 7)
145 con_offset -= 8;
146 else
147 regcon -= 4;
148
149 con = __raw_readl(regcon);
150 con &= ~(0xf << con_4bit_shift(con_offset));
151 con |= 0x1 << con_4bit_shift(con_offset);
152
153 dat = __raw_readl(base + GPIODAT_OFF);
154
155 if (value)
156 dat |= 1 << offset;
157 else
158 dat &= ~(1 << offset);
159
160 __raw_writel(dat, base + GPIODAT_OFF);
161 __raw_writel(con, regcon);
162 __raw_writel(dat, base + GPIODAT_OFF);
163
164 gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
165
166 return 0;
167}
168
169void __init samsung_gpiolib_add_4bit(struct s3c_gpio_chip *chip)
170{
171 chip->chip.direction_input = samsung_gpiolib_4bit_input;
172 chip->chip.direction_output = samsung_gpiolib_4bit_output;
173 chip->pm = __gpio_pm(&s3c_gpio_pm_4bit);
174}
175
176void __init samsung_gpiolib_add_4bit2(struct s3c_gpio_chip *chip)
177{
178 chip->chip.direction_input = samsung_gpiolib_4bit2_input;
179 chip->chip.direction_output = samsung_gpiolib_4bit2_output;
180 chip->pm = __gpio_pm(&s3c_gpio_pm_4bit);
181}
182
183void __init samsung_gpiolib_add_4bit_chips(struct s3c_gpio_chip *chip,
184 int nr_chips)
185{
186 for (; nr_chips > 0; nr_chips--, chip++) {
187 samsung_gpiolib_add_4bit(chip);
188 s3c_gpiolib_add(chip);
189 }
190}
191
192void __init samsung_gpiolib_add_4bit2_chips(struct s3c_gpio_chip *chip,
193 int nr_chips)
194{
195 for (; nr_chips > 0; nr_chips--, chip++) {
196 samsung_gpiolib_add_4bit2(chip);
197 s3c_gpiolib_add(chip);
198 }
199}
200
201void __init samsung_gpiolib_add_2bit_chips(struct s3c_gpio_chip *chip,
202 int nr_chips)
203{
204 for (; nr_chips > 0; nr_chips--, chip++)
205 s3c_gpiolib_add(chip);
206}
diff --git a/drivers/gpio/gpio-s5pc100.c b/drivers/gpio/gpio-s5pc100.c
new file mode 100644
index 000000000000..2842394b28b5
--- /dev/null
+++ b/drivers/gpio/gpio-s5pc100.c
@@ -0,0 +1,355 @@
1/* linux/arch/arm/mach-s5pc100/gpiolib.c
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Copyright 2009 Samsung Electronics Co
7 * Kyungmin Park <kyungmin.park@samsung.com>
8 *
9 * S5PC100 - GPIOlib support
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/kernel.h>
17#include <linux/irq.h>
18#include <linux/io.h>
19#include <linux/gpio.h>
20
21#include <mach/map.h>
22#include <mach/regs-gpio.h>
23
24#include <plat/gpio-core.h>
25#include <plat/gpio-cfg.h>
26#include <plat/gpio-cfg-helpers.h>
27
28/* S5PC100 GPIO bank summary:
29 *
30 * Bank GPIOs Style INT Type
31 * A0 8 4Bit GPIO_INT0
32 * A1 5 4Bit GPIO_INT1
33 * B 8 4Bit GPIO_INT2
34 * C 5 4Bit GPIO_INT3
35 * D 7 4Bit GPIO_INT4
36 * E0 8 4Bit GPIO_INT5
37 * E1 6 4Bit GPIO_INT6
38 * F0 8 4Bit GPIO_INT7
39 * F1 8 4Bit GPIO_INT8
40 * F2 8 4Bit GPIO_INT9
41 * F3 4 4Bit GPIO_INT10
42 * G0 8 4Bit GPIO_INT11
43 * G1 3 4Bit GPIO_INT12
44 * G2 7 4Bit GPIO_INT13
45 * G3 7 4Bit GPIO_INT14
46 * H0 8 4Bit WKUP_INT
47 * H1 8 4Bit WKUP_INT
48 * H2 8 4Bit WKUP_INT
49 * H3 8 4Bit WKUP_INT
50 * I 8 4Bit GPIO_INT15
51 * J0 8 4Bit GPIO_INT16
52 * J1 5 4Bit GPIO_INT17
53 * J2 8 4Bit GPIO_INT18
54 * J3 8 4Bit GPIO_INT19
55 * J4 4 4Bit GPIO_INT20
56 * K0 8 4Bit None
57 * K1 6 4Bit None
58 * K2 8 4Bit None
59 * K3 8 4Bit None
60 * L0 8 4Bit None
61 * L1 8 4Bit None
62 * L2 8 4Bit None
63 * L3 8 4Bit None
64 */
65
66static struct s3c_gpio_cfg gpio_cfg = {
67 .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
68 .set_pull = s3c_gpio_setpull_updown,
69 .get_pull = s3c_gpio_getpull_updown,
70};
71
72static struct s3c_gpio_cfg gpio_cfg_eint = {
73 .cfg_eint = 0xf,
74 .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
75 .set_pull = s3c_gpio_setpull_updown,
76 .get_pull = s3c_gpio_getpull_updown,
77};
78
79static struct s3c_gpio_cfg gpio_cfg_noint = {
80 .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
81 .set_pull = s3c_gpio_setpull_updown,
82 .get_pull = s3c_gpio_getpull_updown,
83};
84
85/*
86 * GPIO bank's base address given the index of the bank in the
87 * list of all gpio banks.
88 */
89#define S5PC100_BANK_BASE(bank_nr) (S5P_VA_GPIO + ((bank_nr) * 0x20))
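/* Example: bank index 2 (GPB, the third entry below) maps to S5P_VA_GPIO + 0x40. */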
90
91/*
92 * Following are the gpio banks in S5PC100.
93 *
94 * The 'config' member, when left NULL, is initialized to the default
95 * structure gpio_cfg in the init function below.
96 *
97 * The 'base' member is also initialized in the init function below.
98 * Note: The initialization of 'base' member of s3c_gpio_chip structure
99 * uses the above macro and depends on the banks being listed in order here.
100 */
101static struct s3c_gpio_chip s5pc100_gpio_chips[] = {
102 {
103 .chip = {
104 .base = S5PC100_GPA0(0),
105 .ngpio = S5PC100_GPIO_A0_NR,
106 .label = "GPA0",
107 },
108 }, {
109 .chip = {
110 .base = S5PC100_GPA1(0),
111 .ngpio = S5PC100_GPIO_A1_NR,
112 .label = "GPA1",
113 },
114 }, {
115 .chip = {
116 .base = S5PC100_GPB(0),
117 .ngpio = S5PC100_GPIO_B_NR,
118 .label = "GPB",
119 },
120 }, {
121 .chip = {
122 .base = S5PC100_GPC(0),
123 .ngpio = S5PC100_GPIO_C_NR,
124 .label = "GPC",
125 },
126 }, {
127 .chip = {
128 .base = S5PC100_GPD(0),
129 .ngpio = S5PC100_GPIO_D_NR,
130 .label = "GPD",
131 },
132 }, {
133 .chip = {
134 .base = S5PC100_GPE0(0),
135 .ngpio = S5PC100_GPIO_E0_NR,
136 .label = "GPE0",
137 },
138 }, {
139 .chip = {
140 .base = S5PC100_GPE1(0),
141 .ngpio = S5PC100_GPIO_E1_NR,
142 .label = "GPE1",
143 },
144 }, {
145 .chip = {
146 .base = S5PC100_GPF0(0),
147 .ngpio = S5PC100_GPIO_F0_NR,
148 .label = "GPF0",
149 },
150 }, {
151 .chip = {
152 .base = S5PC100_GPF1(0),
153 .ngpio = S5PC100_GPIO_F1_NR,
154 .label = "GPF1",
155 },
156 }, {
157 .chip = {
158 .base = S5PC100_GPF2(0),
159 .ngpio = S5PC100_GPIO_F2_NR,
160 .label = "GPF2",
161 },
162 }, {
163 .chip = {
164 .base = S5PC100_GPF3(0),
165 .ngpio = S5PC100_GPIO_F3_NR,
166 .label = "GPF3",
167 },
168 }, {
169 .chip = {
170 .base = S5PC100_GPG0(0),
171 .ngpio = S5PC100_GPIO_G0_NR,
172 .label = "GPG0",
173 },
174 }, {
175 .chip = {
176 .base = S5PC100_GPG1(0),
177 .ngpio = S5PC100_GPIO_G1_NR,
178 .label = "GPG1",
179 },
180 }, {
181 .chip = {
182 .base = S5PC100_GPG2(0),
183 .ngpio = S5PC100_GPIO_G2_NR,
184 .label = "GPG2",
185 },
186 }, {
187 .chip = {
188 .base = S5PC100_GPG3(0),
189 .ngpio = S5PC100_GPIO_G3_NR,
190 .label = "GPG3",
191 },
192 }, {
193 .chip = {
194 .base = S5PC100_GPI(0),
195 .ngpio = S5PC100_GPIO_I_NR,
196 .label = "GPI",
197 },
198 }, {
199 .chip = {
200 .base = S5PC100_GPJ0(0),
201 .ngpio = S5PC100_GPIO_J0_NR,
202 .label = "GPJ0",
203 },
204 }, {
205 .chip = {
206 .base = S5PC100_GPJ1(0),
207 .ngpio = S5PC100_GPIO_J1_NR,
208 .label = "GPJ1",
209 },
210 }, {
211 .chip = {
212 .base = S5PC100_GPJ2(0),
213 .ngpio = S5PC100_GPIO_J2_NR,
214 .label = "GPJ2",
215 },
216 }, {
217 .chip = {
218 .base = S5PC100_GPJ3(0),
219 .ngpio = S5PC100_GPIO_J3_NR,
220 .label = "GPJ3",
221 },
222 }, {
223 .chip = {
224 .base = S5PC100_GPJ4(0),
225 .ngpio = S5PC100_GPIO_J4_NR,
226 .label = "GPJ4",
227 },
228 }, {
229 .config = &gpio_cfg_noint,
230 .chip = {
231 .base = S5PC100_GPK0(0),
232 .ngpio = S5PC100_GPIO_K0_NR,
233 .label = "GPK0",
234 },
235 }, {
236 .config = &gpio_cfg_noint,
237 .chip = {
238 .base = S5PC100_GPK1(0),
239 .ngpio = S5PC100_GPIO_K1_NR,
240 .label = "GPK1",
241 },
242 }, {
243 .config = &gpio_cfg_noint,
244 .chip = {
245 .base = S5PC100_GPK2(0),
246 .ngpio = S5PC100_GPIO_K2_NR,
247 .label = "GPK2",
248 },
249 }, {
250 .config = &gpio_cfg_noint,
251 .chip = {
252 .base = S5PC100_GPK3(0),
253 .ngpio = S5PC100_GPIO_K3_NR,
254 .label = "GPK3",
255 },
256 }, {
257 .config = &gpio_cfg_noint,
258 .chip = {
259 .base = S5PC100_GPL0(0),
260 .ngpio = S5PC100_GPIO_L0_NR,
261 .label = "GPL0",
262 },
263 }, {
264 .config = &gpio_cfg_noint,
265 .chip = {
266 .base = S5PC100_GPL1(0),
267 .ngpio = S5PC100_GPIO_L1_NR,
268 .label = "GPL1",
269 },
270 }, {
271 .config = &gpio_cfg_noint,
272 .chip = {
273 .base = S5PC100_GPL2(0),
274 .ngpio = S5PC100_GPIO_L2_NR,
275 .label = "GPL2",
276 },
277 }, {
278 .config = &gpio_cfg_noint,
279 .chip = {
280 .base = S5PC100_GPL3(0),
281 .ngpio = S5PC100_GPIO_L3_NR,
282 .label = "GPL3",
283 },
284 }, {
285 .config = &gpio_cfg_noint,
286 .chip = {
287 .base = S5PC100_GPL4(0),
288 .ngpio = S5PC100_GPIO_L4_NR,
289 .label = "GPL4",
290 },
291 }, {
292 .base = (S5P_VA_GPIO + 0xC00),
293 .config = &gpio_cfg_eint,
294 .irq_base = IRQ_EINT(0),
295 .chip = {
296 .base = S5PC100_GPH0(0),
297 .ngpio = S5PC100_GPIO_H0_NR,
298 .label = "GPH0",
299 .to_irq = samsung_gpiolib_to_irq,
300 },
301 }, {
302 .base = (S5P_VA_GPIO + 0xC20),
303 .config = &gpio_cfg_eint,
304 .irq_base = IRQ_EINT(8),
305 .chip = {
306 .base = S5PC100_GPH1(0),
307 .ngpio = S5PC100_GPIO_H1_NR,
308 .label = "GPH1",
309 .to_irq = samsung_gpiolib_to_irq,
310 },
311 }, {
312 .base = (S5P_VA_GPIO + 0xC40),
313 .config = &gpio_cfg_eint,
314 .irq_base = IRQ_EINT(16),
315 .chip = {
316 .base = S5PC100_GPH2(0),
317 .ngpio = S5PC100_GPIO_H2_NR,
318 .label = "GPH2",
319 .to_irq = samsung_gpiolib_to_irq,
320 },
321 }, {
322 .base = (S5P_VA_GPIO + 0xC60),
323 .config = &gpio_cfg_eint,
324 .irq_base = IRQ_EINT(24),
325 .chip = {
326 .base = S5PC100_GPH3(0),
327 .ngpio = S5PC100_GPIO_H3_NR,
328 .label = "GPH3",
329 .to_irq = samsung_gpiolib_to_irq,
330 },
331 },
332};
333
334static __init int s5pc100_gpiolib_init(void)
335{
336 struct s3c_gpio_chip *chip = s5pc100_gpio_chips;
337 int nr_chips = ARRAY_SIZE(s5pc100_gpio_chips);
338 int gpioint_group = 0;
339 int i;
340
341 for (i = 0; i < nr_chips; i++, chip++) {
342 if (chip->config == NULL) {
343 chip->config = &gpio_cfg;
344 chip->group = gpioint_group++;
345 }
346 if (chip->base == NULL)
347 chip->base = S5PC100_BANK_BASE(i);
348 }
349
350 samsung_gpiolib_add_4bit_chips(s5pc100_gpio_chips, nr_chips);
351 s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR);
352
353 return 0;
354}
355core_initcall(s5pc100_gpiolib_init);
diff --git a/drivers/gpio/gpio-s5pv210.c b/drivers/gpio/gpio-s5pv210.c
new file mode 100644
index 000000000000..1ba20a703e05
--- /dev/null
+++ b/drivers/gpio/gpio-s5pv210.c
@@ -0,0 +1,288 @@
1/* linux/arch/arm/mach-s5pv210/gpiolib.c
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * S5PV210 - GPIOlib support
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/kernel.h>
14#include <linux/irq.h>
15#include <linux/io.h>
16#include <linux/gpio.h>
17#include <plat/gpio-core.h>
18#include <plat/gpio-cfg.h>
19#include <plat/gpio-cfg-helpers.h>
20#include <mach/map.h>
21
22static struct s3c_gpio_cfg gpio_cfg = {
23 .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
24 .set_pull = s3c_gpio_setpull_updown,
25 .get_pull = s3c_gpio_getpull_updown,
26};
27
28static struct s3c_gpio_cfg gpio_cfg_noint = {
29 .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
30 .set_pull = s3c_gpio_setpull_updown,
31 .get_pull = s3c_gpio_getpull_updown,
32};
33
34/* GPIO bank's base address given the index of the bank in the
35 * list of all gpio banks.
36 */
37#define S5PV210_BANK_BASE(bank_nr) (S5P_VA_GPIO + ((bank_nr) * 0x20))
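/* Example: bank index 1 (GPA1, the second entry below) maps to S5P_VA_GPIO + 0x20. */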
38
39/*
40 * Following are the gpio banks in v210.
41 *
42 * The 'config' member, when left NULL, is initialized to the default
43 * structure gpio_cfg in the init function below.
44 *
45 * The 'base' member is also initialized in the init function below.
46 * Note: The initialization of 'base' member of s3c_gpio_chip structure
47 * uses the above macro and depends on the banks being listed in order here.
48 */
49static struct s3c_gpio_chip s5pv210_gpio_4bit[] = {
50 {
51 .chip = {
52 .base = S5PV210_GPA0(0),
53 .ngpio = S5PV210_GPIO_A0_NR,
54 .label = "GPA0",
55 },
56 }, {
57 .chip = {
58 .base = S5PV210_GPA1(0),
59 .ngpio = S5PV210_GPIO_A1_NR,
60 .label = "GPA1",
61 },
62 }, {
63 .chip = {
64 .base = S5PV210_GPB(0),
65 .ngpio = S5PV210_GPIO_B_NR,
66 .label = "GPB",
67 },
68 }, {
69 .chip = {
70 .base = S5PV210_GPC0(0),
71 .ngpio = S5PV210_GPIO_C0_NR,
72 .label = "GPC0",
73 },
74 }, {
75 .chip = {
76 .base = S5PV210_GPC1(0),
77 .ngpio = S5PV210_GPIO_C1_NR,
78 .label = "GPC1",
79 },
80 }, {
81 .chip = {
82 .base = S5PV210_GPD0(0),
83 .ngpio = S5PV210_GPIO_D0_NR,
84 .label = "GPD0",
85 },
86 }, {
87 .chip = {
88 .base = S5PV210_GPD1(0),
89 .ngpio = S5PV210_GPIO_D1_NR,
90 .label = "GPD1",
91 },
92 }, {
93 .chip = {
94 .base = S5PV210_GPE0(0),
95 .ngpio = S5PV210_GPIO_E0_NR,
96 .label = "GPE0",
97 },
98 }, {
99 .chip = {
100 .base = S5PV210_GPE1(0),
101 .ngpio = S5PV210_GPIO_E1_NR,
102 .label = "GPE1",
103 },
104 }, {
105 .chip = {
106 .base = S5PV210_GPF0(0),
107 .ngpio = S5PV210_GPIO_F0_NR,
108 .label = "GPF0",
109 },
110 }, {
111 .chip = {
112 .base = S5PV210_GPF1(0),
113 .ngpio = S5PV210_GPIO_F1_NR,
114 .label = "GPF1",
115 },
116 }, {
117 .chip = {
118 .base = S5PV210_GPF2(0),
119 .ngpio = S5PV210_GPIO_F2_NR,
120 .label = "GPF2",
121 },
122 }, {
123 .chip = {
124 .base = S5PV210_GPF3(0),
125 .ngpio = S5PV210_GPIO_F3_NR,
126 .label = "GPF3",
127 },
128 }, {
129 .chip = {
130 .base = S5PV210_GPG0(0),
131 .ngpio = S5PV210_GPIO_G0_NR,
132 .label = "GPG0",
133 },
134 }, {
135 .chip = {
136 .base = S5PV210_GPG1(0),
137 .ngpio = S5PV210_GPIO_G1_NR,
138 .label = "GPG1",
139 },
140 }, {
141 .chip = {
142 .base = S5PV210_GPG2(0),
143 .ngpio = S5PV210_GPIO_G2_NR,
144 .label = "GPG2",
145 },
146 }, {
147 .chip = {
148 .base = S5PV210_GPG3(0),
149 .ngpio = S5PV210_GPIO_G3_NR,
150 .label = "GPG3",
151 },
152 }, {
153 .config = &gpio_cfg_noint,
154 .chip = {
155 .base = S5PV210_GPI(0),
156 .ngpio = S5PV210_GPIO_I_NR,
157 .label = "GPI",
158 },
159 }, {
160 .chip = {
161 .base = S5PV210_GPJ0(0),
162 .ngpio = S5PV210_GPIO_J0_NR,
163 .label = "GPJ0",
164 },
165 }, {
166 .chip = {
167 .base = S5PV210_GPJ1(0),
168 .ngpio = S5PV210_GPIO_J1_NR,
169 .label = "GPJ1",
170 },
171 }, {
172 .chip = {
173 .base = S5PV210_GPJ2(0),
174 .ngpio = S5PV210_GPIO_J2_NR,
175 .label = "GPJ2",
176 },
177 }, {
178 .chip = {
179 .base = S5PV210_GPJ3(0),
180 .ngpio = S5PV210_GPIO_J3_NR,
181 .label = "GPJ3",
182 },
183 }, {
184 .chip = {
185 .base = S5PV210_GPJ4(0),
186 .ngpio = S5PV210_GPIO_J4_NR,
187 .label = "GPJ4",
188 },
189 }, {
190 .config = &gpio_cfg_noint,
191 .chip = {
192 .base = S5PV210_MP01(0),
193 .ngpio = S5PV210_GPIO_MP01_NR,
194 .label = "MP01",
195 },
196 }, {
197 .config = &gpio_cfg_noint,
198 .chip = {
199 .base = S5PV210_MP02(0),
200 .ngpio = S5PV210_GPIO_MP02_NR,
201 .label = "MP02",
202 },
203 }, {
204 .config = &gpio_cfg_noint,
205 .chip = {
206 .base = S5PV210_MP03(0),
207 .ngpio = S5PV210_GPIO_MP03_NR,
208 .label = "MP03",
209 },
210 }, {
211 .config = &gpio_cfg_noint,
212 .chip = {
213 .base = S5PV210_MP04(0),
214 .ngpio = S5PV210_GPIO_MP04_NR,
215 .label = "MP04",
216 },
217 }, {
218 .config = &gpio_cfg_noint,
219 .chip = {
220 .base = S5PV210_MP05(0),
221 .ngpio = S5PV210_GPIO_MP05_NR,
222 .label = "MP05",
223 },
224 }, {
225 .base = (S5P_VA_GPIO + 0xC00),
226 .config = &gpio_cfg_noint,
227 .irq_base = IRQ_EINT(0),
228 .chip = {
229 .base = S5PV210_GPH0(0),
230 .ngpio = S5PV210_GPIO_H0_NR,
231 .label = "GPH0",
232 .to_irq = samsung_gpiolib_to_irq,
233 },
234 }, {
235 .base = (S5P_VA_GPIO + 0xC20),
236 .config = &gpio_cfg_noint,
237 .irq_base = IRQ_EINT(8),
238 .chip = {
239 .base = S5PV210_GPH1(0),
240 .ngpio = S5PV210_GPIO_H1_NR,
241 .label = "GPH1",
242 .to_irq = samsung_gpiolib_to_irq,
243 },
244 }, {
245 .base = (S5P_VA_GPIO + 0xC40),
246 .config = &gpio_cfg_noint,
247 .irq_base = IRQ_EINT(16),
248 .chip = {
249 .base = S5PV210_GPH2(0),
250 .ngpio = S5PV210_GPIO_H2_NR,
251 .label = "GPH2",
252 .to_irq = samsung_gpiolib_to_irq,
253 },
254 }, {
255 .base = (S5P_VA_GPIO + 0xC60),
256 .config = &gpio_cfg_noint,
257 .irq_base = IRQ_EINT(24),
258 .chip = {
259 .base = S5PV210_GPH3(0),
260 .ngpio = S5PV210_GPIO_H3_NR,
261 .label = "GPH3",
262 .to_irq = samsung_gpiolib_to_irq,
263 },
264 },
265};
266
267static __init int s5pv210_gpiolib_init(void)
268{
269 struct s3c_gpio_chip *chip = s5pv210_gpio_4bit;
270 int nr_chips = ARRAY_SIZE(s5pv210_gpio_4bit);
271 int gpioint_group = 0;
272 int i = 0;
273
274 for (i = 0; i < nr_chips; i++, chip++) {
275 if (chip->config == NULL) {
276 chip->config = &gpio_cfg;
277 chip->group = gpioint_group++;
278 }
279 if (chip->base == NULL)
280 chip->base = S5PV210_BANK_BASE(i);
281 }
282
283 samsung_gpiolib_add_4bit_chips(s5pv210_gpio_4bit, nr_chips);
284 s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR);
285
286 return 0;
287}
288core_initcall(s5pv210_gpiolib_init);
diff --git a/drivers/gpio/gpio-u300.c b/drivers/gpio/gpio-u300.c
new file mode 100644
index 000000000000..d92790140fe5
--- /dev/null
+++ b/drivers/gpio/gpio-u300.c
@@ -0,0 +1,700 @@
1/*
2 *
 3 * drivers/gpio/gpio-u300.c
4 *
5 *
6 * Copyright (C) 2007-2009 ST-Ericsson AB
7 * License terms: GNU General Public License (GPL) version 2
8 * U300 GPIO module.
 9 * This driver can drive either of the two basic GPIO cores
10 * available in the U300 platforms:
11 * COH 901 335 - Used in DB3150 (U300 1.0) and DB3200 (U330 1.0)
12 * COH 901 571/3 - Used in DB3210 (U365 2.0) and DB3350 (U335 1.0)
13 * Notice that you also have inline macros in <asm-arch/gpio.h>
14 * Author: Linus Walleij <linus.walleij@stericsson.com>
15 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
16 *
17 */
18#include <linux/module.h>
19#include <linux/interrupt.h>
20#include <linux/delay.h>
21#include <linux/errno.h>
22#include <linux/io.h>
23#include <linux/clk.h>
24#include <linux/err.h>
25#include <linux/platform_device.h>
26#include <linux/gpio.h>
27
28/* Reference to GPIO block clock */
29static struct clk *clk;
30
31/* Memory resource */
32static struct resource *memres;
33static void __iomem *virtbase;
34static struct device *gpiodev;
35
36struct u300_gpio_port {
37 const char *name;
38 int irq;
39 int number;
40};
41
42
43static struct u300_gpio_port gpio_ports[] = {
44 {
45 .name = "gpio0",
46 .number = 0,
47 },
48 {
49 .name = "gpio1",
50 .number = 1,
51 },
52 {
53 .name = "gpio2",
54 .number = 2,
55 },
56#ifdef U300_COH901571_3
57 {
58 .name = "gpio3",
59 .number = 3,
60 },
61 {
62 .name = "gpio4",
63 .number = 4,
64 },
65#ifdef CONFIG_MACH_U300_BS335
66 {
67 .name = "gpio5",
68 .number = 5,
69 },
70 {
71 .name = "gpio6",
72 .number = 6,
73 },
74#endif
75#endif
76
77};
78
79
80#ifdef U300_COH901571_3
81
82/* Default output value */
83#define DEFAULT_OUTPUT_LOW 0
84#define DEFAULT_OUTPUT_HIGH 1
85
86/* GPIO Pull-Up status */
87#define DISABLE_PULL_UP 0
88#define ENABLE_PULL_UP 1
89
90#define GPIO_NOT_USED 0
91#define GPIO_IN 1
92#define GPIO_OUT 2
93
94struct u300_gpio_configuration_data {
95 unsigned char pin_usage;
96 unsigned char default_output_value;
97 unsigned char pull_up;
98};
99
100/* Initial configuration */
101const struct u300_gpio_configuration_data
102u300_gpio_config[U300_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
103#ifdef CONFIG_MACH_U300_BS335
104 /* Port 0, pins 0-7 */
105 {
106 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
107 {GPIO_OUT, DEFAULT_OUTPUT_HIGH, DISABLE_PULL_UP},
108 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
109 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
110 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
111 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
112 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
113 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
114 },
115 /* Port 1, pins 0-7 */
116 {
117 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
118 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
119 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
120 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
121 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
122 {GPIO_OUT, DEFAULT_OUTPUT_HIGH, DISABLE_PULL_UP},
123 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
124 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
125 },
126 /* Port 2, pins 0-7 */
127 {
128 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
129 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
130 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
131 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
132 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
133 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
134 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
135 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}
136 },
137 /* Port 3, pins 0-7 */
138 {
139 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
140 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
141 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
142 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
143 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
144 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
145 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
146 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
147 },
148 /* Port 4, pins 0-7 */
149 {
150 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
151 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
152 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
153 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
154 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
155 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
156 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
157 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
158 },
159 /* Port 5, pins 0-7 */
160 {
161 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
162 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
163 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
164 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
165 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
166 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
167 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
168 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
169 },
170	/* Port 6, pins 0-7 */
171 {
172 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
173 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
174 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
175 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
176 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
177 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
178 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
179 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
180 }
181#endif
182
183#ifdef CONFIG_MACH_U300_BS365
184 /* Port 0, pins 0-7 */
185 {
186 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
187 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
188 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
189 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
190 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
191 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
192 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
193 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
194 },
195 /* Port 1, pins 0-7 */
196 {
197 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
198 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
199 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
200 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
201 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
202 {GPIO_OUT, DEFAULT_OUTPUT_HIGH, DISABLE_PULL_UP},
203 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
204 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
205 },
206 /* Port 2, pins 0-7 */
207 {
208 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
209 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
210 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
211 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
212 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
213 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
214 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
215 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}
216 },
217 /* Port 3, pins 0-7 */
218 {
219 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
220 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
221 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
222 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
223 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
224 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
225 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
226 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}
227 },
228 /* Port 4, pins 0-7 */
229 {
230 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
231 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
232 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
233 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
234	/* These 4 pins don't exist on DB3210 */
235 {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
236 {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
237 {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
238 {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}
239 }
240#endif
241};
242#endif
243
244
245/* No users == we can power down GPIO */
246static int gpio_users;
247
248struct gpio_struct {
249 int (*callback)(void *);
250 void *data;
251 int users;
252};
253
254static struct gpio_struct gpio_pin[U300_GPIO_MAX];
255
256/*
257 * Let drivers register callback in order to get notified when there is
258 * an interrupt on the gpio pin
259 */
260int gpio_register_callback(unsigned gpio, int (*func)(void *arg), void *data)
261{
262 if (gpio_pin[gpio].callback)
263 dev_warn(gpiodev, "%s: WARNING: callback already "
264 "registered for gpio pin#%d\n", __func__, gpio);
265 gpio_pin[gpio].callback = func;
266 gpio_pin[gpio].data = data;
267
268 return 0;
269}
270EXPORT_SYMBOL(gpio_register_callback);
271
272int gpio_unregister_callback(unsigned gpio)
273{
274 if (!gpio_pin[gpio].callback)
275 dev_warn(gpiodev, "%s: WARNING: callback already "
276 "unregistered for gpio pin#%d\n", __func__, gpio);
277 gpio_pin[gpio].callback = NULL;
278 gpio_pin[gpio].data = NULL;
279
280 return 0;
281}
282EXPORT_SYMBOL(gpio_unregister_callback);
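/*
 * Usage sketch (illustrative only, not part of the driver): a client
 * that has requested a pin would hook its handler roughly like this,
 * where MY_GPIO, my_pin_event and my_state are hypothetical names:
 *
 *	static int my_pin_event(void *arg)
 *	{
 *		struct my_state *st = arg;
 *		...
 *		return 0;
 *	}
 *
 *	gpio_register_callback(MY_GPIO, my_pin_event, &my_state);
 *	...
 *	gpio_unregister_callback(MY_GPIO);
 */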
283
284/* Non-zero means valid */
285int gpio_is_valid(int number)
286{
287 if (number >= 0 &&
288 number < (U300_GPIO_NUM_PORTS * U300_GPIO_PINS_PER_PORT))
289 return 1;
290 return 0;
291}
292EXPORT_SYMBOL(gpio_is_valid);
293
294int gpio_request(unsigned gpio, const char *label)
295{
296	if (gpio_pin[gpio].users)
297		return -EBUSY;
298 else
299 gpio_pin[gpio].users++;
300
301 gpio_users++;
302
303 return 0;
304}
305EXPORT_SYMBOL(gpio_request);
306
307void gpio_free(unsigned gpio)
308{
309 gpio_users--;
310 gpio_pin[gpio].users--;
311 if (unlikely(gpio_pin[gpio].users < 0)) {
312 dev_warn(gpiodev, "warning: gpio#%d release mismatch\n",
313 gpio);
314 gpio_pin[gpio].users = 0;
315 }
316
317 return;
318}
319EXPORT_SYMBOL(gpio_free);
320
321/* This returns zero or nonzero */
322int gpio_get_value(unsigned gpio)
323{
324 return readl(virtbase + U300_GPIO_PXPDIR +
325 PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING) & (1 << (gpio & 0x07));
326}
327EXPORT_SYMBOL(gpio_get_value);
328
329/*
330 * We hope that the compiler will optimize away the unused branch
331 * in case "value" is a constant
332 */
333void gpio_set_value(unsigned gpio, int value)
334{
335 u32 val;
336 unsigned long flags;
337
338 local_irq_save(flags);
339	if (value) {
340		/* set: read the whole port so the other pins are preserved */
341		val = readl(virtbase + U300_GPIO_PXPDOR +
342		  PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
343
344		writel(val | (1 << (gpio & 0x07)), virtbase +
345		  U300_GPIO_PXPDOR +
346		  PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
347	} else {
348		/* clear: read the whole port so the other pins are preserved */
349		val = readl(virtbase + U300_GPIO_PXPDOR +
350		  PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
351
352		writel(val & ~(1 << (gpio & 0x07)), virtbase +
353		  U300_GPIO_PXPDOR +
354		  PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
355	}
356 local_irq_restore(flags);
357}
358EXPORT_SYMBOL(gpio_set_value);
359
360int gpio_direction_input(unsigned gpio)
361{
362 unsigned long flags;
363 u32 val;
364
365	if (gpio >= U300_GPIO_MAX)
366 return -EINVAL;
367
368 local_irq_save(flags);
369 val = readl(virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
370 U300_GPIO_PORTX_SPACING);
371	/* Mask out this pin */
372 val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << ((gpio & 0x07) << 1));
373	/* This is not needed since it sets the bits to zero. */
374 /* val |= (U300_GPIO_PXPCR_PIN_MODE_INPUT << (gpio*2)); */
375 writel(val, virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
376 U300_GPIO_PORTX_SPACING);
377 local_irq_restore(flags);
378 return 0;
379}
380EXPORT_SYMBOL(gpio_direction_input);
381
382int gpio_direction_output(unsigned gpio, int value)
383{
384 unsigned long flags;
385 u32 val;
386
387	if (gpio >= U300_GPIO_MAX)
388 return -EINVAL;
389
390 local_irq_save(flags);
391 val = readl(virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
392 U300_GPIO_PORTX_SPACING);
393 /* Mask out this pin */
394 val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << ((gpio & 0x07) << 1));
395 /*
396 * FIXME: configure for push/pull, open drain or open source per pin
397 * in setup. The current driver will only support push/pull.
398 */
399 val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL
400 << ((gpio & 0x07) << 1));
401 writel(val, virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
402 U300_GPIO_PORTX_SPACING);
403 gpio_set_value(gpio, value);
404 local_irq_restore(flags);
405 return 0;
406}
407EXPORT_SYMBOL(gpio_direction_output);
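/*
 * Usage sketch (illustrative only): the usual consumer sequence with
 * this driver's legacy gpiolib-style calls; MY_GPIO is hypothetical:
 *
 *	if (gpio_request(MY_GPIO, "my-signal"))
 *		return -EBUSY;
 *	gpio_direction_output(MY_GPIO, 0);
 *	gpio_set_value(MY_GPIO, 1);
 *	...
 *	gpio_free(MY_GPIO);
 */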
408
409/*
410 * Enable an IRQ, edge is rising edge (!= 0) or falling edge (==0).
411 */
412void enable_irq_on_gpio_pin(unsigned gpio, int edge)
413{
414 u32 val;
415 unsigned long flags;
416 local_irq_save(flags);
417
418 val = readl(virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
419 U300_GPIO_PORTX_SPACING);
420 val |= (1 << (gpio & 0x07));
421 writel(val, virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
422 U300_GPIO_PORTX_SPACING);
423 val = readl(virtbase + U300_GPIO_PXICR + PIN_TO_PORT(gpio) *
424 U300_GPIO_PORTX_SPACING);
425 if (edge)
426 val |= (1 << (gpio & 0x07));
427 else
428 val &= ~(1 << (gpio & 0x07));
429 writel(val, virtbase + U300_GPIO_PXICR + PIN_TO_PORT(gpio) *
430 U300_GPIO_PORTX_SPACING);
431 local_irq_restore(flags);
432}
433EXPORT_SYMBOL(enable_irq_on_gpio_pin);
434
435void disable_irq_on_gpio_pin(unsigned gpio)
436{
437 u32 val;
438 unsigned long flags;
439
440 local_irq_save(flags);
441 val = readl(virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
442 U300_GPIO_PORTX_SPACING);
443 val &= ~(1 << (gpio & 0x07));
444 writel(val, virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
445 U300_GPIO_PORTX_SPACING);
446 local_irq_restore(flags);
447}
448EXPORT_SYMBOL(disable_irq_on_gpio_pin);
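/*
 * Interrupt flow sketch (illustrative only): combined with the callback
 * API above, a rising-edge notification on a hypothetical MY_GPIO is:
 *
 *	gpio_register_callback(MY_GPIO, my_pin_event, &my_state);
 *	enable_irq_on_gpio_pin(MY_GPIO, 1);
 *
 * gpio_irq_handler() below then invokes my_pin_event() whenever the
 * port's event register flags the pin; call disable_irq_on_gpio_pin()
 * before unregistering the callback.
 */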
449
450/* Enable (value == 0) or disable (value == 1) the internal pull-up */
451void gpio_pullup(unsigned gpio, int value)
452{
453 u32 val;
454 unsigned long flags;
455
456 local_irq_save(flags);
457 if (value) {
458 val = readl(virtbase + U300_GPIO_PXPER + PIN_TO_PORT(gpio) *
459 U300_GPIO_PORTX_SPACING);
460 writel(val | (1 << (gpio & 0x07)), virtbase + U300_GPIO_PXPER +
461 PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
462 } else {
463 val = readl(virtbase + U300_GPIO_PXPER + PIN_TO_PORT(gpio) *
464 U300_GPIO_PORTX_SPACING);
465 writel(val & ~(1 << (gpio & 0x07)), virtbase + U300_GPIO_PXPER +
466 PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
467 }
468 local_irq_restore(flags);
469}
470EXPORT_SYMBOL(gpio_pullup);
471
472static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
473{
474 struct u300_gpio_port *port = dev_id;
475 u32 val;
476 int pin;
477
478 /* Read event register */
479 val = readl(virtbase + U300_GPIO_PXIEV + port->number *
480 U300_GPIO_PORTX_SPACING);
481 /* Mask with enable register */
482	val &= readl(virtbase + U300_GPIO_PXIEN + port->number *
483 U300_GPIO_PORTX_SPACING);
484 /* Mask relevant bits */
485 val &= U300_GPIO_PXIEV_ALL_IRQ_EVENT_MASK;
486 /* ACK IRQ (clear event) */
487 writel(val, virtbase + U300_GPIO_PXIEV + port->number *
488 U300_GPIO_PORTX_SPACING);
489	/* Dispatch the registered callback for each flagged pin */
490 while (val != 0) {
491 unsigned gpio;
492
493 pin = __ffs(val);
494 /* mask off this pin */
495 val &= ~(1 << pin);
496 gpio = (port->number << 3) + pin;
497
498 if (gpio_pin[gpio].callback)
499 (void)gpio_pin[gpio].callback(gpio_pin[gpio].data);
500 else
501 dev_dbg(gpiodev, "stray GPIO IRQ on line %d\n",
502 gpio);
503 }
504 return IRQ_HANDLED;
505}
506
507static void gpio_set_initial_values(void)
508{
509#ifdef U300_COH901571_3
510 int i, j;
511 unsigned long flags;
512 u32 val;
513
514 /* Write default values to all pins */
515 for (i = 0; i < U300_GPIO_NUM_PORTS; i++) {
516 val = 0;
517 for (j = 0; j < 8; j++)
518 val |= (u32) (u300_gpio_config[i][j].default_output_value != DEFAULT_OUTPUT_LOW) << j;
519 local_irq_save(flags);
520 writel(val, virtbase + U300_GPIO_PXPDOR + i * U300_GPIO_PORTX_SPACING);
521 local_irq_restore(flags);
522 }
523
524 /*
525	 * For each port, set pins marked 'GPIO_OUT' or 'GPIO_NOT_USED'
526	 * to output and pins marked 'GPIO_IN' to input. The default
527	 * output values were already written above.
528 */
529 for (i = 0; i < U300_GPIO_NUM_PORTS; i++) {
530 for (j = 0; j < U300_GPIO_PINS_PER_PORT; j++) {
531 local_irq_save(flags);
532 val = readl(virtbase + U300_GPIO_PXPCR +
533 i * U300_GPIO_PORTX_SPACING);
534 /* Mask out this pin */
535 val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << (j << 1));
536
537 if (u300_gpio_config[i][j].pin_usage != GPIO_IN)
538 val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL << (j << 1));
539 writel(val, virtbase + U300_GPIO_PXPCR +
540 i * U300_GPIO_PORTX_SPACING);
541 local_irq_restore(flags);
542 }
543 }
544
545 /* Enable or disable the internal pull-ups in the GPIO ASIC block */
546	for (i = 0; i < U300_GPIO_NUM_PORTS; i++) {
547 val = 0;
548 for (j = 0; j < 8; j++)
549 val |= (u32)((u300_gpio_config[i][j].pull_up == DISABLE_PULL_UP) << j);
550 local_irq_save(flags);
551 writel(val, virtbase + U300_GPIO_PXPER + i * U300_GPIO_PORTX_SPACING);
552 local_irq_restore(flags);
553 }
554#endif
555}
556
557static int __init gpio_probe(struct platform_device *pdev)
558{
559 u32 val;
560 int err = 0;
561 int i;
562 int num_irqs;
563
564 gpiodev = &pdev->dev;
565 memset(gpio_pin, 0, sizeof(gpio_pin));
566
567 /* Get GPIO clock */
568 clk = clk_get(&pdev->dev, NULL);
569 if (IS_ERR(clk)) {
570 err = PTR_ERR(clk);
571 dev_err(gpiodev, "could not get GPIO clock\n");
572 goto err_no_clk;
573 }
574 err = clk_enable(clk);
575 if (err) {
576 dev_err(gpiodev, "could not enable GPIO clock\n");
577 goto err_no_clk_enable;
578 }
579
580 memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
581 if (!memres)
582 goto err_no_resource;
583
584	if (request_mem_region(memres->start, resource_size(memres),
585			"GPIO Controller") == NULL) {
586 err = -ENODEV;
587 goto err_no_ioregion;
588 }
589
590 virtbase = ioremap(memres->start, resource_size(memres));
591 if (!virtbase) {
592 err = -ENOMEM;
593 goto err_no_ioremap;
594 }
595 dev_info(gpiodev, "remapped 0x%08x to %p\n",
596 memres->start, virtbase);
597
598#ifdef U300_COH901335
599 dev_info(gpiodev, "initializing GPIO Controller COH 901 335\n");
600 /* Turn on the GPIO block */
601 writel(U300_GPIO_CR_BLOCK_CLOCK_ENABLE, virtbase + U300_GPIO_CR);
602#endif
603
604#ifdef U300_COH901571_3
605 dev_info(gpiodev, "initializing GPIO Controller COH 901 571/3\n");
606 val = readl(virtbase + U300_GPIO_CR);
607	dev_info(gpiodev, "COH901571/3 block version: %d, "
608 "number of cores: %d\n",
609 ((val & 0x0000FE00) >> 9),
610 ((val & 0x000001FC) >> 2));
611 writel(U300_GPIO_CR_BLOCK_CLKRQ_ENABLE, virtbase + U300_GPIO_CR);
612#endif
613
614 gpio_set_initial_values();
615
616	for (num_irqs = 0; num_irqs < U300_GPIO_NUM_PORTS; num_irqs++) {
617
618 gpio_ports[num_irqs].irq =
619 platform_get_irq_byname(pdev,
620 gpio_ports[num_irqs].name);
621
622 err = request_irq(gpio_ports[num_irqs].irq,
623 gpio_irq_handler, IRQF_DISABLED,
624 gpio_ports[num_irqs].name,
625 &gpio_ports[num_irqs]);
626 if (err) {
627 dev_err(gpiodev, "cannot allocate IRQ for %s!\n",
628 gpio_ports[num_irqs].name);
629 goto err_no_irq;
630 }
631 /* Turns off PortX_irq_force */
632 writel(0x0, virtbase + U300_GPIO_PXIFR +
633 num_irqs * U300_GPIO_PORTX_SPACING);
634 }
635
636 return 0;
637
638 err_no_irq:
639 for (i = 0; i < num_irqs; i++)
640 free_irq(gpio_ports[i].irq, &gpio_ports[i]);
641 iounmap(virtbase);
642 err_no_ioremap:
643	release_mem_region(memres->start, resource_size(memres));
644 err_no_ioregion:
645 err_no_resource:
646 clk_disable(clk);
647 err_no_clk_enable:
648 clk_put(clk);
649 err_no_clk:
650	dev_err(gpiodev, "probe failed with error %d\n", err);
651 return err;
652}
653
654static int __exit gpio_remove(struct platform_device *pdev)
655{
656 int i;
657
658 /* Turn off the GPIO block */
659 writel(0x00000000U, virtbase + U300_GPIO_CR);
660	for (i = 0; i < U300_GPIO_NUM_PORTS; i++)
661 free_irq(gpio_ports[i].irq, &gpio_ports[i]);
662 iounmap(virtbase);
663	release_mem_region(memres->start, resource_size(memres));
664 clk_disable(clk);
665 clk_put(clk);
666 return 0;
667}
668
669static struct platform_driver gpio_driver = {
670 .driver = {
671 .name = "u300-gpio",
672 },
673 .remove = __exit_p(gpio_remove),
674};
675
676
677static int __init u300_gpio_init(void)
678{
679 return platform_driver_probe(&gpio_driver, gpio_probe);
680}
681
682static void __exit u300_gpio_exit(void)
683{
684 platform_driver_unregister(&gpio_driver);
685}
686
687arch_initcall(u300_gpio_init);
688module_exit(u300_gpio_exit);
689
690MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
691
692#ifdef U300_COH901571_3
693MODULE_DESCRIPTION("ST-Ericsson AB COH 901 571/3 GPIO driver");
694#endif
695
696#ifdef U300_COH901335
697MODULE_DESCRIPTION("ST-Ericsson AB COH 901 335 GPIO driver");
698#endif
699
700MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 137a8ca67822..a971e3d043ba 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1296,7 +1296,7 @@ EXPORT_SYMBOL_GPL(gpio_request_one);
1296 * @array: array of the 'struct gpio' 1296 * @array: array of the 'struct gpio'
1297 * @num: how many GPIOs in the array 1297 * @num: how many GPIOs in the array
1298 */ 1298 */
1299int gpio_request_array(struct gpio *array, size_t num) 1299int gpio_request_array(const struct gpio *array, size_t num)
1300{ 1300{
1301 int i, err; 1301 int i, err;
1302 1302
@@ -1319,7 +1319,7 @@ EXPORT_SYMBOL_GPL(gpio_request_array);
1319 * @array: array of the 'struct gpio' 1319 * @array: array of the 'struct gpio'
1320 * @num: how many GPIOs in the array 1320 * @num: how many GPIOs in the array
1321 */ 1321 */
1322void gpio_free_array(struct gpio *array, size_t num) 1322void gpio_free_array(const struct gpio *array, size_t num)
1323{ 1323{
1324 while (num--) 1324 while (num--)
1325 gpio_free((array++)->gpio); 1325 gpio_free((array++)->gpio);
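With the two prototypes above taking const pointers, board code can keep
its GPIO tables in read-only memory. A minimal sketch of a caller this
change enables (the table contents are illustrative):

	static const struct gpio board_gpios[] = {
		{ 42, GPIOF_OUT_INIT_LOW, "led" },
		{ 43, GPIOF_IN, "button" },
	};

	err = gpio_request_array(board_gpios, ARRAY_SIZE(board_gpios));
	...
	gpio_free_array(board_gpios, ARRAY_SIZE(board_gpios));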
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 1b06f67e1f69..bd6571e0097a 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -33,6 +33,7 @@
33#include <linux/io.h> 33#include <linux/io.h>
34#include <linux/gpio.h> 34#include <linux/gpio.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/pm_runtime.h>
36 37
37/* 38/*
38 * Langwell chip has 64 pins and thus there are 2 32bit registers to control 39 * Langwell chip has 64 pins and thus there are 2 32bit registers to control
@@ -63,6 +64,7 @@ struct lnw_gpio {
63 void *reg_base; 64 void *reg_base;
64 spinlock_t lock; 65 spinlock_t lock;
65 unsigned irq_base; 66 unsigned irq_base;
67 struct pci_dev *pdev;
66}; 68};
67 69
68static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset, 70static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
@@ -104,11 +106,18 @@ static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
104 u32 value; 106 u32 value;
105 unsigned long flags; 107 unsigned long flags;
106 108
109 if (lnw->pdev)
110 pm_runtime_get(&lnw->pdev->dev);
111
107 spin_lock_irqsave(&lnw->lock, flags); 112 spin_lock_irqsave(&lnw->lock, flags);
108 value = readl(gpdr); 113 value = readl(gpdr);
109 value &= ~BIT(offset % 32); 114 value &= ~BIT(offset % 32);
110 writel(value, gpdr); 115 writel(value, gpdr);
111 spin_unlock_irqrestore(&lnw->lock, flags); 116 spin_unlock_irqrestore(&lnw->lock, flags);
117
118 if (lnw->pdev)
119 pm_runtime_put(&lnw->pdev->dev);
120
112 return 0; 121 return 0;
113} 122}
114 123
@@ -120,11 +129,19 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip,
120 unsigned long flags; 129 unsigned long flags;
121 130
122 lnw_gpio_set(chip, offset, value); 131 lnw_gpio_set(chip, offset, value);
132
133 if (lnw->pdev)
134 pm_runtime_get(&lnw->pdev->dev);
135
123 spin_lock_irqsave(&lnw->lock, flags); 136 spin_lock_irqsave(&lnw->lock, flags);
124 value = readl(gpdr); 137 value = readl(gpdr);
125 value |= BIT(offset % 32); 138 value |= BIT(offset % 32);
126 writel(value, gpdr); 139 writel(value, gpdr);
127 spin_unlock_irqrestore(&lnw->lock, flags); 140 spin_unlock_irqrestore(&lnw->lock, flags);
141
142 if (lnw->pdev)
143 pm_runtime_put(&lnw->pdev->dev);
144
128 return 0; 145 return 0;
129} 146}
130 147
@@ -145,6 +162,10 @@ static int lnw_irq_type(struct irq_data *d, unsigned type)
145 162
146 if (gpio >= lnw->chip.ngpio) 163 if (gpio >= lnw->chip.ngpio)
147 return -EINVAL; 164 return -EINVAL;
165
166 if (lnw->pdev)
167 pm_runtime_get(&lnw->pdev->dev);
168
148 spin_lock_irqsave(&lnw->lock, flags); 169 spin_lock_irqsave(&lnw->lock, flags);
149 if (type & IRQ_TYPE_EDGE_RISING) 170 if (type & IRQ_TYPE_EDGE_RISING)
150 value = readl(grer) | BIT(gpio % 32); 171 value = readl(grer) | BIT(gpio % 32);
@@ -159,6 +180,9 @@ static int lnw_irq_type(struct irq_data *d, unsigned type)
159 writel(value, gfer); 180 writel(value, gfer);
160 spin_unlock_irqrestore(&lnw->lock, flags); 181 spin_unlock_irqrestore(&lnw->lock, flags);
161 182
183 if (lnw->pdev)
184 pm_runtime_put(&lnw->pdev->dev);
185
162 return 0; 186 return 0;
163} 187}
164 188
@@ -211,6 +235,39 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
211 chip->irq_eoi(data); 235 chip->irq_eoi(data);
212} 236}
213 237
238#ifdef CONFIG_PM
239static int lnw_gpio_runtime_resume(struct device *dev)
240{
241 return 0;
242}
243
244static int lnw_gpio_runtime_suspend(struct device *dev)
245{
246 return 0;
247}
248
249static int lnw_gpio_runtime_idle(struct device *dev)
250{
251 int err = pm_schedule_suspend(dev, 500);
252
253 if (!err)
254 return 0;
255
256 return -EBUSY;
257}
258
259#else
260#define lnw_gpio_runtime_suspend NULL
261#define lnw_gpio_runtime_resume NULL
262#define lnw_gpio_runtime_idle NULL
263#endif
264
265static const struct dev_pm_ops lnw_gpio_pm_ops = {
266 .runtime_suspend = lnw_gpio_runtime_suspend,
267 .runtime_resume = lnw_gpio_runtime_resume,
268 .runtime_idle = lnw_gpio_runtime_idle,
269};
270
214static int __devinit lnw_gpio_probe(struct pci_dev *pdev, 271static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
215 const struct pci_device_id *id) 272 const struct pci_device_id *id)
216{ 273{
@@ -270,6 +327,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
270 lnw->chip.base = gpio_base; 327 lnw->chip.base = gpio_base;
271 lnw->chip.ngpio = id->driver_data; 328 lnw->chip.ngpio = id->driver_data;
272 lnw->chip.can_sleep = 0; 329 lnw->chip.can_sleep = 0;
330 lnw->pdev = pdev;
273 pci_set_drvdata(pdev, lnw); 331 pci_set_drvdata(pdev, lnw);
274 retval = gpiochip_add(&lnw->chip); 332 retval = gpiochip_add(&lnw->chip);
275 if (retval) { 333 if (retval) {
@@ -285,6 +343,10 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
285 } 343 }
286 344
287 spin_lock_init(&lnw->lock); 345 spin_lock_init(&lnw->lock);
346
347 pm_runtime_put_noidle(&pdev->dev);
348 pm_runtime_allow(&pdev->dev);
349
288 goto done; 350 goto done;
289err5: 351err5:
290 kfree(lnw); 352 kfree(lnw);
@@ -302,6 +364,9 @@ static struct pci_driver lnw_gpio_driver = {
302 .name = "langwell_gpio", 364 .name = "langwell_gpio",
303 .id_table = lnw_gpio_ids, 365 .id_table = lnw_gpio_ids,
304 .probe = lnw_gpio_probe, 366 .probe = lnw_gpio_probe,
367 .driver = {
368 .pm = &lnw_gpio_pm_ops,
369 },
305}; 370};
306 371
307 372
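The runtime PM additions above all follow one bracketing pattern: take a
runtime PM reference before touching registers, drop it afterwards, and
let the idle callback schedule a delayed suspend. Condensed into a sketch
(the helper is hypothetical, not part of the driver):

	static void lnw_reg_access(struct lnw_gpio *lnw,
				   void (*op)(struct lnw_gpio *lnw))
	{
		if (lnw->pdev)
			pm_runtime_get(&lnw->pdev->dev);
		op(lnw);
		if (lnw->pdev)
			pm_runtime_put(&lnw->pdev->dev);
	}

The lnw->pdev NULL check presumably keeps any instantiation of the chip
without a backing PCI device working.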
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 78a843947d82..0451d7ac94ac 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -24,33 +24,46 @@
24#include <linux/of_gpio.h> 24#include <linux/of_gpio.h>
25#endif 25#endif
26 26
27#define PCA953X_INPUT 0 27#define PCA953X_INPUT 0
28#define PCA953X_OUTPUT 1 28#define PCA953X_OUTPUT 1
29#define PCA953X_INVERT 2 29#define PCA953X_INVERT 2
30#define PCA953X_DIRECTION 3 30#define PCA953X_DIRECTION 3
31 31
32#define PCA953X_GPIOS 0x00FF 32#define PCA957X_IN 0
33#define PCA953X_INT 0x0100 33#define PCA957X_INVRT 1
34#define PCA957X_BKEN 2
35#define PCA957X_PUPD 3
36#define PCA957X_CFG 4
37#define PCA957X_OUT 5
38#define PCA957X_MSK 6
39#define PCA957X_INTS 7
40
41#define PCA_GPIO_MASK 0x00FF
42#define PCA_INT 0x0100
43#define PCA953X_TYPE 0x1000
44#define PCA957X_TYPE 0x2000
34 45
35static const struct i2c_device_id pca953x_id[] = { 46static const struct i2c_device_id pca953x_id[] = {
36 { "pca9534", 8 | PCA953X_INT, }, 47 { "pca9534", 8 | PCA953X_TYPE | PCA_INT, },
37 { "pca9535", 16 | PCA953X_INT, }, 48 { "pca9535", 16 | PCA953X_TYPE | PCA_INT, },
38 { "pca9536", 4, }, 49 { "pca9536", 4 | PCA953X_TYPE, },
39 { "pca9537", 4 | PCA953X_INT, }, 50 { "pca9537", 4 | PCA953X_TYPE | PCA_INT, },
40 { "pca9538", 8 | PCA953X_INT, }, 51 { "pca9538", 8 | PCA953X_TYPE | PCA_INT, },
41 { "pca9539", 16 | PCA953X_INT, }, 52 { "pca9539", 16 | PCA953X_TYPE | PCA_INT, },
42 { "pca9554", 8 | PCA953X_INT, }, 53 { "pca9554", 8 | PCA953X_TYPE | PCA_INT, },
43 { "pca9555", 16 | PCA953X_INT, }, 54 { "pca9555", 16 | PCA953X_TYPE | PCA_INT, },
44 { "pca9556", 8, }, 55 { "pca9556", 8 | PCA953X_TYPE, },
45 { "pca9557", 8, }, 56 { "pca9557", 8 | PCA953X_TYPE, },
46 57 { "pca9574", 8 | PCA957X_TYPE | PCA_INT, },
47 { "max7310", 8, }, 58 { "pca9575", 16 | PCA957X_TYPE | PCA_INT, },
48 { "max7312", 16 | PCA953X_INT, }, 59
49 { "max7313", 16 | PCA953X_INT, }, 60 { "max7310", 8 | PCA953X_TYPE, },
50 { "max7315", 8 | PCA953X_INT, }, 61 { "max7312", 16 | PCA953X_TYPE | PCA_INT, },
51 { "pca6107", 8 | PCA953X_INT, }, 62 { "max7313", 16 | PCA953X_TYPE | PCA_INT, },
52 { "tca6408", 8 | PCA953X_INT, }, 63 { "max7315", 8 | PCA953X_TYPE | PCA_INT, },
53 { "tca6416", 16 | PCA953X_INT, }, 64 { "pca6107", 8 | PCA953X_TYPE | PCA_INT, },
65 { "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
66 { "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
54 /* NYET: { "tca6424", 24, }, */ 67 /* NYET: { "tca6424", 24, }, */
55 { } 68 { }
56}; 69};
@@ -75,16 +88,32 @@ struct pca953x_chip {
75 struct pca953x_platform_data *dyn_pdata; 88 struct pca953x_platform_data *dyn_pdata;
76 struct gpio_chip gpio_chip; 89 struct gpio_chip gpio_chip;
77 const char *const *names; 90 const char *const *names;
91 int chip_type;
78}; 92};
79 93
80static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val) 94static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
81{ 95{
82 int ret; 96 int ret = 0;
83 97
84 if (chip->gpio_chip.ngpio <= 8) 98 if (chip->gpio_chip.ngpio <= 8)
85 ret = i2c_smbus_write_byte_data(chip->client, reg, val); 99 ret = i2c_smbus_write_byte_data(chip->client, reg, val);
86 else 100 else {
87 ret = i2c_smbus_write_word_data(chip->client, reg << 1, val); 101 switch (chip->chip_type) {
102 case PCA953X_TYPE:
103 ret = i2c_smbus_write_word_data(chip->client,
104 reg << 1, val);
105 break;
106 case PCA957X_TYPE:
107 ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
108 val & 0xff);
109 if (ret < 0)
110 break;
111 ret = i2c_smbus_write_byte_data(chip->client,
112 (reg << 1) + 1,
113 (val & 0xff00) >> 8);
114 break;
115 }
116 }
88 117
89 if (ret < 0) { 118 if (ret < 0) {
90 dev_err(&chip->client->dev, "failed writing register\n"); 119 dev_err(&chip->client->dev, "failed writing register\n");
@@ -116,13 +145,22 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
116{ 145{
117 struct pca953x_chip *chip; 146 struct pca953x_chip *chip;
118 uint16_t reg_val; 147 uint16_t reg_val;
119 int ret; 148 int ret, offset = 0;
120 149
121 chip = container_of(gc, struct pca953x_chip, gpio_chip); 150 chip = container_of(gc, struct pca953x_chip, gpio_chip);
122 151
123 mutex_lock(&chip->i2c_lock); 152 mutex_lock(&chip->i2c_lock);
124 reg_val = chip->reg_direction | (1u << off); 153 reg_val = chip->reg_direction | (1u << off);
125 ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val); 154
155 switch (chip->chip_type) {
156 case PCA953X_TYPE:
157 offset = PCA953X_DIRECTION;
158 break;
159 case PCA957X_TYPE:
160 offset = PCA957X_CFG;
161 break;
162 }
163 ret = pca953x_write_reg(chip, offset, reg_val);
126 if (ret) 164 if (ret)
127 goto exit; 165 goto exit;
128 166
@@ -138,7 +176,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
138{ 176{
139 struct pca953x_chip *chip; 177 struct pca953x_chip *chip;
140 uint16_t reg_val; 178 uint16_t reg_val;
141 int ret; 179 int ret, offset = 0;
142 180
143 chip = container_of(gc, struct pca953x_chip, gpio_chip); 181 chip = container_of(gc, struct pca953x_chip, gpio_chip);
144 182
@@ -149,7 +187,15 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
149 else 187 else
150 reg_val = chip->reg_output & ~(1u << off); 188 reg_val = chip->reg_output & ~(1u << off);
151 189
152 ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val); 190 switch (chip->chip_type) {
191 case PCA953X_TYPE:
192 offset = PCA953X_OUTPUT;
193 break;
194 case PCA957X_TYPE:
195 offset = PCA957X_OUT;
196 break;
197 }
198 ret = pca953x_write_reg(chip, offset, reg_val);
153 if (ret) 199 if (ret)
154 goto exit; 200 goto exit;
155 201
@@ -157,7 +203,15 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
157 203
158 /* then direction */ 204 /* then direction */
159 reg_val = chip->reg_direction & ~(1u << off); 205 reg_val = chip->reg_direction & ~(1u << off);
160 ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val); 206 switch (chip->chip_type) {
207 case PCA953X_TYPE:
208 offset = PCA953X_DIRECTION;
209 break;
210 case PCA957X_TYPE:
211 offset = PCA957X_CFG;
212 break;
213 }
214 ret = pca953x_write_reg(chip, offset, reg_val);
161 if (ret) 215 if (ret)
162 goto exit; 216 goto exit;
163 217
@@ -172,12 +226,20 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
172{ 226{
173 struct pca953x_chip *chip; 227 struct pca953x_chip *chip;
174 uint16_t reg_val; 228 uint16_t reg_val;
175 int ret; 229 int ret, offset = 0;
176 230
177 chip = container_of(gc, struct pca953x_chip, gpio_chip); 231 chip = container_of(gc, struct pca953x_chip, gpio_chip);
178 232
179 mutex_lock(&chip->i2c_lock); 233 mutex_lock(&chip->i2c_lock);
180 ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val); 234 switch (chip->chip_type) {
235 case PCA953X_TYPE:
236 offset = PCA953X_INPUT;
237 break;
238 case PCA957X_TYPE:
239 offset = PCA957X_IN;
240 break;
241 }
242 ret = pca953x_read_reg(chip, offset, &reg_val);
181 mutex_unlock(&chip->i2c_lock); 243 mutex_unlock(&chip->i2c_lock);
182 if (ret < 0) { 244 if (ret < 0) {
183 /* NOTE: diagnostic already emitted; that's all we should 245 /* NOTE: diagnostic already emitted; that's all we should
@@ -194,7 +256,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
194{ 256{
195 struct pca953x_chip *chip; 257 struct pca953x_chip *chip;
196 uint16_t reg_val; 258 uint16_t reg_val;
197 int ret; 259 int ret, offset = 0;
198 260
199 chip = container_of(gc, struct pca953x_chip, gpio_chip); 261 chip = container_of(gc, struct pca953x_chip, gpio_chip);
200 262
@@ -204,7 +266,15 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
204 else 266 else
205 reg_val = chip->reg_output & ~(1u << off); 267 reg_val = chip->reg_output & ~(1u << off);
206 268
207 ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val); 269 switch (chip->chip_type) {
270 case PCA953X_TYPE:
271 offset = PCA953X_OUTPUT;
272 break;
273 case PCA957X_TYPE:
274 offset = PCA957X_OUT;
275 break;
276 }
277 ret = pca953x_write_reg(chip, offset, reg_val);
208 if (ret) 278 if (ret)
209 goto exit; 279 goto exit;
210 280
@@ -322,9 +392,17 @@ static uint16_t pca953x_irq_pending(struct pca953x_chip *chip)
322 uint16_t old_stat; 392 uint16_t old_stat;
323 uint16_t pending; 393 uint16_t pending;
324 uint16_t trigger; 394 uint16_t trigger;
325 int ret; 395 int ret, offset = 0;
326 396
327 ret = pca953x_read_reg(chip, PCA953X_INPUT, &cur_stat); 397 switch (chip->chip_type) {
398 case PCA953X_TYPE:
399 offset = PCA953X_INPUT;
400 break;
401 case PCA957X_TYPE:
402 offset = PCA957X_IN;
403 break;
404 }
405 ret = pca953x_read_reg(chip, offset, &cur_stat);
328 if (ret) 406 if (ret)
329 return 0; 407 return 0;
330 408
@@ -372,14 +450,21 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
372{ 450{
373 struct i2c_client *client = chip->client; 451 struct i2c_client *client = chip->client;
374 struct pca953x_platform_data *pdata = client->dev.platform_data; 452 struct pca953x_platform_data *pdata = client->dev.platform_data;
375 int ret; 453 int ret, offset = 0;
376 454
377 if (pdata->irq_base != -1 455 if (pdata->irq_base != -1
378 && (id->driver_data & PCA953X_INT)) { 456 && (id->driver_data & PCA_INT)) {
379 int lvl; 457 int lvl;
380 458
381 ret = pca953x_read_reg(chip, PCA953X_INPUT, 459 switch (chip->chip_type) {
382 &chip->irq_stat); 460 case PCA953X_TYPE:
461 offset = PCA953X_INPUT;
462 break;
463 case PCA957X_TYPE:
464 offset = PCA957X_IN;
465 break;
466 }
467 ret = pca953x_read_reg(chip, offset, &chip->irq_stat);
383 if (ret) 468 if (ret)
384 goto out_failed; 469 goto out_failed;
385 470
@@ -439,7 +524,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
439 struct i2c_client *client = chip->client; 524 struct i2c_client *client = chip->client;
440 struct pca953x_platform_data *pdata = client->dev.platform_data; 525 struct pca953x_platform_data *pdata = client->dev.platform_data;
441 526
442 if (pdata->irq_base != -1 && (id->driver_data & PCA953X_INT)) 527 if (pdata->irq_base != -1 && (id->driver_data & PCA_INT))
443 dev_warn(&client->dev, "interrupt support not compiled in\n"); 528 dev_warn(&client->dev, "interrupt support not compiled in\n");
444 529
445 return 0; 530 return 0;
@@ -499,12 +584,65 @@ pca953x_get_alt_pdata(struct i2c_client *client)
499} 584}
500#endif 585#endif
501 586
587static int __devinit device_pca953x_init(struct pca953x_chip *chip, int invert)
588{
589 int ret;
590
591 ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output);
592 if (ret)
593 goto out;
594
595 ret = pca953x_read_reg(chip, PCA953X_DIRECTION,
596 &chip->reg_direction);
597 if (ret)
598 goto out;
599
600 /* set platform specific polarity inversion */
601 ret = pca953x_write_reg(chip, PCA953X_INVERT, invert);
602 if (ret)
603 goto out;
604 return 0;
605out:
606 return ret;
607}
608
609static int __devinit device_pca957x_init(struct pca953x_chip *chip, int invert)
610{
611 int ret;
612 uint16_t val = 0;
613
614	/* Leave every port in a proper state; that can save power */
615 pca953x_write_reg(chip, PCA957X_PUPD, 0x0);
616 pca953x_write_reg(chip, PCA957X_CFG, 0xffff);
617 pca953x_write_reg(chip, PCA957X_OUT, 0x0);
618
619 ret = pca953x_read_reg(chip, PCA957X_IN, &val);
620 if (ret)
621 goto out;
622 ret = pca953x_read_reg(chip, PCA957X_OUT, &chip->reg_output);
623 if (ret)
624 goto out;
625 ret = pca953x_read_reg(chip, PCA957X_CFG, &chip->reg_direction);
626 if (ret)
627 goto out;
628
629 /* set platform specific polarity inversion */
630 pca953x_write_reg(chip, PCA957X_INVRT, invert);
631
632	/* Enable registers 6 and 7 to control pull-up and pull-down */
633 pca953x_write_reg(chip, PCA957X_BKEN, 0x202);
634
635 return 0;
636out:
637 return ret;
638}
639
502static int __devinit pca953x_probe(struct i2c_client *client, 640static int __devinit pca953x_probe(struct i2c_client *client,
503 const struct i2c_device_id *id) 641 const struct i2c_device_id *id)
504{ 642{
505 struct pca953x_platform_data *pdata; 643 struct pca953x_platform_data *pdata;
506 struct pca953x_chip *chip; 644 struct pca953x_chip *chip;
507 int ret; 645 int ret = 0;
508 646
509 chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL); 647 chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
510 if (chip == NULL) 648 if (chip == NULL)
@@ -531,25 +669,20 @@ static int __devinit pca953x_probe(struct i2c_client *client,
531 chip->gpio_start = pdata->gpio_base; 669 chip->gpio_start = pdata->gpio_base;
532 670
533 chip->names = pdata->names; 671 chip->names = pdata->names;
672 chip->chip_type = id->driver_data & (PCA953X_TYPE | PCA957X_TYPE);
534 673
535 mutex_init(&chip->i2c_lock); 674 mutex_init(&chip->i2c_lock);
536 675
537 /* initialize cached registers from their original values. 676 /* initialize cached registers from their original values.
538 * we can't share this chip with another i2c master. 677 * we can't share this chip with another i2c master.
539 */ 678 */
540 pca953x_setup_gpio(chip, id->driver_data & PCA953X_GPIOS); 679 pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK);
541 680
542 ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output); 681 if (chip->chip_type == PCA953X_TYPE)
543 if (ret) 682 device_pca953x_init(chip, pdata->invert);
544 goto out_failed; 683 else if (chip->chip_type == PCA957X_TYPE)
545 684 device_pca957x_init(chip, pdata->invert);
546 ret = pca953x_read_reg(chip, PCA953X_DIRECTION, &chip->reg_direction); 685 else
547 if (ret)
548 goto out_failed;
549
550 /* set platform specific polarity inversion */
551 ret = pca953x_write_reg(chip, PCA953X_INVERT, pdata->invert);
552 if (ret)
553 goto out_failed; 686 goto out_failed;
554 687
555 ret = pca953x_irq_setup(chip, id); 688 ret = pca953x_irq_setup(chip, id);
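Every accessor above repeats the same chip_type switch to select a
register offset. A helper could centralize that mapping; a sketch for the
input register only (hypothetical, not part of the patch):

	static int pca953x_input_offset(struct pca953x_chip *chip)
	{
		return chip->chip_type == PCA953X_TYPE ?
			PCA953X_INPUT : PCA957X_IN;
	}

Analogous one-liners would cover the output and direction registers.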
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c
index f970a5f3585e..36919e77c495 100644
--- a/drivers/gpio/pch_gpio.c
+++ b/drivers/gpio/pch_gpio.c
@@ -283,8 +283,10 @@ static int pch_gpio_resume(struct pci_dev *pdev)
283#define pch_gpio_resume NULL 283#define pch_gpio_resume NULL
284#endif 284#endif
285 285
286#define PCI_VENDOR_ID_ROHM 0x10DB
286static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = { 287static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
287 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) }, 288 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
289 { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },
288 { 0, } 290 { 0, }
289}; 291};
290MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id); 292MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 6e5123b1d341..144d27261e43 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1782,7 +1782,6 @@ static int ide_cd_probe(ide_drive_t *drive)
1782 ide_cd_read_toc(drive, &sense); 1782 ide_cd_read_toc(drive, &sense);
1783 g->fops = &idecd_ops; 1783 g->fops = &idecd_ops;
1784 g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; 1784 g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
1785 g->events = DISK_EVENT_MEDIA_CHANGE;
1786 add_disk(g); 1785 add_disk(g);
1787 return 0; 1786 return 0;
1788 1787
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 76a5af00a26b..2067288f61f9 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -19,6 +19,8 @@
19#define DM_MSG_PREFIX "io" 19#define DM_MSG_PREFIX "io"
20 20
21#define DM_IO_MAX_REGIONS BITS_PER_LONG 21#define DM_IO_MAX_REGIONS BITS_PER_LONG
22#define MIN_IOS 16
23#define MIN_BIOS 16
22 24
23struct dm_io_client { 25struct dm_io_client {
24 mempool_t *pool; 26 mempool_t *pool;
@@ -41,33 +43,21 @@ struct io {
41static struct kmem_cache *_dm_io_cache; 43static struct kmem_cache *_dm_io_cache;
42 44
43/* 45/*
44 * io contexts are only dynamically allocated for asynchronous
45 * io. Since async io is likely to be the majority of io we'll
46 * have the same number of io contexts as bios! (FIXME: must reduce this).
47 */
48
49static unsigned int pages_to_ios(unsigned int pages)
50{
51 return 4 * pages; /* too many ? */
52}
53
54/*
55 * Create a client with mempool and bioset. 46 * Create a client with mempool and bioset.
56 */ 47 */
57struct dm_io_client *dm_io_client_create(unsigned num_pages) 48struct dm_io_client *dm_io_client_create(void)
58{ 49{
59 unsigned ios = pages_to_ios(num_pages);
60 struct dm_io_client *client; 50 struct dm_io_client *client;
61 51
62 client = kmalloc(sizeof(*client), GFP_KERNEL); 52 client = kmalloc(sizeof(*client), GFP_KERNEL);
63 if (!client) 53 if (!client)
64 return ERR_PTR(-ENOMEM); 54 return ERR_PTR(-ENOMEM);
65 55
66 client->pool = mempool_create_slab_pool(ios, _dm_io_cache); 56 client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
67 if (!client->pool) 57 if (!client->pool)
68 goto bad; 58 goto bad;
69 59
70 client->bios = bioset_create(16, 0); 60 client->bios = bioset_create(MIN_BIOS, 0);
71 if (!client->bios) 61 if (!client->bios)
72 goto bad; 62 goto bad;
73 63
@@ -81,13 +71,6 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
81} 71}
82EXPORT_SYMBOL(dm_io_client_create); 72EXPORT_SYMBOL(dm_io_client_create);
83 73
84int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
85{
86 return mempool_resize(client->pool, pages_to_ios(num_pages),
87 GFP_KERNEL);
88}
89EXPORT_SYMBOL(dm_io_client_resize);
90
91void dm_io_client_destroy(struct dm_io_client *client) 74void dm_io_client_destroy(struct dm_io_client *client)
92{ 75{
93 mempool_destroy(client->pool); 76 mempool_destroy(client->pool);
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 1bb73a13ca40..819e37eaaeba 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -27,15 +27,19 @@
27 27
28#include "dm.h" 28#include "dm.h"
29 29
30#define SUB_JOB_SIZE 128
31#define SPLIT_COUNT 8
32#define MIN_JOBS 8
33#define RESERVE_PAGES (DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE))
34
30/*----------------------------------------------------------------- 35/*-----------------------------------------------------------------
31 * Each kcopyd client has its own little pool of preallocated 36 * Each kcopyd client has its own little pool of preallocated
32 * pages for kcopyd io. 37 * pages for kcopyd io.
33 *---------------------------------------------------------------*/ 38 *---------------------------------------------------------------*/
34struct dm_kcopyd_client { 39struct dm_kcopyd_client {
35 spinlock_t lock;
36 struct page_list *pages; 40 struct page_list *pages;
37 unsigned int nr_pages; 41 unsigned nr_reserved_pages;
38 unsigned int nr_free_pages; 42 unsigned nr_free_pages;
39 43
40 struct dm_io_client *io_client; 44 struct dm_io_client *io_client;
41 45
@@ -67,15 +71,18 @@ static void wake(struct dm_kcopyd_client *kc)
67 queue_work(kc->kcopyd_wq, &kc->kcopyd_work); 71 queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
68} 72}
69 73
70static struct page_list *alloc_pl(void) 74/*
75 * Obtain one page for the use of kcopyd.
76 */
77static struct page_list *alloc_pl(gfp_t gfp)
71{ 78{
72 struct page_list *pl; 79 struct page_list *pl;
73 80
74 pl = kmalloc(sizeof(*pl), GFP_KERNEL); 81 pl = kmalloc(sizeof(*pl), gfp);
75 if (!pl) 82 if (!pl)
76 return NULL; 83 return NULL;
77 84
78 pl->page = alloc_page(GFP_KERNEL); 85 pl->page = alloc_page(gfp);
79 if (!pl->page) { 86 if (!pl->page) {
80 kfree(pl); 87 kfree(pl);
81 return NULL; 88 return NULL;
@@ -90,41 +97,56 @@ static void free_pl(struct page_list *pl)
90 kfree(pl); 97 kfree(pl);
91} 98}
92 99
93static int kcopyd_get_pages(struct dm_kcopyd_client *kc, 100/*
94 unsigned int nr, struct page_list **pages) 101 * Add the provided pages to a client's free page list, releasing
102 * back to the system any beyond the reserved_pages limit.
103 */
104static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
95{ 105{
96 struct page_list *pl; 106 struct page_list *next;
97
98 spin_lock(&kc->lock);
99 if (kc->nr_free_pages < nr) {
100 spin_unlock(&kc->lock);
101 return -ENOMEM;
102 }
103
104 kc->nr_free_pages -= nr;
105 for (*pages = pl = kc->pages; --nr; pl = pl->next)
106 ;
107 107
108 kc->pages = pl->next; 108 do {
109 pl->next = NULL; 109 next = pl->next;
110 110
111 spin_unlock(&kc->lock); 111 if (kc->nr_free_pages >= kc->nr_reserved_pages)
112 free_pl(pl);
113 else {
114 pl->next = kc->pages;
115 kc->pages = pl;
116 kc->nr_free_pages++;
117 }
112 118
113 return 0; 119 pl = next;
120 } while (pl);
114} 121}
115 122
116static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl) 123static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
124 unsigned int nr, struct page_list **pages)
117{ 125{
118 struct page_list *cursor; 126 struct page_list *pl;
127
128 *pages = NULL;
129
130 do {
131 pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY);
132 if (unlikely(!pl)) {
133 /* Use reserved pages */
134 pl = kc->pages;
135 if (unlikely(!pl))
136 goto out_of_memory;
137 kc->pages = pl->next;
138 kc->nr_free_pages--;
139 }
140 pl->next = *pages;
141 *pages = pl;
142 } while (--nr);
119 143
120 spin_lock(&kc->lock); 144 return 0;
121 for (cursor = pl; cursor->next; cursor = cursor->next)
122 kc->nr_free_pages++;
123 145
124 kc->nr_free_pages++; 146out_of_memory:
125 cursor->next = kc->pages; 147 if (*pages)
126 kc->pages = pl; 148 kcopyd_put_pages(kc, *pages);
127 spin_unlock(&kc->lock); 149 return -ENOMEM;
128} 150}
129 151
130/* 152/*
@@ -141,13 +163,16 @@ static void drop_pages(struct page_list *pl)
141 } 163 }
142} 164}
143 165
144static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr) 166/*
167 * Allocate and reserve nr_pages for the use of a specific client.
168 */
169static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
145{ 170{
146 unsigned int i; 171 unsigned i;
147 struct page_list *pl = NULL, *next; 172 struct page_list *pl = NULL, *next;
148 173
149 for (i = 0; i < nr; i++) { 174 for (i = 0; i < nr_pages; i++) {
150 next = alloc_pl(); 175 next = alloc_pl(GFP_KERNEL);
151 if (!next) { 176 if (!next) {
152 if (pl) 177 if (pl)
153 drop_pages(pl); 178 drop_pages(pl);
@@ -157,17 +182,18 @@ static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
157 pl = next; 182 pl = next;
158 } 183 }
159 184
185 kc->nr_reserved_pages += nr_pages;
160 kcopyd_put_pages(kc, pl); 186 kcopyd_put_pages(kc, pl);
161 kc->nr_pages += nr; 187
162 return 0; 188 return 0;
163} 189}
164 190
165static void client_free_pages(struct dm_kcopyd_client *kc) 191static void client_free_pages(struct dm_kcopyd_client *kc)
166{ 192{
167 BUG_ON(kc->nr_free_pages != kc->nr_pages); 193 BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
168 drop_pages(kc->pages); 194 drop_pages(kc->pages);
169 kc->pages = NULL; 195 kc->pages = NULL;
170 kc->nr_free_pages = kc->nr_pages = 0; 196 kc->nr_free_pages = kc->nr_reserved_pages = 0;
171} 197}
172 198
173/*----------------------------------------------------------------- 199/*-----------------------------------------------------------------
@@ -216,16 +242,17 @@ struct kcopyd_job {
216 struct mutex lock; 242 struct mutex lock;
217 atomic_t sub_jobs; 243 atomic_t sub_jobs;
218 sector_t progress; 244 sector_t progress;
219};
220 245
221/* FIXME: this should scale with the number of pages */ 246 struct kcopyd_job *master_job;
222#define MIN_JOBS 512 247};
223 248
224static struct kmem_cache *_job_cache; 249static struct kmem_cache *_job_cache;
225 250
226int __init dm_kcopyd_init(void) 251int __init dm_kcopyd_init(void)
227{ 252{
228 _job_cache = KMEM_CACHE(kcopyd_job, 0); 253 _job_cache = kmem_cache_create("kcopyd_job",
254 sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
255 __alignof__(struct kcopyd_job), 0, NULL);
229 if (!_job_cache) 256 if (!_job_cache)
230 return -ENOMEM; 257 return -ENOMEM;
231 258
@@ -299,7 +326,12 @@ static int run_complete_job(struct kcopyd_job *job)
299 326
300 if (job->pages) 327 if (job->pages)
301 kcopyd_put_pages(kc, job->pages); 328 kcopyd_put_pages(kc, job->pages);
302 mempool_free(job, kc->job_pool); 329 /*
330 * If this is the master job, the sub jobs have already
331 * completed so we can free everything.
332 */
333 if (job->master_job == job)
334 mempool_free(job, kc->job_pool);
303 fn(read_err, write_err, context); 335 fn(read_err, write_err, context);
304 336
305 if (atomic_dec_and_test(&kc->nr_jobs)) 337 if (atomic_dec_and_test(&kc->nr_jobs))
@@ -460,14 +492,14 @@ static void dispatch_job(struct kcopyd_job *job)
460 wake(kc); 492 wake(kc);
461} 493}
462 494
463#define SUB_JOB_SIZE 128
464static void segment_complete(int read_err, unsigned long write_err, 495static void segment_complete(int read_err, unsigned long write_err,
465 void *context) 496 void *context)
466{ 497{
467 /* FIXME: tidy this function */ 498 /* FIXME: tidy this function */
468 sector_t progress = 0; 499 sector_t progress = 0;
469 sector_t count = 0; 500 sector_t count = 0;
470 struct kcopyd_job *job = (struct kcopyd_job *) context; 501 struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
502 struct kcopyd_job *job = sub_job->master_job;
471 struct dm_kcopyd_client *kc = job->kc; 503 struct dm_kcopyd_client *kc = job->kc;
472 504
473 mutex_lock(&job->lock); 505 mutex_lock(&job->lock);
@@ -498,8 +530,6 @@ static void segment_complete(int read_err, unsigned long write_err,
498 530
499 if (count) { 531 if (count) {
500 int i; 532 int i;
501 struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
502 GFP_NOIO);
503 533
504 *sub_job = *job; 534 *sub_job = *job;
505 sub_job->source.sector += progress; 535 sub_job->source.sector += progress;
@@ -511,7 +541,7 @@ static void segment_complete(int read_err, unsigned long write_err,
511 } 541 }
512 542
513 sub_job->fn = segment_complete; 543 sub_job->fn = segment_complete;
514 sub_job->context = job; 544 sub_job->context = sub_job;
515 dispatch_job(sub_job); 545 dispatch_job(sub_job);
516 546
517 } else if (atomic_dec_and_test(&job->sub_jobs)) { 547 } else if (atomic_dec_and_test(&job->sub_jobs)) {
@@ -531,19 +561,19 @@ static void segment_complete(int read_err, unsigned long write_err,
531} 561}
532 562
533/* 563/*
534 * Create some little jobs that will do the move between 564 * Create some sub jobs to share the work between them.
535 * them.
536 */ 565 */
537#define SPLIT_COUNT 8 566static void split_job(struct kcopyd_job *master_job)
538static void split_job(struct kcopyd_job *job)
539{ 567{
540 int i; 568 int i;
541 569
542 atomic_inc(&job->kc->nr_jobs); 570 atomic_inc(&master_job->kc->nr_jobs);
543 571
544 atomic_set(&job->sub_jobs, SPLIT_COUNT); 572 atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
545 for (i = 0; i < SPLIT_COUNT; i++) 573 for (i = 0; i < SPLIT_COUNT; i++) {
546 segment_complete(0, 0u, job); 574 master_job[i + 1].master_job = master_job;
575 segment_complete(0, 0u, &master_job[i + 1]);
576 }
547} 577}
548 578
549int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, 579int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
@@ -553,7 +583,8 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
553 struct kcopyd_job *job; 583 struct kcopyd_job *job;
554 584
555 /* 585 /*
556 * Allocate a new job. 586 * Allocate an array of jobs consisting of one master job
587 * followed by SPLIT_COUNT sub jobs.
557 */ 588 */
558 job = mempool_alloc(kc->job_pool, GFP_NOIO); 589 job = mempool_alloc(kc->job_pool, GFP_NOIO);
559 590
@@ -577,10 +608,10 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
577 608
578 job->fn = fn; 609 job->fn = fn;
579 job->context = context; 610 job->context = context;
611 job->master_job = job;
580 612
581 if (job->source.count < SUB_JOB_SIZE) 613 if (job->source.count <= SUB_JOB_SIZE)
582 dispatch_job(job); 614 dispatch_job(job);
583
584 else { 615 else {
585 mutex_init(&job->lock); 616 mutex_init(&job->lock);
586 job->progress = 0; 617 job->progress = 0;
@@ -606,17 +637,15 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
606/*----------------------------------------------------------------- 637/*-----------------------------------------------------------------
607 * Client setup 638 * Client setup
608 *---------------------------------------------------------------*/ 639 *---------------------------------------------------------------*/
609int dm_kcopyd_client_create(unsigned int nr_pages, 640struct dm_kcopyd_client *dm_kcopyd_client_create(void)
610 struct dm_kcopyd_client **result)
611{ 641{
612 int r = -ENOMEM; 642 int r = -ENOMEM;
613 struct dm_kcopyd_client *kc; 643 struct dm_kcopyd_client *kc;
614 644
615 kc = kmalloc(sizeof(*kc), GFP_KERNEL); 645 kc = kmalloc(sizeof(*kc), GFP_KERNEL);
616 if (!kc) 646 if (!kc)
617 return -ENOMEM; 647 return ERR_PTR(-ENOMEM);
618 648
619 spin_lock_init(&kc->lock);
620 spin_lock_init(&kc->job_lock); 649 spin_lock_init(&kc->job_lock);
621 INIT_LIST_HEAD(&kc->complete_jobs); 650 INIT_LIST_HEAD(&kc->complete_jobs);
622 INIT_LIST_HEAD(&kc->io_jobs); 651 INIT_LIST_HEAD(&kc->io_jobs);
@@ -633,12 +662,12 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
633 goto bad_workqueue; 662 goto bad_workqueue;
634 663
635 kc->pages = NULL; 664 kc->pages = NULL;
636 kc->nr_pages = kc->nr_free_pages = 0; 665 kc->nr_reserved_pages = kc->nr_free_pages = 0;
637 r = client_alloc_pages(kc, nr_pages); 666 r = client_reserve_pages(kc, RESERVE_PAGES);
638 if (r) 667 if (r)
639 goto bad_client_pages; 668 goto bad_client_pages;
640 669
641 kc->io_client = dm_io_client_create(nr_pages); 670 kc->io_client = dm_io_client_create();
642 if (IS_ERR(kc->io_client)) { 671 if (IS_ERR(kc->io_client)) {
643 r = PTR_ERR(kc->io_client); 672 r = PTR_ERR(kc->io_client);
644 goto bad_io_client; 673 goto bad_io_client;
@@ -647,8 +676,7 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
647 init_waitqueue_head(&kc->destroyq); 676 init_waitqueue_head(&kc->destroyq);
648 atomic_set(&kc->nr_jobs, 0); 677 atomic_set(&kc->nr_jobs, 0);
649 678
650 *result = kc; 679 return kc;
651 return 0;
652 680
653bad_io_client: 681bad_io_client:
654 client_free_pages(kc); 682 client_free_pages(kc);
@@ -659,7 +687,7 @@ bad_workqueue:
659bad_slab: 687bad_slab:
660 kfree(kc); 688 kfree(kc);
661 689
662 return r; 690 return ERR_PTR(r);
663} 691}
664EXPORT_SYMBOL(dm_kcopyd_client_create); 692EXPORT_SYMBOL(dm_kcopyd_client_create);
665 693
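A minimal caller-side sketch of the dm-kcopyd API change above (illustrative, not part of the patch): the constructor now returns the client directly and reports failure through ERR_PTR(), instead of an int result plus an out-parameter, and page reservation moves inside the client (RESERVE_PAGES) so callers stop passing a count.

	/* Illustrative sketch only: migrating a dm-kcopyd caller. */
	struct dm_kcopyd_client *kc;
	int r;

	/* Before: caller chose a page count and received the client
	 * through an out-parameter. */
	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &kc);
	if (r)
		return r;

	/* After: the client is the return value; errors travel in the
	 * pointer, as the dm-raid1 and dm-snap hunks below show. */
	kc = dm_kcopyd_client_create();
	if (IS_ERR(kc))
		return PTR_ERR(kc);
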
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index a1f321889676..948e3f4925bf 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -449,8 +449,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
449 449
450 lc->io_req.mem.type = DM_IO_VMA; 450 lc->io_req.mem.type = DM_IO_VMA;
451 lc->io_req.notify.fn = NULL; 451 lc->io_req.notify.fn = NULL;
452 lc->io_req.client = dm_io_client_create(dm_div_up(buf_size, 452 lc->io_req.client = dm_io_client_create();
453 PAGE_SIZE));
454 if (IS_ERR(lc->io_req.client)) { 453 if (IS_ERR(lc->io_req.client)) {
455 r = PTR_ERR(lc->io_req.client); 454 r = PTR_ERR(lc->io_req.client);
456 DMWARN("couldn't allocate disk io client"); 455 DMWARN("couldn't allocate disk io client");
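The dm-io constructor follows the same pattern: it loses its page-count argument, and callers such as the dm-snap-persistent code below also drop their dm_io_client_resize() calls. A short sketch, assuming only what the hunks show:

	/* Illustrative only: dm-io client setup before and after. */
	struct dm_io_client *client;

	/* Before: the caller sized the client from its buffer needs. */
	client = dm_io_client_create(dm_div_up(buf_size, PAGE_SIZE));

	/* After: no sizing argument; reserves are managed internally. */
	client = dm_io_client_create();
	if (IS_ERR(client))
		return PTR_ERR(client);
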
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index a550a057d991..aa4e570c2cb5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1290,7 +1290,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
1290 if (!error && !clone->errors) 1290 if (!error && !clone->errors)
1291 return 0; /* I/O complete */ 1291 return 0; /* I/O complete */
1292 1292
1293 if (error == -EOPNOTSUPP || error == -EREMOTEIO) 1293 if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
1294 return error; 1294 return error;
1295 1295
1296 if (mpio->pgpath) 1296 if (mpio->pgpath)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 976ad4688afc..9bfd057be686 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -22,8 +22,6 @@
22#define DM_MSG_PREFIX "raid1" 22#define DM_MSG_PREFIX "raid1"
23 23
24#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */ 24#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
25#define DM_IO_PAGES 64
26#define DM_KCOPYD_PAGES 64
27 25
28#define DM_RAID1_HANDLE_ERRORS 0x01 26#define DM_RAID1_HANDLE_ERRORS 0x01
29#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS) 27#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -887,7 +885,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
887 return NULL; 885 return NULL;
888 } 886 }
889 887
890 ms->io_client = dm_io_client_create(DM_IO_PAGES); 888 ms->io_client = dm_io_client_create();
891 if (IS_ERR(ms->io_client)) { 889 if (IS_ERR(ms->io_client)) {
892 ti->error = "Error creating dm_io client"; 890 ti->error = "Error creating dm_io client";
893 mempool_destroy(ms->read_record_pool); 891 mempool_destroy(ms->read_record_pool);
@@ -1117,9 +1115,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1117 goto err_destroy_wq; 1115 goto err_destroy_wq;
1118 } 1116 }
1119 1117
1120 r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client); 1118 ms->kcopyd_client = dm_kcopyd_client_create();
1121 if (r) 1119 if (IS_ERR(ms->kcopyd_client)) {
1120 r = PTR_ERR(ms->kcopyd_client);
1122 goto err_destroy_wq; 1121 goto err_destroy_wq;
1122 }
1123 1123
1124 wakeup_mirrord(ms); 1124 wakeup_mirrord(ms);
1125 return 0; 1125 return 0;
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 95891dfcbca0..135c2f1fdbfc 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -154,11 +154,6 @@ struct pstore {
154 struct workqueue_struct *metadata_wq; 154 struct workqueue_struct *metadata_wq;
155}; 155};
156 156
157static unsigned sectors_to_pages(unsigned sectors)
158{
159 return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
160}
161
162static int alloc_area(struct pstore *ps) 157static int alloc_area(struct pstore *ps)
163{ 158{
164 int r = -ENOMEM; 159 int r = -ENOMEM;
@@ -318,8 +313,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
318 chunk_size_supplied = 0; 313 chunk_size_supplied = 0;
319 } 314 }
320 315
321 ps->io_client = dm_io_client_create(sectors_to_pages(ps->store-> 316 ps->io_client = dm_io_client_create();
322 chunk_size));
323 if (IS_ERR(ps->io_client)) 317 if (IS_ERR(ps->io_client))
324 return PTR_ERR(ps->io_client); 318 return PTR_ERR(ps->io_client);
325 319
@@ -368,11 +362,6 @@ static int read_header(struct pstore *ps, int *new_snapshot)
368 return r; 362 return r;
369 } 363 }
370 364
371 r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
372 ps->io_client);
373 if (r)
374 return r;
375
376 r = alloc_area(ps); 365 r = alloc_area(ps);
377 return r; 366 return r;
378 367
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a2d330942cb2..9ecff5f3023a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -40,11 +40,6 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
40#define SNAPSHOT_COPY_PRIORITY 2 40#define SNAPSHOT_COPY_PRIORITY 2
41 41
42/* 42/*
43 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
44 */
45#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
46
47/*
48 * The size of the mempool used to track chunks in use. 43 * The size of the mempool used to track chunks in use.
49 */ 44 */
50#define MIN_IOS 256 45#define MIN_IOS 256
@@ -1116,8 +1111,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1116 goto bad_hash_tables; 1111 goto bad_hash_tables;
1117 } 1112 }
1118 1113
1119 r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client); 1114 s->kcopyd_client = dm_kcopyd_client_create();
1120 if (r) { 1115 if (IS_ERR(s->kcopyd_client)) {
1116 r = PTR_ERR(s->kcopyd_client);
1121 ti->error = "Could not create kcopyd client"; 1117 ti->error = "Could not create kcopyd client";
1122 goto bad_kcopyd; 1118 goto bad_kcopyd;
1123 } 1119 }
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index cb8380c9767f..451c3bb176d2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -362,6 +362,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
362static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, 362static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
363 sector_t start, sector_t len, void *data) 363 sector_t start, sector_t len, void *data)
364{ 364{
365 struct request_queue *q;
365 struct queue_limits *limits = data; 366 struct queue_limits *limits = data;
366 struct block_device *bdev = dev->bdev; 367 struct block_device *bdev = dev->bdev;
367 sector_t dev_size = 368 sector_t dev_size =
@@ -370,6 +371,22 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
370 limits->logical_block_size >> SECTOR_SHIFT; 371 limits->logical_block_size >> SECTOR_SHIFT;
371 char b[BDEVNAME_SIZE]; 372 char b[BDEVNAME_SIZE];
372 373
374 /*
375 * Some devices exist without request functions,
376 * such as loop devices not yet bound to backing files.
377 * Forbid the use of such devices.
378 */
379 q = bdev_get_queue(bdev);
380 if (!q || !q->make_request_fn) {
381 DMWARN("%s: %s is not yet initialised: "
382 "start=%llu, len=%llu, dev_size=%llu",
383 dm_device_name(ti->table->md), bdevname(bdev, b),
384 (unsigned long long)start,
385 (unsigned long long)len,
386 (unsigned long long)dev_size);
387 return 1;
388 }
389
373 if (!dev_size) 390 if (!dev_size)
374 return 0; 391 return 0;
375 392
@@ -1346,7 +1363,8 @@ bool dm_table_supports_discards(struct dm_table *t)
1346 return 0; 1363 return 0;
1347 1364
1348 /* 1365 /*
1349 * Ensure that at least one underlying device supports discards. 1366 * Unless any target used by the table set discards_supported,
1367 * require at least one underlying device to support discards.
1350 * t->devices includes internal dm devices such as mirror logs 1368 * t->devices includes internal dm devices such as mirror logs
1351 * so we need to use iterate_devices here, which targets 1369 * so we need to use iterate_devices here, which targets
1352 * supporting discard must provide. 1370 * supporting discard must provide.
@@ -1354,6 +1372,9 @@ bool dm_table_supports_discards(struct dm_table *t)
1354 while (i < dm_table_get_num_targets(t)) { 1372 while (i < dm_table_get_num_targets(t)) {
1355 ti = dm_table_get_target(t, i++); 1373 ti = dm_table_get_target(t, i++);
1356 1374
1375 if (ti->discards_supported)
1376 return 1;
1377
1357 if (ti->type->iterate_devices && 1378 if (ti->type->iterate_devices &&
1358 ti->type->iterate_devices(ti, device_discard_capable, NULL)) 1379 ti->type->iterate_devices(ti, device_discard_capable, NULL))
1359 return 1; 1380 return 1;
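To illustrate the new short-circuit in dm_table_supports_discards(): a target that implements discards itself can assert support in its constructor, and the table then reports discard capability even when no underlying device advertises it. The constructor below is hypothetical; only the discards_supported field comes from the hunk above.

	/* Hypothetical target constructor, for illustration only. */
	static int example_ctr(struct dm_target *ti, unsigned int argc,
			       char **argv)
	{
		/* ... parse arguments, grab underlying devices ... */

		/* Declare discard support up front; the iterate_devices
		 * walk over t->devices is then skipped for this table. */
		ti->discards_supported = 1;
		return 0;
	}
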
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index b6c267724e14..0f09c057e796 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -721,7 +721,7 @@ config MFD_PM8XXX_IRQ
721 721
722config MFD_TPS65910 722config MFD_TPS65910
723 bool "TPS65910 Power Management chip" 723 bool "TPS65910 Power Management chip"
724 depends on I2C=y 724 depends on I2C=y && GPIOLIB
725 select MFD_CORE 725 select MFD_CORE
726 select GPIO_TPS65910 726 select GPIO_TPS65910
727 help 727 help
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index e63782107e2f..02a15d7cb3b0 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2005,7 +2005,8 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
2005static struct mfd_cell db8500_prcmu_devs[] = { 2005static struct mfd_cell db8500_prcmu_devs[] = {
2006 { 2006 {
2007 .name = "db8500-prcmu-regulators", 2007 .name = "db8500-prcmu-regulators",
2008 .mfd_data = &db8500_regulators, 2008 .platform_data = &db8500_regulators,
2009 .pdata_size = sizeof(db8500_regulators),
2009 }, 2010 },
2010 { 2011 {
2011 .name = "cpufreq-u8500", 2012 .name = "cpufreq-u8500",
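For context, the mfd_cell change above replaces the driver-private .mfd_data pointer with the standard .platform_data/.pdata_size pair, so the MFD core can copy the data into each child device. The subdevice side here is a hypothetical sketch; only the field names mirror the hunk.

	/* Hypothetical child driver, for illustration only: the core
	 * copies .pdata_size bytes of .platform_data into the child, so
	 * the regulator driver reads it back the usual way. */
	static int example_probe(struct platform_device *pdev)
	{
		struct regulator_init_data *data = pdev->dev.platform_data;

		if (!data)
			return -ENODEV;
		/* ... register regulators described by data ... */
		return 0;
	}
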
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index b0c56313dbbb..8cebec5e85ee 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -304,7 +304,10 @@ static int check_and_rewind_pc(char *put_str, char *arg)
304 return 1; 304 return 1;
305 } 305 }
306 /* Readjust the instruction pointer if needed */ 306 /* Readjust the instruction pointer if needed */
307 instruction_pointer_set(&kgdbts_regs, ip + offset); 307 ip += offset;
308#ifdef GDB_ADJUSTS_BREAK_OFFSET
309 instruction_pointer_set(&kgdbts_regs, ip);
310#endif
308 return 0; 311 return 0;
309} 312}
310 313
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 4941e06fe2e1..5da5bea0f9f0 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -51,6 +51,7 @@ static unsigned int fmax = 515633;
51 * is asserted (likewise for RX) 51 * is asserted (likewise for RX)
52 * @sdio: variant supports SDIO 52 * @sdio: variant supports SDIO
53 * @st_clkdiv: true if using a ST-specific clock divider algorithm 53 * @st_clkdiv: true if using a ST-specific clock divider algorithm
54 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
54 */ 55 */
55struct variant_data { 56struct variant_data {
56 unsigned int clkreg; 57 unsigned int clkreg;
@@ -60,6 +61,7 @@ struct variant_data {
60 unsigned int fifohalfsize; 61 unsigned int fifohalfsize;
61 bool sdio; 62 bool sdio;
62 bool st_clkdiv; 63 bool st_clkdiv;
64 bool blksz_datactrl16;
63}; 65};
64 66
65static struct variant_data variant_arm = { 67static struct variant_data variant_arm = {
@@ -92,6 +94,17 @@ static struct variant_data variant_ux500 = {
92 .st_clkdiv = true, 94 .st_clkdiv = true,
93}; 95};
94 96
97static struct variant_data variant_ux500v2 = {
98 .fifosize = 30 * 4,
99 .fifohalfsize = 8 * 4,
100 .clkreg = MCI_CLK_ENABLE,
101 .clkreg_enable = MCI_ST_UX500_HWFCEN,
102 .datalength_bits = 24,
103 .sdio = true,
104 .st_clkdiv = true,
105 .blksz_datactrl16 = true,
106};
107
95/* 108/*
96 * This must be called with host->lock held 109 * This must be called with host->lock held
97 */ 110 */
@@ -465,7 +478,10 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
465 blksz_bits = ffs(data->blksz) - 1; 478 blksz_bits = ffs(data->blksz) - 1;
466 BUG_ON(1 << blksz_bits != data->blksz); 479 BUG_ON(1 << blksz_bits != data->blksz);
467 480
468 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; 481 if (variant->blksz_datactrl16)
482 datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
483 else
484 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
469 485
470 if (data->flags & MMC_DATA_READ) 486 if (data->flags & MMC_DATA_READ)
471 datactrl |= MCI_DPSM_DIRECTION; 487 datactrl |= MCI_DPSM_DIRECTION;
@@ -1311,9 +1327,14 @@ static struct amba_id mmci_ids[] = {
1311 }, 1327 },
1312 { 1328 {
1313 .id = 0x00480180, 1329 .id = 0x00480180,
1314 .mask = 0x00ffffff, 1330 .mask = 0xf0ffffff,
1315 .data = &variant_ux500, 1331 .data = &variant_ux500,
1316 }, 1332 },
1333 {
1334 .id = 0x10480180,
1335 .mask = 0xf0ffffff,
1336 .data = &variant_ux500v2,
1337 },
1317 { 0, 0 }, 1338 { 0, 0 },
1318}; 1339};
1319 1340
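The ux500v2 data path writes the block size verbatim into bits 16..30 of DATACTRL instead of encoding log2(blksz) in bits 4..7. A sketch of the two encodings, using only names from the hunks above:

	/* Illustrative only: the two DATACTRL block-size encodings. */
	unsigned int blksz = data->blksz;	/* e.g. 512 */

	if (variant->blksz_datactrl16)
		/* Raw block size at b16..b30. */
		datactrl = MCI_DPSM_ENABLE | (blksz << 16);
	else
		/* Power-of-two exponent at b4..b7. */
		datactrl = MCI_DPSM_ENABLE | ((ffs(blksz) - 1) << 4);
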
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index bc50d5ea5534..4be8373d43e5 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -33,20 +33,6 @@ config MTD_TESTS
33 should normally be compiled as kernel modules. The modules perform 33 should normally be compiled as kernel modules. The modules perform
34 various checks and verifications when loaded. 34 various checks and verifications when loaded.
35 35
36config MTD_PARTITIONS
37 bool "MTD partitioning support"
38 help
39 If you have a device which needs to divide its flash chip(s) up
40 into multiple 'partitions', each of which appears to the user as
41 a separate MTD device, you require this option to be enabled. If
42 unsure, say 'Y'.
43
44 Note, however, that you don't need this option for the DiskOnChip
45 devices. Partitioning on NFTL 'devices' is a different - that's the
46 'normal' form of partitioning used on a block device.
47
48if MTD_PARTITIONS
49
50config MTD_REDBOOT_PARTS 36config MTD_REDBOOT_PARTS
51 tristate "RedBoot partition table parsing" 37 tristate "RedBoot partition table parsing"
52 ---help--- 38 ---help---
@@ -99,7 +85,7 @@ endif # MTD_REDBOOT_PARTS
99 85
100config MTD_CMDLINE_PARTS 86config MTD_CMDLINE_PARTS
101 bool "Command line partition table parsing" 87 bool "Command line partition table parsing"
102 depends on MTD_PARTITIONS = "y" && MTD = "y" 88 depends on MTD = "y"
103 ---help--- 89 ---help---
104 Allow generic configuration of the MTD partition tables via the kernel 90 Allow generic configuration of the MTD partition tables via the kernel
105 command line. Multiple flash resources are supported for hardware where 91 command line. Multiple flash resources are supported for hardware where
@@ -163,8 +149,6 @@ config MTD_AR7_PARTS
163 ---help--- 149 ---help---
164 TI AR7 partitioning support 150 TI AR7 partitioning support
165 151
166endif # MTD_PARTITIONS
167
168comment "User Modules And Translation Layers" 152comment "User Modules And Translation Layers"
169 153
170config MTD_CHAR 154config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index d578095fb255..39664c4229ff 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -4,8 +4,7 @@
4 4
5# Core functionality. 5# Core functionality.
6obj-$(CONFIG_MTD) += mtd.o 6obj-$(CONFIG_MTD) += mtd.o
7mtd-y := mtdcore.o mtdsuper.o mtdconcat.o 7mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o
8mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
9mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o 8mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o
10 9
11obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o 10obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
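The Kconfig and Makefile hunks above can drop MTD_PARTITIONS because the driver hunks below move to a unified registration helper: mtd_device_register(mtd, parts, nr_parts) covers both the old add_mtd_device() case (pass NULL, 0) and the old add_mtd_partitions() case, and mtd_device_unregister() replaces del_mtd_device()/del_mtd_partitions(), making the mtd_has_partitions() branches unnecessary. A before/after sketch:

	/* Illustrative only: the MTD registration migration. */

	/* Before: two APIs, chosen at each call site. */
	if (nr_parts > 0)
		err = add_mtd_partitions(mtd, parts, nr_parts);
	else
		err = add_mtd_device(mtd);

	/* After: one entry point; NULL/0 means the whole, unpartitioned
	 * device. */
	err = mtd_device_register(mtd, parts, nr_parts);

	/* Teardown collapses to a single call as well. */
	err = mtd_device_unregister(mtd);
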
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 09cb7c8d93b4..e1e122f2f929 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -812,12 +812,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
812 break; 812 break;
813 813
814 if (time_after(jiffies, timeo)) { 814 if (time_after(jiffies, timeo)) {
815 /* Urgh. Resume and pretend we weren't here. */ 815 /* Urgh. Resume and pretend we weren't here.
816 map_write(map, CMD(0xd0), adr); 816 * Make sure we're in 'read status' mode if it had finished */
817 /* Make sure we're in 'read status' mode if it had finished */ 817 put_chip(map, chip, adr);
818 map_write(map, CMD(0x70), adr);
819 chip->state = FL_ERASING;
820 chip->oldstate = FL_READY;
821 printk(KERN_ERR "%s: Chip not ready after erase " 818 printk(KERN_ERR "%s: Chip not ready after erase "
822 "suspended: status = 0x%lx\n", map->name, status.x[0]); 819 "suspended: status = 0x%lx\n", map->name, status.x[0]);
823 return -EIO; 820 return -EIO;
@@ -997,7 +994,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
997 994
998 switch(chip->oldstate) { 995 switch(chip->oldstate) {
999 case FL_ERASING: 996 case FL_ERASING:
1000 chip->state = chip->oldstate;
1001 /* What if one interleaved chip has finished and the 997 /* What if one interleaved chip has finished and the
1002 other hasn't? The old code would leave the finished 998 other hasn't? The old code would leave the finished
1003 one in READY mode. That's bad, and caused -EROFS 999 one in READY mode. That's bad, and caused -EROFS
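The net effect of this hunk (and the matching cfi_cmdset_0002.c and lpddr_cmds.c hunks below) is that the erase-resume recovery lives in one place: error paths no longer rewrite the resume and read-status commands and the chip state by hand, they call put_chip(), whose FL_ERASING case does it. A before/after sketch using only commands from the hunk:

	/* Illustrative only: recovery after a failed erase suspend. */

	/* Before: open-coded at the call site. */
	map_write(map, CMD(0xd0), adr);	/* resume the erase */
	map_write(map, CMD(0x70), adr);	/* back to read-status mode */
	chip->state = FL_ERASING;
	chip->oldstate = FL_READY;

	/* After: one helper owns the sequence and the state fixup. */
	put_chip(map, chip, adr);
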
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 0b49266840b9..23175edd5634 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -462,13 +462,14 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
462 cfi_fixup_major_minor(cfi, extp); 462 cfi_fixup_major_minor(cfi, extp);
463 463
464 /* 464 /*
465 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4 465 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
466 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19 466 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
467 * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf 467 * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
468 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf 468 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
469 * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
469 */ 470 */
470 if (extp->MajorVersion != '1' || 471 if (extp->MajorVersion != '1' ||
471 (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '4'))) { 472 (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
472 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " 473 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
473 "version %c.%c (%#02x/%#02x).\n", 474 "version %c.%c (%#02x/%#02x).\n",
474 extp->MajorVersion, extp->MinorVersion, 475 extp->MajorVersion, extp->MinorVersion,
@@ -710,9 +711,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
710 * there was an error (so leave the erase 711 * there was an error (so leave the erase
711 * routine to recover from it) or we trying to 712 * routine to recover from it) or we trying to
712 * use the erase-in-progress sector. */ 713 * use the erase-in-progress sector. */
713 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); 714 put_chip(map, chip, adr);
714 chip->state = FL_ERASING;
715 chip->oldstate = FL_READY;
716 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); 715 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
717 return -EIO; 716 return -EIO;
718 } 717 }
@@ -762,7 +761,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
762 761
763 switch(chip->oldstate) { 762 switch(chip->oldstate) {
764 case FL_ERASING: 763 case FL_ERASING:
765 chip->state = chip->oldstate;
766 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); 764 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
767 chip->oldstate = FL_READY; 765 chip->oldstate = FL_READY;
768 chip->state = FL_ERASING; 766 chip->state = FL_ERASING;
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index ed56ad3884fb..179814a95f3a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -296,6 +296,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
296 /* make sure we're in 'read status' mode */ 296 /* make sure we're in 'read status' mode */
297 map_write(map, CMD(0x70), cmd_addr); 297 map_write(map, CMD(0x70), cmd_addr);
298 chip->state = FL_ERASING; 298 chip->state = FL_ERASING;
299 wake_up(&chip->wq);
299 mutex_unlock(&chip->mutex); 300 mutex_unlock(&chip->mutex);
300 printk(KERN_ERR "Chip not ready after erase " 301 printk(KERN_ERR "Chip not ready after erase "
301 "suspended: status = 0x%lx\n", status.x[0]); 302 "suspended: status = 0x%lx\n", status.x[0]);
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 97183c8c9e33..b78f23169d4e 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -294,7 +294,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
294 dev->mtd.priv = dev; 294 dev->mtd.priv = dev;
295 dev->mtd.owner = THIS_MODULE; 295 dev->mtd.owner = THIS_MODULE;
296 296
297 if (add_mtd_device(&dev->mtd)) { 297 if (mtd_device_register(&dev->mtd, NULL, 0)) {
298 /* Device didn't get added, so free the entry */ 298 /* Device didn't get added, so free the entry */
299 goto devinit_err; 299 goto devinit_err;
300 } 300 }
@@ -465,7 +465,7 @@ static void __devexit block2mtd_exit(void)
465 list_for_each_safe(pos, next, &blkmtd_device_list) { 465 list_for_each_safe(pos, next, &blkmtd_device_list) {
466 struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list); 466 struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
467 block2mtd_sync(&dev->mtd); 467 block2mtd_sync(&dev->mtd);
468 del_mtd_device(&dev->mtd); 468 mtd_device_unregister(&dev->mtd);
469 INFO("mtd%d: [%s] removed", dev->mtd.index, 469 INFO("mtd%d: [%s] removed", dev->mtd.index,
470 dev->mtd.name + strlen("block2mtd: ")); 470 dev->mtd.name + strlen("block2mtd: "));
471 list_del(&dev->list); 471 list_del(&dev->list);
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index 5bf5f460e132..f7fbf6025ef2 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -597,7 +597,7 @@ void DoC2k_init(struct mtd_info *mtd)
597 doc2klist = mtd; 597 doc2klist = mtd;
598 mtd->size = this->totlen; 598 mtd->size = this->totlen;
599 mtd->erasesize = this->erasesize; 599 mtd->erasesize = this->erasesize;
600 add_mtd_device(mtd); 600 mtd_device_register(mtd, NULL, 0);
601 return; 601 return;
602 } 602 }
603} 603}
@@ -1185,7 +1185,7 @@ static void __exit cleanup_doc2000(void)
1185 this = mtd->priv; 1185 this = mtd->priv;
1186 doc2klist = this->nextdoc; 1186 doc2klist = this->nextdoc;
1187 1187
1188 del_mtd_device(mtd); 1188 mtd_device_unregister(mtd);
1189 1189
1190 iounmap(this->virtadr); 1190 iounmap(this->virtadr);
1191 kfree(this->chips); 1191 kfree(this->chips);
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 0990f7803628..241192f05bc8 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -376,7 +376,7 @@ void DoCMil_init(struct mtd_info *mtd)
376 this->nextdoc = docmillist; 376 this->nextdoc = docmillist;
377 docmillist = mtd; 377 docmillist = mtd;
378 mtd->size = this->totlen; 378 mtd->size = this->totlen;
379 add_mtd_device(mtd); 379 mtd_device_register(mtd, NULL, 0);
380 return; 380 return;
381 } 381 }
382} 382}
@@ -826,7 +826,7 @@ static void __exit cleanup_doc2001(void)
826 this = mtd->priv; 826 this = mtd->priv;
827 docmillist = this->nextdoc; 827 docmillist = this->nextdoc;
828 828
829 del_mtd_device(mtd); 829 mtd_device_unregister(mtd);
830 830
831 iounmap(this->virtadr); 831 iounmap(this->virtadr);
832 kfree(this->chips); 832 kfree(this->chips);
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 8b36fa77a195..09ae0adc3ad0 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -499,7 +499,7 @@ void DoCMilPlus_init(struct mtd_info *mtd)
499 docmilpluslist = mtd; 499 docmilpluslist = mtd;
500 mtd->size = this->totlen; 500 mtd->size = this->totlen;
501 mtd->erasesize = this->erasesize; 501 mtd->erasesize = this->erasesize;
502 add_mtd_device(mtd); 502 mtd_device_register(mtd, NULL, 0);
503 return; 503 return;
504 } 504 }
505} 505}
@@ -1091,7 +1091,7 @@ static void __exit cleanup_doc2001plus(void)
1091 this = mtd->priv; 1091 this = mtd->priv;
1092 docmilpluslist = this->nextdoc; 1092 docmilpluslist = this->nextdoc;
1093 1093
1094 del_mtd_device(mtd); 1094 mtd_device_unregister(mtd);
1095 1095
1096 iounmap(this->virtadr); 1096 iounmap(this->virtadr);
1097 kfree(this->chips); 1097 kfree(this->chips);
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 4b829f97d56c..772a0ff89e0f 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -684,9 +684,10 @@ static int __init lart_flash_init (void)
684#endif 684#endif
685 685
686#ifndef HAVE_PARTITIONS 686#ifndef HAVE_PARTITIONS
687 result = add_mtd_device (&mtd); 687 result = mtd_device_register(&mtd, NULL, 0);
688#else 688#else
689 result = add_mtd_partitions (&mtd,lart_partitions, ARRAY_SIZE(lart_partitions)); 689 result = mtd_device_register(&mtd, lart_partitions,
690 ARRAY_SIZE(lart_partitions));
690#endif 691#endif
691 692
692 return (result); 693 return (result);
@@ -695,9 +696,9 @@ static int __init lart_flash_init (void)
695static void __exit lart_flash_exit (void) 696static void __exit lart_flash_exit (void)
696{ 697{
697#ifndef HAVE_PARTITIONS 698#ifndef HAVE_PARTITIONS
698 del_mtd_device (&mtd); 699 mtd_device_unregister(&mtd);
699#else 700#else
700 del_mtd_partitions (&mtd); 701 mtd_device_unregister(&mtd);
701#endif 702#endif
702} 703}
703 704
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 3fb981d4bb51..35180e475c4c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -27,6 +27,7 @@
27#include <linux/sched.h> 27#include <linux/sched.h>
28#include <linux/mod_devicetable.h> 28#include <linux/mod_devicetable.h>
29 29
30#include <linux/mtd/cfi.h>
30#include <linux/mtd/mtd.h> 31#include <linux/mtd/mtd.h>
31#include <linux/mtd/partitions.h> 32#include <linux/mtd/partitions.h>
32 33
@@ -55,6 +56,9 @@
55#define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */ 56#define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */
56#define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */ 57#define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */
57 58
59/* Used for Spansion flashes only. */
60#define OPCODE_BRWR 0x17 /* Bank register write */
61
58/* Status Register bits. */ 62/* Status Register bits. */
59#define SR_WIP 1 /* Write in progress */ 63#define SR_WIP 1 /* Write in progress */
60#define SR_WEL 2 /* Write enable latch */ 64#define SR_WEL 2 /* Write enable latch */
@@ -76,6 +80,8 @@
76#define FAST_READ_DUMMY_BYTE 0 80#define FAST_READ_DUMMY_BYTE 0
77#endif 81#endif
78 82
83#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
84
79/****************************************************************************/ 85/****************************************************************************/
80 86
81struct m25p { 87struct m25p {
@@ -158,11 +164,18 @@ static inline int write_disable(struct m25p *flash)
158/* 164/*
159 * Enable/disable 4-byte addressing mode. 165 * Enable/disable 4-byte addressing mode.
160 */ 166 */
161static inline int set_4byte(struct m25p *flash, int enable) 167static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
162{ 168{
163 u8 code = enable ? OPCODE_EN4B : OPCODE_EX4B; 169 switch (JEDEC_MFR(jedec_id)) {
164 170 case CFI_MFR_MACRONIX:
165 return spi_write_then_read(flash->spi, &code, 1, NULL, 0); 171 flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
172 return spi_write(flash->spi, flash->command, 1);
173 default:
174 /* Spansion style */
175 flash->command[0] = OPCODE_BRWR;
176 flash->command[1] = enable << 7;
177 return spi_write(flash->spi, flash->command, 2);
178 }
166} 179}
167 180
168/* 181/*
@@ -668,6 +681,7 @@ static const struct spi_device_id m25p_ids[] = {
668 /* Macronix */ 681 /* Macronix */
669 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) }, 682 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
670 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) }, 683 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
684 { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
671 { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) }, 685 { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
672 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) }, 686 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
673 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, 687 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
@@ -684,6 +698,10 @@ static const struct spi_device_id m25p_ids[] = {
684 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) }, 698 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
685 { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K) }, 699 { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K) },
686 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) }, 700 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
701 { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
702 { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) },
703 { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) },
704 { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
687 { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, 705 { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
688 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, 706 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
689 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) }, 707 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
@@ -729,7 +747,10 @@ static const struct spi_device_id m25p_ids[] = {
729 { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) }, 747 { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
730 { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, 748 { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
731 749
732 { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) }, 750 { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
751 { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
752 { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
753 { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
733 754
734 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ 755 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
735 { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) }, 756 { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
@@ -804,6 +825,8 @@ static int __devinit m25p_probe(struct spi_device *spi)
804 struct m25p *flash; 825 struct m25p *flash;
805 struct flash_info *info; 826 struct flash_info *info;
806 unsigned i; 827 unsigned i;
828 struct mtd_partition *parts = NULL;
829 int nr_parts = 0;
807 830
808 /* Platform data helps sort out which chip type we have, as 831 /* Platform data helps sort out which chip type we have, as
809 * well as how this board partitions it. If we don't have 832 * well as how this board partitions it. If we don't have
@@ -868,9 +891,9 @@ static int __devinit m25p_probe(struct spi_device *spi)
868 * up with the software protection bits set 891 * up with the software protection bits set
869 */ 892 */
870 893
871 if (info->jedec_id >> 16 == 0x1f || 894 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL ||
872 info->jedec_id >> 16 == 0x89 || 895 JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL ||
873 info->jedec_id >> 16 == 0xbf) { 896 JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) {
874 write_enable(flash); 897 write_enable(flash);
875 write_sr(flash, 0); 898 write_sr(flash, 0);
876 } 899 }
@@ -888,7 +911,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
888 flash->mtd.read = m25p80_read; 911 flash->mtd.read = m25p80_read;
889 912
890 /* sst flash chips use AAI word program */ 913 /* sst flash chips use AAI word program */
891 if (info->jedec_id >> 16 == 0xbf) 914 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
892 flash->mtd.write = sst_write; 915 flash->mtd.write = sst_write;
893 else 916 else
894 flash->mtd.write = m25p80_write; 917 flash->mtd.write = m25p80_write;
@@ -914,7 +937,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
914 /* enable 4-byte addressing if the device exceeds 16MiB */ 937 /* enable 4-byte addressing if the device exceeds 16MiB */
915 if (flash->mtd.size > 0x1000000) { 938 if (flash->mtd.size > 0x1000000) {
916 flash->addr_width = 4; 939 flash->addr_width = 4;
917 set_4byte(flash, 1); 940 set_4byte(flash, info->jedec_id, 1);
918 } else 941 } else
919 flash->addr_width = 3; 942 flash->addr_width = 3;
920 } 943 }
@@ -945,48 +968,41 @@ static int __devinit m25p_probe(struct spi_device *spi)
945 /* partitions should match sector boundaries; and it may be good to 968 /* partitions should match sector boundaries; and it may be good to
946 * use readonly partitions for writeprotected sectors (BP2..BP0). 969 * use readonly partitions for writeprotected sectors (BP2..BP0).
947 */ 970 */
948 if (mtd_has_partitions()) { 971 if (mtd_has_cmdlinepart()) {
949 struct mtd_partition *parts = NULL; 972 static const char *part_probes[]
950 int nr_parts = 0; 973 = { "cmdlinepart", NULL, };
951
952 if (mtd_has_cmdlinepart()) {
953 static const char *part_probes[]
954 = { "cmdlinepart", NULL, };
955 974
956 nr_parts = parse_mtd_partitions(&flash->mtd, 975 nr_parts = parse_mtd_partitions(&flash->mtd,
957 part_probes, &parts, 0); 976 part_probes, &parts, 0);
958 } 977 }
959 978
960 if (nr_parts <= 0 && data && data->parts) { 979 if (nr_parts <= 0 && data && data->parts) {
961 parts = data->parts; 980 parts = data->parts;
962 nr_parts = data->nr_parts; 981 nr_parts = data->nr_parts;
963 } 982 }
964 983
965#ifdef CONFIG_MTD_OF_PARTS 984#ifdef CONFIG_MTD_OF_PARTS
966 if (nr_parts <= 0 && spi->dev.of_node) { 985 if (nr_parts <= 0 && spi->dev.of_node) {
967 nr_parts = of_mtd_parse_partitions(&spi->dev, 986 nr_parts = of_mtd_parse_partitions(&spi->dev,
968 spi->dev.of_node, &parts); 987 spi->dev.of_node, &parts);
969 } 988 }
970#endif 989#endif
971 990
972 if (nr_parts > 0) { 991 if (nr_parts > 0) {
973 for (i = 0; i < nr_parts; i++) { 992 for (i = 0; i < nr_parts; i++) {
974 DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = " 993 DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
975 "{.name = %s, .offset = 0x%llx, " 994 "{.name = %s, .offset = 0x%llx, "
976 ".size = 0x%llx (%lldKiB) }\n", 995 ".size = 0x%llx (%lldKiB) }\n",
977 i, parts[i].name, 996 i, parts[i].name,
978 (long long)parts[i].offset, 997 (long long)parts[i].offset,
979 (long long)parts[i].size, 998 (long long)parts[i].size,
980 (long long)(parts[i].size >> 10)); 999 (long long)(parts[i].size >> 10));
981 }
982 flash->partitioned = 1;
983 return add_mtd_partitions(&flash->mtd, parts, nr_parts);
984 } 1000 }
985 } else if (data && data->nr_parts) 1001 flash->partitioned = 1;
986 dev_warn(&spi->dev, "ignoring %d default partitions on %s\n", 1002 }
987 data->nr_parts, data->name);
988 1003
989 return add_mtd_device(&flash->mtd) == 1 ? -ENODEV : 0; 1004 return mtd_device_register(&flash->mtd, parts, nr_parts) == 1 ?
1005 -ENODEV : 0;
990} 1006}
991 1007
992 1008
@@ -996,10 +1012,7 @@ static int __devexit m25p_remove(struct spi_device *spi)
996 int status; 1012 int status;
997 1013
998 /* Clean up MTD stuff. */ 1014 /* Clean up MTD stuff. */
999 if (mtd_has_partitions() && flash->partitioned) 1015 status = mtd_device_unregister(&flash->mtd);
1000 status = del_mtd_partitions(&flash->mtd);
1001 else
1002 status = del_mtd_device(&flash->mtd);
1003 if (status == 0) { 1016 if (status == 0) {
1004 kfree(flash->command); 1017 kfree(flash->command);
1005 kfree(flash); 1018 kfree(flash);
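A note on the m25p80 hunks above: entering 4-byte addressing is now vendor-specific, so callers pass the JEDEC ID and set_4byte() picks the command — Macronix parts get the dedicated EN4B/EX4B opcodes, while others get the Spansion-style bank-register write (OPCODE_BRWR with bit 7 set). The JEDEC_MFR() helper simply keeps the manufacturer byte of the 3-byte ID. A small illustrative sketch:

	/* Illustrative only: manufacturer extraction and probe-time use. */
	u32 jedec_id = 0xc22018;	/* mx25l12805d, from the ID table */
	u8 mfr = JEDEC_MFR(jedec_id);	/* 0xc2 == CFI_MFR_MACRONIX */

	/* As in the hunk: devices over 16 MiB switch to 4-byte
	 * addressing, and set_4byte() dispatches on the JEDEC ID. */
	if (flash->mtd.size > 0x1000000) {
		flash->addr_width = 4;
		set_4byte(flash, jedec_id, 1);
	}
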
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 6a9a24a80a6d..8423fb6d4f26 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -220,7 +220,7 @@ static int __init ms02nv_init_one(ulong addr)
220 mtd->writesize = 1; 220 mtd->writesize = 1;
221 221
222 ret = -EIO; 222 ret = -EIO;
223 if (add_mtd_device(mtd)) { 223 if (mtd_device_register(mtd, NULL, 0)) {
224 printk(KERN_ERR 224 printk(KERN_ERR
225 "ms02-nv: Unable to register MTD device, aborting!\n"); 225 "ms02-nv: Unable to register MTD device, aborting!\n");
226 goto err_out_csr_res; 226 goto err_out_csr_res;
@@ -262,7 +262,7 @@ static void __exit ms02nv_remove_one(void)
262 262
263 root_ms02nv_mtd = mp->next; 263 root_ms02nv_mtd = mp->next;
264 264
265 del_mtd_device(mtd); 265 mtd_device_unregister(mtd);
266 266
267 release_resource(mp->resource.csr); 267 release_resource(mp->resource.csr);
268 kfree(mp->resource.csr); 268 kfree(mp->resource.csr);
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index c5015cc721d5..13749d458a31 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -637,6 +637,8 @@ add_dataflash_otp(struct spi_device *spi, char *name,
637 struct flash_platform_data *pdata = spi->dev.platform_data; 637 struct flash_platform_data *pdata = spi->dev.platform_data;
638 char *otp_tag = ""; 638 char *otp_tag = "";
639 int err = 0; 639 int err = 0;
640 struct mtd_partition *parts;
641 int nr_parts = 0;
640 642
641 priv = kzalloc(sizeof *priv, GFP_KERNEL); 643 priv = kzalloc(sizeof *priv, GFP_KERNEL);
642 if (!priv) 644 if (!priv)
@@ -675,33 +677,25 @@ add_dataflash_otp(struct spi_device *spi, char *name,
675 pagesize, otp_tag); 677 pagesize, otp_tag);
676 dev_set_drvdata(&spi->dev, priv); 678 dev_set_drvdata(&spi->dev, priv);
677 679
678 if (mtd_has_partitions()) { 680 if (mtd_has_cmdlinepart()) {
679 struct mtd_partition *parts; 681 static const char *part_probes[] = { "cmdlinepart", NULL, };
680 int nr_parts = 0;
681 682
682 if (mtd_has_cmdlinepart()) { 683 nr_parts = parse_mtd_partitions(device, part_probes, &parts,
683 static const char *part_probes[] 684 0);
684 = { "cmdlinepart", NULL, }; 685 }
685
686 nr_parts = parse_mtd_partitions(device,
687 part_probes, &parts, 0);
688 }
689 686
690 if (nr_parts <= 0 && pdata && pdata->parts) { 687 if (nr_parts <= 0 && pdata && pdata->parts) {
691 parts = pdata->parts; 688 parts = pdata->parts;
692 nr_parts = pdata->nr_parts; 689 nr_parts = pdata->nr_parts;
693 } 690 }
694 691
695 if (nr_parts > 0) { 692 if (nr_parts > 0) {
696 priv->partitioned = 1; 693 priv->partitioned = 1;
697 err = add_mtd_partitions(device, parts, nr_parts); 694 err = mtd_device_register(device, parts, nr_parts);
698 goto out; 695 goto out;
699 } 696 }
700 } else if (pdata && pdata->nr_parts)
701 dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
702 pdata->nr_parts, device->name);
703 697
704 if (add_mtd_device(device) == 1) 698 if (mtd_device_register(device, NULL, 0) == 1)
705 err = -ENODEV; 699 err = -ENODEV;
706 700
707out: 701out:
@@ -939,10 +933,7 @@ static int __devexit dataflash_remove(struct spi_device *spi)
939 933
940 DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev)); 934 DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev));
941 935
942 if (mtd_has_partitions() && flash->partitioned) 936 status = mtd_device_unregister(&flash->mtd);
943 status = del_mtd_partitions(&flash->mtd);
944 else
945 status = del_mtd_device(&flash->mtd);
946 if (status == 0) { 937 if (status == 0) {
947 dev_set_drvdata(&spi->dev, NULL); 938 dev_set_drvdata(&spi->dev, NULL);
948 kfree(flash); 939 kfree(flash);
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 1483e18971ce..2562689ba6b4 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -104,7 +104,7 @@ static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
104static void __exit cleanup_mtdram(void) 104static void __exit cleanup_mtdram(void)
105{ 105{
106 if (mtd_info) { 106 if (mtd_info) {
107 del_mtd_device(mtd_info); 107 mtd_device_unregister(mtd_info);
108 vfree(mtd_info->priv); 108 vfree(mtd_info->priv);
109 kfree(mtd_info); 109 kfree(mtd_info);
110 } 110 }
@@ -133,9 +133,8 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
133 mtd->read = ram_read; 133 mtd->read = ram_read;
134 mtd->write = ram_write; 134 mtd->write = ram_write;
135 135
136 if (add_mtd_device(mtd)) { 136 if (mtd_device_register(mtd, NULL, 0))
137 return -EIO; 137 return -EIO;
138 }
139 138
140 return 0; 139 return 0;
141} 140}
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 8d28fa02a5a2..23423bd00b06 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -115,7 +115,7 @@ static void unregister_devices(void)
115 struct phram_mtd_list *this, *safe; 115 struct phram_mtd_list *this, *safe;
116 116
117 list_for_each_entry_safe(this, safe, &phram_list, list) { 117 list_for_each_entry_safe(this, safe, &phram_list, list) {
118 del_mtd_device(&this->mtd); 118 mtd_device_unregister(&this->mtd);
119 iounmap(this->mtd.priv); 119 iounmap(this->mtd.priv);
120 kfree(this->mtd.name); 120 kfree(this->mtd.name);
121 kfree(this); 121 kfree(this);
@@ -153,7 +153,7 @@ static int register_device(char *name, unsigned long start, unsigned long len)
153 new->mtd.writesize = 1; 153 new->mtd.writesize = 1;
154 154
155 ret = -EAGAIN; 155 ret = -EAGAIN;
156 if (add_mtd_device(&new->mtd)) { 156 if (mtd_device_register(&new->mtd, NULL, 0)) {
157 pr_err("Failed to register new device\n"); 157 pr_err("Failed to register new device\n");
158 goto out2; 158 goto out2;
159 } 159 }
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 41b8cdcc64cb..ecff765579dd 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -798,7 +798,7 @@ static int __init init_pmc551(void)
798 mtd->writesize = 1; 798 mtd->writesize = 1;
799 mtd->owner = THIS_MODULE; 799 mtd->owner = THIS_MODULE;
800 800
801 if (add_mtd_device(mtd)) { 801 if (mtd_device_register(mtd, NULL, 0)) {
802 printk(KERN_NOTICE "pmc551: Failed to register new device\n"); 802 printk(KERN_NOTICE "pmc551: Failed to register new device\n");
803 pci_iounmap(PCI_Device, priv->start); 803 pci_iounmap(PCI_Device, priv->start);
804 kfree(mtd->priv); 804 kfree(mtd->priv);
@@ -806,7 +806,7 @@ static int __init init_pmc551(void)
806 break; 806 break;
807 } 807 }
808 808
809 /* Keep a reference as the add_mtd_device worked */ 809 /* Keep a reference as the mtd_device_register worked */
810 pci_dev_get(PCI_Device); 810 pci_dev_get(PCI_Device);
811 811
812 printk(KERN_NOTICE "Registered pmc551 memory device.\n"); 812 printk(KERN_NOTICE "Registered pmc551 memory device.\n");
@@ -856,7 +856,7 @@ static void __exit cleanup_pmc551(void)
856 pci_dev_put(priv->dev); 856 pci_dev_put(priv->dev);
857 857
858 kfree(mtd->priv); 858 kfree(mtd->priv);
859 del_mtd_device(mtd); 859 mtd_device_unregister(mtd);
860 kfree(mtd); 860 kfree(mtd);
861 found++; 861 found++;
862 } 862 }
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 592016a0668f..e585263161b9 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -210,7 +210,7 @@ static int register_device(char *name, unsigned long start, unsigned long length
210 (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ; 210 (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
211 (*curmtd)->mtdinfo->writesize = 1; 211 (*curmtd)->mtdinfo->writesize = 1;
212 212
213 if (add_mtd_device((*curmtd)->mtdinfo)) { 213 if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) {
214 E("slram: Failed to register new device\n"); 214 E("slram: Failed to register new device\n");
215 iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start); 215 iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
216 kfree((*curmtd)->mtdinfo->priv); 216 kfree((*curmtd)->mtdinfo->priv);
@@ -231,7 +231,7 @@ static void unregister_devices(void)
231 231
232 while (slram_mtdlist) { 232 while (slram_mtdlist) {
233 nextitem = slram_mtdlist->next; 233 nextitem = slram_mtdlist->next;
234 del_mtd_device(slram_mtdlist->mtdinfo); 234 mtd_device_unregister(slram_mtdlist->mtdinfo);
235 iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start); 235 iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
236 kfree(slram_mtdlist->mtdinfo->priv); 236 kfree(slram_mtdlist->mtdinfo->priv);
237 kfree(slram_mtdlist->mtdinfo); 237 kfree(slram_mtdlist->mtdinfo);
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index c163e619abc9..1e2c430aaad2 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -66,7 +66,7 @@ struct flash_info {
66 66
67#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd) 67#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)
68 68
69static struct flash_info __initdata sst25l_flash_info[] = { 69static struct flash_info __devinitdata sst25l_flash_info[] = {
70 {"sst25lf020a", 0xbf43, 256, 1024, 4096}, 70 {"sst25lf020a", 0xbf43, 256, 1024, 4096},
71 {"sst25lf040a", 0xbf44, 256, 2048, 4096}, 71 {"sst25lf040a", 0xbf44, 256, 2048, 4096},
72}; 72};
@@ -381,6 +381,8 @@ static int __devinit sst25l_probe(struct spi_device *spi)
381 struct sst25l_flash *flash; 381 struct sst25l_flash *flash;
382 struct flash_platform_data *data; 382 struct flash_platform_data *data;
383 int ret, i; 383 int ret, i;
384 struct mtd_partition *parts = NULL;
385 int nr_parts = 0;
384 386
385 flash_info = sst25l_match_device(spi); 387 flash_info = sst25l_match_device(spi);
386 if (!flash_info) 388 if (!flash_info)
@@ -420,46 +422,37 @@ static int __devinit sst25l_probe(struct spi_device *spi)
420 flash->mtd.erasesize, flash->mtd.erasesize / 1024, 422 flash->mtd.erasesize, flash->mtd.erasesize / 1024,
421 flash->mtd.numeraseregions); 423 flash->mtd.numeraseregions);
422 424
423 if (mtd_has_partitions()) {
424 struct mtd_partition *parts = NULL;
425 int nr_parts = 0;
426 425
427 if (mtd_has_cmdlinepart()) { 426 if (mtd_has_cmdlinepart()) {
428 static const char *part_probes[] = 427 static const char *part_probes[] = {"cmdlinepart", NULL};
429 {"cmdlinepart", NULL};
430 428
431 nr_parts = parse_mtd_partitions(&flash->mtd, 429 nr_parts = parse_mtd_partitions(&flash->mtd,
432 part_probes, 430 part_probes,
433 &parts, 0); 431 &parts, 0);
434 } 432 }
435 433
436 if (nr_parts <= 0 && data && data->parts) { 434 if (nr_parts <= 0 && data && data->parts) {
437 parts = data->parts; 435 parts = data->parts;
438 nr_parts = data->nr_parts; 436 nr_parts = data->nr_parts;
439 } 437 }
440 438
441 if (nr_parts > 0) { 439 if (nr_parts > 0) {
442 for (i = 0; i < nr_parts; i++) { 440 for (i = 0; i < nr_parts; i++) {
443 DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = " 441 DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
444 "{.name = %s, .offset = 0x%llx, " 442 "{.name = %s, .offset = 0x%llx, "
445 ".size = 0x%llx (%lldKiB) }\n", 443 ".size = 0x%llx (%lldKiB) }\n",
446 i, parts[i].name, 444 i, parts[i].name,
447 (long long)parts[i].offset, 445 (long long)parts[i].offset,
448 (long long)parts[i].size, 446 (long long)parts[i].size,
449 (long long)(parts[i].size >> 10)); 447 (long long)(parts[i].size >> 10));
450 }
451
452 flash->partitioned = 1;
453 return add_mtd_partitions(&flash->mtd,
454 parts, nr_parts);
455 } 448 }
456 449
457 } else if (data && data->nr_parts) { 450 flash->partitioned = 1;
458 dev_warn(&spi->dev, "ignoring %d default partitions on %s\n", 451 return mtd_device_register(&flash->mtd, parts,
459 data->nr_parts, data->name); 452 nr_parts);
460 } 453 }
461 454
462 ret = add_mtd_device(&flash->mtd); 455 ret = mtd_device_register(&flash->mtd, NULL, 0);
463 if (ret == 1) { 456 if (ret == 1) {
464 kfree(flash); 457 kfree(flash);
465 dev_set_drvdata(&spi->dev, NULL); 458 dev_set_drvdata(&spi->dev, NULL);
@@ -469,15 +462,12 @@ static int __devinit sst25l_probe(struct spi_device *spi)
469 return 0; 462 return 0;
470} 463}
471 464
472static int __exit sst25l_remove(struct spi_device *spi) 465static int __devexit sst25l_remove(struct spi_device *spi)
473{ 466{
474 struct sst25l_flash *flash = dev_get_drvdata(&spi->dev); 467 struct sst25l_flash *flash = dev_get_drvdata(&spi->dev);
475 int ret; 468 int ret;
476 469
477 if (mtd_has_partitions() && flash->partitioned) 470 ret = mtd_device_unregister(&flash->mtd);
478 ret = del_mtd_partitions(&flash->mtd);
479 else
480 ret = del_mtd_device(&flash->mtd);
481 if (ret == 0) 471 if (ret == 0)
482 kfree(flash); 472 kfree(flash);
483 return ret; 473 return ret;
@@ -490,7 +480,7 @@ static struct spi_driver sst25l_driver = {
490 .owner = THIS_MODULE, 480 .owner = THIS_MODULE,
491 }, 481 },
492 .probe = sst25l_probe, 482 .probe = sst25l_probe,
493 .remove = __exit_p(sst25l_remove), 483 .remove = __devexit_p(sst25l_remove),
494}; 484};
495 485
496static int __init sst25l_init(void) 486static int __init sst25l_init(void)
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 12679925b420..65655dd59e1f 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -313,12 +313,7 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
313 if (ret) { 313 if (ret) {
314 /* Oops. something got wrong. */ 314 /* Oops. something got wrong. */
315 /* Resume and pretend we weren't here. */ 315 /* Resume and pretend we weren't here. */
316 map_write(map, CMD(LPDDR_RESUME), 316 put_chip(map, chip);
317 map->pfow_base + PFOW_COMMAND_CODE);
318 map_write(map, CMD(LPDDR_START_EXECUTION),
319 map->pfow_base + PFOW_COMMAND_EXECUTE);
320 chip->state = FL_ERASING;
321 chip->oldstate = FL_READY;
322 printk(KERN_ERR "%s: suspend operation failed." 317 printk(KERN_ERR "%s: suspend operation failed."
323 "State may be wrong \n", map->name); 318 "State may be wrong \n", map->name);
324 return -EIO; 319 return -EIO;
@@ -383,7 +378,6 @@ static void put_chip(struct map_info *map, struct flchip *chip)
383 378
384 switch (chip->oldstate) { 379 switch (chip->oldstate) {
385 case FL_ERASING: 380 case FL_ERASING:
386 chip->state = chip->oldstate;
387 map_write(map, CMD(LPDDR_RESUME), 381 map_write(map, CMD(LPDDR_RESUME),
388 map->pfow_base + PFOW_COMMAND_CODE); 382 map->pfow_base + PFOW_COMMAND_CODE);
389 map_write(map, CMD(LPDDR_START_EXECUTION), 383 map_write(map, CMD(LPDDR_START_EXECUTION),
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 5069111c81cc..c0c328c5b133 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -82,7 +82,6 @@ config MTD_PHYSMAP_OF
82config MTD_PMC_MSP_EVM 82config MTD_PMC_MSP_EVM
83 tristate "CFI Flash device mapped on PMC-Sierra MSP" 83 tristate "CFI Flash device mapped on PMC-Sierra MSP"
84 depends on PMC_MSP && MTD_CFI 84 depends on PMC_MSP && MTD_CFI
85 select MTD_PARTITIONS
86 help 85 help
87 This provides a 'mapping' driver which supports the way 86 This provides a 'mapping' driver which supports the way
88 in which user-programmable flash chips are connected on the 87 in which user-programmable flash chips are connected on the
@@ -122,7 +121,7 @@ config MTD_SC520CDP
122 121
123config MTD_NETSC520 122config MTD_NETSC520
124 tristate "CFI Flash device mapped on AMD NetSc520" 123 tristate "CFI Flash device mapped on AMD NetSc520"
125 depends on X86 && MTD_CFI && MTD_PARTITIONS 124 depends on X86 && MTD_CFI
126 help 125 help
127 This enables access routines for the flash chips on the AMD NetSc520 126 This enables access routines for the flash chips on the AMD NetSc520
128 demonstration board. If you have one of these boards and would like 127 demonstration board. If you have one of these boards and would like
@@ -131,7 +130,6 @@ config MTD_NETSC520
131config MTD_TS5500 130config MTD_TS5500
132 tristate "JEDEC Flash device mapped on Technologic Systems TS-5500" 131 tristate "JEDEC Flash device mapped on Technologic Systems TS-5500"
133 depends on X86 132 depends on X86
134 select MTD_PARTITIONS
135 select MTD_JEDECPROBE 133 select MTD_JEDECPROBE
136 select MTD_CFI_AMDSTD 134 select MTD_CFI_AMDSTD
137 help 135 help
@@ -149,7 +147,7 @@ config MTD_TS5500
149 147
150config MTD_SBC_GXX 148config MTD_SBC_GXX
151 tristate "CFI Flash device mapped on Arcom SBC-GXx boards" 149 tristate "CFI Flash device mapped on Arcom SBC-GXx boards"
152 depends on X86 && MTD_CFI_INTELEXT && MTD_PARTITIONS && MTD_COMPLEX_MAPPINGS 150 depends on X86 && MTD_CFI_INTELEXT && MTD_COMPLEX_MAPPINGS
153 help 151 help
154 This provides a driver for the on-board flash of Arcom Control 152 This provides a driver for the on-board flash of Arcom Control
155 Systems' SBC-GXn family of boards, formerly known as SBC-MediaGX. 153 Systems' SBC-GXn family of boards, formerly known as SBC-MediaGX.
@@ -161,7 +159,6 @@ config MTD_SBC_GXX
161config MTD_PXA2XX 159config MTD_PXA2XX
162 tristate "CFI Flash device mapped on Intel XScale PXA2xx based boards" 160 tristate "CFI Flash device mapped on Intel XScale PXA2xx based boards"
163 depends on (PXA25x || PXA27x) && MTD_CFI_INTELEXT 161 depends on (PXA25x || PXA27x) && MTD_CFI_INTELEXT
164 select MTD_PARTITIONS
165 help 162 help
166 This provides a driver for the NOR flash attached to a PXA2xx chip. 163 This provides a driver for the NOR flash attached to a PXA2xx chip.
167 164
@@ -185,7 +182,7 @@ config MTD_VMAX
185 182
186config MTD_SCx200_DOCFLASH 183config MTD_SCx200_DOCFLASH
187 tristate "Flash device mapped with DOCCS on NatSemi SCx200" 184 tristate "Flash device mapped with DOCCS on NatSemi SCx200"
188 depends on SCx200 && MTD_CFI && MTD_PARTITIONS 185 depends on SCx200 && MTD_CFI
189 help 186 help
190 Enable support for a flash chip mapped using the DOCCS signal on a 187 Enable support for a flash chip mapped using the DOCCS signal on a
191 National Semiconductor SCx200 processor. 188 National Semiconductor SCx200 processor.
@@ -247,7 +244,7 @@ config MTD_TSUNAMI
247 244
248config MTD_NETtel 245config MTD_NETtel
249 tristate "CFI flash device on SnapGear/SecureEdge" 246 tristate "CFI flash device on SnapGear/SecureEdge"
250 depends on X86 && MTD_PARTITIONS && MTD_JEDECPROBE 247 depends on X86 && MTD_JEDECPROBE
251 help 248 help
252 Support for flash chips on NETtel/SecureEdge/SnapGear boards. 249 Support for flash chips on NETtel/SecureEdge/SnapGear boards.
253 250
@@ -269,7 +266,7 @@ config MTD_LANTIQ
269 266
270config MTD_DILNETPC 267config MTD_DILNETPC
271 tristate "CFI Flash device mapped on DIL/Net PC" 268 tristate "CFI Flash device mapped on DIL/Net PC"
272 depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN 269 depends on X86 && MTD_CFI_INTELEXT && BROKEN
273 help 270 help
274 MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP". 271 MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
275 For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm> 272 For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
@@ -355,7 +352,7 @@ config MTD_CDB89712
355 352
356config MTD_SA1100 353config MTD_SA1100
357 tristate "CFI Flash device mapped on StrongARM SA11x0" 354 tristate "CFI Flash device mapped on StrongARM SA11x0"
358 depends on MTD_CFI && ARCH_SA1100 && MTD_PARTITIONS 355 depends on MTD_CFI && ARCH_SA1100
359 help 356 help
360 This enables access to the flash chips on most platforms based on 357 This enables access to the flash chips on most platforms based on
361 the SA1100 and SA1110, including the Assabet and the Compaq iPAQ. 358 the SA1100 and SA1110, including the Assabet and the Compaq iPAQ.
@@ -389,7 +386,7 @@ config MTD_IXP2000
389 386
390config MTD_FORTUNET 387config MTD_FORTUNET
391 tristate "CFI Flash device mapped on the FortuNet board" 388 tristate "CFI Flash device mapped on the FortuNet board"
392 depends on MTD_CFI && MTD_PARTITIONS && SA1100_FORTUNET 389 depends on MTD_CFI && SA1100_FORTUNET
393 help 390 help
394 This enables access to the Flash on the FortuNet board. If you 391 This enables access to the Flash on the FortuNet board. If you
395 have such a board, say 'Y'. 392 have such a board, say 'Y'.
@@ -461,7 +458,6 @@ config MTD_PCMCIA_ANONYMOUS
461config MTD_BFIN_ASYNC 458config MTD_BFIN_ASYNC
462 tristate "Blackfin BF533-STAMP Flash Chip Support" 459 tristate "Blackfin BF533-STAMP Flash Chip Support"
463 depends on BFIN533_STAMP && MTD_CFI && MTD_COMPLEX_MAPPINGS 460 depends on BFIN533_STAMP && MTD_CFI && MTD_COMPLEX_MAPPINGS
464 select MTD_PARTITIONS
465 default y 461 default y
466 help 462 help
467 Map driver which allows for simultaneous utilization of 463 Map driver which allows for simultaneous utilization of
@@ -473,7 +469,6 @@ config MTD_GPIO_ADDR
473 tristate "GPIO-assisted Flash Chip Support" 469 tristate "GPIO-assisted Flash Chip Support"
474 depends on GENERIC_GPIO || GPIOLIB 470 depends on GENERIC_GPIO || GPIOLIB
475 depends on MTD_COMPLEX_MAPPINGS 471 depends on MTD_COMPLEX_MAPPINGS
476 select MTD_PARTITIONS
477 help 472 help
478 Map driver which allows flashes to be partially physically addressed 473 Map driver which allows flashes to be partially physically addressed
479 and assisted by GPIOs. 474 and assisted by GPIOs.
@@ -482,14 +477,13 @@ config MTD_GPIO_ADDR
482 477
483config MTD_UCLINUX 478config MTD_UCLINUX
484 bool "Generic uClinux RAM/ROM filesystem support" 479 bool "Generic uClinux RAM/ROM filesystem support"
485 depends on MTD_PARTITIONS && MTD_RAM=y && !MMU 480 depends on MTD_RAM=y && !MMU
486 help 481 help
487 Map driver to support image based filesystems for uClinux. 482 Map driver to support image based filesystems for uClinux.
488 483
489config MTD_WRSBC8260 484config MTD_WRSBC8260
490 tristate "Map driver for WindRiver PowerQUICC II MPC82xx board" 485 tristate "Map driver for WindRiver PowerQUICC II MPC82xx board"
491 depends on (SBC82xx || SBC8560) 486 depends on (SBC82xx || SBC8560)
492 select MTD_PARTITIONS
493 select MTD_MAP_BANK_WIDTH_4 487 select MTD_MAP_BANK_WIDTH_4
494 select MTD_MAP_BANK_WIDTH_1 488 select MTD_MAP_BANK_WIDTH_1
495 select MTD_CFI_I1 489 select MTD_CFI_I1
@@ -502,7 +496,6 @@ config MTD_WRSBC8260
502config MTD_DMV182 496config MTD_DMV182
503 tristate "Map driver for Dy-4 SVME/DMV-182 board." 497 tristate "Map driver for Dy-4 SVME/DMV-182 board."
504 depends on DMV182 498 depends on DMV182
505 select MTD_PARTITIONS
506 select MTD_MAP_BANK_WIDTH_32 499 select MTD_MAP_BANK_WIDTH_32
507 select MTD_CFI_I8 500 select MTD_CFI_I8
508 select MTD_CFI_AMDSTD 501 select MTD_CFI_AMDSTD
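
The Kconfig hunks above all drop MTD_PARTITIONS from "depends on" and "select" lines: this series folds partition support into the MTD core, so the symbol disappears and every map driver gets the partition API unconditionally. As a minimal sketch of the driver-side pattern this retires (the config name is taken from the hunks above; the surrounding code is illustrative, not quoted from any one driver):

    /* Before: registration had to be compiled two ways. */
    #ifdef CONFIG_MTD_PARTITIONS
    	add_mtd_partitions(mtd, parts, nr_parts);
    #else
    	add_mtd_device(mtd);
    #endif

    /* After: one call, always available; see the conversions below. */
    mtd_device_register(mtd, parts, nr_parts);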
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 92de7e3a49a5..e2875d6fe129 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -82,7 +82,7 @@ static void amd76xrom_cleanup(struct amd76xrom_window *window)
82 if (map->rsrc.parent) { 82 if (map->rsrc.parent) {
83 release_resource(&map->rsrc); 83 release_resource(&map->rsrc);
84 } 84 }
85 del_mtd_device(map->mtd); 85 mtd_device_unregister(map->mtd);
86 map_destroy(map->mtd); 86 map_destroy(map->mtd);
87 list_del(&map->list); 87 list_del(&map->list);
88 kfree(map); 88 kfree(map);
@@ -262,7 +262,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
262 262
263 /* Now that the mtd devices is complete claim and export it */ 263 /* Now that the mtd devices is complete claim and export it */
264 map->mtd->owner = THIS_MODULE; 264 map->mtd->owner = THIS_MODULE;
265 if (add_mtd_device(map->mtd)) { 265 if (mtd_device_register(map->mtd, NULL, 0)) {
266 map_destroy(map->mtd); 266 map_destroy(map->mtd);
267 map->mtd = NULL; 267 map->mtd = NULL;
268 goto out; 268 goto out;
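
The amd76xrom conversion above is the template for every file that follows: add_mtd_device()/del_mtd_device() become mtd_device_register()/mtd_device_unregister() with a (NULL, 0) partition list. The prototypes assumed by these call sites would look roughly like this (a sketch reconstructed from usage in this diff, not quoted from mtd.h):

    struct mtd_info;
    struct mtd_partition;

    /* Register the master device; if nr_parts > 0, also create the
     * partitions described by parts, otherwise expose the whole chip. */
    int mtd_device_register(struct mtd_info *master,
    			const struct mtd_partition *parts,
    			int nr_parts);

    /* Tear down whatever mtd_device_register() created. */
    int mtd_device_unregister(struct mtd_info *master);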
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index 53664188fc47..e5bfd0e093bb 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -88,7 +88,7 @@ map:
88 sram_mtd->owner = THIS_MODULE; 88 sram_mtd->owner = THIS_MODULE;
89 sram_mtd->erasesize = 16; 89 sram_mtd->erasesize = 16;
90 90
91 if (add_mtd_device(sram_mtd)) { 91 if (mtd_device_register(sram_mtd, NULL, 0)) {
92 printk("NV-RAM device addition failed\n"); 92 printk("NV-RAM device addition failed\n");
93 err = -ENOMEM; 93 err = -ENOMEM;
94 goto out_probe; 94 goto out_probe;
@@ -111,7 +111,7 @@ out:
111static void __exit cleanup_autcpu12_maps(void) 111static void __exit cleanup_autcpu12_maps(void)
112{ 112{
113 if (sram_mtd) { 113 if (sram_mtd) {
114 del_mtd_device(sram_mtd); 114 mtd_device_unregister(sram_mtd);
115 map_destroy(sram_mtd); 115 map_destroy(sram_mtd);
116 iounmap((void *)autcpu12_sram_map.virt); 116 iounmap((void *)autcpu12_sram_map.virt);
117 } 117 }
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
index 1f3049590d9e..608967fe74c6 100644
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -224,8 +224,8 @@ probe_ok:
224 goto err_probe; 224 goto err_probe;
225 } 225 }
226 226
227 return add_mtd_partitions(bcm963xx_mtd_info, parsed_parts, 227 return mtd_device_register(bcm963xx_mtd_info, parsed_parts,
228 parsed_nr_parts); 228 parsed_nr_parts);
229 229
230err_probe: 230err_probe:
231 iounmap(bcm963xx_map.virt); 231 iounmap(bcm963xx_map.virt);
@@ -235,7 +235,7 @@ err_probe:
235static int bcm963xx_remove(struct platform_device *pdev) 235static int bcm963xx_remove(struct platform_device *pdev)
236{ 236{
237 if (bcm963xx_mtd_info) { 237 if (bcm963xx_mtd_info) {
238 del_mtd_partitions(bcm963xx_mtd_info); 238 mtd_device_unregister(bcm963xx_mtd_info);
239 map_destroy(bcm963xx_mtd_info); 239 map_destroy(bcm963xx_mtd_info);
240 } 240 }
241 241
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 85dd18193cf2..d4297a97e100 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -41,9 +41,7 @@ struct async_state {
41 uint32_t flash_ambctl0, flash_ambctl1; 41 uint32_t flash_ambctl0, flash_ambctl1;
42 uint32_t save_ambctl0, save_ambctl1; 42 uint32_t save_ambctl0, save_ambctl1;
43 unsigned long irq_flags; 43 unsigned long irq_flags;
44#ifdef CONFIG_MTD_PARTITIONS
45 struct mtd_partition *parts; 44 struct mtd_partition *parts;
46#endif
47}; 45};
48 46
49static void switch_to_flash(struct async_state *state) 47static void switch_to_flash(struct async_state *state)
@@ -124,9 +122,7 @@ static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const voi
124 switch_back(state); 122 switch_back(state);
125} 123}
126 124
127#ifdef CONFIG_MTD_PARTITIONS
128static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; 125static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
129#endif
130 126
131static int __devinit bfin_flash_probe(struct platform_device *pdev) 127static int __devinit bfin_flash_probe(struct platform_device *pdev)
132{ 128{
@@ -169,22 +165,17 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
169 return -ENXIO; 165 return -ENXIO;
170 } 166 }
171 167
172#ifdef CONFIG_MTD_PARTITIONS
173 ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0); 168 ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0);
174 if (ret > 0) { 169 if (ret > 0) {
175 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n"); 170 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
176 add_mtd_partitions(state->mtd, pdata->parts, ret); 171 mtd_device_register(state->mtd, pdata->parts, ret);
177 state->parts = pdata->parts; 172 state->parts = pdata->parts;
178
179 } else if (pdata->nr_parts) { 173 } else if (pdata->nr_parts) {
180 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n"); 174 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
181 add_mtd_partitions(state->mtd, pdata->parts, pdata->nr_parts); 175 mtd_device_register(state->mtd, pdata->parts, pdata->nr_parts);
182 176 } else {
183 } else
184#endif
185 {
186 pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n"); 177 pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n");
187 add_mtd_device(state->mtd); 178 mtd_device_register(state->mtd, NULL, 0);
188 } 179 }
189 180
190 platform_set_drvdata(pdev, state); 181 platform_set_drvdata(pdev, state);
@@ -196,10 +187,8 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev)
196{ 187{
197 struct async_state *state = platform_get_drvdata(pdev); 188 struct async_state *state = platform_get_drvdata(pdev);
198 gpio_free(state->enet_flash_pin); 189 gpio_free(state->enet_flash_pin);
199#ifdef CONFIG_MTD_PARTITIONS 190 mtd_device_unregister(state->mtd);
200 del_mtd_partitions(state->mtd);
201 kfree(state->parts); 191 kfree(state->parts);
202#endif
203 map_destroy(state->mtd); 192 map_destroy(state->mtd);
204 kfree(state); 193 kfree(state);
205 return 0; 194 return 0;
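
bfin-async-flash shows the three-way probe idiom most drivers below repeat: partitions parsed from the command line or RedBoot win, then board-supplied partitions, then the bare chip. Condensed into a hypothetical helper (not part of this patch; names invented):

    static const char *probes[] = { "cmdlinepart", "RedBoot", NULL };

    static int register_flash(struct mtd_info *mtd,
    			  struct mtd_partition *board_parts, int board_nr)
    {
    	struct mtd_partition *parts = NULL;
    	int nr = parse_mtd_partitions(mtd, probes, &parts, 0);

    	if (nr > 0)		/* cmdline/RedBoot partitions */
    		return mtd_device_register(mtd, parts, nr);
    	if (board_nr > 0)	/* platform data */
    		return mtd_device_register(mtd, board_parts, board_nr);
    	return mtd_device_register(mtd, NULL, 0);	/* whole chip */
    }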
diff --git a/drivers/mtd/maps/cdb89712.c b/drivers/mtd/maps/cdb89712.c
index 8d92d8db9a98..c29cbf87ea0c 100644
--- a/drivers/mtd/maps/cdb89712.c
+++ b/drivers/mtd/maps/cdb89712.c
@@ -75,7 +75,7 @@ static int __init init_cdb89712_flash (void)
75 75
76 flash_mtd->owner = THIS_MODULE; 76 flash_mtd->owner = THIS_MODULE;
77 77
78 if (add_mtd_device(flash_mtd)) { 78 if (mtd_device_register(flash_mtd, NULL, 0)) {
79 printk("FLASH device addition failed\n"); 79 printk("FLASH device addition failed\n");
80 err = -ENOMEM; 80 err = -ENOMEM;
81 goto out_probe; 81 goto out_probe;
@@ -141,7 +141,7 @@ static int __init init_cdb89712_sram (void)
141 sram_mtd->owner = THIS_MODULE; 141 sram_mtd->owner = THIS_MODULE;
142 sram_mtd->erasesize = 16; 142 sram_mtd->erasesize = 16;
143 143
144 if (add_mtd_device(sram_mtd)) { 144 if (mtd_device_register(sram_mtd, NULL, 0)) {
145 printk("SRAM device addition failed\n"); 145 printk("SRAM device addition failed\n");
146 err = -ENOMEM; 146 err = -ENOMEM;
147 goto out_probe; 147 goto out_probe;
@@ -209,7 +209,7 @@ static int __init init_cdb89712_bootrom (void)
209 bootrom_mtd->owner = THIS_MODULE; 209 bootrom_mtd->owner = THIS_MODULE;
210 bootrom_mtd->erasesize = 0x10000; 210 bootrom_mtd->erasesize = 0x10000;
211 211
212 if (add_mtd_device(bootrom_mtd)) { 212 if (mtd_device_register(bootrom_mtd, NULL, 0)) {
213 printk("BootROM device addition failed\n"); 213 printk("BootROM device addition failed\n");
214 err = -ENOMEM; 214 err = -ENOMEM;
215 goto out_probe; 215 goto out_probe;
@@ -249,21 +249,21 @@ static int __init init_cdb89712_maps(void)
249static void __exit cleanup_cdb89712_maps(void) 249static void __exit cleanup_cdb89712_maps(void)
250{ 250{
251 if (sram_mtd) { 251 if (sram_mtd) {
252 del_mtd_device(sram_mtd); 252 mtd_device_unregister(sram_mtd);
253 map_destroy(sram_mtd); 253 map_destroy(sram_mtd);
254 iounmap((void *)cdb89712_sram_map.virt); 254 iounmap((void *)cdb89712_sram_map.virt);
255 release_resource (&cdb89712_sram_resource); 255 release_resource (&cdb89712_sram_resource);
256 } 256 }
257 257
258 if (flash_mtd) { 258 if (flash_mtd) {
259 del_mtd_device(flash_mtd); 259 mtd_device_unregister(flash_mtd);
260 map_destroy(flash_mtd); 260 map_destroy(flash_mtd);
261 iounmap((void *)cdb89712_flash_map.virt); 261 iounmap((void *)cdb89712_flash_map.virt);
262 release_resource (&cdb89712_flash_resource); 262 release_resource (&cdb89712_flash_resource);
263 } 263 }
264 264
265 if (bootrom_mtd) { 265 if (bootrom_mtd) {
266 del_mtd_device(bootrom_mtd); 266 mtd_device_unregister(bootrom_mtd);
267 map_destroy(bootrom_mtd); 267 map_destroy(bootrom_mtd);
268 iounmap((void *)cdb89712_bootrom_map.virt); 268 iounmap((void *)cdb89712_bootrom_map.virt);
269 release_resource (&cdb89712_bootrom_resource); 269 release_resource (&cdb89712_bootrom_resource);
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
index 23f551dc8ca8..06f9c9815720 100644
--- a/drivers/mtd/maps/ceiva.c
+++ b/drivers/mtd/maps/ceiva.c
@@ -224,7 +224,7 @@ static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd
224{ 224{
225 int i; 225 int i;
226 226
227 del_mtd_partitions(mtd); 227 mtd_device_unregister(mtd);
228 228
229 if (mtd != clps[0].mtd) 229 if (mtd != clps[0].mtd)
230 mtd_concat_destroy(mtd); 230 mtd_concat_destroy(mtd);
@@ -292,11 +292,11 @@ static void __init clps_locate_partitions(struct mtd_info *mtd)
292 if (nr_parts == 0) { 292 if (nr_parts == 0) {
293 printk(KERN_NOTICE "clps flash: no partition info " 293 printk(KERN_NOTICE "clps flash: no partition info "
294 "available, registering whole flash\n"); 294 "available, registering whole flash\n");
295 add_mtd_device(mtd); 295 mtd_device_register(mtd, NULL, 0);
296 } else { 296 } else {
297 printk(KERN_NOTICE "clps flash: using %s partition " 297 printk(KERN_NOTICE "clps flash: using %s partition "
298 "definition\n", part_type); 298 "definition\n", part_type);
299 add_mtd_partitions(mtd, parsed_parts, nr_parts); 299 mtd_device_register(mtd, parsed_parts, nr_parts);
300 } 300 }
301 301
302 /* Always succeeds. */ 302 /* Always succeeds. */
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index f71343cd77cc..d16fc9d3b8cd 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -107,7 +107,7 @@ static int __init init_flagadm(void)
107 mymtd = do_map_probe("cfi_probe", &flagadm_map); 107 mymtd = do_map_probe("cfi_probe", &flagadm_map);
108 if (mymtd) { 108 if (mymtd) {
109 mymtd->owner = THIS_MODULE; 109 mymtd->owner = THIS_MODULE;
110 add_mtd_partitions(mymtd, flagadm_parts, PARTITION_COUNT); 110 mtd_device_register(mymtd, flagadm_parts, PARTITION_COUNT);
111 printk(KERN_NOTICE "FlagaDM flash device initialized\n"); 111 printk(KERN_NOTICE "FlagaDM flash device initialized\n");
112 return 0; 112 return 0;
113 } 113 }
@@ -119,7 +119,7 @@ static int __init init_flagadm(void)
119static void __exit cleanup_flagadm(void) 119static void __exit cleanup_flagadm(void)
120{ 120{
121 if (mymtd) { 121 if (mymtd) {
122 del_mtd_partitions(mymtd); 122 mtd_device_unregister(mymtd);
123 map_destroy(mymtd); 123 map_destroy(mymtd);
124 } 124 }
125 if (flagadm_map.virt) { 125 if (flagadm_map.virt) {
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 5fdb7b26cea3..3d0e762fa5f2 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -94,7 +94,7 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
94 if (map->rsrc.parent) 94 if (map->rsrc.parent)
95 release_resource(&map->rsrc); 95 release_resource(&map->rsrc);
96 96
97 del_mtd_device(map->mtd); 97 mtd_device_unregister(map->mtd);
98 map_destroy(map->mtd); 98 map_destroy(map->mtd);
99 list_del(&map->list); 99 list_del(&map->list);
100 kfree(map); 100 kfree(map);
@@ -291,7 +291,7 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
291 291
292 /* Now that the mtd devices is complete claim and export it */ 292 /* Now that the mtd devices is complete claim and export it */
293 map->mtd->owner = THIS_MODULE; 293 map->mtd->owner = THIS_MODULE;
294 if (add_mtd_device(map->mtd)) { 294 if (mtd_device_register(map->mtd, NULL, 0)) {
295 map_destroy(map->mtd); 295 map_destroy(map->mtd);
296 map->mtd = NULL; 296 map->mtd = NULL;
297 goto out; 297 goto out;
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
index cfacfa6f45dd..85bdece6ab3f 100644
--- a/drivers/mtd/maps/dbox2-flash.c
+++ b/drivers/mtd/maps/dbox2-flash.c
@@ -93,7 +93,7 @@ static int __init init_dbox2_flash(void)
93 mymtd->owner = THIS_MODULE; 93 mymtd->owner = THIS_MODULE;
94 94
95 /* Create MTD devices for each partition. */ 95 /* Create MTD devices for each partition. */
96 add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS); 96 mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
97 97
98 return 0; 98 return 0;
99 } 99 }
@@ -105,7 +105,7 @@ static int __init init_dbox2_flash(void)
105static void __exit cleanup_dbox2_flash(void) 105static void __exit cleanup_dbox2_flash(void)
106{ 106{
107 if (mymtd) { 107 if (mymtd) {
108 del_mtd_partitions(mymtd); 108 mtd_device_unregister(mymtd);
109 map_destroy(mymtd); 109 map_destroy(mymtd);
110 } 110 }
111 if (dbox2_flash_map.virt) { 111 if (dbox2_flash_map.virt) {
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index b3cb3a183809..7a9e1989c977 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -145,17 +145,13 @@ static struct map_info dc21285_map = {
145 145
146 146
147/* Partition stuff */ 147/* Partition stuff */
148#ifdef CONFIG_MTD_PARTITIONS
149static struct mtd_partition *dc21285_parts; 148static struct mtd_partition *dc21285_parts;
150static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; 149static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
151#endif
152 150
153static int __init init_dc21285(void) 151static int __init init_dc21285(void)
154{ 152{
155 153
156#ifdef CONFIG_MTD_PARTITIONS
157 int nrparts; 154 int nrparts;
158#endif
159 155
160 /* Determine bankwidth */ 156 /* Determine bankwidth */
161 switch (*CSR_SA110_CNTL & (3<<14)) { 157 switch (*CSR_SA110_CNTL & (3<<14)) {
@@ -204,13 +200,8 @@ static int __init init_dc21285(void)
204 200
205 dc21285_mtd->owner = THIS_MODULE; 201 dc21285_mtd->owner = THIS_MODULE;
206 202
207#ifdef CONFIG_MTD_PARTITIONS
208 nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0); 203 nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0);
209 if (nrparts > 0) 204 mtd_device_register(dc21285_mtd, dc21285_parts, nrparts);
210 add_mtd_partitions(dc21285_mtd, dc21285_parts, nrparts);
211 else
212#endif
213 add_mtd_device(dc21285_mtd);
214 205
215 if(machine_is_ebsa285()) { 206 if(machine_is_ebsa285()) {
216 /* 207 /*
@@ -232,14 +223,9 @@ static int __init init_dc21285(void)
232 223
233static void __exit cleanup_dc21285(void) 224static void __exit cleanup_dc21285(void)
234{ 225{
235#ifdef CONFIG_MTD_PARTITIONS 226 mtd_device_unregister(dc21285_mtd);
236 if (dc21285_parts) { 227 if (dc21285_parts)
237 del_mtd_partitions(dc21285_mtd);
238 kfree(dc21285_parts); 228 kfree(dc21285_parts);
239 } else
240#endif
241 del_mtd_device(dc21285_mtd);
242
243 map_destroy(dc21285_mtd); 229 map_destroy(dc21285_mtd);
244 iounmap(dc21285_map.virt); 230 iounmap(dc21285_map.virt);
245} 231}
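
Note the remove path in dc21285 (and in amd76xrom and ck804xrom above): one ordering everywhere, unregister from the MTD core first, free any parser-allocated partition table, and only then destroy the map. An illustrative teardown, not a quoted function:

    static void example_cleanup(struct mtd_info *mtd,
    			    struct mtd_partition *parsed)
    {
    	mtd_device_unregister(mtd);	/* remove the mtdN devices first */
    	kfree(parsed);			/* parser-allocated table, may be NULL */
    	map_destroy(mtd);		/* then release the map-probe state */
    }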
diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c
index 0713e3a5a22c..3e393f0da823 100644
--- a/drivers/mtd/maps/dilnetpc.c
+++ b/drivers/mtd/maps/dilnetpc.c
@@ -450,7 +450,7 @@ static int __init init_dnpc(void)
450 partition_info[2].mtdp = &lowlvl_parts[1]; 450 partition_info[2].mtdp = &lowlvl_parts[1];
451 partition_info[3].mtdp = &lowlvl_parts[3]; 451 partition_info[3].mtdp = &lowlvl_parts[3];
452 452
453 add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS); 453 mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
454 454
455 /* 455 /*
456 ** now create a virtual MTD device by concatenating the for partitions 456 ** now create a virtual MTD device by concatenating the for partitions
@@ -463,7 +463,8 @@ static int __init init_dnpc(void)
463 ** we do not supply mtd pointers in higlvl_partition_info, so 463 ** we do not supply mtd pointers in higlvl_partition_info, so
464 ** add_mtd_partitions() will register the devices. 464 ** add_mtd_partitions() will register the devices.
465 */ 465 */
466 add_mtd_partitions(merged_mtd, higlvl_partition_info, NUM_HIGHLVL_PARTITIONS); 466 mtd_device_register(merged_mtd, higlvl_partition_info,
467 NUM_HIGHLVL_PARTITIONS);
467 } 468 }
468 469
469 return 0; 470 return 0;
@@ -472,12 +473,12 @@ static int __init init_dnpc(void)
472static void __exit cleanup_dnpc(void) 473static void __exit cleanup_dnpc(void)
473{ 474{
474 if(merged_mtd) { 475 if(merged_mtd) {
475 del_mtd_partitions(merged_mtd); 476 mtd_device_unregister(merged_mtd);
476 mtd_concat_destroy(merged_mtd); 477 mtd_concat_destroy(merged_mtd);
477 } 478 }
478 479
479 if (mymtd) { 480 if (mymtd) {
480 del_mtd_partitions(mymtd); 481 mtd_device_unregister(mymtd);
481 map_destroy(mymtd); 482 map_destroy(mymtd);
482 } 483 }
483 if (dnpc_map.virt) { 484 if (dnpc_map.virt) {
diff --git a/drivers/mtd/maps/dmv182.c b/drivers/mtd/maps/dmv182.c
index d171674eb2ed..6538ac675e00 100644
--- a/drivers/mtd/maps/dmv182.c
+++ b/drivers/mtd/maps/dmv182.c
@@ -120,7 +120,7 @@ static int __init init_svme182(void)
120 this_mtd->size >> 20, FLASH_BASE_ADDR); 120 this_mtd->size >> 20, FLASH_BASE_ADDR);
121 121
122 this_mtd->owner = THIS_MODULE; 122 this_mtd->owner = THIS_MODULE;
123 add_mtd_partitions(this_mtd, partitions, num_parts); 123 mtd_device_register(this_mtd, partitions, num_parts);
124 124
125 return 0; 125 return 0;
126} 126}
@@ -129,7 +129,7 @@ static void __exit cleanup_svme182(void)
129{ 129{
130 if (this_mtd) 130 if (this_mtd)
131 { 131 {
132 del_mtd_partitions(this_mtd); 132 mtd_device_unregister(this_mtd);
133 map_destroy(this_mtd); 133 map_destroy(this_mtd);
134 } 134 }
135 135
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
index be9e90b44587..fe42a212bb3e 100644
--- a/drivers/mtd/maps/edb7312.c
+++ b/drivers/mtd/maps/edb7312.c
@@ -15,10 +15,7 @@
15#include <asm/io.h> 15#include <asm/io.h>
16#include <linux/mtd/mtd.h> 16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h> 17#include <linux/mtd/map.h>
18
19#ifdef CONFIG_MTD_PARTITIONS
20#include <linux/mtd/partitions.h> 18#include <linux/mtd/partitions.h>
21#endif
22 19
23#define WINDOW_ADDR 0x00000000 /* physical properties of flash */ 20#define WINDOW_ADDR 0x00000000 /* physical properties of flash */
24#define WINDOW_SIZE 0x01000000 21#define WINDOW_SIZE 0x01000000
@@ -40,8 +37,6 @@ struct map_info edb7312nor_map = {
40 .phys = WINDOW_ADDR, 37 .phys = WINDOW_ADDR,
41}; 38};
42 39
43#ifdef CONFIG_MTD_PARTITIONS
44
45/* 40/*
46 * MTD partitioning stuff 41 * MTD partitioning stuff
47 */ 42 */
@@ -66,8 +61,6 @@ static struct mtd_partition static_partitions[3] =
66 61
67static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; 62static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
68 63
69#endif
70
71static int mtd_parts_nb = 0; 64static int mtd_parts_nb = 0;
72static struct mtd_partition *mtd_parts = 0; 65static struct mtd_partition *mtd_parts = 0;
73 66
@@ -96,27 +89,22 @@ static int __init init_edb7312nor(void)
96 if (mymtd) { 89 if (mymtd) {
97 mymtd->owner = THIS_MODULE; 90 mymtd->owner = THIS_MODULE;
98 91
99#ifdef CONFIG_MTD_PARTITIONS
100 mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, MTDID); 92 mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, MTDID);
101 if (mtd_parts_nb > 0) 93 if (mtd_parts_nb > 0)
102 part_type = "detected"; 94 part_type = "detected";
103 95
104 if (mtd_parts_nb == 0) 96 if (mtd_parts_nb == 0) {
105 {
106 mtd_parts = static_partitions; 97 mtd_parts = static_partitions;
107 mtd_parts_nb = ARRAY_SIZE(static_partitions); 98 mtd_parts_nb = ARRAY_SIZE(static_partitions);
108 part_type = "static"; 99 part_type = "static";
109 } 100 }
110#endif 101
111 add_mtd_device(mymtd);
112 if (mtd_parts_nb == 0) 102 if (mtd_parts_nb == 0)
113 printk(KERN_NOTICE MSG_PREFIX "no partition info available\n"); 103 printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
114 else 104 else
115 {
116 printk(KERN_NOTICE MSG_PREFIX 105 printk(KERN_NOTICE MSG_PREFIX
117 "using %s partition definition\n", part_type); 106 "using %s partition definition\n", part_type);
118 add_mtd_partitions(mymtd, mtd_parts, mtd_parts_nb); 107 mtd_device_register(mymtd, mtd_parts, mtd_parts_nb);
119 }
120 return 0; 108 return 0;
121 } 109 }
122 110
@@ -127,7 +115,7 @@ static int __init init_edb7312nor(void)
127static void __exit cleanup_edb7312nor(void) 115static void __exit cleanup_edb7312nor(void)
128{ 116{
129 if (mymtd) { 117 if (mymtd) {
130 del_mtd_device(mymtd); 118 mtd_device_unregister(mymtd);
131 map_destroy(mymtd); 119 map_destroy(mymtd);
132 } 120 }
133 if (edb7312nor_map.virt) { 121 if (edb7312nor_map.virt) {
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 4feb7507ab7c..08322b1c3e81 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -128,7 +128,7 @@ static void esb2rom_cleanup(struct esb2rom_window *window)
128 list_for_each_entry_safe(map, scratch, &window->maps, list) { 128 list_for_each_entry_safe(map, scratch, &window->maps, list) {
129 if (map->rsrc.parent) 129 if (map->rsrc.parent)
130 release_resource(&map->rsrc); 130 release_resource(&map->rsrc);
131 del_mtd_device(map->mtd); 131 mtd_device_unregister(map->mtd);
132 map_destroy(map->mtd); 132 map_destroy(map->mtd);
133 list_del(&map->list); 133 list_del(&map->list);
134 kfree(map); 134 kfree(map);
@@ -352,7 +352,7 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
352 352
353 /* Now that the mtd devices is complete claim and export it */ 353 /* Now that the mtd devices is complete claim and export it */
354 map->mtd->owner = THIS_MODULE; 354 map->mtd->owner = THIS_MODULE;
355 if (add_mtd_device(map->mtd)) { 355 if (mtd_device_register(map->mtd, NULL, 0)) {
356 map_destroy(map->mtd); 356 map_destroy(map->mtd);
357 map->mtd = NULL; 357 map->mtd = NULL;
358 goto out; 358 goto out;
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
index 1e43124d498b..956e2e4f30ea 100644
--- a/drivers/mtd/maps/fortunet.c
+++ b/drivers/mtd/maps/fortunet.c
@@ -243,8 +243,9 @@ static int __init init_fortunet(void)
243 &map_regions[ix].map_info); 243 &map_regions[ix].map_info);
244 } 244 }
245 map_regions[ix].mymtd->owner = THIS_MODULE; 245 map_regions[ix].mymtd->owner = THIS_MODULE;
246 add_mtd_partitions(map_regions[ix].mymtd, 246 mtd_device_register(map_regions[ix].mymtd,
247 map_regions[ix].parts,map_regions_parts[ix]); 247 map_regions[ix].parts,
248 map_regions_parts[ix]);
248 } 249 }
249 } 250 }
250 if(iy) 251 if(iy)
@@ -261,7 +262,7 @@ static void __exit cleanup_fortunet(void)
261 { 262 {
262 if( map_regions[ix].mymtd ) 263 if( map_regions[ix].mymtd )
263 { 264 {
264 del_mtd_partitions( map_regions[ix].mymtd ); 265 mtd_device_unregister(map_regions[ix].mymtd);
265 map_destroy( map_regions[ix].mymtd ); 266 map_destroy( map_regions[ix].mymtd );
266 } 267 }
267 iounmap((void *)map_regions[ix].map_info.virt); 268 iounmap((void *)map_regions[ix].map_info.virt);
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index af5707a80205..7568c5f8b8ae 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -155,9 +155,7 @@ static void gf_copy_to(struct map_info *map, unsigned long to, const void *from,
155 memcpy_toio(map->virt + (to % state->win_size), from, len); 155 memcpy_toio(map->virt + (to % state->win_size), from, len);
156} 156}
157 157
158#ifdef CONFIG_MTD_PARTITIONS
159static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; 158static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
160#endif
161 159
162/** 160/**
163 * gpio_flash_probe() - setup a mapping for a GPIO assisted flash 161 * gpio_flash_probe() - setup a mapping for a GPIO assisted flash
@@ -189,7 +187,7 @@ static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
189 */ 187 */
190static int __devinit gpio_flash_probe(struct platform_device *pdev) 188static int __devinit gpio_flash_probe(struct platform_device *pdev)
191{ 189{
192 int ret; 190 int nr_parts;
193 size_t i, arr_size; 191 size_t i, arr_size;
194 struct physmap_flash_data *pdata; 192 struct physmap_flash_data *pdata;
195 struct resource *memory; 193 struct resource *memory;
@@ -254,24 +252,23 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
254 return -ENXIO; 252 return -ENXIO;
255 } 253 }
256 254
257#ifdef CONFIG_MTD_PARTITIONS 255 nr_parts = parse_mtd_partitions(state->mtd, part_probe_types,
258 ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0); 256 &pdata->parts, 0);
259 if (ret > 0) { 257 if (nr_parts > 0) {
260 pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n"); 258 pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n");
261 add_mtd_partitions(state->mtd, pdata->parts, ret); 259 mtd_device_register(state->mtd, pdata->parts, nr_parts);
262 kfree(pdata->parts); 260 kfree(pdata->parts);
263 261 return 0;
264 } else if (pdata->nr_parts) { 262 } else if (pdata->nr_parts) {
265 pr_devinit(KERN_NOTICE PFX "Using board partition definition\n"); 263 pr_devinit(KERN_NOTICE PFX "Using board partition definition\n");
266 add_mtd_partitions(state->mtd, pdata->parts, pdata->nr_parts); 264 nr_parts = pdata->nr_parts;
267 265 } else {
268 } else
269#endif
270 {
271 pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n"); 266 pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n");
272 add_mtd_device(state->mtd); 267 nr_parts = 0;
273 } 268 }
274 269
270 mtd_device_register(state->mtd, pdata->parts, nr_parts);
271
275 return 0; 272 return 0;
276} 273}
277 274
@@ -282,9 +279,7 @@ static int __devexit gpio_flash_remove(struct platform_device *pdev)
282 do { 279 do {
283 gpio_free(state->gpio_addrs[i]); 280 gpio_free(state->gpio_addrs[i]);
284 } while (++i < state->gpio_count); 281 } while (++i < state->gpio_count);
285#ifdef CONFIG_MTD_PARTITIONS 282 mtd_device_unregister(state->mtd);
286 del_mtd_partitions(state->mtd);
287#endif
288 map_destroy(state->mtd); 283 map_destroy(state->mtd);
289 kfree(state); 284 kfree(state);
290 return 0; 285 return 0;
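
gpio-addr-flash frees the parser-allocated partition table inside the probe itself, so ordering matters: the array has to stay valid while mtd_device_register() walks it, hence register first, kfree() second. A usage sketch (assuming, as these drivers do, that the core takes its own copy of the partition descriptions during registration):

    nr = parse_mtd_partitions(mtd, probes, &parts, 0);
    if (nr > 0) {
    	mtd_device_register(mtd, parts, nr);	/* reads parts */
    	kfree(parts);				/* only safe afterwards */
    }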
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 72c724fa8c27..7f035860a36b 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -92,18 +92,16 @@ static int __init h720x_mtd_init(void)
92 if (mymtd) { 92 if (mymtd) {
93 mymtd->owner = THIS_MODULE; 93 mymtd->owner = THIS_MODULE;
94 94
95#ifdef CONFIG_MTD_PARTITIONS
96 nr_mtd_parts = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0); 95 nr_mtd_parts = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0);
97 if (nr_mtd_parts > 0) 96 if (nr_mtd_parts > 0)
98 part_type = "command line"; 97 part_type = "command line";
99#endif
100 if (nr_mtd_parts <= 0) { 98 if (nr_mtd_parts <= 0) {
101 mtd_parts = h720x_partitions; 99 mtd_parts = h720x_partitions;
102 nr_mtd_parts = NUM_PARTITIONS; 100 nr_mtd_parts = NUM_PARTITIONS;
103 part_type = "builtin"; 101 part_type = "builtin";
104 } 102 }
105 printk(KERN_INFO "Using %s partition table\n", part_type); 103 printk(KERN_INFO "Using %s partition table\n", part_type);
106 add_mtd_partitions(mymtd, mtd_parts, nr_mtd_parts); 104 mtd_device_register(mymtd, mtd_parts, nr_mtd_parts);
107 return 0; 105 return 0;
108 } 106 }
109 107
@@ -118,7 +116,7 @@ static void __exit h720x_mtd_cleanup(void)
118{ 116{
119 117
120 if (mymtd) { 118 if (mymtd) {
121 del_mtd_partitions(mymtd); 119 mtd_device_unregister(mymtd);
122 map_destroy(mymtd); 120 map_destroy(mymtd);
123 } 121 }
124 122
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 1337a4191a0c..6689dcb3124d 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -67,7 +67,7 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
67 list_for_each_entry_safe(map, scratch, &window->maps, list) { 67 list_for_each_entry_safe(map, scratch, &window->maps, list) {
68 if (map->rsrc.parent) 68 if (map->rsrc.parent)
69 release_resource(&map->rsrc); 69 release_resource(&map->rsrc);
70 del_mtd_device(map->mtd); 70 mtd_device_unregister(map->mtd);
71 map_destroy(map->mtd); 71 map_destroy(map->mtd);
72 list_del(&map->list); 72 list_del(&map->list);
73 kfree(map); 73 kfree(map);
@@ -287,7 +287,7 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
287 287
288 /* Now that the mtd devices is complete claim and export it */ 288 /* Now that the mtd devices is complete claim and export it */
289 map->mtd->owner = THIS_MODULE; 289 map->mtd->owner = THIS_MODULE;
290 if (add_mtd_device(map->mtd)) { 290 if (mtd_device_register(map->mtd, NULL, 0)) {
291 map_destroy(map->mtd); 291 map_destroy(map->mtd);
292 map->mtd = NULL; 292 map->mtd = NULL;
293 goto out; 293 goto out;
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 998a27da97f3..404a50cbafa0 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -15,10 +15,7 @@
15#include <asm/io.h> 15#include <asm/io.h>
16#include <linux/mtd/mtd.h> 16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h> 17#include <linux/mtd/map.h>
18
19#ifdef CONFIG_MTD_PARTITIONS
20#include <linux/mtd/partitions.h> 18#include <linux/mtd/partitions.h>
21#endif
22 19
23#define WINDOW_ADDR0 0x00000000 /* physical properties of flash */ 20#define WINDOW_ADDR0 0x00000000 /* physical properties of flash */
24#define WINDOW_SIZE0 0x00800000 21#define WINDOW_SIZE0 0x00800000
@@ -49,8 +46,6 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = {
49 }, 46 },
50}; 47};
51 48
52#ifdef CONFIG_MTD_PARTITIONS
53
54/* 49/*
55 * MTD partitioning stuff 50 * MTD partitioning stuff
56 */ 51 */
@@ -66,8 +61,6 @@ static struct mtd_partition static_partitions[] =
66static int mtd_parts_nb[NUM_FLASHBANKS]; 61static int mtd_parts_nb[NUM_FLASHBANKS];
67static struct mtd_partition *mtd_parts[NUM_FLASHBANKS]; 62static struct mtd_partition *mtd_parts[NUM_FLASHBANKS];
68 63
69#endif
70
71static const char *probes[] = { "cmdlinepart", NULL }; 64static const char *probes[] = { "cmdlinepart", NULL };
72 65
73static int __init init_impa7(void) 66static int __init init_impa7(void)
@@ -104,7 +97,6 @@ static int __init init_impa7(void)
104 if (impa7_mtd[i]) { 97 if (impa7_mtd[i]) {
105 impa7_mtd[i]->owner = THIS_MODULE; 98 impa7_mtd[i]->owner = THIS_MODULE;
106 devicesfound++; 99 devicesfound++;
107#ifdef CONFIG_MTD_PARTITIONS
108 mtd_parts_nb[i] = parse_mtd_partitions(impa7_mtd[i], 100 mtd_parts_nb[i] = parse_mtd_partitions(impa7_mtd[i],
109 probes, 101 probes,
110 &mtd_parts[i], 102 &mtd_parts[i],
@@ -120,12 +112,8 @@ static int __init init_impa7(void)
120 printk(KERN_NOTICE MSG_PREFIX 112 printk(KERN_NOTICE MSG_PREFIX
121 "using %s partition definition\n", 113 "using %s partition definition\n",
122 part_type); 114 part_type);
123 add_mtd_partitions(impa7_mtd[i], 115 mtd_device_register(impa7_mtd[i],
124 mtd_parts[i], mtd_parts_nb[i]); 116 mtd_parts[i], mtd_parts_nb[i]);
125#else
126 add_mtd_device(impa7_mtd[i]);
127
128#endif
129 } 117 }
130 else 118 else
131 iounmap((void *)impa7_map[i].virt); 119 iounmap((void *)impa7_map[i].virt);
@@ -138,11 +126,7 @@ static void __exit cleanup_impa7(void)
138 int i; 126 int i;
139 for (i=0; i<NUM_FLASHBANKS; i++) { 127 for (i=0; i<NUM_FLASHBANKS; i++) {
140 if (impa7_mtd[i]) { 128 if (impa7_mtd[i]) {
141#ifdef CONFIG_MTD_PARTITIONS 129 mtd_device_unregister(impa7_mtd[i]);
142 del_mtd_partitions(impa7_mtd[i]);
143#else
144 del_mtd_device(impa7_mtd[i]);
145#endif
146 map_destroy(impa7_mtd[i]); 130 map_destroy(impa7_mtd[i]);
147 iounmap((void *)impa7_map[i].virt); 131 iounmap((void *)impa7_map[i].virt);
148 impa7_map[i].virt = 0; 132 impa7_map[i].virt = 0;
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index fc1998512eb4..d2f47be8754b 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -66,33 +66,18 @@ struct vr_nor_mtd {
66 66
67static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p) 67static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p)
68{ 68{
69 if (p->nr_parts > 0) { 69 mtd_device_unregister(p->info);
70#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
71 del_mtd_partitions(p->info);
72#endif
73 } else
74 del_mtd_device(p->info);
75} 70}
76 71
77static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p) 72static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
78{ 73{
79 int err = 0;
80#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
81 struct mtd_partition *parts; 74 struct mtd_partition *parts;
82 static const char *part_probes[] = { "cmdlinepart", NULL }; 75 static const char *part_probes[] = { "cmdlinepart", NULL };
83#endif
84 76
85 /* register the flash bank */ 77 /* register the flash bank */
86#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
87 /* partition the flash bank */ 78 /* partition the flash bank */
88 p->nr_parts = parse_mtd_partitions(p->info, part_probes, &parts, 0); 79 p->nr_parts = parse_mtd_partitions(p->info, part_probes, &parts, 0);
89 if (p->nr_parts > 0) 80 return mtd_device_register(p->info, parts, p->nr_parts);
90 err = add_mtd_partitions(p->info, parts, p->nr_parts);
91#endif
92 if (p->nr_parts <= 0)
93 err = add_mtd_device(p->info);
94
95 return err;
96} 81}
97 82
98static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) 83static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 9639d83a9d6c..c00b9175ba9e 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -119,7 +119,7 @@ static int ixp2000_flash_remove(struct platform_device *dev)
119 return 0; 119 return 0;
120 120
121 if (info->mtd) { 121 if (info->mtd) {
122 del_mtd_partitions(info->mtd); 122 mtd_device_unregister(info->mtd);
123 map_destroy(info->mtd); 123 map_destroy(info->mtd);
124 } 124 }
125 if (info->map.map_priv_1) 125 if (info->map.map_priv_1)
@@ -230,7 +230,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
230 230
231 err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0); 231 err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
232 if (err > 0) { 232 if (err > 0) {
233 err = add_mtd_partitions(info->mtd, info->partitions, err); 233 err = mtd_device_register(info->mtd, info->partitions, err);
234 if(err) 234 if(err)
235 dev_err(&dev->dev, "Could not parse partitions\n"); 235 dev_err(&dev->dev, "Could not parse partitions\n");
236 } 236 }
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 1f9fde0dad35..155b21942f47 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -162,7 +162,7 @@ static int ixp4xx_flash_remove(struct platform_device *dev)
162 return 0; 162 return 0;
163 163
164 if (info->mtd) { 164 if (info->mtd) {
165 del_mtd_partitions(info->mtd); 165 mtd_device_unregister(info->mtd);
166 map_destroy(info->mtd); 166 map_destroy(info->mtd);
167 } 167 }
168 if (info->map.virt) 168 if (info->map.virt)
@@ -252,10 +252,8 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
252 /* Use the fast version */ 252 /* Use the fast version */
253 info->map.write = ixp4xx_write16; 253 info->map.write = ixp4xx_write16;
254 254
255#ifdef CONFIG_MTD_PARTITIONS
256 nr_parts = parse_mtd_partitions(info->mtd, probes, &info->partitions, 255 nr_parts = parse_mtd_partitions(info->mtd, probes, &info->partitions,
257 dev->resource->start); 256 dev->resource->start);
258#endif
259 if (nr_parts > 0) { 257 if (nr_parts > 0) {
260 part_type = "dynamic"; 258 part_type = "dynamic";
261 } else { 259 } else {
@@ -263,18 +261,16 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
263 nr_parts = plat->nr_parts; 261 nr_parts = plat->nr_parts;
264 part_type = "static"; 262 part_type = "static";
265 } 263 }
266 if (nr_parts == 0) { 264 if (nr_parts == 0)
267 printk(KERN_NOTICE "IXP4xx flash: no partition info " 265 printk(KERN_NOTICE "IXP4xx flash: no partition info "
268 "available, registering whole flash\n"); 266 "available, registering whole flash\n");
269 err = add_mtd_device(info->mtd); 267 else
270 } else {
271 printk(KERN_NOTICE "IXP4xx flash: using %s partition " 268 printk(KERN_NOTICE "IXP4xx flash: using %s partition "
272 "definition\n", part_type); 269 "definition\n", part_type);
273 err = add_mtd_partitions(info->mtd, info->partitions, nr_parts);
274 270
275 if(err) 271 err = mtd_device_register(info->mtd, info->partitions, nr_parts);
276 printk(KERN_ERR "Could not parse partitions\n"); 272 if (err)
277 } 273 printk(KERN_ERR "Could not parse partitions\n");
278 274
279 if (err) 275 if (err)
280 goto Error; 276 goto Error;
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 9e054503c4cf..dd0360ba2412 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -138,7 +138,7 @@ static int __init init_l440gx(void)
138 if (mymtd) { 138 if (mymtd) {
139 mymtd->owner = THIS_MODULE; 139 mymtd->owner = THIS_MODULE;
140 140
141 add_mtd_device(mymtd); 141 mtd_device_register(mymtd, NULL, 0);
142 return 0; 142 return 0;
143 } 143 }
144 144
@@ -148,7 +148,7 @@ static int __init init_l440gx(void)
148 148
149static void __exit cleanup_l440gx(void) 149static void __exit cleanup_l440gx(void)
150{ 150{
151 del_mtd_device(mymtd); 151 mtd_device_unregister(mymtd);
152 map_destroy(mymtd); 152 map_destroy(mymtd);
153 153
154 iounmap(l440gx_map.virt); 154 iounmap(l440gx_map.virt);
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index ee2548085334..5936c466e901 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -112,18 +112,9 @@ static int latch_addr_flash_remove(struct platform_device *dev)
112 latch_addr_data = dev->dev.platform_data; 112 latch_addr_data = dev->dev.platform_data;
113 113
114 if (info->mtd != NULL) { 114 if (info->mtd != NULL) {
115 if (mtd_has_partitions()) { 115 if (info->nr_parts)
116 if (info->nr_parts) { 116 kfree(info->parts);
117 del_mtd_partitions(info->mtd); 117 mtd_device_unregister(info->mtd);
118 kfree(info->parts);
119 } else if (latch_addr_data->nr_parts) {
120 del_mtd_partitions(info->mtd);
121 } else {
122 del_mtd_device(info->mtd);
123 }
124 } else {
125 del_mtd_device(info->mtd);
126 }
127 map_destroy(info->mtd); 118 map_destroy(info->mtd);
128 } 119 }
129 120
@@ -215,23 +206,21 @@ static int __devinit latch_addr_flash_probe(struct platform_device *dev)
215 } 206 }
216 info->mtd->owner = THIS_MODULE; 207 info->mtd->owner = THIS_MODULE;
217 208
218 if (mtd_has_partitions()) { 209 err = parse_mtd_partitions(info->mtd, (const char **)part_probe_types,
219 210 &info->parts, 0);
220 err = parse_mtd_partitions(info->mtd, 211 if (err > 0) {
221 (const char **)part_probe_types, 212 mtd_device_register(info->mtd, info->parts, err);
222 &info->parts, 0); 213 return 0;
223 if (err > 0) { 214 }
224 add_mtd_partitions(info->mtd, info->parts, err); 215 if (latch_addr_data->nr_parts) {
225 return 0; 216 pr_notice("Using latch-addr-flash partition information\n");
226 } 217 mtd_device_register(info->mtd,
227 if (latch_addr_data->nr_parts) { 218 latch_addr_data->parts,
228 pr_notice("Using latch-addr-flash partition information\n"); 219 latch_addr_data->nr_parts);
229 add_mtd_partitions(info->mtd, latch_addr_data->parts, 220 return 0;
230 latch_addr_data->nr_parts);
231 return 0;
232 }
233 } 221 }
234 add_mtd_device(info->mtd); 222
223 mtd_device_register(info->mtd, NULL, 0);
235 return 0; 224 return 0;
236 225
237iounmap: 226iounmap:
diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c
index 0eb5a7c85380..93fa56c33003 100644
--- a/drivers/mtd/maps/mbx860.c
+++ b/drivers/mtd/maps/mbx860.c
@@ -69,8 +69,8 @@ static int __init init_mbx(void)
69 mymtd = do_map_probe("jedec_probe", &mbx_map); 69 mymtd = do_map_probe("jedec_probe", &mbx_map);
70 if (mymtd) { 70 if (mymtd) {
71 mymtd->owner = THIS_MODULE; 71 mymtd->owner = THIS_MODULE;
72 add_mtd_device(mymtd); 72 mtd_device_register(mymtd, NULL, 0);
73 add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS); 73 mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
74 return 0; 74 return 0;
75 } 75 }
76 76
@@ -81,7 +81,7 @@ static int __init init_mbx(void)
81static void __exit cleanup_mbx(void) 81static void __exit cleanup_mbx(void)
82{ 82{
83 if (mymtd) { 83 if (mymtd) {
84 del_mtd_device(mymtd); 84 mtd_device_unregister(mymtd);
85 map_destroy(mymtd); 85 map_destroy(mymtd);
86 } 86 }
87 if (mbx_map.virt) { 87 if (mbx_map.virt) {
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index c0cb319b2b70..81dc2598bc0a 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -116,14 +116,14 @@ static int __init init_netsc520(void)
116 } 116 }
117 117
118 mymtd->owner = THIS_MODULE; 118 mymtd->owner = THIS_MODULE;
119 add_mtd_partitions( mymtd, partition_info, NUM_PARTITIONS ); 119 mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
120 return 0; 120 return 0;
121} 121}
122 122
123static void __exit cleanup_netsc520(void) 123static void __exit cleanup_netsc520(void)
124{ 124{
125 if (mymtd) { 125 if (mymtd) {
126 del_mtd_partitions(mymtd); 126 mtd_device_unregister(mymtd);
127 map_destroy(mymtd); 127 map_destroy(mymtd);
128 } 128 }
129 if (netsc520_map.virt) { 129 if (netsc520_map.virt) {
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index a97133eb9d70..eadcfffc4f9c 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -383,13 +383,13 @@ static int __init nettel_init(void)
383 /* No BIOS regions when AMD boot */ 383 /* No BIOS regions when AMD boot */
384 num_intel_partitions -= 2; 384 num_intel_partitions -= 2;
385 } 385 }
386 rc = add_mtd_partitions(intel_mtd, nettel_intel_partitions, 386 rc = mtd_device_register(intel_mtd, nettel_intel_partitions,
387 num_intel_partitions); 387 num_intel_partitions);
388#endif 388#endif
389 389
390 if (amd_mtd) { 390 if (amd_mtd) {
391 rc = add_mtd_partitions(amd_mtd, nettel_amd_partitions, 391 rc = mtd_device_register(amd_mtd, nettel_amd_partitions,
392 num_amd_partitions); 392 num_amd_partitions);
393 } 393 }
394 394
395#ifdef CONFIG_MTD_CFI_INTELEXT 395#ifdef CONFIG_MTD_CFI_INTELEXT
@@ -419,7 +419,7 @@ static void __exit nettel_cleanup(void)
419 unregister_reboot_notifier(&nettel_notifier_block); 419 unregister_reboot_notifier(&nettel_notifier_block);
420#endif 420#endif
421 if (amd_mtd) { 421 if (amd_mtd) {
422 del_mtd_partitions(amd_mtd); 422 mtd_device_unregister(amd_mtd);
423 map_destroy(amd_mtd); 423 map_destroy(amd_mtd);
424 } 424 }
425 if (nettel_mmcrp) { 425 if (nettel_mmcrp) {
@@ -432,7 +432,7 @@ static void __exit nettel_cleanup(void)
432 } 432 }
433#ifdef CONFIG_MTD_CFI_INTELEXT 433#ifdef CONFIG_MTD_CFI_INTELEXT
434 if (intel_mtd) { 434 if (intel_mtd) {
435 del_mtd_partitions(intel_mtd); 435 mtd_device_unregister(intel_mtd);
436 map_destroy(intel_mtd); 436 map_destroy(intel_mtd);
437 } 437 }
438 if (nettel_intel_map.virt) { 438 if (nettel_intel_map.virt) {
diff --git a/drivers/mtd/maps/octagon-5066.c b/drivers/mtd/maps/octagon-5066.c
index 23fe1786770f..807ac2a2e686 100644
--- a/drivers/mtd/maps/octagon-5066.c
+++ b/drivers/mtd/maps/octagon-5066.c
@@ -175,7 +175,7 @@ void cleanup_oct5066(void)
175 int i; 175 int i;
176 for (i=0; i<2; i++) { 176 for (i=0; i<2; i++) {
177 if (oct5066_mtd[i]) { 177 if (oct5066_mtd[i]) {
178 del_mtd_device(oct5066_mtd[i]); 178 mtd_device_unregister(oct5066_mtd[i]);
179 map_destroy(oct5066_mtd[i]); 179 map_destroy(oct5066_mtd[i]);
180 } 180 }
181 } 181 }
@@ -220,7 +220,7 @@ static int __init init_oct5066(void)
220 oct5066_mtd[i] = do_map_probe("map_rom", &oct5066_map[i]); 220 oct5066_mtd[i] = do_map_probe("map_rom", &oct5066_map[i]);
221 if (oct5066_mtd[i]) { 221 if (oct5066_mtd[i]) {
222 oct5066_mtd[i]->owner = THIS_MODULE; 222 oct5066_mtd[i]->owner = THIS_MODULE;
223 add_mtd_device(oct5066_mtd[i]); 223 mtd_device_register(oct5066_mtd[i], NULL, 0);
224 } 224 }
225 } 225 }
226 226
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 48f4cf5cb9d1..1d005a3e9b41 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -313,7 +313,7 @@ mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
313 goto release; 313 goto release;
314 314
315 mtd->owner = THIS_MODULE; 315 mtd->owner = THIS_MODULE;
316 add_mtd_device(mtd); 316 mtd_device_register(mtd, NULL, 0);
317 317
318 pci_set_drvdata(dev, mtd); 318 pci_set_drvdata(dev, mtd);
319 319
@@ -336,7 +336,7 @@ mtd_pci_remove(struct pci_dev *dev)
336 struct mtd_info *mtd = pci_get_drvdata(dev); 336 struct mtd_info *mtd = pci_get_drvdata(dev);
337 struct map_pci_info *map = mtd->priv; 337 struct map_pci_info *map = mtd->priv;
338 338
339 del_mtd_device(mtd); 339 mtd_device_unregister(mtd);
340 map_destroy(mtd); 340 map_destroy(mtd);
341 map->exit(dev, map); 341 map->exit(dev, map);
342 kfree(map); 342 kfree(map);
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 33dc2829b01b..bbe168b65c26 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -630,7 +630,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
630 dev->pcmcia_map.copy_to = pcmcia_copy_to; 630 dev->pcmcia_map.copy_to = pcmcia_copy_to;
631 } 631 }
632 632
633 if(add_mtd_device(mtd)) { 633 if (mtd_device_register(mtd, NULL, 0)) {
634 map_destroy(mtd); 634 map_destroy(mtd);
635 dev->mtd_info = NULL; 635 dev->mtd_info = NULL;
636 dev_err(&dev->p_dev->dev, 636 dev_err(&dev->p_dev->dev,
@@ -669,7 +669,7 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
669 DEBUG(3, "link=0x%p", link); 669 DEBUG(3, "link=0x%p", link);
670 670
671 if(dev->mtd_info) { 671 if(dev->mtd_info) {
672 del_mtd_device(dev->mtd_info); 672 mtd_device_unregister(dev->mtd_info);
673 dev_info(&dev->p_dev->dev, "mtd%d: Removing\n", 673 dev_info(&dev->p_dev->dev, "mtd%d: Removing\n",
674 dev->mtd_info->index); 674 dev->mtd_info->index);
675 map_destroy(dev->mtd_info); 675 map_destroy(dev->mtd_info);
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 1a9b94f0ee54..f64cee4a3bfb 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -27,10 +27,8 @@ struct physmap_flash_info {
27 struct mtd_info *mtd[MAX_RESOURCES]; 27 struct mtd_info *mtd[MAX_RESOURCES];
28 struct mtd_info *cmtd; 28 struct mtd_info *cmtd;
29 struct map_info map[MAX_RESOURCES]; 29 struct map_info map[MAX_RESOURCES];
30#ifdef CONFIG_MTD_PARTITIONS
31 int nr_parts; 30 int nr_parts;
32 struct mtd_partition *parts; 31 struct mtd_partition *parts;
33#endif
34}; 32};
35 33
36static int physmap_flash_remove(struct platform_device *dev) 34static int physmap_flash_remove(struct platform_device *dev)
@@ -47,18 +45,9 @@ static int physmap_flash_remove(struct platform_device *dev)
47 physmap_data = dev->dev.platform_data; 45 physmap_data = dev->dev.platform_data;
48 46
49 if (info->cmtd) { 47 if (info->cmtd) {
50#ifdef CONFIG_MTD_PARTITIONS 48 mtd_device_unregister(info->cmtd);
51 if (info->nr_parts || physmap_data->nr_parts) { 49 if (info->nr_parts)
52 del_mtd_partitions(info->cmtd); 50 kfree(info->parts);
53
54 if (info->nr_parts)
55 kfree(info->parts);
56 } else {
57 del_mtd_device(info->cmtd);
58 }
59#else
60 del_mtd_device(info->cmtd);
61#endif
62 if (info->cmtd != info->mtd[0]) 51 if (info->cmtd != info->mtd[0])
63 mtd_concat_destroy(info->cmtd); 52 mtd_concat_destroy(info->cmtd);
64 } 53 }
@@ -92,10 +81,8 @@ static const char *rom_probe_types[] = {
92 "qinfo_probe", 81 "qinfo_probe",
93 "map_rom", 82 "map_rom",
94 NULL }; 83 NULL };
95#ifdef CONFIG_MTD_PARTITIONS
96static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", "afs", 84static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", "afs",
97 NULL }; 85 NULL };
98#endif
99 86
100static int physmap_flash_probe(struct platform_device *dev) 87static int physmap_flash_probe(struct platform_device *dev)
101{ 88{
@@ -188,24 +175,23 @@ static int physmap_flash_probe(struct platform_device *dev)
188 if (err) 175 if (err)
189 goto err_out; 176 goto err_out;
190 177
191#ifdef CONFIG_MTD_PARTITIONS
192 err = parse_mtd_partitions(info->cmtd, part_probe_types, 178 err = parse_mtd_partitions(info->cmtd, part_probe_types,
193 &info->parts, 0); 179 &info->parts, 0);
194 if (err > 0) { 180 if (err > 0) {
195 add_mtd_partitions(info->cmtd, info->parts, err); 181 mtd_device_register(info->cmtd, info->parts, err);
196 info->nr_parts = err; 182 info->nr_parts = err;
197 return 0; 183 return 0;
198 } 184 }
199 185
200 if (physmap_data->nr_parts) { 186 if (physmap_data->nr_parts) {
201 printk(KERN_NOTICE "Using physmap partition information\n"); 187 printk(KERN_NOTICE "Using physmap partition information\n");
202 add_mtd_partitions(info->cmtd, physmap_data->parts, 188 mtd_device_register(info->cmtd, physmap_data->parts,
203 physmap_data->nr_parts); 189 physmap_data->nr_parts);
204 return 0; 190 return 0;
205 } 191 }
206#endif
207 192
208 add_mtd_device(info->cmtd); 193 mtd_device_register(info->cmtd, NULL, 0);
194
209 return 0; 195 return 0;
210 196
211err_out: 197err_out:
@@ -269,14 +255,12 @@ void physmap_configure(unsigned long addr, unsigned long size,
269 physmap_flash_data.set_vpp = set_vpp; 255 physmap_flash_data.set_vpp = set_vpp;
270} 256}
271 257
272#ifdef CONFIG_MTD_PARTITIONS
273void physmap_set_partitions(struct mtd_partition *parts, int num_parts) 258void physmap_set_partitions(struct mtd_partition *parts, int num_parts)
274{ 259{
275 physmap_flash_data.nr_parts = num_parts; 260 physmap_flash_data.nr_parts = num_parts;
276 physmap_flash_data.parts = parts; 261 physmap_flash_data.parts = parts;
277} 262}
278#endif 263#endif
279#endif
280 264
281static int __init physmap_init(void) 265static int __init physmap_init(void)
282{ 266{
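
With the #ifdefs gone, physmap's platform data carries partition information unconditionally. A hypothetical board file feeding it (.parts and .nr_parts are the fields visible in the hunk above; the partition layout and .width are invented for illustration):

    static struct mtd_partition board_parts[] = {
    	{ .name = "boot",   .offset = 0,       .size = 0x40000 },
    	{ .name = "rootfs", .offset = 0x40000, .size = MTDPART_SIZ_FULL },
    };

    static struct physmap_flash_data board_flash_data = {
    	.width    = 2,	/* 16-bit bus, assumed */
    	.parts    = board_parts,
    	.nr_parts = ARRAY_SIZE(board_parts),
    };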
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index c1d33464aee8..d251d1db129b 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -34,16 +34,12 @@ struct of_flash_list {
34 34
35struct of_flash { 35struct of_flash {
36 struct mtd_info *cmtd; 36 struct mtd_info *cmtd;
37#ifdef CONFIG_MTD_PARTITIONS
38 struct mtd_partition *parts; 37 struct mtd_partition *parts;
39#endif
40 int list_size; /* number of elements in of_flash_list */ 38 int list_size; /* number of elements in of_flash_list */
41 struct of_flash_list list[0]; 39 struct of_flash_list list[0];
42}; 40};
43 41
44#ifdef CONFIG_MTD_PARTITIONS
45#define OF_FLASH_PARTS(info) ((info)->parts) 42#define OF_FLASH_PARTS(info) ((info)->parts)
46
47static int parse_obsolete_partitions(struct platform_device *dev, 43static int parse_obsolete_partitions(struct platform_device *dev,
48 struct of_flash *info, 44 struct of_flash *info,
49 struct device_node *dp) 45 struct device_node *dp)
@@ -89,10 +85,6 @@ static int parse_obsolete_partitions(struct platform_device *dev,
89 85
90 return nr_parts; 86 return nr_parts;
91} 87}
92#else /* MTD_PARTITIONS */
93#define OF_FLASH_PARTS(info) (0)
94#define parse_partitions(info, dev) (0)
95#endif /* MTD_PARTITIONS */
96 88
97static int of_flash_remove(struct platform_device *dev) 89static int of_flash_remove(struct platform_device *dev)
98{ 90{
@@ -105,17 +97,14 @@ static int of_flash_remove(struct platform_device *dev)
105 dev_set_drvdata(&dev->dev, NULL); 97 dev_set_drvdata(&dev->dev, NULL);
106 98
107 if (info->cmtd != info->list[0].mtd) { 99 if (info->cmtd != info->list[0].mtd) {
108 del_mtd_device(info->cmtd); 100 mtd_device_unregister(info->cmtd);
109 mtd_concat_destroy(info->cmtd); 101 mtd_concat_destroy(info->cmtd);
110 } 102 }
111 103
112 if (info->cmtd) { 104 if (info->cmtd) {
113 if (OF_FLASH_PARTS(info)) { 105 if (OF_FLASH_PARTS(info))
114 del_mtd_partitions(info->cmtd);
115 kfree(OF_FLASH_PARTS(info)); 106 kfree(OF_FLASH_PARTS(info));
116 } else { 107 mtd_device_unregister(info->cmtd);
117 del_mtd_device(info->cmtd);
118 }
119 } 108 }
120 109
121 for (i = 0; i < info->list_size; i++) { 110 for (i = 0; i < info->list_size; i++) {
@@ -172,7 +161,6 @@ static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
172 } 161 }
173} 162}
174 163
175#ifdef CONFIG_MTD_PARTITIONS
176/* When partitions are set we look for a linux,part-probe property which 164/* When partitions are set we look for a linux,part-probe property which
177 specifies the list of partition probers to use. If none is given then the 165 specifies the list of partition probers to use. If none is given then the
178 default is use. These take precedence over other device tree 166 default is use. These take precedence over other device tree
@@ -212,14 +200,11 @@ static void __devinit of_free_probes(const char **probes)
212 if (probes != part_probe_types_def) 200 if (probes != part_probe_types_def)
213 kfree(probes); 201 kfree(probes);
214} 202}
215#endif
216 203
217static struct of_device_id of_flash_match[]; 204static struct of_device_id of_flash_match[];
218static int __devinit of_flash_probe(struct platform_device *dev) 205static int __devinit of_flash_probe(struct platform_device *dev)
219{ 206{
220#ifdef CONFIG_MTD_PARTITIONS
221 const char **part_probe_types; 207 const char **part_probe_types;
222#endif
223 const struct of_device_id *match; 208 const struct of_device_id *match;
224 struct device_node *dp = dev->dev.of_node; 209 struct device_node *dp = dev->dev.of_node;
225 struct resource res; 210 struct resource res;
@@ -346,7 +331,6 @@ static int __devinit of_flash_probe(struct platform_device *dev)
346 if (err) 331 if (err)
347 goto err_out; 332 goto err_out;
348 333
349#ifdef CONFIG_MTD_PARTITIONS
350 part_probe_types = of_get_probes(dp); 334 part_probe_types = of_get_probes(dp);
351 err = parse_mtd_partitions(info->cmtd, part_probe_types, 335 err = parse_mtd_partitions(info->cmtd, part_probe_types,
352 &info->parts, 0); 336 &info->parts, 0);
@@ -356,13 +340,11 @@ static int __devinit of_flash_probe(struct platform_device *dev)
356 } 340 }
357 of_free_probes(part_probe_types); 341 of_free_probes(part_probe_types);
358 342
359#ifdef CONFIG_MTD_OF_PARTS
360 if (err == 0) { 343 if (err == 0) {
361 err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts); 344 err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts);
362 if (err < 0) 345 if (err < 0)
363 goto err_out; 346 goto err_out;
364 } 347 }
365#endif
366 348
367 if (err == 0) { 349 if (err == 0) {
368 err = parse_obsolete_partitions(dev, info, dp); 350 err = parse_obsolete_partitions(dev, info, dp);
@@ -370,11 +352,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
370 goto err_out; 352 goto err_out;
371 } 353 }
372 354
373 if (err > 0) 355 mtd_device_register(info->cmtd, info->parts, err);
374 add_mtd_partitions(info->cmtd, info->parts, err);
375 else
376#endif
377 add_mtd_device(info->cmtd);
378 356
379 kfree(mtd_list); 357 kfree(mtd_list);
380 358
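Every map driver converted in this series follows the same mechanical pattern: the CONFIG_MTD_PARTITIONS-guarded split between add_mtd_partitions() and add_mtd_device() collapses into a single mtd_device_register() call, as in the physmap_of hunks above. A minimal before/after sketch of the call-site change (hypothetical driver; the "info" fields are illustrative, not taken from any one driver here):

/* before: two registration paths, selected at compile time */
#ifdef CONFIG_MTD_PARTITIONS
	if (info->nr_parts)
		add_mtd_partitions(info->mtd, info->parts, info->nr_parts);
	else
#endif
		add_mtd_device(info->mtd);

/* after: one call; a NULL parts pointer registers the whole device */
	mtd_device_register(info->mtd, info->parts, info->nr_parts);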
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 76a76be5a7bd..9ca1eccba4bc 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -94,14 +94,11 @@ static int platram_remove(struct platform_device *pdev)
94 return 0; 94 return 0;
95 95
96 if (info->mtd) { 96 if (info->mtd) {
97#ifdef CONFIG_MTD_PARTITIONS 97 mtd_device_unregister(info->mtd);
98 if (info->partitions) { 98 if (info->partitions) {
99 del_mtd_partitions(info->mtd);
100 if (info->free_partitions) 99 if (info->free_partitions)
101 kfree(info->partitions); 100 kfree(info->partitions);
102 } 101 }
103#endif
104 del_mtd_device(info->mtd);
105 map_destroy(info->mtd); 102 map_destroy(info->mtd);
106 } 103 }
107 104
@@ -231,7 +228,6 @@ static int platram_probe(struct platform_device *pdev)
231 /* check to see if there are any available partitions, or whether 228 /* check to see if there are any available partitions, or whether
232 * to add this device whole */ 229 * to add this device whole */
233 230
234#ifdef CONFIG_MTD_PARTITIONS
235 if (!pdata->nr_partitions) { 231 if (!pdata->nr_partitions) {
236 /* try to probe using the supplied probe type */ 232 /* try to probe using the supplied probe type */
237 if (pdata->probes) { 233 if (pdata->probes) {
@@ -239,24 +235,22 @@ static int platram_probe(struct platform_device *pdev)
239 &info->partitions, 0); 235 &info->partitions, 0);
240 info->free_partitions = 1; 236 info->free_partitions = 1;
241 if (err > 0) 237 if (err > 0)
242 err = add_mtd_partitions(info->mtd, 238 err = mtd_device_register(info->mtd,
243 info->partitions, err); 239 info->partitions, err);
244 } 240 }
245 } 241 }
246 /* use the static mapping */ 242 /* use the static mapping */
247 else 243 else
248 err = add_mtd_partitions(info->mtd, pdata->partitions, 244 err = mtd_device_register(info->mtd, pdata->partitions,
249 pdata->nr_partitions); 245 pdata->nr_partitions);
250#endif /* CONFIG_MTD_PARTITIONS */
251
252 if (add_mtd_device(info->mtd)) {
253 dev_err(&pdev->dev, "add_mtd_device() failed\n");
254 err = -ENOMEM;
255 }
256
257 if (!err) 246 if (!err)
258 dev_info(&pdev->dev, "registered mtd device\n"); 247 dev_info(&pdev->dev, "registered mtd device\n");
259 248
249 /* add the whole device. */
250 err = mtd_device_register(info->mtd, NULL, 0);
251 if (err)
252 dev_err(&pdev->dev, "failed to register the entire device\n");
253
260 return err; 254 return err;
261 255
262 exit_free: 256 exit_free:
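plat-ram is one of the few drivers here that registers the parsed partitions and then the whole device as well; with the new API that is simply two mtd_device_register() calls, matching the helper's kerneldoc added in mtdcore.c further down. A condensed sketch of the probe flow above, error paths trimmed:

	/* partitions, when any were parsed or supplied by platform data */
	err = mtd_device_register(info->mtd, info->partitions, nr_parts);

	/* additionally expose the unpartitioned master; as in the hunk
	 * above, this status is what the probe ultimately returns */
	err = mtd_device_register(info->mtd, NULL, 0);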
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index 64aea6acd48e..744ca5cacc9b 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -173,7 +173,7 @@ static int __init init_msp_flash(void)
173 msp_flash[i] = do_map_probe("cfi_probe", &msp_maps[i]); 173 msp_flash[i] = do_map_probe("cfi_probe", &msp_maps[i]);
174 if (msp_flash[i]) { 174 if (msp_flash[i]) {
175 msp_flash[i]->owner = THIS_MODULE; 175 msp_flash[i]->owner = THIS_MODULE;
176 add_mtd_partitions(msp_flash[i], msp_parts[i], pcnt); 176 mtd_device_register(msp_flash[i], msp_parts[i], pcnt);
177 } else { 177 } else {
178 printk(KERN_ERR "map probe failed for flash\n"); 178 printk(KERN_ERR "map probe failed for flash\n");
179 ret = -ENXIO; 179 ret = -ENXIO;
@@ -188,7 +188,7 @@ static int __init init_msp_flash(void)
188 188
189cleanup_loop: 189cleanup_loop:
190 while (i--) { 190 while (i--) {
191 del_mtd_partitions(msp_flash[i]); 191 mtd_device_unregister(msp_flash[i]);
192 map_destroy(msp_flash[i]); 192 map_destroy(msp_flash[i]);
193 kfree(msp_maps[i].name); 193 kfree(msp_maps[i].name);
194 iounmap(msp_maps[i].virt); 194 iounmap(msp_maps[i].virt);
@@ -207,7 +207,7 @@ static void __exit cleanup_msp_flash(void)
207 int i; 207 int i;
208 208
209 for (i = 0; i < fcnt; i++) { 209 for (i = 0; i < fcnt; i++) {
210 del_mtd_partitions(msp_flash[i]); 210 mtd_device_unregister(msp_flash[i]);
211 map_destroy(msp_flash[i]); 211 map_destroy(msp_flash[i]);
212 iounmap((void *)msp_maps[i].virt); 212 iounmap((void *)msp_maps[i].virt);
213 213
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index d8ae634d347e..f59d62f74d44 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -104,23 +104,18 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
104 } 104 }
105 info->mtd->owner = THIS_MODULE; 105 info->mtd->owner = THIS_MODULE;
106 106
107#ifdef CONFIG_MTD_PARTITIONS
108 ret = parse_mtd_partitions(info->mtd, probes, &parts, 0); 107 ret = parse_mtd_partitions(info->mtd, probes, &parts, 0);
109 108
110 if (ret > 0) { 109 if (ret > 0) {
111 info->nr_parts = ret; 110 info->nr_parts = ret;
112 info->parts = parts; 111 info->parts = parts;
113 } 112 }
114#endif
115 113
116 if (info->nr_parts) { 114 if (!info->nr_parts)
117 add_mtd_partitions(info->mtd, info->parts,
118 info->nr_parts);
119 } else {
120 printk("Registering %s as whole device\n", 115 printk("Registering %s as whole device\n",
121 info->map.name); 116 info->map.name);
122 add_mtd_device(info->mtd); 117
123 } 118 mtd_device_register(info->mtd, info->parts, info->nr_parts);
124 119
125 platform_set_drvdata(pdev, info); 120 platform_set_drvdata(pdev, info);
126 return 0; 121 return 0;
@@ -132,12 +127,7 @@ static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
132 127
133 platform_set_drvdata(dev, NULL); 128 platform_set_drvdata(dev, NULL);
134 129
135#ifdef CONFIG_MTD_PARTITIONS 130 mtd_device_unregister(info->mtd);
136 if (info->nr_parts)
137 del_mtd_partitions(info->mtd);
138 else
139#endif
140 del_mtd_device(info->mtd);
141 131
142 map_destroy(info->mtd); 132 map_destroy(info->mtd);
143 iounmap(info->map.virt); 133 iounmap(info->map.virt);
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 83ed64512c5e..761fb459d2c7 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -25,10 +25,8 @@
25struct rbtx4939_flash_info { 25struct rbtx4939_flash_info {
26 struct mtd_info *mtd; 26 struct mtd_info *mtd;
27 struct map_info map; 27 struct map_info map;
28#ifdef CONFIG_MTD_PARTITIONS
29 int nr_parts; 28 int nr_parts;
30 struct mtd_partition *parts; 29 struct mtd_partition *parts;
31#endif
32}; 30};
33 31
34static int rbtx4939_flash_remove(struct platform_device *dev) 32static int rbtx4939_flash_remove(struct platform_device *dev)
@@ -41,28 +39,18 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
41 platform_set_drvdata(dev, NULL); 39 platform_set_drvdata(dev, NULL);
42 40
43 if (info->mtd) { 41 if (info->mtd) {
44#ifdef CONFIG_MTD_PARTITIONS
45 struct rbtx4939_flash_data *pdata = dev->dev.platform_data; 42 struct rbtx4939_flash_data *pdata = dev->dev.platform_data;
46 43
47 if (info->nr_parts) { 44 if (info->nr_parts)
48 del_mtd_partitions(info->mtd);
49 kfree(info->parts); 45 kfree(info->parts);
50 } else if (pdata->nr_parts) 46 mtd_device_unregister(info->mtd);
51 del_mtd_partitions(info->mtd);
52 else
53 del_mtd_device(info->mtd);
54#else
55 del_mtd_device(info->mtd);
56#endif
57 map_destroy(info->mtd); 47 map_destroy(info->mtd);
58 } 48 }
59 return 0; 49 return 0;
60} 50}
61 51
62static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 52static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
63#ifdef CONFIG_MTD_PARTITIONS
64static const char *part_probe_types[] = { "cmdlinepart", NULL }; 53static const char *part_probe_types[] = { "cmdlinepart", NULL };
65#endif
66 54
67static int rbtx4939_flash_probe(struct platform_device *dev) 55static int rbtx4939_flash_probe(struct platform_device *dev)
68{ 56{
@@ -120,23 +108,21 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
120 if (err) 108 if (err)
121 goto err_out; 109 goto err_out;
122 110
123#ifdef CONFIG_MTD_PARTITIONS
124 err = parse_mtd_partitions(info->mtd, part_probe_types, 111 err = parse_mtd_partitions(info->mtd, part_probe_types,
125 &info->parts, 0); 112 &info->parts, 0);
126 if (err > 0) { 113 if (err > 0) {
127 add_mtd_partitions(info->mtd, info->parts, err); 114 mtd_device_register(info->mtd, info->parts, err);
128 info->nr_parts = err; 115 info->nr_parts = err;
129 return 0; 116 return 0;
130 } 117 }
131 118
132 if (pdata->nr_parts) { 119 if (pdata->nr_parts) {
133 pr_notice("Using rbtx4939 partition information\n"); 120 pr_notice("Using rbtx4939 partition information\n");
134 add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts); 121 mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
135 return 0; 122 return 0;
136 } 123 }
137#endif
138 124
139 add_mtd_device(info->mtd); 125 mtd_device_register(info->mtd, NULL, 0);
140 return 0; 126 return 0;
141 127
142err_out: 128err_out:
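The rbtx4939 probe above is a compact example of the partition-source precedence most of these drivers implement: partitions parsed at runtime win, static platform data comes second, and the bare device is the fallback. Condensed into a sketch (the logic of the hunk above, not a verbatim excerpt):

	nr = parse_mtd_partitions(mtd, part_probe_types, &parts, 0);
	if (nr > 0)			/* e.g. "cmdlinepart" results */
		return mtd_device_register(mtd, parts, nr);
	if (pdata->nr_parts)		/* board-supplied table */
		return mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
	return mtd_device_register(mtd, NULL, 0);	/* whole device */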
diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c
index 3e3ef53d4fd4..ed88225bf667 100644
--- a/drivers/mtd/maps/rpxlite.c
+++ b/drivers/mtd/maps/rpxlite.c
@@ -36,7 +36,7 @@ static int __init init_rpxlite(void)
36 mymtd = do_map_probe("cfi_probe", &rpxlite_map); 36 mymtd = do_map_probe("cfi_probe", &rpxlite_map);
37 if (mymtd) { 37 if (mymtd) {
38 mymtd->owner = THIS_MODULE; 38 mymtd->owner = THIS_MODULE;
39 add_mtd_device(mymtd); 39 mtd_device_register(mymtd, NULL, 0);
40 return 0; 40 return 0;
41 } 41 }
42 42
@@ -47,7 +47,7 @@ static int __init init_rpxlite(void)
47static void __exit cleanup_rpxlite(void) 47static void __exit cleanup_rpxlite(void)
48{ 48{
49 if (mymtd) { 49 if (mymtd) {
50 del_mtd_device(mymtd); 50 mtd_device_unregister(mymtd);
51 map_destroy(mymtd); 51 map_destroy(mymtd);
52 } 52 }
53 if (rpxlite_map.virt) { 53 if (rpxlite_map.virt) {
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index da875908ea8e..a9b5e0e5c4c5 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -226,12 +226,7 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
226 int i; 226 int i;
227 227
228 if (info->mtd) { 228 if (info->mtd) {
229 if (info->nr_parts == 0) 229 mtd_device_unregister(info->mtd);
230 del_mtd_device(info->mtd);
231#ifdef CONFIG_MTD_PARTITIONS
232 else
233 del_mtd_partitions(info->mtd);
234#endif
235 if (info->mtd != info->subdev[0].mtd) 230 if (info->mtd != info->subdev[0].mtd)
236 mtd_concat_destroy(info->mtd); 231 mtd_concat_destroy(info->mtd);
237 } 232 }
@@ -363,28 +358,24 @@ static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
363 /* 358 /*
364 * Partition selection stuff. 359 * Partition selection stuff.
365 */ 360 */
366#ifdef CONFIG_MTD_PARTITIONS
367 nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0); 361 nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0);
368 if (nr_parts > 0) { 362 if (nr_parts > 0) {
369 info->parts = parts; 363 info->parts = parts;
370 part_type = "dynamic"; 364 part_type = "dynamic";
371 } else 365 } else {
372#endif
373 {
374 parts = plat->parts; 366 parts = plat->parts;
375 nr_parts = plat->nr_parts; 367 nr_parts = plat->nr_parts;
376 part_type = "static"; 368 part_type = "static";
377 } 369 }
378 370
379 if (nr_parts == 0) { 371 if (nr_parts == 0)
380 printk(KERN_NOTICE "SA1100 flash: no partition info " 372 printk(KERN_NOTICE "SA1100 flash: no partition info "
381 "available, registering whole flash\n"); 373 "available, registering whole flash\n");
382 add_mtd_device(info->mtd); 374 else
383 } else {
384 printk(KERN_NOTICE "SA1100 flash: using %s partition " 375 printk(KERN_NOTICE "SA1100 flash: using %s partition "
385 "definition\n", part_type); 376 "definition\n", part_type);
386 add_mtd_partitions(info->mtd, parts, nr_parts); 377
387 } 378 mtd_device_register(info->mtd, parts, nr_parts);
388 379
389 info->nr_parts = nr_parts; 380 info->nr_parts = nr_parts;
390 381
diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c
index 04b2781fc627..556a2dfe94c5 100644
--- a/drivers/mtd/maps/sbc_gxx.c
+++ b/drivers/mtd/maps/sbc_gxx.c
@@ -182,7 +182,7 @@ static struct mtd_info *all_mtd;
182static void cleanup_sbc_gxx(void) 182static void cleanup_sbc_gxx(void)
183{ 183{
184 if( all_mtd ) { 184 if( all_mtd ) {
185 del_mtd_partitions( all_mtd ); 185 mtd_device_unregister(all_mtd);
186 map_destroy( all_mtd ); 186 map_destroy( all_mtd );
187 } 187 }
188 188
@@ -223,7 +223,7 @@ static int __init init_sbc_gxx(void)
223 all_mtd->owner = THIS_MODULE; 223 all_mtd->owner = THIS_MODULE;
224 224
225 /* Create MTD devices for each partition. */ 225 /* Create MTD devices for each partition. */
226 add_mtd_partitions(all_mtd, partition_info, NUM_PARTITIONS ); 226 mtd_device_register(all_mtd, partition_info, NUM_PARTITIONS);
227 227
228 return 0; 228 return 0;
229} 229}
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 4d8aaaf4bb76..8fead8e46bce 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -266,10 +266,10 @@ static int __init init_sc520cdp(void)
266 /* Combine the two flash banks into a single MTD device & register it: */ 266 /* Combine the two flash banks into a single MTD device & register it: */
267 merged_mtd = mtd_concat_create(mymtd, 2, "SC520CDP Flash Banks #0 and #1"); 267 merged_mtd = mtd_concat_create(mymtd, 2, "SC520CDP Flash Banks #0 and #1");
268 if(merged_mtd) 268 if(merged_mtd)
269 add_mtd_device(merged_mtd); 269 mtd_device_register(merged_mtd, NULL, 0);
270 } 270 }
271 if(devices_found == 3) /* register the third (DIL-Flash) device */ 271 if(devices_found == 3) /* register the third (DIL-Flash) device */
272 add_mtd_device(mymtd[2]); 272 mtd_device_register(mymtd[2], NULL, 0);
273 return(devices_found ? 0 : -ENXIO); 273 return(devices_found ? 0 : -ENXIO);
274} 274}
275 275
@@ -278,11 +278,11 @@ static void __exit cleanup_sc520cdp(void)
278 int i; 278 int i;
279 279
280 if (merged_mtd) { 280 if (merged_mtd) {
281 del_mtd_device(merged_mtd); 281 mtd_device_unregister(merged_mtd);
282 mtd_concat_destroy(merged_mtd); 282 mtd_concat_destroy(merged_mtd);
283 } 283 }
284 if (mymtd[2]) 284 if (mymtd[2])
285 del_mtd_device(mymtd[2]); 285 mtd_device_unregister(mymtd[2]);
286 286
287 for (i = 0; i < NUM_FLASH_BANKS; i++) { 287 for (i = 0; i < NUM_FLASH_BANKS; i++) {
288 if (mymtd[i]) 288 if (mymtd[i])
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 7e329f09a548..d88c8426bb0f 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -180,7 +180,7 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
180 180
181 scb2_mtd->owner = THIS_MODULE; 181 scb2_mtd->owner = THIS_MODULE;
182 if (scb2_fixup_mtd(scb2_mtd) < 0) { 182 if (scb2_fixup_mtd(scb2_mtd) < 0) {
183 del_mtd_device(scb2_mtd); 183 mtd_device_unregister(scb2_mtd);
184 map_destroy(scb2_mtd); 184 map_destroy(scb2_mtd);
185 iounmap(scb2_ioaddr); 185 iounmap(scb2_ioaddr);
186 if (!region_fail) 186 if (!region_fail)
@@ -192,7 +192,7 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
192 (unsigned long long)scb2_mtd->size, 192 (unsigned long long)scb2_mtd->size,
193 (unsigned long long)(SCB2_WINDOW - scb2_mtd->size)); 193 (unsigned long long)(SCB2_WINDOW - scb2_mtd->size));
194 194
195 add_mtd_device(scb2_mtd); 195 mtd_device_register(scb2_mtd, NULL, 0);
196 196
197 return 0; 197 return 0;
198} 198}
@@ -207,7 +207,7 @@ scb2_flash_remove(struct pci_dev *dev)
207 if (scb2_mtd->lock) 207 if (scb2_mtd->lock)
208 scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size); 208 scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size);
209 209
210 del_mtd_device(scb2_mtd); 210 mtd_device_unregister(scb2_mtd);
211 map_destroy(scb2_mtd); 211 map_destroy(scb2_mtd);
212 212
213 iounmap(scb2_ioaddr); 213 iounmap(scb2_ioaddr);
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index 027e628a4f1d..f1c1f737d0d7 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -44,7 +44,6 @@ static struct resource docmem = {
44 44
45static struct mtd_info *mymtd; 45static struct mtd_info *mymtd;
46 46
47#ifdef CONFIG_MTD_PARTITIONS
48static struct mtd_partition partition_info[] = { 47static struct mtd_partition partition_info[] = {
49 { 48 {
50 .name = "DOCCS Boot kernel", 49 .name = "DOCCS Boot kernel",
@@ -68,8 +67,6 @@ static struct mtd_partition partition_info[] = {
68 }, 67 },
69}; 68};
70#define NUM_PARTITIONS ARRAY_SIZE(partition_info) 69#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
71#endif
72
73 70
74static struct map_info scx200_docflash_map = { 71static struct map_info scx200_docflash_map = {
75 .name = "NatSemi SCx200 DOCCS Flash", 72 .name = "NatSemi SCx200 DOCCS Flash",
@@ -198,24 +195,17 @@ static int __init init_scx200_docflash(void)
198 195
199 mymtd->owner = THIS_MODULE; 196 mymtd->owner = THIS_MODULE;
200 197
201#ifdef CONFIG_MTD_PARTITIONS
202 partition_info[3].offset = mymtd->size-partition_info[3].size; 198 partition_info[3].offset = mymtd->size-partition_info[3].size;
203 partition_info[2].size = partition_info[3].offset-partition_info[2].offset; 199 partition_info[2].size = partition_info[3].offset-partition_info[2].offset;
204 add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS); 200 mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
205#else 201
206 add_mtd_device(mymtd);
207#endif
208 return 0; 202 return 0;
209} 203}
210 204
211static void __exit cleanup_scx200_docflash(void) 205static void __exit cleanup_scx200_docflash(void)
212{ 206{
213 if (mymtd) { 207 if (mymtd) {
214#ifdef CONFIG_MTD_PARTITIONS 208 mtd_device_unregister(mymtd);
215 del_mtd_partitions(mymtd);
216#else
217 del_mtd_device(mymtd);
218#endif
219 map_destroy(mymtd); 209 map_destroy(mymtd);
220 } 210 }
221 if (scx200_docflash_map.virt) { 211 if (scx200_docflash_map.virt) {
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index 0eb41d9c6786..cbf6bade9354 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -89,7 +89,7 @@ static int __init init_soleng_maps(void)
89 eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map); 89 eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map);
90 if (eprom_mtd) { 90 if (eprom_mtd) {
91 eprom_mtd->owner = THIS_MODULE; 91 eprom_mtd->owner = THIS_MODULE;
92 add_mtd_device(eprom_mtd); 92 mtd_device_register(eprom_mtd, NULL, 0);
93 } 93 }
94 94
95 nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0); 95 nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0);
@@ -104,9 +104,9 @@ static int __init init_soleng_maps(void)
104#endif /* CONFIG_MTD_SUPERH_RESERVE */ 104#endif /* CONFIG_MTD_SUPERH_RESERVE */
105 105
106 if (nr_parts > 0) 106 if (nr_parts > 0)
107 add_mtd_partitions(flash_mtd, parsed_parts, nr_parts); 107 mtd_device_register(flash_mtd, parsed_parts, nr_parts);
108 else 108 else
109 add_mtd_device(flash_mtd); 109 mtd_device_register(flash_mtd, NULL, 0);
110 110
111 return 0; 111 return 0;
112} 112}
@@ -114,14 +114,14 @@ static int __init init_soleng_maps(void)
114static void __exit cleanup_soleng_maps(void) 114static void __exit cleanup_soleng_maps(void)
115{ 115{
116 if (eprom_mtd) { 116 if (eprom_mtd) {
117 del_mtd_device(eprom_mtd); 117 mtd_device_unregister(eprom_mtd);
118 map_destroy(eprom_mtd); 118 map_destroy(eprom_mtd);
119 } 119 }
120 120
121 if (parsed_parts) 121 if (parsed_parts)
122 del_mtd_partitions(flash_mtd); 122 mtd_device_unregister(flash_mtd);
123 else 123 else
124 del_mtd_device(flash_mtd); 124 mtd_device_unregister(flash_mtd);
125 map_destroy(flash_mtd); 125 map_destroy(flash_mtd);
126} 126}
127 127
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 3f1cb328a574..2d66234f57cb 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -101,7 +101,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
101 101
102 up->mtd->owner = THIS_MODULE; 102 up->mtd->owner = THIS_MODULE;
103 103
104 add_mtd_device(up->mtd); 104 mtd_device_register(up->mtd, NULL, 0);
105 105
106 dev_set_drvdata(&op->dev, up); 106 dev_set_drvdata(&op->dev, up);
107 107
@@ -126,7 +126,7 @@ static int __devexit uflash_remove(struct platform_device *op)
126 struct uflash_dev *up = dev_get_drvdata(&op->dev); 126 struct uflash_dev *up = dev_get_drvdata(&op->dev);
127 127
128 if (up->mtd) { 128 if (up->mtd) {
129 del_mtd_device(up->mtd); 129 mtd_device_unregister(up->mtd);
130 map_destroy(up->mtd); 130 map_destroy(up->mtd);
131 } 131 }
132 if (up->map.virt) { 132 if (up->map.virt) {
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index 0718dfb3ee64..d78587990e7e 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -62,7 +62,6 @@ static void __iomem *start_scan_addr;
62 * "struct map_desc *_io_desc" for the corresponding machine. 62 * "struct map_desc *_io_desc" for the corresponding machine.
63 */ 63 */
64 64
65#ifdef CONFIG_MTD_PARTITIONS
66/* Currently, TQM8xxL has up to 8MiB flash */ 65/* Currently, TQM8xxL has up to 8MiB flash */
67static unsigned long tqm8xxl_max_flash_size = 0x00800000; 66static unsigned long tqm8xxl_max_flash_size = 0x00800000;
68 67
@@ -107,7 +106,6 @@ static struct mtd_partition tqm8xxl_fs_partitions[] = {
107 //.size = MTDPART_SIZ_FULL, 106 //.size = MTDPART_SIZ_FULL,
108 } 107 }
109}; 108};
110#endif
111 109
112static int __init init_tqm_mtd(void) 110static int __init init_tqm_mtd(void)
113{ 111{
@@ -188,7 +186,6 @@ static int __init init_tqm_mtd(void)
188 goto error_mem; 186 goto error_mem;
189 } 187 }
190 188
191#ifdef CONFIG_MTD_PARTITIONS
192 /* 189 /*
193 * Select Static partition definitions 190 * Select Static partition definitions
194 */ 191 */
@@ -201,21 +198,14 @@ static int __init init_tqm_mtd(void)
201 part_banks[1].nums = ARRAY_SIZE(tqm8xxl_fs_partitions); 198 part_banks[1].nums = ARRAY_SIZE(tqm8xxl_fs_partitions);
202 199
203 for(idx = 0; idx < num_banks ; idx++) { 200 for(idx = 0; idx < num_banks ; idx++) {
204 if (part_banks[idx].nums == 0) { 201 if (part_banks[idx].nums == 0)
205 printk(KERN_NOTICE "TQM flash%d: no partition info available, registering whole flash at once\n", idx); 202 printk(KERN_NOTICE "TQM flash%d: no partition info available, registering whole flash at once\n", idx);
206 add_mtd_device(mtd_banks[idx]); 203 else
207 } else {
208 printk(KERN_NOTICE "TQM flash%d: Using %s partition definition\n", 204 printk(KERN_NOTICE "TQM flash%d: Using %s partition definition\n",
209 idx, part_banks[idx].type); 205 idx, part_banks[idx].type);
210 add_mtd_partitions(mtd_banks[idx], part_banks[idx].mtd_part, 206 mtd_device_register(mtd_banks[idx], part_banks[idx].mtd_part,
211 part_banks[idx].nums); 207 part_banks[idx].nums);
212 }
213 } 208 }
214#else
215 printk(KERN_NOTICE "TQM flash: registering %d whole flash banks at once\n", num_banks);
216 for(idx = 0 ; idx < num_banks ; idx++)
217 add_mtd_device(mtd_banks[idx]);
218#endif
219 return 0; 209 return 0;
220error_mem: 210error_mem:
221 for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) { 211 for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
@@ -237,7 +227,7 @@ static void __exit cleanup_tqm_mtd(void)
237 for(idx = 0 ; idx < num_banks ; idx++) { 227 for(idx = 0 ; idx < num_banks ; idx++) {
238 /* destroy mtd_info previously allocated */ 228 /* destroy mtd_info previously allocated */
239 if (mtd_banks[idx]) { 229 if (mtd_banks[idx]) {
240 del_mtd_partitions(mtd_banks[idx]); 230 mtd_device_unregister(mtd_banks[idx]);
241 map_destroy(mtd_banks[idx]); 231 map_destroy(mtd_banks[idx]);
242 } 232 }
243 /* release map_info not used anymore */ 233 /* release map_info not used anymore */
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index e02dfa9d4ddd..d1d671daf235 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -89,7 +89,7 @@ static int __init init_ts5500_map(void)
89 } 89 }
90 90
91 mymtd->owner = THIS_MODULE; 91 mymtd->owner = THIS_MODULE;
92 add_mtd_partitions(mymtd, ts5500_partitions, NUM_PARTITIONS); 92 mtd_device_register(mymtd, ts5500_partitions, NUM_PARTITIONS);
93 93
94 return 0; 94 return 0;
95 95
@@ -102,7 +102,7 @@ err2:
102static void __exit cleanup_ts5500_map(void) 102static void __exit cleanup_ts5500_map(void)
103{ 103{
104 if (mymtd) { 104 if (mymtd) {
105 del_mtd_partitions(mymtd); 105 mtd_device_unregister(mymtd);
106 map_destroy(mymtd); 106 map_destroy(mymtd);
107 } 107 }
108 108
diff --git a/drivers/mtd/maps/tsunami_flash.c b/drivers/mtd/maps/tsunami_flash.c
index 77a8bfc02577..1de390e1c2fb 100644
--- a/drivers/mtd/maps/tsunami_flash.c
+++ b/drivers/mtd/maps/tsunami_flash.c
@@ -76,7 +76,7 @@ static void __exit cleanup_tsunami_flash(void)
76 struct mtd_info *mtd; 76 struct mtd_info *mtd;
77 mtd = tsunami_flash_mtd; 77 mtd = tsunami_flash_mtd;
78 if (mtd) { 78 if (mtd) {
79 del_mtd_device(mtd); 79 mtd_device_unregister(mtd);
80 map_destroy(mtd); 80 map_destroy(mtd);
81 } 81 }
82 tsunami_flash_mtd = 0; 82 tsunami_flash_mtd = 0;
@@ -97,7 +97,7 @@ static int __init init_tsunami_flash(void)
97 } 97 }
98 if (tsunami_flash_mtd) { 98 if (tsunami_flash_mtd) {
99 tsunami_flash_mtd->owner = THIS_MODULE; 99 tsunami_flash_mtd->owner = THIS_MODULE;
100 add_mtd_device(tsunami_flash_mtd); 100 mtd_device_register(tsunami_flash_mtd, NULL, 0);
101 return 0; 101 return 0;
102 } 102 }
103 return -ENXIO; 103 return -ENXIO;
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 35009294b435..6793074f3f40 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -89,11 +89,7 @@ static int __init uclinux_mtd_init(void)
89 mtd->priv = mapp; 89 mtd->priv = mapp;
90 90
91 uclinux_ram_mtdinfo = mtd; 91 uclinux_ram_mtdinfo = mtd;
92#ifdef CONFIG_MTD_PARTITIONS 92 mtd_device_register(mtd, uclinux_romfs, NUM_PARTITIONS);
93 add_mtd_partitions(mtd, uclinux_romfs, NUM_PARTITIONS);
94#else
95 add_mtd_device(mtd);
96#endif
97 93
98 return(0); 94 return(0);
99} 95}
@@ -103,11 +99,7 @@ static int __init uclinux_mtd_init(void)
103static void __exit uclinux_mtd_cleanup(void) 99static void __exit uclinux_mtd_cleanup(void)
104{ 100{
105 if (uclinux_ram_mtdinfo) { 101 if (uclinux_ram_mtdinfo) {
106#ifdef CONFIG_MTD_PARTITIONS 102 mtd_device_unregister(uclinux_ram_mtdinfo);
107 del_mtd_partitions(uclinux_ram_mtdinfo);
108#else
109 del_mtd_device(uclinux_ram_mtdinfo);
110#endif
111 map_destroy(uclinux_ram_mtdinfo); 103 map_destroy(uclinux_ram_mtdinfo);
112 uclinux_ram_mtdinfo = NULL; 104 uclinux_ram_mtdinfo = NULL;
113 } 105 }
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
index 6adaa6acc193..5e68de73eabc 100644
--- a/drivers/mtd/maps/vmax301.c
+++ b/drivers/mtd/maps/vmax301.c
@@ -138,7 +138,7 @@ static void __exit cleanup_vmax301(void)
138 138
139 for (i=0; i<2; i++) { 139 for (i=0; i<2; i++) {
140 if (vmax_mtd[i]) { 140 if (vmax_mtd[i]) {
141 del_mtd_device(vmax_mtd[i]); 141 mtd_device_unregister(vmax_mtd[i]);
142 map_destroy(vmax_mtd[i]); 142 map_destroy(vmax_mtd[i]);
143 } 143 }
144 } 144 }
@@ -176,7 +176,7 @@ static int __init init_vmax301(void)
176 vmax_mtd[i] = do_map_probe("map_rom", &vmax_map[i]); 176 vmax_mtd[i] = do_map_probe("map_rom", &vmax_map[i]);
177 if (vmax_mtd[i]) { 177 if (vmax_mtd[i]) {
178 vmax_mtd[i]->owner = THIS_MODULE; 178 vmax_mtd[i]->owner = THIS_MODULE;
179 add_mtd_device(vmax_mtd[i]); 179 mtd_device_register(vmax_mtd[i], NULL, 0);
180 } 180 }
181 } 181 }
182 182
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 4afc167731ef..3a04b078576a 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -563,7 +563,7 @@ static void vmu_queryblocks(struct mapleq *mq)
563 goto fail_cache_create; 563 goto fail_cache_create;
564 part_cur->pcache = pcache; 564 part_cur->pcache = pcache;
565 565
566 error = add_mtd_device(mtd_cur); 566 error = mtd_device_register(mtd_cur, NULL, 0);
567 if (error) 567 if (error)
568 goto fail_mtd_register; 568 goto fail_mtd_register;
569 569
@@ -709,7 +709,7 @@ static void __devexit vmu_disconnect(struct maple_device *mdev)
709 for (x = 0; x < card->partitions; x++) { 709 for (x = 0; x < card->partitions; x++) {
710 mpart = ((card->mtd)[x]).priv; 710 mpart = ((card->mtd)[x]).priv;
711 mpart->mdev = NULL; 711 mpart->mdev = NULL;
712 del_mtd_device(&((card->mtd)[x])); 712 mtd_device_unregister(&((card->mtd)[x]));
713 kfree(((card->parts)[x]).name); 713 kfree(((card->parts)[x]).name);
714 } 714 }
715 kfree(card->parts); 715 kfree(card->parts);
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 933a2b6598b4..901ce968efae 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -132,17 +132,20 @@ static int __init init_sbc82xx_flash(void)
132 nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes, 132 nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes,
133 &sbcmtd_parts[i], 0); 133 &sbcmtd_parts[i], 0);
134 if (nr_parts > 0) { 134 if (nr_parts > 0) {
135 add_mtd_partitions (sbcmtd[i], sbcmtd_parts[i], nr_parts); 135 mtd_device_register(sbcmtd[i], sbcmtd_parts[i],
136 nr_parts);
136 continue; 137 continue;
137 } 138 }
138 139
139 /* No partitioning detected. Use default */ 140 /* No partitioning detected. Use default */
140 if (i == 2) { 141 if (i == 2) {
141 add_mtd_device(sbcmtd[i]); 142 mtd_device_register(sbcmtd[i], NULL, 0);
142 } else if (i == bigflash) { 143 } else if (i == bigflash) {
143 add_mtd_partitions (sbcmtd[i], bigflash_parts, ARRAY_SIZE(bigflash_parts)); 144 mtd_device_register(sbcmtd[i], bigflash_parts,
145 ARRAY_SIZE(bigflash_parts));
144 } else { 146 } else {
145 add_mtd_partitions (sbcmtd[i], smallflash_parts, ARRAY_SIZE(smallflash_parts)); 147 mtd_device_register(sbcmtd[i], smallflash_parts,
148 ARRAY_SIZE(smallflash_parts));
146 } 149 }
147 } 150 }
148 return 0; 151 return 0;
@@ -157,9 +160,9 @@ static void __exit cleanup_sbc82xx_flash(void)
157 continue; 160 continue;
158 161
159 if (i<2 || sbcmtd_parts[i]) 162 if (i<2 || sbcmtd_parts[i])
160 del_mtd_partitions(sbcmtd[i]); 163 mtd_device_unregister(sbcmtd[i]);
161 else 164 else
162 del_mtd_device(sbcmtd[i]); 165 mtd_device_unregister(sbcmtd[i]);
163 166
164 kfree(sbcmtd_parts[i]); 167 kfree(sbcmtd_parts[i]);
165 map_destroy(sbcmtd[i]); 168 map_destroy(sbcmtd[i]);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index a534e1f0c348..ca385697446e 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -221,15 +221,33 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
221 kref_get(&dev->ref); 221 kref_get(&dev->ref);
222 __module_get(dev->tr->owner); 222 __module_get(dev->tr->owner);
223 223
224 if (dev->mtd) { 224 if (!dev->mtd)
225 ret = dev->tr->open ? dev->tr->open(dev) : 0; 225 goto unlock;
226 __get_mtd_device(dev->mtd); 226
227 if (dev->tr->open) {
228 ret = dev->tr->open(dev);
229 if (ret)
230 goto error_put;
227 } 231 }
228 232
233 ret = __get_mtd_device(dev->mtd);
234 if (ret)
235 goto error_release;
236
229unlock: 237unlock:
230 mutex_unlock(&dev->lock); 238 mutex_unlock(&dev->lock);
231 blktrans_dev_put(dev); 239 blktrans_dev_put(dev);
232 return ret; 240 return ret;
241
242error_release:
243 if (dev->tr->release)
244 dev->tr->release(dev);
245error_put:
246 module_put(dev->tr->owner);
247 kref_put(&dev->ref, blktrans_dev_release);
248 mutex_unlock(&dev->lock);
249 blktrans_dev_put(dev);
250 return ret;
233} 251}
234 252
235static int blktrans_release(struct gendisk *disk, fmode_t mode) 253static int blktrans_release(struct gendisk *disk, fmode_t mode)
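Besides the registration rename, blktrans_open() gains a proper unwind path: previously a tr->open() failure left the kref and module reference elevated, and the return value of __get_mtd_device() was ignored outright. The new labels release everything in reverse order of acquisition, the standard kernel goto-unwind shape, sketched generically here (names schematic, not the exact function):

	ret = acquire_a();		/* e.g. dev->tr->open(dev) */
	if (ret)
		goto err_put_refs;
	ret = acquire_b();		/* e.g. __get_mtd_device(dev->mtd) */
	if (ret)
		goto err_release_a;
	return 0;

err_release_a:
	release_a();			/* dev->tr->release(dev) */
err_put_refs:
	module_put(owner);		/* undo the refs taken on entry */
	kref_put(&dev->ref, blktrans_dev_release);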
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 4c36ef66a46b..3f92731a5b9e 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -166,10 +166,23 @@ static int mtd_close(struct inode *inode, struct file *file)
166 return 0; 166 return 0;
167} /* mtd_close */ 167} /* mtd_close */
168 168
169/* FIXME: This _really_ needs to die. In 2.5, we should lock the 169/* Back in June 2001, dwmw2 wrote:
170 userspace buffer down and use it directly with readv/writev. 170 *
171*/ 171 * FIXME: This _really_ needs to die. In 2.5, we should lock the
172#define MAX_KMALLOC_SIZE 0x20000 172 * userspace buffer down and use it directly with readv/writev.
173 *
174 * The implementation below, using mtd_kmalloc_up_to, mitigates
175 * allocation failures when the system is in a low-memory state
176 * or memory is highly fragmented, at the cost of reducing the
177 * performance of the requested transfer due to a smaller buffer size.
178 *
179 * A more complex but more memory-efficient implementation based on
180 * get_user_pages and iovecs to cover extents of those pages is a
181 * longer-term goal, as intimated by dwmw2 above. However, for the
182 * write case, this requires yet more complex head and tail transfer
183 * handling when those head and tail offsets and sizes are such that
184 * alignment requirements are not met in the NAND subdriver.
185 */
173 186
174static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos) 187static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
175{ 188{
@@ -179,6 +192,7 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
179 size_t total_retlen=0; 192 size_t total_retlen=0;
180 int ret=0; 193 int ret=0;
181 int len; 194 int len;
195 size_t size = count;
182 char *kbuf; 196 char *kbuf;
183 197
184 DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n"); 198 DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
@@ -189,23 +203,12 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
189 if (!count) 203 if (!count)
190 return 0; 204 return 0;
191 205
192 /* FIXME: Use kiovec in 2.5 to lock down the user's buffers 206 kbuf = mtd_kmalloc_up_to(mtd, &size);
193 and pass them directly to the MTD functions */
194
195 if (count > MAX_KMALLOC_SIZE)
196 kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
197 else
198 kbuf=kmalloc(count, GFP_KERNEL);
199
200 if (!kbuf) 207 if (!kbuf)
201 return -ENOMEM; 208 return -ENOMEM;
202 209
203 while (count) { 210 while (count) {
204 211 len = min_t(size_t, count, size);
205 if (count > MAX_KMALLOC_SIZE)
206 len = MAX_KMALLOC_SIZE;
207 else
208 len = count;
209 212
210 switch (mfi->mode) { 213 switch (mfi->mode) {
211 case MTD_MODE_OTP_FACTORY: 214 case MTD_MODE_OTP_FACTORY:
@@ -268,6 +271,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
268{ 271{
269 struct mtd_file_info *mfi = file->private_data; 272 struct mtd_file_info *mfi = file->private_data;
270 struct mtd_info *mtd = mfi->mtd; 273 struct mtd_info *mtd = mfi->mtd;
274 size_t size = count;
271 char *kbuf; 275 char *kbuf;
272 size_t retlen; 276 size_t retlen;
273 size_t total_retlen=0; 277 size_t total_retlen=0;
@@ -285,20 +289,12 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
285 if (!count) 289 if (!count)
286 return 0; 290 return 0;
287 291
288 if (count > MAX_KMALLOC_SIZE) 292 kbuf = mtd_kmalloc_up_to(mtd, &size);
289 kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
290 else
291 kbuf=kmalloc(count, GFP_KERNEL);
292
293 if (!kbuf) 293 if (!kbuf)
294 return -ENOMEM; 294 return -ENOMEM;
295 295
296 while (count) { 296 while (count) {
297 297 len = min_t(size_t, count, size);
298 if (count > MAX_KMALLOC_SIZE)
299 len = MAX_KMALLOC_SIZE;
300 else
301 len = count;
302 298
303 if (copy_from_user(kbuf, buf, len)) { 299 if (copy_from_user(kbuf, buf, len)) {
304 kfree(kbuf); 300 kfree(kbuf);
@@ -512,7 +508,6 @@ static int shrink_ecclayout(const struct nand_ecclayout *from,
512 return 0; 508 return 0;
513} 509}
514 510
515#ifdef CONFIG_MTD_PARTITIONS
516static int mtd_blkpg_ioctl(struct mtd_info *mtd, 511static int mtd_blkpg_ioctl(struct mtd_info *mtd,
517 struct blkpg_ioctl_arg __user *arg) 512 struct blkpg_ioctl_arg __user *arg)
518{ 513{
@@ -548,8 +543,6 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
548 return -EINVAL; 543 return -EINVAL;
549 } 544 }
550} 545}
551#endif
552
553 546
554static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) 547static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
555{ 548{
@@ -941,7 +934,6 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
941 break; 934 break;
942 } 935 }
943 936
944#ifdef CONFIG_MTD_PARTITIONS
945 case BLKPG: 937 case BLKPG:
946 { 938 {
947 ret = mtd_blkpg_ioctl(mtd, 939 ret = mtd_blkpg_ioctl(mtd,
@@ -955,7 +947,6 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
955 ret = 0; 947 ret = 0;
956 break; 948 break;
957 } 949 }
958#endif
959 950
960 default: 951 default:
961 ret = -ENOTTY; 952 ret = -ENOTTY;
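The mtdchar read and write paths above now share one idiom: ask for a buffer covering the whole transfer, let mtd_kmalloc_up_to() shrink the request under memory pressure, and loop in chunks of whatever size was actually granted. A self-contained sketch of that negotiation (mtd, the user buffer, and the per-chunk transfer are assumed from the surrounding function):

	size_t size = count;	/* ideal size: the entire request */
	char *kbuf = mtd_kmalloc_up_to(mtd, &size);	/* may shrink size */
	if (!kbuf)
		return -ENOMEM;
	while (count) {
		int len = min_t(size_t, count, size);
		/* copy/transfer 'len' bytes here; advance buf and *ppos */
		count -= len;
	}
	kfree(kbuf);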
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 5060e608ea5d..e601672a5305 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -319,7 +319,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
319 if (!(mtd->flags & MTD_WRITEABLE)) 319 if (!(mtd->flags & MTD_WRITEABLE))
320 return -EROFS; 320 return -EROFS;
321 321
322 ops->retlen = 0; 322 ops->retlen = ops->oobretlen = 0;
323 323
324 for (i = 0; i < concat->num_subdev; i++) { 324 for (i = 0; i < concat->num_subdev; i++) {
325 struct mtd_info *subdev = concat->subdev[i]; 325 struct mtd_info *subdev = concat->subdev[i];
@@ -334,7 +334,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
334 devops.len = subdev->size - to; 334 devops.len = subdev->size - to;
335 335
336 err = subdev->write_oob(subdev, to, &devops); 336 err = subdev->write_oob(subdev, to, &devops);
337 ops->retlen += devops.retlen; 337 ops->retlen += devops.oobretlen;
338 if (err) 338 if (err)
339 return err; 339 return err;
340 340
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index da69bc8a5a7d..c510aff289a8 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -24,6 +24,7 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/ptrace.h> 26#include <linux/ptrace.h>
27#include <linux/seq_file.h>
27#include <linux/string.h> 28#include <linux/string.h>
28#include <linux/timer.h> 29#include <linux/timer.h>
29#include <linux/major.h> 30#include <linux/major.h>
@@ -37,6 +38,7 @@
37#include <linux/gfp.h> 38#include <linux/gfp.h>
38 39
39#include <linux/mtd/mtd.h> 40#include <linux/mtd/mtd.h>
41#include <linux/mtd/partitions.h>
40 42
41#include "mtdcore.h" 43#include "mtdcore.h"
42/* 44/*
@@ -391,7 +393,7 @@ fail_locked:
391 * if the requested device does not appear to be present in the list. 393 * if the requested device does not appear to be present in the list.
392 */ 394 */
393 395
394int del_mtd_device (struct mtd_info *mtd) 396int del_mtd_device(struct mtd_info *mtd)
395{ 397{
396 int ret; 398 int ret;
397 struct mtd_notifier *not; 399 struct mtd_notifier *not;
@@ -427,6 +429,50 @@ out_error:
427} 429}
428 430
429/** 431/**
432 * mtd_device_register - register an MTD device.
433 *
434 * @master: the MTD device to register
435 * @parts: the partitions to register - only valid if nr_parts > 0
436 * @nr_parts: the number of partitions in parts. If zero then the full MTD
437 * device is registered
438 *
439 * Register an MTD device with the system and, optionally, a number of
440 * partitions. If nr_parts is 0 then the whole device is registered, otherwise
441 * only the partitions are registered. To register both the full device *and*
442 * the partitions, call mtd_device_register() twice, once with nr_parts == 0
443 * and once with nr_parts equal to the number of partitions.
444 */
445int mtd_device_register(struct mtd_info *master,
446 const struct mtd_partition *parts,
447 int nr_parts)
448{
449 return parts ? add_mtd_partitions(master, parts, nr_parts) :
450 add_mtd_device(master);
451}
452EXPORT_SYMBOL_GPL(mtd_device_register);
453
454/**
455 * mtd_device_unregister - unregister an existing MTD device.
456 *
457 * @master: the MTD device to unregister. This will unregister both the master
458 * and any partitions if registered.
459 */
460int mtd_device_unregister(struct mtd_info *master)
461{
462 int err;
463
464 err = del_mtd_partitions(master);
465 if (err)
466 return err;
467
468 if (!device_is_registered(&master->dev))
469 return 0;
470
471 return del_mtd_device(master);
472}
473EXPORT_SYMBOL_GPL(mtd_device_unregister);
474
475/**
430 * register_mtd_user - register a 'user' of MTD devices. 476 * register_mtd_user - register a 'user' of MTD devices.
431 * @new: pointer to notifier info structure 477 * @new: pointer to notifier info structure
432 * 478 *
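With these two helpers exported, drivers no longer need to know about partitions at registration time. Note that in the implementation above it is a NULL parts pointer, not nr_parts == 0, that actually selects whole-device registration. A minimal usage pair (hypothetical driver glue; mtd, parts, and nr_parts come from probe-time setup):

	/* probe: parts from a parser or platform data; NULL/0 = whole device */
	err = mtd_device_register(mtd, parts, nr_parts);

	/* remove: partitions are deleted first, then the master device,
	 * and the latter only if it was itself registered */
	err = mtd_device_unregister(mtd);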
@@ -443,7 +489,7 @@ void register_mtd_user (struct mtd_notifier *new)
443 489
444 list_add(&new->list, &mtd_notifiers); 490 list_add(&new->list, &mtd_notifiers);
445 491
446 __module_get(THIS_MODULE); 492 __module_get(THIS_MODULE);
447 493
448 mtd_for_each_device(mtd) 494 mtd_for_each_device(mtd)
449 new->add(mtd); 495 new->add(mtd);
@@ -532,7 +578,6 @@ int __get_mtd_device(struct mtd_info *mtd)
532 return -ENODEV; 578 return -ENODEV;
533 579
534 if (mtd->get_device) { 580 if (mtd->get_device) {
535
536 err = mtd->get_device(mtd); 581 err = mtd->get_device(mtd);
537 582
538 if (err) { 583 if (err) {
@@ -570,21 +615,13 @@ struct mtd_info *get_mtd_device_nm(const char *name)
570 if (!mtd) 615 if (!mtd)
571 goto out_unlock; 616 goto out_unlock;
572 617
573 if (!try_module_get(mtd->owner)) 618 err = __get_mtd_device(mtd);
619 if (err)
574 goto out_unlock; 620 goto out_unlock;
575 621
576 if (mtd->get_device) {
577 err = mtd->get_device(mtd);
578 if (err)
579 goto out_put;
580 }
581
582 mtd->usecount++;
583 mutex_unlock(&mtd_table_mutex); 622 mutex_unlock(&mtd_table_mutex);
584 return mtd; 623 return mtd;
585 624
586out_put:
587 module_put(mtd->owner);
588out_unlock: 625out_unlock:
589 mutex_unlock(&mtd_table_mutex); 626 mutex_unlock(&mtd_table_mutex);
590 return ERR_PTR(err); 627 return ERR_PTR(err);
@@ -638,8 +675,54 @@ int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
638 return ret; 675 return ret;
639} 676}
640 677
641EXPORT_SYMBOL_GPL(add_mtd_device); 678/**
642EXPORT_SYMBOL_GPL(del_mtd_device); 679 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
680 * @size: A pointer to the ideal or maximum size of the allocation. Points
681 * to the actual allocation size on success.
682 *
683 * This routine attempts to allocate a contiguous kernel buffer up to
684 * the specified size, backing off the size of the request exponentially
685 * until the request succeeds or until the allocation size falls below
686 * the system page size. This attempts to make sure it does not adversely
687 * impact system performance, so when allocating more than one page, we
688 * ask the memory allocator to avoid re-trying, swapping, writing back
689 * or performing I/O.
690 *
691 * Note, this function also makes sure that the allocated buffer is aligned to
692 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
693 *
694 * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
695 * to handle smaller (i.e. degraded) buffer allocations under low- or
696 * fragmented-memory situations where such reduced allocations, from a
697 * requested ideal, are allowed.
698 *
699 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
700 */
701void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
702{
703 gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
704 __GFP_NORETRY | __GFP_NO_KSWAPD;
705 size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
706 void *kbuf;
707
708 *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
709
710 while (*size > min_alloc) {
711 kbuf = kmalloc(*size, flags);
712 if (kbuf)
713 return kbuf;
714
715 *size >>= 1;
716 *size = ALIGN(*size, mtd->writesize);
717 }
718
719 /*
720 * For the last resort allocation allow 'kmalloc()' to do all sorts of
721 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
722 */
723 return kmalloc(*size, GFP_KERNEL);
724}
725
643EXPORT_SYMBOL_GPL(get_mtd_device); 726EXPORT_SYMBOL_GPL(get_mtd_device);
644EXPORT_SYMBOL_GPL(get_mtd_device_nm); 727EXPORT_SYMBOL_GPL(get_mtd_device_nm);
645EXPORT_SYMBOL_GPL(__get_mtd_device); 728EXPORT_SYMBOL_GPL(__get_mtd_device);
@@ -648,6 +731,7 @@ EXPORT_SYMBOL_GPL(__put_mtd_device);
648EXPORT_SYMBOL_GPL(register_mtd_user); 731EXPORT_SYMBOL_GPL(register_mtd_user);
649EXPORT_SYMBOL_GPL(unregister_mtd_user); 732EXPORT_SYMBOL_GPL(unregister_mtd_user);
650EXPORT_SYMBOL_GPL(default_mtd_writev); 733EXPORT_SYMBOL_GPL(default_mtd_writev);
734EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
651 735
652#ifdef CONFIG_PROC_FS 736#ifdef CONFIG_PROC_FS
653 737
@@ -656,44 +740,32 @@ EXPORT_SYMBOL_GPL(default_mtd_writev);
656 740
657static struct proc_dir_entry *proc_mtd; 741static struct proc_dir_entry *proc_mtd;
658 742
659static inline int mtd_proc_info(char *buf, struct mtd_info *this) 743static int mtd_proc_show(struct seq_file *m, void *v)
660{
661 return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", this->index,
662 (unsigned long long)this->size,
663 this->erasesize, this->name);
664}
665
666static int mtd_read_proc (char *page, char **start, off_t off, int count,
667 int *eof, void *data_unused)
668{ 744{
669 struct mtd_info *mtd; 745 struct mtd_info *mtd;
670 int len, l;
671 off_t begin = 0;
672 746
747 seq_puts(m, "dev:    size   erasesize  name\n");
673 mutex_lock(&mtd_table_mutex); 748 mutex_lock(&mtd_table_mutex);
674
675 len = sprintf(page, "dev:    size   erasesize  name\n");
676 mtd_for_each_device(mtd) { 749 mtd_for_each_device(mtd) {
677 l = mtd_proc_info(page + len, mtd); 750 seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
678 len += l; 751 mtd->index, (unsigned long long)mtd->size,
679 if (len+begin > off+count) 752 mtd->erasesize, mtd->name);
680 goto done; 753 }
681 if (len+begin < off) {
682 begin += len;
683 len = 0;
684 }
685 }
686
687 *eof = 1;
688
689done:
690 mutex_unlock(&mtd_table_mutex); 754 mutex_unlock(&mtd_table_mutex);
691 if (off >= len+begin) 755 return 0;
692 return 0; 756}
693 *start = page + (off-begin); 757
694 return ((count < begin+len-off) ? count : begin+len-off); 758static int mtd_proc_open(struct inode *inode, struct file *file)
759{
760 return single_open(file, mtd_proc_show, NULL);
695} 761}
696 762
763static const struct file_operations mtd_proc_ops = {
764 .open = mtd_proc_open,
765 .read = seq_read,
766 .llseek = seq_lseek,
767 .release = single_release,
768};
697#endif /* CONFIG_PROC_FS */ 769#endif /* CONFIG_PROC_FS */
698 770
699/*====================================================================*/ 771/*====================================================================*/
@@ -734,8 +806,7 @@ static int __init init_mtd(void)
734 goto err_bdi3; 806 goto err_bdi3;
735 807
736#ifdef CONFIG_PROC_FS 808#ifdef CONFIG_PROC_FS
737 if ((proc_mtd = create_proc_entry( "mtd", 0, NULL ))) 809 proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
738 proc_mtd->read_proc = mtd_read_proc;
739#endif /* CONFIG_PROC_FS */ 810#endif /* CONFIG_PROC_FS */
740 return 0; 811 return 0;
741 812
@@ -753,7 +824,7 @@ err_reg:
753static void __exit cleanup_mtd(void) 824static void __exit cleanup_mtd(void)
754{ 825{
755#ifdef CONFIG_PROC_FS 826#ifdef CONFIG_PROC_FS
756 if (proc_mtd) 827 if (proc_mtd)
757 remove_proc_entry( "mtd", NULL); 828 remove_proc_entry( "mtd", NULL);
758#endif /* CONFIG_PROC_FS */ 829#endif /* CONFIG_PROC_FS */
759 class_unregister(&mtd_class); 830 class_unregister(&mtd_class);
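The /proc/mtd conversion above trades the legacy read_proc interface, with its hand-rolled off/count window arithmetic, for seq_file's single_open(), which handles buffering and partial reads itself. The minimal shape of a single_open()-backed proc file looks like this (generic sketch; all names hypothetical):

	static int example_show(struct seq_file *m, void *v)
	{
		seq_puts(m, "one self-describing line\n");
		return 0;
	}

	static int example_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_show, NULL);
	}

	static const struct file_operations example_proc_ops = {
		.open		= example_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	/* registered with: proc_create("example", 0, NULL, &example_proc_ops); */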
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 6a64fdebc898..0ed6126b4c1f 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -10,6 +10,12 @@
10extern struct mutex mtd_table_mutex; 10extern struct mutex mtd_table_mutex;
11extern struct mtd_info *__mtd_next_device(int i); 11extern struct mtd_info *__mtd_next_device(int i);
12 12
13extern int add_mtd_device(struct mtd_info *mtd);
14extern int del_mtd_device(struct mtd_info *mtd);
15extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *,
16 int);
17extern int del_mtd_partitions(struct mtd_info *);
18
13#define mtd_for_each_device(mtd) \ 19#define mtd_for_each_device(mtd) \
14 for ((mtd) = __mtd_next_device(0); \ 20 for ((mtd) = __mtd_next_device(0); \
15 (mtd) != NULL; \ 21 (mtd) != NULL; \
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 0a4760174782..630be3e7da04 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -31,6 +31,8 @@
31#include <linux/mtd/partitions.h> 31#include <linux/mtd/partitions.h>
32#include <linux/err.h> 32#include <linux/err.h>
33 33
34#include "mtdcore.h"
35
34/* Our partition linked list */ 36/* Our partition linked list */
35static LIST_HEAD(mtd_partitions); 37static LIST_HEAD(mtd_partitions);
36static DEFINE_MUTEX(mtd_partitions_mutex); 38static DEFINE_MUTEX(mtd_partitions_mutex);
@@ -376,7 +378,6 @@ int del_mtd_partitions(struct mtd_info *master)
376 378
377 return err; 379 return err;
378} 380}
379EXPORT_SYMBOL(del_mtd_partitions);
380 381
381static struct mtd_part *allocate_partition(struct mtd_info *master, 382static struct mtd_part *allocate_partition(struct mtd_info *master,
382 const struct mtd_partition *part, int partno, 383 const struct mtd_partition *part, int partno,
@@ -671,7 +672,6 @@ int add_mtd_partitions(struct mtd_info *master,
671 672
672 return 0; 673 return 0;
673} 674}
674EXPORT_SYMBOL(add_mtd_partitions);
675 675
676static DEFINE_SPINLOCK(part_parser_lock); 676static DEFINE_SPINLOCK(part_parser_lock);
677static LIST_HEAD(part_parsers); 677static LIST_HEAD(part_parsers);
@@ -722,11 +722,8 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
722 parser = get_partition_parser(*types); 722 parser = get_partition_parser(*types);
723 if (!parser && !request_module("%s", *types)) 723 if (!parser && !request_module("%s", *types))
724 parser = get_partition_parser(*types); 724 parser = get_partition_parser(*types);
725 if (!parser) { 725 if (!parser)
726 printk(KERN_NOTICE "%s partition parsing not available\n",
727 *types);
728 continue; 726 continue;
729 }
730 ret = (*parser->parse_fn)(master, pparts, origin); 727 ret = (*parser->parse_fn)(master, pparts, origin);
731 if (ret > 0) { 728 if (ret > 0) {
732 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n", 729 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index fed215c4cfa1..fd7885327611 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1450,7 +1450,13 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
1450 } 1450 }
1451 1451
1452 oinfo = mtd->ecclayout; 1452 oinfo = mtd->ecclayout;
1453 if (!mtd->oobsize || !oinfo || oinfo->oobavail < MTDSWAP_OOBSIZE) { 1453 if (!oinfo) {
1454 printk(KERN_ERR "%s: mtd%d does not have OOB\n",
1455 MTDSWAP_PREFIX, mtd->index);
1456 return;
1457 }
1458
1459 if (!mtd->oobsize || oinfo->oobavail < MTDSWAP_OOBSIZE) {
1454 printk(KERN_ERR "%s: Not enough free bytes in OOB, " 1460 printk(KERN_ERR "%s: Not enough free bytes in OOB, "
1455 "%d available, %zu needed.\n", 1461 "%d available, %zu needed.\n",
1456 MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE); 1462 MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index edec457d361d..4c3425235adc 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -92,7 +92,7 @@ config MTD_NAND_EDB7312
92 92
93config MTD_NAND_H1900 93config MTD_NAND_H1900
94 tristate "iPAQ H1900 flash" 94 tristate "iPAQ H1900 flash"
95 depends on ARCH_PXA && MTD_PARTITIONS 95 depends on ARCH_PXA
96 help 96 help
97 This enables the driver for the iPAQ h1900 flash. 97 This enables the driver for the iPAQ h1900 flash.
98 98
@@ -419,7 +419,6 @@ config MTD_NAND_TMIO
419 419
420config MTD_NAND_NANDSIM 420config MTD_NAND_NANDSIM
421 tristate "Support for NAND Flash Simulator" 421 tristate "Support for NAND Flash Simulator"
422 depends on MTD_PARTITIONS
423 help 422 help
424 The simulator may simulate various NAND flash chips for the 423 The simulator may simulate various NAND flash chips for the
425 MTD nand layer. 424 MTD nand layer.
@@ -513,7 +512,7 @@ config MTD_NAND_SOCRATES
513 512
514config MTD_NAND_NUC900 513config MTD_NAND_NUC900
515 tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards." 514 tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
516 depends on ARCH_W90X900 && MTD_PARTITIONS 515 depends on ARCH_W90X900
517 help 516 help
518 This enables the driver for the NAND Flash on evaluation board based 517 This enables the driver for the NAND Flash on evaluation board based
519 on w90p910 / NUC9xx. 518 on w90p910 / NUC9xx.
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 8691e0482ed2..eb40ea829ab2 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -120,7 +120,7 @@ static void alauda_delete(struct kref *kref)
120 struct alauda *al = container_of(kref, struct alauda, kref); 120 struct alauda *al = container_of(kref, struct alauda, kref);
121 121
122 if (al->mtd) { 122 if (al->mtd) {
123 del_mtd_device(al->mtd); 123 mtd_device_unregister(al->mtd);
124 kfree(al->mtd); 124 kfree(al->mtd);
125 } 125 }
126 usb_put_dev(al->dev); 126 usb_put_dev(al->dev);
@@ -592,7 +592,7 @@ static int alauda_init_media(struct alauda *al)
592 mtd->priv = al; 592 mtd->priv = al;
593 mtd->owner = THIS_MODULE; 593 mtd->owner = THIS_MODULE;
594 594
595 err = add_mtd_device(mtd); 595 err = mtd_device_register(mtd, NULL, 0);
596 if (err) { 596 if (err) {
597 err = -ENFILE; 597 err = -ENFILE;
598 goto error; 598 goto error;
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index bc65bf71e1a2..78017eb9318e 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -235,8 +235,8 @@ static int __devinit ams_delta_init(struct platform_device *pdev)
235 } 235 }
236 236
237 /* Register the partitions */ 237 /* Register the partitions */
238 add_mtd_partitions(ams_delta_mtd, partition_info, 238 mtd_device_register(ams_delta_mtd, partition_info,
239 ARRAY_SIZE(partition_info)); 239 ARRAY_SIZE(partition_info));
240 240
241 goto out; 241 goto out;
242 242
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 950646aa4c4b..b300705d41cb 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -30,6 +30,7 @@
30#include <linux/mtd/nand.h> 30#include <linux/mtd/nand.h>
31#include <linux/mtd/partitions.h> 31#include <linux/mtd/partitions.h>
32 32
33#include <linux/dmaengine.h>
33#include <linux/gpio.h> 34#include <linux/gpio.h>
34#include <linux/io.h> 35#include <linux/io.h>
35 36
@@ -494,11 +495,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
494 struct resource *regs; 495 struct resource *regs;
495 struct resource *mem; 496 struct resource *mem;
496 int res; 497 int res;
497
498#ifdef CONFIG_MTD_PARTITIONS
499 struct mtd_partition *partitions = NULL; 498 struct mtd_partition *partitions = NULL;
500 int num_partitions = 0; 499 int num_partitions = 0;
501#endif
502 500
503 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 501 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
504 if (!mem) { 502 if (!mem) {
@@ -656,7 +654,6 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
656 goto err_scan_tail; 654 goto err_scan_tail;
657 } 655 }
658 656
659#ifdef CONFIG_MTD_PARTITIONS
660#ifdef CONFIG_MTD_CMDLINE_PARTS 657#ifdef CONFIG_MTD_CMDLINE_PARTS
661 mtd->name = "atmel_nand"; 658 mtd->name = "atmel_nand";
662 num_partitions = parse_mtd_partitions(mtd, part_probes, 659 num_partitions = parse_mtd_partitions(mtd, part_probes,
@@ -672,17 +669,11 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
672 goto err_no_partitions; 669 goto err_no_partitions;
673 } 670 }
674 671
675 res = add_mtd_partitions(mtd, partitions, num_partitions); 672 res = mtd_device_register(mtd, partitions, num_partitions);
676#else
677 res = add_mtd_device(mtd);
678#endif
679
680 if (!res) 673 if (!res)
681 return res; 674 return res;
682 675
683#ifdef CONFIG_MTD_PARTITIONS
684err_no_partitions: 676err_no_partitions:
685#endif
686 nand_release(mtd); 677 nand_release(mtd);
687err_scan_tail: 678err_scan_tail:
688err_scan_ident: 679err_scan_ident:
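
With registration unified, the CONFIG_MTD_PARTITIONS conditionals in atmel_nand_probe() collapse: the partitions/num_partitions locals and the err_no_partitions label become unconditional, and the #else add_mtd_device() fallback disappears. A sketch of the resulting probe tail, using the atmel variable names (the CONFIG_MTD_CMDLINE_PARTS parsing above it is unchanged):

	struct mtd_partition *partitions = NULL;	/* no longer #ifdef'd */
	int num_partitions = 0;
	...
	res = mtd_device_register(mtd, partitions, num_partitions);
	if (!res)
		return res;

	err_no_partitions:		/* label is now always present */
		nand_release(mtd);
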
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 5d513b54a7d7..e7767eef4505 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -581,7 +581,8 @@ static int __init au1xxx_nand_init(void)
581 } 581 }
582 582
583 /* Register the partitions */ 583 /* Register the partitions */
584 add_mtd_partitions(au1550_mtd, partition_info, ARRAY_SIZE(partition_info)); 584 mtd_device_register(au1550_mtd, partition_info,
585 ARRAY_SIZE(partition_info));
585 586
586 return 0; 587 return 0;
587 588
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
index 0911cf03db80..eddc9a224985 100644
--- a/drivers/mtd/nand/autcpu12.c
+++ b/drivers/mtd/nand/autcpu12.c
@@ -185,20 +185,20 @@ static int __init autcpu12_init(void)
185 /* Register the partitions */ 185 /* Register the partitions */
186 switch (autcpu12_mtd->size) { 186 switch (autcpu12_mtd->size) {
187 case SZ_16M: 187 case SZ_16M:
188 add_mtd_partitions(autcpu12_mtd, partition_info16k, 188 mtd_device_register(autcpu12_mtd, partition_info16k,
189 NUM_PARTITIONS16K); 189 NUM_PARTITIONS16K);
190 break; 190 break;
191 case SZ_32M: 191 case SZ_32M:
192 add_mtd_partitions(autcpu12_mtd, partition_info32k, 192 mtd_device_register(autcpu12_mtd, partition_info32k,
193 NUM_PARTITIONS32K); 193 NUM_PARTITIONS32K);
194 break; 194 break;
195 case SZ_64M: 195 case SZ_64M:
196 add_mtd_partitions(autcpu12_mtd, partition_info64k, 196 mtd_device_register(autcpu12_mtd, partition_info64k,
197 NUM_PARTITIONS64K); 197 NUM_PARTITIONS64K);
198 break; 198 break;
199 case SZ_128M: 199 case SZ_128M:
200 add_mtd_partitions(autcpu12_mtd, partition_info128k, 200 mtd_device_register(autcpu12_mtd, partition_info128k,
201 NUM_PARTITIONS128K); 201 NUM_PARTITIONS128K);
202 break; 202 break;
203 default: 203 default:
204 printk("Unsupported SmartMedia device\n"); 204 printk("Unsupported SmartMedia device\n");
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index dfe262c726fb..9ec280738a9a 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -52,9 +52,7 @@
52static const __devinitconst char gBanner[] = KERN_INFO \ 52static const __devinitconst char gBanner[] = KERN_INFO \
53 "BCM UMI MTD NAND Driver: 1.00\n"; 53 "BCM UMI MTD NAND Driver: 1.00\n";
54 54
55#ifdef CONFIG_MTD_PARTITIONS
56const char *part_probes[] = { "cmdlinepart", NULL }; 55const char *part_probes[] = { "cmdlinepart", NULL };
57#endif
58 56
59#if NAND_ECC_BCH 57#if NAND_ECC_BCH
60static uint8_t scan_ff_pattern[] = { 0xff }; 58static uint8_t scan_ff_pattern[] = { 0xff };
@@ -509,7 +507,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
509 kfree(board_mtd); 507 kfree(board_mtd);
510 return -EIO; 508 return -EIO;
511 } 509 }
512 add_mtd_partitions(board_mtd, partition_info, nr_partitions); 510 mtd_device_register(board_mtd, partition_info, nr_partitions);
513 } 511 }
514 512
515 /* Return happy */ 513 /* Return happy */
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 79947bea4d57..dd899cb5d366 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -659,15 +659,10 @@ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
659static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info) 659static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
660{ 660{
661 struct mtd_info *mtd = &info->mtd; 661 struct mtd_info *mtd = &info->mtd;
662
663#ifdef CONFIG_MTD_PARTITIONS
664 struct mtd_partition *parts = info->platform->partitions; 662 struct mtd_partition *parts = info->platform->partitions;
665 int nr = info->platform->nr_partitions; 663 int nr = info->platform->nr_partitions;
666 664
667 return add_mtd_partitions(mtd, parts, nr); 665 return mtd_device_register(mtd, parts, nr);
668#else
669 return add_mtd_device(mtd);
670#endif
671} 666}
672 667
673static int __devexit bf5xx_nand_remove(struct platform_device *pdev) 668static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index e06c8983978e..87ebb4e5b0c3 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -90,9 +90,7 @@ static unsigned int numtimings;
90static int timing[3]; 90static int timing[3];
91module_param_array(timing, int, &numtimings, 0644); 91module_param_array(timing, int, &numtimings, 0644);
92 92
93#ifdef CONFIG_MTD_PARTITIONS
94static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL }; 93static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
95#endif
96 94
97/* Hrm. Why isn't this already conditional on something in the struct device? */ 95/* Hrm. Why isn't this already conditional on something in the struct device? */
98#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0) 96#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
@@ -632,10 +630,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
632 struct cafe_priv *cafe; 630 struct cafe_priv *cafe;
633 uint32_t ctrl; 631 uint32_t ctrl;
634 int err = 0; 632 int err = 0;
635#ifdef CONFIG_MTD_PARTITIONS
636 struct mtd_partition *parts; 633 struct mtd_partition *parts;
637 int nr_parts; 634 int nr_parts;
638#endif
639 635
640 /* Very old versions shared the same PCI ident for all three 636 /* Very old versions shared the same PCI ident for all three
641 functions on the chip. Verify the class too... */ 637 functions on the chip. Verify the class too... */
@@ -804,9 +800,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
804 pci_set_drvdata(pdev, mtd); 800 pci_set_drvdata(pdev, mtd);
805 801
806 /* We register the whole device first, separate from the partitions */ 802 /* We register the whole device first, separate from the partitions */
807 add_mtd_device(mtd); 803 mtd_device_register(mtd, NULL, 0);
808 804
809#ifdef CONFIG_MTD_PARTITIONS
810#ifdef CONFIG_MTD_CMDLINE_PARTS 805#ifdef CONFIG_MTD_CMDLINE_PARTS
811 mtd->name = "cafe_nand"; 806 mtd->name = "cafe_nand";
812#endif 807#endif
@@ -814,9 +809,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
814 if (nr_parts > 0) { 809 if (nr_parts > 0) {
815 cafe->parts = parts; 810 cafe->parts = parts;
816 dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts); 811 dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts);
817 add_mtd_partitions(mtd, parts, nr_parts); 812 mtd_device_register(mtd, parts, nr_parts);
818 } 813 }
819#endif
820 goto out; 814 goto out;
821 815
822 out_irq: 816 out_irq:
@@ -838,7 +832,6 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
838 struct mtd_info *mtd = pci_get_drvdata(pdev); 832 struct mtd_info *mtd = pci_get_drvdata(pdev);
839 struct cafe_priv *cafe = mtd->priv; 833 struct cafe_priv *cafe = mtd->priv;
840 834
841 del_mtd_device(mtd);
842 /* Disable NAND IRQ in global IRQ mask register */ 835 /* Disable NAND IRQ in global IRQ mask register */
843 cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK); 836 cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
844 free_irq(pdev->irq, mtd); 837 free_irq(pdev->irq, mtd);
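
cafe_nand_remove() also drops its explicit del_mtd_device() call. The nand_release() call later in the teardown (outside this hunk) presumably covers the unregistration now that the NAND core uses mtd_device_unregister() internally, so a typical remove path shrinks to hardware cleanup. A hedged sketch; my_nand_remove and the exact ordering are illustrative:

	static void my_nand_remove(struct pci_dev *pdev)
	{
		struct mtd_info *mtd = pci_get_drvdata(pdev);

		/* mask and free the IRQ, release other hardware ... */
		nand_release(mtd);	/* unregisters device and partitions */
	}
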
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 6e6495278258..6fc043a30d1e 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -238,7 +238,7 @@ static int __init cmx270_init(void)
238 238
239 /* Register the partitions */ 239 /* Register the partitions */
240 pr_notice("Using %s partition definition\n", part_type); 240 pr_notice("Using %s partition definition\n", part_type);
241 ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb); 241 ret = mtd_device_register(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
242 if (ret) 242 if (ret)
243 goto err_scan; 243 goto err_scan;
244 244
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 71c35a0b9826..f59ad1f2d5db 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -277,22 +277,15 @@ static int is_geode(void)
277 return 0; 277 return 0;
278} 278}
279 279
280
281#ifdef CONFIG_MTD_PARTITIONS
282static const char *part_probes[] = { "cmdlinepart", NULL }; 280static const char *part_probes[] = { "cmdlinepart", NULL };
283#endif
284
285 281
286static int __init cs553x_init(void) 282static int __init cs553x_init(void)
287{ 283{
288 int err = -ENXIO; 284 int err = -ENXIO;
289 int i; 285 int i;
290 uint64_t val; 286 uint64_t val;
291
292#ifdef CONFIG_MTD_PARTITIONS
293 int mtd_parts_nb = 0; 287 int mtd_parts_nb = 0;
294 struct mtd_partition *mtd_parts = NULL; 288 struct mtd_partition *mtd_parts = NULL;
295#endif
296 289
297 /* If the CPU isn't a Geode GX or LX, abort */ 290 /* If the CPU isn't a Geode GX or LX, abort */
298 if (!is_geode()) 291 if (!is_geode())
@@ -324,17 +317,11 @@ static int __init cs553x_init(void)
324 if (cs553x_mtd[i]) { 317 if (cs553x_mtd[i]) {
325 318
326 /* If any devices registered, return success. Else the last error. */ 319 /* If any devices registered, return success. Else the last error. */
327#ifdef CONFIG_MTD_PARTITIONS
328 mtd_parts_nb = parse_mtd_partitions(cs553x_mtd[i], part_probes, &mtd_parts, 0); 320 mtd_parts_nb = parse_mtd_partitions(cs553x_mtd[i], part_probes, &mtd_parts, 0);
329 if (mtd_parts_nb > 0) { 321 if (mtd_parts_nb > 0)
330 printk(KERN_NOTICE "Using command line partition definition\n"); 322 printk(KERN_NOTICE "Using command line partition definition\n");
331 add_mtd_partitions(cs553x_mtd[i], mtd_parts, mtd_parts_nb); 323 mtd_device_register(cs553x_mtd[i], mtd_parts,
332 } else { 324 mtd_parts_nb);
333 add_mtd_device(cs553x_mtd[i]);
334 }
335#else
336 add_mtd_device(cs553x_mtd[i]);
337#endif
338 err = 0; 325 err = 0;
339 } 326 }
340 } 327 }
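
The cs553x conversion keeps the command-line partition probe but no longer needs an #ifdef'd add_mtd_device() fallback: if the parser finds nothing, mtd_parts stays NULL and mtd_device_register() registers the whole chip as one device. A sketch of the pattern, assuming an already-probed mtd:

	static const char *part_probes[] = { "cmdlinepart", NULL };
	struct mtd_partition *mtd_parts = NULL;
	int nr;

	/* fills mtd_parts from a "mtdparts=..." kernel argument,
	 * returning how many partitions were parsed */
	nr = parse_mtd_partitions(mtd, part_probes, &mtd_parts, 0);

	mtd_device_register(mtd, mtd_parts, nr);	/* nr <= 0: whole chip */
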
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index aff3468867ac..1f34951ae1a7 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -530,6 +530,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
530 int ret; 530 int ret;
531 uint32_t val; 531 uint32_t val;
532 nand_ecc_modes_t ecc_mode; 532 nand_ecc_modes_t ecc_mode;
533 struct mtd_partition *mtd_parts = NULL;
534 int mtd_parts_nb = 0;
533 535
534 /* insist on board-specific configuration */ 536 /* insist on board-specific configuration */
535 if (!pdata) 537 if (!pdata)
@@ -749,41 +751,33 @@ syndrome_done:
749 if (ret < 0) 751 if (ret < 0)
750 goto err_scan; 752 goto err_scan;
751 753
752 if (mtd_has_partitions()) { 754 if (mtd_has_cmdlinepart()) {
753 struct mtd_partition *mtd_parts = NULL; 755 static const char *probes[] __initconst = {
754 int mtd_parts_nb = 0; 756 "cmdlinepart", NULL
757 };
755 758
756 if (mtd_has_cmdlinepart()) { 759 mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
757 static const char *probes[] __initconst = 760 &mtd_parts, 0);
758 { "cmdlinepart", NULL }; 761 }
759
760 mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
761 &mtd_parts, 0);
762 }
763
764 if (mtd_parts_nb <= 0) {
765 mtd_parts = pdata->parts;
766 mtd_parts_nb = pdata->nr_parts;
767 }
768 762
769 /* Register any partitions */ 763 if (mtd_parts_nb <= 0) {
770 if (mtd_parts_nb > 0) { 764 mtd_parts = pdata->parts;
771 ret = add_mtd_partitions(&info->mtd, 765 mtd_parts_nb = pdata->nr_parts;
772 mtd_parts, mtd_parts_nb); 766 }
773 if (ret == 0)
774 info->partitioned = true;
775 }
776 767
777 } else if (pdata->nr_parts) { 768 /* Register any partitions */
778 dev_warn(&pdev->dev, "ignoring %d default partitions on %s\n", 769 if (mtd_parts_nb > 0) {
779 pdata->nr_parts, info->mtd.name); 770 ret = mtd_device_register(&info->mtd, mtd_parts,
771 mtd_parts_nb);
772 if (ret == 0)
773 info->partitioned = true;
780 } 774 }
781 775
782 /* If there's no partition info, just package the whole chip 776 /* If there's no partition info, just package the whole chip
783 * as a single MTD device. 777 * as a single MTD device.
784 */ 778 */
785 if (!info->partitioned) 779 if (!info->partitioned)
786 ret = add_mtd_device(&info->mtd) ? -ENODEV : 0; 780 ret = mtd_device_register(&info->mtd, NULL, 0) ? -ENODEV : 0;
787 781
788 if (ret < 0) 782 if (ret < 0)
789 goto err_scan; 783 goto err_scan;
@@ -824,10 +818,7 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
824 struct davinci_nand_info *info = platform_get_drvdata(pdev); 818 struct davinci_nand_info *info = platform_get_drvdata(pdev);
825 int status; 819 int status;
826 820
827 if (mtd_has_partitions() && info->partitioned) 821 status = mtd_device_unregister(&info->mtd);
828 status = del_mtd_partitions(&info->mtd);
829 else
830 status = del_mtd_device(&info->mtd);
831 822
832 spin_lock_irq(&davinci_nand_lock); 823 spin_lock_irq(&davinci_nand_lock);
833 if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME) 824 if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
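
The davinci rewrite flattens partition selection into a fixed precedence, and since partition support is no longer compile-optional the mtd_has_partitions() guard and the "ignoring %d default partitions" warning go away. The order, restated as a sketch of the new probe logic:

	/* 1. command-line partitions, if mtd_has_cmdlinepart() and the
	 *    parser found any;
	 * 2. otherwise the platform_data partition table;
	 * 3. otherwise the whole chip as a single MTD device */
	if (mtd_parts_nb <= 0) {
		mtd_parts = pdata->parts;
		mtd_parts_nb = pdata->nr_parts;
	}
	if (mtd_parts_nb > 0) {
		ret = mtd_device_register(&info->mtd, mtd_parts, mtd_parts_nb);
		if (ret == 0)
			info->partitioned = true;
	}
	if (!info->partitioned)
		ret = mtd_device_register(&info->mtd, NULL, 0) ? -ENODEV : 0;
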
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 4633f094c510..d5276218945f 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -19,6 +19,7 @@
19 19
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/dma-mapping.h>
22#include <linux/wait.h> 23#include <linux/wait.h>
23#include <linux/mutex.h> 24#include <linux/mutex.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
@@ -44,16 +45,16 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
44 45
45/* We define a macro here that combines all interrupts this driver uses into 46/* We define a macro here that combines all interrupts this driver uses into
46 * a single constant value, for convenience. */ 47 * a single constant value, for convenience. */
47#define DENALI_IRQ_ALL (INTR_STATUS0__DMA_CMD_COMP | \ 48#define DENALI_IRQ_ALL (INTR_STATUS__DMA_CMD_COMP | \
48 INTR_STATUS0__ECC_TRANSACTION_DONE | \ 49 INTR_STATUS__ECC_TRANSACTION_DONE | \
49 INTR_STATUS0__ECC_ERR | \ 50 INTR_STATUS__ECC_ERR | \
50 INTR_STATUS0__PROGRAM_FAIL | \ 51 INTR_STATUS__PROGRAM_FAIL | \
51 INTR_STATUS0__LOAD_COMP | \ 52 INTR_STATUS__LOAD_COMP | \
52 INTR_STATUS0__PROGRAM_COMP | \ 53 INTR_STATUS__PROGRAM_COMP | \
53 INTR_STATUS0__TIME_OUT | \ 54 INTR_STATUS__TIME_OUT | \
54 INTR_STATUS0__ERASE_FAIL | \ 55 INTR_STATUS__ERASE_FAIL | \
55 INTR_STATUS0__RST_COMP | \ 56 INTR_STATUS__RST_COMP | \
56 INTR_STATUS0__ERASE_COMP) 57 INTR_STATUS__ERASE_COMP)
57 58
58/* indicates whether or not the internal value for the flash bank is 59/* indicates whether or not the internal value for the flash bank is
59 * valid or not */ 60 * valid or not */
@@ -95,30 +96,6 @@ static const struct pci_device_id denali_pci_ids[] = {
95 { /* end: all zeroes */ } 96 { /* end: all zeroes */ }
96}; 97};
97 98
98
99/* these are static lookup tables that give us easy access to
100 * registers in the NAND controller.
101 */
102static const uint32_t intr_status_addresses[4] = {INTR_STATUS0,
103 INTR_STATUS1,
104 INTR_STATUS2,
105 INTR_STATUS3};
106
107static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0,
108 DEVICE_RESET__BANK1,
109 DEVICE_RESET__BANK2,
110 DEVICE_RESET__BANK3};
111
112static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT,
113 INTR_STATUS1__TIME_OUT,
114 INTR_STATUS2__TIME_OUT,
115 INTR_STATUS3__TIME_OUT};
116
117static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP,
118 INTR_STATUS1__RST_COMP,
119 INTR_STATUS2__RST_COMP,
120 INTR_STATUS3__RST_COMP};
121
122/* forward declarations */ 99/* forward declarations */
123static void clear_interrupts(struct denali_nand_info *denali); 100static void clear_interrupts(struct denali_nand_info *denali);
124static uint32_t wait_for_irq(struct denali_nand_info *denali, 101static uint32_t wait_for_irq(struct denali_nand_info *denali,
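
The four lookup tables deleted above (intr_status_addresses, device_reset_banks, operation_timeout, reset_complete) are redundant: the per-bank interrupt registers sit at a fixed stride and the reset register takes one bit per bank. A sketch of the arithmetic the replacement code relies on, with the macros as defined in denali.h later in this patch:

	#define INTR_STATUS(bank)	(0x410 + (bank) * 0x50)
	#define INTR_EN(bank)		(0x420 + (bank) * 0x50)
	/* e.g. INTR_STATUS(2) == 0x4b0, the old INTR_STATUS2; the
	 * timeout/reset-complete bits are identical across banks, so
	 * INTR_STATUS__TIME_OUT replaces operation_timeout[bank] */

	iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
	/* 1 << bank reproduces the old DEVICE_RESET__BANK0..3 values */
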
@@ -180,19 +157,17 @@ static void read_status(struct denali_nand_info *denali)
180static void reset_bank(struct denali_nand_info *denali) 157static void reset_bank(struct denali_nand_info *denali)
181{ 158{
182 uint32_t irq_status = 0; 159 uint32_t irq_status = 0;
183 uint32_t irq_mask = reset_complete[denali->flash_bank] | 160 uint32_t irq_mask = INTR_STATUS__RST_COMP |
184 operation_timeout[denali->flash_bank]; 161 INTR_STATUS__TIME_OUT;
185 int bank = 0;
186 162
187 clear_interrupts(denali); 163 clear_interrupts(denali);
188 164
189 bank = device_reset_banks[denali->flash_bank]; 165 iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
190 iowrite32(bank, denali->flash_reg + DEVICE_RESET);
191 166
192 irq_status = wait_for_irq(denali, irq_mask); 167 irq_status = wait_for_irq(denali, irq_mask);
193 168
194 if (irq_status & operation_timeout[denali->flash_bank]) 169 if (irq_status & INTR_STATUS__TIME_OUT)
195 dev_err(&denali->dev->dev, "reset bank failed.\n"); 170 dev_err(denali->dev, "reset bank failed.\n");
196} 171}
197 172
198/* Reset the flash controller */ 173/* Reset the flash controller */
@@ -200,29 +175,28 @@ static uint16_t denali_nand_reset(struct denali_nand_info *denali)
200{ 175{
201 uint32_t i; 176 uint32_t i;
202 177
203 dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n", 178 dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
204 __FILE__, __LINE__, __func__); 179 __FILE__, __LINE__, __func__);
205 180
206 for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) 181 for (i = 0 ; i < denali->max_banks; i++)
207 iowrite32(reset_complete[i] | operation_timeout[i], 182 iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
208 denali->flash_reg + intr_status_addresses[i]); 183 denali->flash_reg + INTR_STATUS(i));
209 184
210 for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) { 185 for (i = 0 ; i < denali->max_banks; i++) {
211 iowrite32(device_reset_banks[i], 186 iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
212 denali->flash_reg + DEVICE_RESET);
213 while (!(ioread32(denali->flash_reg + 187 while (!(ioread32(denali->flash_reg +
214 intr_status_addresses[i]) & 188 INTR_STATUS(i)) &
215 (reset_complete[i] | operation_timeout[i]))) 189 (INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT)))
216 cpu_relax(); 190 cpu_relax();
217 if (ioread32(denali->flash_reg + intr_status_addresses[i]) & 191 if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
218 operation_timeout[i]) 192 INTR_STATUS__TIME_OUT)
219 dev_dbg(&denali->dev->dev, 193 dev_dbg(denali->dev,
220 "NAND Reset operation timed out on bank %d\n", i); 194 "NAND Reset operation timed out on bank %d\n", i);
221 } 195 }
222 196
223 for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) 197 for (i = 0; i < denali->max_banks; i++)
224 iowrite32(reset_complete[i] | operation_timeout[i], 198 iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
225 denali->flash_reg + intr_status_addresses[i]); 199 denali->flash_reg + INTR_STATUS(i));
226 200
227 return PASS; 201 return PASS;
228} 202}
@@ -254,7 +228,7 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
254 uint16_t acc_clks; 228 uint16_t acc_clks;
255 uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt; 229 uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
256 230
257 dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n", 231 dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
258 __FILE__, __LINE__, __func__); 232 __FILE__, __LINE__, __func__);
259 233
260 en_lo = CEIL_DIV(Trp[mode], CLK_X); 234 en_lo = CEIL_DIV(Trp[mode], CLK_X);
@@ -291,7 +265,7 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
291 acc_clks++; 265 acc_clks++;
292 266
293 if ((data_invalid - acc_clks * CLK_X) < 2) 267 if ((data_invalid - acc_clks * CLK_X) < 2)
294 dev_warn(&denali->dev->dev, "%s, Line %d: Warning!\n", 268 dev_warn(denali->dev, "%s, Line %d: Warning!\n",
295 __FILE__, __LINE__); 269 __FILE__, __LINE__);
296 270
297 addr_2_data = CEIL_DIV(Tadl[mode], CLK_X); 271 addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
@@ -419,7 +393,7 @@ static void get_hynix_nand_para(struct denali_nand_info *denali,
419#endif 393#endif
420 break; 394 break;
421 default: 395 default:
422 dev_warn(&denali->dev->dev, 396 dev_warn(denali->dev,
423 "Spectra: Unknown Hynix NAND (Device ID: 0x%x)." 397 "Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
424 "Will use default parameter values instead.\n", 398 "Will use default parameter values instead.\n",
425 device_id); 399 device_id);
@@ -431,17 +405,17 @@ static void get_hynix_nand_para(struct denali_nand_info *denali,
431 */ 405 */
432static void find_valid_banks(struct denali_nand_info *denali) 406static void find_valid_banks(struct denali_nand_info *denali)
433{ 407{
434 uint32_t id[LLD_MAX_FLASH_BANKS]; 408 uint32_t id[denali->max_banks];
435 int i; 409 int i;
436 410
437 denali->total_used_banks = 1; 411 denali->total_used_banks = 1;
438 for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) { 412 for (i = 0; i < denali->max_banks; i++) {
439 index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90); 413 index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
440 index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0); 414 index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
441 index_addr_read_data(denali, 415 index_addr_read_data(denali,
442 (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]); 416 (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);
443 417
444 dev_dbg(&denali->dev->dev, 418 dev_dbg(denali->dev,
445 "Return 1st ID for bank[%d]: %x\n", i, id[i]); 419 "Return 1st ID for bank[%d]: %x\n", i, id[i]);
446 420
447 if (i == 0) { 421 if (i == 0) {
@@ -461,16 +435,27 @@ static void find_valid_banks(struct denali_nand_info *denali)
461 * Multichip support is not enabled. 435 * Multichip support is not enabled.
462 */ 436 */
463 if (denali->total_used_banks != 1) { 437 if (denali->total_used_banks != 1) {
464 dev_err(&denali->dev->dev, 438 dev_err(denali->dev,
465 "Sorry, Intel CE4100 only supports " 439 "Sorry, Intel CE4100 only supports "
466 "a single NAND device.\n"); 440 "a single NAND device.\n");
467 BUG(); 441 BUG();
468 } 442 }
469 } 443 }
470 dev_dbg(&denali->dev->dev, 444 dev_dbg(denali->dev,
471 "denali->total_used_banks: %d\n", denali->total_used_banks); 445 "denali->total_used_banks: %d\n", denali->total_used_banks);
472} 446}
473 447
448/*
449 * Use the configuration feature register to determine the maximum number of
450 * banks that the hardware supports.
451 */
452static void detect_max_banks(struct denali_nand_info *denali)
453{
454 uint32_t features = ioread32(denali->flash_reg + FEATURES);
455
456 denali->max_banks = 2 << (features & FEATURES__N_BANKS);
457}
458
474static void detect_partition_feature(struct denali_nand_info *denali) 459static void detect_partition_feature(struct denali_nand_info *denali)
475{ 460{
476 /* For MRST platform, denali->fwblks represent the 461 /* For MRST platform, denali->fwblks represent the
@@ -480,15 +465,15 @@ static void detect_partition_feature(struct denali_nand_info *denali)
480 * blocks it can't touch. 465 * blocks it can't touch.
481 * */ 466 * */
482 if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) { 467 if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
483 if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) & 468 if ((ioread32(denali->flash_reg + PERM_SRC_ID(1)) &
484 PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) { 469 PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
485 denali->fwblks = 470 denali->fwblks =
486 ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) & 471 ((ioread32(denali->flash_reg + MIN_MAX_BANK(1)) &
487 MIN_MAX_BANK_1__MIN_VALUE) * 472 MIN_MAX_BANK__MIN_VALUE) *
488 denali->blksperchip) 473 denali->blksperchip)
489 + 474 +
490 (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) & 475 (ioread32(denali->flash_reg + MIN_BLK_ADDR(1)) &
491 MIN_BLK_ADDR_1__VALUE); 476 MIN_BLK_ADDR__VALUE);
492 } else 477 } else
493 denali->fwblks = SPECTRA_START_BLOCK; 478 denali->fwblks = SPECTRA_START_BLOCK;
494 } else 479 } else
@@ -501,7 +486,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
501 uint32_t id_bytes[5], addr; 486 uint32_t id_bytes[5], addr;
502 uint8_t i, maf_id, device_id; 487 uint8_t i, maf_id, device_id;
503 488
504 dev_dbg(&denali->dev->dev, 489 dev_dbg(denali->dev,
505 "%s, Line %d, Function: %s\n", 490 "%s, Line %d, Function: %s\n",
506 __FILE__, __LINE__, __func__); 491 __FILE__, __LINE__, __func__);
507 492
@@ -530,7 +515,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
530 get_hynix_nand_para(denali, device_id); 515 get_hynix_nand_para(denali, device_id);
531 } 516 }
532 517
533 dev_info(&denali->dev->dev, 518 dev_info(denali->dev,
534 "Dump timing register values:" 519 "Dump timing register values:"
535 "acc_clks: %d, re_2_we: %d, re_2_re: %d\n" 520 "acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
536 "we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n" 521 "we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
@@ -560,7 +545,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
560static void denali_set_intr_modes(struct denali_nand_info *denali, 545static void denali_set_intr_modes(struct denali_nand_info *denali,
561 uint16_t INT_ENABLE) 546 uint16_t INT_ENABLE)
562{ 547{
563 dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n", 548 dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
564 __FILE__, __LINE__, __func__); 549 __FILE__, __LINE__, __func__);
565 550
566 if (INT_ENABLE) 551 if (INT_ENABLE)
@@ -580,6 +565,7 @@ static inline bool is_flash_bank_valid(int flash_bank)
580static void denali_irq_init(struct denali_nand_info *denali) 565static void denali_irq_init(struct denali_nand_info *denali)
581{ 566{
582 uint32_t int_mask = 0; 567 uint32_t int_mask = 0;
568 int i;
583 569
584 /* Disable global interrupts */ 570 /* Disable global interrupts */
585 denali_set_intr_modes(denali, false); 571 denali_set_intr_modes(denali, false);
@@ -587,10 +573,8 @@ static void denali_irq_init(struct denali_nand_info *denali)
587 int_mask = DENALI_IRQ_ALL; 573 int_mask = DENALI_IRQ_ALL;
588 574
589 /* Clear all status bits */ 575 /* Clear all status bits */
590 iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS0); 576 for (i = 0; i < denali->max_banks; ++i)
591 iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS1); 577 iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));
592 iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS2);
593 iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS3);
594 578
595 denali_irq_enable(denali, int_mask); 579 denali_irq_enable(denali, int_mask);
596} 580}
@@ -604,10 +588,10 @@ static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
604static void denali_irq_enable(struct denali_nand_info *denali, 588static void denali_irq_enable(struct denali_nand_info *denali,
605 uint32_t int_mask) 589 uint32_t int_mask)
606{ 590{
607 iowrite32(int_mask, denali->flash_reg + INTR_EN0); 591 int i;
608 iowrite32(int_mask, denali->flash_reg + INTR_EN1); 592
609 iowrite32(int_mask, denali->flash_reg + INTR_EN2); 593 for (i = 0; i < denali->max_banks; ++i)
610 iowrite32(int_mask, denali->flash_reg + INTR_EN3); 594 iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
611} 595}
612 596
613/* This function only returns when an interrupt that this driver cares about 597/* This function only returns when an interrupt that this driver cares about
@@ -624,7 +608,7 @@ static inline void clear_interrupt(struct denali_nand_info *denali,
624{ 608{
625 uint32_t intr_status_reg = 0; 609 uint32_t intr_status_reg = 0;
626 610
627 intr_status_reg = intr_status_addresses[denali->flash_bank]; 611 intr_status_reg = INTR_STATUS(denali->flash_bank);
628 612
629 iowrite32(irq_mask, denali->flash_reg + intr_status_reg); 613 iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
630} 614}
@@ -645,7 +629,7 @@ static uint32_t read_interrupt_status(struct denali_nand_info *denali)
645{ 629{
646 uint32_t intr_status_reg = 0; 630 uint32_t intr_status_reg = 0;
647 631
648 intr_status_reg = intr_status_addresses[denali->flash_bank]; 632 intr_status_reg = INTR_STATUS(denali->flash_bank);
649 633
650 return ioread32(denali->flash_reg + intr_status_reg); 634 return ioread32(denali->flash_reg + intr_status_reg);
651} 635}
@@ -754,7 +738,7 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
754 irq_mask = 0; 738 irq_mask = 0;
755 739
756 if (op == DENALI_READ) 740 if (op == DENALI_READ)
757 irq_mask = INTR_STATUS0__LOAD_COMP; 741 irq_mask = INTR_STATUS__LOAD_COMP;
758 else if (op == DENALI_WRITE) 742 else if (op == DENALI_WRITE)
759 irq_mask = 0; 743 irq_mask = 0;
760 else 744 else
@@ -800,7 +784,7 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
800 irq_status = wait_for_irq(denali, irq_mask); 784 irq_status = wait_for_irq(denali, irq_mask);
801 785
802 if (irq_status == 0) { 786 if (irq_status == 0) {
803 dev_err(&denali->dev->dev, 787 dev_err(denali->dev,
804 "cmd, page, addr on timeout " 788 "cmd, page, addr on timeout "
805 "(0x%x, 0x%x, 0x%x)\n", 789 "(0x%x, 0x%x, 0x%x)\n",
806 cmd, denali->page, addr); 790 cmd, denali->page, addr);
@@ -861,8 +845,8 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
861{ 845{
862 struct denali_nand_info *denali = mtd_to_denali(mtd); 846 struct denali_nand_info *denali = mtd_to_denali(mtd);
863 uint32_t irq_status = 0; 847 uint32_t irq_status = 0;
864 uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP | 848 uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP |
865 INTR_STATUS0__PROGRAM_FAIL; 849 INTR_STATUS__PROGRAM_FAIL;
866 int status = 0; 850 int status = 0;
867 851
868 denali->page = page; 852 denali->page = page;
@@ -875,11 +859,11 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
875 irq_status = wait_for_irq(denali, irq_mask); 859 irq_status = wait_for_irq(denali, irq_mask);
876 860
877 if (irq_status == 0) { 861 if (irq_status == 0) {
878 dev_err(&denali->dev->dev, "OOB write failed\n"); 862 dev_err(denali->dev, "OOB write failed\n");
879 status = -EIO; 863 status = -EIO;
880 } 864 }
881 } else { 865 } else {
882 dev_err(&denali->dev->dev, "unable to send pipeline command\n"); 866 dev_err(denali->dev, "unable to send pipeline command\n");
883 status = -EIO; 867 status = -EIO;
884 } 868 }
885 return status; 869 return status;
@@ -889,7 +873,7 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
889static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) 873static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
890{ 874{
891 struct denali_nand_info *denali = mtd_to_denali(mtd); 875 struct denali_nand_info *denali = mtd_to_denali(mtd);
892 uint32_t irq_mask = INTR_STATUS0__LOAD_COMP, 876 uint32_t irq_mask = INTR_STATUS__LOAD_COMP,
893 irq_status = 0, addr = 0x0, cmd = 0x0; 877 irq_status = 0, addr = 0x0, cmd = 0x0;
894 878
895 denali->page = page; 879 denali->page = page;
@@ -904,7 +888,7 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
904 irq_status = wait_for_irq(denali, irq_mask); 888 irq_status = wait_for_irq(denali, irq_mask);
905 889
906 if (irq_status == 0) 890 if (irq_status == 0)
907 dev_err(&denali->dev->dev, "page on OOB timeout %d\n", 891 dev_err(denali->dev, "page on OOB timeout %d\n",
908 denali->page); 892 denali->page);
909 893
910 /* We set the device back to MAIN_ACCESS here as I observed 894 /* We set the device back to MAIN_ACCESS here as I observed
@@ -944,7 +928,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
944{ 928{
945 bool check_erased_page = false; 929 bool check_erased_page = false;
946 930
947 if (irq_status & INTR_STATUS0__ECC_ERR) { 931 if (irq_status & INTR_STATUS__ECC_ERR) {
948 /* read the ECC errors. we'll ignore them for now */ 932 /* read the ECC errors. we'll ignore them for now */
949 uint32_t err_address = 0, err_correction_info = 0; 933 uint32_t err_address = 0, err_correction_info = 0;
950 uint32_t err_byte = 0, err_sector = 0, err_device = 0; 934 uint32_t err_byte = 0, err_sector = 0, err_device = 0;
@@ -995,7 +979,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
995 * for a while for this interrupt 979 * for a while for this interrupt
996 * */ 980 * */
997 while (!(read_interrupt_status(denali) & 981 while (!(read_interrupt_status(denali) &
998 INTR_STATUS0__ECC_TRANSACTION_DONE)) 982 INTR_STATUS__ECC_TRANSACTION_DONE))
999 cpu_relax(); 983 cpu_relax();
1000 clear_interrupts(denali); 984 clear_interrupts(denali);
1001 denali_set_intr_modes(denali, true); 985 denali_set_intr_modes(denali, true);
@@ -1045,14 +1029,13 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
1045 const uint8_t *buf, bool raw_xfer) 1029 const uint8_t *buf, bool raw_xfer)
1046{ 1030{
1047 struct denali_nand_info *denali = mtd_to_denali(mtd); 1031 struct denali_nand_info *denali = mtd_to_denali(mtd);
1048 struct pci_dev *pci_dev = denali->dev;
1049 1032
1050 dma_addr_t addr = denali->buf.dma_buf; 1033 dma_addr_t addr = denali->buf.dma_buf;
1051 size_t size = denali->mtd.writesize + denali->mtd.oobsize; 1034 size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1052 1035
1053 uint32_t irq_status = 0; 1036 uint32_t irq_status = 0;
1054 uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP | 1037 uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
1055 INTR_STATUS0__PROGRAM_FAIL; 1038 INTR_STATUS__PROGRAM_FAIL;
1056 1039
1057 /* if it is a raw xfer, we want to disable ecc, and send 1040 /* if it is a raw xfer, we want to disable ecc, and send
1058 * the spare area. 1041 * the spare area.
@@ -1071,7 +1054,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
1071 mtd->oobsize); 1054 mtd->oobsize);
1072 } 1055 }
1073 1056
1074 pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE); 1057 dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);
1075 1058
1076 clear_interrupts(denali); 1059 clear_interrupts(denali);
1077 denali_enable_dma(denali, true); 1060 denali_enable_dma(denali, true);
@@ -1082,16 +1065,16 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
1082 irq_status = wait_for_irq(denali, irq_mask); 1065 irq_status = wait_for_irq(denali, irq_mask);
1083 1066
1084 if (irq_status == 0) { 1067 if (irq_status == 0) {
1085 dev_err(&denali->dev->dev, 1068 dev_err(denali->dev,
1086 "timeout on write_page (type = %d)\n", 1069 "timeout on write_page (type = %d)\n",
1087 raw_xfer); 1070 raw_xfer);
1088 denali->status = 1071 denali->status =
1089 (irq_status & INTR_STATUS0__PROGRAM_FAIL) ? 1072 (irq_status & INTR_STATUS__PROGRAM_FAIL) ?
1090 NAND_STATUS_FAIL : PASS; 1073 NAND_STATUS_FAIL : PASS;
1091 } 1074 }
1092 1075
1093 denali_enable_dma(denali, false); 1076 denali_enable_dma(denali, false);
1094 pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE); 1077 dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
1095} 1078}
1096 1079
1097/* NAND core entry points */ 1080/* NAND core entry points */
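
All of the pci_dma_* calls in this file become generic DMA API calls on denali->dev, which the probe hunk below retypes from struct pci_dev * to struct device *; the driver stops assuming it lives on PCI. The mapping translation, as a sketch over one buffer lifetime (dev, buf, size are placeholders):

	dma_set_mask(dev, DMA_BIT_MASK(32));	/* was pci_set_dma_mask() */

	addr = dma_map_single(dev, buf, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, addr))	/* was pci_dma_mapping_error() */
		return -EIO;

	dma_sync_single_for_device(dev, addr, size, DMA_TO_DEVICE);
	/* ... hardware DMA runs ... */
	dma_sync_single_for_cpu(dev, addr, size, DMA_TO_DEVICE);

	dma_unmap_single(dev, addr, size, DMA_BIDIRECTIONAL);
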
@@ -1139,18 +1122,17 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1139 uint8_t *buf, int page) 1122 uint8_t *buf, int page)
1140{ 1123{
1141 struct denali_nand_info *denali = mtd_to_denali(mtd); 1124 struct denali_nand_info *denali = mtd_to_denali(mtd);
1142 struct pci_dev *pci_dev = denali->dev;
1143 1125
1144 dma_addr_t addr = denali->buf.dma_buf; 1126 dma_addr_t addr = denali->buf.dma_buf;
1145 size_t size = denali->mtd.writesize + denali->mtd.oobsize; 1127 size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1146 1128
1147 uint32_t irq_status = 0; 1129 uint32_t irq_status = 0;
1148 uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE | 1130 uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE |
1149 INTR_STATUS0__ECC_ERR; 1131 INTR_STATUS__ECC_ERR;
1150 bool check_erased_page = false; 1132 bool check_erased_page = false;
1151 1133
1152 if (page != denali->page) { 1134 if (page != denali->page) {
1153 dev_err(&denali->dev->dev, "IN %s: page %d is not" 1135 dev_err(denali->dev, "IN %s: page %d is not"
1154 " equal to denali->page %d, investigate!!", 1136 " equal to denali->page %d, investigate!!",
1155 __func__, page, denali->page); 1137 __func__, page, denali->page);
1156 BUG(); 1138 BUG();
@@ -1159,7 +1141,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1159 setup_ecc_for_xfer(denali, true, false); 1141 setup_ecc_for_xfer(denali, true, false);
1160 1142
1161 denali_enable_dma(denali, true); 1143 denali_enable_dma(denali, true);
1162 pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE); 1144 dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
1163 1145
1164 clear_interrupts(denali); 1146 clear_interrupts(denali);
1165 denali_setup_dma(denali, DENALI_READ); 1147 denali_setup_dma(denali, DENALI_READ);
@@ -1167,7 +1149,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1167 /* wait for operation to complete */ 1149 /* wait for operation to complete */
1168 irq_status = wait_for_irq(denali, irq_mask); 1150 irq_status = wait_for_irq(denali, irq_mask);
1169 1151
1170 pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE); 1152 dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
1171 1153
1172 memcpy(buf, denali->buf.buf, mtd->writesize); 1154 memcpy(buf, denali->buf.buf, mtd->writesize);
1173 1155
@@ -1192,16 +1174,15 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1192 uint8_t *buf, int page) 1174 uint8_t *buf, int page)
1193{ 1175{
1194 struct denali_nand_info *denali = mtd_to_denali(mtd); 1176 struct denali_nand_info *denali = mtd_to_denali(mtd);
1195 struct pci_dev *pci_dev = denali->dev;
1196 1177
1197 dma_addr_t addr = denali->buf.dma_buf; 1178 dma_addr_t addr = denali->buf.dma_buf;
1198 size_t size = denali->mtd.writesize + denali->mtd.oobsize; 1179 size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1199 1180
1200 uint32_t irq_status = 0; 1181 uint32_t irq_status = 0;
1201 uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP; 1182 uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
1202 1183
1203 if (page != denali->page) { 1184 if (page != denali->page) {
1204 dev_err(&denali->dev->dev, "IN %s: page %d is not" 1185 dev_err(denali->dev, "IN %s: page %d is not"
1205 " equal to denali->page %d, investigate!!", 1186 " equal to denali->page %d, investigate!!",
1206 __func__, page, denali->page); 1187 __func__, page, denali->page);
1207 BUG(); 1188 BUG();
@@ -1210,7 +1191,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1210 setup_ecc_for_xfer(denali, false, true); 1191 setup_ecc_for_xfer(denali, false, true);
1211 denali_enable_dma(denali, true); 1192 denali_enable_dma(denali, true);
1212 1193
1213 pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE); 1194 dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
1214 1195
1215 clear_interrupts(denali); 1196 clear_interrupts(denali);
1216 denali_setup_dma(denali, DENALI_READ); 1197 denali_setup_dma(denali, DENALI_READ);
@@ -1218,7 +1199,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1218 /* wait for operation to complete */ 1199 /* wait for operation to complete */
1219 irq_status = wait_for_irq(denali, irq_mask); 1200 irq_status = wait_for_irq(denali, irq_mask);
1220 1201
1221 pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE); 1202 dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
1222 1203
1223 denali_enable_dma(denali, false); 1204 denali_enable_dma(denali, false);
1224 1205
@@ -1271,10 +1252,10 @@ static void denali_erase(struct mtd_info *mtd, int page)
1271 index_addr(denali, (uint32_t)cmd, 0x1); 1252 index_addr(denali, (uint32_t)cmd, 0x1);
1272 1253
1273 /* wait for erase to complete or failure to occur */ 1254 /* wait for erase to complete or failure to occur */
1274 irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP | 1255 irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
1275 INTR_STATUS0__ERASE_FAIL); 1256 INTR_STATUS__ERASE_FAIL);
1276 1257
1277 denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ? 1258 denali->status = (irq_status & INTR_STATUS__ERASE_FAIL) ?
1278 NAND_STATUS_FAIL : PASS; 1259 NAND_STATUS_FAIL : PASS;
1279} 1260}
1280 1261
@@ -1330,7 +1311,7 @@ static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
1330 uint8_t *ecc_code) 1311 uint8_t *ecc_code)
1331{ 1312{
1332 struct denali_nand_info *denali = mtd_to_denali(mtd); 1313 struct denali_nand_info *denali = mtd_to_denali(mtd);
1333 dev_err(&denali->dev->dev, 1314 dev_err(denali->dev,
1334 "denali_ecc_calculate called unexpectedly\n"); 1315 "denali_ecc_calculate called unexpectedly\n");
1335 BUG(); 1316 BUG();
1336 return -EIO; 1317 return -EIO;
@@ -1340,7 +1321,7 @@ static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
1340 uint8_t *read_ecc, uint8_t *calc_ecc) 1321 uint8_t *read_ecc, uint8_t *calc_ecc)
1341{ 1322{
1342 struct denali_nand_info *denali = mtd_to_denali(mtd); 1323 struct denali_nand_info *denali = mtd_to_denali(mtd);
1343 dev_err(&denali->dev->dev, 1324 dev_err(denali->dev,
1344 "denali_ecc_correct called unexpectedly\n"); 1325 "denali_ecc_correct called unexpectedly\n");
1345 BUG(); 1326 BUG();
1346 return -EIO; 1327 return -EIO;
@@ -1349,7 +1330,7 @@ static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
1349static void denali_ecc_hwctl(struct mtd_info *mtd, int mode) 1330static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
1350{ 1331{
1351 struct denali_nand_info *denali = mtd_to_denali(mtd); 1332 struct denali_nand_info *denali = mtd_to_denali(mtd);
1352 dev_err(&denali->dev->dev, 1333 dev_err(denali->dev,
1353 "denali_ecc_hwctl called unexpectedly\n"); 1334 "denali_ecc_hwctl called unexpectedly\n");
1354 BUG(); 1335 BUG();
1355} 1336}
@@ -1375,6 +1356,7 @@ static void denali_hw_init(struct denali_nand_info *denali)
1375 /* Should set value for these registers when init */ 1356 /* Should set value for these registers when init */
1376 iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES); 1357 iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
1377 iowrite32(1, denali->flash_reg + ECC_ENABLE); 1358 iowrite32(1, denali->flash_reg + ECC_ENABLE);
1359 detect_max_banks(denali);
1378 denali_nand_timing_set(denali); 1360 denali_nand_timing_set(denali);
1379 denali_irq_init(denali); 1361 denali_irq_init(denali);
1380} 1362}
@@ -1484,24 +1466,22 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1484 } 1466 }
1485 1467
1486 /* Is 32-bit DMA supported? */ 1468 /* Is 32-bit DMA supported? */
1487 ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); 1469 ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
1488
1489 if (ret) { 1470 if (ret) {
1490 printk(KERN_ERR "Spectra: no usable DMA configuration\n"); 1471 printk(KERN_ERR "Spectra: no usable DMA configuration\n");
1491 goto failed_enable_dev; 1472 goto failed_enable_dev;
1492 } 1473 }
1493 denali->buf.dma_buf = 1474 denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf,
1494 pci_map_single(dev, denali->buf.buf, 1475 DENALI_BUF_SIZE,
1495 DENALI_BUF_SIZE, 1476 DMA_BIDIRECTIONAL);
1496 PCI_DMA_BIDIRECTIONAL);
1497 1477
1498 if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) { 1478 if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) {
1499 dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n"); 1479 dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
1500 goto failed_enable_dev; 1480 goto failed_enable_dev;
1501 } 1481 }
1502 1482
1503 pci_set_master(dev); 1483 pci_set_master(dev);
1504 denali->dev = dev; 1484 denali->dev = &dev->dev;
1505 denali->mtd.dev.parent = &dev->dev; 1485 denali->mtd.dev.parent = &dev->dev;
1506 1486
1507 ret = pci_request_regions(dev, DENALI_NAND_NAME); 1487 ret = pci_request_regions(dev, DENALI_NAND_NAME);
@@ -1554,7 +1534,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1554 /* scan for NAND devices attached to the controller 1534 /* scan for NAND devices attached to the controller
1555 * this is the first stage in a two step process to register 1535 * this is the first stage in a two step process to register
1556 * with the nand subsystem */ 1536 * with the nand subsystem */
1557 if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) { 1537 if (nand_scan_ident(&denali->mtd, denali->max_banks, NULL)) {
1558 ret = -ENXIO; 1538 ret = -ENXIO;
1559 goto failed_req_irq; 1539 goto failed_req_irq;
1560 } 1540 }
@@ -1664,7 +1644,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1664 goto failed_req_irq; 1644 goto failed_req_irq;
1665 } 1645 }
1666 1646
1667 ret = add_mtd_device(&denali->mtd); 1647 ret = mtd_device_register(&denali->mtd, NULL, 0);
1668 if (ret) { 1648 if (ret) {
1669 dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n", 1649 dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
1670 ret); 1650 ret);
@@ -1681,8 +1661,8 @@ failed_remap_reg:
1681failed_req_regions: 1661failed_req_regions:
1682 pci_release_regions(dev); 1662 pci_release_regions(dev);
1683failed_dma_map: 1663failed_dma_map:
1684 pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE, 1664 dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
1685 PCI_DMA_BIDIRECTIONAL); 1665 DMA_BIDIRECTIONAL);
1686failed_enable_dev: 1666failed_enable_dev:
1687 pci_disable_device(dev); 1667 pci_disable_device(dev);
1688failed_alloc_memery: 1668failed_alloc_memery:
@@ -1696,7 +1676,7 @@ static void denali_pci_remove(struct pci_dev *dev)
1696 struct denali_nand_info *denali = pci_get_drvdata(dev); 1676 struct denali_nand_info *denali = pci_get_drvdata(dev);
1697 1677
1698 nand_release(&denali->mtd); 1678 nand_release(&denali->mtd);
1699 del_mtd_device(&denali->mtd); 1679 mtd_device_unregister(&denali->mtd);
1700 1680
1701 denali_irq_cleanup(dev->irq, denali); 1681 denali_irq_cleanup(dev->irq, denali);
1702 1682
@@ -1704,8 +1684,8 @@ static void denali_pci_remove(struct pci_dev *dev)
1704 iounmap(denali->flash_mem); 1684 iounmap(denali->flash_mem);
1705 pci_release_regions(dev); 1685 pci_release_regions(dev);
1706 pci_disable_device(dev); 1686 pci_disable_device(dev);
1707 pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE, 1687 dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
1708 PCI_DMA_BIDIRECTIONAL); 1688 DMA_BIDIRECTIONAL);
1709 pci_set_drvdata(dev, NULL); 1689 pci_set_drvdata(dev, NULL);
1710 kfree(denali); 1690 kfree(denali);
1711} 1691}
@@ -1721,8 +1701,7 @@ static struct pci_driver denali_pci_driver = {
1721 1701
1722static int __devinit denali_init(void) 1702static int __devinit denali_init(void)
1723{ 1703{
1724 printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n", 1704 printk(KERN_INFO "Spectra MTD driver\n");
1725 __DATE__, __TIME__);
1726 return pci_register_driver(&denali_pci_driver); 1705 return pci_register_driver(&denali_pci_driver);
1727} 1706}
1728 1707
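
The denali_init() banner loses __DATE__/__TIME__, presumably as part of the tree-wide removal of build timestamps: they make otherwise identical builds produce different binaries and defeat reproducible builds.
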
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 3918bcb1561e..fabb9d56b39e 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -211,185 +211,46 @@
211#define TRANSFER_MODE 0x400 211#define TRANSFER_MODE 0x400
212#define TRANSFER_MODE__VALUE 0x0003 212#define TRANSFER_MODE__VALUE 0x0003
213 213
214#define INTR_STATUS0 0x410 214#define INTR_STATUS(__bank) (0x410 + ((__bank) * 0x50))
215#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001 215#define INTR_EN(__bank) (0x420 + ((__bank) * 0x50))
216#define INTR_STATUS0__ECC_ERR 0x0002 216
217#define INTR_STATUS0__DMA_CMD_COMP 0x0004 217#define INTR_STATUS__ECC_TRANSACTION_DONE 0x0001
218#define INTR_STATUS0__TIME_OUT 0x0008 218#define INTR_STATUS__ECC_ERR 0x0002
219#define INTR_STATUS0__PROGRAM_FAIL 0x0010 219#define INTR_STATUS__DMA_CMD_COMP 0x0004
220#define INTR_STATUS0__ERASE_FAIL 0x0020 220#define INTR_STATUS__TIME_OUT 0x0008
221#define INTR_STATUS0__LOAD_COMP 0x0040 221#define INTR_STATUS__PROGRAM_FAIL 0x0010
222#define INTR_STATUS0__PROGRAM_COMP 0x0080 222#define INTR_STATUS__ERASE_FAIL 0x0020
223#define INTR_STATUS0__ERASE_COMP 0x0100 223#define INTR_STATUS__LOAD_COMP 0x0040
224#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200 224#define INTR_STATUS__PROGRAM_COMP 0x0080
225#define INTR_STATUS0__LOCKED_BLK 0x0400 225#define INTR_STATUS__ERASE_COMP 0x0100
226#define INTR_STATUS0__UNSUP_CMD 0x0800 226#define INTR_STATUS__PIPE_CPYBCK_CMD_COMP 0x0200
227#define INTR_STATUS0__INT_ACT 0x1000 227#define INTR_STATUS__LOCKED_BLK 0x0400
228#define INTR_STATUS0__RST_COMP 0x2000 228#define INTR_STATUS__UNSUP_CMD 0x0800
229#define INTR_STATUS0__PIPE_CMD_ERR 0x4000 229#define INTR_STATUS__INT_ACT 0x1000
230#define INTR_STATUS0__PAGE_XFER_INC 0x8000 230#define INTR_STATUS__RST_COMP 0x2000
231 231#define INTR_STATUS__PIPE_CMD_ERR 0x4000
232#define INTR_EN0 0x420 232#define INTR_STATUS__PAGE_XFER_INC 0x8000
233#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001 233
234#define INTR_EN0__ECC_ERR 0x0002 234#define INTR_EN__ECC_TRANSACTION_DONE 0x0001
235#define INTR_EN0__DMA_CMD_COMP 0x0004 235#define INTR_EN__ECC_ERR 0x0002
236#define INTR_EN0__TIME_OUT 0x0008 236#define INTR_EN__DMA_CMD_COMP 0x0004
237#define INTR_EN0__PROGRAM_FAIL 0x0010 237#define INTR_EN__TIME_OUT 0x0008
238#define INTR_EN0__ERASE_FAIL 0x0020 238#define INTR_EN__PROGRAM_FAIL 0x0010
239#define INTR_EN0__LOAD_COMP 0x0040 239#define INTR_EN__ERASE_FAIL 0x0020
240#define INTR_EN0__PROGRAM_COMP 0x0080 240#define INTR_EN__LOAD_COMP 0x0040
241#define INTR_EN0__ERASE_COMP 0x0100 241#define INTR_EN__PROGRAM_COMP 0x0080
242#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200 242#define INTR_EN__ERASE_COMP 0x0100
243#define INTR_EN0__LOCKED_BLK 0x0400 243#define INTR_EN__PIPE_CPYBCK_CMD_COMP 0x0200
244#define INTR_EN0__UNSUP_CMD 0x0800 244#define INTR_EN__LOCKED_BLK 0x0400
245#define INTR_EN0__INT_ACT 0x1000 245#define INTR_EN__UNSUP_CMD 0x0800
246#define INTR_EN0__RST_COMP 0x2000 246#define INTR_EN__INT_ACT 0x1000
247#define INTR_EN0__PIPE_CMD_ERR 0x4000 247#define INTR_EN__RST_COMP 0x2000
248#define INTR_EN0__PAGE_XFER_INC 0x8000 248#define INTR_EN__PIPE_CMD_ERR 0x4000
249 249#define INTR_EN__PAGE_XFER_INC 0x8000
250#define PAGE_CNT0 0x430 250
251#define PAGE_CNT0__VALUE 0x00ff 251#define PAGE_CNT(__bank) (0x430 + ((__bank) * 0x50))
252 252#define ERR_PAGE_ADDR(__bank) (0x440 + ((__bank) * 0x50))
253#define ERR_PAGE_ADDR0 0x440 253#define ERR_BLOCK_ADDR(__bank) (0x450 + ((__bank) * 0x50))
254#define ERR_PAGE_ADDR0__VALUE 0xffff
255
256#define ERR_BLOCK_ADDR0 0x450
257#define ERR_BLOCK_ADDR0__VALUE 0xffff
258
259#define INTR_STATUS1 0x460
260#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
261#define INTR_STATUS1__ECC_ERR 0x0002
262#define INTR_STATUS1__DMA_CMD_COMP 0x0004
263#define INTR_STATUS1__TIME_OUT 0x0008
264#define INTR_STATUS1__PROGRAM_FAIL 0x0010
265#define INTR_STATUS1__ERASE_FAIL 0x0020
266#define INTR_STATUS1__LOAD_COMP 0x0040
267#define INTR_STATUS1__PROGRAM_COMP 0x0080
268#define INTR_STATUS1__ERASE_COMP 0x0100
269#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
270#define INTR_STATUS1__LOCKED_BLK 0x0400
271#define INTR_STATUS1__UNSUP_CMD 0x0800
272#define INTR_STATUS1__INT_ACT 0x1000
273#define INTR_STATUS1__RST_COMP 0x2000
274#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
275#define INTR_STATUS1__PAGE_XFER_INC 0x8000
276
277#define INTR_EN1 0x470
278#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
279#define INTR_EN1__ECC_ERR 0x0002
280#define INTR_EN1__DMA_CMD_COMP 0x0004
281#define INTR_EN1__TIME_OUT 0x0008
282#define INTR_EN1__PROGRAM_FAIL 0x0010
283#define INTR_EN1__ERASE_FAIL 0x0020
284#define INTR_EN1__LOAD_COMP 0x0040
285#define INTR_EN1__PROGRAM_COMP 0x0080
286#define INTR_EN1__ERASE_COMP 0x0100
287#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
288#define INTR_EN1__LOCKED_BLK 0x0400
289#define INTR_EN1__UNSUP_CMD 0x0800
290#define INTR_EN1__INT_ACT 0x1000
291#define INTR_EN1__RST_COMP 0x2000
292#define INTR_EN1__PIPE_CMD_ERR 0x4000
293#define INTR_EN1__PAGE_XFER_INC 0x8000
294
295#define PAGE_CNT1 0x480
296#define PAGE_CNT1__VALUE 0x00ff
297
298#define ERR_PAGE_ADDR1 0x490
299#define ERR_PAGE_ADDR1__VALUE 0xffff
300
301#define ERR_BLOCK_ADDR1 0x4a0
302#define ERR_BLOCK_ADDR1__VALUE 0xffff
303
304#define INTR_STATUS2 0x4b0
305#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
306#define INTR_STATUS2__ECC_ERR 0x0002
307#define INTR_STATUS2__DMA_CMD_COMP 0x0004
308#define INTR_STATUS2__TIME_OUT 0x0008
309#define INTR_STATUS2__PROGRAM_FAIL 0x0010
310#define INTR_STATUS2__ERASE_FAIL 0x0020
311#define INTR_STATUS2__LOAD_COMP 0x0040
312#define INTR_STATUS2__PROGRAM_COMP 0x0080
313#define INTR_STATUS2__ERASE_COMP 0x0100
314#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
315#define INTR_STATUS2__LOCKED_BLK 0x0400
316#define INTR_STATUS2__UNSUP_CMD 0x0800
317#define INTR_STATUS2__INT_ACT 0x1000
318#define INTR_STATUS2__RST_COMP 0x2000
319#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
320#define INTR_STATUS2__PAGE_XFER_INC 0x8000
321
322#define INTR_EN2 0x4c0
323#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
324#define INTR_EN2__ECC_ERR 0x0002
325#define INTR_EN2__DMA_CMD_COMP 0x0004
326#define INTR_EN2__TIME_OUT 0x0008
327#define INTR_EN2__PROGRAM_FAIL 0x0010
328#define INTR_EN2__ERASE_FAIL 0x0020
329#define INTR_EN2__LOAD_COMP 0x0040
330#define INTR_EN2__PROGRAM_COMP 0x0080
331#define INTR_EN2__ERASE_COMP 0x0100
332#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
333#define INTR_EN2__LOCKED_BLK 0x0400
334#define INTR_EN2__UNSUP_CMD 0x0800
335#define INTR_EN2__INT_ACT 0x1000
336#define INTR_EN2__RST_COMP 0x2000
337#define INTR_EN2__PIPE_CMD_ERR 0x4000
338#define INTR_EN2__PAGE_XFER_INC 0x8000
339
340#define PAGE_CNT2 0x4d0
341#define PAGE_CNT2__VALUE 0x00ff
342
343#define ERR_PAGE_ADDR2 0x4e0
344#define ERR_PAGE_ADDR2__VALUE 0xffff
345
346#define ERR_BLOCK_ADDR2 0x4f0
347#define ERR_BLOCK_ADDR2__VALUE 0xffff
348
349#define INTR_STATUS3 0x500
350#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
351#define INTR_STATUS3__ECC_ERR 0x0002
352#define INTR_STATUS3__DMA_CMD_COMP 0x0004
353#define INTR_STATUS3__TIME_OUT 0x0008
354#define INTR_STATUS3__PROGRAM_FAIL 0x0010
355#define INTR_STATUS3__ERASE_FAIL 0x0020
356#define INTR_STATUS3__LOAD_COMP 0x0040
357#define INTR_STATUS3__PROGRAM_COMP 0x0080
358#define INTR_STATUS3__ERASE_COMP 0x0100
359#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
360#define INTR_STATUS3__LOCKED_BLK 0x0400
361#define INTR_STATUS3__UNSUP_CMD 0x0800
362#define INTR_STATUS3__INT_ACT 0x1000
363#define INTR_STATUS3__RST_COMP 0x2000
364#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
365#define INTR_STATUS3__PAGE_XFER_INC 0x8000
366
367#define INTR_EN3 0x510
368#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
369#define INTR_EN3__ECC_ERR 0x0002
370#define INTR_EN3__DMA_CMD_COMP 0x0004
371#define INTR_EN3__TIME_OUT 0x0008
372#define INTR_EN3__PROGRAM_FAIL 0x0010
373#define INTR_EN3__ERASE_FAIL 0x0020
374#define INTR_EN3__LOAD_COMP 0x0040
375#define INTR_EN3__PROGRAM_COMP 0x0080
376#define INTR_EN3__ERASE_COMP 0x0100
377#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
378#define INTR_EN3__LOCKED_BLK 0x0400
379#define INTR_EN3__UNSUP_CMD 0x0800
380#define INTR_EN3__INT_ACT 0x1000
381#define INTR_EN3__RST_COMP 0x2000
382#define INTR_EN3__PIPE_CMD_ERR 0x4000
383#define INTR_EN3__PAGE_XFER_INC 0x8000
384
385#define PAGE_CNT3 0x520
386#define PAGE_CNT3__VALUE 0x00ff
387
388#define ERR_PAGE_ADDR3 0x530
389#define ERR_PAGE_ADDR3__VALUE 0xffff
390
391#define ERR_BLOCK_ADDR3 0x540
392#define ERR_BLOCK_ADDR3__VALUE 0xffff
393 254
394#define DATA_INTR 0x550 255#define DATA_INTR 0x550
395#define DATA_INTR__WRITE_SPACE_AV 0x0001 256#define DATA_INTR__WRITE_SPACE_AV 0x0001
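
The header shrinks by roughly 300 lines because every per-bank register block is a copy at a fixed stride, so one parameterized macro family replaces the INTR_*0..3 and PERM_SRC_ID_0..6 clones. The two strides the new macros encode, with spot checks against the old constants:

	/* interrupt block, 0x50 bytes per bank:
	 *   INTR_STATUS(b)    0x410 + b*0x50  (INTR_STATUS(1) == 0x460 == INTR_STATUS1)
	 *   INTR_EN(b)        0x420 + b*0x50
	 *   PAGE_CNT(b)       0x430 + b*0x50
	 *   ERR_PAGE_ADDR(b)  0x440 + b*0x50
	 *   ERR_BLOCK_ADDR(b) 0x450 + b*0x50
	 * partition block, 0x40 bytes per bank:
	 *   PERM_SRC_ID(b)    0x830 + b*0x40  (PERM_SRC_ID(1) == 0x870 == PERM_SRC_ID_1)
	 *   MIN_BLK_ADDR(b)   0x840 + b*0x40
	 *   MAX_BLK_ADDR(b)   0x850 + b*0x40
	 *   MIN_MAX_BANK(b)   0x860 + b*0x40
	 * the bit-field names drop their bank suffix (INTR_STATUS__*,
	 * PERM_SRC_ID__SRCID, ...) since the layouts were identical. */
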
@@ -484,141 +345,23 @@
484#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010 345#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
485#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020 346#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
486 347
487#define PERM_SRC_ID_0 0x830 348#define PERM_SRC_ID(__bank) (0x830 + ((__bank) * 0x40))
488#define PERM_SRC_ID_0__SRCID 0x00ff 349#define PERM_SRC_ID__SRCID 0x00ff
489#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800 350#define PERM_SRC_ID__DIRECT_ACCESS_ACTIVE 0x0800
490#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000 351#define PERM_SRC_ID__WRITE_ACTIVE 0x2000
491#define PERM_SRC_ID_0__READ_ACTIVE 0x4000 352#define PERM_SRC_ID__READ_ACTIVE 0x4000
492#define PERM_SRC_ID_0__PARTITION_VALID 0x8000 353#define PERM_SRC_ID__PARTITION_VALID 0x8000
493 354
494#define MIN_BLK_ADDR_0 0x840 355#define MIN_BLK_ADDR(__bank) (0x840 + ((__bank) * 0x40))
495#define MIN_BLK_ADDR_0__VALUE 0xffff 356#define MIN_BLK_ADDR__VALUE 0xffff
496 357
497#define MAX_BLK_ADDR_0 0x850 358#define MAX_BLK_ADDR(__bank) (0x850 + ((__bank) * 0x40))
498#define MAX_BLK_ADDR_0__VALUE 0xffff 359#define MAX_BLK_ADDR__VALUE 0xffff
499 360
500#define MIN_MAX_BANK_0 0x860 361#define MIN_MAX_BANK(__bank) (0x860 + ((__bank) * 0x40))
501#define MIN_MAX_BANK_0__MIN_VALUE 0x0003 362#define MIN_MAX_BANK__MIN_VALUE 0x0003
502#define MIN_MAX_BANK_0__MAX_VALUE 0x000c 363#define MIN_MAX_BANK__MAX_VALUE 0x000c
503
504#define PERM_SRC_ID_1 0x870
505#define PERM_SRC_ID_1__SRCID 0x00ff
506#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
507#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
508#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
509#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
510
511#define MIN_BLK_ADDR_1 0x880
512#define MIN_BLK_ADDR_1__VALUE 0xffff
513
514#define MAX_BLK_ADDR_1 0x890
515#define MAX_BLK_ADDR_1__VALUE 0xffff
516
517#define MIN_MAX_BANK_1 0x8a0
518#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
519#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
520
521#define PERM_SRC_ID_2 0x8b0
522#define PERM_SRC_ID_2__SRCID 0x00ff
523#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
524#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
525#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
526#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
527
528#define MIN_BLK_ADDR_2 0x8c0
529#define MIN_BLK_ADDR_2__VALUE 0xffff
530
531#define MAX_BLK_ADDR_2 0x8d0
532#define MAX_BLK_ADDR_2__VALUE 0xffff
533
534#define MIN_MAX_BANK_2 0x8e0
535#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
536#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
537
538#define PERM_SRC_ID_3 0x8f0
539#define PERM_SRC_ID_3__SRCID 0x00ff
540#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
541#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
542#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
543#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
544
545#define MIN_BLK_ADDR_3 0x900
546#define MIN_BLK_ADDR_3__VALUE 0xffff
547
548#define MAX_BLK_ADDR_3 0x910
549#define MAX_BLK_ADDR_3__VALUE 0xffff
550
551#define MIN_MAX_BANK_3 0x920
552#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
553#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
554
555#define PERM_SRC_ID_4 0x930
556#define PERM_SRC_ID_4__SRCID 0x00ff
557#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
558#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
559#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
560#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
561
562#define MIN_BLK_ADDR_4 0x940
563#define MIN_BLK_ADDR_4__VALUE 0xffff
564
565#define MAX_BLK_ADDR_4 0x950
566#define MAX_BLK_ADDR_4__VALUE 0xffff
567
568#define MIN_MAX_BANK_4 0x960
569#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
570#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
571
572#define PERM_SRC_ID_5 0x970
573#define PERM_SRC_ID_5__SRCID 0x00ff
574#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
575#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
576#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
577#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
578
579#define MIN_BLK_ADDR_5 0x980
580#define MIN_BLK_ADDR_5__VALUE 0xffff
581
582#define MAX_BLK_ADDR_5 0x990
583#define MAX_BLK_ADDR_5__VALUE 0xffff
584
585#define MIN_MAX_BANK_5 0x9a0
586#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
587#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
588
589#define PERM_SRC_ID_6 0x9b0
590#define PERM_SRC_ID_6__SRCID 0x00ff
591#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
592#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
593#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
594#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
595
596#define MIN_BLK_ADDR_6 0x9c0
597#define MIN_BLK_ADDR_6__VALUE 0xffff
598
599#define MAX_BLK_ADDR_6 0x9d0
600#define MAX_BLK_ADDR_6__VALUE 0xffff
601
602#define MIN_MAX_BANK_6 0x9e0
603#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
604#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
605
606#define PERM_SRC_ID_7 0x9f0
607#define PERM_SRC_ID_7__SRCID 0x00ff
608#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
609#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
610#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
611#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
612 364
613#define MIN_BLK_ADDR_7 0xa00
614#define MIN_BLK_ADDR_7__VALUE 0xffff
615
616#define MAX_BLK_ADDR_7 0xa10
617#define MAX_BLK_ADDR_7__VALUE 0xffff
618
619#define MIN_MAX_BANK_7 0xa20
620#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
621#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
622 365
623/* ffsdefs.h */ 366/* ffsdefs.h */
624#define CLEAR 0 /*use this to clear a field instead of "fail"*/ 367#define CLEAR 0 /*use this to clear a field instead of "fail"*/
@@ -711,7 +454,6 @@
711#define READ_WRITE_ENABLE_HIGH_COUNT 22 454#define READ_WRITE_ENABLE_HIGH_COUNT 22
712 455
713#define ECC_SECTOR_SIZE 512 456#define ECC_SECTOR_SIZE 512
714#define LLD_MAX_FLASH_BANKS 4
715 457
716#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) 458#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
717 459
@@ -732,7 +474,7 @@ struct denali_nand_info {
732 int status; 474 int status;
733 int platform; 475 int platform;
734 struct nand_buf buf; 476 struct nand_buf buf;
735 struct pci_dev *dev; 477 struct device *dev;
736 int total_used_banks; 478 int total_used_banks;
737 uint32_t block; /* stored for future use */ 479 uint32_t block; /* stored for future use */
738 uint16_t page; 480 uint16_t page;
@@ -751,6 +493,7 @@ struct denali_nand_info {
751 uint32_t totalblks; 493 uint32_t totalblks;
752 uint32_t blksperchip; 494 uint32_t blksperchip;
753 uint32_t bbtskipbytes; 495 uint32_t bbtskipbytes;
496 uint32_t max_banks;
754}; 497};
755 498
756#endif /*_LLD_NAND_*/ 499#endif /*_LLD_NAND_*/
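
The denali.h hunks above replace eight hand-written register blocks (PERM_SRC_ID_0..7 and their MIN/MAX companions, spaced 0x40 apart) with bank-parameterized macros, drop the LLD_MAX_FLASH_BANKS constant in favour of the new runtime max_banks field, and switch the private struct from a bare pci_dev to a generic struct device. A standalone check (plain userspace C, not driver code) that the new formulas reproduce every deleted address:

#include <stdio.h>

/* Same formulas as the new denali.h macros. */
#define PERM_SRC_ID(__bank)  (0x830 + ((__bank) * 0x40))
#define MIN_BLK_ADDR(__bank) (0x840 + ((__bank) * 0x40))
#define MAX_BLK_ADDR(__bank) (0x850 + ((__bank) * 0x40))
#define MIN_MAX_BANK(__bank) (0x860 + ((__bank) * 0x40))

int main(void)
{
	int bank;

	/* bank 0 yields 0x830/0x840/0x850/0x860, matching the deleted
	 * *_0 defines; bank 7 yields 0x9f0/0xa00/0xa10/0xa20, matching
	 * the deleted *_7 defines, so the 0x40 stride covers all eight
	 * per-bank copies. */
	for (bank = 0; bank < 8; bank++)
		printf("bank %d: PERM_SRC_ID=0x%03x MIN_BLK=0x%03x MAX_BLK=0x%03x MIN_MAX=0x%03x\n",
		       bank, PERM_SRC_ID(bank), MIN_BLK_ADDR(bank),
		       MAX_BLK_ADDR(bank), MIN_MAX_BANK(bank));
	return 0;
}
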
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 657b9f4b6f9b..7837728d02ff 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -1360,11 +1360,9 @@ static int __init nftl_scan_bbt(struct mtd_info *mtd)
1360 At least as nand_bbt.c is currently written. */ 1360 At least as nand_bbt.c is currently written. */
1361 if ((ret = nand_scan_bbt(mtd, NULL))) 1361 if ((ret = nand_scan_bbt(mtd, NULL)))
1362 return ret; 1362 return ret;
1363 add_mtd_device(mtd); 1363 mtd_device_register(mtd, NULL, 0);
1364#ifdef CONFIG_MTD_PARTITIONS
1365 if (!no_autopart) 1364 if (!no_autopart)
1366 add_mtd_partitions(mtd, parts, numparts); 1365 mtd_device_register(mtd, parts, numparts);
1367#endif
1368 return 0; 1366 return 0;
1369} 1367}
1370 1368
@@ -1419,11 +1417,9 @@ static int __init inftl_scan_bbt(struct mtd_info *mtd)
1419 autopartitioning, but I want to give it more thought. */ 1417 autopartitioning, but I want to give it more thought. */
1420 if (!numparts) 1418 if (!numparts)
1421 return -EIO; 1419 return -EIO;
1422 add_mtd_device(mtd); 1420 mtd_device_register(mtd, NULL, 0);
1423#ifdef CONFIG_MTD_PARTITIONS
1424 if (!no_autopart) 1421 if (!no_autopart)
1425 add_mtd_partitions(mtd, parts, numparts); 1422 mtd_device_register(mtd, parts, numparts);
1426#endif
1427 return 0; 1423 return 0;
1428} 1424}
1429 1425
@@ -1678,9 +1674,9 @@ static int __init doc_probe(unsigned long physadr)
1678 /* DBB note: i believe nand_release is necessary here, as 1674 /* DBB note: i believe nand_release is necessary here, as
1679 buffers may have been allocated in nand_base. Check with 1675 buffers may have been allocated in nand_base. Check with
1680 Thomas. FIX ME! */ 1676 Thomas. FIX ME! */
1681 /* nand_release will call del_mtd_device, but we haven't yet 1677 /* nand_release will call mtd_device_unregister, but we
1682 added it. This is handled without incident by 1678 haven't yet added it. This is handled without incident by
1683 del_mtd_device, as far as I can tell. */ 1679 mtd_device_unregister, as far as I can tell. */
1684 nand_release(mtd); 1680 nand_release(mtd);
1685 kfree(mtd); 1681 kfree(mtd);
1686 goto fail; 1682 goto fail;
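
The diskonchip.c hunks are the first instance of the conversion repeated through the rest of this patch: the #ifdef CONFIG_MTD_PARTITIONS split between add_mtd_partitions() and add_mtd_device() collapses into one unconditional mtd_device_register() call. A minimal sketch of the resulting driver-side pattern, assuming the helper's contemporary behaviour (partitions registered when a table is supplied, the whole master device otherwise):

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Sketch only; error handling and driver state are elided. */
static int example_register(struct mtd_info *mtd,
			    struct mtd_partition *parts, int nr_parts)
{
	/* nr_parts > 0: behaves like the old add_mtd_partitions();
	 * nr_parts == 0: registers mtd itself, like add_mtd_device().
	 * Either way the #ifdef in every caller disappears. */
	return mtd_device_register(mtd, parts, nr_parts);
}

This is also why several later hunks (fsl_elbc, fsl_upm, jz4740, mpc5121, ndfc, orion and others) can feed the return value of parse_mtd_partitions(), a partition count that may legitimately be zero once negative errors are weeded out, straight into mtd_device_register().
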
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
index 86366bfba9f8..8400d0f6dada 100644
--- a/drivers/mtd/nand/edb7312.c
+++ b/drivers/mtd/nand/edb7312.c
@@ -55,7 +55,6 @@ static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE;
55static void __iomem *ep7312_pxdr = (void __iomem *)EP7312_PXDR; 55static void __iomem *ep7312_pxdr = (void __iomem *)EP7312_PXDR;
56static void __iomem *ep7312_pxddr = (void __iomem *)EP7312_PXDDR; 56static void __iomem *ep7312_pxddr = (void __iomem *)EP7312_PXDDR;
57 57
58#ifdef CONFIG_MTD_PARTITIONS
59/* 58/*
60 * Define static partitions for flash device 59 * Define static partitions for flash device
61 */ 60 */
@@ -67,8 +66,6 @@ static struct mtd_partition partition_info[] = {
67 66
68#define NUM_PARTITIONS 1 67#define NUM_PARTITIONS 1
69 68
70#endif
71
72/* 69/*
73 * hardware specific access to control-lines 70 * hardware specific access to control-lines
74 * 71 *
@@ -101,9 +98,7 @@ static int ep7312_device_ready(struct mtd_info *mtd)
101 return 1; 98 return 1;
102} 99}
103 100
104#ifdef CONFIG_MTD_PARTITIONS
105const char *part_probes[] = { "cmdlinepart", NULL }; 101const char *part_probes[] = { "cmdlinepart", NULL };
106#endif
107 102
108/* 103/*
109 * Main initialization routine 104 * Main initialization routine
@@ -162,14 +157,12 @@ static int __init ep7312_init(void)
162 kfree(ep7312_mtd); 157 kfree(ep7312_mtd);
163 return -ENXIO; 158 return -ENXIO;
164 } 159 }
165#ifdef CONFIG_MTD_PARTITIONS
166 ep7312_mtd->name = "edb7312-nand"; 160 ep7312_mtd->name = "edb7312-nand";
167 mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, &mtd_parts, 0); 161 mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, &mtd_parts, 0);
168 if (mtd_parts_nb > 0) 162 if (mtd_parts_nb > 0)
169 part_type = "command line"; 163 part_type = "command line";
170 else 164 else
171 mtd_parts_nb = 0; 165 mtd_parts_nb = 0;
172#endif
173 if (mtd_parts_nb == 0) { 166 if (mtd_parts_nb == 0) {
174 mtd_parts = partition_info; 167 mtd_parts = partition_info;
175 mtd_parts_nb = NUM_PARTITIONS; 168 mtd_parts_nb = NUM_PARTITIONS;
@@ -178,7 +171,7 @@ static int __init ep7312_init(void)
178 171
179 /* Register the partitions */ 172 /* Register the partitions */
180 printk(KERN_NOTICE "Using %s partition definition\n", part_type); 173 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
181 add_mtd_partitions(ep7312_mtd, mtd_parts, mtd_parts_nb); 174 mtd_device_register(ep7312_mtd, mtd_parts, mtd_parts_nb);
182 175
183 /* Return happy */ 176 /* Return happy */
184 return 0; 177 return 0;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 537e380b8dcb..0bb254c7d2b1 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -841,12 +841,9 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
841 struct fsl_elbc_mtd *priv; 841 struct fsl_elbc_mtd *priv;
842 struct resource res; 842 struct resource res;
843 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl; 843 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
844
845#ifdef CONFIG_MTD_PARTITIONS
846 static const char *part_probe_types[] 844 static const char *part_probe_types[]
847 = { "cmdlinepart", "RedBoot", NULL }; 845 = { "cmdlinepart", "RedBoot", NULL };
848 struct mtd_partition *parts; 846 struct mtd_partition *parts;
849#endif
850 int ret; 847 int ret;
851 int bank; 848 int bank;
852 struct device *dev; 849 struct device *dev;
@@ -935,26 +932,19 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
935 if (ret) 932 if (ret)
936 goto err; 933 goto err;
937 934
938#ifdef CONFIG_MTD_PARTITIONS
939 /* First look for RedBoot table or partitions on the command 935 /* First look for RedBoot table or partitions on the command
940 * line, these take precedence over device tree information */ 936 * line, these take precedence over device tree information */
941 ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0); 937 ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0);
942 if (ret < 0) 938 if (ret < 0)
943 goto err; 939 goto err;
944 940
945#ifdef CONFIG_MTD_OF_PARTS
946 if (ret == 0) { 941 if (ret == 0) {
947 ret = of_mtd_parse_partitions(priv->dev, node, &parts); 942 ret = of_mtd_parse_partitions(priv->dev, node, &parts);
948 if (ret < 0) 943 if (ret < 0)
949 goto err; 944 goto err;
950 } 945 }
951#endif
952 946
953 if (ret > 0) 947 mtd_device_register(&priv->mtd, parts, ret);
954 add_mtd_partitions(&priv->mtd, parts, ret);
955 else
956#endif
957 add_mtd_device(&priv->mtd);
958 948
959 printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n", 949 printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
960 (unsigned long long)res.start, priv->bank); 950 (unsigned long long)res.start, priv->bank);
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 073ee026a17c..23752fd5bc59 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -33,10 +33,7 @@ struct fsl_upm_nand {
33 struct mtd_info mtd; 33 struct mtd_info mtd;
34 struct nand_chip chip; 34 struct nand_chip chip;
35 int last_ctrl; 35 int last_ctrl;
36#ifdef CONFIG_MTD_PARTITIONS
37 struct mtd_partition *parts; 36 struct mtd_partition *parts;
38#endif
39
40 struct fsl_upm upm; 37 struct fsl_upm upm;
41 uint8_t upm_addr_offset; 38 uint8_t upm_addr_offset;
42 uint8_t upm_cmd_offset; 39 uint8_t upm_cmd_offset;
@@ -161,9 +158,7 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
161{ 158{
162 int ret; 159 int ret;
163 struct device_node *flash_np; 160 struct device_node *flash_np;
164#ifdef CONFIG_MTD_PARTITIONS
165 static const char *part_types[] = { "cmdlinepart", NULL, }; 161 static const char *part_types[] = { "cmdlinepart", NULL, };
166#endif
167 162
168 fun->chip.IO_ADDR_R = fun->io_base; 163 fun->chip.IO_ADDR_R = fun->io_base;
169 fun->chip.IO_ADDR_W = fun->io_base; 164 fun->chip.IO_ADDR_W = fun->io_base;
@@ -197,7 +192,6 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
197 if (ret) 192 if (ret)
198 goto err; 193 goto err;
199 194
200#ifdef CONFIG_MTD_PARTITIONS
201 ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0); 195 ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
202 196
203#ifdef CONFIG_MTD_OF_PARTS 197#ifdef CONFIG_MTD_OF_PARTS
@@ -207,11 +201,7 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
207 goto err; 201 goto err;
208 } 202 }
209#endif 203#endif
210 if (ret > 0) 204 ret = mtd_device_register(&fun->mtd, fun->parts, ret);
211 ret = add_mtd_partitions(&fun->mtd, fun->parts, ret);
212 else
213#endif
214 ret = add_mtd_device(&fun->mtd);
215err: 205err:
216 of_node_put(flash_np); 206 of_node_put(flash_np);
217 return ret; 207 return ret;
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 0d45ef3883e8..e9b275ac381c 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -120,8 +120,6 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
120 } 120 }
121}; 121};
122 122
123
124#ifdef CONFIG_MTD_PARTITIONS
125/* 123/*
126 * Default partition tables to be used if the partition information not 124 * Default partition tables to be used if the partition information not
127 * provided through platform data. 125 * provided through platform data.
@@ -182,7 +180,6 @@ static struct mtd_partition partition_info_128KB_blk[] = {
182#ifdef CONFIG_MTD_CMDLINE_PARTS 180#ifdef CONFIG_MTD_CMDLINE_PARTS
183const char *part_probes[] = { "cmdlinepart", NULL }; 181const char *part_probes[] = { "cmdlinepart", NULL };
184#endif 182#endif
185#endif
186 183
187/** 184/**
188 * struct fsmc_nand_data - structure for FSMC NAND device state 185 * struct fsmc_nand_data - structure for FSMC NAND device state
@@ -719,7 +716,6 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
719 * platform data, 716 * platform data,
720 * default partition information present in driver. 717 * default partition information present in driver.
721 */ 718 */
722#ifdef CONFIG_MTD_PARTITIONS
723#ifdef CONFIG_MTD_CMDLINE_PARTS 719#ifdef CONFIG_MTD_CMDLINE_PARTS
724 /* 720 /*
725 * Check if partition info passed via command line 721 * Check if partition info passed via command line
@@ -777,19 +773,10 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
777 } 773 }
778#endif 774#endif
779 775
780 if (host->partitions) { 776 ret = mtd_device_register(&host->mtd, host->partitions,
781 ret = add_mtd_partitions(&host->mtd, host->partitions, 777 host->nr_partitions);
782 host->nr_partitions); 778 if (ret)
783 if (ret)
784 goto err_probe;
785 }
786#else
787 dev_info(&pdev->dev, "Registering %s as whole device\n", mtd->name);
788 if (!add_mtd_device(mtd)) {
789 ret = -ENXIO;
790 goto err_probe; 779 goto err_probe;
791 }
792#endif
793 780
794 platform_set_drvdata(pdev, host); 781 platform_set_drvdata(pdev, host);
795 dev_info(&pdev->dev, "FSMC NAND driver registration successful\n"); 782 dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
@@ -835,11 +822,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
835 platform_set_drvdata(pdev, NULL); 822 platform_set_drvdata(pdev, NULL);
836 823
837 if (host) { 824 if (host) {
838#ifdef CONFIG_MTD_PARTITIONS 825 mtd_device_unregister(&host->mtd);
839 del_mtd_partitions(&host->mtd);
840#else
841 del_mtd_device(&host->mtd);
842#endif
843 clk_disable(host->clk); 826 clk_disable(host->clk);
844 clk_put(host->clk); 827 clk_put(host->clk);
845 828
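
fsmc_nand.c also shows the teardown side of the same conversion: del_mtd_partitions()/del_mtd_device() under #ifdef give way to a single mtd_device_unregister(), which undoes whatever mtd_device_register() created. A hedged sketch of the paired remove path:

#include <linux/mtd/mtd.h>

/* Sketch: the remove path no longer needs to know whether the probe
 * path registered partitions or the whole device. */
static void example_teardown(struct mtd_info *mtd)
{
	mtd_device_unregister(mtd);
}
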
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 0cde618bcc1e..2c2060b2800e 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -316,8 +316,8 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
316 gpiomtd->plat.adjust_parts(&gpiomtd->plat, 316 gpiomtd->plat.adjust_parts(&gpiomtd->plat,
317 gpiomtd->mtd_info.size); 317 gpiomtd->mtd_info.size);
318 318
319 add_mtd_partitions(&gpiomtd->mtd_info, gpiomtd->plat.parts, 319 mtd_device_register(&gpiomtd->mtd_info, gpiomtd->plat.parts,
320 gpiomtd->plat.num_parts); 320 gpiomtd->plat.num_parts);
321 platform_set_drvdata(dev, gpiomtd); 321 platform_set_drvdata(dev, gpiomtd);
322 322
323 return 0; 323 return 0;
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index f8ce79b446ed..02a03e67109c 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -38,7 +38,6 @@ static struct mtd_info *h1910_nand_mtd = NULL;
38 * Module stuff 38 * Module stuff
39 */ 39 */
40 40
41#ifdef CONFIG_MTD_PARTITIONS
42/* 41/*
43 * Define static partitions for flash device 42 * Define static partitions for flash device
44 */ 43 */
@@ -50,8 +49,6 @@ static struct mtd_partition partition_info[] = {
50 49
51#define NUM_PARTITIONS 1 50#define NUM_PARTITIONS 1
52 51
53#endif
54
55/* 52/*
56 * hardware specific access to control-lines 53 * hardware specific access to control-lines
57 * 54 *
@@ -154,7 +151,7 @@ static int __init h1910_init(void)
154 151
155 /* Register the partitions */ 152 /* Register the partitions */
156 printk(KERN_NOTICE "Using %s partition definition\n", part_type); 153 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
157 add_mtd_partitions(h1910_nand_mtd, mtd_parts, mtd_parts_nb); 154 mtd_device_register(h1910_nand_mtd, mtd_parts, mtd_parts_nb);
158 155
159 /* Return happy */ 156 /* Return happy */
160 return 0; 157 return 0;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index cea38a5d4ac5..6e813daed068 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -299,10 +299,8 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
299 struct nand_chip *chip; 299 struct nand_chip *chip;
300 struct mtd_info *mtd; 300 struct mtd_info *mtd;
301 struct jz_nand_platform_data *pdata = pdev->dev.platform_data; 301 struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
302#ifdef CONFIG_MTD_PARTITIONS
303 struct mtd_partition *partition_info; 302 struct mtd_partition *partition_info;
304 int num_partitions = 0; 303 int num_partitions = 0;
305#endif
306 304
307 nand = kzalloc(sizeof(*nand), GFP_KERNEL); 305 nand = kzalloc(sizeof(*nand), GFP_KERNEL);
308 if (!nand) { 306 if (!nand) {
@@ -375,7 +373,6 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
375 goto err_gpio_free; 373 goto err_gpio_free;
376 } 374 }
377 375
378#ifdef CONFIG_MTD_PARTITIONS
379#ifdef CONFIG_MTD_CMDLINE_PARTS 376#ifdef CONFIG_MTD_CMDLINE_PARTS
380 num_partitions = parse_mtd_partitions(mtd, part_probes, 377 num_partitions = parse_mtd_partitions(mtd, part_probes,
381 &partition_info, 0); 378 &partition_info, 0);
@@ -384,12 +381,7 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
384 num_partitions = pdata->num_partitions; 381 num_partitions = pdata->num_partitions;
385 partition_info = pdata->partitions; 382 partition_info = pdata->partitions;
386 } 383 }
387 384 ret = mtd_device_register(mtd, partition_info, num_partitions);
388 if (num_partitions > 0)
389 ret = add_mtd_partitions(mtd, partition_info, num_partitions);
390 else
391#endif
392 ret = add_mtd_device(mtd);
393 385
394 if (ret) { 386 if (ret) {
395 dev_err(&pdev->dev, "Failed to add mtd device\n"); 387 dev_err(&pdev->dev, "Failed to add mtd device\n");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 0b81b5b499d1..2f7c930872f9 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -131,9 +131,7 @@ struct mpc5121_nfc_prv {
131 131
132static void mpc5121_nfc_done(struct mtd_info *mtd); 132static void mpc5121_nfc_done(struct mtd_info *mtd);
133 133
134#ifdef CONFIG_MTD_PARTITIONS
135static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL }; 134static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
136#endif
137 135
138/* Read NFC register */ 136/* Read NFC register */
139static inline u16 nfc_read(struct mtd_info *mtd, uint reg) 137static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
@@ -658,9 +656,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
658 struct mpc5121_nfc_prv *prv; 656 struct mpc5121_nfc_prv *prv;
659 struct resource res; 657 struct resource res;
660 struct mtd_info *mtd; 658 struct mtd_info *mtd;
661#ifdef CONFIG_MTD_PARTITIONS
662 struct mtd_partition *parts; 659 struct mtd_partition *parts;
663#endif
664 struct nand_chip *chip; 660 struct nand_chip *chip;
665 unsigned long regs_paddr, regs_size; 661 unsigned long regs_paddr, regs_size;
666 const __be32 *chips_no; 662 const __be32 *chips_no;
@@ -841,7 +837,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
841 dev_set_drvdata(dev, mtd); 837 dev_set_drvdata(dev, mtd);
842 838
843 /* Register device in MTD */ 839 /* Register device in MTD */
844#ifdef CONFIG_MTD_PARTITIONS
845 retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0); 840 retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0);
846#ifdef CONFIG_MTD_OF_PARTS 841#ifdef CONFIG_MTD_OF_PARTS
847 if (retval == 0) 842 if (retval == 0)
@@ -854,12 +849,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
854 goto error; 849 goto error;
855 } 850 }
856 851
857 if (retval > 0) 852 retval = mtd_device_register(mtd, parts, retval);
858 retval = add_mtd_partitions(mtd, parts, retval);
859 else
860#endif
861 retval = add_mtd_device(mtd);
862
863 if (retval) { 853 if (retval) {
864 dev_err(dev, "Error adding MTD device!\n"); 854 dev_err(dev, "Error adding MTD device!\n");
865 devm_free_irq(dev, prv->irq, mtd); 855 devm_free_irq(dev, prv->irq, mtd);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 42a95fb41504..90df34c4d26c 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -56,8 +56,14 @@
56#define NFC_V1_V2_WRPROT (host->regs + 0x12) 56#define NFC_V1_V2_WRPROT (host->regs + 0x12)
57#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14) 57#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
58#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16) 58#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
59#define NFC_V21_UNLOCKSTART_BLKADDR (host->regs + 0x20) 59#define NFC_V21_UNLOCKSTART_BLKADDR0 (host->regs + 0x20)
60#define NFC_V21_UNLOCKEND_BLKADDR (host->regs + 0x22) 60#define NFC_V21_UNLOCKSTART_BLKADDR1 (host->regs + 0x24)
61#define NFC_V21_UNLOCKSTART_BLKADDR2 (host->regs + 0x28)
62#define NFC_V21_UNLOCKSTART_BLKADDR3 (host->regs + 0x2c)
63#define NFC_V21_UNLOCKEND_BLKADDR0 (host->regs + 0x22)
64#define NFC_V21_UNLOCKEND_BLKADDR1 (host->regs + 0x26)
65#define NFC_V21_UNLOCKEND_BLKADDR2 (host->regs + 0x2a)
66#define NFC_V21_UNLOCKEND_BLKADDR3 (host->regs + 0x2e)
61#define NFC_V1_V2_NF_WRPRST (host->regs + 0x18) 67#define NFC_V1_V2_NF_WRPRST (host->regs + 0x18)
62#define NFC_V1_V2_CONFIG1 (host->regs + 0x1a) 68#define NFC_V1_V2_CONFIG1 (host->regs + 0x1a)
63#define NFC_V1_V2_CONFIG2 (host->regs + 0x1c) 69#define NFC_V1_V2_CONFIG2 (host->regs + 0x1c)
@@ -152,6 +158,7 @@ struct mxc_nand_host {
152 int clk_act; 158 int clk_act;
153 int irq; 159 int irq;
154 int eccsize; 160 int eccsize;
161 int active_cs;
155 162
156 struct completion op_completion; 163 struct completion op_completion;
157 164
@@ -236,9 +243,7 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
236 } 243 }
237}; 244};
238 245
239#ifdef CONFIG_MTD_PARTITIONS
240static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL }; 246static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
241#endif
242 247
243static irqreturn_t mxc_nfc_irq(int irq, void *dev_id) 248static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
244{ 249{
@@ -445,7 +450,7 @@ static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops)
445 for (i = 0; i < bufs; i++) { 450 for (i = 0; i < bufs; i++) {
446 451
447 /* NANDFC buffer 0 is used for page read/write */ 452 /* NANDFC buffer 0 is used for page read/write */
448 writew(i, NFC_V1_V2_BUF_ADDR); 453 writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
449 454
450 writew(ops, NFC_V1_V2_CONFIG2); 455 writew(ops, NFC_V1_V2_CONFIG2);
451 456
@@ -470,7 +475,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
470 struct nand_chip *this = &host->nand; 475 struct nand_chip *this = &host->nand;
471 476
472 /* NANDFC buffer 0 is used for device ID output */ 477 /* NANDFC buffer 0 is used for device ID output */
473 writew(0x0, NFC_V1_V2_BUF_ADDR); 478 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
474 479
475 writew(NFC_ID, NFC_V1_V2_CONFIG2); 480 writew(NFC_ID, NFC_V1_V2_CONFIG2);
476 481
@@ -505,7 +510,7 @@ static uint16_t get_dev_status_v1_v2(struct mxc_nand_host *host)
505 uint32_t store; 510 uint32_t store;
506 uint16_t ret; 511 uint16_t ret;
507 512
508 writew(0x0, NFC_V1_V2_BUF_ADDR); 513 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
509 514
510 /* 515 /*
511 * The device status is stored in main_area0. To 516 * The device status is stored in main_area0. To
@@ -686,24 +691,24 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
686 struct nand_chip *nand_chip = mtd->priv; 691 struct nand_chip *nand_chip = mtd->priv;
687 struct mxc_nand_host *host = nand_chip->priv; 692 struct mxc_nand_host *host = nand_chip->priv;
688 693
689 switch (chip) { 694 if (chip == -1) {
690 case -1:
691 /* Disable the NFC clock */ 695 /* Disable the NFC clock */
692 if (host->clk_act) { 696 if (host->clk_act) {
693 clk_disable(host->clk); 697 clk_disable(host->clk);
694 host->clk_act = 0; 698 host->clk_act = 0;
695 } 699 }
696 break; 700 return;
697 case 0: 701 }
702
703 if (!host->clk_act) {
698 /* Enable the NFC clock */ 704 /* Enable the NFC clock */
699 if (!host->clk_act) { 705 clk_enable(host->clk);
700 clk_enable(host->clk); 706 host->clk_act = 1;
701 host->clk_act = 1; 707 }
702 }
703 break;
704 708
705 default: 709 if (nfc_is_v21()) {
706 break; 710 host->active_cs = chip;
711 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
707 } 712 }
708} 713}
709 714
@@ -834,8 +839,14 @@ static void preset_v1_v2(struct mtd_info *mtd)
834 839
835 /* Blocks to be unlocked */ 840 /* Blocks to be unlocked */
836 if (nfc_is_v21()) { 841 if (nfc_is_v21()) {
837 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR); 842 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
838 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR); 843 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
844 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
845 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
846 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
847 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
848 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
849 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
839 } else if (nfc_is_v1()) { 850 } else if (nfc_is_v1()) {
840 writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR); 851 writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
841 writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR); 852 writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR);
@@ -1200,7 +1211,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1200 irq_control_v1_v2(host, 1); 1211 irq_control_v1_v2(host, 1);
1201 1212
1202 /* first scan to find the device and get the page size */ 1213 /* first scan to find the device and get the page size */
1203 if (nand_scan_ident(mtd, 1, NULL)) { 1214 if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) {
1204 err = -ENXIO; 1215 err = -ENXIO;
1205 goto escan; 1216 goto escan;
1206 } 1217 }
@@ -1220,18 +1231,15 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1220 } 1231 }
1221 1232
1222 /* Register the partitions */ 1233 /* Register the partitions */
1223#ifdef CONFIG_MTD_PARTITIONS
1224 nr_parts = 1234 nr_parts =
1225 parse_mtd_partitions(mtd, part_probes, &host->parts, 0); 1235 parse_mtd_partitions(mtd, part_probes, &host->parts, 0);
1226 if (nr_parts > 0) 1236 if (nr_parts > 0)
1227 add_mtd_partitions(mtd, host->parts, nr_parts); 1237 mtd_device_register(mtd, host->parts, nr_parts);
1228 else if (pdata->parts) 1238 else if (pdata->parts)
1229 add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); 1239 mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
1230 else 1240 else {
1231#endif
1232 {
1233 pr_info("Registering %s as whole device\n", mtd->name); 1241 pr_info("Registering %s as whole device\n", mtd->name);
1234 add_mtd_device(mtd); 1242 mtd_device_register(mtd, NULL, 0);
1235 } 1243 }
1236 1244
1237 platform_set_drvdata(pdev, host); 1245 platform_set_drvdata(pdev, host);
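
The mxc_nand.c changes add multi-chip-select support for NFC v2.1 parts: four unlock start/end register pairs are programmed instead of one, nand_scan_ident() probes up to four chips, and the selected chip is latched in host->active_cs and folded into the buffer-address register on every page, read-ID and status operation. My reading of that encoding, inferred from the hunks above and reduced to a self-contained helper (the low nibble selects the RAM buffer, bits 7:4 the chip select):

#include <stdint.h>

/* Assumed field layout: the value written to NFC_V1_V2_BUF_ADDR is
 * (cs << 4) | buffer. */
static inline uint16_t nfc_buf_addr(int active_cs, int buf)
{
	return (uint16_t)((active_cs << 4) | buf);
}

Page transfers then use nfc_buf_addr(cs, i) per buffer, while read-ID and status use buffer 0, i.e. nfc_buf_addr(cs, 0), exactly as the rewritten writew() calls do.
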
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index c54a4cbac6bc..a46e9bb847bd 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -47,10 +47,7 @@
47#include <linux/bitops.h> 47#include <linux/bitops.h>
48#include <linux/leds.h> 48#include <linux/leds.h>
49#include <linux/io.h> 49#include <linux/io.h>
50
51#ifdef CONFIG_MTD_PARTITIONS
52#include <linux/mtd/partitions.h> 50#include <linux/mtd/partitions.h>
53#endif
54 51
55/* Define default oob placement schemes for large and small page devices */ 52/* Define default oob placement schemes for large and small page devices */
56static struct nand_ecclayout nand_oob_8 = { 53static struct nand_ecclayout nand_oob_8 = {
@@ -976,9 +973,6 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
976 ret = __nand_unlock(mtd, ofs, len, 0); 973 ret = __nand_unlock(mtd, ofs, len, 0);
977 974
978out: 975out:
979 /* de-select the NAND device */
980 chip->select_chip(mtd, -1);
981
982 nand_release_device(mtd); 976 nand_release_device(mtd);
983 977
984 return ret; 978 return ret;
@@ -1046,9 +1040,6 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1046 ret = __nand_unlock(mtd, ofs, len, 0x1); 1040 ret = __nand_unlock(mtd, ofs, len, 0x1);
1047 1041
1048out: 1042out:
1049 /* de-select the NAND device */
1050 chip->select_chip(mtd, -1);
1051
1052 nand_release_device(mtd); 1043 nand_release_device(mtd);
1053 1044
1054 return ret; 1045 return ret;
@@ -3112,6 +3103,8 @@ ident_done:
3112 chip->chip_shift += 32 - 1; 3103 chip->chip_shift += 32 - 1;
3113 } 3104 }
3114 3105
3106 chip->badblockbits = 8;
3107
3115 /* Set the bad block position */ 3108 /* Set the bad block position */
3116 if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16)) 3109 if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
3117 chip->badblockpos = NAND_LARGE_BADBLOCK_POS; 3110 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
@@ -3539,12 +3532,7 @@ void nand_release(struct mtd_info *mtd)
3539 if (chip->ecc.mode == NAND_ECC_SOFT_BCH) 3532 if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
3540 nand_bch_free((struct nand_bch_control *)chip->ecc.priv); 3533 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
3541 3534
3542#ifdef CONFIG_MTD_PARTITIONS 3535 mtd_device_unregister(mtd);
3543 /* Deregister partitions */
3544 del_mtd_partitions(mtd);
3545#endif
3546 /* Deregister the device */
3547 del_mtd_device(mtd);
3548 3536
3549 /* Free bad block table memory */ 3537 /* Free bad block table memory */
3550 kfree(chip->bbt); 3538 kfree(chip->bbt);
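
nand_base.c drops the explicit chip deselect from the nand_lock()/nand_unlock() exit paths, folds nand_release()'s partition and device teardown into mtd_device_unregister(), and initializes chip->badblockbits to 8. As I understand the badblockbits convention, it is the minimum number of set bits a bad-block marker byte may have and still count as good, so 8 demands a pristine 0xff marker; a sketch of that check (my reading, not the kernel code):

#include <stdint.h>

/* Count the 1 bits in the marker byte; fewer than badblockbits set
 * bits means the block is treated as factory-marked bad. */
static int marker_says_bad(uint8_t marker, int badblockbits)
{
	int i, set = 0;

	for (i = 0; i < 8; i++)
		set += (marker >> i) & 1;

	return set < badblockbits;	/* badblockbits == 8 => only 0xff is good */
}
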
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index af46428286fe..ccbeaa1e4a8e 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1276,20 +1276,6 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
1276 * while scanning a device for factory marked good / bad blocks. */ 1276 * while scanning a device for factory marked good / bad blocks. */
1277static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; 1277static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
1278 1278
1279static struct nand_bbt_descr smallpage_flashbased = {
1280 .options = NAND_BBT_SCAN2NDPAGE,
1281 .offs = NAND_SMALL_BADBLOCK_POS,
1282 .len = 1,
1283 .pattern = scan_ff_pattern
1284};
1285
1286static struct nand_bbt_descr largepage_flashbased = {
1287 .options = NAND_BBT_SCAN2NDPAGE,
1288 .offs = NAND_LARGE_BADBLOCK_POS,
1289 .len = 2,
1290 .pattern = scan_ff_pattern
1291};
1292
1293static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 }; 1279static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
1294 1280
1295static struct nand_bbt_descr agand_flashbased = { 1281static struct nand_bbt_descr agand_flashbased = {
@@ -1355,10 +1341,6 @@ static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
1355 * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when 1341 * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
1356 * passed to this function. 1342 * passed to this function.
1357 * 1343 *
1358 * TODO: Handle other flags, replace other static structs
1359 * (e.g. handle NAND_BBT_FLASH for flash-based BBT,
1360 * replace smallpage_flashbased)
1361 *
1362 */ 1344 */
1363static int nand_create_default_bbt_descr(struct nand_chip *this) 1345static int nand_create_default_bbt_descr(struct nand_chip *this)
1364{ 1346{
@@ -1422,15 +1404,14 @@ int nand_default_bbt(struct mtd_info *mtd)
1422 this->bbt_md = &bbt_mirror_descr; 1404 this->bbt_md = &bbt_mirror_descr;
1423 } 1405 }
1424 } 1406 }
1425 if (!this->badblock_pattern) {
1426 this->badblock_pattern = (mtd->writesize > 512) ? &largepage_flashbased : &smallpage_flashbased;
1427 }
1428 } else { 1407 } else {
1429 this->bbt_td = NULL; 1408 this->bbt_td = NULL;
1430 this->bbt_md = NULL; 1409 this->bbt_md = NULL;
1431 if (!this->badblock_pattern)
1432 nand_create_default_bbt_descr(this);
1433 } 1410 }
1411
1412 if (!this->badblock_pattern)
1413 nand_create_default_bbt_descr(this);
1414
1434 return nand_scan_bbt(mtd, this->badblock_pattern); 1415 return nand_scan_bbt(mtd, this->badblock_pattern);
1435} 1416}
1436 1417
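
In nand_bbt.c the two static small/large-page descriptors disappear and nand_default_bbt() now always falls through to nand_create_default_bbt_descr() when no badblock_pattern was provided, whether or not a flash-based BBT is in use. Going by the deleted statics, the generated descriptor amounts to the following (a reconstruction for illustration, not the kernel function):

#include <linux/mtd/bbm.h>
#include <linux/mtd/nand.h>

static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

/* Equivalent of the deleted smallpage_/largepage_flashbased statics:
 * scan the first two pages of each block for an all-0xff marker at
 * the page-size-dependent bad-block offset. */
static void fill_default_bbt_descr(struct nand_bbt_descr *d, int writesize)
{
	d->options = NAND_BBT_SCAN2NDPAGE;
	d->offs    = writesize > 512 ? NAND_LARGE_BADBLOCK_POS
				     : NAND_SMALL_BADBLOCK_POS;
	d->len     = writesize > 512 ? 2 : 1;
	d->pattern = scan_ff_pattern;
}
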
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 893d95bfea48..357e8c5252a8 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -2383,7 +2383,9 @@ static int __init ns_init_module(void)
2383 goto err_exit; 2383 goto err_exit;
2384 2384
2385 /* Register NAND partitions */ 2385 /* Register NAND partitions */
2386 if ((retval = add_mtd_partitions(nsmtd, &nand->partitions[0], nand->nbparts)) != 0) 2386 retval = mtd_device_register(nsmtd, &nand->partitions[0],
2387 nand->nbparts);
2388 if (retval != 0)
2387 goto err_exit; 2389 goto err_exit;
2388 2390
2389 return 0; 2391 return 0;
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index bbe6d451290d..ea2dea8a9c88 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -33,6 +33,7 @@
33#include <linux/of_platform.h> 33#include <linux/of_platform.h>
34#include <asm/io.h> 34#include <asm/io.h>
35 35
36#define NDFC_MAX_CS 4
36 37
37struct ndfc_controller { 38struct ndfc_controller {
38 struct platform_device *ofdev; 39 struct platform_device *ofdev;
@@ -41,17 +42,16 @@ struct ndfc_controller {
41 struct nand_chip chip; 42 struct nand_chip chip;
42 int chip_select; 43 int chip_select;
43 struct nand_hw_control ndfc_control; 44 struct nand_hw_control ndfc_control;
44#ifdef CONFIG_MTD_PARTITIONS
45 struct mtd_partition *parts; 45 struct mtd_partition *parts;
46#endif
47}; 46};
48 47
49static struct ndfc_controller ndfc_ctrl; 48static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
50 49
51static void ndfc_select_chip(struct mtd_info *mtd, int chip) 50static void ndfc_select_chip(struct mtd_info *mtd, int chip)
52{ 51{
53 uint32_t ccr; 52 uint32_t ccr;
54 struct ndfc_controller *ndfc = &ndfc_ctrl; 53 struct nand_chip *nchip = mtd->priv;
54 struct ndfc_controller *ndfc = nchip->priv;
55 55
56 ccr = in_be32(ndfc->ndfcbase + NDFC_CCR); 56 ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
57 if (chip >= 0) { 57 if (chip >= 0) {
@@ -64,7 +64,8 @@ static void ndfc_select_chip(struct mtd_info *mtd, int chip)
64 64
65static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) 65static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
66{ 66{
67 struct ndfc_controller *ndfc = &ndfc_ctrl; 67 struct nand_chip *chip = mtd->priv;
68 struct ndfc_controller *ndfc = chip->priv;
68 69
69 if (cmd == NAND_CMD_NONE) 70 if (cmd == NAND_CMD_NONE)
70 return; 71 return;
@@ -77,7 +78,8 @@ static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
77 78
78static int ndfc_ready(struct mtd_info *mtd) 79static int ndfc_ready(struct mtd_info *mtd)
79{ 80{
80 struct ndfc_controller *ndfc = &ndfc_ctrl; 81 struct nand_chip *chip = mtd->priv;
82 struct ndfc_controller *ndfc = chip->priv;
81 83
82 return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY; 84 return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
83} 85}
@@ -85,7 +87,8 @@ static int ndfc_ready(struct mtd_info *mtd)
85static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode) 87static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
86{ 88{
87 uint32_t ccr; 89 uint32_t ccr;
88 struct ndfc_controller *ndfc = &ndfc_ctrl; 90 struct nand_chip *chip = mtd->priv;
91 struct ndfc_controller *ndfc = chip->priv;
89 92
90 ccr = in_be32(ndfc->ndfcbase + NDFC_CCR); 93 ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
91 ccr |= NDFC_CCR_RESET_ECC; 94 ccr |= NDFC_CCR_RESET_ECC;
@@ -96,7 +99,8 @@ static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
96static int ndfc_calculate_ecc(struct mtd_info *mtd, 99static int ndfc_calculate_ecc(struct mtd_info *mtd,
97 const u_char *dat, u_char *ecc_code) 100 const u_char *dat, u_char *ecc_code)
98{ 101{
99 struct ndfc_controller *ndfc = &ndfc_ctrl; 102 struct nand_chip *chip = mtd->priv;
103 struct ndfc_controller *ndfc = chip->priv;
100 uint32_t ecc; 104 uint32_t ecc;
101 uint8_t *p = (uint8_t *)&ecc; 105 uint8_t *p = (uint8_t *)&ecc;
102 106
@@ -119,7 +123,8 @@ static int ndfc_calculate_ecc(struct mtd_info *mtd,
119 */ 123 */
120static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 124static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
121{ 125{
122 struct ndfc_controller *ndfc = &ndfc_ctrl; 126 struct nand_chip *chip = mtd->priv;
127 struct ndfc_controller *ndfc = chip->priv;
123 uint32_t *p = (uint32_t *) buf; 128 uint32_t *p = (uint32_t *) buf;
124 129
125 for(;len > 0; len -= 4) 130 for(;len > 0; len -= 4)
@@ -128,7 +133,8 @@ static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
128 133
129static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 134static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
130{ 135{
131 struct ndfc_controller *ndfc = &ndfc_ctrl; 136 struct nand_chip *chip = mtd->priv;
137 struct ndfc_controller *ndfc = chip->priv;
132 uint32_t *p = (uint32_t *) buf; 138 uint32_t *p = (uint32_t *) buf;
133 139
134 for(;len > 0; len -= 4) 140 for(;len > 0; len -= 4)
@@ -137,7 +143,8 @@ static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
137 143
138static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 144static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
139{ 145{
140 struct ndfc_controller *ndfc = &ndfc_ctrl; 146 struct nand_chip *chip = mtd->priv;
147 struct ndfc_controller *ndfc = chip->priv;
141 uint32_t *p = (uint32_t *) buf; 148 uint32_t *p = (uint32_t *) buf;
142 149
143 for(;len > 0; len -= 4) 150 for(;len > 0; len -= 4)
@@ -152,13 +159,11 @@ static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
152static int ndfc_chip_init(struct ndfc_controller *ndfc, 159static int ndfc_chip_init(struct ndfc_controller *ndfc,
153 struct device_node *node) 160 struct device_node *node)
154{ 161{
155#ifdef CONFIG_MTD_PARTITIONS
156#ifdef CONFIG_MTD_CMDLINE_PARTS 162#ifdef CONFIG_MTD_CMDLINE_PARTS
157 static const char *part_types[] = { "cmdlinepart", NULL }; 163 static const char *part_types[] = { "cmdlinepart", NULL };
158#else 164#else
159 static const char *part_types[] = { NULL }; 165 static const char *part_types[] = { NULL };
160#endif 166#endif
161#endif
162 struct device_node *flash_np; 167 struct device_node *flash_np;
163 struct nand_chip *chip = &ndfc->chip; 168 struct nand_chip *chip = &ndfc->chip;
164 int ret; 169 int ret;
@@ -179,6 +184,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
179 chip->ecc.mode = NAND_ECC_HW; 184 chip->ecc.mode = NAND_ECC_HW;
180 chip->ecc.size = 256; 185 chip->ecc.size = 256;
181 chip->ecc.bytes = 3; 186 chip->ecc.bytes = 3;
187 chip->priv = ndfc;
182 188
183 ndfc->mtd.priv = chip; 189 ndfc->mtd.priv = chip;
184 ndfc->mtd.owner = THIS_MODULE; 190 ndfc->mtd.owner = THIS_MODULE;
@@ -198,25 +204,18 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
198 if (ret) 204 if (ret)
199 goto err; 205 goto err;
200 206
201#ifdef CONFIG_MTD_PARTITIONS
202 ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0); 207 ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0);
203 if (ret < 0) 208 if (ret < 0)
204 goto err; 209 goto err;
205 210
206#ifdef CONFIG_MTD_OF_PARTS
207 if (ret == 0) { 211 if (ret == 0) {
208 ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np, 212 ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np,
209 &ndfc->parts); 213 &ndfc->parts);
210 if (ret < 0) 214 if (ret < 0)
211 goto err; 215 goto err;
212 } 216 }
213#endif
214 217
215 if (ret > 0) 218 ret = mtd_device_register(&ndfc->mtd, ndfc->parts, ret);
216 ret = add_mtd_partitions(&ndfc->mtd, ndfc->parts, ret);
217 else
218#endif
219 ret = add_mtd_device(&ndfc->mtd);
220 219
221err: 220err:
222 of_node_put(flash_np); 221 of_node_put(flash_np);
@@ -227,15 +226,10 @@ err:
227 226
228static int __devinit ndfc_probe(struct platform_device *ofdev) 227static int __devinit ndfc_probe(struct platform_device *ofdev)
229{ 228{
230 struct ndfc_controller *ndfc = &ndfc_ctrl; 229 struct ndfc_controller *ndfc;
231 const __be32 *reg; 230 const __be32 *reg;
232 u32 ccr; 231 u32 ccr;
233 int err, len; 232 int err, len, cs;
234
235 spin_lock_init(&ndfc->ndfc_control.lock);
236 init_waitqueue_head(&ndfc->ndfc_control.wq);
237 ndfc->ofdev = ofdev;
238 dev_set_drvdata(&ofdev->dev, ndfc);
239 233
240 /* Read the reg property to get the chip select */ 234 /* Read the reg property to get the chip select */
241 reg = of_get_property(ofdev->dev.of_node, "reg", &len); 235 reg = of_get_property(ofdev->dev.of_node, "reg", &len);
@@ -243,7 +237,20 @@ static int __devinit ndfc_probe(struct platform_device *ofdev)
243 dev_err(&ofdev->dev, "unable read reg property (%d)\n", len); 237 dev_err(&ofdev->dev, "unable read reg property (%d)\n", len);
244 return -ENOENT; 238 return -ENOENT;
245 } 239 }
246 ndfc->chip_select = be32_to_cpu(reg[0]); 240
241 cs = be32_to_cpu(reg[0]);
242 if (cs >= NDFC_MAX_CS) {
243 dev_err(&ofdev->dev, "invalid CS number (%d)\n", cs);
244 return -EINVAL;
245 }
246
247 ndfc = &ndfc_ctrl[cs];
248 ndfc->chip_select = cs;
249
250 spin_lock_init(&ndfc->ndfc_control.lock);
251 init_waitqueue_head(&ndfc->ndfc_control.wq);
252 ndfc->ofdev = ofdev;
253 dev_set_drvdata(&ofdev->dev, ndfc);
247 254
248 ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0); 255 ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
249 if (!ndfc->ndfcbase) { 256 if (!ndfc->ndfcbase) {
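
ndfc.c moves from one file-scope controller to an array indexed by chip select (bounded by the new NDFC_MAX_CS), validates the CS number read from the device tree, and sets chip->priv so every mtd callback can recover its own controller instead of the singleton. The essential back-pointer pattern, as a sketch with the driver's types assumed:

/* Sketch: callbacks climb mtd->priv (the nand_chip embedded in the
 * controller) to chip->priv, which ndfc_chip_init() pointed back at
 * the owning ndfc_controller. */
static struct ndfc_controller *to_ndfc(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;

	return chip->priv;
}
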
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index a045a4a581b6..b6a5c86ab31e 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -158,12 +158,7 @@ static int nomadik_nand_probe(struct platform_device *pdev)
158 goto err_unmap; 158 goto err_unmap;
159 } 159 }
160 160
161#ifdef CONFIG_MTD_PARTITIONS 161 mtd_device_register(&host->mtd, pdata->parts, pdata->nparts);
162 add_mtd_partitions(&host->mtd, pdata->parts, pdata->nparts);
163#else
164 pr_info("Registering %s as whole device\n", mtd->name);
165 add_mtd_device(mtd);
166#endif
167 162
168 platform_set_drvdata(pdev, host); 163 platform_set_drvdata(pdev, host);
169 return 0; 164 return 0;
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 6eddf7361ed7..9c30a0b03171 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -321,8 +321,8 @@ static int __devinit nuc900_nand_probe(struct platform_device *pdev)
321 goto fail3; 321 goto fail3;
322 } 322 }
323 323
324 add_mtd_partitions(&(nuc900_nand->mtd), partitions, 324 mtd_device_register(&(nuc900_nand->mtd), partitions,
325 ARRAY_SIZE(partitions)); 325 ARRAY_SIZE(partitions));
326 326
327 platform_set_drvdata(pdev, nuc900_nand); 327 platform_set_drvdata(pdev, nuc900_nand);
328 328
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index da9a351c9d79..0db2c0e7656a 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -94,9 +94,7 @@
94#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0) 94#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
95#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1) 95#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
96 96
97#ifdef CONFIG_MTD_PARTITIONS
98static const char *part_probes[] = { "cmdlinepart", NULL }; 97static const char *part_probes[] = { "cmdlinepart", NULL };
99#endif
100 98
101/* oob info generated runtime depending on ecc algorithm and layout selected */ 99/* oob info generated runtime depending on ecc algorithm and layout selected */
102static struct nand_ecclayout omap_oobinfo; 100static struct nand_ecclayout omap_oobinfo;
@@ -263,11 +261,10 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
263 if (ret) { 261 if (ret) {
264 /* PFPW engine is busy, use cpu copy method */ 262 /* PFPW engine is busy, use cpu copy method */
265 if (info->nand.options & NAND_BUSWIDTH_16) 263 if (info->nand.options & NAND_BUSWIDTH_16)
266 omap_read_buf16(mtd, buf, len); 264 omap_read_buf16(mtd, (u_char *)p, len);
267 else 265 else
268 omap_read_buf8(mtd, buf, len); 266 omap_read_buf8(mtd, (u_char *)p, len);
269 } else { 267 } else {
270 p = (u32 *) buf;
271 do { 268 do {
272 r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT); 269 r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
273 r_count = r_count >> 2; 270 r_count = r_count >> 2;
@@ -293,7 +290,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
293 struct omap_nand_info, mtd); 290 struct omap_nand_info, mtd);
294 uint32_t w_count = 0; 291 uint32_t w_count = 0;
295 int i = 0, ret = 0; 292 int i = 0, ret = 0;
296 u16 *p; 293 u16 *p = (u16 *)buf;
297 unsigned long tim, limit; 294 unsigned long tim, limit;
298 295
299 /* take care of subpage writes */ 296 /* take care of subpage writes */
@@ -309,11 +306,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
309 if (ret) { 306 if (ret) {
310 /* PFPW engine is busy, use cpu copy method */ 307 /* PFPW engine is busy, use cpu copy method */
311 if (info->nand.options & NAND_BUSWIDTH_16) 308 if (info->nand.options & NAND_BUSWIDTH_16)
312 omap_write_buf16(mtd, buf, len); 309 omap_write_buf16(mtd, (u_char *)p, len);
313 else 310 else
314 omap_write_buf8(mtd, buf, len); 311 omap_write_buf8(mtd, (u_char *)p, len);
315 } else { 312 } else {
316 p = (u16 *) buf;
317 while (len) { 313 while (len) {
318 w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT); 314 w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
319 w_count = w_count >> 1; 315 w_count = w_count >> 1;
@@ -1073,9 +1069,9 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1073 /* DIP switches on some boards change between 8 and 16 bit 1069 /* DIP switches on some boards change between 8 and 16 bit
1074 * bus widths for flash. Try the other width if the first try fails. 1070 * bus widths for flash. Try the other width if the first try fails.
1075 */ 1071 */
1076 if (nand_scan(&info->mtd, 1)) { 1072 if (nand_scan_ident(&info->mtd, 1, NULL)) {
1077 info->nand.options ^= NAND_BUSWIDTH_16; 1073 info->nand.options ^= NAND_BUSWIDTH_16;
1078 if (nand_scan(&info->mtd, 1)) { 1074 if (nand_scan_ident(&info->mtd, 1, NULL)) {
1079 err = -ENXIO; 1075 err = -ENXIO;
1080 goto out_release_mem_region; 1076 goto out_release_mem_region;
1081 } 1077 }
@@ -1101,15 +1097,19 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1101 info->nand.ecc.layout = &omap_oobinfo; 1097 info->nand.ecc.layout = &omap_oobinfo;
1102 } 1098 }
1103 1099
1104#ifdef CONFIG_MTD_PARTITIONS 1100 /* second phase scan */
1101 if (nand_scan_tail(&info->mtd)) {
1102 err = -ENXIO;
1103 goto out_release_mem_region;
1104 }
1105
1105 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); 1106 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
1106 if (err > 0) 1107 if (err > 0)
1107 add_mtd_partitions(&info->mtd, info->parts, err); 1108 mtd_device_register(&info->mtd, info->parts, err);
1108 else if (pdata->parts) 1109 else if (pdata->parts)
1109 add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts); 1110 mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
1110 else 1111 else
1111#endif 1112 mtd_device_register(&info->mtd, NULL, 0);
1112 add_mtd_device(&info->mtd);
1113 1113
1114 platform_set_drvdata(pdev, &info->mtd); 1114 platform_set_drvdata(pdev, &info->mtd);
1115 1115
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index da6e75343052..7794d0680f91 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -21,9 +21,7 @@
21#include <mach/hardware.h> 21#include <mach/hardware.h>
22#include <plat/orion_nand.h> 22#include <plat/orion_nand.h>
23 23
24#ifdef CONFIG_MTD_CMDLINE_PARTS
25static const char *part_probes[] = { "cmdlinepart", NULL }; 24static const char *part_probes[] = { "cmdlinepart", NULL };
26#endif
27 25
28static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 26static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
29{ 27{
@@ -83,10 +81,8 @@ static int __init orion_nand_probe(struct platform_device *pdev)
83 struct resource *res; 81 struct resource *res;
84 void __iomem *io_base; 82 void __iomem *io_base;
85 int ret = 0; 83 int ret = 0;
86#ifdef CONFIG_MTD_PARTITIONS
87 struct mtd_partition *partitions = NULL; 84 struct mtd_partition *partitions = NULL;
88 int num_part = 0; 85 int num_part = 0;
89#endif
90 86
91 nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL); 87 nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
92 if (!nc) { 88 if (!nc) {
@@ -136,7 +132,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
136 goto no_dev; 132 goto no_dev;
137 } 133 }
138 134
139#ifdef CONFIG_MTD_PARTITIONS
140#ifdef CONFIG_MTD_CMDLINE_PARTS 135#ifdef CONFIG_MTD_CMDLINE_PARTS
141 mtd->name = "orion_nand"; 136 mtd->name = "orion_nand";
142 num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0); 137 num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
@@ -147,14 +142,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
147 partitions = board->parts; 142 partitions = board->parts;
148 } 143 }
149 144
150 if (partitions && num_part > 0) 145 ret = mtd_device_register(mtd, partitions, num_part);
151 ret = add_mtd_partitions(mtd, partitions, num_part);
152 else
153 ret = add_mtd_device(mtd);
154#else
155 ret = add_mtd_device(mtd);
156#endif
157
158 if (ret) { 146 if (ret) {
159 nand_release(mtd); 147 nand_release(mtd);
160 goto no_dev; 148 goto no_dev;
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 20bfe5f15afd..b1aa41b8a4eb 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -163,7 +163,7 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
163 goto out_lpc; 163 goto out_lpc;
164 } 164 }
165 165
166 if (add_mtd_device(pasemi_nand_mtd)) { 166 if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
167 printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n"); 167 printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
168 err = -ENODEV; 168 err = -ENODEV;
169 goto out_lpc; 169 goto out_lpc;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index caf5a736340a..633c04bf76f6 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -21,10 +21,8 @@ struct plat_nand_data {
21 struct nand_chip chip; 21 struct nand_chip chip;
22 struct mtd_info mtd; 22 struct mtd_info mtd;
23 void __iomem *io_base; 23 void __iomem *io_base;
24#ifdef CONFIG_MTD_PARTITIONS
25 int nr_parts; 24 int nr_parts;
26 struct mtd_partition *parts; 25 struct mtd_partition *parts;
27#endif
28}; 26};
29 27
30/* 28/*
@@ -101,13 +99,12 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
101 goto out; 99 goto out;
102 } 100 }
103 101
104#ifdef CONFIG_MTD_PARTITIONS
105 if (pdata->chip.part_probe_types) { 102 if (pdata->chip.part_probe_types) {
106 err = parse_mtd_partitions(&data->mtd, 103 err = parse_mtd_partitions(&data->mtd,
107 pdata->chip.part_probe_types, 104 pdata->chip.part_probe_types,
108 &data->parts, 0); 105 &data->parts, 0);
109 if (err > 0) { 106 if (err > 0) {
110 add_mtd_partitions(&data->mtd, data->parts, err); 107 mtd_device_register(&data->mtd, data->parts, err);
111 return 0; 108 return 0;
112 } 109 }
113 } 110 }
@@ -115,11 +112,10 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
115 pdata->chip.set_parts(data->mtd.size, &pdata->chip); 112 pdata->chip.set_parts(data->mtd.size, &pdata->chip);
116 if (pdata->chip.partitions) { 113 if (pdata->chip.partitions) {
117 data->parts = pdata->chip.partitions; 114 data->parts = pdata->chip.partitions;
118 err = add_mtd_partitions(&data->mtd, data->parts, 115 err = mtd_device_register(&data->mtd, data->parts,
119 pdata->chip.nr_partitions); 116 pdata->chip.nr_partitions);
120 } else 117 } else
121#endif 118 err = mtd_device_register(&data->mtd, NULL, 0);
122 err = add_mtd_device(&data->mtd);
123 119
124 if (!err) 120 if (!err)
125 return err; 121 return err;
@@ -149,10 +145,8 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
149 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 145 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
150 146
151 nand_release(&data->mtd); 147 nand_release(&data->mtd);
152#ifdef CONFIG_MTD_PARTITIONS
153 if (data->parts && data->parts != pdata->chip.partitions) 148 if (data->parts && data->parts != pdata->chip.partitions)
154 kfree(data->parts); 149 kfree(data->parts);
155#endif
156 if (pdata->ctrl.remove) 150 if (pdata->ctrl.remove)
157 pdata->ctrl.remove(pdev); 151 pdata->ctrl.remove(pdev);
158 iounmap(data->io_base); 152 iounmap(data->io_base);
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index cc8658431851..3bbb796b451c 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -73,7 +73,6 @@ __setup("ppchameleon_fio_pbase=", ppchameleon_fio_pbase);
 __setup("ppchameleonevb_fio_pbase=", ppchameleonevb_fio_pbase);
 #endif
 
-#ifdef CONFIG_MTD_PARTITIONS
 /*
  * Define static partitions for flash devices
  */
@@ -101,7 +100,6 @@ static struct mtd_partition partition_info_evb[] = {
 #define NUM_PARTITIONS 1
 
 extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, const char *mtd_id);
-#endif
 
 /*
  * hardware specific access to control-lines
@@ -189,10 +187,8 @@ static int ppchameleonevb_device_ready(struct mtd_info *minfo)
 }
 #endif
 
-#ifdef CONFIG_MTD_PARTITIONS
 const char *part_probes[] = { "cmdlinepart", NULL };
 const char *part_probes_evb[] = { "cmdlinepart", NULL };
-#endif
 
 /*
  * Main initialization routine
@@ -284,14 +280,13 @@ static int __init ppchameleonevb_init(void)
 	this->chip_delay = NAND_SMALL_DELAY_US;
 #endif
 
-#ifdef CONFIG_MTD_PARTITIONS
 	ppchameleon_mtd->name = "ppchameleon-nand";
 	mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0);
 	if (mtd_parts_nb > 0)
 		part_type = "command line";
 	else
 		mtd_parts_nb = 0;
-#endif
+
 	if (mtd_parts_nb == 0) {
 		if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
 			mtd_parts = partition_info_me;
@@ -303,7 +298,7 @@ static int __init ppchameleonevb_init(void)
 
 	/* Register the partitions */
 	printk(KERN_NOTICE "Using %s partition definition\n", part_type);
-	add_mtd_partitions(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
+	mtd_device_register(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
 
 nand_evb_init:
 	/****************************
@@ -385,14 +380,14 @@ static int __init ppchameleonevb_init(void)
 		iounmap(ppchameleon_fio_base);
 		return -ENXIO;
 	}
-#ifdef CONFIG_MTD_PARTITIONS
+
 	ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
 	mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0);
 	if (mtd_parts_nb > 0)
 		part_type = "command line";
 	else
 		mtd_parts_nb = 0;
-#endif
+
 	if (mtd_parts_nb == 0) {
 		mtd_parts = partition_info_evb;
 		mtd_parts_nb = NUM_PARTITIONS;
@@ -401,7 +396,7 @@ static int __init ppchameleonevb_init(void)
 
 	/* Register the partitions */
 	printk(KERN_NOTICE "Using %s partition definition\n", part_type);
-	add_mtd_partitions(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb);
+	mtd_device_register(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb);
 
 	/* Return happy */
 	return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index ff0701276d65..1fb3b3a80581 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1119,10 +1119,7 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
 	clk_put(info->clk);
 
 	if (mtd) {
-		del_mtd_device(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
-		del_mtd_partitions(mtd);
-#endif
+		mtd_device_unregister(mtd);
 		kfree(mtd);
 	}
 	return 0;
@@ -1149,7 +1146,6 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
-#ifdef CONFIG_MTD_PARTITIONS
 	if (mtd_has_cmdlinepart()) {
 		const char *probes[] = { "cmdlinepart", NULL };
 		struct mtd_partition *parts;
@@ -1158,13 +1154,10 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
 		nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0);
 
 		if (nr_parts)
-			return add_mtd_partitions(info->mtd, parts, nr_parts);
+			return mtd_device_register(info->mtd, parts, nr_parts);
 	}
 
-	return add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts);
-#else
-	return 0;
-#endif
+	return mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index 67440b5beef8..c9f9127ff770 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -580,7 +580,8 @@ static int __init rtc_from4_init(void)
 #endif
 
 	/* Register the partitions */
-	ret = add_mtd_partitions(rtc_from4_mtd, partition_info, NUM_PARTITIONS);
+	ret = mtd_device_register(rtc_from4_mtd, partition_info,
+				  NUM_PARTITIONS);
 	if (ret)
 		goto err_3;
 
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 33d832dddfdd..4405468f196b 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -55,7 +55,7 @@ static int hardware_ecc = 0;
 #endif
 
 #ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
-static int clock_stop = 1;
+static const int clock_stop = 1;
 #else
 static const int clock_stop = 0;
 #endif
@@ -96,6 +96,12 @@ enum s3c_cpu_type {
 	TYPE_S3C2440,
 };
 
+enum s3c_nand_clk_state {
+	CLOCK_DISABLE	= 0,
+	CLOCK_ENABLE,
+	CLOCK_SUSPEND,
+};
+
 /* overview of the s3c2410 nand state */
 
 /**
@@ -111,6 +117,7 @@ enum s3c_cpu_type {
  * @mtd_count: The number of MTDs created from this controller.
 * @save_sel: The contents of @sel_reg to be saved over suspend.
 * @clk_rate: The clock rate from @clk.
+ * @clk_state: The current clock state.
  * @cpu_type: The exact type of this controller.
  */
 struct s3c2410_nand_info {
@@ -129,6 +136,7 @@ struct s3c2410_nand_info {
 	int				mtd_count;
 	unsigned long			save_sel;
 	unsigned long			clk_rate;
+	enum s3c_nand_clk_state		clk_state;
 
 	enum s3c_cpu_type		cpu_type;
 
@@ -159,11 +167,33 @@ static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
 	return dev->dev.platform_data;
 }
 
-static inline int allow_clk_stop(struct s3c2410_nand_info *info)
+static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
 {
 	return clock_stop;
 }
 
+/**
+ * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock.
+ * @info: The controller instance.
+ * @new_state: State to which clock should be set.
+ */
+static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info,
+		enum s3c_nand_clk_state new_state)
+{
+	if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND)
+		return;
+
+	if (info->clk_state == CLOCK_ENABLE) {
+		if (new_state != CLOCK_ENABLE)
+			clk_disable(info->clk);
+	} else {
+		if (new_state == CLOCK_ENABLE)
+			clk_enable(info->clk);
+	}
+
+	info->clk_state = new_state;
+}
+
 /* timing calculations */
 
 #define NS_IN_KHZ 1000000
@@ -333,8 +363,8 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
 	nmtd = this->priv;
 	info = nmtd->info;
 
-	if (chip != -1 && allow_clk_stop(info))
-		clk_enable(info->clk);
+	if (chip != -1)
+		s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
 
 	cur = readl(info->sel_reg);
 
@@ -356,8 +386,8 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
 
 	writel(cur, info->sel_reg);
 
-	if (chip == -1 && allow_clk_stop(info))
-		clk_disable(info->clk);
+	if (chip == -1)
+		s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
 }
 
 /* s3c2410_nand_hwcontrol
@@ -694,8 +724,7 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
 	/* free the common resources */
 
 	if (info->clk != NULL && !IS_ERR(info->clk)) {
-		if (!allow_clk_stop(info))
-			clk_disable(info->clk);
+		s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
 		clk_put(info->clk);
 	}
 
@@ -715,7 +744,6 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_MTD_PARTITIONS
 const char *part_probes[] = { "cmdlinepart", NULL };
 static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
 				      struct s3c2410_nand_mtd *mtd,
@@ -725,7 +753,7 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
 	int nr_part = 0;
 
 	if (set == NULL)
-		return add_mtd_device(&mtd->mtd);
+		return mtd_device_register(&mtd->mtd, NULL, 0);
 
 	mtd->mtd.name = set->name;
 	nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, &part_info, 0);
@@ -735,19 +763,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
 		part_info = set->partitions;
 	}
 
-	if (nr_part > 0 && part_info)
-		return add_mtd_partitions(&mtd->mtd, part_info, nr_part);
-
-	return add_mtd_device(&mtd->mtd);
-}
-#else
-static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
-				      struct s3c2410_nand_mtd *mtd,
-				      struct s3c2410_nand_set *set)
-{
-	return add_mtd_device(&mtd->mtd);
+	return mtd_device_register(&mtd->mtd, part_info, nr_part);
 }
-#endif
 
 /**
  * s3c2410_nand_init_chip - initialise a single instance of an chip
@@ -947,7 +964,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
 		goto exit_error;
 	}
 
-	clk_enable(info->clk);
+	s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
 
 	/* allocate and map the resource */
 
@@ -1026,9 +1043,9 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
 		goto exit_error;
 	}
 
-	if (allow_clk_stop(info)) {
+	if (allow_clk_suspend(info)) {
 		dev_info(&pdev->dev, "clock idle support enabled\n");
-		clk_disable(info->clk);
+		s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
 	}
 
 	pr_debug("initialised ok\n");
@@ -1059,8 +1076,7 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
 
 		writel(info->save_sel | info->sel_bit, info->sel_reg);
 
-		if (!allow_clk_stop(info))
-			clk_disable(info->clk);
+		s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
 	}
 
 	return 0;
@@ -1072,7 +1088,7 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
 	unsigned long sel;
 
 	if (info) {
-		clk_enable(info->clk);
+		s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
 		s3c2410_nand_inithw(info);
 
 		/* Restore the state of the nFCE line. */
@@ -1082,8 +1098,7 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
 		sel |= info->save_sel & info->sel_bit;
 		writel(sel, info->sel_reg);
 
-		if (allow_clk_stop(info))
-			clk_disable(info->clk);
+		s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
 	}
 
 	return 0;
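
The s3c2410 change is the one conversion here that goes beyond the registration API: the scattered allow_clk_stop()/clk_enable()/clk_disable() calls become a three-state helper, and the clock only physically toggles when a transition crosses the CLOCK_ENABLE boundary. CLOCK_SUSPEND and CLOCK_DISABLE are identical to the hardware and differ only in intent (SUSPEND is refused when clock-stop support is compiled out). A minimal model of that transition rule (sketch, not the driver source; the function pointers stand in for clk_enable/clk_disable):

	enum clk_state { CLOCK_DISABLE, CLOCK_ENABLE, CLOCK_SUSPEND };

	static void clk_transition(enum clk_state *cur, enum clk_state next,
				   void (*enable)(void), void (*disable)(void))
	{
		if (*cur == CLOCK_ENABLE && next != CLOCK_ENABLE)
			disable();	/* leaving the enabled state */
		else if (*cur != CLOCK_ENABLE && next == CLOCK_ENABLE)
			enable();	/* entering the enabled state */
		*cur = next;		/* SUSPEND <-> DISABLE is free */
	}
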
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 81bbb5ee148d..93b1f74321c2 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -867,7 +867,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
 	if (ret)
 		goto err;
 
-	add_mtd_partitions(flctl_mtd, pdata->parts, pdata->nr_parts);
+	mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
 
 	return 0;
 
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 54ec7542a7b7..19e24ed089ea 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -103,9 +103,7 @@ static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat,
 	return readb(sharpsl->io + ECCCNTR) != 0;
 }
 
-#ifdef CONFIG_MTD_PARTITIONS
 static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
 
 /*
  * Main initialization routine
@@ -113,10 +111,8 @@ static const char *part_probes[] = { "cmdlinepart", NULL };
 static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
 {
 	struct nand_chip *this;
-#ifdef CONFIG_MTD_PARTITIONS
 	struct mtd_partition *sharpsl_partition_info;
 	int nr_partitions;
-#endif
 	struct resource *r;
 	int err = 0;
 	struct sharpsl_nand *sharpsl;
@@ -188,18 +184,14 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
 
 	/* Register the partitions */
 	sharpsl->mtd.name = "sharpsl-nand";
-#ifdef CONFIG_MTD_PARTITIONS
 	nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0);
 	if (nr_partitions <= 0) {
 		nr_partitions = data->nr_partitions;
 		sharpsl_partition_info = data->partitions;
 	}
 
-	if (nr_partitions > 0)
-		err = add_mtd_partitions(&sharpsl->mtd, sharpsl_partition_info, nr_partitions);
-	else
-#endif
-		err = add_mtd_device(&sharpsl->mtd);
+	err = mtd_device_register(&sharpsl->mtd, sharpsl_partition_info,
+				  nr_partitions);
 	if (err)
 		goto err_add;
 
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 57cc80cd01a3..b6332e83b289 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -139,7 +139,7 @@ int sm_register_device(struct mtd_info *mtd, int smartmedia)
 	if (ret)
 		return ret;
 
-	return add_mtd_device(mtd);
+	return mtd_device_register(mtd, NULL, 0);
 }
 EXPORT_SYMBOL_GPL(sm_register_device);
 
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index a853548986f0..ca2d0555729e 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -155,9 +155,7 @@ static int socrates_nand_device_ready(struct mtd_info *mtd)
 	return 1;
 }
 
-#ifdef CONFIG_MTD_PARTITIONS
 static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
 
 /*
  * Probe for the NAND device.
@@ -168,11 +166,8 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
 	struct mtd_info *mtd;
 	struct nand_chip *nand_chip;
 	int res;
-
-#ifdef CONFIG_MTD_PARTITIONS
 	struct mtd_partition *partitions = NULL;
 	int num_partitions = 0;
-#endif
 
 	/* Allocate memory for the device structure (and zero it) */
 	host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL);
@@ -230,7 +225,6 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
 		goto out;
 	}
 
-#ifdef CONFIG_MTD_PARTITIONS
 #ifdef CONFIG_MTD_CMDLINE_PARTS
 	num_partitions = parse_mtd_partitions(mtd, part_probes,
 					      &partitions, 0);
@@ -240,7 +234,6 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
 	}
 #endif
 
-#ifdef CONFIG_MTD_OF_PARTS
 	if (num_partitions == 0) {
 		num_partitions = of_mtd_parse_partitions(&ofdev->dev,
 							 ofdev->dev.of_node,
@@ -250,19 +243,12 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
 			goto release;
 		}
 	}
-#endif
-	if (partitions && (num_partitions > 0))
-		res = add_mtd_partitions(mtd, partitions, num_partitions);
-	else
-#endif
-		res = add_mtd_device(mtd);
 
+	res = mtd_device_register(mtd, partitions, num_partitions);
 	if (!res)
 		return res;
 
-#ifdef CONFIG_MTD_PARTITIONS
 release:
-#endif
 	nand_release(mtd);
 
 out:
diff --git a/drivers/mtd/nand/spia.c b/drivers/mtd/nand/spia.c
index 0cc6d0acb8fe..bef76cd7c24c 100644
--- a/drivers/mtd/nand/spia.c
+++ b/drivers/mtd/nand/spia.c
@@ -149,7 +149,7 @@ static int __init spia_init(void)
 	}
 
 	/* Register the partitions */
-	add_mtd_partitions(spia_mtd, partition_info, NUM_PARTITIONS);
+	mtd_device_register(spia_mtd, partition_info, NUM_PARTITIONS);
 
 	/* Return happy */
 	return 0;
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index c004e474631b..11e8371b5683 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -381,10 +381,8 @@ static int tmio_probe(struct platform_device *dev)
 	struct tmio_nand *tmio;
 	struct mtd_info *mtd;
 	struct nand_chip *nand_chip;
-#ifdef CONFIG_MTD_PARTITIONS
 	struct mtd_partition *parts;
 	int nbparts = 0;
-#endif
 	int retval;
 
 	if (data == NULL)
@@ -463,7 +461,6 @@ static int tmio_probe(struct platform_device *dev)
 		goto err_scan;
 	}
 	/* Register the partitions */
-#ifdef CONFIG_MTD_PARTITIONS
 #ifdef CONFIG_MTD_CMDLINE_PARTS
 	nbparts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
 #endif
@@ -472,12 +469,7 @@ static int tmio_probe(struct platform_device *dev)
 		nbparts = data->num_partitions;
 	}
 
-	if (nbparts)
-		retval = add_mtd_partitions(mtd, parts, nbparts);
-	else
-#endif
-		retval = add_mtd_device(mtd);
-
+	retval = mtd_device_register(mtd, parts, nbparts);
 	if (!retval)
 		return retval;
 
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index ca270a4881a4..bfba4e39a6c5 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -74,9 +74,7 @@ struct txx9ndfmc_drvdata {
 	unsigned char hold;	/* in gbusclock */
 	unsigned char spw;	/* in gbusclock */
 	struct nand_hw_control hw_control;
-#ifdef CONFIG_MTD_PARTITIONS
 	struct mtd_partition *parts[MAX_TXX9NDFMC_DEV];
-#endif
 };
 
 static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
@@ -289,9 +287,7 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
 static int __init txx9ndfmc_probe(struct platform_device *dev)
 {
 	struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
-#ifdef CONFIG_MTD_PARTITIONS
 	static const char *probes[] = { "cmdlinepart", NULL };
-#endif
 	int hold, spw;
 	int i;
 	struct txx9ndfmc_drvdata *drvdata;
@@ -337,9 +333,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
 		struct txx9ndfmc_priv *txx9_priv;
 		struct nand_chip *chip;
 		struct mtd_info *mtd;
-#ifdef CONFIG_MTD_PARTITIONS
 		int nr_parts;
-#endif
 
 		if (!(plat->ch_mask & (1 << i)))
 			continue;
@@ -399,13 +393,9 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
 		}
 		mtd->name = txx9_priv->mtdname;
 
-#ifdef CONFIG_MTD_PARTITIONS
 		nr_parts = parse_mtd_partitions(mtd, probes,
 						&drvdata->parts[i], 0);
-		if (nr_parts > 0)
-			add_mtd_partitions(mtd, drvdata->parts[i], nr_parts);
-#endif
-		add_mtd_device(mtd);
+		mtd_device_register(mtd, drvdata->parts[i], nr_parts);
 		drvdata->mtds[i] = mtd;
 	}
 
@@ -431,9 +421,7 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
 		txx9_priv = chip->priv;
 
 		nand_release(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
 		kfree(drvdata->parts[i]);
-#endif
 		kfree(txx9_priv->mtdname);
 		kfree(txx9_priv);
 	}
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 4f426195f8db..772ad2966619 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -1,7 +1,6 @@
 menuconfig MTD_ONENAND
 	tristate "OneNAND Device Support"
 	depends on MTD
-	select MTD_PARTITIONS
 	help
 	  This enables support for accessing all type of OneNAND flash
 	  devices. For further information see
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index ac08750748a3..2d70d354d846 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -30,9 +30,7 @@
  */
 #define DRIVER_NAME	"onenand-flash"
 
-#ifdef CONFIG_MTD_PARTITIONS
 static const char *part_probes[] = { "cmdlinepart", NULL, };
-#endif
 
 struct onenand_info {
 	struct mtd_info		mtd;
@@ -75,15 +73,13 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev)
 		goto out_iounmap;
 	}
 
-#ifdef CONFIG_MTD_PARTITIONS
 	err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
 	if (err > 0)
-		add_mtd_partitions(&info->mtd, info->parts, err);
+		mtd_device_register(&info->mtd, info->parts, err);
 	else if (err <= 0 && pdata && pdata->parts)
-		add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
+		mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
 	else
-#endif
-		err = add_mtd_device(&info->mtd);
+		err = mtd_device_register(&info->mtd, NULL, 0);
 
 	platform_set_drvdata(pdev, info);
 
@@ -108,11 +104,7 @@ static int __devexit generic_onenand_remove(struct platform_device *pdev)
 	platform_set_drvdata(pdev, NULL);
 
 	if (info) {
-		if (info->parts)
-			del_mtd_partitions(&info->mtd);
-		else
-			del_mtd_device(&info->mtd);
-
+		mtd_device_unregister(&info->mtd);
 		onenand_release(&info->mtd);
 		release_mem_region(res->start, size);
 		iounmap(info->onenand.base);
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 1fcb41adab07..a916dec29215 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -67,9 +67,7 @@ struct omap2_onenand {
 	struct regulator *regulator;
 };
 
-#ifdef CONFIG_MTD_PARTITIONS
 static const char *part_probes[] = { "cmdlinepart", NULL,  };
-#endif
 
 static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
 {
@@ -755,15 +753,13 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
 	if ((r = onenand_scan(&c->mtd, 1)) < 0)
 		goto err_release_regulator;
 
-#ifdef CONFIG_MTD_PARTITIONS
 	r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
 	if (r > 0)
-		r = add_mtd_partitions(&c->mtd, c->parts, r);
+		r = mtd_device_register(&c->mtd, c->parts, r);
 	else if (pdata->parts != NULL)
-		r = add_mtd_partitions(&c->mtd, pdata->parts, pdata->nr_parts);
+		r = mtd_device_register(&c->mtd, pdata->parts, pdata->nr_parts);
 	else
-#endif
-		r = add_mtd_device(&c->mtd);
+		r = mtd_device_register(&c->mtd, NULL, 0);
 	if (r)
 		goto err_release_onenand;
 
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 56a8b2005bda..ac9e959802a7 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -65,11 +65,11 @@ MODULE_PARM_DESC(otp,	"Corresponding behaviour of OneNAND in OTP"
 			" : 2 -> 1st Block lock"
 			" : 3 -> BOTH OTP Block and 1st Block lock");
 
-/**
- * onenand_oob_128 - oob info for Flex-Onenand with 4KB page
+/*
+ * flexonenand_oob_128 - oob info for Flex-Onenand with 4KB page
  * For now, we expose only 64 out of 80 ecc bytes
  */
-static struct nand_ecclayout onenand_oob_128 = {
+static struct nand_ecclayout flexonenand_oob_128 = {
 	.eccbytes	= 64,
 	.eccpos		= {
 		6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
@@ -86,6 +86,35 @@ static struct nand_ecclayout onenand_oob_128 = {
 	}
 };
 
+/*
+ * onenand_oob_128 - oob info for OneNAND with 4KB page
+ *
+ * Based on specification:
+ * 4Gb M-die OneNAND Flash (KFM4G16Q4M, KFN8G16Q4M). Rev. 1.3, Apr. 2010
+ *
+ * For eccpos we expose only 64 bytes out of 72 (see struct nand_ecclayout)
+ *
+ * oobfree uses the spare area fields marked as
+ * "Managed by internal ECC logic for Logical Sector Number area"
+ */
+static struct nand_ecclayout onenand_oob_128 = {
+	.eccbytes	= 64,
+	.eccpos		= {
+		7, 8, 9, 10, 11, 12, 13, 14, 15,
+		23, 24, 25, 26, 27, 28, 29, 30, 31,
+		39, 40, 41, 42, 43, 44, 45, 46, 47,
+		55, 56, 57, 58, 59, 60, 61, 62, 63,
+		71, 72, 73, 74, 75, 76, 77, 78, 79,
+		87, 88, 89, 90, 91, 92, 93, 94, 95,
+		103, 104, 105, 106, 107, 108, 109, 110, 111,
+		119
+	},
+	.oobfree	= {
+		{2, 3}, {18, 3}, {34, 3}, {50, 3},
+		{66, 3}, {82, 3}, {98, 3}, {114, 3}
+	}
+};
+
 /**
  * onenand_oob_64 - oob info for large (2KB) page
  */
@@ -2424,7 +2453,7 @@ static int onenand_block_by_block_erase(struct mtd_info *mtd,
 		len -= block_size;
 		addr += block_size;
 
-		if (addr == region_end) {
+		if (region && addr == region_end) {
 			if (!len)
 				break;
 			region++;
@@ -4018,8 +4047,13 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
 	 */
 	switch (mtd->oobsize) {
 	case 128:
-		this->ecclayout = &onenand_oob_128;
-		mtd->subpage_sft = 0;
+		if (FLEXONENAND(this)) {
+			this->ecclayout = &flexonenand_oob_128;
+			mtd->subpage_sft = 0;
+		} else {
+			this->ecclayout = &onenand_oob_128;
+			mtd->subpage_sft = 2;
+		}
 		break;
 	case 64:
 		this->ecclayout = &onenand_oob_64;
@@ -4108,12 +4142,8 @@ void onenand_release(struct mtd_info *mtd)
 {
 	struct onenand_chip *this = mtd->priv;
 
-#ifdef CONFIG_MTD_PARTITIONS
 	/* Deregister partitions */
-	del_mtd_partitions (mtd);
-#endif
-	/* Deregister the device */
-	del_mtd_device (mtd);
+	mtd_device_unregister(mtd);
 
 	/* Free bad block table memory, if allocated */
 	if (this->bbm) {
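
Arithmetic behind the onenand_oob_128 layout added above, as far as it can be read off the table: the 128-byte spare area divides into eight 16-byte segments; the oobfree entries take 3 bytes per segment (offsets 2, 18, 34, ... 114, 24 bytes total), and 9 ECC bytes per segment give the 72 the datasheet describes, of which only 64 fit in nand_ecclayout's eccpos array — hence positions 7..111 plus the single trailing entry 119. A standalone check of those numbers (sketch; the per-segment reading is inferred from the table, not quoted from the spec):

	#include <stdio.h>

	int main(void)
	{
		int segs = 128 / 16;				/* 8 spare-area segments */
		printf("oobfree bytes: %d\n", segs * 3);	/* 24 */
		printf("ecc bytes:     %d\n", segs * 9);	/* 72, 64 exposed */
		return 0;
	}
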
diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c
index 5ef3bd547772..85399e3accda 100644
--- a/drivers/mtd/onenand/onenand_sim.c
+++ b/drivers/mtd/onenand/onenand_sim.c
@@ -539,7 +539,8 @@ static int __init onenand_sim_init(void)
 		return -ENXIO;
 	}
 
-	add_mtd_partitions(&info->mtd, info->parts, ARRAY_SIZE(os_partitions));
+	mtd_device_register(&info->mtd, info->parts,
+			    ARRAY_SIZE(os_partitions));
 
 	return 0;
 }
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index a4c74a9ba430..3306b5b3c736 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -147,9 +147,7 @@ struct s3c_onenand {
 	struct resource *dma_res;
 	unsigned long	phys_base;
 	struct completion	complete;
-#ifdef CONFIG_MTD_PARTITIONS
 	struct mtd_partition *parts;
-#endif
 };
 
 #define CMD_MAP_00(dev, addr)		(dev->cmd_map(MAP_00, ((addr) << 1)))
@@ -159,9 +157,7 @@ struct s3c_onenand {
 
 static struct s3c_onenand *onenand;
 
-#ifdef CONFIG_MTD_PARTITIONS
 static const char *part_probes[] = { "cmdlinepart", NULL, };
-#endif
 
 static inline int s3c_read_reg(int offset)
 {
@@ -1021,15 +1017,13 @@ static int s3c_onenand_probe(struct platform_device *pdev)
 	if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
 		dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
 
-#ifdef CONFIG_MTD_PARTITIONS
 	err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0);
 	if (err > 0)
-		add_mtd_partitions(mtd, onenand->parts, err);
+		mtd_device_register(mtd, onenand->parts, err);
 	else if (err <= 0 && pdata && pdata->parts)
-		add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
+		mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
 	else
-#endif
-		err = add_mtd_device(mtd);
+		err = mtd_device_register(mtd, NULL, 0);
 
 	platform_set_drvdata(pdev, mtd);
 
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 9aa81584c8a2..941bc3c05d6e 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -365,7 +365,7 @@ static int gluebi_create(struct ubi_device_info *di,
 			 vi->vol_id);
 	mutex_unlock(&devices_mutex);
 
-	if (add_mtd_device(mtd)) {
+	if (mtd_device_register(mtd, NULL, 0)) {
 		err_msg("cannot add MTD device");
 		kfree(mtd->name);
 		kfree(gluebi);
@@ -407,7 +407,7 @@ static int gluebi_remove(struct ubi_volume_info *vi)
 		return err;
 
 	mtd = &gluebi->mtd;
-	err = del_mtd_device(mtd);
+	err = mtd_device_unregister(mtd);
 	if (err) {
 		err_msg("cannot remove fake MTD device %d, UBI device %d, "
 			"volume %d, error %d", mtd->index, gluebi->ubi_num,
@@ -524,7 +524,7 @@ static void __exit ubi_gluebi_exit(void)
 		int err;
 		struct mtd_info *mtd = &gluebi->mtd;
 
-		err = del_mtd_device(mtd);
+		err = mtd_device_unregister(mtd);
 		if (err)
 			err_msg("error %d while removing gluebi MTD device %d, "
 				"UBI device %d, volume %d - ignoring", err,
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index e646bfce2d84..b6304486f244 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -216,7 +216,7 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
 	int rc;
 
 	for (;;) {
-		rc = del_mtd_device(&part->mtd);
+		rc = mtd_device_unregister(&part->mtd);
 		if (rc != -EBUSY)
 			break;
 		ssleep(1);
@@ -268,7 +268,7 @@ static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
 		part->mtd.write = efx_mtd->ops->write;
 		part->mtd.sync = efx_mtd_sync;
 
-		if (add_mtd_device(&part->mtd))
+		if (mtd_device_register(&part->mtd, NULL, 0))
 			goto fail;
 	}
 
@@ -280,7 +280,7 @@ fail:
 		--part;
 		efx_mtd_remove_partition(part);
 	}
-	/* add_mtd_device() returns 1 if the MTD table is full */
+	/* mtd_device_register() returns 1 if the MTD table is full */
 	return -ENOMEM;
 }
 
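
The unregister path above keeps its retry loop because mtd_device_unregister(), like del_mtd_device() before it, refuses with -EBUSY while the device still has users. In isolation (a sketch of the visible pattern, not new sfc code):

	static void remove_mtd_retrying(struct mtd_info *mtd)
	{
		int rc;

		for (;;) {
			rc = mtd_device_unregister(mtd);
			if (rc != -EBUSY)
				break;
			ssleep(1);	/* a user still holds the device */
		}
	}
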
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0cb0b0632672..f6853247a620 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -609,7 +609,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * before it gets out of hand.  Naturally, this wastes entries. */
 	if (capacity < 2+MAX_SKB_FRAGS) {
 		netif_stop_queue(dev);
-		if (unlikely(!virtqueue_enable_cb(vi->svq))) {
+		if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
 			/* More just got used, free them then recheck. */
 			capacity += free_old_xmit_skbs(vi);
 			if (capacity >= 2+MAX_SKB_FRAGS) {
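
virtqueue_enable_cb_delayed() re-arms the completion callback like virtqueue_enable_cb(), but hints the host to hold the interrupt until a sizeable batch of buffers has been used instead of firing on the very next one, which cuts the interrupt rate when the tx queue stalls under load. The surrounding stop/recheck dance is unchanged; condensed (sketch, restart path abbreviated):

	if (capacity < 2 + MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
			/* Buffers were reclaimed meanwhile, so the arm
			 * failed: recheck and possibly restart the queue. */
			capacity += free_old_xmit_skbs(vi);
			if (capacity >= 2 + MAX_SKB_FRAGS)
				netif_start_queue(dev);
		}
	}
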
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 8b63a691a9ed..65200af29c52 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -670,7 +670,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
 
 	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
 
-	if (depth != 1 ||
+	if (depth != 1 || !data ||
 	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
 		return 0;
 
@@ -679,16 +679,16 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
 	/* Retrieve command line */
 	p = of_get_flat_dt_prop(node, "bootargs", &l);
 	if (p != NULL && l > 0)
-		strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
+		strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE));
 
 #ifdef CONFIG_CMDLINE
 #ifndef CONFIG_CMDLINE_FORCE
 	if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
 #endif
-		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+		strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
 #endif /* CONFIG_CMDLINE */
 
-	pr_debug("Command line is: %s\n", cmd_line);
+	pr_debug("Command line is: %s\n", (char*)data);
 
 	/* break now */
 	return 1;
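
With this change early_init_dt_scan_chosen() writes the command line through the opaque data cookie that of_scan_flat_dt() passes along, rather than into the global cmd_line, so each architecture supplies its own buffer (and a NULL cookie now skips the node). The implied caller-side shape (hedged sketch; the function and buffer names vary by architecture):

	void __init early_init_devtree(void *params)
	{
		initial_boot_params = params;
		/* boot_command_line arrives as the 'data' argument */
		of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
	}
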
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
index 4e70749f8d16..a8d5bb3cba89 100644
--- a/drivers/oprofile/event_buffer.h
+++ b/drivers/oprofile/event_buffer.h
@@ -11,7 +11,7 @@
 #define EVENT_BUFFER_H
 
 #include <linux/types.h>
-#include <asm/mutex.h>
+#include <linux/mutex.h>
 
 int alloc_event_buffer(void);
 
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index f9bda64fcd1b..dccd8636095c 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -14,7 +14,7 @@
 #include <linux/moduleparam.h>
 #include <linux/workqueue.h>
 #include <linux/time.h>
-#include <asm/mutex.h>
+#include <linux/mutex.h>
 
 #include "oprof.h"
 #include "event_buffer.h"
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 12e02bf92c4a..3dc9befa5aec 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -698,12 +698,7 @@ int __init detect_intel_iommu(void)
 	{
 #ifdef CONFIG_INTR_REMAP
 		struct acpi_table_dmar *dmar;
-		/*
-		 * for now we will disable dma-remapping when interrupt
-		 * remapping is enabled.
-		 * When support for queued invalidation for IOTLB invalidation
-		 * is added, we will not need this any more.
-		 */
+
 		dmar = (struct acpi_table_dmar *) dmar_tbl;
 		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
 			printk(KERN_INFO
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 6af6b628175b..59f17acf7f68 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -47,6 +47,8 @@
 #define ROOT_SIZE		VTD_PAGE_SIZE
 #define CONTEXT_SIZE		VTD_PAGE_SIZE
 
+#define IS_BRIDGE_HOST_DEVICE(pdev) \
+			    ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
@@ -116,6 +118,11 @@ static inline unsigned long align_to_level(unsigned long pfn, int level)
 	return (pfn + level_size(level) - 1) & level_mask(level);
 }
 
+static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
+{
+	return 1 << ((lvl - 1) * LEVEL_STRIDE);
+}
+
 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
    are never going to work. */
 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
@@ -143,6 +150,12 @@ static void __init check_tylersburg_isoch(void);
 static int rwbf_quirk;
 
 /*
+ * set to 1 to panic kernel if can't successfully enable VT-d
+ * (used when kernel is launched w/ TXT)
+ */
+static int force_on = 0;
+
+/*
  * 0: Present
  * 1-11: Reserved
  * 12-63: Context Ptr (12 - (haw-1))
@@ -338,6 +351,9 @@ struct dmar_domain {
 	int		iommu_coherency;/* indicate coherency of iommu access */
 	int		iommu_snooping; /* indicate snooping control feature*/
 	int		iommu_count;	/* reference count of iommu */
+	int		iommu_superpage;/* Level of superpages supported:
+					   0 == 4KiB (no superpages), 1 == 2MiB,
+					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
 	spinlock_t	iommu_lock;	/* protect iommu set in domain */
 	u64		max_addr;	/* maximum mapped address */
 };
@@ -387,6 +403,7 @@ int dmar_disabled = 1;
 static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
+static int intel_iommu_superpage = 1;
 
 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
 static DEFINE_SPINLOCK(device_domain_lock);
@@ -417,6 +434,10 @@ static int __init intel_iommu_setup(char *str)
 			printk(KERN_INFO
 				"Intel-IOMMU: disable batched IOTLB flush\n");
 			intel_iommu_strict = 1;
+		} else if (!strncmp(str, "sp_off", 6)) {
+			printk(KERN_INFO
+				"Intel-IOMMU: disable supported super page\n");
+			intel_iommu_superpage = 0;
 		}
 
 		str += strcspn(str, ",");
@@ -555,11 +576,32 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
 	}
 }
 
+static void domain_update_iommu_superpage(struct dmar_domain *domain)
+{
+	int i, mask = 0xf;
+
+	if (!intel_iommu_superpage) {
+		domain->iommu_superpage = 0;
+		return;
+	}
+
+	domain->iommu_superpage = 4; /* 1TiB */
+
+	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
+		mask |= cap_super_page_val(g_iommus[i]->cap);
+		if (!mask) {
+			break;
+		}
+	}
+	domain->iommu_superpage = fls(mask);
+}
+
 /* Some capabilities may be different across iommus */
 static void domain_update_iommu_cap(struct dmar_domain *domain)
 {
 	domain_update_iommu_coherency(domain);
 	domain_update_iommu_snooping(domain);
+	domain_update_iommu_superpage(domain);
 }
 
 static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
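
domain_update_iommu_superpage() reduces the per-IOMMU SLLPS capability bits (bit 0 = 2MiB, bit 1 = 1GiB, and so on) folded over the domain's IOMMUs to a single level via fls(), with the sp_off boot option forcing level 0. A worked example of the fls() step alone (standalone sketch, with a portable stand-in for the kernel's fls()):

	#include <stdio.h>

	static int fls_(unsigned int x)		/* highest set bit, 1-based */
	{
		int r = 0;
		while (x) { r++; x >>= 1; }
		return r;
	}

	int main(void)
	{
		unsigned int sllps = 0x3;	/* 2MiB and 1GiB supported */
		printf("iommu_superpage = %d\n", fls_(sllps));	/* 2 */
		return 0;
	}
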
@@ -689,23 +731,31 @@ out:
 }
 
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
-				      unsigned long pfn)
+				      unsigned long pfn, int large_level)
 {
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	struct dma_pte *parent, *pte = NULL;
 	int level = agaw_to_level(domain->agaw);
-	int offset;
+	int offset, target_level;
 
 	BUG_ON(!domain->pgd);
 	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
 	parent = domain->pgd;
 
+	/* Search pte */
+	if (!large_level)
+		target_level = 1;
+	else
+		target_level = large_level;
+
 	while (level > 0) {
 		void *tmp_page;
 
 		offset = pfn_level_offset(pfn, level);
 		pte = &parent[offset];
-		if (level == 1)
+		if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
+			break;
+		if (level == target_level)
 			break;
 
 		if (!dma_pte_present(pte)) {
@@ -733,10 +783,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 	return pte;
 }
 
+
 /* return address's pte at specific level */
 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
 					 unsigned long pfn,
-					 int level)
+					 int level, int *large_page)
 {
 	struct dma_pte *parent, *pte = NULL;
 	int total = agaw_to_level(domain->agaw);
@@ -749,8 +800,16 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
 		if (level == total)
 			return pte;
 
-		if (!dma_pte_present(pte))
+		if (!dma_pte_present(pte)) {
+			*large_page = total;
 			break;
+		}
+
+		if (pte->val & DMA_PTE_LARGE_PAGE) {
+			*large_page = total;
+			return pte;
+		}
+
 		parent = phys_to_virt(dma_pte_addr(pte));
 		total--;
 	}
@@ -763,6 +822,7 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 				unsigned long last_pfn)
 {
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+	unsigned int large_page = 1;
 	struct dma_pte *first_pte, *pte;
 
 	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
@@ -771,14 +831,15 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 
 	/* we don't need lock here; nobody else touches the iova range */
 	do {
-		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
+		large_page = 1;
+		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
 		if (!pte) {
-			start_pfn = align_to_level(start_pfn + 1, 2);
+			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
 			continue;
 		}
 		do {
 			dma_clear_pte(pte);
-			start_pfn++;
+			start_pfn += lvl_to_nr_pages(large_page);
 			pte++;
 		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));
 
@@ -798,6 +859,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 	int total = agaw_to_level(domain->agaw);
 	int level;
 	unsigned long tmp;
+	int large_page = 2;
 
 	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
 	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -813,7 +875,10 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 		return;
 
 	do {
-		first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
+		large_page = level;
+		first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
+		if (large_page > level)
+			level = large_page + 1;
 		if (!pte) {
 			tmp = align_to_level(tmp + 1, level + 1);
 			continue;
@@ -1397,6 +1462,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 	else
 		domain->iommu_snooping = 0;
 
+	domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
 	domain->iommu_count = 1;
 	domain->nid = iommu->node;
 
@@ -1417,6 +1483,10 @@ static void domain_exit(struct dmar_domain *domain)
 	if (!domain)
 		return;
 
+	/* Flush any lazy unmaps that may reference this domain */
+	if (!intel_iommu_strict)
+		flush_unmaps_timeout(0);
+
 	domain_remove_dev_info(domain);
 	/* destroy iovas */
 	put_iova_domain(&domain->iovad);
@@ -1648,6 +1718,34 @@ static inline unsigned long aligned_nrpages(unsigned long host_addr,
 	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
 }
 
+/* Return largest possible superpage level for a given mapping */
+static inline int hardware_largepage_caps(struct dmar_domain *domain,
+					  unsigned long iov_pfn,
+					  unsigned long phy_pfn,
+					  unsigned long pages)
+{
+	int support, level = 1;
+	unsigned long pfnmerge;
+
+	support = domain->iommu_superpage;
+
+	/* To use a large page, the virtual *and* physical addresses
+	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
+	   of them will mean we have to use smaller pages. So just
+	   merge them and check both at once. */
+	pfnmerge = iov_pfn | phy_pfn;
+
+	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
+		pages >>= VTD_STRIDE_SHIFT;
+		if (!pages)
+			break;
+		pfnmerge >>= VTD_STRIDE_SHIFT;
+		level++;
+		support--;
+	}
+	return level;
+}
+
 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			    struct scatterlist *sg, unsigned long phys_pfn,
 			    unsigned long nr_pages, int prot)
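
hardware_largepage_caps() merges the IOVA and physical pfns so a single alignment test covers both, then climbs one level per 9-bit stride while alignment, remaining length, and hardware support all hold. A worked trace (sketch; 511 stands in for the low bits masked off by VTD_STRIDE_MASK):

	static int largepage_level_example(void)
	{
		unsigned long pfnmerge = 0x200;	/* iov_pfn | phy_pfn, 2MiB aligned */
		unsigned long pages = 1024;	/* 4MiB worth of 4KiB pages */
		int support = 2, level = 1;	/* domain allows up to 1GiB */

		while (support && !(pfnmerge & 511)) {
			pages >>= 9;
			if (!pages)
				break;
			pfnmerge >>= 9;	/* now 1: 2MiB- but not 1GiB-aligned */
			level++;
			support--;
		}
		return level;		/* 2, i.e. map with 2MiB PTEs */
	}
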
@@ -1656,6 +1754,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1656 phys_addr_t uninitialized_var(pteval); 1754 phys_addr_t uninitialized_var(pteval);
1657 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; 1755 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1658 unsigned long sg_res; 1756 unsigned long sg_res;
1757 unsigned int largepage_lvl = 0;
1758 unsigned long lvl_pages = 0;
1659 1759
1660 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width); 1760 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1661 1761
@@ -1671,7 +1771,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1671 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; 1771 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1672 } 1772 }
1673 1773
1674 while (nr_pages--) { 1774 while (nr_pages > 0) {
1675 uint64_t tmp; 1775 uint64_t tmp;
1676 1776
1677 if (!sg_res) { 1777 if (!sg_res) {
@@ -1679,11 +1779,21 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1679 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; 1779 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1680 sg->dma_length = sg->length; 1780 sg->dma_length = sg->length;
1681 pteval = page_to_phys(sg_page(sg)) | prot; 1781 pteval = page_to_phys(sg_page(sg)) | prot;
1782 phys_pfn = pteval >> VTD_PAGE_SHIFT;
1682 } 1783 }
1784
1683 if (!pte) { 1785 if (!pte) {
1684 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn); 1786 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1787
1788 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
1685 if (!pte) 1789 if (!pte)
1686 return -ENOMEM; 1790 return -ENOMEM;
1791 /* It is large page*/
1792 if (largepage_lvl > 1)
1793 pteval |= DMA_PTE_LARGE_PAGE;
1794 else
1795 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
1796
1687 } 1797 }
1688 /* We don't need lock here, nobody else 1798 /* We don't need lock here, nobody else
1689 * touches the iova range 1799 * touches the iova range
@@ -1699,16 +1809,38 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1699 } 1809 }
1700 WARN_ON(1); 1810 WARN_ON(1);
1701 } 1811 }
1812
1813 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1814
1815 BUG_ON(nr_pages < lvl_pages);
1816 BUG_ON(sg_res < lvl_pages);
1817
1818 nr_pages -= lvl_pages;
1819 iov_pfn += lvl_pages;
1820 phys_pfn += lvl_pages;
1821 pteval += lvl_pages * VTD_PAGE_SIZE;
1822 sg_res -= lvl_pages;
1823
1824 /* If the next PTE would be the first in a new page, then we
1825 need to flush the cache on the entries we've just written.
1826 And then we'll need to recalculate 'pte', so clear it and
1827 let it get set again in the if (!pte) block above.
1828
1829 If we're done (!nr_pages) we need to flush the cache too.
1830
1831 Also if we've been setting superpages, we may need to
1832 recalculate 'pte' and switch back to smaller pages for the
1833 end of the mapping, if the trailing size is not enough to
1834 use another superpage (i.e. sg_res < lvl_pages). */
1702 pte++; 1835 pte++;
1703 if (!nr_pages || first_pte_in_page(pte)) { 1836 if (!nr_pages || first_pte_in_page(pte) ||
1837 (largepage_lvl > 1 && sg_res < lvl_pages)) {
1704 domain_flush_cache(domain, first_pte, 1838 domain_flush_cache(domain, first_pte,
1705 (void *)pte - (void *)first_pte); 1839 (void *)pte - (void *)first_pte);
1706 pte = NULL; 1840 pte = NULL;
1707 } 1841 }
1708 iov_pfn++; 1842
1709 pteval += VTD_PAGE_SIZE; 1843 if (!sg_res && nr_pages)
1710 sg_res--;
1711 if (!sg_res)
1712 sg = sg_next(sg); 1844 sg = sg_next(sg);
1713 } 1845 }
1714 return 0; 1846 return 0;
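
The bookkeeping in the loop above steps by whole superpages. Assuming lvl_to_nr_pages(lvl) is 1UL << ((lvl - 1) * 9) — an assumption matching the stride used by hardware_largepage_caps(); the real helper is defined elsewhere in intel-iommu.c — one level-2 PTE accounts for 512 4KiB pages at a time. A minimal sketch of that accounting:

#include <assert.h>

#define VTD_PAGE_SHIFT   12
#define VTD_PAGE_SIZE    (1UL << VTD_PAGE_SHIFT)
#define VTD_STRIDE_SHIFT 9

/* assumed definition; the real helper lives in intel-iommu.c */
static unsigned long lvl_to_nr_pages(unsigned int lvl)
{
        return 1UL << ((lvl - 1) * VTD_STRIDE_SHIFT);
}

int main(void)
{
        unsigned long nr_pages = 1024, iov_pfn = 0, pteval = 0;
        unsigned long lvl_pages = lvl_to_nr_pages(2);   /* 512 pages per 2MiB PTE */

        nr_pages -= lvl_pages;
        iov_pfn  += lvl_pages;
        pteval   += lvl_pages * VTD_PAGE_SIZE;          /* next physical chunk */

        assert(nr_pages == 512 && iov_pfn == 512 && pteval == 2UL << 20);
        return 0;
}
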
@@ -2016,7 +2148,7 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2016 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2148 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2017 return 0; 2149 return 0;
2018 return iommu_prepare_identity_map(pdev, rmrr->base_address, 2150 return iommu_prepare_identity_map(pdev, rmrr->base_address,
2019 rmrr->end_address + 1); 2151 rmrr->end_address);
2020} 2152}
2021 2153
2022#ifdef CONFIG_DMAR_FLOPPY_WA 2154#ifdef CONFIG_DMAR_FLOPPY_WA
@@ -2030,7 +2162,7 @@ static inline void iommu_prepare_isa(void)
2030 return; 2162 return;
2031 2163
2032 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n"); 2164 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2033 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024); 2165 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
2034 2166
2035 if (ret) 2167 if (ret)
2036 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " 2168 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
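
The two hunks above move the iommu_prepare_identity_map() callers to an inclusive end address: rmrr->end_address already names the last byte of the region, and the ISA unity map now ends at 16MiB - 1 rather than 16MiB. A sketch of the page count this convention yields (the pfn arithmetic is an illustration, not the helper's exact body):

#include <stdio.h>

#define VTD_PAGE_SHIFT 12

/* inclusive-end convention: [start, end] are both inside the mapping */
static unsigned long nr_ident_pages(unsigned long long start,
                                    unsigned long long end)
{
        return (end >> VTD_PAGE_SHIFT) - (start >> VTD_PAGE_SHIFT) + 1;
}

int main(void)
{
        /* 0 .. 16MiB-1 covers exactly 4096 4KiB pages */
        printf("%lu\n", nr_ident_pages(0, 16 * 1024 * 1024 - 1));
        return 0;
}
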
@@ -2106,10 +2238,10 @@ static int identity_mapping(struct pci_dev *pdev)
2106 if (likely(!iommu_identity_mapping)) 2238 if (likely(!iommu_identity_mapping))
2107 return 0; 2239 return 0;
2108 2240
2241 info = pdev->dev.archdata.iommu;
2242 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2243 return (info->domain == si_domain);
2109 2244
2110 list_for_each_entry(info, &si_domain->devices, link)
2111 if (info->dev == pdev)
2112 return 1;
2113 return 0; 2245 return 0;
2114} 2246}
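
The rewritten identity_mapping() is O(1): it follows the device's cached archdata pointer instead of walking si_domain's device list. A schematic of the pattern, with illustrative abbreviated types and a sentinel assumed to behave like DUMMY_DEVICE_DOMAIN_INFO:

#include <stdio.h>

struct domain   { int id; };
struct dev_info { struct domain *domain; };

#define DUMMY_INFO ((struct dev_info *)-1)      /* sentinel, as in the driver */

static int is_identity_mapped(struct dev_info *info, struct domain *si_domain)
{
        if (info && info != DUMMY_INFO)         /* cached back-pointer */
                return info->domain == si_domain;
        return 0;                               /* unknown device */
}

int main(void)
{
        struct domain si = { 0 };
        struct dev_info info = { &si };

        printf("%d\n", is_identity_mapped(&info, &si));  /* 1 */
        printf("%d\n", is_identity_mapped(NULL, &si));   /* 0 */
        return 0;
}
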
2115 2247
@@ -2187,8 +2319,19 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2187 * Assume that they will -- if they turn out not to be, then we can 2319 * Assume that they will -- if they turn out not to be, then we can
2188 * take them out of the 1:1 domain later. 2320 * take them out of the 1:1 domain later.
2189 */ 2321 */
2190 if (!startup) 2322 if (!startup) {
2191 return pdev->dma_mask > DMA_BIT_MASK(32); 2323 /*
2324 * If the device's dma_mask is less than the system's memory
2325 * size then this is not a candidate for identity mapping.
2326 */
2327 u64 dma_mask = pdev->dma_mask;
2328
2329 if (pdev->dev.coherent_dma_mask &&
2330 pdev->dev.coherent_dma_mask < dma_mask)
2331 dma_mask = pdev->dev.coherent_dma_mask;
2332
2333 return dma_mask >= dma_get_required_mask(&pdev->dev);
2334 }
2192 2335
2193 return 1; 2336 return 1;
2194} 2337}
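
The late (!startup) check above refuses identity mapping when the device cannot address all of memory through either of its masks. A sketch of the comparison, with dma_get_required_mask() faked as a constant 36-bit mask (an assumption standing in for the real per-system value):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* hypothetical stand-in for dma_get_required_mask(&pdev->dev) */
static uint64_t required_mask(void) { return DMA_BIT_MASK(36); /* e.g. 64GiB RAM */ }

static bool suitable_for_identity_map(uint64_t dma_mask, uint64_t coherent_mask)
{
        /* be conservative: judge by the smaller of the two masks */
        if (coherent_mask && coherent_mask < dma_mask)
                dma_mask = coherent_mask;
        return dma_mask >= required_mask();
}

int main(void)
{
        /* a 32-bit-only device cannot reach all of a >4GiB system 1:1 */
        printf("%d\n", suitable_for_identity_map(DMA_BIT_MASK(32), 0));
        printf("%d\n", suitable_for_identity_map(DMA_BIT_MASK(64), DMA_BIT_MASK(64)));
        return 0;
}
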
@@ -2203,6 +2346,9 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
2203 return -EFAULT; 2346 return -EFAULT;
2204 2347
2205 for_each_pci_dev(pdev) { 2348 for_each_pci_dev(pdev) {
2349 /* Skip Host/PCI Bridge devices */
2350 if (IS_BRIDGE_HOST_DEVICE(pdev))
2351 continue;
2206 if (iommu_should_identity_map(pdev, 1)) { 2352 if (iommu_should_identity_map(pdev, 1)) {
2207 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n", 2353 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2208 hw ? "hardware" : "software", pci_name(pdev)); 2354 hw ? "hardware" : "software", pci_name(pdev));
@@ -2218,7 +2364,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
2218 return 0; 2364 return 0;
2219} 2365}
2220 2366
2221static int __init init_dmars(int force_on) 2367static int __init init_dmars(void)
2222{ 2368{
2223 struct dmar_drhd_unit *drhd; 2369 struct dmar_drhd_unit *drhd;
2224 struct dmar_rmrr_unit *rmrr; 2370 struct dmar_rmrr_unit *rmrr;
@@ -2592,8 +2738,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2592 iommu = domain_get_iommu(domain); 2738 iommu = domain_get_iommu(domain);
2593 size = aligned_nrpages(paddr, size); 2739 size = aligned_nrpages(paddr, size);
2594 2740
2595 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), 2741 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
2596 pdev->dma_mask);
2597 if (!iova) 2742 if (!iova)
2598 goto error; 2743 goto error;
2599 2744
@@ -3118,7 +3263,17 @@ static int init_iommu_hw(void)
3118 if (iommu->qi) 3263 if (iommu->qi)
3119 dmar_reenable_qi(iommu); 3264 dmar_reenable_qi(iommu);
3120 3265
3121 for_each_active_iommu(iommu, drhd) { 3266 for_each_iommu(iommu, drhd) {
3267 if (drhd->ignored) {
3268 /*
3269 * we always have to disable PMRs or DMA may fail on
3270 * this device
3271 */
3272 if (force_on)
3273 iommu_disable_protect_mem_regions(iommu);
3274 continue;
3275 }
3276
3122 iommu_flush_write_buffer(iommu); 3277 iommu_flush_write_buffer(iommu);
3123 3278
3124 iommu_set_root_entry(iommu); 3279 iommu_set_root_entry(iommu);
@@ -3127,7 +3282,8 @@ static int init_iommu_hw(void)
3127 DMA_CCMD_GLOBAL_INVL); 3282 DMA_CCMD_GLOBAL_INVL);
3128 iommu->flush.flush_iotlb(iommu, 0, 0, 0, 3283 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3129 DMA_TLB_GLOBAL_FLUSH); 3284 DMA_TLB_GLOBAL_FLUSH);
3130 iommu_enable_translation(iommu); 3285 if (iommu_enable_translation(iommu))
3286 return 1;
3131 iommu_disable_protect_mem_regions(iommu); 3287 iommu_disable_protect_mem_regions(iommu);
3132 } 3288 }
3133 3289
@@ -3194,7 +3350,10 @@ static void iommu_resume(void)
3194 unsigned long flag; 3350 unsigned long flag;
3195 3351
3196 if (init_iommu_hw()) { 3352 if (init_iommu_hw()) {
3197 WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); 3353 if (force_on)
3354 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3355 else
3356 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3198 return; 3357 return;
3199 } 3358 }
3200 3359
@@ -3271,7 +3430,6 @@ static struct notifier_block device_nb = {
3271int __init intel_iommu_init(void) 3430int __init intel_iommu_init(void)
3272{ 3431{
3273 int ret = 0; 3432 int ret = 0;
3274 int force_on = 0;
3275 3433
3276 /* VT-d is required for a TXT/tboot launch, so enforce that */ 3434 /* VT-d is required for a TXT/tboot launch, so enforce that */
3277 force_on = tboot_force_iommu(); 3435 force_on = tboot_force_iommu();
@@ -3309,7 +3467,7 @@ int __init intel_iommu_init(void)
3309 3467
3310 init_no_remapping_devices(); 3468 init_no_remapping_devices();
3311 3469
3312 ret = init_dmars(force_on); 3470 ret = init_dmars();
3313 if (ret) { 3471 if (ret) {
3314 if (force_on) 3472 if (force_on)
3315 panic("tboot: Failed to initialize DMARs\n"); 3473 panic("tboot: Failed to initialize DMARs\n");
@@ -3380,8 +3538,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
3380 spin_lock_irqsave(&device_domain_lock, flags); 3538 spin_lock_irqsave(&device_domain_lock, flags);
3381 list_for_each_safe(entry, tmp, &domain->devices) { 3539 list_for_each_safe(entry, tmp, &domain->devices) {
3382 info = list_entry(entry, struct device_domain_info, link); 3540 info = list_entry(entry, struct device_domain_info, link);
3383 /* No need to compare PCI domain; it has to be the same */ 3541 if (info->segment == pci_domain_nr(pdev->bus) &&
3384 if (info->bus == pdev->bus->number && 3542 info->bus == pdev->bus->number &&
3385 info->devfn == pdev->devfn) { 3543 info->devfn == pdev->devfn) {
3386 list_del(&info->link); 3544 list_del(&info->link);
3387 list_del(&info->global); 3545 list_del(&info->global);
@@ -3419,10 +3577,13 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
3419 domain_update_iommu_cap(domain); 3577 domain_update_iommu_cap(domain);
3420 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); 3578 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3421 3579
3422 spin_lock_irqsave(&iommu->lock, tmp_flags); 3580 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3423 clear_bit(domain->id, iommu->domain_ids); 3581 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3424 iommu->domains[domain->id] = NULL; 3582 spin_lock_irqsave(&iommu->lock, tmp_flags);
3425 spin_unlock_irqrestore(&iommu->lock, tmp_flags); 3583 clear_bit(domain->id, iommu->domain_ids);
3584 iommu->domains[domain->id] = NULL;
3585 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3586 }
3426 } 3587 }
3427 3588
3428 spin_unlock_irqrestore(&device_domain_lock, flags); 3589 spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -3505,6 +3666,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
3505 domain->iommu_count = 0; 3666 domain->iommu_count = 0;
3506 domain->iommu_coherency = 0; 3667 domain->iommu_coherency = 0;
3507 domain->iommu_snooping = 0; 3668 domain->iommu_snooping = 0;
3669 domain->iommu_superpage = 0;
3508 domain->max_addr = 0; 3670 domain->max_addr = 0;
3509 domain->nid = -1; 3671 domain->nid = -1;
3510 3672
@@ -3720,7 +3882,7 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3720 struct dma_pte *pte; 3882 struct dma_pte *pte;
3721 u64 phys = 0; 3883 u64 phys = 0;
3722 3884
3723 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT); 3885 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
3724 if (pte) 3886 if (pte)
3725 phys = dma_pte_addr(pte); 3887 phys = dma_pte_addr(pte);
3726 3888
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 9606e599a475..c5c274ab5c5a 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -63,8 +63,16 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
63 curr = iovad->cached32_node; 63 curr = iovad->cached32_node;
64 cached_iova = container_of(curr, struct iova, node); 64 cached_iova = container_of(curr, struct iova, node);
65 65
66 if (free->pfn_lo >= cached_iova->pfn_lo) 66 if (free->pfn_lo >= cached_iova->pfn_lo) {
67 iovad->cached32_node = rb_next(&free->node); 67 struct rb_node *node = rb_next(&free->node);
68 struct iova *iova = container_of(node, struct iova, node);
69
70 /* only cache if it's below 32bit pfn */
71 if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
72 iovad->cached32_node = node;
73 else
74 iovad->cached32_node = NULL;
75 }
68} 76}
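
The fix above keeps cached32_node as a hint only while it still points below the 32-bit boundary; otherwise later 32-bit allocations would resume scanning from a node they can never use. Stripped of the rb-tree plumbing, the guard is (sketch, struct layout abbreviated):

#include <stdio.h>

struct iova { unsigned long pfn_lo; };

/* keep the successor as the 32-bit search hint only if it is itself
 * below dma_32bit_pfn */
static const struct iova *update_cached32(const struct iova *next,
                                          unsigned long dma_32bit_pfn)
{
        if (next && next->pfn_lo < dma_32bit_pfn)
                return next;            /* still a valid hint */
        return NULL;                    /* fall back to a full search */
}

int main(void)
{
        struct iova low = { 0xfffff }, high = { 0x200000 };

        printf("%p\n", (void *)update_cached32(&low,  0x100000));
        printf("%p\n", (void *)update_cached32(&high, 0x100000)); /* NULL */
        return 0;
}
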
69 77
70/* Computes the padding size required, to make the 78/* Computes the padding size required, to make the
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 7c3b18e78cee..d36f41ea8cbf 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -195,6 +195,8 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
195 return PCI_D2; 195 return PCI_D2;
196 case ACPI_STATE_D3: 196 case ACPI_STATE_D3:
197 return PCI_D3hot; 197 return PCI_D3hot;
198 case ACPI_STATE_D3_COLD:
199 return PCI_D3cold;
198 } 200 }
199 return PCI_POWER_ERROR; 201 return PCI_POWER_ERROR;
200} 202}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 5cb999b50f95..45e0191c35dd 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -39,7 +39,7 @@ config ACER_WMI
39 39
40config ACERHDF 40config ACERHDF
41 tristate "Acer Aspire One temperature and fan driver" 41 tristate "Acer Aspire One temperature and fan driver"
42 depends on THERMAL && THERMAL_HWMON && ACPI 42 depends on THERMAL && ACPI
43 ---help--- 43 ---help---
44 This is a driver for Acer Aspire One netbooks. It allows to access 44 This is a driver for Acer Aspire One netbooks. It allows to access
45 the temperature sensor and to control the fan. 45 the temperature sensor and to control the fan.
@@ -760,4 +760,13 @@ config MXM_WMI
760 MXM is a standard for laptop graphics cards, the WMI interface 760 MXM is a standard for laptop graphics cards, the WMI interface
761 is required for switchable nvidia graphics machines 761 is required for switchable nvidia graphics machines
762 762
763config INTEL_OAKTRAIL
764 tristate "Intel Oaktrail Platform Extras"
765 depends on ACPI
766 depends on RFKILL && BACKLIGHT_CLASS_DEVICE
767 ---help---
768 Intel Oaktrail platforms need this driver to provide interfaces to
769 enable/disable devices such as the camera, WiFi, and BT. If in doubt,
770 say Y here; it will only load on supported platforms.
771
763endif # X86_PLATFORM_DEVICES 772endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index a7ab3bc7b3a1..afc1f832aa67 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -41,5 +41,6 @@ obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o
41obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o 41obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o
42obj-$(CONFIG_IBM_RTL) += ibm_rtl.o 42obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
43obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o 43obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
44obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
45obj-$(CONFIG_MXM_WMI) += mxm-wmi.o 44obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
45obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
46obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index ac4e7f83ce6c..005417bd429e 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -98,13 +98,26 @@ enum acer_wmi_event_ids {
98 98
99static const struct key_entry acer_wmi_keymap[] = { 99static const struct key_entry acer_wmi_keymap[] = {
100 {KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */ 100 {KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */
101 {KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */
101 {KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */ 102 {KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */
102 {KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */ 103 {KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */
103 {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */ 104 {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
104 {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */ 105 {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */
105 {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */ 106 {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */
107 {KE_IGNORE, 0x41, {KEY_MUTE} },
108 {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} },
109 {KE_IGNORE, 0x43, {KEY_NEXTSONG} },
110 {KE_IGNORE, 0x44, {KEY_PLAYPAUSE} },
111 {KE_IGNORE, 0x45, {KEY_STOP} },
112 {KE_IGNORE, 0x48, {KEY_VOLUMEUP} },
113 {KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} },
114 {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} },
115 {KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} },
116 {KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
106 {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */ 117 {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
118 {KE_IGNORE, 0x81, {KEY_SLEEP} },
107 {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad On/Off */ 119 {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad On/Off */
120 {KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} },
108 {KE_END, 0} 121 {KE_END, 0}
109}; 122};
110 123
@@ -122,6 +135,7 @@ struct event_return_value {
122 */ 135 */
123#define ACER_WMID3_GDS_WIRELESS (1<<0) /* WiFi */ 136#define ACER_WMID3_GDS_WIRELESS (1<<0) /* WiFi */
124#define ACER_WMID3_GDS_THREEG (1<<6) /* 3G */ 137#define ACER_WMID3_GDS_THREEG (1<<6) /* 3G */
138#define ACER_WMID3_GDS_WIMAX (1<<7) /* WiMAX */
125#define ACER_WMID3_GDS_BLUETOOTH (1<<11) /* BT */ 139#define ACER_WMID3_GDS_BLUETOOTH (1<<11) /* BT */
126 140
127struct lm_input_params { 141struct lm_input_params {
@@ -737,8 +751,11 @@ WMI_execute_u32(u32 method_id, u32 in, u32 *out)
737 751
738 obj = (union acpi_object *) result.pointer; 752 obj = (union acpi_object *) result.pointer;
739 if (obj && obj->type == ACPI_TYPE_BUFFER && 753 if (obj && obj->type == ACPI_TYPE_BUFFER &&
740 obj->buffer.length == sizeof(u32)) { 754 (obj->buffer.length == sizeof(u32) ||
755 obj->buffer.length == sizeof(u64))) {
741 tmp = *((u32 *) obj->buffer.pointer); 756 tmp = *((u32 *) obj->buffer.pointer);
757 } else if (obj->type == ACPI_TYPE_INTEGER) {
758 tmp = (u32) obj->integer.value;
742 } else { 759 } else {
743 tmp = 0; 760 tmp = 0;
744 } 761 }
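
The same tolerance repeats in WMID_set_capabilities() and get_wmid_devices() below: some firmware returns the value as an 8-byte buffer, or as an ACPI integer, so only the low 32 bits are kept. A stand-alone sketch of the extraction:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Sketch: accept a 4- or 8-byte result buffer and keep the low 32 bits,
 * as the driver now does for quirky firmware (assumes a little-endian host). */
static uint32_t extract_u32(const uint8_t *buf, size_t len)
{
        uint64_t v = 0;

        if (len == sizeof(uint32_t) || len == sizeof(uint64_t))
                memcpy(&v, buf, len);
        return (uint32_t)v;
}

int main(void)
{
        uint8_t b8[8] = { 0x78, 0x56, 0x34, 0x12, 0, 0, 0, 0 };

        printf("0x%x\n", (unsigned)extract_u32(b8, sizeof(b8)));  /* 0x12345678 */
        return 0;
}
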
@@ -866,8 +883,11 @@ static acpi_status WMID_set_capabilities(void)
866 883
867 obj = (union acpi_object *) out.pointer; 884 obj = (union acpi_object *) out.pointer;
868 if (obj && obj->type == ACPI_TYPE_BUFFER && 885 if (obj && obj->type == ACPI_TYPE_BUFFER &&
869 obj->buffer.length == sizeof(u32)) { 886 (obj->buffer.length == sizeof(u32) ||
887 obj->buffer.length == sizeof(u64))) {
870 devices = *((u32 *) obj->buffer.pointer); 888 devices = *((u32 *) obj->buffer.pointer);
889 } else if (obj->type == ACPI_TYPE_INTEGER) {
890 devices = (u32) obj->integer.value;
871 } else { 891 } else {
872 kfree(out.pointer); 892 kfree(out.pointer);
873 return AE_ERROR; 893 return AE_ERROR;
@@ -876,7 +896,8 @@ static acpi_status WMID_set_capabilities(void)
876 dmi_walk(type_aa_dmi_decode, NULL); 896 dmi_walk(type_aa_dmi_decode, NULL);
877 if (!has_type_aa) { 897 if (!has_type_aa) {
878 interface->capability |= ACER_CAP_WIRELESS; 898 interface->capability |= ACER_CAP_WIRELESS;
879 interface->capability |= ACER_CAP_THREEG; 899 if (devices & 0x40)
900 interface->capability |= ACER_CAP_THREEG;
880 if (devices & 0x10) 901 if (devices & 0x10)
881 interface->capability |= ACER_CAP_BLUETOOTH; 902 interface->capability |= ACER_CAP_BLUETOOTH;
882 } 903 }
@@ -961,10 +982,12 @@ static void __init acer_commandline_init(void)
961 * These will all fail silently if the value given is invalid, or the 982 * These will all fail silently if the value given is invalid, or the
962 * capability isn't available on the given interface 983 * capability isn't available on the given interface
963 */ 984 */
964 set_u32(mailled, ACER_CAP_MAILLED); 985 if (mailled >= 0)
965 if (!has_type_aa) 986 set_u32(mailled, ACER_CAP_MAILLED);
987 if (!has_type_aa && threeg >= 0)
966 set_u32(threeg, ACER_CAP_THREEG); 988 set_u32(threeg, ACER_CAP_THREEG);
967 set_u32(brightness, ACER_CAP_BRIGHTNESS); 989 if (brightness >= 0)
990 set_u32(brightness, ACER_CAP_BRIGHTNESS);
968} 991}
969 992
970/* 993/*
@@ -1081,7 +1104,7 @@ static acpi_status wmid3_get_device_status(u32 *value, u16 device)
1081 return AE_ERROR; 1104 return AE_ERROR;
1082 } 1105 }
1083 if (obj->buffer.length != 8) { 1106 if (obj->buffer.length != 8) {
1084 pr_warning("Unknown buffer length %d\n", obj->buffer.length); 1107 pr_warn("Unknown buffer length %d\n", obj->buffer.length);
1085 kfree(obj); 1108 kfree(obj);
1086 return AE_ERROR; 1109 return AE_ERROR;
1087 } 1110 }
@@ -1090,8 +1113,8 @@ static acpi_status wmid3_get_device_status(u32 *value, u16 device)
1090 kfree(obj); 1113 kfree(obj);
1091 1114
1092 if (return_value.error_code || return_value.ec_return_value) 1115 if (return_value.error_code || return_value.ec_return_value)
1093 pr_warning("Get Device Status failed: " 1116 pr_warn("Get Device Status failed: 0x%x - 0x%x\n",
1094 "0x%x - 0x%x\n", return_value.error_code, 1117 return_value.error_code,
1095 return_value.ec_return_value); 1118 return_value.ec_return_value);
1096 else 1119 else
1097 *value = !!(return_value.devices & device); 1120 *value = !!(return_value.devices & device);
@@ -1124,6 +1147,114 @@ static acpi_status get_device_status(u32 *value, u32 cap)
1124 } 1147 }
1125} 1148}
1126 1149
1150static acpi_status wmid3_set_device_status(u32 value, u16 device)
1151{
1152 struct wmid3_gds_return_value return_value;
1153 acpi_status status;
1154 union acpi_object *obj;
1155 u16 devices;
1156 struct wmid3_gds_input_param params = {
1157 .function_num = 0x1,
1158 .hotkey_number = 0x01,
1159 .devices = ACER_WMID3_GDS_WIRELESS |
1160 ACER_WMID3_GDS_THREEG |
1161 ACER_WMID3_GDS_WIMAX |
1162 ACER_WMID3_GDS_BLUETOOTH,
1163 };
1164 struct acpi_buffer input = {
1165 sizeof(struct wmid3_gds_input_param),
1166 &params
1167 };
1168 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
1169 struct acpi_buffer output2 = { ACPI_ALLOCATE_BUFFER, NULL };
1170
1171 status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input, &output);
1172 if (ACPI_FAILURE(status))
1173 return status;
1174
1175 obj = output.pointer;
1176
1177 if (!obj)
1178 return AE_ERROR;
1179 else if (obj->type != ACPI_TYPE_BUFFER) {
1180 kfree(obj);
1181 return AE_ERROR;
1182 }
1183 if (obj->buffer.length != 8) {
1184 pr_warn("Unknown buffer length %d\n", obj->buffer.length);
1185 kfree(obj);
1186 return AE_ERROR;
1187 }
1188
1189 return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
1190 kfree(obj);
1191
1192 if (return_value.error_code || return_value.ec_return_value) {
1193 pr_warn("Get Current Device Status failed: 0x%x - 0x%x\n",
1194 return_value.error_code,
1195 return_value.ec_return_value);
1196 return status;
1197 }
1198
1199 devices = return_value.devices;
1200 params.function_num = 0x2;
1201 params.hotkey_number = 0x01;
1202 params.devices = (value) ? (devices | device) : (devices & ~device);
1203
1204 status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &input, &output2);
1205 if (ACPI_FAILURE(status))
1206 return status;
1207
1208 obj = output2.pointer;
1209
1210 if (!obj)
1211 return AE_ERROR;
1212 else if (obj->type != ACPI_TYPE_BUFFER) {
1213 kfree(obj);
1214 return AE_ERROR;
1215 }
1216 if (obj->buffer.length != 4) {
1217 pr_warn("Unknown buffer length %d\n", obj->buffer.length);
1218 kfree(obj);
1219 return AE_ERROR;
1220 }
1221
1222 return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
1223 kfree(obj);
1224
1225 if (return_value.error_code || return_value.ec_return_value)
1226 pr_warn("Set Device Status failed: 0x%x - 0x%x\n",
1227 return_value.error_code,
1228 return_value.ec_return_value);
1229
1230 return status;
1231}
1232
1233static acpi_status set_device_status(u32 value, u32 cap)
1234{
1235 if (wmi_has_guid(WMID_GUID3)) {
1236 u16 device;
1237
1238 switch (cap) {
1239 case ACER_CAP_WIRELESS:
1240 device = ACER_WMID3_GDS_WIRELESS;
1241 break;
1242 case ACER_CAP_BLUETOOTH:
1243 device = ACER_WMID3_GDS_BLUETOOTH;
1244 break;
1245 case ACER_CAP_THREEG:
1246 device = ACER_WMID3_GDS_THREEG;
1247 break;
1248 default:
1249 return AE_ERROR;
1250 }
1251 return wmid3_set_device_status(value, device);
1252
1253 } else {
1254 return set_u32(value, cap);
1255 }
1256}
1257
1127/* 1258/*
1128 * Rfkill devices 1259 * Rfkill devices
1129 */ 1260 */
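
wmid3_set_device_status() above is a read-modify-write on the device bitmask: read the current state, flip one bit, write it back. Stripped of the ACPI plumbing, the update is just (sketch):

#include <stdint.h>
#include <stdio.h>

/* bit positions from the ACER_WMID3_GDS_* definitions above */
#define GDS_WIRELESS  (1u << 0)
#define GDS_BLUETOOTH (1u << 11)

static uint16_t update_devices(uint16_t devices, uint16_t device, int enable)
{
        return enable ? (devices | device) : (devices & ~device);
}

int main(void)
{
        uint16_t devices = GDS_WIRELESS;                 /* current EC state */

        devices = update_devices(devices, GDS_BLUETOOTH, 1);
        devices = update_devices(devices, GDS_WIRELESS, 0);
        printf("0x%x\n", (unsigned)devices);             /* 0x800: BT only */
        return 0;
}
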
@@ -1160,7 +1291,7 @@ static int acer_rfkill_set(void *data, bool blocked)
1160 u32 cap = (unsigned long)data; 1291 u32 cap = (unsigned long)data;
1161 1292
1162 if (rfkill_inited) { 1293 if (rfkill_inited) {
1163 status = set_u32(!blocked, cap); 1294 status = set_device_status(!blocked, cap);
1164 if (ACPI_FAILURE(status)) 1295 if (ACPI_FAILURE(status))
1165 return -ENODEV; 1296 return -ENODEV;
1166 } 1297 }
@@ -1317,7 +1448,7 @@ static void acer_wmi_notify(u32 value, void *context)
1317 1448
1318 status = wmi_get_event_data(value, &response); 1449 status = wmi_get_event_data(value, &response);
1319 if (status != AE_OK) { 1450 if (status != AE_OK) {
1320 pr_warning("bad event status 0x%x\n", status); 1451 pr_warn("bad event status 0x%x\n", status);
1321 return; 1452 return;
1322 } 1453 }
1323 1454
@@ -1326,12 +1457,12 @@ static void acer_wmi_notify(u32 value, void *context)
1326 if (!obj) 1457 if (!obj)
1327 return; 1458 return;
1328 if (obj->type != ACPI_TYPE_BUFFER) { 1459 if (obj->type != ACPI_TYPE_BUFFER) {
1329 pr_warning("Unknown response received %d\n", obj->type); 1460 pr_warn("Unknown response received %d\n", obj->type);
1330 kfree(obj); 1461 kfree(obj);
1331 return; 1462 return;
1332 } 1463 }
1333 if (obj->buffer.length != 8) { 1464 if (obj->buffer.length != 8) {
1334 pr_warning("Unknown buffer length %d\n", obj->buffer.length); 1465 pr_warn("Unknown buffer length %d\n", obj->buffer.length);
1335 kfree(obj); 1466 kfree(obj);
1336 return; 1467 return;
1337 } 1468 }
@@ -1343,7 +1474,7 @@ static void acer_wmi_notify(u32 value, void *context)
1343 case WMID_HOTKEY_EVENT: 1474 case WMID_HOTKEY_EVENT:
1344 if (return_value.device_state) { 1475 if (return_value.device_state) {
1345 u16 device_state = return_value.device_state; 1476 u16 device_state = return_value.device_state;
1346 pr_debug("deivces states: 0x%x\n", device_state); 1477 pr_debug("device state: 0x%x\n", device_state);
1347 if (has_cap(ACER_CAP_WIRELESS)) 1478 if (has_cap(ACER_CAP_WIRELESS))
1348 rfkill_set_sw_state(wireless_rfkill, 1479 rfkill_set_sw_state(wireless_rfkill,
1349 !(device_state & ACER_WMID3_GDS_WIRELESS)); 1480 !(device_state & ACER_WMID3_GDS_WIRELESS));
@@ -1356,11 +1487,11 @@ static void acer_wmi_notify(u32 value, void *context)
1356 } 1487 }
1357 if (!sparse_keymap_report_event(acer_wmi_input_dev, 1488 if (!sparse_keymap_report_event(acer_wmi_input_dev,
1358 return_value.key_num, 1, true)) 1489 return_value.key_num, 1, true))
1359 pr_warning("Unknown key number - 0x%x\n", 1490 pr_warn("Unknown key number - 0x%x\n",
1360 return_value.key_num); 1491 return_value.key_num);
1361 break; 1492 break;
1362 default: 1493 default:
1363 pr_warning("Unknown function number - %d - %d\n", 1494 pr_warn("Unknown function number - %d - %d\n",
1364 return_value.function, return_value.key_num); 1495 return_value.function, return_value.key_num);
1365 break; 1496 break;
1366 } 1497 }
@@ -1389,7 +1520,7 @@ wmid3_set_lm_mode(struct lm_input_params *params,
1389 return AE_ERROR; 1520 return AE_ERROR;
1390 } 1521 }
1391 if (obj->buffer.length != 4) { 1522 if (obj->buffer.length != 4) {
1392 pr_warning("Unknown buffer length %d\n", obj->buffer.length); 1523 pr_warn("Unknown buffer length %d\n", obj->buffer.length);
1393 kfree(obj); 1524 kfree(obj);
1394 return AE_ERROR; 1525 return AE_ERROR;
1395 } 1526 }
@@ -1414,11 +1545,11 @@ static int acer_wmi_enable_ec_raw(void)
1414 status = wmid3_set_lm_mode(&params, &return_value); 1545 status = wmid3_set_lm_mode(&params, &return_value);
1415 1546
1416 if (return_value.error_code || return_value.ec_return_value) 1547 if (return_value.error_code || return_value.ec_return_value)
1417 pr_warning("Enabling EC raw mode failed: " 1548 pr_warn("Enabling EC raw mode failed: 0x%x - 0x%x\n",
1418 "0x%x - 0x%x\n", return_value.error_code, 1549 return_value.error_code,
1419 return_value.ec_return_value); 1550 return_value.ec_return_value);
1420 else 1551 else
1421 pr_info("Enabled EC raw mode"); 1552 pr_info("Enabled EC raw mode\n");
1422 1553
1423 return status; 1554 return status;
1424} 1555}
@@ -1437,9 +1568,9 @@ static int acer_wmi_enable_lm(void)
1437 status = wmid3_set_lm_mode(&params, &return_value); 1568 status = wmid3_set_lm_mode(&params, &return_value);
1438 1569
1439 if (return_value.error_code || return_value.ec_return_value) 1570 if (return_value.error_code || return_value.ec_return_value)
1440 pr_warning("Enabling Launch Manager failed: " 1571 pr_warn("Enabling Launch Manager failed: 0x%x - 0x%x\n",
1441 "0x%x - 0x%x\n", return_value.error_code, 1572 return_value.error_code,
1442 return_value.ec_return_value); 1573 return_value.ec_return_value);
1443 1574
1444 return status; 1575 return status;
1445} 1576}
@@ -1506,8 +1637,11 @@ static u32 get_wmid_devices(void)
1506 1637
1507 obj = (union acpi_object *) out.pointer; 1638 obj = (union acpi_object *) out.pointer;
1508 if (obj && obj->type == ACPI_TYPE_BUFFER && 1639 if (obj && obj->type == ACPI_TYPE_BUFFER &&
1509 obj->buffer.length == sizeof(u32)) { 1640 (obj->buffer.length == sizeof(u32) ||
1641 obj->buffer.length == sizeof(u64))) {
1510 devices = *((u32 *) obj->buffer.pointer); 1642 devices = *((u32 *) obj->buffer.pointer);
1643 } else if (obj->type == ACPI_TYPE_INTEGER) {
1644 devices = (u32) obj->integer.value;
1511 } 1645 }
1512 1646
1513 kfree(out.pointer); 1647 kfree(out.pointer);
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 60f9cfcac93f..fca3489218b7 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -35,10 +35,8 @@
35 35
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/fs.h>
39#include <linux/dmi.h> 38#include <linux/dmi.h>
40#include <acpi/acpi_drivers.h> 39#include <linux/acpi.h>
41#include <linux/sched.h>
42#include <linux/thermal.h> 40#include <linux/thermal.h>
43#include <linux/platform_device.h> 41#include <linux/platform_device.h>
44 42
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index c53b3ff7978a..d65df92e2acc 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -318,7 +318,7 @@ static int acpi_check_handle(acpi_handle handle, const char *method,
318 318
319 if (status != AE_OK) { 319 if (status != AE_OK) {
320 if (ret) 320 if (ret)
321 pr_warning("Error finding %s\n", method); 321 pr_warn("Error finding %s\n", method);
322 return -ENODEV; 322 return -ENODEV;
323 } 323 }
324 return 0; 324 return 0;
@@ -383,7 +383,7 @@ static int asus_kled_lvl(struct asus_laptop *asus)
383 rv = acpi_evaluate_integer(asus->handle, METHOD_KBD_LIGHT_GET, 383 rv = acpi_evaluate_integer(asus->handle, METHOD_KBD_LIGHT_GET,
384 &params, &kblv); 384 &params, &kblv);
385 if (ACPI_FAILURE(rv)) { 385 if (ACPI_FAILURE(rv)) {
386 pr_warning("Error reading kled level\n"); 386 pr_warn("Error reading kled level\n");
387 return -ENODEV; 387 return -ENODEV;
388 } 388 }
389 return kblv; 389 return kblv;
@@ -397,7 +397,7 @@ static int asus_kled_set(struct asus_laptop *asus, int kblv)
397 kblv = 0; 397 kblv = 0;
398 398
399 if (write_acpi_int(asus->handle, METHOD_KBD_LIGHT_SET, kblv)) { 399 if (write_acpi_int(asus->handle, METHOD_KBD_LIGHT_SET, kblv)) {
400 pr_warning("Keyboard LED display write failed\n"); 400 pr_warn("Keyboard LED display write failed\n");
401 return -EINVAL; 401 return -EINVAL;
402 } 402 }
403 return 0; 403 return 0;
@@ -531,7 +531,7 @@ static int asus_read_brightness(struct backlight_device *bd)
531 rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET, 531 rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET,
532 NULL, &value); 532 NULL, &value);
533 if (ACPI_FAILURE(rv)) 533 if (ACPI_FAILURE(rv))
534 pr_warning("Error reading brightness\n"); 534 pr_warn("Error reading brightness\n");
535 535
536 return value; 536 return value;
537} 537}
@@ -541,7 +541,7 @@ static int asus_set_brightness(struct backlight_device *bd, int value)
541 struct asus_laptop *asus = bl_get_data(bd); 541 struct asus_laptop *asus = bl_get_data(bd);
542 542
543 if (write_acpi_int(asus->handle, METHOD_BRIGHTNESS_SET, value)) { 543 if (write_acpi_int(asus->handle, METHOD_BRIGHTNESS_SET, value)) {
544 pr_warning("Error changing brightness\n"); 544 pr_warn("Error changing brightness\n");
545 return -EIO; 545 return -EIO;
546 } 546 }
547 return 0; 547 return 0;
@@ -730,7 +730,7 @@ static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
730 rv = parse_arg(buf, count, &value); 730 rv = parse_arg(buf, count, &value);
731 if (rv > 0) { 731 if (rv > 0) {
732 if (write_acpi_int(asus->handle, METHOD_LEDD, value)) { 732 if (write_acpi_int(asus->handle, METHOD_LEDD, value)) {
733 pr_warning("LED display write failed\n"); 733 pr_warn("LED display write failed\n");
734 return -ENODEV; 734 return -ENODEV;
735 } 735 }
736 asus->ledd_status = (u32) value; 736 asus->ledd_status = (u32) value;
@@ -752,7 +752,7 @@ static int asus_wireless_status(struct asus_laptop *asus, int mask)
752 rv = acpi_evaluate_integer(asus->handle, METHOD_WL_STATUS, 752 rv = acpi_evaluate_integer(asus->handle, METHOD_WL_STATUS,
753 NULL, &status); 753 NULL, &status);
754 if (ACPI_FAILURE(rv)) { 754 if (ACPI_FAILURE(rv)) {
755 pr_warning("Error reading Wireless status\n"); 755 pr_warn("Error reading Wireless status\n");
756 return -EINVAL; 756 return -EINVAL;
757 } 757 }
758 return !!(status & mask); 758 return !!(status & mask);
@@ -764,7 +764,7 @@ static int asus_wireless_status(struct asus_laptop *asus, int mask)
764static int asus_wlan_set(struct asus_laptop *asus, int status) 764static int asus_wlan_set(struct asus_laptop *asus, int status)
765{ 765{
766 if (write_acpi_int(asus->handle, METHOD_WLAN, !!status)) { 766 if (write_acpi_int(asus->handle, METHOD_WLAN, !!status)) {
767 pr_warning("Error setting wlan status to %d", status); 767 pr_warn("Error setting wlan status to %d\n", status);
768 return -EIO; 768 return -EIO;
769 } 769 }
770 return 0; 770 return 0;
@@ -792,7 +792,7 @@ static ssize_t store_wlan(struct device *dev, struct device_attribute *attr,
792static int asus_bluetooth_set(struct asus_laptop *asus, int status) 792static int asus_bluetooth_set(struct asus_laptop *asus, int status)
793{ 793{
794 if (write_acpi_int(asus->handle, METHOD_BLUETOOTH, !!status)) { 794 if (write_acpi_int(asus->handle, METHOD_BLUETOOTH, !!status)) {
795 pr_warning("Error setting bluetooth status to %d", status); 795 pr_warn("Error setting bluetooth status to %d\n", status);
796 return -EIO; 796 return -EIO;
797 } 797 }
798 return 0; 798 return 0;
@@ -821,7 +821,7 @@ static ssize_t store_bluetooth(struct device *dev,
821static int asus_wimax_set(struct asus_laptop *asus, int status) 821static int asus_wimax_set(struct asus_laptop *asus, int status)
822{ 822{
823 if (write_acpi_int(asus->handle, METHOD_WIMAX, !!status)) { 823 if (write_acpi_int(asus->handle, METHOD_WIMAX, !!status)) {
824 pr_warning("Error setting wimax status to %d", status); 824 pr_warn("Error setting wimax status to %d\n", status);
825 return -EIO; 825 return -EIO;
826 } 826 }
827 return 0; 827 return 0;
@@ -850,7 +850,7 @@ static ssize_t store_wimax(struct device *dev,
850static int asus_wwan_set(struct asus_laptop *asus, int status) 850static int asus_wwan_set(struct asus_laptop *asus, int status)
851{ 851{
852 if (write_acpi_int(asus->handle, METHOD_WWAN, !!status)) { 852 if (write_acpi_int(asus->handle, METHOD_WWAN, !!status)) {
853 pr_warning("Error setting wwan status to %d", status); 853 pr_warn("Error setting wwan status to %d\n", status);
854 return -EIO; 854 return -EIO;
855 } 855 }
856 return 0; 856 return 0;
@@ -880,7 +880,7 @@ static void asus_set_display(struct asus_laptop *asus, int value)
880{ 880{
881 /* no sanity check needed for now */ 881 /* no sanity check needed for now */
882 if (write_acpi_int(asus->handle, METHOD_SWITCH_DISPLAY, value)) 882 if (write_acpi_int(asus->handle, METHOD_SWITCH_DISPLAY, value))
883 pr_warning("Error setting display\n"); 883 pr_warn("Error setting display\n");
884 return; 884 return;
885} 885}
886 886
@@ -909,7 +909,7 @@ static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
909static void asus_als_switch(struct asus_laptop *asus, int value) 909static void asus_als_switch(struct asus_laptop *asus, int value)
910{ 910{
911 if (write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value)) 911 if (write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value))
912 pr_warning("Error setting light sensor switch\n"); 912 pr_warn("Error setting light sensor switch\n");
913 asus->light_switch = value; 913 asus->light_switch = value;
914} 914}
915 915
@@ -937,7 +937,7 @@ static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
937static void asus_als_level(struct asus_laptop *asus, int value) 937static void asus_als_level(struct asus_laptop *asus, int value)
938{ 938{
939 if (write_acpi_int(asus->handle, METHOD_ALS_LEVEL, value)) 939 if (write_acpi_int(asus->handle, METHOD_ALS_LEVEL, value))
940 pr_warning("Error setting light sensor level\n"); 940 pr_warn("Error setting light sensor level\n");
941 asus->light_level = value; 941 asus->light_level = value;
942} 942}
943 943
@@ -976,7 +976,7 @@ static int asus_gps_status(struct asus_laptop *asus)
976 rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS, 976 rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS,
977 NULL, &status); 977 NULL, &status);
978 if (ACPI_FAILURE(rv)) { 978 if (ACPI_FAILURE(rv)) {
979 pr_warning("Error reading GPS status\n"); 979 pr_warn("Error reading GPS status\n");
980 return -ENODEV; 980 return -ENODEV;
981 } 981 }
982 return !!status; 982 return !!status;
@@ -1284,7 +1284,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
1284 */ 1284 */
1285 status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus->dsdt_info); 1285 status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus->dsdt_info);
1286 if (ACPI_FAILURE(status)) 1286 if (ACPI_FAILURE(status))
1287 pr_warning("Couldn't get the DSDT table header\n"); 1287 pr_warn("Couldn't get the DSDT table header\n");
1288 1288
1289 /* We have to write 0 on init this far for all ASUS models */ 1289 /* We have to write 0 on init this far for all ASUS models */
1290 if (write_acpi_int_ret(asus->handle, "INIT", 0, &buffer)) { 1290 if (write_acpi_int_ret(asus->handle, "INIT", 0, &buffer)) {
@@ -1296,7 +1296,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
1296 status = 1296 status =
1297 acpi_evaluate_integer(asus->handle, "BSTS", NULL, &bsts_result); 1297 acpi_evaluate_integer(asus->handle, "BSTS", NULL, &bsts_result);
1298 if (ACPI_FAILURE(status)) 1298 if (ACPI_FAILURE(status))
1299 pr_warning("Error calling BSTS\n"); 1299 pr_warn("Error calling BSTS\n");
1300 else if (bsts_result) 1300 else if (bsts_result)
1301 pr_notice("BSTS called, 0x%02x returned\n", 1301 pr_notice("BSTS called, 0x%02x returned\n",
1302 (uint) bsts_result); 1302 (uint) bsts_result);
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 832a3fd7c1c8..00460cb9587b 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -425,7 +425,7 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus)
425 if (asus->hotplug_slot) { 425 if (asus->hotplug_slot) {
426 bus = pci_find_bus(0, 1); 426 bus = pci_find_bus(0, 1);
427 if (!bus) { 427 if (!bus) {
428 pr_warning("Unable to find PCI bus 1?\n"); 428 pr_warn("Unable to find PCI bus 1?\n");
429 goto out_unlock; 429 goto out_unlock;
430 } 430 }
431 431
@@ -436,12 +436,12 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus)
436 absent = (l == 0xffffffff); 436 absent = (l == 0xffffffff);
437 437
438 if (blocked != absent) { 438 if (blocked != absent) {
439 pr_warning("BIOS says wireless lan is %s, " 439 pr_warn("BIOS says wireless lan is %s, "
440 "but the pci device is %s\n", 440 "but the pci device is %s\n",
441 blocked ? "blocked" : "unblocked", 441 blocked ? "blocked" : "unblocked",
442 absent ? "absent" : "present"); 442 absent ? "absent" : "present");
443 pr_warning("skipped wireless hotplug as probably " 443 pr_warn("skipped wireless hotplug as probably "
444 "inappropriate for this model\n"); 444 "inappropriate for this model\n");
445 goto out_unlock; 445 goto out_unlock;
446 } 446 }
447 447
@@ -500,7 +500,7 @@ static int asus_register_rfkill_notifier(struct asus_wmi *asus, char *node)
500 ACPI_SYSTEM_NOTIFY, 500 ACPI_SYSTEM_NOTIFY,
501 asus_rfkill_notify, asus); 501 asus_rfkill_notify, asus);
502 if (ACPI_FAILURE(status)) 502 if (ACPI_FAILURE(status))
503 pr_warning("Failed to register notify on %s\n", node); 503 pr_warn("Failed to register notify on %s\n", node);
504 } else 504 } else
505 return -ENODEV; 505 return -ENODEV;
506 506
@@ -1223,7 +1223,7 @@ static int asus_wmi_sysfs_init(struct platform_device *device)
1223/* 1223/*
1224 * Platform device 1224 * Platform device
1225 */ 1225 */
1226static int __init asus_wmi_platform_init(struct asus_wmi *asus) 1226static int asus_wmi_platform_init(struct asus_wmi *asus)
1227{ 1227{
1228 int rv; 1228 int rv;
1229 1229
@@ -1583,12 +1583,12 @@ static int asus_wmi_probe(struct platform_device *pdev)
1583 int ret; 1583 int ret;
1584 1584
1585 if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) { 1585 if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) {
1586 pr_warning("Management GUID not found\n"); 1586 pr_warn("Management GUID not found\n");
1587 return -ENODEV; 1587 return -ENODEV;
1588 } 1588 }
1589 1589
1590 if (wdrv->event_guid && !wmi_has_guid(wdrv->event_guid)) { 1590 if (wdrv->event_guid && !wmi_has_guid(wdrv->event_guid)) {
1591 pr_warning("Event GUID not found\n"); 1591 pr_warn("Event GUID not found\n");
1592 return -ENODEV; 1592 return -ENODEV;
1593 } 1593 }
1594 1594
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index f503607c0645..d9312b3073e5 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -30,6 +30,8 @@
30 * 30 *
31 */ 31 */
32 32
33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
33#include <linux/kernel.h> 35#include <linux/kernel.h>
34#include <linux/module.h> 36#include <linux/module.h>
35#include <linux/slab.h> 37#include <linux/slab.h>
@@ -581,8 +583,7 @@ static int read_led(const char *ledname, int ledmask)
581 if (read_acpi_int(NULL, ledname, &led_status)) 583 if (read_acpi_int(NULL, ledname, &led_status))
582 return led_status; 584 return led_status;
583 else 585 else
584 printk(KERN_WARNING "Asus ACPI: Error reading LED " 586 pr_warn("Error reading LED status\n");
585 "status\n");
586 } 587 }
587 return (hotk->status & ledmask) ? 1 : 0; 588 return (hotk->status & ledmask) ? 1 : 0;
588} 589}
@@ -621,8 +622,7 @@ write_led(const char __user *buffer, unsigned long count,
621 led_out = !led_out; 622 led_out = !led_out;
622 623
623 if (!write_acpi_int(hotk->handle, ledname, led_out, NULL)) 624 if (!write_acpi_int(hotk->handle, ledname, led_out, NULL))
624 printk(KERN_WARNING "Asus ACPI: LED (%s) write failed\n", 625 pr_warn("LED (%s) write failed\n", ledname);
625 ledname);
626 626
627 return rv; 627 return rv;
628} 628}
@@ -679,8 +679,7 @@ static ssize_t ledd_proc_write(struct file *file, const char __user *buffer,
679 if (rv > 0) { 679 if (rv > 0) {
680 if (!write_acpi_int 680 if (!write_acpi_int
681 (hotk->handle, hotk->methods->mt_ledd, value, NULL)) 681 (hotk->handle, hotk->methods->mt_ledd, value, NULL))
682 printk(KERN_WARNING 682 pr_warn("LED display write failed\n");
683 "Asus ACPI: LED display write failed\n");
684 else 683 else
685 hotk->ledd_status = (u32) value; 684 hotk->ledd_status = (u32) value;
686 } 685 }
@@ -838,8 +837,7 @@ static int get_lcd_state(void)
838 } else { 837 } else {
839 /* We don't have to check anything if we are here */ 838 /* We don't have to check anything if we are here */
840 if (!read_acpi_int(NULL, hotk->methods->lcd_status, &lcd)) 839 if (!read_acpi_int(NULL, hotk->methods->lcd_status, &lcd))
841 printk(KERN_WARNING 840 pr_warn("Error reading LCD status\n");
842 "Asus ACPI: Error reading LCD status\n");
843 841
844 if (hotk->model == L2D) 842 if (hotk->model == L2D)
845 lcd = ~lcd; 843 lcd = ~lcd;
@@ -871,7 +869,7 @@ static int set_lcd_state(int value)
871 the exact behaviour is simulated here */ 869 the exact behaviour is simulated here */
872 } 870 }
873 if (ACPI_FAILURE(status)) 871 if (ACPI_FAILURE(status))
874 printk(KERN_WARNING "Asus ACPI: Error switching LCD\n"); 872 pr_warn("Error switching LCD\n");
875 } 873 }
876 return 0; 874 return 0;
877 875
@@ -915,13 +913,11 @@ static int read_brightness(struct backlight_device *bd)
915 if (hotk->methods->brightness_get) { /* SPLV/GPLV laptop */ 913 if (hotk->methods->brightness_get) { /* SPLV/GPLV laptop */
916 if (!read_acpi_int(hotk->handle, hotk->methods->brightness_get, 914 if (!read_acpi_int(hotk->handle, hotk->methods->brightness_get,
917 &value)) 915 &value))
918 printk(KERN_WARNING 916 pr_warn("Error reading brightness\n");
919 "Asus ACPI: Error reading brightness\n");
920 } else if (hotk->methods->brightness_status) { /* For D1 for example */ 917 } else if (hotk->methods->brightness_status) { /* For D1 for example */
921 if (!read_acpi_int(NULL, hotk->methods->brightness_status, 918 if (!read_acpi_int(NULL, hotk->methods->brightness_status,
922 &value)) 919 &value))
923 printk(KERN_WARNING 920 pr_warn("Error reading brightness\n");
924 "Asus ACPI: Error reading brightness\n");
925 } else /* No GPLV method */ 921 } else /* No GPLV method */
926 value = hotk->brightness; 922 value = hotk->brightness;
927 return value; 923 return value;
@@ -939,8 +935,7 @@ static int set_brightness(int value)
939 if (hotk->methods->brightness_set) { 935 if (hotk->methods->brightness_set) {
940 if (!write_acpi_int(hotk->handle, hotk->methods->brightness_set, 936 if (!write_acpi_int(hotk->handle, hotk->methods->brightness_set,
941 value, NULL)) { 937 value, NULL)) {
942 printk(KERN_WARNING 938 pr_warn("Error changing brightness\n");
943 "Asus ACPI: Error changing brightness\n");
944 ret = -EIO; 939 ret = -EIO;
945 } 940 }
946 goto out; 941 goto out;
@@ -955,8 +950,7 @@ static int set_brightness(int value)
955 NULL, NULL); 950 NULL, NULL);
956 (value > 0) ? value-- : value++; 951 (value > 0) ? value-- : value++;
957 if (ACPI_FAILURE(status)) { 952 if (ACPI_FAILURE(status)) {
958 printk(KERN_WARNING 953 pr_warn("Error changing brightness\n");
959 "Asus ACPI: Error changing brightness\n");
960 ret = -EIO; 954 ret = -EIO;
961 } 955 }
962 } 956 }
@@ -1008,7 +1002,7 @@ static void set_display(int value)
1008 /* no sanity check needed for now */ 1002 /* no sanity check needed for now */
1009 if (!write_acpi_int(hotk->handle, hotk->methods->display_set, 1003 if (!write_acpi_int(hotk->handle, hotk->methods->display_set,
1010 value, NULL)) 1004 value, NULL))
1011 printk(KERN_WARNING "Asus ACPI: Error setting display\n"); 1005 pr_warn("Error setting display\n");
1012 return; 1006 return;
1013} 1007}
1014 1008
@@ -1021,8 +1015,7 @@ static int disp_proc_show(struct seq_file *m, void *v)
1021 int value = 0; 1015 int value = 0;
1022 1016
1023 if (!read_acpi_int(hotk->handle, hotk->methods->display_get, &value)) 1017 if (!read_acpi_int(hotk->handle, hotk->methods->display_get, &value))
1024 printk(KERN_WARNING 1018 pr_warn("Error reading display status\n");
1025 "Asus ACPI: Error reading display status\n");
1026 value &= 0x07; /* needed for some models, shouldn't hurt others */ 1019 value &= 0x07; /* needed for some models, shouldn't hurt others */
1027 seq_printf(m, "%d\n", value); 1020 seq_printf(m, "%d\n", value);
1028 return 0; 1021 return 0;
@@ -1068,7 +1061,7 @@ asus_proc_add(char *name, const struct file_operations *proc_fops, mode_t mode,
1068 proc = proc_create_data(name, mode, acpi_device_dir(device), 1061 proc = proc_create_data(name, mode, acpi_device_dir(device),
1069 proc_fops, acpi_driver_data(device)); 1062 proc_fops, acpi_driver_data(device));
1070 if (!proc) { 1063 if (!proc) {
1071 printk(KERN_WARNING " Unable to create %s fs entry\n", name); 1064 pr_warn(" Unable to create %s fs entry\n", name);
1072 return -1; 1065 return -1;
1073 } 1066 }
1074 proc->uid = asus_uid; 1067 proc->uid = asus_uid;
@@ -1085,8 +1078,8 @@ static int asus_hotk_add_fs(struct acpi_device *device)
1085 mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP; 1078 mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
1086 } else { 1079 } else {
1087 mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP; 1080 mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
1088 printk(KERN_WARNING " asus_uid and asus_gid parameters are " 1081 pr_warn(" asus_uid and asus_gid parameters are "
1089 "deprecated, use chown and chmod instead!\n"); 1082 "deprecated, use chown and chmod instead!\n");
1090 } 1083 }
1091 1084
1092 acpi_device_dir(device) = asus_proc_dir; 1085 acpi_device_dir(device) = asus_proc_dir;
@@ -1099,8 +1092,7 @@ static int asus_hotk_add_fs(struct acpi_device *device)
1099 proc->uid = asus_uid; 1092 proc->uid = asus_uid;
1100 proc->gid = asus_gid; 1093 proc->gid = asus_gid;
1101 } else { 1094 } else {
1102 printk(KERN_WARNING " Unable to create " PROC_INFO 1095 pr_warn(" Unable to create " PROC_INFO " fs entry\n");
1103 " fs entry\n");
1104 } 1096 }
1105 1097
1106 if (hotk->methods->mt_wled) { 1098 if (hotk->methods->mt_wled) {
@@ -1283,20 +1275,19 @@ static int asus_hotk_get_info(void)
1283 */ 1275 */
1284 status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info); 1276 status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
1285 if (ACPI_FAILURE(status)) 1277 if (ACPI_FAILURE(status))
1286 printk(KERN_WARNING " Couldn't get the DSDT table header\n"); 1278 pr_warn(" Couldn't get the DSDT table header\n");
1287 1279
1288 /* We have to write 0 on init this far for all ASUS models */ 1280 /* We have to write 0 on init this far for all ASUS models */
1289 if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) { 1281 if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
1290 printk(KERN_ERR " Hotkey initialization failed\n"); 1282 pr_err(" Hotkey initialization failed\n");
1291 return -ENODEV; 1283 return -ENODEV;
1292 } 1284 }
1293 1285
1294 /* This needs to be called for some laptops to init properly */ 1286 /* This needs to be called for some laptops to init properly */
1295 if (!read_acpi_int(hotk->handle, "BSTS", &bsts_result)) 1287 if (!read_acpi_int(hotk->handle, "BSTS", &bsts_result))
1296 printk(KERN_WARNING " Error calling BSTS\n"); 1288 pr_warn(" Error calling BSTS\n");
1297 else if (bsts_result) 1289 else if (bsts_result)
1298 printk(KERN_NOTICE " BSTS called, 0x%02x returned\n", 1290 pr_notice(" BSTS called, 0x%02x returned\n", bsts_result);
1299 bsts_result);
1300 1291
1301 /* 1292 /*
1302 * Try to match the object returned by INIT to the specific model. 1293 * Try to match the object returned by INIT to the specific model.
@@ -1324,23 +1315,21 @@ static int asus_hotk_get_info(void)
1324 if (asus_info && 1315 if (asus_info &&
1325 strncmp(asus_info->oem_table_id, "ODEM", 4) == 0) { 1316 strncmp(asus_info->oem_table_id, "ODEM", 4) == 0) {
1326 hotk->model = P30; 1317 hotk->model = P30;
1327 printk(KERN_NOTICE 1318 pr_notice(" Samsung P30 detected, supported\n");
1328 " Samsung P30 detected, supported\n");
1329 hotk->methods = &model_conf[hotk->model]; 1319 hotk->methods = &model_conf[hotk->model];
1330 kfree(model); 1320 kfree(model);
1331 return 0; 1321 return 0;
1332 } else { 1322 } else {
1333 hotk->model = M2E; 1323 hotk->model = M2E;
1334 printk(KERN_NOTICE " unsupported model %s, trying " 1324 pr_notice(" unsupported model %s, trying default values\n",
1335 "default values\n", string); 1325 string);
1336 printk(KERN_NOTICE 1326 pr_notice(" send /proc/acpi/dsdt to the developers\n");
1337 " send /proc/acpi/dsdt to the developers\n");
1338 kfree(model); 1327 kfree(model);
1339 return -ENODEV; 1328 return -ENODEV;
1340 } 1329 }
1341 } 1330 }
1342 hotk->methods = &model_conf[hotk->model]; 1331 hotk->methods = &model_conf[hotk->model];
1343 printk(KERN_NOTICE " %s model detected, supported\n", string); 1332 pr_notice(" %s model detected, supported\n", string);
1344 1333
1345 /* Sort of per-model blacklist */ 1334 /* Sort of per-model blacklist */
1346 if (strncmp(string, "L2B", 3) == 0) 1335 if (strncmp(string, "L2B", 3) == 0)
@@ -1385,7 +1374,7 @@ static int asus_hotk_check(void)
1385 if (hotk->device->status.present) { 1374 if (hotk->device->status.present) {
1386 result = asus_hotk_get_info(); 1375 result = asus_hotk_get_info();
1387 } else { 1376 } else {
1388 printk(KERN_ERR " Hotkey device not present, aborting\n"); 1377 pr_err(" Hotkey device not present, aborting\n");
1389 return -EINVAL; 1378 return -EINVAL;
1390 } 1379 }
1391 1380
@@ -1399,8 +1388,7 @@ static int asus_hotk_add(struct acpi_device *device)
1399 acpi_status status = AE_OK; 1388 acpi_status status = AE_OK;
1400 int result; 1389 int result;
1401 1390
1402 printk(KERN_NOTICE "Asus Laptop ACPI Extras version %s\n", 1391 pr_notice("Asus Laptop ACPI Extras version %s\n", ASUS_ACPI_VERSION);
1403 ASUS_ACPI_VERSION);
1404 1392
1405 hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL); 1393 hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL);
1406 if (!hotk) 1394 if (!hotk)
@@ -1428,15 +1416,14 @@ static int asus_hotk_add(struct acpi_device *device)
1428 acpi_evaluate_object(NULL, hotk->methods->brightness_down, 1416 acpi_evaluate_object(NULL, hotk->methods->brightness_down,
1429 NULL, NULL); 1417 NULL, NULL);
1430 if (ACPI_FAILURE(status)) 1418 if (ACPI_FAILURE(status))
1431 printk(KERN_WARNING " Error changing brightness\n"); 1419 pr_warn(" Error changing brightness\n");
1432 else { 1420 else {
1433 status = 1421 status =
1434 acpi_evaluate_object(NULL, 1422 acpi_evaluate_object(NULL,
1435 hotk->methods->brightness_up, 1423 hotk->methods->brightness_up,
1436 NULL, NULL); 1424 NULL, NULL);
1437 if (ACPI_FAILURE(status)) 1425 if (ACPI_FAILURE(status))
1438 printk(KERN_WARNING " Strange, error changing" 1426 pr_warn(" Strange, error changing brightness\n");
1439 " brightness\n");
1440 } 1427 }
1441 } 1428 }
1442 1429
@@ -1488,7 +1475,7 @@ static int __init asus_acpi_init(void)
1488 1475
1489 asus_proc_dir = proc_mkdir(PROC_ASUS, acpi_root_dir); 1476 asus_proc_dir = proc_mkdir(PROC_ASUS, acpi_root_dir);
1490 if (!asus_proc_dir) { 1477 if (!asus_proc_dir) {
1491 printk(KERN_ERR "Asus ACPI: Unable to create /proc entry\n"); 1478 pr_err("Unable to create /proc entry\n");
1492 acpi_bus_unregister_driver(&asus_hotk_driver); 1479 acpi_bus_unregister_driver(&asus_hotk_driver);
1493 return -ENODEV; 1480 return -ENODEV;
1494 } 1481 }
@@ -1513,7 +1500,7 @@ static int __init asus_acpi_init(void)
1513 &asus_backlight_data, 1500 &asus_backlight_data,
1514 &props); 1501 &props);
1515 if (IS_ERR(asus_backlight_device)) { 1502 if (IS_ERR(asus_backlight_device)) {
1516 printk(KERN_ERR "Could not register asus backlight device\n"); 1503 pr_err("Could not register asus backlight device\n");
1517 asus_backlight_device = NULL; 1504 asus_backlight_device = NULL;
1518 asus_acpi_exit(); 1505 asus_acpi_exit();
1519 return -ENODEV; 1506 return -ENODEV;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index c16a27641ced..3f204fde1b02 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -68,6 +68,8 @@
68 * only enabled on a JHL90 board until it is verified that they work on the 68 * only enabled on a JHL90 board until it is verified that they work on the
69 * other boards too. See the extra_features variable. */ 69 * other boards too. See the extra_features variable. */
70 70
71#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72
71#include <linux/module.h> 73#include <linux/module.h>
72#include <linux/kernel.h> 74#include <linux/kernel.h>
73#include <linux/init.h> 75#include <linux/init.h>
@@ -200,8 +202,8 @@ static bool extra_features;
200 * watching the output of address 0x4F (do an ec_transaction writing 0x33 202 * watching the output of address 0x4F (do an ec_transaction writing 0x33
201 * into 0x4F and read a few bytes from the output, like so: 203 * into 0x4F and read a few bytes from the output, like so:
202 * u8 writeData = 0x33; 204 * u8 writeData = 0x33;
203 * ec_transaction(0x4F, &writeData, 1, buffer, 32, 0); 205 * ec_transaction(0x4F, &writeData, 1, buffer, 32);
204 * That address is labelled "fan1 table information" in the service manual. 206 * That address is labeled "fan1 table information" in the service manual.
205 * It should be clear which value in 'buffer' changes). This seems to be 207 * It should be clear which value in 'buffer' changes). This seems to be
206 * related to fan speed. It isn't a proper 'realtime' fan speed value 208 * related to fan speed. It isn't a proper 'realtime' fan speed value
207 * though, because physically stopping or speeding up the fan doesn't 209 * though, because physically stopping or speeding up the fan doesn't
@@ -286,7 +288,7 @@ static int get_backlight_level(void)
286static void set_backlight_state(bool on) 288static void set_backlight_state(bool on)
287{ 289{
288 u8 data = on ? BACKLIGHT_STATE_ON_DATA : BACKLIGHT_STATE_OFF_DATA; 290 u8 data = on ? BACKLIGHT_STATE_ON_DATA : BACKLIGHT_STATE_OFF_DATA;
289 ec_transaction(BACKLIGHT_STATE_ADDR, &data, 1, NULL, 0, 0); 291 ec_transaction(BACKLIGHT_STATE_ADDR, &data, 1, NULL, 0);
290} 292}
291 293
292 294
@@ -294,24 +296,24 @@ static void set_backlight_state(bool on)
 static void pwm_enable_control(void)
 {
 	unsigned char writeData = PWM_ENABLE_DATA;
-	ec_transaction(PWM_ENABLE_ADDR, &writeData, 1, NULL, 0, 0);
+	ec_transaction(PWM_ENABLE_ADDR, &writeData, 1, NULL, 0);
 }
 
 static void pwm_disable_control(void)
 {
 	unsigned char writeData = PWM_DISABLE_DATA;
-	ec_transaction(PWM_DISABLE_ADDR, &writeData, 1, NULL, 0, 0);
+	ec_transaction(PWM_DISABLE_ADDR, &writeData, 1, NULL, 0);
 }
 
 static void set_pwm(int pwm)
 {
-	ec_transaction(PWM_ADDRESS, &pwm_lookup_table[pwm], 1, NULL, 0, 0);
+	ec_transaction(PWM_ADDRESS, &pwm_lookup_table[pwm], 1, NULL, 0);
 }
 
 static int get_fan_rpm(void)
 {
 	u8 value, data = FAN_DATA;
-	ec_transaction(FAN_ADDRESS, &data, 1, &value, 1, 0);
+	ec_transaction(FAN_ADDRESS, &data, 1, &value, 1);
 	return 100 * (int)value;
 }
 
@@ -760,16 +762,14 @@ static struct rfkill *bt_rfkill;
 
 static int dmi_check_cb(const struct dmi_system_id *id)
 {
-	printk(KERN_INFO DRIVER_NAME": Identified laptop model '%s'\n",
-	       id->ident);
+	pr_info("Identified laptop model '%s'\n", id->ident);
 	extra_features = false;
 	return 1;
 }
 
 static int dmi_check_cb_extra(const struct dmi_system_id *id)
 {
-	printk(KERN_INFO DRIVER_NAME": Identified laptop model '%s', "
-		"enabling extra features\n",
+	pr_info("Identified laptop model '%s', enabling extra features\n",
 		id->ident);
 	extra_features = true;
 	return 1;
@@ -956,14 +956,12 @@ static int __init compal_init(void)
 	int ret;
 
 	if (acpi_disabled) {
-		printk(KERN_ERR DRIVER_NAME": ACPI needs to be enabled for "
-			"this driver to work!\n");
+		pr_err("ACPI needs to be enabled for this driver to work!\n");
 		return -ENODEV;
 	}
 
 	if (!force && !dmi_check_system(compal_dmi_table)) {
-		printk(KERN_ERR DRIVER_NAME": Motherboard not recognized (You "
-			"could try the module's force-parameter)");
+		pr_err("Motherboard not recognized (You could try the module's force-parameter)\n");
 		return -ENODEV;
 	}
 
@@ -998,8 +996,7 @@ static int __init compal_init(void)
 	if (ret)
 		goto err_rfkill;
 
-	printk(KERN_INFO DRIVER_NAME": Driver "DRIVER_VERSION
-		" successfully loaded\n");
+	pr_info("Driver " DRIVER_VERSION " successfully loaded\n");
 	return 0;
 
 err_rfkill:
@@ -1064,7 +1061,7 @@ static void __exit compal_cleanup(void)
 	rfkill_destroy(wifi_rfkill);
 	rfkill_destroy(bt_rfkill);
 
-	printk(KERN_INFO DRIVER_NAME": Driver unloaded\n");
+	pr_info("Driver unloaded\n");
 }
 
 static int __devexit compal_remove(struct platform_device *pdev)
@@ -1074,8 +1071,7 @@ static int __devexit compal_remove(struct platform_device *pdev)
 	if (!extra_features)
 		return 0;
 
-	printk(KERN_INFO DRIVER_NAME": Unloading: resetting fan control "
-		"to motherboard\n");
+	pr_info("Unloading: resetting fan control to motherboard\n");
 	pwm_disable_control();
 
 	data = platform_get_drvdata(pdev);
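All of the message conversions in this file follow from one mechanism: once pr_fmt() is defined before the includes, every pr_*() helper prepends it to the format string, which is what makes the hand-rolled DRIVER_NAME": " prefixes redundant. A minimal sketch of the expansion (simplified from the kernel's printk.h, which carries more decoration than shown here):

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    /* printk.h, simplified: each helper wraps its format in pr_fmt() */
    #define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

    /* so in this driver (KBUILD_MODNAME is "compal_laptop"): */
    pr_err("ACPI needs to be enabled for this driver to work!\n");
    /* expands to roughly:
     * printk(KERN_ERR "compal_laptop" ": "
     *        "ACPI needs to be enabled for this driver to work!\n");
     */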
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index de301aa8e5c3..d3841de6a8cf 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -11,6 +11,8 @@
  * published by the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -434,8 +436,7 @@ static int __init dell_setup_rfkill(void)
 	int ret;
 
 	if (dmi_check_system(dell_blacklist)) {
-		printk(KERN_INFO "dell-laptop: Blacklisted hardware detected - "
-		       "not enabling rfkill\n");
+		pr_info("Blacklisted hardware detected - not enabling rfkill\n");
 		return 0;
 	}
 
441 442
@@ -606,7 +607,7 @@ static int __init dell_init(void)
 	dmi_walk(find_tokens, NULL);
 
 	if (!da_tokens) {
-		printk(KERN_INFO "dell-laptop: Unable to find dmi tokens\n");
+		pr_info("Unable to find dmi tokens\n");
 		return -ENODEV;
 	}
 
@@ -636,14 +637,13 @@ static int __init dell_init(void)
 	ret = dell_setup_rfkill();
 
 	if (ret) {
-		printk(KERN_WARNING "dell-laptop: Unable to setup rfkill\n");
+		pr_warn("Unable to setup rfkill\n");
 		goto fail_rfkill;
 	}
 
 	ret = i8042_install_filter(dell_laptop_i8042_filter);
 	if (ret) {
-		printk(KERN_WARNING
-		       "dell-laptop: Unable to install key filter\n");
+		pr_warn("Unable to install key filter\n");
 		goto fail_filter;
 	}
 
diff --git a/drivers/platform/x86/dell-wmi-aio.c b/drivers/platform/x86/dell-wmi-aio.c
index 0ed84573ae1f..3f945457f71c 100644
--- a/drivers/platform/x86/dell-wmi-aio.c
+++ b/drivers/platform/x86/dell-wmi-aio.c
@@ -15,6 +15,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
+
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
@@ -138,7 +139,7 @@ static int __init dell_wmi_aio_init(void)
 
 	guid = dell_wmi_aio_find();
 	if (!guid) {
-		pr_warning("No known WMI GUID found\n");
+		pr_warn("No known WMI GUID found\n");
 		return -ENXIO;
 	}
 
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 77f1d55414c6..ce790827e199 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -23,6 +23,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -141,7 +143,7 @@ static void dell_wmi_notify(u32 value, void *context)
 
 	status = wmi_get_event_data(value, &response);
 	if (status != AE_OK) {
-		printk(KERN_INFO "dell-wmi: bad event status 0x%x\n", status);
+		pr_info("bad event status 0x%x\n", status);
 		return;
 	}
 
@@ -153,8 +155,8 @@ static void dell_wmi_notify(u32 value, void *context)
 		u16 *buffer_entry = (u16 *)obj->buffer.pointer;
 
 		if (dell_new_hk_type && (buffer_entry[1] != 0x10)) {
-			printk(KERN_INFO "dell-wmi: Received unknown WMI event"
-			       " (0x%x)\n", buffer_entry[1]);
+			pr_info("Received unknown WMI event (0x%x)\n",
+				buffer_entry[1]);
 			kfree(obj);
 			return;
 		}
@@ -167,8 +169,7 @@ static void dell_wmi_notify(u32 value, void *context)
 	key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
 						reported_key);
 	if (!key) {
-		printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n",
-		       reported_key);
+		pr_info("Unknown key %x pressed\n", reported_key);
 	} else if ((key->keycode == KEY_BRIGHTNESSUP ||
 		    key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) {
 		/* Don't report brightness notifications that will also
@@ -275,7 +276,7 @@ static int __init dell_wmi_init(void)
 	acpi_status status;
 
 	if (!wmi_has_guid(DELL_EVENT_GUID)) {
-		printk(KERN_WARNING "dell-wmi: No known WMI GUID found\n");
+		pr_warn("No known WMI GUID found\n");
 		return -ENODEV;
 	}
 
@@ -290,9 +291,7 @@ static int __init dell_wmi_init(void)
 					 dell_wmi_notify, NULL);
 	if (ACPI_FAILURE(status)) {
 		dell_wmi_input_destroy();
-		printk(KERN_ERR
-		       "dell-wmi: Unable to register notify handler - %d\n",
-		       status);
+		pr_err("Unable to register notify handler - %d\n", status);
 		return -ENODEV;
 	}
 
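The dell-wmi notify handler above resolves firmware scancodes through the sparse-keymap helpers before logging unknown keys. A hedged sketch of that lookup pattern; the keymap entry below is illustrative only, not the driver's real table:

    /* Illustrative entry; dell-wmi's actual keymap differs. */
    static const struct key_entry demo_keymap[] = {
    	{ KE_KEY, 0xe009, { KEY_EJECTCD } },
    	{ KE_END, 0 },
    };

    const struct key_entry *key;

    /* Translate the firmware scancode, or log it as unknown. */
    key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev, reported_key);
    if (!key)
    	pr_info("Unknown key %x pressed\n", reported_key);
    else
    	sparse_keymap_report_entry(dell_wmi_input_dev, key, 1, true);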
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 2c1abf63957f..1c45d92e2163 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -228,7 +228,7 @@ static int set_acpi(struct eeepc_laptop *eeepc, int cm, int value)
 		return -ENODEV;
 
 	if (write_acpi_int(eeepc->handle, method, value))
-		pr_warning("Error writing %s\n", method);
+		pr_warn("Error writing %s\n", method);
 	return 0;
 }
 
@@ -243,7 +243,7 @@ static int get_acpi(struct eeepc_laptop *eeepc, int cm)
 		return -ENODEV;
 
 	if (read_acpi_int(eeepc->handle, method, &value))
-		pr_warning("Error reading %s\n", method);
+		pr_warn("Error reading %s\n", method);
 	return value;
 }
 
@@ -261,7 +261,7 @@ static int acpi_setter_handle(struct eeepc_laptop *eeepc, int cm,
 	status = acpi_get_handle(eeepc->handle, (char *)method,
 				 handle);
 	if (status != AE_OK) {
-		pr_warning("Error finding %s\n", method);
+		pr_warn("Error finding %s\n", method);
 		return -ENODEV;
 	}
 	return 0;
@@ -417,7 +417,7 @@ static ssize_t store_cpufv_disabled(struct device *dev,
 	switch (value) {
 	case 0:
 		if (eeepc->cpufv_disabled)
-			pr_warning("cpufv enabled (not officially supported "
+			pr_warn("cpufv enabled (not officially supported "
 				"on this model)\n");
 		eeepc->cpufv_disabled = false;
 		return rv;
@@ -609,7 +609,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
 	bus = port->subordinate;
 
 	if (!bus) {
-		pr_warning("Unable to find PCI bus?\n");
+		pr_warn("Unable to find PCI bus 1?\n");
 		goto out_unlock;
 	}
 
@@ -621,12 +621,12 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
 	absent = (l == 0xffffffff);
 
 	if (blocked != absent) {
-		pr_warning("BIOS says wireless lan is %s, "
+		pr_warn("BIOS says wireless lan is %s, "
 			"but the pci device is %s\n",
 			blocked ? "blocked" : "unblocked",
 			absent ? "absent" : "present");
-		pr_warning("skipped wireless hotplug as probably "
+		pr_warn("skipped wireless hotplug as probably "
 			"inappropriate for this model\n");
 		goto out_unlock;
 	}
 
@@ -691,7 +691,8 @@ static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc,
 					     eeepc_rfkill_notify,
 					     eeepc);
 	if (ACPI_FAILURE(status))
-		pr_warning("Failed to register notify on %s\n", node);
+		pr_warn("Failed to register notify on %s\n", node);
+
 	/*
 	 * Refresh pci hotplug in case the rfkill state was
 	 * changed during setup.
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 649dcadd8ea3..4aa867a9b88b 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -84,7 +84,7 @@ static const struct key_entry eeepc_wmi_keymap[] = {
 static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level,
 					  void *context, void **retval)
 {
-	pr_warning("Found legacy ATKD device (%s)", EEEPC_ACPI_HID);
+	pr_warn("Found legacy ATKD device (%s)\n", EEEPC_ACPI_HID);
 	*(bool *)context = true;
 	return AE_CTRL_TERMINATE;
 }
@@ -105,12 +105,12 @@ static int eeepc_wmi_check_atkd(void)
 static int eeepc_wmi_probe(struct platform_device *pdev)
 {
 	if (eeepc_wmi_check_atkd()) {
-		pr_warning("WMI device present, but legacy ATKD device is also "
-			   "present and enabled.");
-		pr_warning("You probably booted with acpi_osi=\"Linux\" or "
-			   "acpi_osi=\"!Windows 2009\"");
-		pr_warning("Can't load eeepc-wmi, use default acpi_osi "
-			   "(preferred) or eeepc-laptop");
+		pr_warn("WMI device present, but legacy ATKD device is also "
+			"present and enabled\n");
+		pr_warn("You probably booted with acpi_osi=\"Linux\" or "
+			"acpi_osi=\"!Windows 2009\"\n");
+		pr_warn("Can't load eeepc-wmi, use default acpi_osi "
+			"(preferred) or eeepc-laptop\n");
 		return -EBUSY;
 	}
 	return 0;
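Besides renaming pr_warning() to pr_warn(), this hunk adds the missing "\n" terminators. On kernels of this era a message without a trailing newline leaves the log line open, so the next printk could end up appended to it; the fix keeps each warning on its own line. A small illustration (hedged, behavior varied by kernel version):

    pr_warn("no newline here");   /* line may be left open... */
    pr_warn("next message\n");    /* ...so this text can run onto the same line */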
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 493054c2dbe1..6b26666b37f2 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -56,6 +56,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -585,8 +587,7 @@ static struct platform_driver fujitsupf_driver = {
 static void dmi_check_cb_common(const struct dmi_system_id *id)
 {
 	acpi_handle handle;
-	printk(KERN_INFO "fujitsu-laptop: Identified laptop model '%s'.\n",
-	       id->ident);
+	pr_info("Identified laptop model '%s'\n", id->ident);
 	if (use_alt_lcd_levels == -1) {
 		if (ACPI_SUCCESS(acpi_get_handle(NULL,
 				"\\_SB.PCI0.LPCB.FJEX.SBL2", &handle)))
@@ -691,11 +692,11 @@ static int acpi_fujitsu_add(struct acpi_device *device)
 
 	result = acpi_bus_update_power(fujitsu->acpi_handle, &state);
 	if (result) {
-		printk(KERN_ERR "Error reading power state\n");
+		pr_err("Error reading power state\n");
 		goto err_unregister_input_dev;
 	}
 
-	printk(KERN_INFO "ACPI: %s [%s] (%s)\n",
+	pr_info("ACPI: %s [%s] (%s)\n",
 	       acpi_device_name(device), acpi_device_bid(device),
 	       !device->power.state ? "on" : "off");
 
@@ -707,7 +708,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
 		if (ACPI_FAILURE
 		    (acpi_evaluate_object
 		     (device->handle, METHOD_NAME__INI, NULL, NULL)))
-			printk(KERN_ERR "_INI Method failed\n");
+			pr_err("_INI Method failed\n");
 	}
 
 	/* do config (detect defaults) */
@@ -827,7 +828,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
 	error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int),
 			GFP_KERNEL);
 	if (error) {
-		printk(KERN_ERR "kfifo_alloc failed\n");
+		pr_err("kfifo_alloc failed\n");
 		goto err_stop;
 	}
 
@@ -859,13 +860,13 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
 
 	result = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
 	if (result) {
-		printk(KERN_ERR "Error reading power state\n");
+		pr_err("Error reading power state\n");
 		goto err_unregister_input_dev;
 	}
 
-	printk(KERN_INFO "ACPI: %s [%s] (%s)\n",
+	pr_info("ACPI: %s [%s] (%s)\n",
 	       acpi_device_name(device), acpi_device_bid(device),
 	       !device->power.state ? "on" : "off");
 
 	fujitsu_hotkey->dev = device;
 
@@ -875,7 +876,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
 		if (ACPI_FAILURE
 		    (acpi_evaluate_object
 		     (device->handle, METHOD_NAME__INI, NULL, NULL)))
-			printk(KERN_ERR "_INI Method failed\n");
+			pr_err("_INI Method failed\n");
 	}
 
 	i = 0;
@@ -897,8 +898,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
 	call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);
 
 	/* Suspect this is a keymap of the application panel, print it */
-	printk(KERN_INFO "fujitsu-laptop: BTNI: [0x%x]\n",
-	       call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
+	pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
 
 #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
 	if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
@@ -907,8 +907,8 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
 		if (result == 0) {
 			fujitsu_hotkey->logolamp_registered = 1;
 		} else {
-			printk(KERN_ERR "fujitsu-laptop: Could not register "
-			       "LED handler for logo lamp, error %i\n", result);
+			pr_err("Could not register LED handler for logo lamp, error %i\n",
+			       result);
 		}
 	}
 
@@ -919,8 +919,8 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
 		if (result == 0) {
 			fujitsu_hotkey->kblamps_registered = 1;
 		} else {
-			printk(KERN_ERR "fujitsu-laptop: Could not register "
-			       "LED handler for keyboard lamps, error %i\n", result);
+			pr_err("Could not register LED handler for keyboard lamps, error %i\n",
+			       result);
 		}
 	}
 #endif
@@ -1169,8 +1169,7 @@ static int __init fujitsu_init(void)
 		fujitsu->bl_device->props.power = 0;
 	}
 
-	printk(KERN_INFO "fujitsu-laptop: driver " FUJITSU_DRIVER_VERSION
-	       " successfully loaded.\n");
+	pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
 
 	return 0;
 
@@ -1216,7 +1215,7 @@ static void __exit fujitsu_cleanup(void)
 
 	kfree(fujitsu);
 
-	printk(KERN_INFO "fujitsu-laptop: driver unloaded.\n");
+	pr_info("driver unloaded\n");
 }
 
 module_init(fujitsu_init);
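Note the style choice in the LED-handler hunks above: the patch re-joins user-visible strings onto a single line even where that exceeds 80 columns, because a split literal cannot be found by grepping the source for the logged message:

    /* split literal: grep for "Could not register LED handler" misses it */
    printk(KERN_ERR "fujitsu-laptop: Could not register "
           "LED handler for logo lamp, error %i\n", result);

    /* kept whole (as in the patch): the message is searchable */
    pr_err("Could not register LED handler for logo lamp, error %i\n", result);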
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 067bf36d32f3..5a34973dc164 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -26,6 +26,8 @@
  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/input-polldev.h>
@@ -238,7 +240,7 @@ static int hdaps_device_init(void)
 	    __check_latch(0x1611, 0x01))
 		goto out;
 
-	printk(KERN_DEBUG "hdaps: initial latch check good (0x%02x).\n",
+	printk(KERN_DEBUG "hdaps: initial latch check good (0x%02x)\n",
 	       __get_latch(0x1611));
 
 	outb(0x17, 0x1610);
@@ -299,7 +301,7 @@ static int hdaps_probe(struct platform_device *dev)
 	if (ret)
 		return ret;
 
-	printk(KERN_INFO "hdaps: device successfully initialized.\n");
+	pr_info("device successfully initialized\n");
 	return 0;
 }
 
@@ -480,7 +482,7 @@ static struct attribute_group hdaps_attribute_group = {
 /* hdaps_dmi_match - found a match.  return one, short-circuiting the hunt. */
 static int __init hdaps_dmi_match(const struct dmi_system_id *id)
 {
-	printk(KERN_INFO "hdaps: %s detected.\n", id->ident);
+	pr_info("%s detected\n", id->ident);
 	return 1;
 }
 
@@ -488,8 +490,7 @@ static int __init hdaps_dmi_match(const struct dmi_system_id *id)
 static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
 {
 	hdaps_invert = (unsigned long)id->driver_data;
-	printk(KERN_INFO "hdaps: inverting axis (%u) readings.\n",
-	       hdaps_invert);
+	pr_info("inverting axis (%u) readings\n", hdaps_invert);
 	return hdaps_dmi_match(id);
 }
 
@@ -543,7 +544,7 @@ static int __init hdaps_init(void)
 	int ret;
 
 	if (!dmi_check_system(hdaps_whitelist)) {
-		printk(KERN_WARNING "hdaps: supported laptop not found!\n");
+		pr_warn("supported laptop not found!\n");
 		ret = -ENODEV;
 		goto out;
 	}
@@ -595,7 +596,7 @@ static int __init hdaps_init(void)
 	if (ret)
 		goto out_idev;
 
-	printk(KERN_INFO "hdaps: driver successfully loaded.\n");
+	pr_info("driver successfully loaded\n");
 	return 0;
 
 out_idev:
@@ -609,7 +610,7 @@ out_driver:
 out_region:
 	release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
 out:
-	printk(KERN_WARNING "hdaps: driver init failed (ret=%d)!\n", ret);
+	pr_warn("driver init failed (ret=%d)!\n", ret);
 	return ret;
 }
 
@@ -622,7 +623,7 @@ static void __exit hdaps_exit(void)
 	platform_driver_unregister(&hdaps_driver);
 	release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
 
-	printk(KERN_INFO "hdaps: driver unloaded.\n");
+	pr_info("driver unloaded\n");
 }
 
 module_init(hdaps_init);
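One message in hdaps_device_init() stays a raw printk(KERN_DEBUG ...) rather than becoming pr_debug(). The likely reason (not stated in the diff itself) is that pr_debug() compiles to a no-op unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled, while printk(KERN_DEBUG ...) is always emitted at debug level:

    /* always compiled in, logged at KERN_DEBUG level */
    printk(KERN_DEBUG "hdaps: initial latch check good (0x%02x)\n",
           __get_latch(0x1611));

    /* compiled out (or dynamically gated) unless debugging is enabled */
    pr_debug("initial latch check good (0x%02x)\n", __get_latch(0x1611));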
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 1bc4a7539ba9..f94017bcdd6e 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -24,6 +24,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -54,9 +56,6 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
 #define HPWMI_HOTKEY_QUERY 0xc
 #define HPWMI_WIRELESS2_QUERY 0x1b
 
-#define PREFIX "HP WMI: "
-#define UNIMP "Unimplemented "
-
 enum hp_wmi_radio {
 	HPWMI_WIFI = 0,
 	HPWMI_BLUETOOTH = 1,
@@ -228,9 +227,8 @@ static int hp_wmi_perform_query(int query, int write, void *buffer,
 
 	if (bios_return->return_code) {
 		if (bios_return->return_code != HPWMI_RET_UNKNOWN_CMDTYPE)
-			printk(KERN_WARNING PREFIX "query 0x%x returned "
-			       "error 0x%x\n",
-			       query, bios_return->return_code);
+			pr_warn("query 0x%x returned error 0x%x\n",
+				query, bios_return->return_code);
 		kfree(obj);
 		return bios_return->return_code;
 	}
@@ -384,8 +382,7 @@ static int hp_wmi_rfkill2_refresh(void)
 
 		if (num >= state.count ||
 		    devstate->rfkill_id != rfkill2[i].id) {
-			printk(KERN_WARNING PREFIX "power configuration of "
-			       "the wireless devices unexpectedly changed\n");
+			pr_warn("power configuration of the wireless devices unexpectedly changed\n");
 			continue;
 		}
 
@@ -471,7 +468,7 @@ static void hp_wmi_notify(u32 value, void *context)
 
 	status = wmi_get_event_data(value, &response);
 	if (status != AE_OK) {
-		printk(KERN_INFO PREFIX "bad event status 0x%x\n", status);
+		pr_info("bad event status 0x%x\n", status);
 		return;
 	}
 
@@ -480,8 +477,7 @@ static void hp_wmi_notify(u32 value, void *context)
 	if (!obj)
 		return;
 	if (obj->type != ACPI_TYPE_BUFFER) {
-		printk(KERN_INFO "hp-wmi: Unknown response received %d\n",
-		       obj->type);
+		pr_info("Unknown response received %d\n", obj->type);
 		kfree(obj);
 		return;
 	}
@@ -498,8 +494,7 @@ static void hp_wmi_notify(u32 value, void *context)
 		event_id = *location;
 		event_data = *(location + 2);
 	} else {
-		printk(KERN_INFO "hp-wmi: Unknown buffer length %d\n",
-		       obj->buffer.length);
+		pr_info("Unknown buffer length %d\n", obj->buffer.length);
 		kfree(obj);
 		return;
 	}
@@ -527,8 +522,7 @@ static void hp_wmi_notify(u32 value, void *context)
 
 		if (!sparse_keymap_report_event(hp_wmi_input_dev,
 						key_code, 1, true))
-			printk(KERN_INFO PREFIX "Unknown key code - 0x%x\n",
-			       key_code);
+			pr_info("Unknown key code - 0x%x\n", key_code);
 		break;
 	case HPWMI_WIRELESS:
 		if (rfkill2_count) {
@@ -550,14 +544,12 @@ static void hp_wmi_notify(u32 value, void *context)
 			hp_wmi_get_hw_state(HPWMI_WWAN));
 		break;
 	case HPWMI_CPU_BATTERY_THROTTLE:
-		printk(KERN_INFO PREFIX UNIMP "CPU throttle because of 3 Cell"
-		       " battery event detected\n");
+		pr_info("Unimplemented CPU throttle because of 3 Cell battery event detected\n");
 		break;
 	case HPWMI_LOCK_SWITCH:
 		break;
 	default:
-		printk(KERN_INFO PREFIX "Unknown event_id - %d - 0x%x\n",
-		       event_id, event_data);
+		pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
 		break;
 	}
 }
@@ -705,7 +697,7 @@ static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device)
 		return err;
 
 	if (state.count > HPWMI_MAX_RFKILL2_DEVICES) {
-		printk(KERN_WARNING PREFIX "unable to parse 0x1b query output\n");
+		pr_warn("unable to parse 0x1b query output\n");
 		return -EINVAL;
 	}
 
@@ -727,14 +719,14 @@ static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device)
 			name = "hp-wwan";
 			break;
 		default:
-			printk(KERN_WARNING PREFIX "unknown device type 0x%x\n",
-			       state.device[i].radio_type);
+			pr_warn("unknown device type 0x%x\n",
+				state.device[i].radio_type);
 			continue;
 		}
 
 		if (!state.device[i].vendor_id) {
-			printk(KERN_WARNING PREFIX "zero device %d while %d "
-			       "reported\n", i, state.count);
+			pr_warn("zero device %d while %d reported\n",
+				i, state.count);
 			continue;
 		}
 
@@ -755,8 +747,7 @@ static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device)
 				   IS_HWBLOCKED(state.device[i].power));
 
 		if (!(state.device[i].power & HPWMI_POWER_BIOS))
-			printk(KERN_INFO PREFIX "device %s blocked by BIOS\n",
-			       name);
+			pr_info("device %s blocked by BIOS\n", name);
 
 		err = rfkill_register(rfkill);
 		if (err) {
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index b1396e5b2953..811d436cd677 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -22,6 +22,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/module.h>
@@ -69,9 +71,10 @@ struct ibm_rtl_table {
 #define RTL_SIGNATURE 0x0000005f4c54525fULL
 #define RTL_MASK      0x000000ffffffffffULL
 
-#define RTL_DEBUG(A, ...) do { \
-	if (debug) \
-		pr_info("ibm-rtl: " A, ##__VA_ARGS__ ); \
+#define RTL_DEBUG(fmt, ...)				\
+do {							\
+	if (debug)					\
+		pr_info(fmt, ##__VA_ARGS__);		\
 } while (0)
 
 static DEFINE_MUTEX(rtl_lock);
@@ -114,7 +117,7 @@ static int ibm_rtl_write(u8 value)
 	int ret = 0, count = 0;
 	static u32 cmd_port_val;
 
-	RTL_DEBUG("%s(%d)\n", __FUNCTION__, value);
+	RTL_DEBUG("%s(%d)\n", __func__, value);
 
 	value = value == 1 ? RTL_CMD_ENTER_PRTM : RTL_CMD_EXIT_PRTM;
 
@@ -144,8 +147,8 @@ static int ibm_rtl_write(u8 value)
 	while (ioread8(&rtl_table->command)) {
 		msleep(10);
 		if (count++ > 500) {
-			pr_err("ibm-rtl: Hardware not responding to "
+			pr_err("Hardware not responding to "
 			       "mode switch request\n");
 			ret = -EIO;
 			break;
 		}
@@ -250,7 +253,7 @@ static int __init ibm_rtl_init(void) {
 	int ret = -ENODEV, i;
 
 	if (force)
-		pr_warning("ibm-rtl: module loaded by force\n");
+		pr_warn("module loaded by force\n");
 	/* first ensure that we are running on IBM HW */
 	else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table))
 		return -ENODEV;
@@ -288,19 +291,19 @@ static int __init ibm_rtl_init(void) {
 		if ((readq(&tmp->signature) & RTL_MASK) == RTL_SIGNATURE) {
 			phys_addr_t addr;
 			unsigned int plen;
-			RTL_DEBUG("found RTL_SIGNATURE at %#llx\n", (u64)tmp);
+			RTL_DEBUG("found RTL_SIGNATURE at %p\n", tmp);
 			rtl_table = tmp;
 			/* The address, value, width and offset are platform
 			 * dependent and found in the ibm_rtl_table */
 			rtl_cmd_width = ioread8(&rtl_table->cmd_granularity);
 			rtl_cmd_type = ioread8(&rtl_table->cmd_address_type);
 			RTL_DEBUG("rtl_cmd_width = %u, rtl_cmd_type = %u\n",
 				  rtl_cmd_width, rtl_cmd_type);
 			addr = ioread32(&rtl_table->cmd_port_address);
 			RTL_DEBUG("addr = %#llx\n", (unsigned long long)addr);
 			plen = rtl_cmd_width/sizeof(char);
 			rtl_cmd_addr = rtl_port_map(addr, plen);
-			RTL_DEBUG("rtl_cmd_addr = %#llx\n", (u64)rtl_cmd_addr);
+			RTL_DEBUG("rtl_cmd_addr = %p\n", rtl_cmd_addr);
 			if (!rtl_cmd_addr) {
 				ret = -ENOMEM;
 				break;
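The rewritten RTL_DEBUG() is the standard conditional variadic debug macro: ##__VA_ARGS__ swallows the trailing comma when no arguments follow the format, and the do { } while (0) wrapper makes the expansion a single statement that is safe in un-braced conditionals. The same hunks also switch pointer printing from casting to u64 with %#llx to plain %p, printk's native pointer format. A sketch, assuming the module-level `debug` parameter and `rtl_table` pointer from this driver:

    #define RTL_DEBUG(fmt, ...)				\
    do {						\
    	if (debug)					\
    		pr_info(fmt, ##__VA_ARGS__);		\
    } while (0)

    /* safe even without braces around the branch: */
    if (force)
    	RTL_DEBUG("forced, table at %p\n", rtl_table);
    else
    	return -ENODEV;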
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 21b101899bae..bfdda33feb26 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -20,6 +20,8 @@
  * 02110-1301, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index eacd5da7dd24..809adea4965f 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -27,6 +27,8 @@
  * to get/set bandwidth.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -135,8 +137,7 @@ static int memory_set_cur_bandwidth(struct thermal_cooling_device *cdev,
 	acpi_evaluate_integer(handle, MEMORY_SET_BANDWIDTH, &arg_list,
 			      &temp);
 
-	printk(KERN_INFO
-	       "Bandwidth value was %ld: status is %d\n", state, status);
+	pr_info("Bandwidth value was %ld: status is %d\n", state, status);
 	if (ACPI_FAILURE(status))
 		return -EFAULT;
 
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 213e79ba68d5..f1ae5078b7ec 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -23,58 +23,48 @@
 #include <linux/slab.h>
 #include <linux/platform_device.h>
 #include <linux/input.h>
+
 #include <asm/intel_scu_ipc.h>
 
 #define DRIVER_NAME "msic_power_btn"
 
-#define MSIC_IRQ_STAT	0x02
-	#define MSIC_IRQ_PB	(1 << 0)
-#define MSIC_PB_CONFIG	0x3e
 #define MSIC_PB_STATUS	0x3f
-	#define MSIC_PB_LEVEL	(1 << 3) /* 1 - release, 0 - press */
-
-struct mfld_pb_priv {
-	struct input_dev *input;
-	unsigned int irq;
-};
+#define MSIC_PB_LEVEL	(1 << 3) /* 1 - release, 0 - press */
 
 static irqreturn_t mfld_pb_isr(int irq, void *dev_id)
 {
-	struct mfld_pb_priv *priv = dev_id;
+	struct input_dev *input = dev_id;
 	int ret;
 	u8 pbstat;
 
 	ret = intel_scu_ipc_ioread8(MSIC_PB_STATUS, &pbstat);
-	if (ret < 0)
-		return IRQ_HANDLED;
-
-	input_event(priv->input, EV_KEY, KEY_POWER, !(pbstat & MSIC_PB_LEVEL));
-	input_sync(priv->input);
+	if (ret < 0) {
+		dev_err(input->dev.parent, "Read error %d while reading"
+			" MSIC_PB_STATUS\n", ret);
+	} else {
+		input_event(input, EV_KEY, KEY_POWER,
+			    !(pbstat & MSIC_PB_LEVEL));
+		input_sync(input);
+	}
 
 	return IRQ_HANDLED;
 }
 
 static int __devinit mfld_pb_probe(struct platform_device *pdev)
 {
-	struct mfld_pb_priv *priv;
 	struct input_dev *input;
-	int irq;
+	int irq = platform_get_irq(pdev, 0);
 	int error;
 
-	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
 		return -EINVAL;
 
-	priv = kzalloc(sizeof(struct mfld_pb_priv), GFP_KERNEL);
 	input = input_allocate_device();
-	if (!priv || !input) {
-		error = -ENOMEM;
-		goto err_free_mem;
+	if (!input) {
+		dev_err(&pdev->dev, "Input device allocation error\n");
+		return -ENOMEM;
 	}
 
-	priv->input = input;
-	priv->irq = irq;
-
 	input->name = pdev->name;
 	input->phys = "power-button/input0";
 	input->id.bustype = BUS_HOST;
@@ -82,42 +72,40 @@ static int __devinit mfld_pb_probe(struct platform_device *pdev)
 
 	input_set_capability(input, EV_KEY, KEY_POWER);
 
-	error = request_threaded_irq(priv->irq, NULL, mfld_pb_isr,
-				     0, DRIVER_NAME, priv);
+	error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
				     DRIVER_NAME, input);
 	if (error) {
-		dev_err(&pdev->dev,
-			"unable to request irq %d for mfld power button\n",
-			irq);
-		goto err_free_mem;
+		dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
+			"button\n", irq);
+		goto err_free_input;
 	}
 
 	error = input_register_device(input);
 	if (error) {
-		dev_err(&pdev->dev,
-			"unable to register input dev, error %d\n", error);
+		dev_err(&pdev->dev, "Unable to register input dev, error "
+			"%d\n", error);
 		goto err_free_irq;
 	}
 
-	platform_set_drvdata(pdev, priv);
+	platform_set_drvdata(pdev, input);
 	return 0;
 
 err_free_irq:
-	free_irq(priv->irq, priv);
-err_free_mem:
+	free_irq(irq, input);
+err_free_input:
 	input_free_device(input);
-	kfree(priv);
 	return error;
 }
 
 static int __devexit mfld_pb_remove(struct platform_device *pdev)
 {
-	struct mfld_pb_priv *priv = platform_get_drvdata(pdev);
-
-	free_irq(priv->irq, priv);
-	input_unregister_device(priv->input);
-	kfree(priv);
+	struct input_dev *input = platform_get_drvdata(pdev);
+	int irq = platform_get_irq(pdev, 0);
 
+	free_irq(irq, input);
+	input_unregister_device(input);
 	platform_set_drvdata(pdev, NULL);
+
 	return 0;
 }
 
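The rewrite above removes the mfld_pb_priv wrapper entirely: the only state it carried was the input device and the IRQ, and both are recoverable from the platform device, so the input_dev itself becomes the driver data and one allocation plus its error path disappear. The shape of the pattern (a sketch, not additional driver code):

    /* probe: the input device is the only per-device state */
    platform_set_drvdata(pdev, input);

    /* remove: recover both pieces without a wrapper struct */
    struct input_dev *input = platform_get_drvdata(pdev);
    int irq = platform_get_irq(pdev, 0);

    free_irq(irq, input);
    input_unregister_device(input);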
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index c2f4bd8013b5..3a578323122b 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -37,49 +37,50 @@
 #include <asm/intel_scu_ipc.h>
 
 /* Number of thermal sensors */
 #define MSIC_THERMAL_SENSORS	4
 
 /* ADC1 - thermal registers */
 #define MSIC_THERM_ADC1CNTL1	0x1C0
 #define MSIC_ADC_ENBL		0x10
 #define MSIC_ADC_START		0x08
 
 #define MSIC_THERM_ADC1CNTL3	0x1C2
 #define MSIC_ADCTHERM_ENBL	0x04
 #define MSIC_ADCRRDATA_ENBL	0x05
 #define MSIC_CHANL_MASK_VAL	0x0F
 
 #define MSIC_STOPBIT_MASK	16
 #define MSIC_ADCTHERM_MASK	4
-#define ADC_CHANLS_MAX		15 /* Number of ADC channels */
-#define ADC_LOOP_MAX		(ADC_CHANLS_MAX - MSIC_THERMAL_SENSORS)
+/* Number of ADC channels */
+#define ADC_CHANLS_MAX		15
+#define ADC_LOOP_MAX		(ADC_CHANLS_MAX - MSIC_THERMAL_SENSORS)
 
 /* ADC channel code values */
 #define SKIN_SENSOR0_CODE	0x08
 #define SKIN_SENSOR1_CODE	0x09
 #define SYS_SENSOR_CODE		0x0A
 #define MSIC_DIE_SENSOR_CODE	0x03
 
 #define SKIN_THERM_SENSOR0	0
 #define SKIN_THERM_SENSOR1	1
 #define SYS_THERM_SENSOR2	2
 #define MSIC_DIE_THERM_SENSOR3	3
 
 /* ADC code range */
 #define ADC_MAX			977
 #define ADC_MIN			162
 #define ADC_VAL0C		887
 #define ADC_VAL20C		720
 #define ADC_VAL40C		508
 #define ADC_VAL60C		315
 
 /* ADC base addresses */
 #define ADC_CHNL_START_ADDR	0x1C5	/* increments by 1 */
 #define ADC_DATA_START_ADDR	0x1D4	/* increments by 2 */
 
 /* MSIC die attributes */
 #define MSIC_DIE_ADC_MIN	488
 #define MSIC_DIE_ADC_MAX	1004
 
 /* This holds the address of the first free ADC channel,
  * among the 15 channels
@@ -87,15 +88,15 @@
 static int channel_index;
 
 struct platform_info {
 	struct platform_device *pdev;
 	struct thermal_zone_device *tzd[MSIC_THERMAL_SENSORS];
 };
 
 struct thermal_device_info {
 	unsigned int chnl_addr;
 	int direct;
 	/* This holds the current temperature in millidegree celsius */
 	long curr_temp;
 };
 
 /**
@@ -106,7 +107,7 @@ struct thermal_device_info {
  */
 static int to_msic_die_temp(uint16_t adc_val)
 {
 	return (368 * (adc_val) / 1000) - 220;
 }
 
 /**
@@ -118,7 +119,7 @@ static int to_msic_die_temp(uint16_t adc_val)
  */
 static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max)
 {
 	return (adc_val >= min) && (adc_val <= max);
 }
 
 /**
@@ -136,35 +137,35 @@ static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max)
  */
 static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
 {
 	int temp;
 
 	/* Direct conversion for die temperature */
 	if (direct) {
 		if (is_valid_adc(adc_val, MSIC_DIE_ADC_MIN, MSIC_DIE_ADC_MAX)) {
 			*tp = to_msic_die_temp(adc_val) * 1000;
 			return 0;
 		}
 		return -ERANGE;
 	}
 
 	if (!is_valid_adc(adc_val, ADC_MIN, ADC_MAX))
 		return -ERANGE;
 
 	/* Linear approximation for skin temperature */
 	if (adc_val > ADC_VAL0C)
 		temp = 177 - (adc_val/5);
 	else if ((adc_val <= ADC_VAL0C) && (adc_val > ADC_VAL20C))
 		temp = 111 - (adc_val/8);
 	else if ((adc_val <= ADC_VAL20C) && (adc_val > ADC_VAL40C))
 		temp = 92 - (adc_val/10);
 	else if ((adc_val <= ADC_VAL40C) && (adc_val > ADC_VAL60C))
 		temp = 91 - (adc_val/10);
 	else
 		temp = 112 - (adc_val/6);
 
 	/* Convert temperature in celsius to milli degree celsius */
 	*tp = temp * 1000;
 	return 0;
 }
 
 /**
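The piecewise skin-temperature conversion in adc_to_temp() is easy to sanity-check with one value:

    /* Worked example: adc_val = 800 falls in (ADC_VAL20C, ADC_VAL0C] =
     * (720, 887], so temp = 111 - 800/8 = 111 - 100 = 11 degrees Celsius,
     * and *tp receives 11 * 1000 = 11000 millidegrees. */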
@@ -178,47 +179,47 @@ static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
  */
 static int mid_read_temp(struct thermal_zone_device *tzd, unsigned long *temp)
 {
 	struct thermal_device_info *td_info = tzd->devdata;
 	uint16_t adc_val, addr;
 	uint8_t data = 0;
 	int ret;
 	unsigned long curr_temp;
 
 
 	addr = td_info->chnl_addr;
 
 	/* Enable the msic for conversion before reading */
 	ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCRRDATA_ENBL);
 	if (ret)
 		return ret;
 
 	/* Re-toggle the RRDATARD bit (temporary workaround) */
 	ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCTHERM_ENBL);
 	if (ret)
 		return ret;
 
 	/* Read the higher bits of data */
 	ret = intel_scu_ipc_ioread8(addr, &data);
 	if (ret)
 		return ret;
 
 	/* Shift bits to accommodate the lower two data bits */
 	adc_val = (data << 2);
 	addr++;
 
 	ret = intel_scu_ipc_ioread8(addr, &data);/* Read lower bits */
 	if (ret)
 		return ret;
 
 	/* Adding lower two bits to the higher bits */
 	data &= 03;
 	adc_val += data;
 
 	/* Convert ADC value to temperature */
 	ret = adc_to_temp(td_info->direct, adc_val, &curr_temp);
 	if (ret == 0)
 		*temp = td_info->curr_temp = curr_temp;
 	return ret;
 }
 
 /**
@@ -231,22 +232,21 @@ static int mid_read_temp(struct thermal_zone_device *tzd, unsigned long *temp)
  */
 static int configure_adc(int val)
 {
 	int ret;
 	uint8_t data;
 
 	ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
 	if (ret)
 		return ret;
 
 	if (val) {
 		/* Enable and start the ADC */
 		data |= (MSIC_ADC_ENBL | MSIC_ADC_START);
 	} else {
 		/* Just stop the ADC */
 		data &= (~MSIC_ADC_START);
 	}
-
 	return intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL1, data);
 }
 
 /**
@@ -259,30 +259,30 @@ static int configure_adc(int val)
  */
 static int set_up_therm_channel(u16 base_addr)
 {
 	int ret;
 
 	/* Enable all the sensor channels */
 	ret = intel_scu_ipc_iowrite8(base_addr, SKIN_SENSOR0_CODE);
 	if (ret)
 		return ret;
 
 	ret = intel_scu_ipc_iowrite8(base_addr + 1, SKIN_SENSOR1_CODE);
 	if (ret)
 		return ret;
 
 	ret = intel_scu_ipc_iowrite8(base_addr + 2, SYS_SENSOR_CODE);
 	if (ret)
 		return ret;
 
 	/* Since this is the last channel, set the stop bit
-	   to 1 by ORing the DIE_SENSOR_CODE with 0x10 */
+	 * to 1 by ORing the DIE_SENSOR_CODE with 0x10 */
 	ret = intel_scu_ipc_iowrite8(base_addr + 3,
 				     (MSIC_DIE_SENSOR_CODE | 0x10));
 	if (ret)
 		return ret;
 
 	/* Enable ADC and start it */
 	return configure_adc(1);
 }
 
 /**
@@ -293,13 +293,13 @@ static int set_up_therm_channel(u16 base_addr)
  */
 static int reset_stopbit(uint16_t addr)
 {
 	int ret;
 	uint8_t data;
 	ret = intel_scu_ipc_ioread8(addr, &data);
 	if (ret)
 		return ret;
 	/* Set the stop bit to zero */
 	return intel_scu_ipc_iowrite8(addr, (data & 0xEF));
 }
 
 /**
@@ -317,30 +317,30 @@ static int reset_stopbit(uint16_t addr)
  */
 static int find_free_channel(void)
 {
 	int ret;
 	int i;
 	uint8_t data;
 
 	/* check whether ADC is enabled */
 	ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
 	if (ret)
 		return ret;
 
 	if ((data & MSIC_ADC_ENBL) == 0)
 		return 0;
 
 	/* ADC is already enabled; Looking for an empty channel */
 	for (i = 0; i < ADC_CHANLS_MAX; i++) {
 		ret = intel_scu_ipc_ioread8(ADC_CHNL_START_ADDR + i, &data);
 		if (ret)
 			return ret;
 
 		if (data & MSIC_STOPBIT_MASK) {
 			ret = i;
 			break;
 		}
 	}
 	return (ret > ADC_LOOP_MAX) ? (-EINVAL) : ret;
 }
 
 /**
@@ -351,48 +351,48 @@ static int find_free_channel(void)
  */
 static int mid_initialize_adc(struct device *dev)
 {
 	u8  data;
 	u16 base_addr;
 	int ret;
 
 	/*
 	 * Ensure that adctherm is disabled before we
 	 * initialize the ADC
 	 */
 	ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL3, &data);
 	if (ret)
 		return ret;
 
 	if (data & MSIC_ADCTHERM_MASK)
 		dev_warn(dev, "ADCTHERM already set");
 
 	/* Index of the first channel in which the stop bit is set */
 	channel_index = find_free_channel();
 	if (channel_index < 0) {
 		dev_err(dev, "No free ADC channels");
 		return channel_index;
 	}
 
 	base_addr = ADC_CHNL_START_ADDR + channel_index;
 
 	if (!(channel_index == 0 || channel_index == ADC_LOOP_MAX)) {
 		/* Reset stop bit for channels other than 0 and 12 */
 		ret = reset_stopbit(base_addr);
 		if (ret)
 			return ret;
 
 		/* Index of the first free channel */
 		base_addr++;
 		channel_index++;
 	}
 
 	ret = set_up_therm_channel(base_addr);
 	if (ret) {
 		dev_err(dev, "unable to enable ADC");
 		return ret;
 	}
 	dev_dbg(dev, "ADC initialization successful");
 	return ret;
 }
 
 /**
@@ -403,18 +403,18 @@ static int mid_initialize_adc(struct device *dev)
403 */ 403 */
404static struct thermal_device_info *initialize_sensor(int index) 404static struct thermal_device_info *initialize_sensor(int index)
405{ 405{
406 struct thermal_device_info *td_info = 406 struct thermal_device_info *td_info =
407 kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL); 407 kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
408 408
409 if (!td_info) 409 if (!td_info)
410 return NULL; 410 return NULL;
411 411
412 /* Set the base addr of the channel for this sensor */ 412 /* Set the base addr of the channel for this sensor */
413 td_info->chnl_addr = ADC_DATA_START_ADDR + 2 * (channel_index + index); 413 td_info->chnl_addr = ADC_DATA_START_ADDR + 2 * (channel_index + index);
414 /* Sensor 3 is direct conversion */ 414 /* Sensor 3 is direct conversion */
415 if (index == 3) 415 if (index == 3)
416 td_info->direct = 1; 416 td_info->direct = 1;
417 return td_info; 417 return td_info;
418} 418}
419 419
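[Editor's note] The channel address math above relies on each ADC channel exposing its result in two consecutive byte-wide registers, hence the stride of 2. A minimal read sketch under that assumption (the 10-bit high/low register split is assumed here, not established by this hunk):

	int ret;
	u8 hi, lo;

	/* assumed layout: high 8 bits at chnl_addr, low 2 bits next */
	ret = intel_scu_ipc_ioread8(td_info->chnl_addr, &hi);
	if (!ret)
		ret = intel_scu_ipc_ioread8(td_info->chnl_addr + 1, &lo);
	/* the raw 10-bit sample would then be (hi << 2) | (lo & 0x3) */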
420/** 420/**
@@ -425,7 +425,7 @@ static struct thermal_device_info *initialize_sensor(int index)
425 */ 425 */
426static int mid_thermal_resume(struct platform_device *pdev) 426static int mid_thermal_resume(struct platform_device *pdev)
427{ 427{
428 return mid_initialize_adc(&pdev->dev); 428 return mid_initialize_adc(&pdev->dev);
429} 429}
430 430
431/** 431/**
@@ -437,12 +437,12 @@ static int mid_thermal_resume(struct platform_device *pdev)
437 */ 437 */
438static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg) 438static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg)
439{ 439{
440 /* 440 /*
441 * Passing 0 to configure_adc() just stops the ADC and does 441 * Passing 0 to configure_adc() just stops the ADC and does
442 * not disable it; a temporary workaround until we have a 442 * not disable it; a temporary workaround until we have a
443 * generic ADC driver. 443 * generic ADC driver.
444 */ 444 */
445 return configure_adc(0); 445 return configure_adc(0);
446} 446}
447 447
448/** 448/**
@@ -453,16 +453,15 @@ static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg)
453 */ 453 */
454static int read_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp) 454static int read_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp)
455{ 455{
456 WARN_ON(tzd == NULL); 456 WARN_ON(tzd == NULL);
457 return mid_read_temp(tzd, temp); 457 return mid_read_temp(tzd, temp);
458} 458}
459 459
460/* Can't be const */ 460/* Can't be const */
461static struct thermal_zone_device_ops tzd_ops = { 461static struct thermal_zone_device_ops tzd_ops = {
462 .get_temp = read_curr_temp, 462 .get_temp = read_curr_temp,
463}; 463};
464 464
465
466/** 465/**
467 * mid_thermal_probe - mfld thermal initialize 466 * mid_thermal_probe - mfld thermal initialize
468 * @pdev: platform device structure 467 * @pdev: platform device structure
@@ -472,46 +471,45 @@ static struct thermal_zone_device_ops tzd_ops = {
472 */ 471 */
473static int mid_thermal_probe(struct platform_device *pdev) 472static int mid_thermal_probe(struct platform_device *pdev)
474{ 473{
475 static char *name[MSIC_THERMAL_SENSORS] = { 474 static char *name[MSIC_THERMAL_SENSORS] = {
476 "skin0", "skin1", "sys", "msicdie" 475 "skin0", "skin1", "sys", "msicdie"
477 }; 476 };
478 477
479 int ret; 478 int ret;
480 int i; 479 int i;
481 struct platform_info *pinfo; 480 struct platform_info *pinfo;
482 481
483 pinfo = kzalloc(sizeof(struct platform_info), GFP_KERNEL); 482 pinfo = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
484 if (!pinfo) 483 if (!pinfo)
485 return -ENOMEM; 484 return -ENOMEM;
486 485
487 /* Initializing the hardware */ 486 /* Initializing the hardware */
488 ret = mid_initialize_adc(&pdev->dev); 487 ret = mid_initialize_adc(&pdev->dev);
489 if (ret) { 488 if (ret) {
490 dev_err(&pdev->dev, "ADC init failed"); 489 dev_err(&pdev->dev, "ADC init failed");
491 kfree(pinfo); 490 kfree(pinfo);
492 return ret; 491 return ret;
493 } 492 }
494 493
495 /* Register each sensor with the generic thermal framework*/ 494 /* Register each sensor with the generic thermal framework*/
496 for (i = 0; i < MSIC_THERMAL_SENSORS; i++) { 495 for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
497 pinfo->tzd[i] = thermal_zone_device_register(name[i], 496 pinfo->tzd[i] = thermal_zone_device_register(name[i],
498 0, initialize_sensor(i), 497 0, initialize_sensor(i), &tzd_ops, 0, 0, 0, 0);
499 &tzd_ops, 0, 0, 0, 0); 498 if (IS_ERR(pinfo->tzd[i]))
500 if (IS_ERR(pinfo->tzd[i])) 499 goto reg_fail;
501 goto reg_fail; 500 }
502 } 501
503 502 pinfo->pdev = pdev;
504 pinfo->pdev = pdev; 503 platform_set_drvdata(pdev, pinfo);
505 platform_set_drvdata(pdev, pinfo); 504 return 0;
506 return 0;
507 505
508reg_fail: 506reg_fail:
509 ret = PTR_ERR(pinfo->tzd[i]); 507 ret = PTR_ERR(pinfo->tzd[i]);
510 while (--i >= 0) 508 while (--i >= 0)
511 thermal_zone_device_unregister(pinfo->tzd[i]); 509 thermal_zone_device_unregister(pinfo->tzd[i]);
512 configure_adc(0); 510 configure_adc(0);
513 kfree(pinfo); 511 kfree(pinfo);
514 return ret; 512 return ret;
515} 513}
516 514
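[Editor's note] The restructured loop above keeps the usual register-then-unwind shape: the first failed registration jumps to reg_fail, which unregisters everything done so far in reverse. A generic sketch of the idiom, with hypothetical do_register()/do_unregister() helpers standing in for the thermal zone calls:

	for (i = 0; i < N; i++) {
		ret = do_register(i);	/* hypothetical helper */
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		do_unregister(i);	/* tear down in reverse */
	return ret;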
517/** 515/**
@@ -523,49 +521,46 @@ reg_fail:
523 */ 521 */
524static int mid_thermal_remove(struct platform_device *pdev) 522static int mid_thermal_remove(struct platform_device *pdev)
525{ 523{
526 int i; 524 int i;
527 struct platform_info *pinfo = platform_get_drvdata(pdev); 525 struct platform_info *pinfo = platform_get_drvdata(pdev);
528 526
529 for (i = 0; i < MSIC_THERMAL_SENSORS; i++) 527 for (i = 0; i < MSIC_THERMAL_SENSORS; i++)
530 thermal_zone_device_unregister(pinfo->tzd[i]); 528 thermal_zone_device_unregister(pinfo->tzd[i]);
531 529
532 platform_set_drvdata(pdev, NULL); 530 kfree(pinfo);
531 platform_set_drvdata(pdev, NULL);
533 532
534 /* Stop the ADC */ 533 /* Stop the ADC */
535 return configure_adc(0); 534 return configure_adc(0);
536} 535}
537 536
538/*********************************************************************
539 * Driver initialisation and finalization
540 *********************************************************************/
541
542#define DRIVER_NAME "msic_sensor" 537#define DRIVER_NAME "msic_sensor"
543 538
544static const struct platform_device_id therm_id_table[] = { 539static const struct platform_device_id therm_id_table[] = {
545 { DRIVER_NAME, 1 }, 540 { DRIVER_NAME, 1 },
546 { } 541 { }
547}; 542};
548 543
549static struct platform_driver mid_thermal_driver = { 544static struct platform_driver mid_thermal_driver = {
550 .driver = { 545 .driver = {
551 .name = DRIVER_NAME, 546 .name = DRIVER_NAME,
552 .owner = THIS_MODULE, 547 .owner = THIS_MODULE,
553 }, 548 },
554 .probe = mid_thermal_probe, 549 .probe = mid_thermal_probe,
555 .suspend = mid_thermal_suspend, 550 .suspend = mid_thermal_suspend,
556 .resume = mid_thermal_resume, 551 .resume = mid_thermal_resume,
557 .remove = __devexit_p(mid_thermal_remove), 552 .remove = __devexit_p(mid_thermal_remove),
558 .id_table = therm_id_table, 553 .id_table = therm_id_table,
559}; 554};
560 555
561static int __init mid_thermal_module_init(void) 556static int __init mid_thermal_module_init(void)
562{ 557{
563 return platform_driver_register(&mid_thermal_driver); 558 return platform_driver_register(&mid_thermal_driver);
564} 559}
565 560
566static void __exit mid_thermal_module_exit(void) 561static void __exit mid_thermal_module_exit(void)
567{ 562{
568 platform_driver_unregister(&mid_thermal_driver); 563 platform_driver_unregister(&mid_thermal_driver);
569} 564}
570 565
571module_init(mid_thermal_module_init); 566module_init(mid_thermal_module_init);
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
new file mode 100644
index 000000000000..e936364a609d
--- /dev/null
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -0,0 +1,396 @@
1/*
2 * intel_oaktrail.c - Intel OakTrail Platform support.
3 *
4 * Copyright (C) 2010-2011 Intel Corporation
5 * Author: Yin Kangkai (kangkai.yin@intel.com)
6 *
7 * based on Compal driver, Copyright (C) 2008 Cezary Jackiewicz
8 * <cezary.jackiewicz (at) gmail.com>, based on MSI driver
9 * Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
24 * 02110-1301, USA.
25 *
26 * This driver does the following:
27 * 1. registers itself in the Linux backlight control subsystem in
28 * /sys/class/backlight/intel_oaktrail/
29 *
30 * 2. registers in the rfkill subsystem here: /sys/class/rfkill/rfkillX/
31 * for these components: wifi, bluetooth, wwan (3g), gps
32 *
33 * This driver might work on other products based on Oaktrail. If you
34 * want to try it you can pass force=1 as an argument to the module,
35 * which will force it to load even when the DMI data doesn't identify
36 * the product as compatible.
37 */
38
39#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40
41#include <linux/module.h>
42#include <linux/kernel.h>
43#include <linux/init.h>
44#include <linux/acpi.h>
45#include <linux/fb.h>
46#include <linux/mutex.h>
47#include <linux/err.h>
48#include <linux/i2c.h>
49#include <linux/backlight.h>
50#include <linux/platform_device.h>
51#include <linux/dmi.h>
52#include <linux/rfkill.h>
53#include <acpi/acpi_bus.h>
54#include <acpi/acpi_drivers.h>
55
56
57#define DRIVER_NAME "intel_oaktrail"
58#define DRIVER_VERSION "0.4ac1"
59
60/*
61 * This is the devices status address in EC space, and the control bits
62 * definition:
63 *
64 * (1 << 0): Camera enable/disable, RW.
65 * (1 << 1): Bluetooth enable/disable, RW.
66 * (1 << 2): GPS enable/disable, RW.
67 * (1 << 3): WiFi enable/disable, RW.
68 * (1 << 4): WWAN (3G) enable/disable, RW.
69 * (1 << 5): Touchscreen enable/disable, Read Only.
70 */
71#define OT_EC_DEVICE_STATE_ADDRESS 0xD6
72
73#define OT_EC_CAMERA_MASK (1 << 0)
74#define OT_EC_BT_MASK (1 << 1)
75#define OT_EC_GPS_MASK (1 << 2)
76#define OT_EC_WIFI_MASK (1 << 3)
77#define OT_EC_WWAN_MASK (1 << 4)
78#define OT_EC_TS_MASK (1 << 5)
79
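[Editor's note] A short sketch of how these status bits are meant to be consumed, reusing the ec_read()/ec_write() helpers this file already depends on (the write-back is illustrative only):

	u8 state;

	ec_read(OT_EC_DEVICE_STATE_ADDRESS, &state);

	if (state & OT_EC_WIFI_MASK)	/* RW bit: WiFi enabled */
		ec_write(OT_EC_DEVICE_STATE_ADDRESS,
			 state & ~OT_EC_WIFI_MASK);	/* turn it off */

	/* bit 5 (touchscreen) is read-only and can only be tested */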
80/*
81 * This is the address in EC space and commands used to control LCD backlight:
82 *
83 * Two steps needed to change the LCD backlight:
84 * 1. write the backlight percentage into OT_EC_BL_BRIGHTNESS_ADDRESS;
85 * 2. write OT_EC_BL_CONTROL_ON_DATA into OT_EC_BL_CONTROL_ADDRESS.
86 *
87 * To read the LCD back light, just read out the value from
88 * OT_EC_BL_BRIGHTNESS_ADDRESS.
89 *
90 * LCD backlight brightness range: 0 - 100 (OT_EC_BL_BRIGHTNESS_MAX)
91 */
92#define OT_EC_BL_BRIGHTNESS_ADDRESS 0x44
93#define OT_EC_BL_BRIGHTNESS_MAX 100
94#define OT_EC_BL_CONTROL_ADDRESS 0x3A
95#define OT_EC_BL_CONTROL_ON_DATA 0x1A
96
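[Editor's note] The two-step sequence described in the comment above, as a minimal sketch ('level' is a hypothetical value in the 0-100 range):

	ec_write(OT_EC_BL_BRIGHTNESS_ADDRESS, level);	/* 1: percentage */
	ec_write(OT_EC_BL_CONTROL_ADDRESS,
		 OT_EC_BL_CONTROL_ON_DATA);		/* 2: latch it */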
97
98static bool force;
99module_param(force, bool, 0);
100MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
101
102static struct platform_device *oaktrail_device;
103static struct backlight_device *oaktrail_bl_device;
104static struct rfkill *bt_rfkill;
105static struct rfkill *gps_rfkill;
106static struct rfkill *wifi_rfkill;
107static struct rfkill *wwan_rfkill;
108
109
110/* rfkill */
111static int oaktrail_rfkill_set(void *data, bool blocked)
112{
113 u8 value;
114 u8 result;
115 unsigned long radio = (unsigned long) data;
116
117 ec_read(OT_EC_DEVICE_STATE_ADDRESS, &result);
118
119 if (!blocked)
120 value = (u8) (result | radio);
121 else
122 value = (u8) (result & ~radio);
123
124 ec_write(OT_EC_DEVICE_STATE_ADDRESS, value);
125
126 return 0;
127}
128
129static const struct rfkill_ops oaktrail_rfkill_ops = {
130 .set_block = oaktrail_rfkill_set,
131};
132
133static struct rfkill *oaktrail_rfkill_new(char *name, enum rfkill_type type,
134 unsigned long mask)
135{
136 struct rfkill *rfkill_dev;
137 u8 value;
138 int err;
139
140 rfkill_dev = rfkill_alloc(name, &oaktrail_device->dev, type,
141 &oaktrail_rfkill_ops, (void *)mask);
142 if (!rfkill_dev)
143 return ERR_PTR(-ENOMEM);
144
145 ec_read(OT_EC_DEVICE_STATE_ADDRESS, &value);
146 rfkill_init_sw_state(rfkill_dev, !(value & mask));
147
148 err = rfkill_register(rfkill_dev);
149 if (err) {
150 rfkill_destroy(rfkill_dev);
151 return ERR_PTR(err);
152 }
153
154 return rfkill_dev;
155}
156
157static inline void __oaktrail_rfkill_cleanup(struct rfkill *rf)
158{
159 if (rf) {
160 rfkill_unregister(rf);
161 rfkill_destroy(rf);
162 }
163}
164
165static void oaktrail_rfkill_cleanup(void)
166{
167 __oaktrail_rfkill_cleanup(wifi_rfkill);
168 __oaktrail_rfkill_cleanup(bt_rfkill);
169 __oaktrail_rfkill_cleanup(gps_rfkill);
170 __oaktrail_rfkill_cleanup(wwan_rfkill);
171}
172
173static int oaktrail_rfkill_init(void)
174{
175 int ret;
176
177 wifi_rfkill = oaktrail_rfkill_new("oaktrail-wifi",
178 RFKILL_TYPE_WLAN,
179 OT_EC_WIFI_MASK);
180 if (IS_ERR(wifi_rfkill)) {
181 ret = PTR_ERR(wifi_rfkill);
182 wifi_rfkill = NULL;
183 goto cleanup;
184 }
185
186 bt_rfkill = oaktrail_rfkill_new("oaktrail-bluetooth",
187 RFKILL_TYPE_BLUETOOTH,
188 OT_EC_BT_MASK);
189 if (IS_ERR(bt_rfkill)) {
190 ret = PTR_ERR(bt_rfkill);
191 bt_rfkill = NULL;
192 goto cleanup;
193 }
194
195 gps_rfkill = oaktrail_rfkill_new("oaktrail-gps",
196 RFKILL_TYPE_GPS,
197 OT_EC_GPS_MASK);
198 if (IS_ERR(gps_rfkill)) {
199 ret = PTR_ERR(gps_rfkill);
200 gps_rfkill = NULL;
201 goto cleanup;
202 }
203
204 wwan_rfkill = oaktrail_rfkill_new("oaktrail-wwan",
205 RFKILL_TYPE_WWAN,
206 OT_EC_WWAN_MASK);
207 if (IS_ERR(wwan_rfkill)) {
208 ret = PTR_ERR(wwan_rfkill);
209 wwan_rfkill = NULL;
210 goto cleanup;
211 }
212
213 return 0;
214
215cleanup:
216 oaktrail_rfkill_cleanup();
217 return ret;
218}
219
220
221/* backlight */
222static int get_backlight_brightness(struct backlight_device *b)
223{
224 u8 value;
225 ec_read(OT_EC_BL_BRIGHTNESS_ADDRESS, &value);
226
227 return value;
228}
229
230static int set_backlight_brightness(struct backlight_device *b)
231{
232 u8 percent = (u8) b->props.brightness;
233 if (percent > OT_EC_BL_BRIGHTNESS_MAX)
234 return -EINVAL;
235
236 ec_write(OT_EC_BL_BRIGHTNESS_ADDRESS, percent);
237 ec_write(OT_EC_BL_CONTROL_ADDRESS, OT_EC_BL_CONTROL_ON_DATA);
238
239 return 0;
240}
241
242static const struct backlight_ops oaktrail_bl_ops = {
243 .get_brightness = get_backlight_brightness,
244 .update_status = set_backlight_brightness,
245};
246
247static int oaktrail_backlight_init(void)
248{
249 struct backlight_device *bd;
250 struct backlight_properties props;
251
252 memset(&props, 0, sizeof(struct backlight_properties));
253 props.max_brightness = OT_EC_BL_BRIGHTNESS_MAX;
254 bd = backlight_device_register(DRIVER_NAME,
255 &oaktrail_device->dev, NULL,
256 &oaktrail_bl_ops,
257 &props);
258
259 if (IS_ERR(bd)) {
260 oaktrail_bl_device = NULL;
261 pr_warning("Unable to register backlight device\n");
262 return PTR_ERR(bd);
263 }
264
265 oaktrail_bl_device = bd;
266
267 bd->props.brightness = get_backlight_brightness(bd);
268 bd->props.power = FB_BLANK_UNBLANK;
269 backlight_update_status(bd);
270
271 return 0;
272}
273
274static void oaktrail_backlight_exit(void)
275{
276 if (oaktrail_bl_device)
277 backlight_device_unregister(oaktrail_bl_device);
278}
279
280static int __devinit oaktrail_probe(struct platform_device *pdev)
281{
282 return 0;
283}
284
285static int __devexit oaktrail_remove(struct platform_device *pdev)
286{
287 return 0;
288}
289
290static struct platform_driver oaktrail_driver = {
291 .driver = {
292 .name = DRIVER_NAME,
293 .owner = THIS_MODULE,
294 },
295 .probe = oaktrail_probe,
296 .remove = __devexit_p(oaktrail_remove)
297};
298
299static int dmi_check_cb(const struct dmi_system_id *id)
300{
301 pr_info("Identified model '%s'\n", id->ident);
302 return 0;
303}
304
305static struct dmi_system_id __initdata oaktrail_dmi_table[] = {
306 {
307 .ident = "OakTrail platform",
308 .matches = {
309 DMI_MATCH(DMI_PRODUCT_NAME, "OakTrail platform"),
310 },
311 .callback = dmi_check_cb
312 },
313 { }
314};
315
316static int __init oaktrail_init(void)
317{
318 int ret;
319
320 if (acpi_disabled) {
321 pr_err("ACPI needs to be enabled for this driver to work!\n");
322 return -ENODEV;
323 }
324
325 if (!force && !dmi_check_system(oaktrail_dmi_table)) {
326 pr_err("Platform not recognized (You could try the module's force-parameter)");
327 return -ENODEV;
328 }
329
330 ret = platform_driver_register(&oaktrail_driver);
331 if (ret) {
332 pr_warning("Unable to register platform driver\n");
333 goto err_driver_reg;
334 }
335
336 oaktrail_device = platform_device_alloc(DRIVER_NAME, -1);
337 if (!oaktrail_device) {
338 pr_warning("Unable to allocate platform device\n");
339 ret = -ENOMEM;
340 goto err_device_alloc;
341 }
342
343 ret = platform_device_add(oaktrail_device);
344 if (ret) {
345 pr_warning("Unable to add platform device\n");
346 goto err_device_add;
347 }
348
349 if (!acpi_video_backlight_support()) {
350 ret = oaktrail_backlight_init();
351 if (ret)
352 goto err_backlight;
353
354 } else
355 pr_info("Backlight controlled by ACPI video driver\n");
356
357 ret = oaktrail_rfkill_init();
358 if (ret) {
359 pr_warning("Setup rfkill failed\n");
360 goto err_rfkill;
361 }
362
363 pr_info("Driver "DRIVER_VERSION" successfully loaded\n");
364 return 0;
365
366err_rfkill:
367 oaktrail_backlight_exit();
368err_backlight:
369 platform_device_del(oaktrail_device);
370err_device_add:
371 platform_device_put(oaktrail_device);
372err_device_alloc:
373 platform_driver_unregister(&oaktrail_driver);
374err_driver_reg:
375
376 return ret;
377}
378
379static void __exit oaktrail_cleanup(void)
380{
381 oaktrail_backlight_exit();
382 oaktrail_rfkill_cleanup();
383 platform_device_unregister(oaktrail_device);
384 platform_driver_unregister(&oaktrail_driver);
385
386 pr_info("Driver unloaded\n");
387}
388
389module_init(oaktrail_init);
390module_exit(oaktrail_cleanup);
391
392MODULE_AUTHOR("Yin Kangkai (kangkai.yin@intel.com)");
393MODULE_DESCRIPTION("Intel Oaktrail Platform ACPI Extras");
394MODULE_VERSION(DRIVER_VERSION);
395MODULE_LICENSE("GPL");
396MODULE_ALIAS("dmi:*:svnIntelCorporation:pnOakTrailplatform:*");
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 464bb3fc4d88..1686c1e07d5d 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -19,6 +19,8 @@
19 * Moorestown platform PMIC chip 19 * Moorestown platform PMIC chip
20 */ 20 */
21 21
22#define pr_fmt(fmt) "%s: " fmt, __func__
23
22#include <linux/module.h> 24#include <linux/module.h>
23#include <linux/kernel.h> 25#include <linux/kernel.h>
24#include <linux/interrupt.h> 26#include <linux/interrupt.h>
@@ -90,8 +92,7 @@ static void pmic_program_irqtype(int gpio, int type)
90static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) 92static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
91{ 93{
92 if (offset > 8) { 94 if (offset > 8) {
93 printk(KERN_ERR 95 pr_err("only pin 0-7 support input\n");
94 "%s: only pin 0-7 support input\n", __func__);
95 return -1;/* we only have 8 GPIO can use as input */ 96 return -1;/* we only have 8 GPIO can use as input */
96 } 97 }
97 return intel_scu_ipc_update_register(GPIO0 + offset, 98 return intel_scu_ipc_update_register(GPIO0 + offset,
@@ -116,8 +117,7 @@ static int pmic_gpio_direction_output(struct gpio_chip *chip,
116 value ? 1 << (offset - 16) : 0, 117 value ? 1 << (offset - 16) : 0,
117 1 << (offset - 16)); 118 1 << (offset - 16));
118 else { 119 else {
119 printk(KERN_ERR 120 pr_err("invalid PMIC GPIO pin %d!\n", offset);
120 "%s: invalid PMIC GPIO pin %d!\n", __func__, offset);
121 WARN_ON(1); 121 WARN_ON(1);
122 } 122 }
123 123
@@ -260,7 +260,7 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
260 /* setting up SRAM mapping for GPIOINT register */ 260 /* setting up SRAM mapping for GPIOINT register */
261 pg->gpiointr = ioremap_nocache(pdata->gpiointr, 8); 261 pg->gpiointr = ioremap_nocache(pdata->gpiointr, 8);
262 if (!pg->gpiointr) { 262 if (!pg->gpiointr) {
263 printk(KERN_ERR "%s: Can not map GPIOINT.\n", __func__); 263 pr_err("Can not map GPIOINT\n");
264 retval = -EINVAL; 264 retval = -EINVAL;
265 goto err2; 265 goto err2;
266 } 266 }
@@ -281,13 +281,13 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
281 pg->chip.dev = dev; 281 pg->chip.dev = dev;
282 retval = gpiochip_add(&pg->chip); 282 retval = gpiochip_add(&pg->chip);
283 if (retval) { 283 if (retval) {
284 printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__); 284 pr_err("Can not add pmic gpio chip\n");
285 goto err; 285 goto err;
286 } 286 }
287 287
288 retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg); 288 retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
289 if (retval) { 289 if (retval) {
290 printk(KERN_WARNING "pmic: Interrupt request failed\n"); 290 pr_warn("Interrupt request failed\n");
291 goto err; 291 goto err;
292 } 292 }
293 293
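[Editor's note] This file's pr_fmt carries an extra argument, so every pr_*() call silently picks up __func__ without naming it at the call site. Roughly, the first conversion above expands as:

/* pr_err("Can not map GPIOINT\n") becomes, after pr_fmt expansion: */
printk(KERN_ERR "%s: " "Can not map GPIOINT\n", __func__);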
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 23fb2afda00b..3ff629df9f01 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -135,7 +135,7 @@ static int set_lcd_level(int level)
135 buf[1] = (u8) (level*31); 135 buf[1] = (u8) (level*31);
136 136
137 return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf), 137 return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf),
138 NULL, 0, 1); 138 NULL, 0);
139} 139}
140 140
141static int get_lcd_level(void) 141static int get_lcd_level(void)
@@ -144,7 +144,7 @@ static int get_lcd_level(void)
144 int result; 144 int result;
145 145
146 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, 146 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
147 &rdata, 1, 1); 147 &rdata, 1);
148 if (result < 0) 148 if (result < 0)
149 return result; 149 return result;
150 150
@@ -157,7 +157,7 @@ static int get_auto_brightness(void)
157 int result; 157 int result;
158 158
159 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, 159 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
160 &rdata, 1, 1); 160 &rdata, 1);
161 if (result < 0) 161 if (result < 0)
162 return result; 162 return result;
163 163
@@ -172,7 +172,7 @@ static int set_auto_brightness(int enable)
172 wdata[0] = 4; 172 wdata[0] = 4;
173 173
174 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1, 174 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1,
175 &rdata, 1, 1); 175 &rdata, 1);
176 if (result < 0) 176 if (result < 0)
177 return result; 177 return result;
178 178
@@ -180,7 +180,7 @@ static int set_auto_brightness(int enable)
180 wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0); 180 wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0);
181 181
182 return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2, 182 return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2,
183 NULL, 0, 1); 183 NULL, 0);
184} 184}
185 185
186static ssize_t set_device_state(const char *buf, size_t count, u8 mask) 186static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
@@ -217,7 +217,7 @@ static int get_wireless_state(int *wlan, int *bluetooth)
217 u8 wdata = 0, rdata; 217 u8 wdata = 0, rdata;
218 int result; 218 int result;
219 219
220 result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1, 1); 220 result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1);
221 if (result < 0) 221 if (result < 0)
222 return -1; 222 return -1;
223 223
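[Editor's note] Every ec_transaction() call in this file loses its trailing flag argument; the prototype change itself is not in this diff, but judging purely from the call sites it is assumed to now be:

/* assumed prototype after this change (not shown in this diff): */
int ec_transaction(u8 command, const u8 *wdata, unsigned wdata_len,
		   u8 *rdata, unsigned rdata_len);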
@@ -447,7 +447,7 @@ static struct platform_device *msipf_device;
447 447
448static int dmi_check_cb(const struct dmi_system_id *id) 448static int dmi_check_cb(const struct dmi_system_id *id)
449{ 449{
450 pr_info("Identified laptop model '%s'.\n", id->ident); 450 pr_info("Identified laptop model '%s'\n", id->ident);
451 return 1; 451 return 1;
452} 452}
453 453
@@ -800,7 +800,7 @@ static void msi_laptop_input_destroy(void)
800 input_unregister_device(msi_laptop_input_dev); 800 input_unregister_device(msi_laptop_input_dev);
801} 801}
802 802
803static int load_scm_model_init(struct platform_device *sdev) 803static int __init load_scm_model_init(struct platform_device *sdev)
804{ 804{
805 u8 data; 805 u8 data;
806 int result; 806 int result;
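[Editor's note] Tagging load_scm_model_init() with __init is only valid because its sole caller, msi_init(), is itself __init; both live in the init section the kernel frees after boot. A minimal sketch of the convention (hypothetical names):

static int __init helper_init(void)	/* freed after boot */
{
	return 0;
}

static int __init my_driver_init(void)
{
	return helper_init();	/* safe: caller is __init too */
}
module_init(my_driver_init);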
@@ -875,8 +875,7 @@ static int __init msi_init(void)
875 /* Register backlight stuff */ 875 /* Register backlight stuff */
876 876
877 if (acpi_video_backlight_support()) { 877 if (acpi_video_backlight_support()) {
878 pr_info("Brightness ignored, must be controlled " 878 pr_info("Brightness ignored, must be controlled by ACPI video driver\n");
879 "by ACPI video driver\n");
880 } else { 879 } else {
881 struct backlight_properties props; 880 struct backlight_properties props;
882 memset(&props, 0, sizeof(struct backlight_properties)); 881 memset(&props, 0, sizeof(struct backlight_properties));
@@ -930,7 +929,7 @@ static int __init msi_init(void)
930 if (auto_brightness != 2) 929 if (auto_brightness != 2)
931 set_auto_brightness(auto_brightness); 930 set_auto_brightness(auto_brightness);
932 931
933 pr_info("driver "MSI_DRIVER_VERSION" successfully loaded.\n"); 932 pr_info("driver " MSI_DRIVER_VERSION " successfully loaded\n");
934 933
935 return 0; 934 return 0;
936 935
@@ -978,7 +977,7 @@ static void __exit msi_cleanup(void)
978 if (auto_brightness != 2) 977 if (auto_brightness != 2)
979 set_auto_brightness(1); 978 set_auto_brightness(1);
980 979
981 pr_info("driver unloaded.\n"); 980 pr_info("driver unloaded\n");
982} 981}
983 982
984module_init(msi_init); 983module_init(msi_init);
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index d5419c9ec07a..c832e3356cd6 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -20,6 +20,7 @@
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */ 21 */
22 22
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 24
24#include <linux/kernel.h> 25#include <linux/kernel.h>
25#include <linux/input.h> 26#include <linux/input.h>
@@ -36,13 +37,10 @@ MODULE_ALIAS("wmi:551A1F84-FBDD-4125-91DB-3EA8F44F1D45");
36MODULE_ALIAS("wmi:B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"); 37MODULE_ALIAS("wmi:B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2");
37 38
38#define DRV_NAME "msi-wmi" 39#define DRV_NAME "msi-wmi"
39#define DRV_PFX DRV_NAME ": "
40 40
41#define MSIWMI_BIOS_GUID "551A1F84-FBDD-4125-91DB-3EA8F44F1D45" 41#define MSIWMI_BIOS_GUID "551A1F84-FBDD-4125-91DB-3EA8F44F1D45"
42#define MSIWMI_EVENT_GUID "B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2" 42#define MSIWMI_EVENT_GUID "B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"
43 43
44#define dprintk(msg...) pr_debug(DRV_PFX msg)
45
46#define SCANCODE_BASE 0xD0 44#define SCANCODE_BASE 0xD0
47#define MSI_WMI_BRIGHTNESSUP SCANCODE_BASE 45#define MSI_WMI_BRIGHTNESSUP SCANCODE_BASE
48#define MSI_WMI_BRIGHTNESSDOWN (SCANCODE_BASE + 1) 46#define MSI_WMI_BRIGHTNESSDOWN (SCANCODE_BASE + 1)
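[Editor's note] With DRV_PFX and the local dprintk() gone, the converted pr_debug() calls inherit the module prefix from pr_fmt and, on kernels with CONFIG_DYNAMIC_DEBUG, become runtime-switchable. Roughly:

/* pr_debug("Eventcode: 0x%x\n", ec) expands, more or less, to: */
printk(KERN_DEBUG KBUILD_MODNAME ": " "Eventcode: 0x%x\n", ec);
/* ...emitted only when debugging is enabled for this file */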
@@ -78,7 +76,7 @@ static int msi_wmi_query_block(int instance, int *ret)
78 76
79 if (!obj || obj->type != ACPI_TYPE_INTEGER) { 77 if (!obj || obj->type != ACPI_TYPE_INTEGER) {
80 if (obj) { 78 if (obj) {
81 printk(KERN_ERR DRV_PFX "query block returned object " 79 pr_err("query block returned object "
82 "type: %d - buffer length:%d\n", obj->type, 80 "type: %d - buffer length:%d\n", obj->type,
83 obj->type == ACPI_TYPE_BUFFER ? 81 obj->type == ACPI_TYPE_BUFFER ?
84 obj->buffer.length : 0); 82 obj->buffer.length : 0);
@@ -97,8 +95,8 @@ static int msi_wmi_set_block(int instance, int value)
97 95
98 struct acpi_buffer input = { sizeof(int), &value }; 96 struct acpi_buffer input = { sizeof(int), &value };
99 97
100 dprintk("Going to set block of instance: %d - value: %d\n", 98 pr_debug("Going to set block of instance: %d - value: %d\n",
101 instance, value); 99 instance, value);
102 100
103 status = wmi_set_block(MSIWMI_BIOS_GUID, instance, &input); 101 status = wmi_set_block(MSIWMI_BIOS_GUID, instance, &input);
104 102
@@ -112,20 +110,19 @@ static int bl_get(struct backlight_device *bd)
112 /* Instance 1 is "get backlight", cmp with DSDT */ 110 /* Instance 1 is "get backlight", cmp with DSDT */
113 err = msi_wmi_query_block(1, &ret); 111 err = msi_wmi_query_block(1, &ret);
114 if (err) { 112 if (err) {
115 printk(KERN_ERR DRV_PFX "Could not query backlight: %d\n", err); 113 pr_err("Could not query backlight: %d\n", err);
116 return -EINVAL; 114 return -EINVAL;
117 } 115 }
118 dprintk("Get: Query block returned: %d\n", ret); 116 pr_debug("Get: Query block returned: %d\n", ret);
119 for (level = 0; level < ARRAY_SIZE(backlight_map); level++) { 117 for (level = 0; level < ARRAY_SIZE(backlight_map); level++) {
120 if (backlight_map[level] == ret) { 118 if (backlight_map[level] == ret) {
121 dprintk("Current backlight level: 0x%X - index: %d\n", 119 pr_debug("Current backlight level: 0x%X - index: %d\n",
122 backlight_map[level], level); 120 backlight_map[level], level);
123 break; 121 break;
124 } 122 }
125 } 123 }
126 if (level == ARRAY_SIZE(backlight_map)) { 124 if (level == ARRAY_SIZE(backlight_map)) {
127 printk(KERN_ERR DRV_PFX "get: Invalid brightness value: 0x%X\n", 125 pr_err("get: Invalid brightness value: 0x%X\n", ret);
128 ret);
129 return -EINVAL; 126 return -EINVAL;
130 } 127 }
131 return level; 128 return level;
@@ -156,7 +153,7 @@ static void msi_wmi_notify(u32 value, void *context)
156 153
157 status = wmi_get_event_data(value, &response); 154 status = wmi_get_event_data(value, &response);
158 if (status != AE_OK) { 155 if (status != AE_OK) {
159 printk(KERN_INFO DRV_PFX "bad event status 0x%x\n", status); 156 pr_info("bad event status 0x%x\n", status);
160 return; 157 return;
161 } 158 }
162 159
@@ -164,7 +161,7 @@ static void msi_wmi_notify(u32 value, void *context)
164 161
165 if (obj && obj->type == ACPI_TYPE_INTEGER) { 162 if (obj && obj->type == ACPI_TYPE_INTEGER) {
166 int eventcode = obj->integer.value; 163 int eventcode = obj->integer.value;
167 dprintk("Eventcode: 0x%x\n", eventcode); 164 pr_debug("Eventcode: 0x%x\n", eventcode);
168 key = sparse_keymap_entry_from_scancode(msi_wmi_input_dev, 165 key = sparse_keymap_entry_from_scancode(msi_wmi_input_dev,
169 eventcode); 166 eventcode);
170 if (key) { 167 if (key) {
@@ -175,8 +172,8 @@ static void msi_wmi_notify(u32 value, void *context)
175 /* Ignore event if the same event happened in a 50 ms 172 /* Ignore event if the same event happened in a 50 ms
176 timeframe -> Key press may result in 10-20 GPEs */ 173 timeframe -> Key press may result in 10-20 GPEs */
177 if (ktime_to_us(diff) < 1000 * 50) { 174 if (ktime_to_us(diff) < 1000 * 50) {
178 dprintk("Suppressed key event 0x%X - " 175 pr_debug("Suppressed key event 0x%X - "
179 "Last press was %lld us ago\n", 176 "Last press was %lld us ago\n",
180 key->code, ktime_to_us(diff)); 177 key->code, ktime_to_us(diff));
181 return; 178 return;
182 } 179 }
@@ -187,17 +184,16 @@ static void msi_wmi_notify(u32 value, void *context)
187 (!acpi_video_backlight_support() || 184 (!acpi_video_backlight_support() ||
188 (key->code != MSI_WMI_BRIGHTNESSUP && 185 (key->code != MSI_WMI_BRIGHTNESSUP &&
189 key->code != MSI_WMI_BRIGHTNESSDOWN))) { 186 key->code != MSI_WMI_BRIGHTNESSDOWN))) {
190 dprintk("Send key: 0x%X - " 187 pr_debug("Send key: 0x%X - "
191 "Input layer keycode: %d\n", key->code, 188 "Input layer keycode: %d\n",
192 key->keycode); 189 key->code, key->keycode);
193 sparse_keymap_report_entry(msi_wmi_input_dev, 190 sparse_keymap_report_entry(msi_wmi_input_dev,
194 key, 1, true); 191 key, 1, true);
195 } 192 }
196 } else 193 } else
197 printk(KERN_INFO "Unknown key pressed - %x\n", 194 pr_info("Unknown key pressed - %x\n", eventcode);
198 eventcode);
199 } else 195 } else
200 printk(KERN_INFO DRV_PFX "Unknown event received\n"); 196 pr_info("Unknown event received\n");
201 kfree(response.pointer); 197 kfree(response.pointer);
202} 198}
203 199
@@ -238,8 +234,7 @@ static int __init msi_wmi_init(void)
238 int err; 234 int err;
239 235
240 if (!wmi_has_guid(MSIWMI_EVENT_GUID)) { 236 if (!wmi_has_guid(MSIWMI_EVENT_GUID)) {
241 printk(KERN_ERR 237 pr_err("This machine doesn't have MSI-hotkeys through WMI\n");
242 "This machine doesn't have MSI-hotkeys through WMI\n");
243 return -ENODEV; 238 return -ENODEV;
244 } 239 }
245 err = wmi_install_notify_handler(MSIWMI_EVENT_GUID, 240 err = wmi_install_notify_handler(MSIWMI_EVENT_GUID,
@@ -270,7 +265,7 @@ static int __init msi_wmi_init(void)
270 265
271 backlight->props.brightness = err; 266 backlight->props.brightness = err;
272 } 267 }
273 dprintk("Event handler installed\n"); 268 pr_debug("Event handler installed\n");
274 269
275 return 0; 270 return 0;
276 271
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 6fe8cd6e23b5..bbd182e178cb 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -42,6 +42,8 @@
42 * 42 *
43 */ 43 */
44 44
45#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
46
45#include <linux/kernel.h> 47#include <linux/kernel.h>
46#include <linux/module.h> 48#include <linux/module.h>
47#include <linux/moduleparam.h> 49#include <linux/moduleparam.h>
@@ -70,10 +72,10 @@
70#include <linux/miscdevice.h> 72#include <linux/miscdevice.h>
71#endif 73#endif
72 74
73#define DRV_PFX "sony-laptop: " 75#define dprintk(fmt, ...) \
74#define dprintk(msg...) do { \ 76do { \
75 if (debug) \ 77 if (debug) \
76 pr_warn(DRV_PFX msg); \ 78 pr_warn(fmt, ##__VA_ARGS__); \
77} while (0) 79} while (0)
78 80
79#define SONY_LAPTOP_DRIVER_VERSION "0.6" 81#define SONY_LAPTOP_DRIVER_VERSION "0.6"
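[Editor's note] The rewritten dprintk() swaps the GNU 'msg...' form for C99 __VA_ARGS__; the '##' swallows the leading comma when the macro is called with a bare format string. For instance:

dprintk("resuming\n");		/* -> pr_warn("resuming\n");     */
dprintk("status %d\n", s);	/* -> pr_warn("status %d\n", s); */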
@@ -418,7 +420,7 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device)
418 error = kfifo_alloc(&sony_laptop_input.fifo, 420 error = kfifo_alloc(&sony_laptop_input.fifo,
419 SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); 421 SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
420 if (error) { 422 if (error) {
421 pr_err(DRV_PFX "kfifo_alloc failed\n"); 423 pr_err("kfifo_alloc failed\n");
422 goto err_dec_users; 424 goto err_dec_users;
423 } 425 }
424 426
@@ -702,7 +704,7 @@ static int acpi_callgetfunc(acpi_handle handle, char *name, int *result)
702 return 0; 704 return 0;
703 } 705 }
704 706
705 pr_warn(DRV_PFX "acpi_callreadfunc failed\n"); 707 pr_warn("acpi_callreadfunc failed\n");
706 708
707 return -1; 709 return -1;
708} 710}
@@ -728,8 +730,7 @@ static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
728 if (status == AE_OK) { 730 if (status == AE_OK) {
729 if (result != NULL) { 731 if (result != NULL) {
730 if (out_obj.type != ACPI_TYPE_INTEGER) { 732 if (out_obj.type != ACPI_TYPE_INTEGER) {
731 pr_warn(DRV_PFX "acpi_evaluate_object bad " 733 pr_warn("acpi_evaluate_object bad return type\n");
732 "return type\n");
733 return -1; 734 return -1;
734 } 735 }
735 *result = out_obj.integer.value; 736 *result = out_obj.integer.value;
@@ -737,7 +738,7 @@ static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
737 return 0; 738 return 0;
738 } 739 }
739 740
740 pr_warn(DRV_PFX "acpi_evaluate_object failed\n"); 741 pr_warn("acpi_evaluate_object failed\n");
741 742
742 return -1; 743 return -1;
743} 744}
@@ -961,7 +962,6 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
961static int sony_nc_get_brightness_ng(struct backlight_device *bd) 962static int sony_nc_get_brightness_ng(struct backlight_device *bd)
962{ 963{
963 int result; 964 int result;
964 int *handle = (int *)bl_get_data(bd);
965 struct sony_backlight_props *sdev = 965 struct sony_backlight_props *sdev =
966 (struct sony_backlight_props *)bl_get_data(bd); 966 (struct sony_backlight_props *)bl_get_data(bd);
967 967
@@ -973,7 +973,6 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd)
973static int sony_nc_update_status_ng(struct backlight_device *bd) 973static int sony_nc_update_status_ng(struct backlight_device *bd)
974{ 974{
975 int value, result; 975 int value, result;
976 int *handle = (int *)bl_get_data(bd);
977 struct sony_backlight_props *sdev = 976 struct sony_backlight_props *sdev =
978 (struct sony_backlight_props *)bl_get_data(bd); 977 (struct sony_backlight_props *)bl_get_data(bd);
979 978
@@ -1104,10 +1103,8 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
1104 } 1103 }
1105 1104
1106 if (!key_event->data) 1105 if (!key_event->data)
1107 pr_info(DRV_PFX 1106 pr_info("Unknown event: 0x%x 0x%x\n",
1108 "Unknown event: 0x%x 0x%x\n", 1107 key_handle, ev);
1109 key_handle,
1110 ev);
1111 else 1108 else
1112 sony_laptop_report_input_event(ev); 1109 sony_laptop_report_input_event(ev);
1113 } 1110 }
@@ -1128,7 +1125,7 @@ static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
1128 struct acpi_device_info *info; 1125 struct acpi_device_info *info;
1129 1126
1130 if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) { 1127 if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) {
1131 pr_warn(DRV_PFX "method: name: %4.4s, args %X\n", 1128 pr_warn("method: name: %4.4s, args %X\n",
1132 (char *)&info->name, info->param_count); 1129 (char *)&info->name, info->param_count);
1133 1130
1134 kfree(info); 1131 kfree(info);
@@ -1169,7 +1166,7 @@ static int sony_nc_resume(struct acpi_device *device)
1169 ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, 1166 ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset,
1170 item->value, NULL); 1167 item->value, NULL);
1171 if (ret < 0) { 1168 if (ret < 0) {
1172 pr_err(DRV_PFX "%s: %d\n", __func__, ret); 1169 pr_err("%s: %d\n", __func__, ret);
1173 break; 1170 break;
1174 } 1171 }
1175 } 1172 }
@@ -1336,12 +1333,12 @@ static void sony_nc_rfkill_setup(struct acpi_device *device)
1336 1333
1337 device_enum = (union acpi_object *) buffer.pointer; 1334 device_enum = (union acpi_object *) buffer.pointer;
1338 if (!device_enum) { 1335 if (!device_enum) {
1339 pr_err(DRV_PFX "No SN06 return object."); 1336 pr_err("No SN06 return object\n");
1340 goto out_no_enum; 1337 goto out_no_enum;
1341 } 1338 }
1342 if (device_enum->type != ACPI_TYPE_BUFFER) { 1339 if (device_enum->type != ACPI_TYPE_BUFFER) {
1343 pr_err(DRV_PFX "Invalid SN06 return object 0x%.2x\n", 1340 pr_err("Invalid SN06 return object 0x%.2x\n",
1344 device_enum->type); 1341 device_enum->type);
1345 goto out_no_enum; 1342 goto out_no_enum;
1346 } 1343 }
1347 1344
@@ -1662,7 +1659,7 @@ static void sony_nc_backlight_setup(void)
1662 ops, &props); 1659 ops, &props);
1663 1660
1664 if (IS_ERR(sony_bl_props.dev)) { 1661 if (IS_ERR(sony_bl_props.dev)) {
1665 pr_warn(DRV_PFX "unable to register backlight device\n"); 1662 pr_warn("unable to register backlight device\n");
1666 sony_bl_props.dev = NULL; 1663 sony_bl_props.dev = NULL;
1667 } else 1664 } else
1668 sony_bl_props.dev->props.brightness = 1665 sony_bl_props.dev->props.brightness =
@@ -1682,8 +1679,7 @@ static int sony_nc_add(struct acpi_device *device)
1682 acpi_handle handle; 1679 acpi_handle handle;
1683 struct sony_nc_value *item; 1680 struct sony_nc_value *item;
1684 1681
1685 pr_info(DRV_PFX "%s v%s.\n", SONY_NC_DRIVER_NAME, 1682 pr_info("%s v%s\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
1686 SONY_LAPTOP_DRIVER_VERSION);
1687 1683
1688 sony_nc_acpi_device = device; 1684 sony_nc_acpi_device = device;
1689 strcpy(acpi_device_class(device), "sony/hotkey"); 1685 strcpy(acpi_device_class(device), "sony/hotkey");
@@ -1708,7 +1704,7 @@ static int sony_nc_add(struct acpi_device *device)
1708 sony_nc_acpi_handle, 1, sony_walk_callback, 1704 sony_nc_acpi_handle, 1, sony_walk_callback,
1709 NULL, NULL, NULL); 1705 NULL, NULL, NULL);
1710 if (ACPI_FAILURE(status)) { 1706 if (ACPI_FAILURE(status)) {
1711 pr_warn(DRV_PFX "unable to walk acpi resources\n"); 1707 pr_warn("unable to walk acpi resources\n");
1712 result = -ENODEV; 1708 result = -ENODEV;
1713 goto outpresent; 1709 goto outpresent;
1714 } 1710 }
@@ -1736,13 +1732,12 @@ static int sony_nc_add(struct acpi_device *device)
1736 /* setup input devices and helper fifo */ 1732 /* setup input devices and helper fifo */
1737 result = sony_laptop_setup_input(device); 1733 result = sony_laptop_setup_input(device);
1738 if (result) { 1734 if (result) {
1739 pr_err(DRV_PFX "Unable to create input devices.\n"); 1735 pr_err("Unable to create input devices\n");
1740 goto outkbdbacklight; 1736 goto outkbdbacklight;
1741 } 1737 }
1742 1738
1743 if (acpi_video_backlight_support()) { 1739 if (acpi_video_backlight_support()) {
1744 pr_info(DRV_PFX "brightness ignored, must be " 1740 pr_info("brightness ignored, must be controlled by ACPI video driver\n");
1745 "controlled by ACPI video driver\n");
1746 } else { 1741 } else {
1747 sony_nc_backlight_setup(); 1742 sony_nc_backlight_setup();
1748 } 1743 }
@@ -2265,9 +2260,9 @@ out:
2265 if (pcidev) 2260 if (pcidev)
2266 pci_dev_put(pcidev); 2261 pci_dev_put(pcidev);
2267 2262
2268 pr_info(DRV_PFX "detected Type%d model\n", 2263 pr_info("detected Type%d model\n",
2269 dev->model == SONYPI_DEVICE_TYPE1 ? 1 : 2264 dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
2270 dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3); 2265 dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
2271} 2266}
2272 2267
2273/* camera tests and poweron/poweroff */ 2268/* camera tests and poweron/poweroff */
@@ -2313,7 +2308,7 @@ static int __sony_pic_camera_ready(void)
2313static int __sony_pic_camera_off(void) 2308static int __sony_pic_camera_off(void)
2314{ 2309{
2315 if (!camera) { 2310 if (!camera) {
2316 pr_warn(DRV_PFX "camera control not enabled\n"); 2311 pr_warn("camera control not enabled\n");
2317 return -ENODEV; 2312 return -ENODEV;
2318 } 2313 }
2319 2314
@@ -2333,7 +2328,7 @@ static int __sony_pic_camera_on(void)
2333 int i, j, x; 2328 int i, j, x;
2334 2329
2335 if (!camera) { 2330 if (!camera) {
2336 pr_warn(DRV_PFX "camera control not enabled\n"); 2331 pr_warn("camera control not enabled\n");
2337 return -ENODEV; 2332 return -ENODEV;
2338 } 2333 }
2339 2334
@@ -2356,7 +2351,7 @@ static int __sony_pic_camera_on(void)
2356 } 2351 }
2357 2352
2358 if (j == 0) { 2353 if (j == 0) {
2359 pr_warn(DRV_PFX "failed to power on camera\n"); 2354 pr_warn("failed to power on camera\n");
2360 return -ENODEV; 2355 return -ENODEV;
2361 } 2356 }
2362 2357
@@ -2412,8 +2407,7 @@ int sony_pic_camera_command(int command, u8 value)
2412 ITERATIONS_SHORT); 2407 ITERATIONS_SHORT);
2413 break; 2408 break;
2414 default: 2409 default:
2415 pr_err(DRV_PFX "sony_pic_camera_command invalid: %d\n", 2410 pr_err("sony_pic_camera_command invalid: %d\n", command);
2416 command);
2417 break; 2411 break;
2418 } 2412 }
2419 mutex_unlock(&spic_dev.lock); 2413 mutex_unlock(&spic_dev.lock);
@@ -2819,7 +2813,7 @@ static int sonypi_compat_init(void)
2819 error = 2813 error =
2820 kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); 2814 kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
2821 if (error) { 2815 if (error) {
2822 pr_err(DRV_PFX "kfifo_alloc failed\n"); 2816 pr_err("kfifo_alloc failed\n");
2823 return error; 2817 return error;
2824 } 2818 }
2825 2819
@@ -2829,12 +2823,12 @@ static int sonypi_compat_init(void)
2829 sonypi_misc_device.minor = minor; 2823 sonypi_misc_device.minor = minor;
2830 error = misc_register(&sonypi_misc_device); 2824 error = misc_register(&sonypi_misc_device);
2831 if (error) { 2825 if (error) {
2832 pr_err(DRV_PFX "misc_register failed\n"); 2826 pr_err("misc_register failed\n");
2833 goto err_free_kfifo; 2827 goto err_free_kfifo;
2834 } 2828 }
2835 if (minor == -1) 2829 if (minor == -1)
2836 pr_info(DRV_PFX "device allocated minor is %d\n", 2830 pr_info("device allocated minor is %d\n",
2837 sonypi_misc_device.minor); 2831 sonypi_misc_device.minor);
2838 2832
2839 return 0; 2833 return 0;
2840 2834
@@ -2893,8 +2887,8 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
2893 } 2887 }
2894 for (i = 0; i < p->interrupt_count; i++) { 2888 for (i = 0; i < p->interrupt_count; i++) {
2895 if (!p->interrupts[i]) { 2889 if (!p->interrupts[i]) {
2896 pr_warn(DRV_PFX "Invalid IRQ %d\n", 2890 pr_warn("Invalid IRQ %d\n",
2897 p->interrupts[i]); 2891 p->interrupts[i]);
2898 continue; 2892 continue;
2899 } 2893 }
2900 interrupt = kzalloc(sizeof(*interrupt), 2894 interrupt = kzalloc(sizeof(*interrupt),
@@ -2932,14 +2926,14 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
2932 ioport->io2.address_length); 2926 ioport->io2.address_length);
2933 } 2927 }
2934 else { 2928 else {
2935 pr_err(DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n"); 2929 pr_err("Unknown SPIC Type, more than 2 IO Ports\n");
2936 return AE_ERROR; 2930 return AE_ERROR;
2937 } 2931 }
2938 return AE_OK; 2932 return AE_OK;
2939 } 2933 }
2940 default: 2934 default:
2941 dprintk("Resource %d is neither an IRQ nor an IO port\n", 2935 dprintk("Resource %d is neither an IRQ nor an IO port\n",
2942 resource->type); 2936 resource->type);
2943 2937
2944 case ACPI_RESOURCE_TYPE_END_TAG: 2938 case ACPI_RESOURCE_TYPE_END_TAG:
2945 return AE_OK; 2939 return AE_OK;
@@ -2960,7 +2954,7 @@ static int sony_pic_possible_resources(struct acpi_device *device)
2960 dprintk("Evaluating _STA\n"); 2954 dprintk("Evaluating _STA\n");
2961 result = acpi_bus_get_status(device); 2955 result = acpi_bus_get_status(device);
2962 if (result) { 2956 if (result) {
2963 pr_warn(DRV_PFX "Unable to read status\n"); 2957 pr_warn("Unable to read status\n");
2964 goto end; 2958 goto end;
2965 } 2959 }
2966 2960
@@ -2976,8 +2970,7 @@ static int sony_pic_possible_resources(struct acpi_device *device)
2976 status = acpi_walk_resources(device->handle, METHOD_NAME__PRS, 2970 status = acpi_walk_resources(device->handle, METHOD_NAME__PRS,
2977 sony_pic_read_possible_resource, &spic_dev); 2971 sony_pic_read_possible_resource, &spic_dev);
2978 if (ACPI_FAILURE(status)) { 2972 if (ACPI_FAILURE(status)) {
2979 pr_warn(DRV_PFX "Failure evaluating %s\n", 2973 pr_warn("Failure evaluating %s\n", METHOD_NAME__PRS);
2980 METHOD_NAME__PRS);
2981 result = -ENODEV; 2974 result = -ENODEV;
2982 } 2975 }
2983end: 2976end:
@@ -3090,7 +3083,7 @@ static int sony_pic_enable(struct acpi_device *device,
3090 3083
3091 /* check for total failure */ 3084 /* check for total failure */
3092 if (ACPI_FAILURE(status)) { 3085 if (ACPI_FAILURE(status)) {
3093 pr_err(DRV_PFX "Error evaluating _SRS\n"); 3086 pr_err("Error evaluating _SRS\n");
3094 result = -ENODEV; 3087 result = -ENODEV;
3095 goto end; 3088 goto end;
3096 } 3089 }
@@ -3182,7 +3175,7 @@ static int sony_pic_remove(struct acpi_device *device, int type)
3182 struct sony_pic_irq *irq, *tmp_irq; 3175 struct sony_pic_irq *irq, *tmp_irq;
3183 3176
3184 if (sony_pic_disable(device)) { 3177 if (sony_pic_disable(device)) {
3185 pr_err(DRV_PFX "Couldn't disable device.\n"); 3178 pr_err("Couldn't disable device\n");
3186 return -ENXIO; 3179 return -ENXIO;
3187 } 3180 }
3188 3181
@@ -3222,8 +3215,7 @@ static int sony_pic_add(struct acpi_device *device)
3222 struct sony_pic_ioport *io, *tmp_io; 3215 struct sony_pic_ioport *io, *tmp_io;
3223 struct sony_pic_irq *irq, *tmp_irq; 3216 struct sony_pic_irq *irq, *tmp_irq;
3224 3217
3225 pr_info(DRV_PFX "%s v%s.\n", SONY_PIC_DRIVER_NAME, 3218 pr_info("%s v%s\n", SONY_PIC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
3226 SONY_LAPTOP_DRIVER_VERSION);
3227 3219
3228 spic_dev.acpi_dev = device; 3220 spic_dev.acpi_dev = device;
3229 strcpy(acpi_device_class(device), "sony/hotkey"); 3221 strcpy(acpi_device_class(device), "sony/hotkey");
@@ -3233,14 +3225,14 @@ static int sony_pic_add(struct acpi_device *device)
3233 /* read _PRS resources */ 3225 /* read _PRS resources */
3234 result = sony_pic_possible_resources(device); 3226 result = sony_pic_possible_resources(device);
3235 if (result) { 3227 if (result) {
3236 pr_err(DRV_PFX "Unable to read possible resources.\n"); 3228 pr_err("Unable to read possible resources\n");
3237 goto err_free_resources; 3229 goto err_free_resources;
3238 } 3230 }
3239 3231
3240 /* setup input devices and helper fifo */ 3232 /* setup input devices and helper fifo */
3241 result = sony_laptop_setup_input(device); 3233 result = sony_laptop_setup_input(device);
3242 if (result) { 3234 if (result) {
3243 pr_err(DRV_PFX "Unable to create input devices.\n"); 3235 pr_err("Unable to create input devices\n");
3244 goto err_free_resources; 3236 goto err_free_resources;
3245 } 3237 }
3246 3238
@@ -3281,7 +3273,7 @@ static int sony_pic_add(struct acpi_device *device)
3281 } 3273 }
3282 } 3274 }
3283 if (!spic_dev.cur_ioport) { 3275 if (!spic_dev.cur_ioport) {
3284 pr_err(DRV_PFX "Failed to request_region.\n"); 3276 pr_err("Failed to request_region\n");
3285 result = -ENODEV; 3277 result = -ENODEV;
3286 goto err_remove_compat; 3278 goto err_remove_compat;
3287 } 3279 }
@@ -3301,7 +3293,7 @@ static int sony_pic_add(struct acpi_device *device)
3301 } 3293 }
3302 } 3294 }
3303 if (!spic_dev.cur_irq) { 3295 if (!spic_dev.cur_irq) {
3304 pr_err(DRV_PFX "Failed to request_irq.\n"); 3296 pr_err("Failed to request_irq\n");
3305 result = -ENODEV; 3297 result = -ENODEV;
3306 goto err_release_region; 3298 goto err_release_region;
3307 } 3299 }
@@ -3309,7 +3301,7 @@ static int sony_pic_add(struct acpi_device *device)
3309 /* set resource status _SRS */ 3301 /* set resource status _SRS */
3310 result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq); 3302 result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq);
3311 if (result) { 3303 if (result) {
3312 pr_err(DRV_PFX "Couldn't enable device.\n"); 3304 pr_err("Couldn't enable device\n");
3313 goto err_free_irq; 3305 goto err_free_irq;
3314 } 3306 }
3315 3307
@@ -3418,7 +3410,7 @@ static int __init sony_laptop_init(void)
3418 if (!no_spic && dmi_check_system(sonypi_dmi_table)) { 3410 if (!no_spic && dmi_check_system(sonypi_dmi_table)) {
3419 result = acpi_bus_register_driver(&sony_pic_driver); 3411 result = acpi_bus_register_driver(&sony_pic_driver);
3420 if (result) { 3412 if (result) {
3421 pr_err(DRV_PFX "Unable to register SPIC driver."); 3413 pr_err("Unable to register SPIC driver\n");
3422 goto out; 3414 goto out;
3423 } 3415 }
3424 spic_drv_registered = 1; 3416 spic_drv_registered = 1;
@@ -3426,7 +3418,7 @@ static int __init sony_laptop_init(void)
3426 3418
3427 result = acpi_bus_register_driver(&sony_nc_driver); 3419 result = acpi_bus_register_driver(&sony_nc_driver);
3428 if (result) { 3420 if (result) {
3429 pr_err(DRV_PFX "Unable to register SNC driver."); 3421 pr_err("Unable to register SNC driver\n");
3430 goto out_unregister_pic; 3422 goto out_unregister_pic;
3431 } 3423 }
3432 3424
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 865ef78d6f1a..e24f5ae475af 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -25,6 +25,8 @@
25 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 25 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
26 */ 26 */
27 27
28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
28#include <linux/kernel.h> 30#include <linux/kernel.h>
29#include <linux/module.h> 31#include <linux/module.h>
30#include <linux/slab.h> 32#include <linux/slab.h>
@@ -40,9 +42,6 @@
40#define TC1100_INSTANCE_WIRELESS 1 42#define TC1100_INSTANCE_WIRELESS 1
41#define TC1100_INSTANCE_JOGDIAL 2 43#define TC1100_INSTANCE_JOGDIAL 2
42 44
43#define TC1100_LOGPREFIX "tc1100-wmi: "
44#define TC1100_INFO KERN_INFO TC1100_LOGPREFIX
45
46MODULE_AUTHOR("Jamey Hicks, Carlos Corbacho"); 45MODULE_AUTHOR("Jamey Hicks, Carlos Corbacho");
47MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras"); 46MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras");
48MODULE_LICENSE("GPL"); 47MODULE_LICENSE("GPL");
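[Editor's note] Here the KBUILD_MODNAME-based pr_fmt replaces the hand-rolled TC1100_LOGPREFIX, so the log prefix now tracks the module name automatically:

/* pr_info("HP Compaq TC1100 Tablet WMI Extras loaded\n") becomes: */
printk(KERN_INFO KBUILD_MODNAME ": "
       "HP Compaq TC1100 Tablet WMI Extras loaded\n");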
@@ -264,7 +263,7 @@ static int __init tc1100_init(void)
264 if (error) 263 if (error)
265 goto err_device_del; 264 goto err_device_del;
266 265
267 printk(TC1100_INFO "HP Compaq TC1100 Tablet WMI Extras loaded\n"); 266 pr_info("HP Compaq TC1100 Tablet WMI Extras loaded\n");
268 return 0; 267 return 0;
269 268
270 err_device_del: 269 err_device_del:
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 562fcf0dd2b5..77f6e707a2a9 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -21,6 +21,8 @@
21 * 02110-1301, USA. 21 * 02110-1301, USA.
22 */ 22 */
23 23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
24#define TPACPI_VERSION "0.24" 26#define TPACPI_VERSION "0.24"
25#define TPACPI_SYSFS_VERSION 0x020700 27#define TPACPI_SYSFS_VERSION 0x020700
26 28
@@ -224,17 +226,6 @@ enum tpacpi_hkey_event_t {
224 226
225#define TPACPI_MAX_ACPI_ARGS 3 227#define TPACPI_MAX_ACPI_ARGS 3
226 228
227/* printk headers */
228#define TPACPI_LOG TPACPI_FILE ": "
229#define TPACPI_EMERG KERN_EMERG TPACPI_LOG
230#define TPACPI_ALERT KERN_ALERT TPACPI_LOG
231#define TPACPI_CRIT KERN_CRIT TPACPI_LOG
232#define TPACPI_ERR KERN_ERR TPACPI_LOG
233#define TPACPI_WARN KERN_WARNING TPACPI_LOG
234#define TPACPI_NOTICE KERN_NOTICE TPACPI_LOG
235#define TPACPI_INFO KERN_INFO TPACPI_LOG
236#define TPACPI_DEBUG KERN_DEBUG TPACPI_LOG
237
238/* Debugging printk groups */ 229/* Debugging printk groups */
239#define TPACPI_DBG_ALL 0xffff 230#define TPACPI_DBG_ALL 0xffff
240#define TPACPI_DBG_DISCLOSETASK 0x8000 231#define TPACPI_DBG_DISCLOSETASK 0x8000
@@ -389,34 +380,36 @@ static int tpacpi_uwb_emulstate;
389 * Debugging helpers 380 * Debugging helpers
390 */ 381 */
391 382
392#define dbg_printk(a_dbg_level, format, arg...) \ 383#define dbg_printk(a_dbg_level, format, arg...) \
393 do { if (dbg_level & (a_dbg_level)) \ 384do { \
394 printk(TPACPI_DEBUG "%s: " format, __func__ , ## arg); \ 385 if (dbg_level & (a_dbg_level)) \
395 } while (0) 386 printk(KERN_DEBUG pr_fmt("%s: " format), \
387 __func__, ##arg); \
388} while (0)
396 389
397#ifdef CONFIG_THINKPAD_ACPI_DEBUG 390#ifdef CONFIG_THINKPAD_ACPI_DEBUG
398#define vdbg_printk dbg_printk 391#define vdbg_printk dbg_printk
399static const char *str_supported(int is_supported); 392static const char *str_supported(int is_supported);
400#else 393#else
401#define vdbg_printk(a_dbg_level, format, arg...) \ 394static inline const char *str_supported(int is_supported) { return ""; }
402 do { } while (0) 395#define vdbg_printk(a_dbg_level, format, arg...) \
396 no_printk(format, ##arg)
403#endif 397#endif
404 398
405static void tpacpi_log_usertask(const char * const what) 399static void tpacpi_log_usertask(const char * const what)
406{ 400{
407 printk(TPACPI_DEBUG "%s: access by process with PID %d\n", 401 printk(KERN_DEBUG pr_fmt("%s: access by process with PID %d\n"),
408 what, task_tgid_vnr(current)); 402 what, task_tgid_vnr(current));
409} 403}
410 404
411#define tpacpi_disclose_usertask(what, format, arg...) \ 405#define tpacpi_disclose_usertask(what, format, arg...) \
412 do { \ 406do { \
413 if (unlikely( \ 407 if (unlikely((dbg_level & TPACPI_DBG_DISCLOSETASK) && \
414 (dbg_level & TPACPI_DBG_DISCLOSETASK) && \ 408 (tpacpi_lifecycle == TPACPI_LIFE_RUNNING))) { \
415 (tpacpi_lifecycle == TPACPI_LIFE_RUNNING))) { \ 409 printk(KERN_DEBUG pr_fmt("%s: PID %d: " format), \
416 printk(TPACPI_DEBUG "%s: PID %d: " format, \ 410 what, task_tgid_vnr(current), ## arg); \
417 what, task_tgid_vnr(current), ## arg); \ 411 } \
418 } \ 412} while (0)
419 } while (0)
420 413
421/* 414/*
422 * Quirk handling helpers 415 * Quirk handling helpers
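[Editor's note] Two details in this hunk are easy to miss: dbg_printk() must wrap its format in pr_fmt() by hand because raw printk() does not apply it, and the disabled vdbg_printk() is now no_printk(), which compiles to nothing yet keeps printf-style format checking. A sketch of that idiom, per <linux/printk.h>:

#define my_vdbg(fmt, ...) no_printk(fmt, ##__VA_ARGS__)

/* emits no code, but the compiler still verifies that %d
 * is matched by an int argument: */
my_vdbg("state %d\n", state);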
@@ -535,15 +528,6 @@ TPACPI_HANDLE(hkey, ec, "\\_SB.HKEY", /* 600e/x, 770e, 770x */
535 "HKEY", /* all others */ 528 "HKEY", /* all others */
536 ); /* 570 */ 529 ); /* 570 */
537 530
538TPACPI_HANDLE(vid, root, "\\_SB.PCI.AGP.VGA", /* 570 */
539 "\\_SB.PCI0.AGP0.VID0", /* 600e/x, 770x */
540 "\\_SB.PCI0.VID0", /* 770e */
541 "\\_SB.PCI0.VID", /* A21e, G4x, R50e, X30, X40 */
542 "\\_SB.PCI0.AGP.VGA", /* X100e and a few others */
543 "\\_SB.PCI0.AGP.VID", /* all others */
544 ); /* R30, R31 */
545
546
547/************************************************************************* 531/*************************************************************************
548 * ACPI helpers 532 * ACPI helpers
549 */ 533 */
@@ -563,7 +547,7 @@ static int acpi_evalf(acpi_handle handle,
563 int quiet; 547 int quiet;
564 548
565 if (!*fmt) { 549 if (!*fmt) {
566 printk(TPACPI_ERR "acpi_evalf() called with empty format\n"); 550 pr_err("acpi_evalf() called with empty format\n");
567 return 0; 551 return 0;
568 } 552 }
569 553
@@ -588,7 +572,7 @@ static int acpi_evalf(acpi_handle handle,
588 break; 572 break;
589 /* add more types as needed */ 573 /* add more types as needed */
590 default: 574 default:
591 printk(TPACPI_ERR "acpi_evalf() called " 575 pr_err("acpi_evalf() called "
592 "with invalid format character '%c'\n", c); 576 "with invalid format character '%c'\n", c);
593 va_end(ap); 577 va_end(ap);
594 return 0; 578 return 0;
@@ -617,13 +601,13 @@ static int acpi_evalf(acpi_handle handle,
617 break; 601 break;
618 /* add more types as needed */ 602 /* add more types as needed */
619 default: 603 default:
620 printk(TPACPI_ERR "acpi_evalf() called " 604 pr_err("acpi_evalf() called "
621 "with invalid format character '%c'\n", res_type); 605 "with invalid format character '%c'\n", res_type);
622 return 0; 606 return 0;
623 } 607 }
624 608
625 if (!success && !quiet) 609 if (!success && !quiet)
626 printk(TPACPI_ERR "acpi_evalf(%s, %s, ...) failed: %s\n", 610 pr_err("acpi_evalf(%s, %s, ...) failed: %s\n",
627 method, fmt0, acpi_format_exception(status)); 611 method, fmt0, acpi_format_exception(status));
628 612
629 return success; 613 return success;
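
For readers unfamiliar with acpi_evalf(): it evaluates an ACPI method and decodes the result according to a small format string; as the hunks above show, 'd' requests a DWORD result, 'v' no result, and a leading 'q' suppresses the failure message. A hypothetical caller in the style of this driver:

static int __init probe_hkey_version(void)
{
	int hkeyv;

	/* "qd": evaluate MHKV quietly, expect a DWORD back */
	if (!acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd"))
		return -ENODEV;

	pr_info("HKEY interface version 0x%x\n", hkeyv);
	return 0;
}
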
@@ -767,8 +751,7 @@ static int __init setup_acpi_notify(struct ibm_struct *ibm)
767 751
768 rc = acpi_bus_get_device(*ibm->acpi->handle, &ibm->acpi->device); 752 rc = acpi_bus_get_device(*ibm->acpi->handle, &ibm->acpi->device);
769 if (rc < 0) { 753 if (rc < 0) {
770 printk(TPACPI_ERR "acpi_bus_get_device(%s) failed: %d\n", 754 pr_err("acpi_bus_get_device(%s) failed: %d\n", ibm->name, rc);
771 ibm->name, rc);
772 return -ENODEV; 755 return -ENODEV;
773 } 756 }
774 757
@@ -781,12 +764,10 @@ static int __init setup_acpi_notify(struct ibm_struct *ibm)
781 ibm->acpi->type, dispatch_acpi_notify, ibm); 764 ibm->acpi->type, dispatch_acpi_notify, ibm);
782 if (ACPI_FAILURE(status)) { 765 if (ACPI_FAILURE(status)) {
783 if (status == AE_ALREADY_EXISTS) { 766 if (status == AE_ALREADY_EXISTS) {
784 printk(TPACPI_NOTICE 767 pr_notice("another device driver is already "
785 "another device driver is already " 768 "handling %s events\n", ibm->name);
786 "handling %s events\n", ibm->name);
787 } else { 769 } else {
788 printk(TPACPI_ERR 770 pr_err("acpi_install_notify_handler(%s) failed: %s\n",
789 "acpi_install_notify_handler(%s) failed: %s\n",
790 ibm->name, acpi_format_exception(status)); 771 ibm->name, acpi_format_exception(status));
791 } 772 }
792 return -ENODEV; 773 return -ENODEV;
@@ -811,8 +792,7 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
811 792
812 ibm->acpi->driver = kzalloc(sizeof(struct acpi_driver), GFP_KERNEL); 793 ibm->acpi->driver = kzalloc(sizeof(struct acpi_driver), GFP_KERNEL);
813 if (!ibm->acpi->driver) { 794 if (!ibm->acpi->driver) {
814 printk(TPACPI_ERR 795 pr_err("failed to allocate memory for ibm->acpi->driver\n");
815 "failed to allocate memory for ibm->acpi->driver\n");
816 return -ENOMEM; 796 return -ENOMEM;
817 } 797 }
818 798
@@ -823,7 +803,7 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
823 803
824 rc = acpi_bus_register_driver(ibm->acpi->driver); 804 rc = acpi_bus_register_driver(ibm->acpi->driver);
825 if (rc < 0) { 805 if (rc < 0) {
826 printk(TPACPI_ERR "acpi_bus_register_driver(%s) failed: %d\n", 806 pr_err("acpi_bus_register_driver(%s) failed: %d\n",
827 ibm->name, rc); 807 ibm->name, rc);
828 kfree(ibm->acpi->driver); 808 kfree(ibm->acpi->driver);
829 ibm->acpi->driver = NULL; 809 ibm->acpi->driver = NULL;
@@ -1081,15 +1061,14 @@ static int parse_strtoul(const char *buf,
1081static void tpacpi_disable_brightness_delay(void) 1061static void tpacpi_disable_brightness_delay(void)
1082{ 1062{
1083 if (acpi_evalf(hkey_handle, NULL, "PWMS", "qvd", 0)) 1063 if (acpi_evalf(hkey_handle, NULL, "PWMS", "qvd", 0))
1084 printk(TPACPI_NOTICE 1064 pr_notice("ACPI backlight control delay disabled\n");
1085 "ACPI backlight control delay disabled\n");
1086} 1065}
1087 1066
1088static void printk_deprecated_attribute(const char * const what, 1067static void printk_deprecated_attribute(const char * const what,
1089 const char * const details) 1068 const char * const details)
1090{ 1069{
1091 tpacpi_log_usertask("deprecated sysfs attribute"); 1070 tpacpi_log_usertask("deprecated sysfs attribute");
1092 printk(TPACPI_WARN "WARNING: sysfs attribute %s is deprecated and " 1071 pr_warn("WARNING: sysfs attribute %s is deprecated and "
1093 "will be removed. %s\n", 1072 "will be removed. %s\n",
1094 what, details); 1073 what, details);
1095} 1074}
@@ -1264,8 +1243,7 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
1264 &tpacpi_rfk_rfkill_ops, 1243 &tpacpi_rfk_rfkill_ops,
1265 atp_rfk); 1244 atp_rfk);
1266 if (!atp_rfk || !atp_rfk->rfkill) { 1245 if (!atp_rfk || !atp_rfk->rfkill) {
1267 printk(TPACPI_ERR 1246 pr_err("failed to allocate memory for rfkill class\n");
1268 "failed to allocate memory for rfkill class\n");
1269 kfree(atp_rfk); 1247 kfree(atp_rfk);
1270 return -ENOMEM; 1248 return -ENOMEM;
1271 } 1249 }
@@ -1275,9 +1253,8 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
1275 1253
1276 sw_status = (tp_rfkops->get_status)(); 1254 sw_status = (tp_rfkops->get_status)();
1277 if (sw_status < 0) { 1255 if (sw_status < 0) {
1278 printk(TPACPI_ERR 1256 pr_err("failed to read initial state for %s, error %d\n",
1279 "failed to read initial state for %s, error %d\n", 1257 name, sw_status);
1280 name, sw_status);
1281 } else { 1258 } else {
1282 sw_state = (sw_status == TPACPI_RFK_RADIO_OFF); 1259 sw_state = (sw_status == TPACPI_RFK_RADIO_OFF);
1283 if (set_default) { 1260 if (set_default) {
@@ -1291,9 +1268,7 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
1291 1268
1292 res = rfkill_register(atp_rfk->rfkill); 1269 res = rfkill_register(atp_rfk->rfkill);
1293 if (res < 0) { 1270 if (res < 0) {
1294 printk(TPACPI_ERR 1271 pr_err("failed to register %s rfkill switch: %d\n", name, res);
1295 "failed to register %s rfkill switch: %d\n",
1296 name, res);
1297 rfkill_destroy(atp_rfk->rfkill); 1272 rfkill_destroy(atp_rfk->rfkill);
1298 kfree(atp_rfk); 1273 kfree(atp_rfk);
1299 return res; 1274 return res;
@@ -1301,7 +1276,7 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
1301 1276
1302 tpacpi_rfkill_switches[id] = atp_rfk; 1277 tpacpi_rfkill_switches[id] = atp_rfk;
1303 1278
1304 printk(TPACPI_INFO "rfkill switch %s: radio is %sblocked\n", 1279 pr_info("rfkill switch %s: radio is %sblocked\n",
1305 name, (sw_state || hw_state) ? "" : "un"); 1280 name, (sw_state || hw_state) ? "" : "un");
1306 return 0; 1281 return 0;
1307} 1282}
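
The error paths in tpacpi_new_rfkill() above follow the standard rfkill lifecycle: rfkill_alloc(), then rfkill_register(), with rfkill_destroy() on the still-unregistered object if registration fails. A condensed sketch with hypothetical names (my_rfkill_ops is assumed to exist elsewhere):

static struct rfkill *my_rfk;

static int my_rfkill_setup(struct device *parent)
{
	int res;

	my_rfk = rfkill_alloc("my-switch", parent, RFKILL_TYPE_WLAN,
			      &my_rfkill_ops, NULL);
	if (!my_rfk)
		return -ENOMEM;

	res = rfkill_register(my_rfk);
	if (res < 0) {
		pr_err("failed to register rfkill switch: %d\n", res);
		rfkill_destroy(my_rfk);	/* frees the unregistered object */
		my_rfk = NULL;
	}
	return res;
}
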
@@ -1825,10 +1800,8 @@ static void __init tpacpi_check_outdated_fw(void)
1825 * broken, or really stable to begin with, so it is 1800 * broken, or really stable to begin with, so it is
1826 * best if the user upgrades the firmware anyway. 1801 * best if the user upgrades the firmware anyway.
1827 */ 1802 */
1828 printk(TPACPI_WARN 1803 pr_warn("WARNING: Outdated ThinkPad BIOS/EC firmware\n");
1829 "WARNING: Outdated ThinkPad BIOS/EC firmware\n"); 1804 pr_warn("WARNING: This firmware may be missing critical bug "
1830 printk(TPACPI_WARN
1831 "WARNING: This firmware may be missing critical bug "
1832 "fixes and/or important features\n"); 1805 "fixes and/or important features\n");
1833 } 1806 }
1834} 1807}
@@ -2117,9 +2090,7 @@ void static hotkey_mask_warn_incomplete_mask(void)
2117 (hotkey_all_mask | TPACPI_HKEY_NVRAM_KNOWN_MASK); 2090 (hotkey_all_mask | TPACPI_HKEY_NVRAM_KNOWN_MASK);
2118 2091
2119 if (wantedmask) 2092 if (wantedmask)
2120 printk(TPACPI_NOTICE 2093 pr_notice("required events 0x%08x not enabled!\n", wantedmask);
2121 "required events 0x%08x not enabled!\n",
2122 wantedmask);
2123} 2094}
2124 2095
2125/* 2096/*
@@ -2157,10 +2128,9 @@ static int hotkey_mask_set(u32 mask)
2157 * a given event. 2128 * a given event.
2158 */ 2129 */
2159 if (!hotkey_mask_get() && !rc && (fwmask & ~hotkey_acpi_mask)) { 2130 if (!hotkey_mask_get() && !rc && (fwmask & ~hotkey_acpi_mask)) {
2160 printk(TPACPI_NOTICE 2131 pr_notice("asked for hotkey mask 0x%08x, but "
2161 "asked for hotkey mask 0x%08x, but " 2132 "firmware forced it to 0x%08x\n",
2162 "firmware forced it to 0x%08x\n", 2133 fwmask, hotkey_acpi_mask);
2163 fwmask, hotkey_acpi_mask);
2164 } 2134 }
2165 2135
2166 if (tpacpi_lifecycle != TPACPI_LIFE_EXITING) 2136 if (tpacpi_lifecycle != TPACPI_LIFE_EXITING)
@@ -2184,13 +2154,11 @@ static int hotkey_user_mask_set(const u32 mask)
2184 (mask == 0xffff || mask == 0xffffff || 2154 (mask == 0xffff || mask == 0xffffff ||
2185 mask == 0xffffffff)) { 2155 mask == 0xffffffff)) {
2186 tp_warned.hotkey_mask_ff = 1; 2156 tp_warned.hotkey_mask_ff = 1;
2187 printk(TPACPI_NOTICE 2157 pr_notice("setting the hotkey mask to 0x%08x is likely "
2188 "setting the hotkey mask to 0x%08x is likely " 2158 "not the best way to go about it\n", mask);
2189 "not the best way to go about it\n", mask); 2159 pr_notice("please consider using the driver defaults, "
2190 printk(TPACPI_NOTICE 2160 "and refer to up-to-date thinkpad-acpi "
2191 "please consider using the driver defaults, " 2161 "documentation\n");
2192 "and refer to up-to-date thinkpad-acpi "
2193 "documentation\n");
2194 } 2162 }
2195 2163
2196 /* Try to enable what the user asked for, plus whatever we need. 2164 /* Try to enable what the user asked for, plus whatever we need.
@@ -2574,8 +2542,7 @@ static void hotkey_poll_setup(const bool may_warn)
2574 NULL, TPACPI_NVRAM_KTHREAD_NAME); 2542 NULL, TPACPI_NVRAM_KTHREAD_NAME);
2575 if (IS_ERR(tpacpi_hotkey_task)) { 2543 if (IS_ERR(tpacpi_hotkey_task)) {
2576 tpacpi_hotkey_task = NULL; 2544 tpacpi_hotkey_task = NULL;
2577 printk(TPACPI_ERR 2545 pr_err("could not create kernel thread "
2578 "could not create kernel thread "
2579 "for hotkey polling\n"); 2546 "for hotkey polling\n");
2580 } 2547 }
2581 } 2548 }
@@ -2583,11 +2550,10 @@ static void hotkey_poll_setup(const bool may_warn)
2583 hotkey_poll_stop_sync(); 2550 hotkey_poll_stop_sync();
2584 if (may_warn && (poll_driver_mask || poll_user_mask) && 2551 if (may_warn && (poll_driver_mask || poll_user_mask) &&
2585 hotkey_poll_freq == 0) { 2552 hotkey_poll_freq == 0) {
2586 printk(TPACPI_NOTICE 2553 pr_notice("hot keys 0x%08x and/or events 0x%08x "
2587 "hot keys 0x%08x and/or events 0x%08x " 2554 "require polling, which is currently "
2588 "require polling, which is currently " 2555 "disabled\n",
2589 "disabled\n", 2556 poll_user_mask, poll_driver_mask);
2590 poll_user_mask, poll_driver_mask);
2591 } 2557 }
2592 } 2558 }
2593} 2559}
@@ -2811,13 +2777,13 @@ static ssize_t hotkey_source_mask_store(struct device *dev,
2811 mutex_unlock(&hotkey_mutex); 2777 mutex_unlock(&hotkey_mutex);
2812 2778
2813 if (rc < 0) 2779 if (rc < 0)
2814 printk(TPACPI_ERR "hotkey_source_mask: failed to update the" 2780 pr_err("hotkey_source_mask: "
2815 "firmware event mask!\n"); 2781 "failed to update the firmware event mask!\n");
2816 2782
2817 if (r_ev) 2783 if (r_ev)
2818 printk(TPACPI_NOTICE "hotkey_source_mask: " 2784 pr_notice("hotkey_source_mask: "
2819 "some important events were disabled: " 2785 "some important events were disabled: 0x%04x\n",
2820 "0x%04x\n", r_ev); 2786 r_ev);
2821 2787
2822 tpacpi_disclose_usertask("hotkey_source_mask", "set to 0x%08lx\n", t); 2788 tpacpi_disclose_usertask("hotkey_source_mask", "set to 0x%08lx\n", t);
2823 2789
@@ -3048,8 +3014,7 @@ static void hotkey_exit(void)
3048 if (((tp_features.hotkey_mask && 3014 if (((tp_features.hotkey_mask &&
3049 hotkey_mask_set(hotkey_orig_mask)) | 3015 hotkey_mask_set(hotkey_orig_mask)) |
3050 hotkey_status_set(false)) != 0) 3016 hotkey_status_set(false)) != 0)
3051 printk(TPACPI_ERR 3017 pr_err("failed to restore hot key mask "
3052 "failed to restore hot key mask "
3053 "to BIOS defaults\n"); 3018 "to BIOS defaults\n");
3054} 3019}
3055 3020
@@ -3288,10 +3253,9 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3288 for HKEY interface version 0x100 */ 3253 for HKEY interface version 0x100 */
3289 if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { 3254 if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
3290 if ((hkeyv >> 8) != 1) { 3255 if ((hkeyv >> 8) != 1) {
3291 printk(TPACPI_ERR "unknown version of the " 3256 pr_err("unknown version of the HKEY interface: 0x%x\n",
3292 "HKEY interface: 0x%x\n", hkeyv); 3257 hkeyv);
3293 printk(TPACPI_ERR "please report this to %s\n", 3258 pr_err("please report this to %s\n", TPACPI_MAIL);
3294 TPACPI_MAIL);
3295 } else { 3259 } else {
3296 /* 3260 /*
3297 * MHKV 0x100 in A31, R40, R40e, 3261 * MHKV 0x100 in A31, R40, R40e,
@@ -3304,8 +3268,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3304 /* Paranoia check AND init hotkey_all_mask */ 3268 /* Paranoia check AND init hotkey_all_mask */
3305 if (!acpi_evalf(hkey_handle, &hotkey_all_mask, 3269 if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
3306 "MHKA", "qd")) { 3270 "MHKA", "qd")) {
3307 printk(TPACPI_ERR 3271 pr_err("missing MHKA handler, "
3308 "missing MHKA handler, "
3309 "please report this to %s\n", 3272 "please report this to %s\n",
3310 TPACPI_MAIL); 3273 TPACPI_MAIL);
3311 /* Fallback: pre-init for FN+F3,F4,F12 */ 3274 /* Fallback: pre-init for FN+F3,F4,F12 */
@@ -3343,16 +3306,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3343 if (dbg_wlswemul) { 3306 if (dbg_wlswemul) {
3344 tp_features.hotkey_wlsw = 1; 3307 tp_features.hotkey_wlsw = 1;
3345 radiosw_state = !!tpacpi_wlsw_emulstate; 3308 radiosw_state = !!tpacpi_wlsw_emulstate;
3346 printk(TPACPI_INFO 3309 pr_info("radio switch emulation enabled\n");
3347 "radio switch emulation enabled\n");
3348 } else 3310 } else
3349#endif 3311#endif
3350 /* Not all thinkpads have a hardware radio switch */ 3312 /* Not all thinkpads have a hardware radio switch */
3351 if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) { 3313 if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
3352 tp_features.hotkey_wlsw = 1; 3314 tp_features.hotkey_wlsw = 1;
3353 radiosw_state = !!status; 3315 radiosw_state = !!status;
3354 printk(TPACPI_INFO 3316 pr_info("radio switch found; radios are %s\n",
3355 "radio switch found; radios are %s\n",
3356 enabled(status, 0)); 3317 enabled(status, 0));
3357 } 3318 }
3358 if (tp_features.hotkey_wlsw) 3319 if (tp_features.hotkey_wlsw)
@@ -3363,8 +3324,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3363 if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) { 3324 if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) {
3364 tp_features.hotkey_tablet = 1; 3325 tp_features.hotkey_tablet = 1;
3365 tabletsw_state = !!(status & TP_HOTKEY_TABLET_MASK); 3326 tabletsw_state = !!(status & TP_HOTKEY_TABLET_MASK);
3366 printk(TPACPI_INFO 3327 pr_info("possible tablet mode switch found; "
3367 "possible tablet mode switch found; "
3368 "ThinkPad in %s mode\n", 3328 "ThinkPad in %s mode\n",
3369 (tabletsw_state) ? "tablet" : "laptop"); 3329 (tabletsw_state) ? "tablet" : "laptop");
3370 res = add_to_attr_set(hotkey_dev_attributes, 3330 res = add_to_attr_set(hotkey_dev_attributes,
@@ -3382,8 +3342,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3382 hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE, 3342 hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE,
3383 GFP_KERNEL); 3343 GFP_KERNEL);
3384 if (!hotkey_keycode_map) { 3344 if (!hotkey_keycode_map) {
3385 printk(TPACPI_ERR 3345 pr_err("failed to allocate memory for key map\n");
3386 "failed to allocate memory for key map\n");
3387 res = -ENOMEM; 3346 res = -ENOMEM;
3388 goto err_exit; 3347 goto err_exit;
3389 } 3348 }
@@ -3426,13 +3385,11 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3426 * userspace. tpacpi_detect_brightness_capabilities() must have 3385 * userspace. tpacpi_detect_brightness_capabilities() must have
3427 * been called before this point */ 3386 * been called before this point */
3428 if (tp_features.bright_acpimode && acpi_video_backlight_support()) { 3387 if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
3429 printk(TPACPI_INFO 3388 pr_info("This ThinkPad has standard ACPI backlight "
3430 "This ThinkPad has standard ACPI backlight " 3389 "brightness control, supported by the ACPI "
3431 "brightness control, supported by the ACPI " 3390 "video driver\n");
3432 "video driver\n"); 3391 pr_notice("Disabling thinkpad-acpi brightness events "
3433 printk(TPACPI_NOTICE 3392 "by default...\n");
3434 "Disabling thinkpad-acpi brightness events "
3435 "by default...\n");
3436 3393
3437 /* Disable brightness up/down on Lenovo thinkpads when 3394 /* Disable brightness up/down on Lenovo thinkpads when
3438 * ACPI is handling them, otherwise it is plain impossible 3395 * ACPI is handling them, otherwise it is plain impossible
@@ -3539,8 +3496,7 @@ static bool hotkey_notify_wakeup(const u32 hkey,
3539 3496
3540 case TP_HKEY_EV_WKUP_S3_BATLOW: /* Battery on critical low level/S3 */ 3497 case TP_HKEY_EV_WKUP_S3_BATLOW: /* Battery on critical low level/S3 */
3541 case TP_HKEY_EV_WKUP_S4_BATLOW: /* Battery on critical low level/S4 */ 3498 case TP_HKEY_EV_WKUP_S4_BATLOW: /* Battery on critical low level/S4 */
3542 printk(TPACPI_ALERT 3499 pr_alert("EMERGENCY WAKEUP: battery almost empty\n");
3543 "EMERGENCY WAKEUP: battery almost empty\n");
3544 /* how to auto-heal: */ 3500 /* how to auto-heal: */
3545 /* 2313: woke up from S3, go to S4/S5 */ 3501 /* 2313: woke up from S3, go to S4/S5 */
3546 /* 2413: woke up from S4, go to S5 */ 3502 /* 2413: woke up from S4, go to S5 */
@@ -3551,9 +3507,7 @@ static bool hotkey_notify_wakeup(const u32 hkey,
3551 } 3507 }
3552 3508
3553 if (hotkey_wakeup_reason != TP_ACPI_WAKEUP_NONE) { 3509 if (hotkey_wakeup_reason != TP_ACPI_WAKEUP_NONE) {
3554 printk(TPACPI_INFO 3510 pr_info("woke up due to a hot-unplug request...\n");
3555 "woke up due to a hot-unplug "
3556 "request...\n");
3557 hotkey_wakeup_reason_notify_change(); 3511 hotkey_wakeup_reason_notify_change();
3558 } 3512 }
3559 return true; 3513 return true;
@@ -3605,37 +3559,31 @@ static bool hotkey_notify_thermal(const u32 hkey,
3605 3559
3606 switch (hkey) { 3560 switch (hkey) {
3607 case TP_HKEY_EV_THM_TABLE_CHANGED: 3561 case TP_HKEY_EV_THM_TABLE_CHANGED:
3608 printk(TPACPI_INFO 3562 pr_info("EC reports that Thermal Table has changed\n");
3609 "EC reports that Thermal Table has changed\n");
3610 /* recommended action: do nothing, we don't have 3563 /* recommended action: do nothing, we don't have
3611 * Lenovo ATM information */ 3564 * Lenovo ATM information */
3612 return true; 3565 return true;
3613 case TP_HKEY_EV_ALARM_BAT_HOT: 3566 case TP_HKEY_EV_ALARM_BAT_HOT:
3614 printk(TPACPI_CRIT 3567 pr_crit("THERMAL ALARM: battery is too hot!\n");
3615 "THERMAL ALARM: battery is too hot!\n");
3616 /* recommended action: warn user through gui */ 3568 /* recommended action: warn user through gui */
3617 break; 3569 break;
3618 case TP_HKEY_EV_ALARM_BAT_XHOT: 3570 case TP_HKEY_EV_ALARM_BAT_XHOT:
3619 printk(TPACPI_ALERT 3571 pr_alert("THERMAL EMERGENCY: battery is extremely hot!\n");
3620 "THERMAL EMERGENCY: battery is extremely hot!\n");
3621 /* recommended action: immediate sleep/hibernate */ 3572 /* recommended action: immediate sleep/hibernate */
3622 break; 3573 break;
3623 case TP_HKEY_EV_ALARM_SENSOR_HOT: 3574 case TP_HKEY_EV_ALARM_SENSOR_HOT:
3624 printk(TPACPI_CRIT 3575 pr_crit("THERMAL ALARM: "
3625 "THERMAL ALARM: "
3626 "a sensor reports something is too hot!\n"); 3576 "a sensor reports something is too hot!\n");
3627 /* recommended action: warn user through gui, that */ 3577 /* recommended action: warn user through gui, that */
3628 /* some internal component is too hot */ 3578 /* some internal component is too hot */
3629 break; 3579 break;
3630 case TP_HKEY_EV_ALARM_SENSOR_XHOT: 3580 case TP_HKEY_EV_ALARM_SENSOR_XHOT:
3631 printk(TPACPI_ALERT 3581 pr_alert("THERMAL EMERGENCY: "
3632 "THERMAL EMERGENCY: " 3582 "a sensor reports something is extremely hot!\n");
3633 "a sensor reports something is extremely hot!\n");
3634 /* recommended action: immediate sleep/hibernate */ 3583 /* recommended action: immediate sleep/hibernate */
3635 break; 3584 break;
3636 default: 3585 default:
3637 printk(TPACPI_ALERT 3586 pr_alert("THERMAL ALERT: unknown thermal alarm received\n");
3638 "THERMAL ALERT: unknown thermal alarm received\n");
3639 known = false; 3587 known = false;
3640 } 3588 }
3641 3589
@@ -3652,8 +3600,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
3652 bool known_ev; 3600 bool known_ev;
3653 3601
3654 if (event != 0x80) { 3602 if (event != 0x80) {
3655 printk(TPACPI_ERR 3603 pr_err("unknown HKEY notification event %d\n", event);
3656 "unknown HKEY notification event %d\n", event);
3657 /* forward it to userspace, maybe it knows how to handle it */ 3604 /* forward it to userspace, maybe it knows how to handle it */
3658 acpi_bus_generate_netlink_event( 3605 acpi_bus_generate_netlink_event(
3659 ibm->acpi->device->pnp.device_class, 3606 ibm->acpi->device->pnp.device_class,
@@ -3664,7 +3611,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
3664 3611
3665 while (1) { 3612 while (1) {
3666 if (!acpi_evalf(hkey_handle, &hkey, "MHKP", "d")) { 3613 if (!acpi_evalf(hkey_handle, &hkey, "MHKP", "d")) {
3667 printk(TPACPI_ERR "failed to retrieve HKEY event\n"); 3614 pr_err("failed to retrieve HKEY event\n");
3668 return; 3615 return;
3669 } 3616 }
3670 3617
@@ -3692,8 +3639,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
3692 switch (hkey) { 3639 switch (hkey) {
3693 case TP_HKEY_EV_BAYEJ_ACK: 3640 case TP_HKEY_EV_BAYEJ_ACK:
3694 hotkey_autosleep_ack = 1; 3641 hotkey_autosleep_ack = 1;
3695 printk(TPACPI_INFO 3642 pr_info("bay ejected\n");
3696 "bay ejected\n");
3697 hotkey_wakeup_hotunplug_complete_notify_change(); 3643 hotkey_wakeup_hotunplug_complete_notify_change();
3698 known_ev = true; 3644 known_ev = true;
3699 break; 3645 break;
@@ -3709,8 +3655,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
3709 /* 0x4000-0x4FFF: dock-related wakeups */ 3655 /* 0x4000-0x4FFF: dock-related wakeups */
3710 if (hkey == TP_HKEY_EV_UNDOCK_ACK) { 3656 if (hkey == TP_HKEY_EV_UNDOCK_ACK) {
3711 hotkey_autosleep_ack = 1; 3657 hotkey_autosleep_ack = 1;
3712 printk(TPACPI_INFO 3658 pr_info("undocked\n");
3713 "undocked\n");
3714 hotkey_wakeup_hotunplug_complete_notify_change(); 3659 hotkey_wakeup_hotunplug_complete_notify_change();
3715 known_ev = true; 3660 known_ev = true;
3716 } else { 3661 } else {
@@ -3741,11 +3686,9 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
3741 known_ev = false; 3686 known_ev = false;
3742 } 3687 }
3743 if (!known_ev) { 3688 if (!known_ev) {
3744 printk(TPACPI_NOTICE 3689 pr_notice("unhandled HKEY event 0x%04x\n", hkey);
3745 "unhandled HKEY event 0x%04x\n", hkey); 3690 pr_notice("please report the conditions when this "
3746 printk(TPACPI_NOTICE 3691 "event happened to %s\n", TPACPI_MAIL);
3747 "please report the conditions when this "
3748 "event happened to %s\n", TPACPI_MAIL);
3749 } 3692 }
3750 3693
3751 /* Legacy events */ 3694 /* Legacy events */
@@ -3778,8 +3721,7 @@ static void hotkey_resume(void)
3778 3721
3779 if (hotkey_status_set(true) < 0 || 3722 if (hotkey_status_set(true) < 0 ||
3780 hotkey_mask_set(hotkey_acpi_mask) < 0) 3723 hotkey_mask_set(hotkey_acpi_mask) < 0)
3781 printk(TPACPI_ERR 3724 pr_err("error while attempting to reset the event "
3782 "error while attempting to reset the event "
3783 "firmware interface\n"); 3725 "firmware interface\n");
3784 3726
3785 tpacpi_send_radiosw_update(); 3727 tpacpi_send_radiosw_update();
@@ -3824,14 +3766,12 @@ static void hotkey_enabledisable_warn(bool enable)
3824{ 3766{
3825 tpacpi_log_usertask("procfs hotkey enable/disable"); 3767 tpacpi_log_usertask("procfs hotkey enable/disable");
3826 if (!WARN((tpacpi_lifecycle == TPACPI_LIFE_RUNNING || !enable), 3768 if (!WARN((tpacpi_lifecycle == TPACPI_LIFE_RUNNING || !enable),
3827 TPACPI_WARN 3769 pr_fmt("hotkey enable/disable functionality has been "
3828 "hotkey enable/disable functionality has been " 3770 "removed from the driver. "
3829 "removed from the driver. Hotkeys are always " 3771 "Hotkeys are always enabled.\n")))
3830 "enabled\n")) 3772 pr_err("Please remove the hotkey=enable module "
3831 printk(TPACPI_ERR 3773 "parameter, it is deprecated. "
3832 "Please remove the hotkey=enable module " 3774 "Hotkeys are always enabled.\n");
3833 "parameter, it is deprecated. Hotkeys are always "
3834 "enabled\n");
3835} 3775}
3836 3776
3837static int hotkey_write(char *buf) 3777static int hotkey_write(char *buf)
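
One subtlety in hotkey_enabledisable_warn() above: WARN(cond, fmt...) logs the message plus a backtrace when cond is true and also evaluates to cond, so !WARN(...) can gate a follow-up message for the quiet case. Stripped down, with an illustrative condition and messages:

static void warn_or_nag(bool still_in_use)
{
	/* backtrace if still_in_use; otherwise just nag politely */
	if (!WARN(still_in_use,
		  pr_fmt("this functionality has been removed\n")))
		pr_err("please drop the deprecated module parameter\n");
}
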
@@ -4011,8 +3951,7 @@ static void bluetooth_shutdown(void)
4011 /* Order firmware to save current state to NVRAM */ 3951 /* Order firmware to save current state to NVRAM */
4012 if (!acpi_evalf(NULL, NULL, "\\BLTH", "vd", 3952 if (!acpi_evalf(NULL, NULL, "\\BLTH", "vd",
4013 TP_ACPI_BLTH_SAVE_STATE)) 3953 TP_ACPI_BLTH_SAVE_STATE))
4014 printk(TPACPI_NOTICE 3954 pr_notice("failed to save bluetooth state to NVRAM\n");
4015 "failed to save bluetooth state to NVRAM\n");
4016 else 3955 else
4017 vdbg_printk(TPACPI_DBG_RFKILL, 3956 vdbg_printk(TPACPI_DBG_RFKILL,
4018 "bluestooth state saved to NVRAM\n"); 3957 "bluestooth state saved to NVRAM\n");

@@ -4051,8 +3990,7 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
4051#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES 3990#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
4052 if (dbg_bluetoothemul) { 3991 if (dbg_bluetoothemul) {
4053 tp_features.bluetooth = 1; 3992 tp_features.bluetooth = 1;
4054 printk(TPACPI_INFO 3993 pr_info("bluetooth switch emulation enabled\n");
4055 "bluetooth switch emulation enabled\n");
4056 } else 3994 } else
4057#endif 3995#endif
4058 if (tp_features.bluetooth && 3996 if (tp_features.bluetooth &&
@@ -4203,8 +4141,7 @@ static void wan_shutdown(void)
4203 /* Order firmware to save current state to NVRAM */ 4141 /* Order firmware to save current state to NVRAM */
4204 if (!acpi_evalf(NULL, NULL, "\\WGSV", "vd", 4142 if (!acpi_evalf(NULL, NULL, "\\WGSV", "vd",
4205 TP_ACPI_WGSV_SAVE_STATE)) 4143 TP_ACPI_WGSV_SAVE_STATE))
4206 printk(TPACPI_NOTICE 4144 pr_notice("failed to save WWAN state to NVRAM\n");
4207 "failed to save WWAN state to NVRAM\n");
4208 else 4145 else
4209 vdbg_printk(TPACPI_DBG_RFKILL, 4146 vdbg_printk(TPACPI_DBG_RFKILL,
4210 "WWAN state saved to NVRAM\n"); 4147 "WWAN state saved to NVRAM\n");
@@ -4241,8 +4178,7 @@ static int __init wan_init(struct ibm_init_struct *iibm)
4241#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES 4178#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
4242 if (dbg_wwanemul) { 4179 if (dbg_wwanemul) {
4243 tp_features.wan = 1; 4180 tp_features.wan = 1;
4244 printk(TPACPI_INFO 4181 pr_info("wwan switch emulation enabled\n");
4245 "wwan switch emulation enabled\n");
4246 } else 4182 } else
4247#endif 4183#endif
4248 if (tp_features.wan && 4184 if (tp_features.wan &&
@@ -4382,8 +4318,7 @@ static int __init uwb_init(struct ibm_init_struct *iibm)
4382#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES 4318#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
4383 if (dbg_uwbemul) { 4319 if (dbg_uwbemul) {
4384 tp_features.uwb = 1; 4320 tp_features.uwb = 1;
4385 printk(TPACPI_INFO 4321 pr_info("uwb switch emulation enabled\n");
4386 "uwb switch emulation enabled\n");
4387 } else 4322 } else
4388#endif 4323#endif
4389 if (tp_features.uwb && 4324 if (tp_features.uwb &&
@@ -4444,6 +4379,15 @@ static int video_orig_autosw;
4444static int video_autosw_get(void); 4379static int video_autosw_get(void);
4445static int video_autosw_set(int enable); 4380static int video_autosw_set(int enable);
4446 4381
4382TPACPI_HANDLE(vid, root,
4383 "\\_SB.PCI.AGP.VGA", /* 570 */
4384 "\\_SB.PCI0.AGP0.VID0", /* 600e/x, 770x */
4385 "\\_SB.PCI0.VID0", /* 770e */
4386 "\\_SB.PCI0.VID", /* A21e, G4x, R50e, X30, X40 */
4387 "\\_SB.PCI0.AGP.VGA", /* X100e and a few others */
4388 "\\_SB.PCI0.AGP.VID", /* all others */
4389 ); /* R30, R31 */
4390
4447TPACPI_HANDLE(vid2, root, "\\_SB.PCI0.AGPB.VID"); /* G41 */ 4391TPACPI_HANDLE(vid2, root, "\\_SB.PCI0.AGPB.VID"); /* G41 */
4448 4392
4449static int __init video_init(struct ibm_init_struct *iibm) 4393static int __init video_init(struct ibm_init_struct *iibm)
@@ -4487,7 +4431,7 @@ static void video_exit(void)
4487 dbg_printk(TPACPI_DBG_EXIT, 4431 dbg_printk(TPACPI_DBG_EXIT,
4488 "restoring original video autoswitch mode\n"); 4432 "restoring original video autoswitch mode\n");
4489 if (video_autosw_set(video_orig_autosw)) 4433 if (video_autosw_set(video_orig_autosw))
4490 printk(TPACPI_ERR "error while trying to restore original " 4434 pr_err("error while trying to restore original "
4491 "video autoswitch mode\n"); 4435 "video autoswitch mode\n");
4492} 4436}
4493 4437
@@ -4560,8 +4504,7 @@ static int video_outputsw_set(int status)
4560 res = acpi_evalf(vid_handle, NULL, 4504 res = acpi_evalf(vid_handle, NULL,
4561 "ASWT", "vdd", status * 0x100, 0); 4505 "ASWT", "vdd", status * 0x100, 0);
4562 if (!autosw && video_autosw_set(autosw)) { 4506 if (!autosw && video_autosw_set(autosw)) {
4563 printk(TPACPI_ERR 4507 pr_err("video auto-switch left enabled due to error\n");
4564 "video auto-switch left enabled due to error\n");
4565 return -EIO; 4508 return -EIO;
4566 } 4509 }
4567 break; 4510 break;
@@ -4630,8 +4573,7 @@ static int video_outputsw_cycle(void)
4630 return -ENOSYS; 4573 return -ENOSYS;
4631 } 4574 }
4632 if (!autosw && video_autosw_set(autosw)) { 4575 if (!autosw && video_autosw_set(autosw)) {
4633 printk(TPACPI_ERR 4576 pr_err("video auto-switch left enabled due to error\n");
4634 "video auto-switch left enabled due to error\n");
4635 return -EIO; 4577 return -EIO;
4636 } 4578 }
4637 4579
@@ -5348,7 +5290,7 @@ static int __init led_init(struct ibm_init_struct *iibm)
5348 tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS, 5290 tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS,
5349 GFP_KERNEL); 5291 GFP_KERNEL);
5350 if (!tpacpi_leds) { 5292 if (!tpacpi_leds) {
5351 printk(TPACPI_ERR "Out of memory for LED data\n"); 5293 pr_err("Out of memory for LED data\n");
5352 return -ENOMEM; 5294 return -ENOMEM;
5353 } 5295 }
5354 5296
@@ -5367,9 +5309,8 @@ static int __init led_init(struct ibm_init_struct *iibm)
5367 } 5309 }
5368 5310
5369#ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS 5311#ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS
5370 printk(TPACPI_NOTICE 5312 pr_notice("warning: userspace override of important "
5371 "warning: userspace override of important " 5313 "firmware LEDs is enabled\n");
5372 "firmware LEDs is enabled\n");
5373#endif 5314#endif
5374 return 0; 5315 return 0;
5375} 5316}
@@ -5639,17 +5580,16 @@ static void thermal_dump_all_sensors(void)
5639 if (n <= 0) 5580 if (n <= 0)
5640 return; 5581 return;
5641 5582
5642 printk(TPACPI_NOTICE 5583 pr_notice("temperatures (Celsius):");
5643 "temperatures (Celsius):");
5644 5584
5645 for (i = 0; i < n; i++) { 5585 for (i = 0; i < n; i++) {
5646 if (t.temp[i] != TPACPI_THERMAL_SENSOR_NA) 5586 if (t.temp[i] != TPACPI_THERMAL_SENSOR_NA)
5647 printk(KERN_CONT " %d", (int)(t.temp[i] / 1000)); 5587 pr_cont(" %d", (int)(t.temp[i] / 1000));
5648 else 5588 else
5649 printk(KERN_CONT " N/A"); 5589 pr_cont(" N/A");
5650 } 5590 }
5651 5591
5652 printk(KERN_CONT "\n"); 5592 pr_cont("\n");
5653} 5593}
5654 5594
5655/* sysfs temp##_input -------------------------------------------------- */ 5595/* sysfs temp##_input -------------------------------------------------- */
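
The same hunk converts printk(KERN_CONT ...) to pr_cont(), which appends to the line opened by the preceding message instead of starting a new one. The pattern in isolation, assuming temperatures arrive in millidegrees:

static void dump_temps(const int *temp_mc, unsigned int n)
{
	unsigned int i;

	pr_notice("temperatures (Celsius):");
	for (i = 0; i < n; i++)
		pr_cont(" %d", temp_mc[i] / 1000);
	pr_cont("\n");
}
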
@@ -5769,14 +5709,12 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
5769 if (ta1 == 0) { 5709 if (ta1 == 0) {
5770 /* This is sheer paranoia, but we handle it anyway */ 5710 /* This is sheer paranoia, but we handle it anyway */
5771 if (acpi_tmp7) { 5711 if (acpi_tmp7) {
5772 printk(TPACPI_ERR 5712 pr_err("ThinkPad ACPI EC access misbehaving, "
5773 "ThinkPad ACPI EC access misbehaving, "
5774 "falling back to ACPI TMPx access " 5713 "falling back to ACPI TMPx access "
5775 "mode\n"); 5714 "mode\n");
5776 thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07; 5715 thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
5777 } else { 5716 } else {
5778 printk(TPACPI_ERR 5717 pr_err("ThinkPad ACPI EC access misbehaving, "
5779 "ThinkPad ACPI EC access misbehaving, "
5780 "disabling thermal sensors access\n"); 5718 "disabling thermal sensors access\n");
5781 thermal_read_mode = TPACPI_THERMAL_NONE; 5719 thermal_read_mode = TPACPI_THERMAL_NONE;
5782 } 5720 }
@@ -6129,8 +6067,8 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle)
6129 if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) { 6067 if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) {
6130 obj = (union acpi_object *)buffer.pointer; 6068 obj = (union acpi_object *)buffer.pointer;
6131 if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) { 6069 if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
6132 printk(TPACPI_ERR "Unknown _BCL data, " 6070 pr_err("Unknown _BCL data, please report this to %s\n",
6133 "please report this to %s\n", TPACPI_MAIL); 6071 TPACPI_MAIL);
6134 rc = 0; 6072 rc = 0;
6135 } else { 6073 } else {
6136 rc = obj->package.count; 6074 rc = obj->package.count;
@@ -6214,18 +6152,15 @@ static void __init tpacpi_detect_brightness_capabilities(void)
6214 switch (b) { 6152 switch (b) {
6215 case 16: 6153 case 16:
6216 bright_maxlvl = 15; 6154 bright_maxlvl = 15;
6217 printk(TPACPI_INFO 6155 pr_info("detected a 16-level brightness capable ThinkPad\n");
6218 "detected a 16-level brightness capable ThinkPad\n");
6219 break; 6156 break;
6220 case 8: 6157 case 8:
6221 case 0: 6158 case 0:
6222 bright_maxlvl = 7; 6159 bright_maxlvl = 7;
 6223 printk(TPACPI_INFO 6160 pr_info("detected an 8-level brightness capable ThinkPad\n");
 6224 "detected an 8-level brightness capable ThinkPad\n");
6225 break; 6161 break;
6226 default: 6162 default:
6227 printk(TPACPI_ERR 6163 pr_err("Unsupported brightness interface, "
6228 "Unsupported brightness interface, "
6229 "please contact %s\n", TPACPI_MAIL); 6164 "please contact %s\n", TPACPI_MAIL);
6230 tp_features.bright_unkfw = 1; 6165 tp_features.bright_unkfw = 1;
6231 bright_maxlvl = b - 1; 6166 bright_maxlvl = b - 1;
@@ -6260,22 +6195,19 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
6260 6195
6261 if (acpi_video_backlight_support()) { 6196 if (acpi_video_backlight_support()) {
6262 if (brightness_enable > 1) { 6197 if (brightness_enable > 1) {
6263 printk(TPACPI_INFO 6198 pr_info("Standard ACPI backlight interface "
6264 "Standard ACPI backlight interface " 6199 "available, not loading native one\n");
6265 "available, not loading native one.\n");
6266 return 1; 6200 return 1;
6267 } else if (brightness_enable == 1) { 6201 } else if (brightness_enable == 1) {
6268 printk(TPACPI_WARN 6202 pr_warn("Cannot enable backlight brightness support, "
6269 "Cannot enable backlight brightness support, "
6270 "ACPI is already handling it. Refer to the " 6203 "ACPI is already handling it. Refer to the "
6271 "acpi_backlight kernel parameter\n"); 6204 "acpi_backlight kernel parameter.\n");
6272 return 1; 6205 return 1;
6273 } 6206 }
6274 } else if (tp_features.bright_acpimode && brightness_enable > 1) { 6207 } else if (tp_features.bright_acpimode && brightness_enable > 1) {
6275 printk(TPACPI_NOTICE 6208 pr_notice("Standard ACPI backlight interface not "
6276 "Standard ACPI backlight interface not " 6209 "available, thinkpad_acpi native "
6277 "available, thinkpad_acpi native " 6210 "brightness control enabled\n");
6278 "brightness control enabled\n");
6279 } 6211 }
6280 6212
6281 /* 6213 /*
@@ -6319,19 +6251,17 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
6319 if (IS_ERR(ibm_backlight_device)) { 6251 if (IS_ERR(ibm_backlight_device)) {
6320 int rc = PTR_ERR(ibm_backlight_device); 6252 int rc = PTR_ERR(ibm_backlight_device);
6321 ibm_backlight_device = NULL; 6253 ibm_backlight_device = NULL;
6322 printk(TPACPI_ERR "Could not register backlight device\n"); 6254 pr_err("Could not register backlight device\n");
6323 return rc; 6255 return rc;
6324 } 6256 }
6325 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT, 6257 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
6326 "brightness is supported\n"); 6258 "brightness is supported\n");
6327 6259
6328 if (quirks & TPACPI_BRGHT_Q_ASK) { 6260 if (quirks & TPACPI_BRGHT_Q_ASK) {
6329 printk(TPACPI_NOTICE 6261 pr_notice("brightness: will use unverified default: "
6330 "brightness: will use unverified default: " 6262 "brightness_mode=%d\n", brightness_mode);
6331 "brightness_mode=%d\n", brightness_mode); 6263 pr_notice("brightness: please report to %s whether it works well "
6332 printk(TPACPI_NOTICE 6264 "or not on your ThinkPad\n", TPACPI_MAIL);
6333 "brightness: please report to %s whether it works well "
6334 "or not on your ThinkPad\n", TPACPI_MAIL);
6335 } 6265 }
6336 6266
6337 /* Added by mistake in early 2007. Probably useless, but it could 6267 /* Added by mistake in early 2007. Probably useless, but it could
@@ -6804,8 +6734,7 @@ static int __init volume_create_alsa_mixer(void)
6804 rc = snd_card_create(alsa_index, alsa_id, THIS_MODULE, 6734 rc = snd_card_create(alsa_index, alsa_id, THIS_MODULE,
6805 sizeof(struct tpacpi_alsa_data), &card); 6735 sizeof(struct tpacpi_alsa_data), &card);
6806 if (rc < 0 || !card) { 6736 if (rc < 0 || !card) {
6807 printk(TPACPI_ERR 6737 pr_err("Failed to create ALSA card structures: %d\n", rc);
6808 "Failed to create ALSA card structures: %d\n", rc);
6809 return 1; 6738 return 1;
6810 } 6739 }
6811 6740
@@ -6839,9 +6768,8 @@ static int __init volume_create_alsa_mixer(void)
6839 ctl_vol = snd_ctl_new1(&volume_alsa_control_vol, NULL); 6768 ctl_vol = snd_ctl_new1(&volume_alsa_control_vol, NULL);
6840 rc = snd_ctl_add(card, ctl_vol); 6769 rc = snd_ctl_add(card, ctl_vol);
6841 if (rc < 0) { 6770 if (rc < 0) {
6842 printk(TPACPI_ERR 6771 pr_err("Failed to create ALSA volume control: %d\n",
6843 "Failed to create ALSA volume control: %d\n", 6772 rc);
6844 rc);
6845 goto err_exit; 6773 goto err_exit;
6846 } 6774 }
6847 data->ctl_vol_id = &ctl_vol->id; 6775 data->ctl_vol_id = &ctl_vol->id;
@@ -6850,8 +6778,7 @@ static int __init volume_create_alsa_mixer(void)
6850 ctl_mute = snd_ctl_new1(&volume_alsa_control_mute, NULL); 6778 ctl_mute = snd_ctl_new1(&volume_alsa_control_mute, NULL);
6851 rc = snd_ctl_add(card, ctl_mute); 6779 rc = snd_ctl_add(card, ctl_mute);
6852 if (rc < 0) { 6780 if (rc < 0) {
6853 printk(TPACPI_ERR "Failed to create ALSA mute control: %d\n", 6781 pr_err("Failed to create ALSA mute control: %d\n", rc);
6854 rc);
6855 goto err_exit; 6782 goto err_exit;
6856 } 6783 }
6857 data->ctl_mute_id = &ctl_mute->id; 6784 data->ctl_mute_id = &ctl_mute->id;
@@ -6859,7 +6786,7 @@ static int __init volume_create_alsa_mixer(void)
6859 snd_card_set_dev(card, &tpacpi_pdev->dev); 6786 snd_card_set_dev(card, &tpacpi_pdev->dev);
6860 rc = snd_card_register(card); 6787 rc = snd_card_register(card);
6861 if (rc < 0) { 6788 if (rc < 0) {
6862 printk(TPACPI_ERR "Failed to register ALSA card: %d\n", rc); 6789 pr_err("Failed to register ALSA card: %d\n", rc);
6863 goto err_exit; 6790 goto err_exit;
6864 } 6791 }
6865 6792
@@ -6915,9 +6842,8 @@ static int __init volume_init(struct ibm_init_struct *iibm)
6915 return -EINVAL; 6842 return -EINVAL;
6916 6843
6917 if (volume_mode == TPACPI_VOL_MODE_UCMS_STEP) { 6844 if (volume_mode == TPACPI_VOL_MODE_UCMS_STEP) {
6918 printk(TPACPI_ERR 6845 pr_err("UCMS step volume mode not implemented, "
6919 "UCMS step volume mode not implemented, " 6846 "please contact %s\n", TPACPI_MAIL);
6920 "please contact %s\n", TPACPI_MAIL);
6921 return 1; 6847 return 1;
6922 } 6848 }
6923 6849
@@ -6981,13 +6907,11 @@ static int __init volume_init(struct ibm_init_struct *iibm)
6981 6907
6982 rc = volume_create_alsa_mixer(); 6908 rc = volume_create_alsa_mixer();
6983 if (rc) { 6909 if (rc) {
6984 printk(TPACPI_ERR 6910 pr_err("Could not create the ALSA mixer interface\n");
6985 "Could not create the ALSA mixer interface\n");
6986 return rc; 6911 return rc;
6987 } 6912 }
6988 6913
6989 printk(TPACPI_INFO 6914 pr_info("Console audio control enabled, mode: %s\n",
6990 "Console audio control enabled, mode: %s\n",
6991 (volume_control_allowed) ? 6915 (volume_control_allowed) ?
6992 "override (read/write)" : 6916 "override (read/write)" :
6993 "monitor (read only)"); 6917 "monitor (read only)");
@@ -7049,12 +6973,10 @@ static int volume_write(char *buf)
7049 if (!volume_control_allowed && tpacpi_lifecycle != TPACPI_LIFE_INIT) { 6973 if (!volume_control_allowed && tpacpi_lifecycle != TPACPI_LIFE_INIT) {
7050 if (unlikely(!tp_warned.volume_ctrl_forbidden)) { 6974 if (unlikely(!tp_warned.volume_ctrl_forbidden)) {
7051 tp_warned.volume_ctrl_forbidden = 1; 6975 tp_warned.volume_ctrl_forbidden = 1;
7052 printk(TPACPI_NOTICE 6976 pr_notice("Console audio control in monitor mode, "
7053 "Console audio control in monitor mode, " 6977 "changes are not allowed\n");
7054 "changes are not allowed.\n"); 6978 pr_notice("Use the volume_control=1 module parameter "
7055 printk(TPACPI_NOTICE 6979 "to enable volume control\n");
7056 "Use the volume_control=1 module parameter "
7057 "to enable volume control\n");
7058 } 6980 }
7059 return -EPERM; 6981 return -EPERM;
7060 } 6982 }
@@ -7129,8 +7051,7 @@ static void inline volume_alsa_notify_change(void)
7129 7051
7130static int __init volume_init(struct ibm_init_struct *iibm) 7052static int __init volume_init(struct ibm_init_struct *iibm)
7131{ 7053{
7132 printk(TPACPI_INFO 7054 pr_info("volume: disabled as there is no ALSA support in this kernel\n");
7133 "volume: disabled as there is no ALSA support in this kernel\n");
7134 7055
7135 return 1; 7056 return 1;
7136} 7057}
@@ -7337,9 +7258,8 @@ TPACPI_HANDLE(sfan, ec, "SFAN", /* 570 */
7337static void fan_quirk1_setup(void) 7258static void fan_quirk1_setup(void)
7338{ 7259{
7339 if (fan_control_initial_status == 0x07) { 7260 if (fan_control_initial_status == 0x07) {
7340 printk(TPACPI_NOTICE 7261 pr_notice("fan_init: initial fan status is unknown, "
7341 "fan_init: initial fan status is unknown, " 7262 "assuming it is in auto mode\n");
7342 "assuming it is in auto mode\n");
7343 tp_features.fan_ctrl_status_undef = 1; 7263 tp_features.fan_ctrl_status_undef = 1;
7344 } 7264 }
7345} 7265}
@@ -7726,8 +7646,7 @@ static void fan_watchdog_reset(void)
7726 if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task, 7646 if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task,
7727 msecs_to_jiffies(fan_watchdog_maxinterval 7647 msecs_to_jiffies(fan_watchdog_maxinterval
7728 * 1000))) { 7648 * 1000))) {
7729 printk(TPACPI_ERR 7649 pr_err("failed to queue the fan watchdog, "
7730 "failed to queue the fan watchdog, "
7731 "watchdog will not trigger\n"); 7650 "watchdog will not trigger\n");
7732 } 7651 }
7733 } else 7652 } else
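
The re-arm logic in fan_watchdog_reset() above leans on queue_delayed_work() returning false when the work item could not be queued (for instance because it is already pending). Reduced to its essentials, with hypothetical workqueue and work-item names:

static void my_watchdog_reset(unsigned int interval_s)
{
	if (!queue_delayed_work(my_wq, &my_watchdog_work,
				msecs_to_jiffies(interval_s * 1000)))
		pr_err("failed to queue the watchdog\n");
}
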
@@ -7741,11 +7660,11 @@ static void fan_watchdog_fire(struct work_struct *ignored)
7741 if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING) 7660 if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING)
7742 return; 7661 return;
7743 7662
7744 printk(TPACPI_NOTICE "fan watchdog: enabling fan\n"); 7663 pr_notice("fan watchdog: enabling fan\n");
7745 rc = fan_set_enable(); 7664 rc = fan_set_enable();
7746 if (rc < 0) { 7665 if (rc < 0) {
7747 printk(TPACPI_ERR "fan watchdog: error %d while enabling fan, " 7666 pr_err("fan watchdog: error %d while enabling fan, "
7748 "will try again later...\n", -rc); 7667 "will try again later...\n", -rc);
7749 /* reschedule for later */ 7668 /* reschedule for later */
7750 fan_watchdog_reset(); 7669 fan_watchdog_reset();
7751 } 7670 }
@@ -8049,8 +7968,7 @@ static int __init fan_init(struct ibm_init_struct *iibm)
8049 "secondary fan support enabled\n"); 7968 "secondary fan support enabled\n");
8050 } 7969 }
8051 } else { 7970 } else {
8052 printk(TPACPI_ERR 7971 pr_err("ThinkPad ACPI EC access misbehaving, "
8053 "ThinkPad ACPI EC access misbehaving, "
8054 "fan status and control unavailable\n"); 7972 "fan status and control unavailable\n");
8055 return 1; 7973 return 1;
8056 } 7974 }
@@ -8150,9 +8068,8 @@ static void fan_suspend(pm_message_t state)
8150 fan_control_resume_level = 0; 8068 fan_control_resume_level = 0;
8151 rc = fan_get_status_safe(&fan_control_resume_level); 8069 rc = fan_get_status_safe(&fan_control_resume_level);
8152 if (rc < 0) 8070 if (rc < 0)
8153 printk(TPACPI_NOTICE 8071 pr_notice("failed to read fan level for later "
8154 "failed to read fan level for later " 8072 "restore during resume: %d\n", rc);
8155 "restore during resume: %d\n", rc);
8156 8073
8157 /* if it is undefined, don't attempt to restore it. 8074 /* if it is undefined, don't attempt to restore it.
8158 * KEEP THIS LAST */ 8075 * KEEP THIS LAST */
@@ -8207,13 +8124,11 @@ static void fan_resume(void)
8207 return; 8124 return;
8208 } 8125 }
8209 if (do_set) { 8126 if (do_set) {
8210 printk(TPACPI_NOTICE 8127 pr_notice("restoring fan level to 0x%02x\n",
8211 "restoring fan level to 0x%02x\n", 8128 fan_control_resume_level);
8212 fan_control_resume_level);
8213 rc = fan_set_level_safe(fan_control_resume_level); 8129 rc = fan_set_level_safe(fan_control_resume_level);
8214 if (rc < 0) 8130 if (rc < 0)
8215 printk(TPACPI_NOTICE 8131 pr_notice("failed to restore fan level: %d\n", rc);
8216 "failed to restore fan level: %d\n", rc);
8217 } 8132 }
8218} 8133}
8219 8134
@@ -8305,8 +8220,8 @@ static int fan_write_cmd_level(const char *cmd, int *rc)
8305 8220
8306 *rc = fan_set_level_safe(level); 8221 *rc = fan_set_level_safe(level);
8307 if (*rc == -ENXIO) 8222 if (*rc == -ENXIO)
8308 printk(TPACPI_ERR "level command accepted for unsupported " 8223 pr_err("level command accepted for unsupported access mode %d\n",
8309 "access mode %d", fan_control_access_mode); 8224 fan_control_access_mode);
8310 else if (!*rc) 8225 else if (!*rc)
8311 tpacpi_disclose_usertask("procfs fan", 8226 tpacpi_disclose_usertask("procfs fan",
8312 "set level to %d\n", level); 8227 "set level to %d\n", level);
@@ -8321,8 +8236,8 @@ static int fan_write_cmd_enable(const char *cmd, int *rc)
8321 8236
8322 *rc = fan_set_enable(); 8237 *rc = fan_set_enable();
8323 if (*rc == -ENXIO) 8238 if (*rc == -ENXIO)
8324 printk(TPACPI_ERR "enable command accepted for unsupported " 8239 pr_err("enable command accepted for unsupported access mode %d\n",
8325 "access mode %d", fan_control_access_mode); 8240 fan_control_access_mode);
8326 else if (!*rc) 8241 else if (!*rc)
8327 tpacpi_disclose_usertask("procfs fan", "enable\n"); 8242 tpacpi_disclose_usertask("procfs fan", "enable\n");
8328 8243
@@ -8336,8 +8251,8 @@ static int fan_write_cmd_disable(const char *cmd, int *rc)
8336 8251
8337 *rc = fan_set_disable(); 8252 *rc = fan_set_disable();
8338 if (*rc == -ENXIO) 8253 if (*rc == -ENXIO)
8339 printk(TPACPI_ERR "disable command accepted for unsupported " 8254 pr_err("disable command accepted for unsupported access mode %d\n",
8340 "access mode %d", fan_control_access_mode); 8255 fan_control_access_mode);
8341 else if (!*rc) 8256 else if (!*rc)
8342 tpacpi_disclose_usertask("procfs fan", "disable\n"); 8257 tpacpi_disclose_usertask("procfs fan", "disable\n");
8343 8258
@@ -8356,8 +8271,8 @@ static int fan_write_cmd_speed(const char *cmd, int *rc)
8356 8271
8357 *rc = fan_set_speed(speed); 8272 *rc = fan_set_speed(speed);
8358 if (*rc == -ENXIO) 8273 if (*rc == -ENXIO)
8359 printk(TPACPI_ERR "speed command accepted for unsupported " 8274 pr_err("speed command accepted for unsupported access mode %d\n",
8360 "access mode %d", fan_control_access_mode); 8275 fan_control_access_mode);
8361 else if (!*rc) 8276 else if (!*rc)
8362 tpacpi_disclose_usertask("procfs fan", 8277 tpacpi_disclose_usertask("procfs fan",
8363 "set speed to %d\n", speed); 8278 "set speed to %d\n", speed);
@@ -8560,8 +8475,8 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
8560 if (ibm->acpi->notify) { 8475 if (ibm->acpi->notify) {
8561 ret = setup_acpi_notify(ibm); 8476 ret = setup_acpi_notify(ibm);
8562 if (ret == -ENODEV) { 8477 if (ret == -ENODEV) {
8563 printk(TPACPI_NOTICE "disabling subdriver %s\n", 8478 pr_notice("disabling subdriver %s\n",
8564 ibm->name); 8479 ibm->name);
8565 ret = 0; 8480 ret = 0;
8566 goto err_out; 8481 goto err_out;
8567 } 8482 }
@@ -8583,8 +8498,7 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
8583 entry = proc_create_data(ibm->name, mode, proc_dir, 8498 entry = proc_create_data(ibm->name, mode, proc_dir,
8584 &dispatch_proc_fops, ibm); 8499 &dispatch_proc_fops, ibm);
8585 if (!entry) { 8500 if (!entry) {
8586 printk(TPACPI_ERR "unable to create proc entry %s\n", 8501 pr_err("unable to create proc entry %s\n", ibm->name);
8587 ibm->name);
8588 ret = -ENODEV; 8502 ret = -ENODEV;
8589 goto err_out; 8503 goto err_out;
8590 } 8504 }
@@ -8683,13 +8597,11 @@ static int __must_check __init get_thinkpad_model_data(
8683 tp->ec_release = (ec_fw_string[4] << 8) 8597 tp->ec_release = (ec_fw_string[4] << 8)
8684 | ec_fw_string[5]; 8598 | ec_fw_string[5];
8685 } else { 8599 } else {
8686 printk(TPACPI_NOTICE 8600 pr_notice("ThinkPad firmware release %s "
8687 "ThinkPad firmware release %s " 8601 "doesn't match the known patterns\n",
8688 "doesn't match the known patterns\n", 8602 ec_fw_string);
8689 ec_fw_string); 8603 pr_notice("please report this to %s\n",
8690 printk(TPACPI_NOTICE 8604 TPACPI_MAIL);
8691 "please report this to %s\n",
8692 TPACPI_MAIL);
8693 } 8605 }
8694 break; 8606 break;
8695 } 8607 }
@@ -8733,8 +8645,7 @@ static int __init probe_for_thinkpad(void)
8733 tpacpi_acpi_handle_locate("ec", TPACPI_ACPI_EC_HID, &ec_handle); 8645 tpacpi_acpi_handle_locate("ec", TPACPI_ACPI_EC_HID, &ec_handle);
8734 if (!ec_handle) { 8646 if (!ec_handle) {
8735 if (is_thinkpad) 8647 if (is_thinkpad)
8736 printk(TPACPI_ERR 8648 pr_err("Not yet supported ThinkPad detected!\n");
8737 "Not yet supported ThinkPad detected!\n");
8738 return -ENODEV; 8649 return -ENODEV;
8739 } 8650 }
8740 8651
@@ -8746,10 +8657,10 @@ static int __init probe_for_thinkpad(void)
8746 8657
8747static void __init thinkpad_acpi_init_banner(void) 8658static void __init thinkpad_acpi_init_banner(void)
8748{ 8659{
8749 printk(TPACPI_INFO "%s v%s\n", TPACPI_DESC, TPACPI_VERSION); 8660 pr_info("%s v%s\n", TPACPI_DESC, TPACPI_VERSION);
8750 printk(TPACPI_INFO "%s\n", TPACPI_URL); 8661 pr_info("%s\n", TPACPI_URL);
8751 8662
8752 printk(TPACPI_INFO "ThinkPad BIOS %s, EC %s\n", 8663 pr_info("ThinkPad BIOS %s, EC %s\n",
8753 (thinkpad_id.bios_version_str) ? 8664 (thinkpad_id.bios_version_str) ?
8754 thinkpad_id.bios_version_str : "unknown", 8665 thinkpad_id.bios_version_str : "unknown",
8755 (thinkpad_id.ec_version_str) ? 8666 (thinkpad_id.ec_version_str) ?
@@ -8758,7 +8669,7 @@ static void __init thinkpad_acpi_init_banner(void)
8758 BUG_ON(!thinkpad_id.vendor); 8669 BUG_ON(!thinkpad_id.vendor);
8759 8670
8760 if (thinkpad_id.model_str) 8671 if (thinkpad_id.model_str)
8761 printk(TPACPI_INFO "%s %s, model %s\n", 8672 pr_info("%s %s, model %s\n",
8762 (thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ? 8673 (thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ?
8763 "IBM" : ((thinkpad_id.vendor == 8674 "IBM" : ((thinkpad_id.vendor ==
8764 PCI_VENDOR_ID_LENOVO) ? 8675 PCI_VENDOR_ID_LENOVO) ?
@@ -9024,8 +8935,7 @@ static int __init thinkpad_acpi_module_init(void)
9024 8935
9025 ret = get_thinkpad_model_data(&thinkpad_id); 8936 ret = get_thinkpad_model_data(&thinkpad_id);
9026 if (ret) { 8937 if (ret) {
9027 printk(TPACPI_ERR 8938 pr_err("unable to get DMI data: %d\n", ret);
9028 "unable to get DMI data: %d\n", ret);
9029 thinkpad_acpi_module_exit(); 8939 thinkpad_acpi_module_exit();
9030 return ret; 8940 return ret;
9031 } 8941 }
@@ -9051,16 +8961,14 @@ static int __init thinkpad_acpi_module_init(void)
9051 8961
9052 proc_dir = proc_mkdir(TPACPI_PROC_DIR, acpi_root_dir); 8962 proc_dir = proc_mkdir(TPACPI_PROC_DIR, acpi_root_dir);
9053 if (!proc_dir) { 8963 if (!proc_dir) {
9054 printk(TPACPI_ERR 8964 pr_err("unable to create proc dir " TPACPI_PROC_DIR "\n");
9055 "unable to create proc dir " TPACPI_PROC_DIR);
9056 thinkpad_acpi_module_exit(); 8965 thinkpad_acpi_module_exit();
9057 return -ENODEV; 8966 return -ENODEV;
9058 } 8967 }
9059 8968
9060 ret = platform_driver_register(&tpacpi_pdriver); 8969 ret = platform_driver_register(&tpacpi_pdriver);
9061 if (ret) { 8970 if (ret) {
9062 printk(TPACPI_ERR 8971 pr_err("unable to register main platform driver\n");
9063 "unable to register main platform driver\n");
9064 thinkpad_acpi_module_exit(); 8972 thinkpad_acpi_module_exit();
9065 return ret; 8973 return ret;
9066 } 8974 }
@@ -9068,8 +8976,7 @@ static int __init thinkpad_acpi_module_init(void)
9068 8976
9069 ret = platform_driver_register(&tpacpi_hwmon_pdriver); 8977 ret = platform_driver_register(&tpacpi_hwmon_pdriver);
9070 if (ret) { 8978 if (ret) {
9071 printk(TPACPI_ERR 8979 pr_err("unable to register hwmon platform driver\n");
9072 "unable to register hwmon platform driver\n");
9073 thinkpad_acpi_module_exit(); 8980 thinkpad_acpi_module_exit();
9074 return ret; 8981 return ret;
9075 } 8982 }
@@ -9082,8 +8989,7 @@ static int __init thinkpad_acpi_module_init(void)
9082 &tpacpi_hwmon_pdriver.driver); 8989 &tpacpi_hwmon_pdriver.driver);
9083 } 8990 }
9084 if (ret) { 8991 if (ret) {
9085 printk(TPACPI_ERR 8992 pr_err("unable to create sysfs driver attributes\n");
9086 "unable to create sysfs driver attributes\n");
9087 thinkpad_acpi_module_exit(); 8993 thinkpad_acpi_module_exit();
9088 return ret; 8994 return ret;
9089 } 8995 }
@@ -9096,7 +9002,7 @@ static int __init thinkpad_acpi_module_init(void)
9096 if (IS_ERR(tpacpi_pdev)) { 9002 if (IS_ERR(tpacpi_pdev)) {
9097 ret = PTR_ERR(tpacpi_pdev); 9003 ret = PTR_ERR(tpacpi_pdev);
9098 tpacpi_pdev = NULL; 9004 tpacpi_pdev = NULL;
9099 printk(TPACPI_ERR "unable to register platform device\n"); 9005 pr_err("unable to register platform device\n");
9100 thinkpad_acpi_module_exit(); 9006 thinkpad_acpi_module_exit();
9101 return ret; 9007 return ret;
9102 } 9008 }
@@ -9106,16 +9012,14 @@ static int __init thinkpad_acpi_module_init(void)
9106 if (IS_ERR(tpacpi_sensors_pdev)) { 9012 if (IS_ERR(tpacpi_sensors_pdev)) {
9107 ret = PTR_ERR(tpacpi_sensors_pdev); 9013 ret = PTR_ERR(tpacpi_sensors_pdev);
9108 tpacpi_sensors_pdev = NULL; 9014 tpacpi_sensors_pdev = NULL;
9109 printk(TPACPI_ERR 9015 pr_err("unable to register hwmon platform device\n");
9110 "unable to register hwmon platform device\n");
9111 thinkpad_acpi_module_exit(); 9016 thinkpad_acpi_module_exit();
9112 return ret; 9017 return ret;
9113 } 9018 }
9114 ret = device_create_file(&tpacpi_sensors_pdev->dev, 9019 ret = device_create_file(&tpacpi_sensors_pdev->dev,
9115 &dev_attr_thinkpad_acpi_pdev_name); 9020 &dev_attr_thinkpad_acpi_pdev_name);
9116 if (ret) { 9021 if (ret) {
9117 printk(TPACPI_ERR 9022 pr_err("unable to create sysfs hwmon device attributes\n");
9118 "unable to create sysfs hwmon device attributes\n");
9119 thinkpad_acpi_module_exit(); 9023 thinkpad_acpi_module_exit();
9120 return ret; 9024 return ret;
9121 } 9025 }
@@ -9124,14 +9028,14 @@ static int __init thinkpad_acpi_module_init(void)
9124 if (IS_ERR(tpacpi_hwmon)) { 9028 if (IS_ERR(tpacpi_hwmon)) {
9125 ret = PTR_ERR(tpacpi_hwmon); 9029 ret = PTR_ERR(tpacpi_hwmon);
9126 tpacpi_hwmon = NULL; 9030 tpacpi_hwmon = NULL;
9127 printk(TPACPI_ERR "unable to register hwmon device\n"); 9031 pr_err("unable to register hwmon device\n");
9128 thinkpad_acpi_module_exit(); 9032 thinkpad_acpi_module_exit();
9129 return ret; 9033 return ret;
9130 } 9034 }
9131 mutex_init(&tpacpi_inputdev_send_mutex); 9035 mutex_init(&tpacpi_inputdev_send_mutex);
9132 tpacpi_inputdev = input_allocate_device(); 9036 tpacpi_inputdev = input_allocate_device();
9133 if (!tpacpi_inputdev) { 9037 if (!tpacpi_inputdev) {
9134 printk(TPACPI_ERR "unable to allocate input device\n"); 9038 pr_err("unable to allocate input device\n");
9135 thinkpad_acpi_module_exit(); 9039 thinkpad_acpi_module_exit();
9136 return -ENOMEM; 9040 return -ENOMEM;
9137 } else { 9041 } else {
@@ -9163,7 +9067,7 @@ static int __init thinkpad_acpi_module_init(void)
9163 9067
9164 ret = input_register_device(tpacpi_inputdev); 9068 ret = input_register_device(tpacpi_inputdev);
9165 if (ret < 0) { 9069 if (ret < 0) {
9166 printk(TPACPI_ERR "unable to register input device\n"); 9070 pr_err("unable to register input device\n");
9167 thinkpad_acpi_module_exit(); 9071 thinkpad_acpi_module_exit();
9168 return ret; 9072 return ret;
9169 } else { 9073 } else {
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index 1d07d6d09f27..4c20447ddbb7 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -194,7 +194,7 @@ static int __init topstar_laptop_init(void)
194 if (ret < 0) 194 if (ret < 0)
195 return ret; 195 return ret;
196 196
197 printk(KERN_INFO "Topstar Laptop ACPI extras driver loaded\n"); 197 pr_info("ACPI extras driver loaded\n");
198 198
199 return 0; 199 return 0;
200} 200}
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 63f42a22e102..cb009b2629ee 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -35,6 +35,8 @@
35 * 35 *
36 */ 36 */
37 37
38#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
39
38#define TOSHIBA_ACPI_VERSION "0.19" 40#define TOSHIBA_ACPI_VERSION "0.19"
39#define PROC_INTERFACE_VERSION 1 41#define PROC_INTERFACE_VERSION 1
40 42
@@ -60,11 +62,6 @@ MODULE_AUTHOR("John Belmonte");
60MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver"); 62MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
61MODULE_LICENSE("GPL"); 63MODULE_LICENSE("GPL");
62 64
63#define MY_LOGPREFIX "toshiba_acpi: "
64#define MY_ERR KERN_ERR MY_LOGPREFIX
65#define MY_NOTICE KERN_NOTICE MY_LOGPREFIX
66#define MY_INFO KERN_INFO MY_LOGPREFIX
67
68/* Toshiba ACPI method paths */ 65/* Toshiba ACPI method paths */
69#define METHOD_LCD_BRIGHTNESS "\\_SB_.PCI0.VGA_.LCD_._BCM" 66#define METHOD_LCD_BRIGHTNESS "\\_SB_.PCI0.VGA_.LCD_._BCM"
70#define TOSH_INTERFACE_1 "\\_SB_.VALD" 67#define TOSH_INTERFACE_1 "\\_SB_.VALD"
@@ -301,7 +298,7 @@ static int toshiba_illumination_available(void)
301 in[0] = 0xf100; 298 in[0] = 0xf100;
302 status = hci_raw(in, out); 299 status = hci_raw(in, out);
303 if (ACPI_FAILURE(status)) { 300 if (ACPI_FAILURE(status)) {
304 printk(MY_INFO "Illumination device not available\n"); 301 pr_info("Illumination device not available\n");
305 return 0; 302 return 0;
306 } 303 }
307 in[0] = 0xf400; 304 in[0] = 0xf400;
@@ -320,7 +317,7 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
320 in[0] = 0xf100; 317 in[0] = 0xf100;
321 status = hci_raw(in, out); 318 status = hci_raw(in, out);
322 if (ACPI_FAILURE(status)) { 319 if (ACPI_FAILURE(status)) {
323 printk(MY_INFO "Illumination device not available\n"); 320 pr_info("Illumination device not available\n");
324 return; 321 return;
325 } 322 }
326 323
@@ -331,7 +328,7 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
331 in[2] = 1; 328 in[2] = 1;
332 status = hci_raw(in, out); 329 status = hci_raw(in, out);
333 if (ACPI_FAILURE(status)) { 330 if (ACPI_FAILURE(status)) {
334 printk(MY_INFO "ACPI call for illumination failed.\n"); 331 pr_info("ACPI call for illumination failed\n");
335 return; 332 return;
336 } 333 }
337 } else { 334 } else {
@@ -341,7 +338,7 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
341 in[2] = 0; 338 in[2] = 0;
342 status = hci_raw(in, out); 339 status = hci_raw(in, out);
343 if (ACPI_FAILURE(status)) { 340 if (ACPI_FAILURE(status)) {
344 printk(MY_INFO "ACPI call for illumination failed.\n"); 341 pr_info("ACPI call for illumination failed.\n");
345 return; 342 return;
346 } 343 }
347 } 344 }
@@ -364,7 +361,7 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
364 in[0] = 0xf100; 361 in[0] = 0xf100;
365 status = hci_raw(in, out); 362 status = hci_raw(in, out);
366 if (ACPI_FAILURE(status)) { 363 if (ACPI_FAILURE(status)) {
367 printk(MY_INFO "Illumination device not available\n"); 364 pr_info("Illumination device not available\n");
368 return LED_OFF; 365 return LED_OFF;
369 } 366 }
370 367
@@ -373,7 +370,7 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
373 in[1] = 0x14e; 370 in[1] = 0x14e;
374 status = hci_raw(in, out); 371 status = hci_raw(in, out);
375 if (ACPI_FAILURE(status)) { 372 if (ACPI_FAILURE(status)) {
376 printk(MY_INFO "ACPI call for illumination failed.\n"); 373 pr_info("ACPI call for illumination failed.\n");
377 return LED_OFF; 374 return LED_OFF;
378 } 375 }
379 376
@@ -517,7 +514,7 @@ static int lcd_proc_show(struct seq_file *m, void *v)
517 seq_printf(m, "brightness_levels: %d\n", 514 seq_printf(m, "brightness_levels: %d\n",
518 HCI_LCD_BRIGHTNESS_LEVELS); 515 HCI_LCD_BRIGHTNESS_LEVELS);
519 } else { 516 } else {
520 printk(MY_ERR "Error reading LCD brightness\n"); 517 pr_err("Error reading LCD brightness\n");
521 } 518 }
522 519
523 return 0; 520 return 0;
@@ -592,7 +589,7 @@ static int video_proc_show(struct seq_file *m, void *v)
592 seq_printf(m, "crt_out: %d\n", is_crt); 589 seq_printf(m, "crt_out: %d\n", is_crt);
593 seq_printf(m, "tv_out: %d\n", is_tv); 590 seq_printf(m, "tv_out: %d\n", is_tv);
594 } else { 591 } else {
595 printk(MY_ERR "Error reading video out status\n"); 592 pr_err("Error reading video out status\n");
596 } 593 }
597 594
598 return 0; 595 return 0;
@@ -686,7 +683,7 @@ static int fan_proc_show(struct seq_file *m, void *v)
686 seq_printf(m, "running: %d\n", (value > 0)); 683 seq_printf(m, "running: %d\n", (value > 0));
687 seq_printf(m, "force_on: %d\n", force_fan); 684 seq_printf(m, "force_on: %d\n", force_fan);
688 } else { 685 } else {
689 printk(MY_ERR "Error reading fan status\n"); 686 pr_err("Error reading fan status\n");
690 } 687 }
691 688
692 return 0; 689 return 0;
@@ -750,9 +747,9 @@ static int keys_proc_show(struct seq_file *m, void *v)
750 * some machines where system events sporadically 747 * some machines where system events sporadically
751 * become disabled. */ 748 * become disabled. */
752 hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result); 749 hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result);
753 printk(MY_NOTICE "Re-enabled hotkeys\n"); 750 pr_notice("Re-enabled hotkeys\n");
754 } else { 751 } else {
755 printk(MY_ERR "Error reading hotkey status\n"); 752 pr_err("Error reading hotkey status\n");
756 goto end; 753 goto end;
757 } 754 }
758 } 755 }
@@ -863,7 +860,7 @@ static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context)
863 860
864 if (!sparse_keymap_report_event(toshiba_acpi.hotkey_dev, 861 if (!sparse_keymap_report_event(toshiba_acpi.hotkey_dev,
865 value, 1, true)) { 862 value, 1, true)) {
866 printk(MY_INFO "Unknown key %x\n", 863 pr_info("Unknown key %x\n",
867 value); 864 value);
868 } 865 }
869 } else if (hci_result == HCI_NOT_SUPPORTED) { 866 } else if (hci_result == HCI_NOT_SUPPORTED) {
@@ -871,7 +868,7 @@ static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context)
871 * some machines where system events sporadically 868 * some machines where system events sporadically
872 * become disabled. */ 869 * become disabled. */
873 hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result); 870 hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result);
874 printk(MY_NOTICE "Re-enabled hotkeys\n"); 871 pr_notice("Re-enabled hotkeys\n");
875 } 872 }
876 } while (hci_result != HCI_EMPTY); 873 } while (hci_result != HCI_EMPTY);
877} 874}
@@ -883,13 +880,13 @@ static int __init toshiba_acpi_setup_keyboard(char *device)
883 880
884 status = acpi_get_handle(NULL, device, &toshiba_acpi.handle); 881 status = acpi_get_handle(NULL, device, &toshiba_acpi.handle);
885 if (ACPI_FAILURE(status)) { 882 if (ACPI_FAILURE(status)) {
886 printk(MY_INFO "Unable to get notification device\n"); 883 pr_info("Unable to get notification device\n");
887 return -ENODEV; 884 return -ENODEV;
888 } 885 }
889 886
890 toshiba_acpi.hotkey_dev = input_allocate_device(); 887 toshiba_acpi.hotkey_dev = input_allocate_device();
891 if (!toshiba_acpi.hotkey_dev) { 888 if (!toshiba_acpi.hotkey_dev) {
892 printk(MY_INFO "Unable to register input device\n"); 889 pr_info("Unable to register input device\n");
893 return -ENOMEM; 890 return -ENOMEM;
894 } 891 }
895 892
@@ -905,21 +902,21 @@ static int __init toshiba_acpi_setup_keyboard(char *device)
905 status = acpi_install_notify_handler(toshiba_acpi.handle, 902 status = acpi_install_notify_handler(toshiba_acpi.handle,
906 ACPI_DEVICE_NOTIFY, toshiba_acpi_notify, NULL); 903 ACPI_DEVICE_NOTIFY, toshiba_acpi_notify, NULL);
907 if (ACPI_FAILURE(status)) { 904 if (ACPI_FAILURE(status)) {
908 printk(MY_INFO "Unable to install hotkey notification\n"); 905 pr_info("Unable to install hotkey notification\n");
909 error = -ENODEV; 906 error = -ENODEV;
910 goto err_free_keymap; 907 goto err_free_keymap;
911 } 908 }
912 909
913 status = acpi_evaluate_object(toshiba_acpi.handle, "ENAB", NULL, NULL); 910 status = acpi_evaluate_object(toshiba_acpi.handle, "ENAB", NULL, NULL);
914 if (ACPI_FAILURE(status)) { 911 if (ACPI_FAILURE(status)) {
915 printk(MY_INFO "Unable to enable hotkeys\n"); 912 pr_info("Unable to enable hotkeys\n");
916 error = -ENODEV; 913 error = -ENODEV;
917 goto err_remove_notify; 914 goto err_remove_notify;
918 } 915 }
919 916
920 error = input_register_device(toshiba_acpi.hotkey_dev); 917 error = input_register_device(toshiba_acpi.hotkey_dev);
921 if (error) { 918 if (error) {
922 printk(MY_INFO "Unable to register input device\n"); 919 pr_info("Unable to register input device\n");
923 goto err_remove_notify; 920 goto err_remove_notify;
924 } 921 }
925 922
@@ -980,17 +977,17 @@ static int __init toshiba_acpi_init(void)
980 if (is_valid_acpi_path(TOSH_INTERFACE_1 GHCI_METHOD)) { 977 if (is_valid_acpi_path(TOSH_INTERFACE_1 GHCI_METHOD)) {
981 method_hci = TOSH_INTERFACE_1 GHCI_METHOD; 978 method_hci = TOSH_INTERFACE_1 GHCI_METHOD;
982 if (toshiba_acpi_setup_keyboard(TOSH_INTERFACE_1)) 979 if (toshiba_acpi_setup_keyboard(TOSH_INTERFACE_1))
983 printk(MY_INFO "Unable to activate hotkeys\n"); 980 pr_info("Unable to activate hotkeys\n");
984 } else if (is_valid_acpi_path(TOSH_INTERFACE_2 GHCI_METHOD)) { 981 } else if (is_valid_acpi_path(TOSH_INTERFACE_2 GHCI_METHOD)) {
985 method_hci = TOSH_INTERFACE_2 GHCI_METHOD; 982 method_hci = TOSH_INTERFACE_2 GHCI_METHOD;
986 if (toshiba_acpi_setup_keyboard(TOSH_INTERFACE_2)) 983 if (toshiba_acpi_setup_keyboard(TOSH_INTERFACE_2))
987 printk(MY_INFO "Unable to activate hotkeys\n"); 984 pr_info("Unable to activate hotkeys\n");
988 } else 985 } else
989 return -ENODEV; 986 return -ENODEV;
990 987
991 printk(MY_INFO "Toshiba Laptop ACPI Extras version %s\n", 988 pr_info("Toshiba Laptop ACPI Extras version %s\n",
992 TOSHIBA_ACPI_VERSION); 989 TOSHIBA_ACPI_VERSION);
993 printk(MY_INFO " HCI method: %s\n", method_hci); 990 pr_info(" HCI method: %s\n", method_hci);
994 991
995 mutex_init(&toshiba_acpi.mutex); 992 mutex_init(&toshiba_acpi.mutex);
996 993
@@ -998,7 +995,7 @@ static int __init toshiba_acpi_init(void)
998 -1, NULL, 0); 995 -1, NULL, 0);
999 if (IS_ERR(toshiba_acpi.p_dev)) { 996 if (IS_ERR(toshiba_acpi.p_dev)) {
1000 ret = PTR_ERR(toshiba_acpi.p_dev); 997 ret = PTR_ERR(toshiba_acpi.p_dev);
1001 printk(MY_ERR "unable to register platform device\n"); 998 pr_err("unable to register platform device\n");
1002 toshiba_acpi.p_dev = NULL; 999 toshiba_acpi.p_dev = NULL;
1003 toshiba_acpi_exit(); 1000 toshiba_acpi_exit();
1004 return ret; 1001 return ret;
@@ -1028,7 +1025,7 @@ static int __init toshiba_acpi_init(void)
1028 if (IS_ERR(toshiba_backlight_device)) { 1025 if (IS_ERR(toshiba_backlight_device)) {
1029 ret = PTR_ERR(toshiba_backlight_device); 1026 ret = PTR_ERR(toshiba_backlight_device);
1030 1027
1031 printk(KERN_ERR "Could not register toshiba backlight device\n"); 1028 pr_err("Could not register toshiba backlight device\n");
1032 toshiba_backlight_device = NULL; 1029 toshiba_backlight_device = NULL;
1033 toshiba_acpi_exit(); 1030 toshiba_acpi_exit();
1034 return ret; 1031 return ret;
@@ -1042,14 +1039,14 @@ static int __init toshiba_acpi_init(void)
1042 &toshiba_rfk_ops, 1039 &toshiba_rfk_ops,
1043 &toshiba_acpi); 1040 &toshiba_acpi);
1044 if (!toshiba_acpi.bt_rfk) { 1041 if (!toshiba_acpi.bt_rfk) {
1045 printk(MY_ERR "unable to allocate rfkill device\n"); 1042 pr_err("unable to allocate rfkill device\n");
1046 toshiba_acpi_exit(); 1043 toshiba_acpi_exit();
1047 return -ENOMEM; 1044 return -ENOMEM;
1048 } 1045 }
1049 1046
1050 ret = rfkill_register(toshiba_acpi.bt_rfk); 1047 ret = rfkill_register(toshiba_acpi.bt_rfk);
1051 if (ret) { 1048 if (ret) {
1052 printk(MY_ERR "unable to register rfkill device\n"); 1049 pr_err("unable to register rfkill device\n");
1053 rfkill_destroy(toshiba_acpi.bt_rfk); 1050 rfkill_destroy(toshiba_acpi.bt_rfk);
1054 toshiba_acpi_exit(); 1051 toshiba_acpi_exit();
1055 return ret; 1052 return ret;
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 944068611919..5fb7186694df 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -17,6 +17,8 @@
17 * delivered. 17 * delivered.
18 */ 18 */
19 19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
20#include <linux/kernel.h> 22#include <linux/kernel.h>
21#include <linux/module.h> 23#include <linux/module.h>
22#include <linux/init.h> 24#include <linux/init.h>
@@ -70,14 +72,13 @@ static int toshiba_bluetooth_enable(acpi_handle handle)
70 if (!(result & 0x01)) 72 if (!(result & 0x01))
71 return 0; 73 return 0;
72 74
73 printk(KERN_INFO "toshiba_bluetooth: Re-enabling Toshiba Bluetooth\n"); 75 pr_info("Re-enabling Toshiba Bluetooth\n");
74 res1 = acpi_evaluate_object(handle, "AUSB", NULL, NULL); 76 res1 = acpi_evaluate_object(handle, "AUSB", NULL, NULL);
75 res2 = acpi_evaluate_object(handle, "BTPO", NULL, NULL); 77 res2 = acpi_evaluate_object(handle, "BTPO", NULL, NULL);
76 if (!ACPI_FAILURE(res1) || !ACPI_FAILURE(res2)) 78 if (!ACPI_FAILURE(res1) || !ACPI_FAILURE(res2))
77 return 0; 79 return 0;
78 80
79 printk(KERN_WARNING "toshiba_bluetooth: Failed to re-enable " 81 pr_warn("Failed to re-enable Toshiba Bluetooth\n");
80 "Toshiba Bluetooth\n");
81 82
82 return -ENODEV; 83 return -ENODEV;
83} 84}
@@ -107,8 +108,8 @@ static int toshiba_bt_rfkill_add(struct acpi_device *device)
107 &bt_present); 108 &bt_present);
108 109
109 if (!ACPI_FAILURE(status) && bt_present) { 110 if (!ACPI_FAILURE(status) && bt_present) {
110 printk(KERN_INFO "Detected Toshiba ACPI Bluetooth device - " 111 pr_info("Detected Toshiba ACPI Bluetooth device - "
111 "installing RFKill handler\n"); 112 "installing RFKill handler\n");
112 result = toshiba_bluetooth_enable(device->handle); 113 result = toshiba_bluetooth_enable(device->handle);
113 } 114 }
114 115
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 05cc79672a8b..f23d5a84e7b1 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -486,16 +486,16 @@ static void wmi_dump_wdg(const struct guid_block *g)
486 pr_info("\tnotify_id: %02X\n", g->notify_id); 486 pr_info("\tnotify_id: %02X\n", g->notify_id);
487 pr_info("\treserved: %02X\n", g->reserved); 487 pr_info("\treserved: %02X\n", g->reserved);
488 pr_info("\tinstance_count: %d\n", g->instance_count); 488 pr_info("\tinstance_count: %d\n", g->instance_count);
489 pr_info("\tflags: %#x ", g->flags); 489 pr_info("\tflags: %#x", g->flags);
490 if (g->flags) { 490 if (g->flags) {
491 if (g->flags & ACPI_WMI_EXPENSIVE) 491 if (g->flags & ACPI_WMI_EXPENSIVE)
492 pr_cont("ACPI_WMI_EXPENSIVE "); 492 pr_cont(" ACPI_WMI_EXPENSIVE");
493 if (g->flags & ACPI_WMI_METHOD) 493 if (g->flags & ACPI_WMI_METHOD)
494 pr_cont("ACPI_WMI_METHOD "); 494 pr_cont(" ACPI_WMI_METHOD");
495 if (g->flags & ACPI_WMI_STRING) 495 if (g->flags & ACPI_WMI_STRING)
496 pr_cont("ACPI_WMI_STRING "); 496 pr_cont(" ACPI_WMI_STRING");
497 if (g->flags & ACPI_WMI_EVENT) 497 if (g->flags & ACPI_WMI_EVENT)
498 pr_cont("ACPI_WMI_EVENT "); 498 pr_cont(" ACPI_WMI_EVENT");
499 } 499 }
500 pr_cont("\n"); 500 pr_cont("\n");
501 501
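
The flags hunk is purely cosmetic: moving the separator in front of each name keeps the pr_cont() line from ending in a blank. Illustrative output for g->flags == 0x2 (value assumed for the example):

    /* before: "\tflags: 0x2 ACPI_WMI_METHOD "   <- trailing space
     * after:  "\tflags: 0x2 ACPI_WMI_METHOD"
     */
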
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index c1372ed9d2e9..fad153dc0355 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -11,6 +11,8 @@
11 * your option) any later version. 11 * your option) any later version.
12 */ 12 */
13 13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
14#include <linux/kernel.h> 16#include <linux/kernel.h>
15#include <linux/module.h> 17#include <linux/module.h>
16#include <linux/init.h> 18#include <linux/init.h>
@@ -20,7 +22,6 @@
20#include <acpi/acpi_drivers.h> 22#include <acpi/acpi_drivers.h>
21 23
22#define MODULE_NAME "xo15-ebook" 24#define MODULE_NAME "xo15-ebook"
23#define PREFIX MODULE_NAME ": "
24 25
25#define XO15_EBOOK_CLASS MODULE_NAME 26#define XO15_EBOOK_CLASS MODULE_NAME
26#define XO15_EBOOK_TYPE_UNKNOWN 0x00 27#define XO15_EBOOK_TYPE_UNKNOWN 0x00
@@ -105,7 +106,7 @@ static int ebook_switch_add(struct acpi_device *device)
105 class = acpi_device_class(device); 106 class = acpi_device_class(device);
106 107
107 if (strcmp(hid, XO15_EBOOK_HID)) { 108 if (strcmp(hid, XO15_EBOOK_HID)) {
108 printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid); 109 pr_err("Unsupported hid [%s]\n", hid);
109 error = -ENODEV; 110 error = -ENODEV;
110 goto err_free_input; 111 goto err_free_input;
111 } 112 }
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 3b7e83d2dab4..d5ff142c93a2 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
486 flash_error_table[i].reason); 486 flash_error_table[i].reason);
487} 487}
488 488
489static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO, 489static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
490 asd_show_update_bios, asd_store_update_bios); 490 asd_show_update_bios, asd_store_update_bios);
491 491
492static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha) 492static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
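
The single-character change above carries the whole fix; spelled out in octal (standard mode-bit values):

    /* S_IRUGO = 0444   read:  user | group | other
     * S_IWUGO = 0222   write: user | group | other
     * S_IWUSR = 0200   write: owner (root) only
     *
     * S_IRUGO|S_IWUGO = 0666: any local user could trigger a BIOS flash
     * S_IRUGO|S_IWUSR = 0644: writes restricted to root, reads unchanged
     */
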
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index c1f72c49196f..6c7e0339dda4 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -56,6 +56,8 @@ BFA_TRC_FILE(CNA, IOC);
56#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) 56#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
57#define bfa_ioc_notify_fail(__ioc) \ 57#define bfa_ioc_notify_fail(__ioc) \
58 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc)) 58 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
59#define bfa_ioc_sync_start(__ioc) \
60 ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
59#define bfa_ioc_sync_join(__ioc) \ 61#define bfa_ioc_sync_join(__ioc) \
60 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc)) 62 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
61#define bfa_ioc_sync_leave(__ioc) \ 63#define bfa_ioc_sync_leave(__ioc) \
@@ -647,7 +649,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
647 switch (event) { 649 switch (event) {
648 case IOCPF_E_SEMLOCKED: 650 case IOCPF_E_SEMLOCKED:
649 if (bfa_ioc_firmware_lock(ioc)) { 651 if (bfa_ioc_firmware_lock(ioc)) {
650 if (bfa_ioc_sync_complete(ioc)) { 652 if (bfa_ioc_sync_start(ioc)) {
651 iocpf->retry_count = 0; 653 iocpf->retry_count = 0;
652 bfa_ioc_sync_join(ioc); 654 bfa_ioc_sync_join(ioc);
653 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); 655 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index ec9cf08b0e7f..c85182a704fb 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -263,6 +263,7 @@ struct bfa_ioc_hwif_s {
263 bfa_boolean_t msix); 263 bfa_boolean_t msix);
264 void (*ioc_notify_fail) (struct bfa_ioc_s *ioc); 264 void (*ioc_notify_fail) (struct bfa_ioc_s *ioc);
265 void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc); 265 void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc);
266 bfa_boolean_t (*ioc_sync_start) (struct bfa_ioc_s *ioc);
266 void (*ioc_sync_join) (struct bfa_ioc_s *ioc); 267 void (*ioc_sync_join) (struct bfa_ioc_s *ioc);
267 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc); 268 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
268 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc); 269 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index e4a0713185b6..89ae4c8f95a2 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -32,6 +32,7 @@ static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
32static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); 32static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
33static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc); 33static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
34static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc); 34static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
35static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc);
35static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc); 36static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
36static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc); 37static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
37static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc); 38static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
@@ -53,6 +54,7 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
53 hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set; 54 hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
54 hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail; 55 hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
55 hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset; 56 hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
57 hwif_cb.ioc_sync_start = bfa_ioc_cb_sync_start;
56 hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join; 58 hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
57 hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave; 59 hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
58 hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack; 60 hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
@@ -195,6 +197,15 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
195} 197}
196 198
197/* 199/*
200 * Synchronized IOC failure processing routines
201 */
202static bfa_boolean_t
203bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc)
204{
205 return bfa_ioc_cb_sync_complete(ioc);
206}
207
208/*
198 * Cleanup hw semaphore and usecnt registers 209 * Cleanup hw semaphore and usecnt registers
199 */ 210 */
200static void 211static void
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 008d129ddfcd..93612520f0d2 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
41static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); 41static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
42static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc); 42static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
43static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc); 43static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
44static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
44static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc); 45static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
45static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc); 46static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
46static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc); 47static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
@@ -62,6 +63,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
62 hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; 63 hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
63 hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail; 64 hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
64 hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; 65 hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
66 hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
65 hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join; 67 hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
66 hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave; 68 hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
67 hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack; 69 hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -351,6 +353,30 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
351 writel(1, ioc->ioc_regs.ioc_sem_reg); 353 writel(1, ioc->ioc_regs.ioc_sem_reg);
352} 354}
353 355
356static bfa_boolean_t
357bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
358{
359 uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
360 uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
361
362 /*
363 * Driver load time. If the sync required bit for this PCI fn
364 * is set, it is due to an unclean exit by the driver for this
365 * PCI fn in the previous incarnation. Whoever comes here first
366 * should clean it up, no matter which PCI fn.
367 */
368
369 if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
370 writel(0, ioc->ioc_regs.ioc_fail_sync);
371 writel(1, ioc->ioc_regs.ioc_usage_reg);
372 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
373 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
374 return BFA_TRUE;
375 }
376
377 return bfa_ioc_ct_sync_complete(ioc);
378}
379
354/* 380/*
355 * Synchronized IOC failure processing routines 381 * Synchronized IOC failure processing routines
356 */ 382 */
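
Taken together, the bfa hunks add one more op to the per-ASIC hardware-interface table and route the firmware-check state through it; in outline:

    /* New op in struct bfa_ioc_hwif_s:
     *     bfa_boolean_t (*ioc_sync_start)(struct bfa_ioc_s *ioc);
     *
     * reached from bfa_iocpf_sm_fwcheck() via the macro:
     *     #define bfa_ioc_sync_start(__ioc) \
     *             ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
     *
     * The _cb flavor simply forwards to the old sync_complete() test;
     * the _ct flavor first repairs state left by an unclean driver exit
     * (sync-required bit set for this PCI fn) before doing the same.
     */
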
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index cfd59023227b..6bdd25a93db9 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -66,11 +66,11 @@
66#define BD_SPLIT_SIZE 32768 66#define BD_SPLIT_SIZE 32768
67 67
68/* min, max & default values for SQ/RQ/CQ size, configurable via modparam */ 68/* min, max & default values for SQ/RQ/CQ size, configurable via modparam */
69#define BNX2I_SQ_WQES_MIN 16 69#define BNX2I_SQ_WQES_MIN 16
70#define BNX2I_570X_SQ_WQES_MAX 128 70#define BNX2I_570X_SQ_WQES_MAX 128
71#define BNX2I_5770X_SQ_WQES_MAX 512 71#define BNX2I_5770X_SQ_WQES_MAX 512
72#define BNX2I_570X_SQ_WQES_DEFAULT 128 72#define BNX2I_570X_SQ_WQES_DEFAULT 128
73#define BNX2I_5770X_SQ_WQES_DEFAULT 256 73#define BNX2I_5770X_SQ_WQES_DEFAULT 128
74 74
75#define BNX2I_570X_CQ_WQES_MAX 128 75#define BNX2I_570X_CQ_WQES_MAX 128
76#define BNX2I_5770X_CQ_WQES_MAX 512 76#define BNX2I_5770X_CQ_WQES_MAX 512
@@ -115,6 +115,7 @@
115#define BNX2X_MAX_CQS 8 115#define BNX2X_MAX_CQS 8
116 116
117#define CNIC_ARM_CQE 1 117#define CNIC_ARM_CQE 1
118#define CNIC_ARM_CQE_FP 2
118#define CNIC_DISARM_CQE 0 119#define CNIC_DISARM_CQE 0
119 120
120#define REG_RD(__hba, offset) \ 121#define REG_RD(__hba, offset) \
@@ -666,7 +667,9 @@ enum {
666 * after HBA reset is completed by bnx2i/cnic/bnx2 667 * after HBA reset is completed by bnx2i/cnic/bnx2
667 * modules 668 * modules
668 * @state: tracks offload connection state machine 669 * @state: tracks offload connection state machine
669 * @teardown_mode: indicates if conn teardown is abortive or orderly 670 * @timestamp: tracks the start time when the ep begins to connect
671 * @num_active_cmds: tracks the number of outstanding commands for this ep
672 * @ec_shift: the amount of shift as part of the event coal calc
670 * @qp: QP information 673 * @qp: QP information
671 * @ids: contains chip allocated *context id* & driver assigned 674 * @ids: contains chip allocated *context id* & driver assigned
672 * *iscsi cid* 675 * *iscsi cid*
@@ -685,6 +688,7 @@ struct bnx2i_endpoint {
685 u32 state; 688 u32 state;
686 unsigned long timestamp; 689 unsigned long timestamp;
687 int num_active_cmds; 690 int num_active_cmds;
691 u32 ec_shift;
688 692
689 struct qp_info qp; 693 struct qp_info qp;
690 struct ep_handles ids; 694 struct ep_handles ids;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index f0b89513faed..5c54a2d9b834 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -138,7 +138,6 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
138 u16 next_index; 138 u16 next_index;
139 u32 num_active_cmds; 139 u32 num_active_cmds;
140 140
141
142 /* Coalesce CQ entries only on 10G devices */ 141 /* Coalesce CQ entries only on 10G devices */
143 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) 142 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
144 return; 143 return;
@@ -148,16 +147,19 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
148 * interrupts and other unwanted results 147 * interrupts and other unwanted results
149 */ 148 */
150 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt; 149 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
151 if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
152 return;
153 150
154 if (action == CNIC_ARM_CQE) { 151 if (action != CNIC_ARM_CQE_FP)
152 if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
153 return;
154
155 if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) {
155 num_active_cmds = ep->num_active_cmds; 156 num_active_cmds = ep->num_active_cmds;
156 if (num_active_cmds <= event_coal_min) 157 if (num_active_cmds <= event_coal_min)
157 next_index = 1; 158 next_index = 1;
158 else 159 else
159 next_index = event_coal_min + 160 next_index = event_coal_min +
160 (num_active_cmds - event_coal_min) / event_coal_div; 161 ((num_active_cmds - event_coal_min) >>
162 ep->ec_shift);
161 if (!next_index) 163 if (!next_index)
162 next_index = 1; 164 next_index = 1;
163 cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1; 165 cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
@@ -1274,6 +1276,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
1274 iscsi_init.dummy_buffer_addr_hi = 1276 iscsi_init.dummy_buffer_addr_hi =
1275 (u32) ((u64) hba->dummy_buf_dma >> 32); 1277 (u32) ((u64) hba->dummy_buf_dma >> 32);
1276 1278
1279 hba->num_ccell = hba->max_sqes >> 1;
1277 hba->ctx_ccell_tasks = 1280 hba->ctx_ccell_tasks =
1278 ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); 1281 ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
1279 iscsi_init.num_ccells_per_conn = hba->num_ccell; 1282 iscsi_init.num_ccells_per_conn = hba->num_ccell;
@@ -1934,7 +1937,6 @@ cqe_out:
1934 qp->cq_cons_idx++; 1937 qp->cq_cons_idx++;
1935 } 1938 }
1936 } 1939 }
1937 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
1938} 1940}
1939 1941
1940/** 1942/**
@@ -1948,22 +1950,23 @@ cqe_out:
1948static void bnx2i_fastpath_notification(struct bnx2i_hba *hba, 1950static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
1949 struct iscsi_kcqe *new_cqe_kcqe) 1951 struct iscsi_kcqe *new_cqe_kcqe)
1950{ 1952{
1951 struct bnx2i_conn *conn; 1953 struct bnx2i_conn *bnx2i_conn;
1952 u32 iscsi_cid; 1954 u32 iscsi_cid;
1953 1955
1954 iscsi_cid = new_cqe_kcqe->iscsi_conn_id; 1956 iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
1955 conn = bnx2i_get_conn_from_id(hba, iscsi_cid); 1957 bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1956 1958
1957 if (!conn) { 1959 if (!bnx2i_conn) {
1958 printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid); 1960 printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
1959 return; 1961 return;
1960 } 1962 }
1961 if (!conn->ep) { 1963 if (!bnx2i_conn->ep) {
1962 printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid); 1964 printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
1963 return; 1965 return;
1964 } 1966 }
1965 1967 bnx2i_process_new_cqes(bnx2i_conn);
1966 bnx2i_process_new_cqes(conn); 1968 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
1969 bnx2i_process_new_cqes(bnx2i_conn);
1967} 1970}
1968 1971
1969 1972
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 1d24a2819736..6adbdc34a9a5 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -244,7 +244,7 @@ void bnx2i_stop(void *handle)
244 wait_event_interruptible_timeout(hba->eh_wait, 244 wait_event_interruptible_timeout(hba->eh_wait,
245 (list_empty(&hba->ep_ofld_list) && 245 (list_empty(&hba->ep_ofld_list) &&
246 list_empty(&hba->ep_destroy_list)), 246 list_empty(&hba->ep_destroy_list)),
247 10 * HZ); 247 2 * HZ);
248 /* Wait for all endpoints to be torn down, Chip will be reset once 248 /* Wait for all endpoints to be torn down, Chip will be reset once
249 * control returns to network driver. So it is required to cleanup and 249 * control returns to network driver. So it is required to cleanup and
250 * release all connection resources before returning from this routine. 250 * release all connection resources before returning from this routine.
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1809f9ccc4ce..041928b23cb0 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -379,6 +379,7 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
379{ 379{
380 struct iscsi_endpoint *ep; 380 struct iscsi_endpoint *ep;
381 struct bnx2i_endpoint *bnx2i_ep; 381 struct bnx2i_endpoint *bnx2i_ep;
382 u32 ec_div;
382 383
383 ep = iscsi_create_endpoint(sizeof(*bnx2i_ep)); 384 ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
384 if (!ep) { 385 if (!ep) {
@@ -393,6 +394,11 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
393 bnx2i_ep->ep_iscsi_cid = (u16) -1; 394 bnx2i_ep->ep_iscsi_cid = (u16) -1;
394 bnx2i_ep->hba = hba; 395 bnx2i_ep->hba = hba;
395 bnx2i_ep->hba_age = hba->age; 396 bnx2i_ep->hba_age = hba->age;
397
398 ec_div = event_coal_div;
399 while (ec_div >>= 1)
400 bnx2i_ep->ec_shift += 1;
401
396 hba->ofld_conns_active++; 402 hba->ofld_conns_active++;
397 init_waitqueue_head(&bnx2i_ep->ofld_wait); 403 init_waitqueue_head(&bnx2i_ep->ofld_wait);
398 return ep; 404 return ep;
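
The shift loop above is an integer log2, leaving ec_shift == floor(log2(event_coal_div)) so the arming code can swap a division for a shift. Checking the arithmetic with assumed values:

    /* event_coal_div == 4  ->  ec_shift == 2
     * num_active_cmds == 36, event_coal_min == 4:
     *     old: 4 + (36 - 4) / 4             == 12
     *     new: 4 + ((36 - 4) >> ec_shift)   == 12   (no integer divide)
     * A non-power-of-two divisor is effectively rounded down to the
     * nearest power of two.
     */
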
@@ -858,7 +864,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
858 mutex_init(&hba->net_dev_lock); 864 mutex_init(&hba->net_dev_lock);
859 init_waitqueue_head(&hba->eh_wait); 865 init_waitqueue_head(&hba->eh_wait);
860 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { 866 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
861 hba->hba_shutdown_tmo = 20 * HZ; 867 hba->hba_shutdown_tmo = 30 * HZ;
862 hba->conn_teardown_tmo = 20 * HZ; 868 hba->conn_teardown_tmo = 20 * HZ;
863 hba->conn_ctx_destroy_tmo = 6 * HZ; 869 hba->conn_ctx_destroy_tmo = 6 * HZ;
864 } else { /* 5706/5708/5709 */ 870 } else { /* 5706/5708/5709 */
@@ -1208,6 +1214,9 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
1208 struct bnx2i_cmd *cmd = task->dd_data; 1214 struct bnx2i_cmd *cmd = task->dd_data;
1209 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr; 1215 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
1210 1216
1217 if (bnx2i_conn->ep->num_active_cmds + 1 > hba->max_sqes)
1218 return -ENOMEM;
1219
1211 /* 1220 /*
1212 * If there is no scsi_cmnd this must be a mgmt task 1221 * If there is no scsi_cmnd this must be a mgmt task
1213 */ 1222 */
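
The new guard bounds outstanding commands by the SQ depth instead of leaning on cmd_per_lun alone. A hypothetical sequence with max_sqes == 128 (assuming libiscsi treats a non-zero return from the xmit hook as back-pressure and requeues the task):

    /* num_active_cmds == 128, new task arrives:
     *     128 + 1 > 128  ->  bnx2i_task_xmit() returns -ENOMEM
     *     -> the task is retried later instead of overflowing the SQ
     */
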
@@ -2156,7 +2165,7 @@ static struct scsi_host_template bnx2i_host_template = {
2156 .change_queue_depth = iscsi_change_queue_depth, 2165 .change_queue_depth = iscsi_change_queue_depth,
2157 .can_queue = 1024, 2166 .can_queue = 1024,
2158 .max_sectors = 127, 2167 .max_sectors = 127,
2159 .cmd_per_lun = 32, 2168 .cmd_per_lun = 24,
2160 .this_id = -1, 2169 .this_id = -1,
2161 .use_clustering = ENABLE_CLUSTERING, 2170 .use_clustering = ENABLE_CLUSTERING,
2162 .sg_tablesize = ISCSI_MAX_BDS_PER_CMD, 2171 .sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cc23bd9480b2..155d7b9bdeae 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -137,6 +137,7 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
137static int fcoe_vport_disable(struct fc_vport *, bool disable); 137static int fcoe_vport_disable(struct fc_vport *, bool disable);
138static void fcoe_set_vport_symbolic_name(struct fc_vport *); 138static void fcoe_set_vport_symbolic_name(struct fc_vport *);
139static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *); 139static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
140static int fcoe_validate_vport_create(struct fc_vport *);
140 141
141static struct libfc_function_template fcoe_libfc_fcn_templ = { 142static struct libfc_function_template fcoe_libfc_fcn_templ = {
142 .frame_send = fcoe_xmit, 143 .frame_send = fcoe_xmit,
@@ -2351,6 +2352,17 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
2351 struct fcoe_interface *fcoe = port->priv; 2352 struct fcoe_interface *fcoe = port->priv;
2352 struct net_device *netdev = fcoe->netdev; 2353 struct net_device *netdev = fcoe->netdev;
2353 struct fc_lport *vn_port; 2354 struct fc_lport *vn_port;
2355 int rc;
2356 char buf[32];
2357
2358 rc = fcoe_validate_vport_create(vport);
2359 if (rc) {
2360 wwn_to_str(vport->port_name, buf, sizeof(buf));
2361 printk(KERN_ERR "fcoe: Failed to create vport, "
2362 "WWPN (0x%s) already exists\n",
2363 buf);
2364 return rc;
2365 }
2354 2366
2355 mutex_lock(&fcoe_config_mutex); 2367 mutex_lock(&fcoe_config_mutex);
2356 vn_port = fcoe_if_create(fcoe, &vport->dev, 1); 2368 vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
@@ -2497,3 +2509,49 @@ static void fcoe_set_port_id(struct fc_lport *lport,
2497 if (fp && fc_frame_payload_op(fp) == ELS_FLOGI) 2509 if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
2498 fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp); 2510 fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
2499} 2511}
2512
2513/**
2514 * fcoe_validate_vport_create() - Validate a vport before creating it
2515 * @vport: NPIV port to be created
2516 *
2517 * This routine is meant to add validation for a vport before creating it
2518 * via fcoe_vport_create().
2519 * Current validations are:
2520 * - WWPN supplied is unique for given lport
2521 *
2522 *
2523 */
2524static int fcoe_validate_vport_create(struct fc_vport *vport)
2525{
2526 struct Scsi_Host *shost = vport_to_shost(vport);
2527 struct fc_lport *n_port = shost_priv(shost);
2528 struct fc_lport *vn_port;
2529 int rc = 0;
2530 char buf[32];
2531
2532 mutex_lock(&n_port->lp_mutex);
2533
2534 wwn_to_str(vport->port_name, buf, sizeof(buf));
2535 /* Check that the wwpn is not the same as that of the lport */
2536 if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
2537 FCOE_DBG("vport WWPN 0x%s is same as that of the "
2538 "base port WWPN\n", buf);
2539 rc = -EINVAL;
2540 goto out;
2541 }
2542
2543 /* Check if there is any existing vport with same wwpn */
2544 list_for_each_entry(vn_port, &n_port->vports, list) {
2545 if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
2546 FCOE_DBG("vport with given WWPN 0x%s already "
2547 "exists\n", buf);
2548 rc = -EINVAL;
2549 break;
2550 }
2551 }
2552
2553out:
2554 mutex_unlock(&n_port->lp_mutex);
2555
2556 return rc;
2557}
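
Both duplicate checks compare the raw u64s byte-for-byte, which is safe because wwpn and port_name share the same type and representation; an equivalent, arguably clearer form:

    /* same test as the memcmp() above */
    if (vn_port->wwpn == vport->port_name)
            rc = -EINVAL;

The list walk stays under n_port->lp_mutex, matching the locking used by the CVL rework in fcoe_ctlr.c below.
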
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 408a6fd78fb4..c4a93993c0cf 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -99,4 +99,14 @@ static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
99 ((struct fcoe_port *)lport_priv(lport))->priv)->netdev; 99 ((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
100} 100}
101 101
102static inline void wwn_to_str(u64 wwn, char *buf, int len)
103{
104 u8 wwpn[8];
105
106 u64_to_wwn(wwn, wwpn);
107 snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
108 wwpn[0], wwpn[1], wwpn[2], wwpn[3],
109 wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
110}
111
102#endif /* _FCOE_H_ */ 112#endif /* _FCOE_H_ */
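
A WWN formats to 16 hex digits plus a terminating NUL, so the 32-byte buffers at the call sites above have ample headroom. A hypothetical call:

    char buf[32];

    wwn_to_str(0x2000001b3284a000ULL, buf, sizeof(buf));
    /* buf now holds "2000001b3284a000" */
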
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 229e4af5508a..c74c4b8e71ef 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1173,7 +1173,9 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
1173 struct fc_lport *lport = fip->lp; 1173 struct fc_lport *lport = fip->lp;
1174 struct fc_lport *vn_port = NULL; 1174 struct fc_lport *vn_port = NULL;
1175 u32 desc_mask; 1175 u32 desc_mask;
1176 int is_vn_port = 0; 1176 int num_vlink_desc;
1177 int reset_phys_port = 0;
1178 struct fip_vn_desc **vlink_desc_arr = NULL;
1177 1179
1178 LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n"); 1180 LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
1179 1181
@@ -1183,70 +1185,73 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
1183 /* 1185 /*
1184 * mask of required descriptors. Validating each one clears its bit. 1186 * mask of required descriptors. Validating each one clears its bit.
1185 */ 1187 */
1186 desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_VN_ID); 1188 desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
1187 1189
1188 rlen = ntohs(fh->fip_dl_len) * FIP_BPW; 1190 rlen = ntohs(fh->fip_dl_len) * FIP_BPW;
1189 desc = (struct fip_desc *)(fh + 1); 1191 desc = (struct fip_desc *)(fh + 1);
1192
1193 /*
1194 * Actually need to subtract 'sizeof(*mp) - sizeof(*wp)' from 'rlen'
1195 * before determining max Vx_Port descriptor but a buggy FCF could have
1196 * omited either or both MAC Address and Name Identifier descriptors
1197 */
1198 num_vlink_desc = rlen / sizeof(*vp);
1199 if (num_vlink_desc)
1200 vlink_desc_arr = kmalloc(sizeof(vp) * num_vlink_desc,
1201 GFP_ATOMIC);
1202 if (!vlink_desc_arr)
1203 return;
1204 num_vlink_desc = 0;
1205
1190 while (rlen >= sizeof(*desc)) { 1206 while (rlen >= sizeof(*desc)) {
1191 dlen = desc->fip_dlen * FIP_BPW; 1207 dlen = desc->fip_dlen * FIP_BPW;
1192 if (dlen > rlen) 1208 if (dlen > rlen)
1193 return; 1209 goto err;
1194 /* Drop CVL if there are duplicate critical descriptors */ 1210 /* Drop CVL if there are duplicate critical descriptors */
1195 if ((desc->fip_dtype < 32) && 1211 if ((desc->fip_dtype < 32) &&
1212 (desc->fip_dtype != FIP_DT_VN_ID) &&
1196 !(desc_mask & 1U << desc->fip_dtype)) { 1213 !(desc_mask & 1U << desc->fip_dtype)) {
1197 LIBFCOE_FIP_DBG(fip, "Duplicate Critical " 1214 LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
1198 "Descriptors in FIP CVL\n"); 1215 "Descriptors in FIP CVL\n");
1199 return; 1216 goto err;
1200 } 1217 }
1201 switch (desc->fip_dtype) { 1218 switch (desc->fip_dtype) {
1202 case FIP_DT_MAC: 1219 case FIP_DT_MAC:
1203 mp = (struct fip_mac_desc *)desc; 1220 mp = (struct fip_mac_desc *)desc;
1204 if (dlen < sizeof(*mp)) 1221 if (dlen < sizeof(*mp))
1205 return; 1222 goto err;
1206 if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac)) 1223 if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac))
1207 return; 1224 goto err;
1208 desc_mask &= ~BIT(FIP_DT_MAC); 1225 desc_mask &= ~BIT(FIP_DT_MAC);
1209 break; 1226 break;
1210 case FIP_DT_NAME: 1227 case FIP_DT_NAME:
1211 wp = (struct fip_wwn_desc *)desc; 1228 wp = (struct fip_wwn_desc *)desc;
1212 if (dlen < sizeof(*wp)) 1229 if (dlen < sizeof(*wp))
1213 return; 1230 goto err;
1214 if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name) 1231 if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name)
1215 return; 1232 goto err;
1216 desc_mask &= ~BIT(FIP_DT_NAME); 1233 desc_mask &= ~BIT(FIP_DT_NAME);
1217 break; 1234 break;
1218 case FIP_DT_VN_ID: 1235 case FIP_DT_VN_ID:
1219 vp = (struct fip_vn_desc *)desc; 1236 vp = (struct fip_vn_desc *)desc;
1220 if (dlen < sizeof(*vp)) 1237 if (dlen < sizeof(*vp))
1221 return; 1238 goto err;
1222 if (compare_ether_addr(vp->fd_mac, 1239 vlink_desc_arr[num_vlink_desc++] = vp;
1223 fip->get_src_addr(lport)) == 0 && 1240 vn_port = fc_vport_id_lookup(lport,
1224 get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn && 1241 ntoh24(vp->fd_fc_id));
1225 ntoh24(vp->fd_fc_id) == lport->port_id) { 1242 if (vn_port && (vn_port == lport)) {
1226 desc_mask &= ~BIT(FIP_DT_VN_ID); 1243 mutex_lock(&fip->ctlr_mutex);
1227 break; 1244 per_cpu_ptr(lport->dev_stats,
1245 get_cpu())->VLinkFailureCount++;
1246 put_cpu();
1247 fcoe_ctlr_reset(fip);
1248 mutex_unlock(&fip->ctlr_mutex);
1228 } 1249 }
1229 /* check if clr_vlink is for NPIV port */
1230 mutex_lock(&lport->lp_mutex);
1231 list_for_each_entry(vn_port, &lport->vports, list) {
1232 if (compare_ether_addr(vp->fd_mac,
1233 fip->get_src_addr(vn_port)) == 0 &&
1234 (get_unaligned_be64(&vp->fd_wwpn)
1235 == vn_port->wwpn) &&
1236 (ntoh24(vp->fd_fc_id) ==
1237 fc_host_port_id(vn_port->host))) {
1238 desc_mask &= ~BIT(FIP_DT_VN_ID);
1239 is_vn_port = 1;
1240 break;
1241 }
1242 }
1243 mutex_unlock(&lport->lp_mutex);
1244
1245 break; 1250 break;
1246 default: 1251 default:
1247 /* standard says ignore unknown descriptors >= 128 */ 1252 /* standard says ignore unknown descriptors >= 128 */
1248 if (desc->fip_dtype < FIP_DT_VENDOR_BASE) 1253 if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
1249 return; 1254 goto err;
1250 break; 1255 break;
1251 } 1256 }
1252 desc = (struct fip_desc *)((char *)desc + dlen); 1257 desc = (struct fip_desc *)((char *)desc + dlen);
@@ -1256,26 +1261,68 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
1256 /* 1261 /*
1257 * reset only if all required descriptors were present and valid. 1262 * reset only if all required descriptors were present and valid.
1258 */ 1263 */
1259 if (desc_mask) { 1264 if (desc_mask)
1260 LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n", 1265 LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n",
1261 desc_mask); 1266 desc_mask);
1267 else if (!num_vlink_desc) {
1268 LIBFCOE_FIP_DBG(fip, "CVL: no Vx_Port descriptor found\n");
1269 /*
1270 * No Vx_Port description. Clear all NPIV ports,
1271 * followed by physical port
1272 */
1273 mutex_lock(&lport->lp_mutex);
1274 list_for_each_entry(vn_port, &lport->vports, list)
1275 fc_lport_reset(vn_port);
1276 mutex_unlock(&lport->lp_mutex);
1277
1278 mutex_lock(&fip->ctlr_mutex);
1279 per_cpu_ptr(lport->dev_stats,
1280 get_cpu())->VLinkFailureCount++;
1281 put_cpu();
1282 fcoe_ctlr_reset(fip);
1283 mutex_unlock(&fip->ctlr_mutex);
1284
1285 fc_lport_reset(fip->lp);
1286 fcoe_ctlr_solicit(fip, NULL);
1262 } else { 1287 } else {
1263 LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n"); 1288 int i;
1264 1289
1265 if (is_vn_port) 1290 LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
1266 fc_lport_reset(vn_port); 1291 for (i = 0; i < num_vlink_desc; i++) {
1267 else { 1292 vp = vlink_desc_arr[i];
1268 mutex_lock(&fip->ctlr_mutex); 1293 vn_port = fc_vport_id_lookup(lport,
1269 per_cpu_ptr(lport->dev_stats, 1294 ntoh24(vp->fd_fc_id));
1270 get_cpu())->VLinkFailureCount++; 1295 if (!vn_port)
1271 put_cpu(); 1296 continue;
1272 fcoe_ctlr_reset(fip); 1297
1273 mutex_unlock(&fip->ctlr_mutex); 1298 /*
1299 * 'port_id' is already validated, check MAC address and
1300 * wwpn
1301 */
1302 if (compare_ether_addr(fip->get_src_addr(vn_port),
1303 vp->fd_mac) != 0 ||
1304 get_unaligned_be64(&vp->fd_wwpn) !=
1305 vn_port->wwpn)
1306 continue;
1307
1308 if (vn_port == lport)
1309 /*
1310 * Physical port, defer processing till all
1311 * listed NPIV ports are cleared
1312 */
1313 reset_phys_port = 1;
1314 else /* NPIV port */
1315 fc_lport_reset(vn_port);
1316 }
1274 1317
1318 if (reset_phys_port) {
1275 fc_lport_reset(fip->lp); 1319 fc_lport_reset(fip->lp);
1276 fcoe_ctlr_solicit(fip, NULL); 1320 fcoe_ctlr_solicit(fip, NULL);
1277 } 1321 }
1278 } 1322 }
1323
1324err:
1325 kfree(vlink_desc_arr);
1279} 1326}
1280 1327
1281/** 1328/**
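
The reworked clear-virtual-link handler is easier to follow as an outline:

    /* 1. Validate MAC/Name descriptors as before, but collect every
     *    Vx_Port (FIP_DT_VN_ID) descriptor into vlink_desc_arr[].
     * 2. No Vx_Port descriptor at all: reset every NPIV port, then the
     *    physical port (bump VLinkFailureCount, fcoe_ctlr_reset(),
     *    re-solicit).
     * 3. Otherwise reset exactly the ports listed, matching each
     *    descriptor by port_id (fc_vport_id_lookup()), MAC address and
     *    WWPN, and deferring the physical port until the NPIV ports
     *    are done.
     * All exit paths funnel through the new err label, so the
     * kmalloc'ed array is always freed.
     */
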
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index f81f77c8569e..41068e8748e7 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -544,16 +544,6 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
544 struct fcoe_transport *ft = NULL; 544 struct fcoe_transport *ft = NULL;
545 enum fip_state fip_mode = (enum fip_state)(long)kp->arg; 545 enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
546 546
547#ifdef CONFIG_LIBFCOE_MODULE
548 /*
549 * Make sure the module has been initialized, and is not about to be
550 * removed. Module parameter sysfs files are writable before the
551 * module_init function is called and after module_exit.
552 */
553 if (THIS_MODULE->state != MODULE_STATE_LIVE)
554 goto out_nodev;
555#endif
556
557 mutex_lock(&ft_mutex); 547 mutex_lock(&ft_mutex);
558 548
559 netdev = fcoe_if_to_netdev(buffer); 549 netdev = fcoe_if_to_netdev(buffer);
@@ -618,16 +608,6 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
618 struct net_device *netdev = NULL; 608 struct net_device *netdev = NULL;
619 struct fcoe_transport *ft = NULL; 609 struct fcoe_transport *ft = NULL;
620 610
621#ifdef CONFIG_LIBFCOE_MODULE
622 /*
623 * Make sure the module has been initialized, and is not about to be
624 * removed. Module parameter sysfs files are writable before the
625 * module_init function is called and after module_exit.
626 */
627 if (THIS_MODULE->state != MODULE_STATE_LIVE)
628 goto out_nodev;
629#endif
630
631 mutex_lock(&ft_mutex); 611 mutex_lock(&ft_mutex);
632 612
633 netdev = fcoe_if_to_netdev(buffer); 613 netdev = fcoe_if_to_netdev(buffer);
@@ -672,16 +652,6 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
672 struct net_device *netdev = NULL; 652 struct net_device *netdev = NULL;
673 struct fcoe_transport *ft = NULL; 653 struct fcoe_transport *ft = NULL;
674 654
675#ifdef CONFIG_LIBFCOE_MODULE
676 /*
677 * Make sure the module has been initialized, and is not about to be
678 * removed. Module parameter sysfs files are writable before the
679 * module_init function is called and after module_exit.
680 */
681 if (THIS_MODULE->state != MODULE_STATE_LIVE)
682 goto out_nodev;
683#endif
684
685 mutex_lock(&ft_mutex); 655 mutex_lock(&ft_mutex);
686 656
687 netdev = fcoe_if_to_netdev(buffer); 657 netdev = fcoe_if_to_netdev(buffer);
@@ -720,16 +690,6 @@ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
720 struct net_device *netdev = NULL; 690 struct net_device *netdev = NULL;
721 struct fcoe_transport *ft = NULL; 691 struct fcoe_transport *ft = NULL;
722 692
723#ifdef CONFIG_LIBFCOE_MODULE
724 /*
725 * Make sure the module has been initialized, and is not about to be
726 * removed. Module parameter sysfs files are writable before the
727 * module_init function is called and after module_exit.
728 */
729 if (THIS_MODULE->state != MODULE_STATE_LIVE)
730 goto out_nodev;
731#endif
732
733 mutex_lock(&ft_mutex); 693 mutex_lock(&ft_mutex);
734 694
735 netdev = fcoe_if_to_netdev(buffer); 695 netdev = fcoe_if_to_netdev(buffer);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 12868ca46110..888086c4e709 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5149,21 +5149,21 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5149 5149
5150 if (ipr_cmd != NULL) { 5150 if (ipr_cmd != NULL) {
5151 /* Clear the PCI interrupt */ 5151 /* Clear the PCI interrupt */
5152 num_hrrq = 0;
5152 do { 5153 do {
5153 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); 5154 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5154 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5155 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5155 } while (int_reg & IPR_PCII_HRRQ_UPDATED && 5156 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5156 num_hrrq++ < IPR_MAX_HRRQ_RETRIES); 5157 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5157 5158
5158 if (int_reg & IPR_PCII_HRRQ_UPDATED) {
5159 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
5160 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5161 return IRQ_HANDLED;
5162 }
5163
5164 } else if (rc == IRQ_NONE && irq_none == 0) { 5159 } else if (rc == IRQ_NONE && irq_none == 0) {
5165 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5160 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5166 irq_none++; 5161 irq_none++;
5162 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5163 int_reg & IPR_PCII_HRRQ_UPDATED) {
5164 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
5165 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5166 return IRQ_HANDLED;
5167 } else 5167 } else
5168 break; 5168 break;
5169 } 5169 }
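
Two things change in the ISR: num_hrrq is re-zeroed for each command batch, and the failure report moves into the outer else-if chain. In outline:

    /* old: after the clear loop, bail with "Error clearing HRRQ" the
     *      moment IPR_PCII_HRRQ_UPDATED is still set;
     * new: keep servicing, and only report the error once num_hrrq has
     *      reached IPR_MAX_HRRQ_RETRIES with the bit still set, the
     *      counter now being reset per batch rather than reused stale.
     */
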
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 911b2736cafa..b9cb8140b398 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -205,6 +205,7 @@ static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
205 default: 205 default:
206 FC_DISC_DBG(disc, "Received an unsupported request, " 206 FC_DISC_DBG(disc, "Received an unsupported request, "
207 "the opcode is (%x)\n", op); 207 "the opcode is (%x)\n", op);
208 fc_frame_free(fp);
208 break; 209 break;
209 } 210 }
210} 211}
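
The one-liner fixes a per-request memory leak; the ownership rule it restores, as a comment:

    /* The receive handler owns the frame: every arm of the switch,
     * including the unsupported-opcode default, must consume fp
     * (handle it or fc_frame_free() it).  Unsupported requests
     * previously leaked one frame each.
     */
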
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 77035a746f60..3b8a6451ea28 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1434,6 +1434,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1434 (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == 1434 (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1435 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) { 1435 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1436 spin_lock_bh(&ep->ex_lock); 1436 spin_lock_bh(&ep->ex_lock);
1437 resp = ep->resp;
1437 rc = fc_exch_done_locked(ep); 1438 rc = fc_exch_done_locked(ep);
1438 WARN_ON(fc_seq_exch(sp) != ep); 1439 WARN_ON(fc_seq_exch(sp) != ep);
1439 spin_unlock_bh(&ep->ex_lock); 1440 spin_unlock_bh(&ep->ex_lock);
@@ -1978,6 +1979,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
1978 spin_unlock_bh(&ep->ex_lock); 1979 spin_unlock_bh(&ep->ex_lock);
1979 return sp; 1980 return sp;
1980err: 1981err:
1982 fc_fcp_ddp_done(fr_fsp(fp));
1981 rc = fc_exch_done_locked(ep); 1983 rc = fc_exch_done_locked(ep);
1982 spin_unlock_bh(&ep->ex_lock); 1984 spin_unlock_bh(&ep->ex_lock);
1983 if (!rc) 1985 if (!rc)
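
Both fc_exch hunks are teardown-ordering fixes; in comment form:

    /* fc_exch_recv_seq_resp(): fc_exch_done_locked() clears ep->resp,
     * so the handler is sampled into the local 'resp' under ex_lock
     * before teardown (assumption: the cached value is invoked further
     * down, outside this hunk).
     *
     * fc_exch_seq_send(): on the error path, DDP resources tied to the
     * packet behind the frame are now released through the newly
     * exported fc_fcp_ddp_done() before the exchange is completed.
     */
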
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 2a3a4720a771..9cd2149519ac 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -312,7 +312,7 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
312 * DDP related resources for a fcp_pkt 312 * DDP related resources for a fcp_pkt
313 * @fsp: The FCP packet that DDP had been used on 313 * @fsp: The FCP packet that DDP had been used on
314 */ 314 */
315static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) 315void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
316{ 316{
317 struct fc_lport *lport; 317 struct fc_lport *lport;
318 318
@@ -681,8 +681,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
681 error = lport->tt.seq_send(lport, seq, fp); 681 error = lport->tt.seq_send(lport, seq, fp);
682 if (error) { 682 if (error) {
683 WARN_ON(1); /* send error should be rare */ 683 WARN_ON(1); /* send error should be rare */
684 fc_fcp_retry_cmd(fsp); 684 return error;
685 return 0;
686 } 685 }
687 fp = NULL; 686 fp = NULL;
688 } 687 }
@@ -1673,7 +1672,8 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1673 FC_FCTL_REQ, 0); 1672 FC_FCTL_REQ, 0);
1674 1673
1675 rec_tov = get_fsp_rec_tov(fsp); 1674 rec_tov = get_fsp_rec_tov(fsp);
1676 seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL, 1675 seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp,
1676 fc_fcp_pkt_destroy,
1677 fsp, jiffies_to_msecs(rec_tov)); 1677 fsp, jiffies_to_msecs(rec_tov));
1678 if (!seq) 1678 if (!seq)
1679 goto retry; 1679 goto retry;
@@ -1720,7 +1720,6 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1720 return; 1720 return;
1721 } 1721 }
1722 1722
1723 fsp->recov_seq = NULL;
1724 switch (fc_frame_payload_op(fp)) { 1723 switch (fc_frame_payload_op(fp)) {
1725 case ELS_LS_ACC: 1724 case ELS_LS_ACC:
1726 fsp->recov_retry = 0; 1725 fsp->recov_retry = 0;
@@ -1732,10 +1731,9 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1732 break; 1731 break;
1733 } 1732 }
1734 fc_fcp_unlock_pkt(fsp); 1733 fc_fcp_unlock_pkt(fsp);
1735 fsp->lp->tt.exch_done(seq);
1736out: 1734out:
1735 fsp->lp->tt.exch_done(seq);
1737 fc_frame_free(fp); 1736 fc_frame_free(fp);
1738 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
1739} 1737}
1740 1738
1741/** 1739/**
@@ -1747,8 +1745,6 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1747{ 1745{
1748 if (fc_fcp_lock_pkt(fsp)) 1746 if (fc_fcp_lock_pkt(fsp))
1749 goto out; 1747 goto out;
1750 fsp->lp->tt.exch_done(fsp->recov_seq);
1751 fsp->recov_seq = NULL;
1752 switch (PTR_ERR(fp)) { 1748 switch (PTR_ERR(fp)) {
1753 case -FC_EX_TIMEOUT: 1749 case -FC_EX_TIMEOUT:
1754 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1750 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
@@ -1764,7 +1760,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1764 } 1760 }
1765 fc_fcp_unlock_pkt(fsp); 1761 fc_fcp_unlock_pkt(fsp);
1766out: 1762out:
1767 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ 1763 fsp->lp->tt.exch_done(fsp->recov_seq);
1768} 1764}
1769 1765
1770/** 1766/**
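
The SRR-related fc_fcp changes all concern ownership of the recovery exchange and the packet reference; summarized:

    /* - fc_fcp_send_data() now propagates seq_send() errors to its
     *   caller instead of retrying internally;
     * - the SRR exchange is created with fc_fcp_pkt_destroy as its
     *   destructor, so the exchange layer drops the fsp hold and the
     *   explicit fc_fcp_pkt_release() calls disappear;
     * - exch_done() moves under the out: labels in the SRR response
     *   and error paths, so it runs whether or not the packet lock was
     *   taken.
     */
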
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index fedc819d70c0..c7d071289af5 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -108,6 +108,7 @@ extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */
108 * Set up direct-data placement for this I/O request 108 * Set up direct-data placement for this I/O request
109 */ 109 */
110void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid); 110void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
111void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp);
111 112
112/* 113/*
113 * Module setup functions 114 * Module setup functions
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 31fc21f4d831..db9238f2ecb8 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -99,19 +99,29 @@ static void sas_ata_task_done(struct sas_task *task)
 	struct sas_ha_struct *sas_ha;
 	enum ata_completion_errors ac;
 	unsigned long flags;
+	struct ata_link *link;
 
 	if (!qc)
 		goto qc_already_gone;
 
 	dev = qc->ap->private_data;
 	sas_ha = dev->port->ha;
+	link = &dev->sata_dev.ap->link;
 
 	spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
 	if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
 	    ((stat->stat == SAM_STAT_CHECK_CONDITION &&
 	      dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
 		ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
-		qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+
+		if (!link->sactive) {
+			qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+		} else {
+			link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+			if (unlikely(link->eh_info.err_mask))
+				qc->flags |= ATA_QCFLAG_FAILED;
+		}
+
 		dev->sata_dev.sstatus = resp->sstatus;
 		dev->sata_dev.serror = resp->serror;
 		dev->sata_dev.scontrol = resp->scontrol;
@@ -121,7 +131,13 @@ static void sas_ata_task_done(struct sas_task *task)
 			SAS_DPRINTK("%s: SAS error %x\n", __func__,
 				    stat->stat);
 			/* We saw a SAS error. Send a vague error. */
-			qc->err_mask = ac;
+			if (!link->sactive) {
+				qc->err_mask = ac;
+			} else {
+				link->eh_info.err_mask |= AC_ERR_DEV;
+				qc->flags |= ATA_QCFLAG_FAILED;
+			}
+
 			dev->sata_dev.tf.feature = 0x04;	/* status err */
 			dev->sata_dev.tf.command = ATA_ERR;
 		}
@@ -279,6 +295,44 @@ static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
 	return ret;
 }
 
+static int sas_ata_soft_reset(struct ata_link *link, unsigned int *class,
+			      unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct domain_device *dev = ap->private_data;
+	struct sas_internal *i =
+		to_sas_internal(dev->port->ha->core.shost->transportt);
+	int res = TMF_RESP_FUNC_FAILED;
+	int ret = 0;
+
+	if (i->dft->lldd_ata_soft_reset)
+		res = i->dft->lldd_ata_soft_reset(dev);
+
+	if (res != TMF_RESP_FUNC_COMPLETE) {
+		SAS_DPRINTK("%s: Unable to soft reset\n", __func__);
+		ret = -EAGAIN;
+	}
+
+	switch (dev->sata_dev.command_set) {
+	case ATA_COMMAND_SET:
+		SAS_DPRINTK("%s: Found ATA device.\n", __func__);
+		*class = ATA_DEV_ATA;
+		break;
+	case ATAPI_COMMAND_SET:
+		SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
+		*class = ATA_DEV_ATAPI;
+		break;
+	default:
+		SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
+			    __func__, dev->sata_dev.command_set);
+		*class = ATA_DEV_UNKNOWN;
+		break;
+	}
+
+	ap->cbl = ATA_CBL_SATA;
+	return ret;
+}
+
 static void sas_ata_post_internal(struct ata_queued_cmd *qc)
 {
 	if (qc->flags & ATA_QCFLAG_FAILED)
@@ -309,7 +363,7 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
 
 static struct ata_port_operations sas_sata_ops = {
 	.prereset		= ata_std_prereset,
-	.softreset		= NULL,
+	.softreset		= sas_ata_soft_reset,
 	.hardreset		= sas_ata_hard_reset,
 	.postreset		= ata_std_postreset,
 	.error_handler		= ata_std_error_handler,
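
The sas_ata_task_done() changes above split error attribution by whether NCQ commands are in flight. A plausible reading: with link->sactive nonzero, the completing status cannot be pinned to a single tag, so the error is parked in the link's EH info and the command is merely flagged failed, leaving libata error handling to read the NCQ error log and attribute it. A compact sketch of that decision (stand-in types, not libata's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct link_state {
	uint32_t sactive;	/* bitmask of in-flight NCQ tags */
	uint32_t eh_err_mask;	/* errors parked for EH to sort out */
};

struct cmd {
	uint32_t err_mask;
	bool failed;
};

/*
 * Mirrors the hunk above: non-NCQ errors are attributed directly to the
 * completing command; with NCQ outstanding, record the error against the
 * link and only flag the command so error handling takes over.
 */
static void complete_with_status(struct link_state *link, struct cmd *qc,
				 uint32_t err)
{
	if (!link->sactive) {
		qc->err_mask |= err;
	} else {
		link->eh_err_mask |= err;
		if (link->eh_err_mask)
			qc->failed = true;
	}
}

int main(void)
{
	struct link_state link = { .sactive = 0x5 };	/* tags 0, 2 busy */
	struct cmd qc = { 0 };

	complete_with_status(&link, &qc, 0x4 /* some device error */);
	printf("link err %#x, cmd failed %d\n",
	       (unsigned)link.eh_err_mask, qc.failed);
	return 0;
}
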
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 8b538bd1ff2b..14e21b5fb8ba 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -57,7 +57,7 @@ int sas_init_queue(struct sas_ha_struct *sas_ha);
 int sas_init_events(struct sas_ha_struct *sas_ha);
 void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
 
-void sas_deform_port(struct asd_sas_phy *phy);
+void sas_deform_port(struct asd_sas_phy *phy, int gone);
 
 void sas_porte_bytes_dmaed(struct work_struct *work);
 void sas_porte_broadcast_rcvd(struct work_struct *work);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index b459c4b635b1..e0f5018e9071 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -39,7 +39,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
 	sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
 			&phy->phy_events_pending);
 	phy->error = 0;
-	sas_deform_port(phy);
+	sas_deform_port(phy, 1);
 }
 
 static void sas_phye_oob_done(struct work_struct *work)
@@ -66,7 +66,7 @@ static void sas_phye_oob_error(struct work_struct *work)
 	sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock,
 			&phy->phy_events_pending);
 
-	sas_deform_port(phy);
+	sas_deform_port(phy, 1);
 
 	if (!port && phy->enabled && i->dft->lldd_control_phy) {
 		phy->error++;
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 5257fdfe699a..42fd1f25b664 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -57,7 +57,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
 
 	if (port) {
 		if (!phy_is_wideport_member(port, phy))
-			sas_deform_port(phy);
+			sas_deform_port(phy, 0);
 		else {
 			SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
 				    __func__, phy->id, phy->port->id,
@@ -153,28 +153,31 @@ static void sas_form_port(struct asd_sas_phy *phy)
  * This is called when the physical link to the other phy has been
  * lost (on this phy), in Event thread context. We cannot delay here.
  */
-void sas_deform_port(struct asd_sas_phy *phy)
+void sas_deform_port(struct asd_sas_phy *phy, int gone)
 {
 	struct sas_ha_struct *sas_ha = phy->ha;
 	struct asd_sas_port *port = phy->port;
 	struct sas_internal *si =
 		to_sas_internal(sas_ha->core.shost->transportt);
+	struct domain_device *dev;
 	unsigned long flags;
 
 	if (!port)
 		return;		/* done by a phy event */
 
-	if (port->port_dev)
-		port->port_dev->pathways--;
+	dev = port->port_dev;
+	if (dev)
+		dev->pathways--;
 
 	if (port->num_phys == 1) {
+		if (dev && gone)
+			dev->gone = 1;
 		sas_unregister_domain_devices(port);
 		sas_port_delete(port->port);
 		port->port = NULL;
 	} else
 		sas_port_delete_phy(port->port, phy->phy);
 
-
 	if (si->dft->lldd_port_deformed)
 		si->dft->lldd_port_deformed(phy);
 
@@ -244,7 +247,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
 	sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
 			&phy->port_events_pending);
 
-	sas_deform_port(phy);
+	sas_deform_port(phy, 1);
 }
 
 void sas_porte_timer_event(struct work_struct *work)
@@ -256,7 +259,7 @@ void sas_porte_timer_event(struct work_struct *work)
 	sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
 			&phy->port_events_pending);
 
-	sas_deform_port(phy);
+	sas_deform_port(phy, 1);
 }
 
 void sas_porte_hard_reset(struct work_struct *work)
@@ -268,7 +271,7 @@ void sas_porte_hard_reset(struct work_struct *work)
 	sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
 			&phy->port_events_pending);
 
-	sas_deform_port(phy);
+	sas_deform_port(phy, 1);
 }
 
 /* ---------- SAS port registration ---------- */
@@ -306,6 +309,6 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
 
 	for (i = 0; i < sas_ha->num_phys; i++)
 		if (sas_ha->sas_phy[i]->port)
-			sas_deform_port(sas_ha->sas_phy[i]);
+			sas_deform_port(sas_ha->sas_phy[i], 0);
 
 }
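
sas_deform_port() now takes a gone argument so one shared teardown path can distinguish real link loss from administrative deforms: the phy and port error events pass 1, while wide-port reformation in sas_form_port() and sas_unregister_ports() pass 0. Only the former marks the domain device gone. A small sketch of the flag-threading pattern (types are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct device_node {
	int pathways;
	bool gone;
};

struct port {
	int num_phys;
	struct device_node *dev;
};

/*
 * Mirrors sas_deform_port(phy, gone): teardown is shared, and the
 * caller states whether the physical link was actually lost.
 */
static void deform_port(struct port *port, bool gone)
{
	struct device_node *dev = port->dev;

	if (dev)
		dev->pathways--;

	if (port->num_phys == 1) {
		if (dev && gone)
			dev->gone = true;	/* fail new I/O fast */
		/* ...unregister domain devices, delete the port... */
	}
	/* else: just drop this phy from the wide port */
}

int main(void)
{
	struct device_node dev = { .pathways = 1 };
	struct port port = { .num_phys = 1, .dev = &dev };

	deform_port(&port, false);	/* e.g. wide-port reformation */
	printf("after reform: gone=%d\n", dev.gone);	/* still usable */

	dev.pathways = 1;
	deform_port(&port, true);	/* loss of signal */
	printf("after loss:   gone=%d\n", dev.gone);
	return 0;
}
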
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index f6e189f40917..eeba76cdf774 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -207,6 +207,13 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
 		struct sas_ha_struct *sas_ha = dev->port->ha;
 		struct sas_task *task;
 
+		/* If the device fell off, no sense in issuing commands */
+		if (dev->gone) {
+			cmd->result = DID_BAD_TARGET << 16;
+			scsi_done(cmd);
+			goto out;
+		}
+
 		if (dev_is_sata(dev)) {
 			unsigned long flags;
 
@@ -216,13 +223,6 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
 			goto out;
 		}
 
-		/* If the device fell off, no sense in issuing commands */
-		if (dev->gone) {
-			cmd->result = DID_BAD_TARGET << 16;
-			scsi_done(cmd);
-			goto out;
-		}
-
 		res = -ENOMEM;
 		task = sas_create_task(cmd, dev, GFP_ATOMIC);
 		if (!task)
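
The sas_scsi_host.c hunk moves the dev->gone test ahead of the dev_is_sata() branch; in the old order a SATA device that had fallen off could still have commands routed into the SATA path before the check was ever reached. The fixed ordering, reduced to a sketch (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum result { QUEUED, BAD_TARGET, DEFERRED };

struct dev {
	bool gone;
	bool is_sata;
	bool port_busy;
};

static enum result queuecommand(struct dev *dev)
{
	/* Check first, for every device type: if the device fell off,
	 * no sense in issuing commands. */
	if (dev->gone)
		return BAD_TARGET;

	/* SATA-specific dispatch happens only for live devices. */
	if (dev->is_sata && dev->port_busy)
		return DEFERRED;

	return QUEUED;
}

int main(void)
{
	struct dev gone_sata = { .gone = true, .is_sata = true };

	/* With the old ordering this would have taken the SATA path;
	 * now it fails fast. */
	printf("%d\n", queuecommand(&gone_sata));	/* 1 == BAD_TARGET */
	return 0;
}
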
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 02d53d89534f..8ec2c86a49d4 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -41,6 +41,7 @@ struct lpfc_sli2_slim;
 			   downloads using bsg */
 #define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
 #define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
+#define LPFC_MAX_SGE_SIZE	0x80000000 /* Maximum data allowed in a SGE */
 #define LPFC_MAX_PROT_SG_SEG_CNT	4096	/* prot sg element count per scsi cmd*/
 #define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL	120	/* lun q_depth ramp up interval */
@@ -486,6 +487,42 @@ struct unsol_rcv_ct_ctx {
 				 (1 << LPFC_USER_LINK_SPEED_AUTO))
 #define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
 
+enum nemb_type {
+	nemb_mse = 1,
+	nemb_hbd
+};
+
+enum mbox_type {
+	mbox_rd = 1,
+	mbox_wr
+};
+
+enum dma_type {
+	dma_mbox = 1,
+	dma_ebuf
+};
+
+enum sta_type {
+	sta_pre_addr = 1,
+	sta_pos_addr
+};
+
+struct lpfc_mbox_ext_buf_ctx {
+	uint32_t state;
+#define LPFC_BSG_MBOX_IDLE		0
+#define LPFC_BSG_MBOX_HOST		1
+#define LPFC_BSG_MBOX_PORT		2
+#define LPFC_BSG_MBOX_DONE		3
+#define LPFC_BSG_MBOX_ABTS		4
+	enum nemb_type nembType;
+	enum mbox_type mboxType;
+	uint32_t numBuf;
+	uint32_t mbxTag;
+	uint32_t seqNum;
+	struct lpfc_dmabuf *mbx_dmabuf;
+	struct list_head ext_dmabuf_list;
+};
+
 struct lpfc_hba {
 	/* SCSI interface function jump table entries */
 	int (*lpfc_new_scsi_buf)
@@ -589,6 +626,7 @@ struct lpfc_hba {
 
 	MAILBOX_t *mbox;
 	uint32_t *mbox_ext;
+	struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx;
 	uint32_t ha_copy;
 	struct _PCB *pcb;
 	struct _IOCB *IOCBs;
@@ -659,6 +697,7 @@ struct lpfc_hba {
 	uint32_t cfg_hostmem_hgp;
 	uint32_t cfg_log_verbose;
 	uint32_t cfg_aer_support;
+	uint32_t cfg_sriov_nr_virtfn;
 	uint32_t cfg_iocb_cnt;
 	uint32_t cfg_suppress_link_up;
 #define LPFC_INITIALIZE_LINK	0	/* do normal init_link mbox */
@@ -706,7 +745,6 @@ struct lpfc_hba {
 	uint32_t *hbq_get;	/* Host mem address of HBQ get ptrs */
 
 	int brd_no;			/* FC board number */
-
 	char SerialNumber[32];		/* adapter Serial Number */
 	char OptionROMVersion[32];	/* adapter BIOS / Fcode version */
 	char ModelDesc[256];		/* Model Description */
@@ -778,6 +816,9 @@ struct lpfc_hba {
 	uint16_t vpi_base;
 	uint16_t vfi_base;
 	unsigned long *vpi_bmask;	/* vpi allocation table */
+	uint16_t *vpi_ids;
+	uint16_t vpi_count;
+	struct list_head lpfc_vpi_blk_list;
 
 	/* Data structure used by fabric iocb scheduler */
 	struct list_head fabric_iocb_list;
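
The new lpfc_mbox_ext_buf_ctx embedded in lpfc_hba tracks a multi-buffer BSG mailbox pass-through as a small state machine (idle, host-owned, port-owned, done, aborted). A toy model of validating such transitions at a single choke point; the transition table below is this sketch's assumption, not taken from the driver:

#include <stdbool.h>
#include <stdio.h>

enum mbox_state { MBOX_IDLE, MBOX_HOST, MBOX_PORT, MBOX_DONE, MBOX_ABTS };

/* One function decides which transitions are legal. */
static bool mbox_transition(enum mbox_state *st, enum mbox_state next)
{
	bool ok = false;

	switch (*st) {
	case MBOX_IDLE:		/* only the host may start a session */
		ok = (next == MBOX_HOST);
		break;
	case MBOX_HOST:		/* host hands buffers to the port, or aborts */
		ok = (next == MBOX_PORT || next == MBOX_ABTS);
		break;
	case MBOX_PORT:		/* port completes, or the session is aborted */
		ok = (next == MBOX_DONE || next == MBOX_ABTS);
		break;
	case MBOX_DONE:
	case MBOX_ABTS:		/* finished sessions recycle to idle */
		ok = (next == MBOX_IDLE);
		break;
	}
	if (ok)
		*st = next;
	return ok;
}

int main(void)
{
	enum mbox_state st = MBOX_IDLE;

	mbox_transition(&st, MBOX_HOST);	/* host fills buffers */
	mbox_transition(&st, MBOX_PORT);	/* command handed to port */
	printf("illegal restart accepted? %d\n",
	       mbox_transition(&st, MBOX_HOST));	/* 0: rejected */
	mbox_transition(&st, MBOX_DONE);
	return 0;
}
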
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 8dcbf8fff673..135a53baa735 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -755,6 +755,73 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
 }
 
 /**
+ * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * Request SLI4 interface type-2 device to perform a physical register set
+ * access.
+ *
+ * Returns:
+ * zero for success
+ **/
+static ssize_t
+lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
+{
+	struct completion online_compl;
+	uint32_t reg_val;
+	int status = 0;
+	int rc;
+
+	if (!phba->cfg_enable_hba_reset)
+		return -EIO;
+
+	if ((phba->sli_rev < LPFC_SLI_REV4) ||
+	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	     LPFC_SLI_INTF_IF_TYPE_2))
+		return -EPERM;
+
+	status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+
+	if (status != 0)
+		return status;
+
+	/* wait for the device to be quiesced before firmware reset */
+	msleep(100);
+
+	reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
+			LPFC_CTL_PDEV_CTL_OFFSET);
+
+	if (opcode == LPFC_FW_DUMP)
+		reg_val |= LPFC_FW_DUMP_REQUEST;
+	else if (opcode == LPFC_FW_RESET)
+		reg_val |= LPFC_CTL_PDEV_CTL_FRST;
+	else if (opcode == LPFC_DV_RESET)
+		reg_val |= LPFC_CTL_PDEV_CTL_DRST;
+
+	writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
+	       LPFC_CTL_PDEV_CTL_OFFSET);
+	/* flush */
+	readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+	/* delay driver action following IF_TYPE_2 reset */
+	msleep(100);
+
+	init_completion(&online_compl);
+	rc = lpfc_workq_post_event(phba, &status, &online_compl,
+				   LPFC_EVT_ONLINE);
+	if (rc == 0)
+		return -ENOMEM;
+
+	wait_for_completion(&online_compl);
+
+	if (status != 0)
+		return -EIO;
+
+	return 0;
+}
+
+/**
  * lpfc_nport_evt_cnt_show - Return the number of nport events
  * @dev: class device that is converted into a Scsi_host.
  * @attr: device attribute, not used.
@@ -848,6 +915,12 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
 			return -EINVAL;
 		else
 			status = lpfc_do_offline(phba, LPFC_EVT_KILL);
+	else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
+		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
+	else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
+		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
+	else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
+		status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
 	else
 		return -EINVAL;
 
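
lpfc_sli4_pdev_reg_request() above is a textbook MMIO read-modify-write: read the control register, OR in the request bit for the chosen opcode, write it back, then read once more purely to flush posted PCI writes before the settle delay. The same access pattern against a fake register (the register layout here is invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define CTL_FRST (1u << 1)	/* fake "function reset" request bit */
#define CTL_DRST (1u << 2)	/* fake "device reset" request bit */

/* Stand-ins for readl()/writel() on an ioremapped register. */
static uint32_t fake_reg;

static uint32_t reg_read(volatile uint32_t *reg)
{
	return *reg;
}

static void reg_write(volatile uint32_t *reg, uint32_t v)
{
	*reg = v;
}

static void request_reset(volatile uint32_t *ctl, uint32_t request_bit)
{
	uint32_t val = reg_read(ctl);	/* read */

	val |= request_bit;		/* modify: preserve other bits */
	reg_write(ctl, val);		/* write */

	/* Read back to flush posted writes, so the device has really
	 * seen the request before the caller starts its settle delay. */
	(void)reg_read(ctl);
}

int main(void)
{
	fake_reg = 0x10;	/* unrelated bits already set */
	request_reset(&fake_reg, CTL_FRST);
	printf("ctl = %#x\n", (unsigned)fake_reg);	/* 0x12: bits kept */
	return 0;
}
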
@@ -1322,6 +1395,102 @@ lpfc_dss_show(struct device *dev, struct device_attribute *attr,
 }
 
 /**
+ * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted support level.
+ *
+ * Description:
+ * Returns the maximum number of virtual functions a physical function can
+ * support; 0 will be returned if called on a virtual function.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sriov_hw_max_virtfn_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct pci_dev *pdev = phba->pcidev;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t shdr_status, shdr_add_status;
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
+	struct lpfc_rsrc_desc_pcie *desc;
+	uint32_t max_nr_virtfn;
+	uint32_t desc_count;
+	int length, rc, i;
+
+	if ((phba->sli_rev < LPFC_SLI_REV4) ||
+	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	     LPFC_SLI_INTF_IF_TYPE_2))
+		return -EPERM;
+
+	if (!pdev->is_physfn)
+		return snprintf(buf, PAGE_SIZE, "%d\n", 0);
+
+	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+
+	/* get the maximum number of virtfn support by physfn */
+	length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
+			 length, LPFC_SLI4_MBX_EMBED);
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+	bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
+	       phba->sli4_hba.iov.pf_number + 1);
+
+	get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
+	bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
+	       LPFC_CFG_TYPE_CURRENT_ACTIVE);
+
+	rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
+				      lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
+
+	if (rc != MBX_TIMEOUT) {
+		/* check return status */
+		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+					 &shdr->response);
+		if (shdr_status || shdr_add_status || rc)
+			goto error_out;
+
+	} else
+		goto error_out;
+
+	desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
+
+	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+		desc = (struct lpfc_rsrc_desc_pcie *)
+			&get_prof_cfg->u.response.prof_cfg.desc[i];
+		if (LPFC_RSRC_DESC_TYPE_PCIE ==
+		    bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+			max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
+					       desc);
+			break;
+		}
+	}
+
+	if (i < LPFC_RSRC_DESC_MAX_NUM) {
+		if (rc != MBX_TIMEOUT)
+			mempool_free(mboxq, phba->mbox_mem_pool);
+		return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
+	}
+
+error_out:
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+	return -EIO;
+}
+
+/**
  * lpfc_param_show - Return a cfg attribute value in decimal
  *
  * Description:
@@ -1762,6 +1931,8 @@ static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
 static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
 static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
 static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
+static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
+		   lpfc_sriov_hw_max_virtfn_show, NULL);
 
 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
 
@@ -3014,7 +3185,7 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
  *
  * @dev: class device that is converted into a Scsi_host.
  * @attr: device attribute, not used.
- * @buf: containing the string "selective".
+ * @buf: containing enable or disable aer flag.
  * @count: unused variable.
  *
  * Description:
@@ -3098,7 +3269,7 @@ lpfc_param_show(aer_support)
 /**
  * lpfc_aer_support_init - Set the initial adapters aer support flag
  * @phba: lpfc_hba pointer.
- * @val: link speed value.
+ * @val: enable aer or disable aer flag.
  *
  * Description:
  * If val is in a valid range [0,1], then set the adapter's initial
@@ -3137,7 +3308,7 @@ static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
  * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
  * @dev: class device that is converted into a Scsi_host.
  * @attr: device attribute, not used.
- * @buf: containing the string "selective".
+ * @buf: containing flag 1 for aer cleanup state.
 * @count: unused variable.
 *
 * Description:
@@ -3180,6 +3351,136 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
 		   lpfc_aer_cleanup_state);
 
+/**
+ * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string the number of vfs to be enabled.
+ * @count: unused variable.
+ *
+ * Description:
+ * When this api is called through the user sysfs, the driver shall
+ * try to enable or disable SR-IOV virtual functions according to the
+ * following:
+ *
+ * If zero virtual functions have been enabled to the physical function,
+ * the driver shall invoke the pci enable virtual function api trying
+ * to enable the virtual functions. If the nr_vfn provided is greater
+ * than the maximum supported, the maximum virtual function number will
+ * be used for invoking the api; otherwise, the nr_vfn provided shall
+ * be used for invoking the api. If the api call returned success, the
+ * actual number of virtual functions enabled will be set to the driver
+ * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
+ * cfg_sriov_nr_virtfn remains zero.
+ *
+ * If nonzero virtual functions have already been enabled to the
+ * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
+ * -EINVAL will be returned and the driver does nothing;
+ *
+ * If the nr_vfn provided is zero and nonzero virtual functions have
+ * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
+ * disabling virtual function api shall be invoked to disable all the
+ * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
+ * zero. Otherwise, if zero virtual functions have been enabled, do
+ * nothing.
+ *
+ * Returns:
+ * length of the buf on success if val is in range the intended mode
+ * is supported.
+ * -EINVAL if val out of range or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct pci_dev *pdev = phba->pcidev;
+	int val = 0, rc = -EINVAL;
+
+	/* Sanity check on user data */
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+	if (val < 0)
+		return -EINVAL;
+
+	/* Request disabling virtual functions */
+	if (val == 0) {
+		if (phba->cfg_sriov_nr_virtfn > 0) {
+			pci_disable_sriov(pdev);
+			phba->cfg_sriov_nr_virtfn = 0;
+		}
+		return strlen(buf);
+	}
+
+	/* Request enabling virtual functions */
+	if (phba->cfg_sriov_nr_virtfn > 0) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3018 There are %d virtual functions "
+				"enabled on physical function.\n",
+				phba->cfg_sriov_nr_virtfn);
+		return -EEXIST;
+	}
+
+	if (val <= LPFC_MAX_VFN_PER_PFN)
+		phba->cfg_sriov_nr_virtfn = val;
+	else {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3019 Enabling %d virtual functions is not "
+				"allowed.\n", val);
+		return -EINVAL;
+	}
+
+	rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
+	if (rc) {
+		phba->cfg_sriov_nr_virtfn = 0;
+		rc = -EPERM;
+	} else
+		rc = strlen(buf);
+
+	return rc;
+}
+
+static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
+module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
+lpfc_param_show(sriov_nr_virtfn)
+
+/**
+ * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
+ * @phba: lpfc_hba pointer.
+ * @val: number of sr-iov virtual functions.
+ *
+ * Description:
+ * If val is in a valid range [0,255], then set the adapter's initial
+ * cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum
+ * number shall be used instead. It will be up to the driver's probe_one
+ * routine to determine whether the device's SR-IOV is supported or not.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
+{
+	if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
+		phba->cfg_sriov_nr_virtfn = val;
+		return 0;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"3017 Enabling %d virtual functions is not "
+			"allowed.\n", val);
+	return -EINVAL;
+}
+static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
+		   lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
+
 /*
 # lpfc_fcp_class:  Determines FC class to use for the FCP protocol.
 # Value range is [2,3]. Default value is 3.
@@ -3497,6 +3798,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_prot_sg_seg_cnt,
 	&dev_attr_lpfc_aer_support,
 	&dev_attr_lpfc_aer_state_cleanup,
+	&dev_attr_lpfc_sriov_nr_virtfn,
 	&dev_attr_lpfc_suppress_link_up,
 	&dev_attr_lpfc_iocb_cnt,
 	&dev_attr_iocb_hw,
@@ -3505,6 +3807,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_fips_level,
 	&dev_attr_lpfc_fips_rev,
 	&dev_attr_lpfc_dss,
+	&dev_attr_lpfc_sriov_hw_max_virtfn,
 	NULL,
 };
 
@@ -3961,7 +4264,7 @@ static struct bin_attribute sysfs_mbox_attr = {
 		.name = "mbox",
 		.mode = S_IRUSR | S_IWUSR,
 	},
-	.size = MAILBOX_CMD_SIZE,
+	.size = MAILBOX_SYSFS_MAX,
 	.read = sysfs_mbox_read,
 	.write = sysfs_mbox_write,
 };
@@ -4705,6 +5008,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
 	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
 	lpfc_aer_support_init(phba, lpfc_aer_support);
+	lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
 	lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
 	lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
 	phba->cfg_enable_dss = 1;
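
lpfc_sriov_nr_virtfn_store() above follows the usual sysfs-store discipline: reject input that does not begin with a digit, parse once with sscanf(), range-check, and only then touch device state. The same validation skeleton as a runnable userspace sketch (the limit is a stand-in for LPFC_MAX_VFN_PER_PFN):

#include <ctype.h>
#include <stdio.h>

#define MAX_VFN 255	/* stand-in for LPFC_MAX_VFN_PER_PFN */

/* Returns the parsed value, or -1 on invalid input. */
static int parse_nr_virtfn(const char *buf)
{
	int val;

	if (!isdigit((unsigned char)buf[0]))
		return -1;	/* "abc", "-3", " 7" all rejected */
	if (sscanf(buf, "%i", &val) != 1)
		return -1;
	if (val < 0 || val > MAX_VFN)
		return -1;	/* range-check before acting on it */
	return val;
}

int main(void)
{
	const char *inputs[] = { "8", "0", "300", "x4", "-1" };
	unsigned i;

	for (i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++)
		printf("%-4s -> %d\n", inputs[i], parse_nr_virtfn(inputs[i]));
	return 0;
}
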
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 853e5042f39c..7fb0ba4cbfa7 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -23,6 +23,7 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/list.h>
26 27
27#include <scsi/scsi.h> 28#include <scsi/scsi.h>
28#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
@@ -79,8 +80,7 @@ struct lpfc_bsg_iocb {
79struct lpfc_bsg_mbox { 80struct lpfc_bsg_mbox {
80 LPFC_MBOXQ_t *pmboxq; 81 LPFC_MBOXQ_t *pmboxq;
81 MAILBOX_t *mb; 82 MAILBOX_t *mb;
82 struct lpfc_dmabuf *rxbmp; /* for BIU diags */ 83 struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
83 struct lpfc_dmabufext *dmp; /* for BIU diags */
84 uint8_t *ext; /* extended mailbox data */ 84 uint8_t *ext; /* extended mailbox data */
85 uint32_t mbOffset; /* from app */ 85 uint32_t mbOffset; /* from app */
86 uint32_t inExtWLen; /* from app */ 86 uint32_t inExtWLen; /* from app */
@@ -332,6 +332,8 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
332 cmd->ulpLe = 1; 332 cmd->ulpLe = 1;
333 cmd->ulpClass = CLASS3; 333 cmd->ulpClass = CLASS3;
334 cmd->ulpContext = ndlp->nlp_rpi; 334 cmd->ulpContext = ndlp->nlp_rpi;
335 if (phba->sli_rev == LPFC_SLI_REV4)
336 cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
335 cmd->ulpOwner = OWN_CHIP; 337 cmd->ulpOwner = OWN_CHIP;
336 cmdiocbq->vport = phba->pport; 338 cmdiocbq->vport = phba->pport;
337 cmdiocbq->context3 = bmp; 339 cmdiocbq->context3 = bmp;
@@ -1336,6 +1338,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1336 } 1338 }
1337 1339
1338 icmd->un.ulpWord[3] = ndlp->nlp_rpi; 1340 icmd->un.ulpWord[3] = ndlp->nlp_rpi;
1341 if (phba->sli_rev == LPFC_SLI_REV4)
1342 icmd->ulpContext =
1343 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1344
1339 /* The exchange is done, mark the entry as invalid */ 1345 /* The exchange is done, mark the entry as invalid */
1340 phba->ct_ctx[tag].flags &= ~UNSOL_VALID; 1346 phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
1341 } else 1347 } else
@@ -1463,11 +1469,91 @@ send_mgmt_rsp_exit:
1463} 1469}
1464 1470
1465/** 1471/**
1466 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command 1472 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
1473 * @phba: Pointer to HBA context object.
1467 * @job: LPFC_BSG_VENDOR_DIAG_MODE 1474 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1468 * 1475 *
1469 * This function is responsible for placing a port into diagnostic loopback 1476 * This function is responsible for preparing driver for diag loopback
1470 * mode in order to perform a diagnostic loopback test. 1477 * on device.
1478 */
1479static int
1480lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
1481{
1482 struct lpfc_vport **vports;
1483 struct Scsi_Host *shost;
1484 struct lpfc_sli *psli;
1485 struct lpfc_sli_ring *pring;
1486 int i = 0;
1487
1488 psli = &phba->sli;
1489 if (!psli)
1490 return -ENODEV;
1491
1492 pring = &psli->ring[LPFC_FCP_RING];
1493 if (!pring)
1494 return -ENODEV;
1495
1496 if ((phba->link_state == LPFC_HBA_ERROR) ||
1497 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1498 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
1499 return -EACCES;
1500
1501 vports = lpfc_create_vport_work_array(phba);
1502 if (vports) {
1503 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1504 shost = lpfc_shost_from_vport(vports[i]);
1505 scsi_block_requests(shost);
1506 }
1507 lpfc_destroy_vport_work_array(phba, vports);
1508 } else {
1509 shost = lpfc_shost_from_vport(phba->pport);
1510 scsi_block_requests(shost);
1511 }
1512
1513 while (pring->txcmplq_cnt) {
1514 if (i++ > 500) /* wait up to 5 seconds */
1515 break;
1516 msleep(10);
1517 }
1518 return 0;
1519}
1520
1521/**
1522 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1523 * @phba: Pointer to HBA context object.
1524 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1525 *
1526 * This function is responsible for driver exit processing of setting up
1527 * diag loopback mode on device.
1528 */
1529static void
1530lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1531{
1532 struct Scsi_Host *shost;
1533 struct lpfc_vport **vports;
1534 int i;
1535
1536 vports = lpfc_create_vport_work_array(phba);
1537 if (vports) {
1538 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1539 shost = lpfc_shost_from_vport(vports[i]);
1540 scsi_unblock_requests(shost);
1541 }
1542 lpfc_destroy_vport_work_array(phba, vports);
1543 } else {
1544 shost = lpfc_shost_from_vport(phba->pport);
1545 scsi_unblock_requests(shost);
1546 }
1547 return;
1548}
1549
1550/**
1551 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
1552 * @phba: Pointer to HBA context object.
1553 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1554 *
1555 * This function is responsible for placing an sli3 port into diagnostic
1556 * loopback mode in order to perform a diagnostic loopback test.
1471 * All new scsi requests are blocked, a small delay is used to allow the 1557 * All new scsi requests are blocked, a small delay is used to allow the
1472 * scsi requests to complete then the link is brought down. If the link is 1558 * scsi requests to complete then the link is brought down. If the link is
1473 * is placed in loopback mode then scsi requests are again allowed 1559 * is placed in loopback mode then scsi requests are again allowed
@@ -1475,17 +1561,11 @@ send_mgmt_rsp_exit:
1475 * All of this is done in-line. 1561 * All of this is done in-line.
1476 */ 1562 */
1477static int 1563static int
1478lpfc_bsg_diag_mode(struct fc_bsg_job *job) 1564lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1479{ 1565{
1480 struct Scsi_Host *shost = job->shost;
1481 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1482 struct lpfc_hba *phba = vport->phba;
1483 struct diag_mode_set *loopback_mode; 1566 struct diag_mode_set *loopback_mode;
1484 struct lpfc_sli *psli = &phba->sli;
1485 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
1486 uint32_t link_flags; 1567 uint32_t link_flags;
1487 uint32_t timeout; 1568 uint32_t timeout;
1488 struct lpfc_vport **vports;
1489 LPFC_MBOXQ_t *pmboxq; 1569 LPFC_MBOXQ_t *pmboxq;
1490 int mbxstatus; 1570 int mbxstatus;
1491 int i = 0; 1571 int i = 0;
@@ -1494,53 +1574,33 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
1494 /* no data to return just the return code */ 1574 /* no data to return just the return code */
1495 job->reply->reply_payload_rcv_len = 0; 1575 job->reply->reply_payload_rcv_len = 0;
1496 1576
1497 if (job->request_len < 1577 if (job->request_len < sizeof(struct fc_bsg_request) +
1498 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) { 1578 sizeof(struct diag_mode_set)) {
1499 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1579 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1500 "2738 Received DIAG MODE request below minimum " 1580 "2738 Received DIAG MODE request size:%d "
1501 "size\n"); 1581 "below the minimum size:%d\n",
1582 job->request_len,
1583 (int)(sizeof(struct fc_bsg_request) +
1584 sizeof(struct diag_mode_set)));
1502 rc = -EINVAL; 1585 rc = -EINVAL;
1503 goto job_error; 1586 goto job_error;
1504 } 1587 }
1505 1588
1589 rc = lpfc_bsg_diag_mode_enter(phba, job);
1590 if (rc)
1591 goto job_error;
1592
1593 /* bring the link to diagnostic mode */
1506 loopback_mode = (struct diag_mode_set *) 1594 loopback_mode = (struct diag_mode_set *)
1507 job->request->rqst_data.h_vendor.vendor_cmd; 1595 job->request->rqst_data.h_vendor.vendor_cmd;
1508 link_flags = loopback_mode->type; 1596 link_flags = loopback_mode->type;
1509 timeout = loopback_mode->timeout * 100; 1597 timeout = loopback_mode->timeout * 100;
1510 1598
1511 if ((phba->link_state == LPFC_HBA_ERROR) ||
1512 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1513 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
1514 rc = -EACCES;
1515 goto job_error;
1516 }
1517
1518 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1599 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1519 if (!pmboxq) { 1600 if (!pmboxq) {
1520 rc = -ENOMEM; 1601 rc = -ENOMEM;
1521 goto job_error; 1602 goto loopback_mode_exit;
1522 }
1523
1524 vports = lpfc_create_vport_work_array(phba);
1525 if (vports) {
1526 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1527 shost = lpfc_shost_from_vport(vports[i]);
1528 scsi_block_requests(shost);
1529 }
1530
1531 lpfc_destroy_vport_work_array(phba, vports);
1532 } else {
1533 shost = lpfc_shost_from_vport(phba->pport);
1534 scsi_block_requests(shost);
1535 } 1603 }
1536
1537 while (pring->txcmplq_cnt) {
1538 if (i++ > 500) /* wait up to 5 seconds */
1539 break;
1540
1541 msleep(10);
1542 }
1543
1544 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 1604 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1545 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; 1605 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1546 pmboxq->u.mb.mbxOwner = OWN_HOST; 1606 pmboxq->u.mb.mbxOwner = OWN_HOST;
@@ -1594,22 +1654,186 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
1594 rc = -ENODEV; 1654 rc = -ENODEV;
1595 1655
1596loopback_mode_exit: 1656loopback_mode_exit:
1597 vports = lpfc_create_vport_work_array(phba); 1657 lpfc_bsg_diag_mode_exit(phba);
1598 if (vports) { 1658
1599 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1659 /*
1600 shost = lpfc_shost_from_vport(vports[i]); 1660 * Let SLI layer release mboxq if mbox command completed after timeout.
1601 scsi_unblock_requests(shost); 1661 */
1662 if (mbxstatus != MBX_TIMEOUT)
1663 mempool_free(pmboxq, phba->mbox_mem_pool);
1664
1665job_error:
1666 /* make error code available to userspace */
1667 job->reply->result = rc;
1668 /* complete the job back to userspace if no error */
1669 if (rc == 0)
1670 job->job_done(job);
1671 return rc;
1672}
1673
1674/**
1675 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
1676 * @phba: Pointer to HBA context object.
1677 * @diag: Flag for set link to diag or nomral operation state.
1678 *
1679 * This function is responsible for issuing a sli4 mailbox command for setting
1680 * link to either diag state or normal operation state.
1681 */
1682static int
1683lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1684{
1685 LPFC_MBOXQ_t *pmboxq;
1686 struct lpfc_mbx_set_link_diag_state *link_diag_state;
1687 uint32_t req_len, alloc_len;
1688 int mbxstatus = MBX_SUCCESS, rc;
1689
1690 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1691 if (!pmboxq)
1692 return -ENOMEM;
1693
1694 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1695 sizeof(struct lpfc_sli4_cfg_mhdr));
1696 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1697 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
1698 req_len, LPFC_SLI4_MBX_EMBED);
1699 if (alloc_len != req_len) {
1700 rc = -ENOMEM;
1701 goto link_diag_state_set_out;
1702 }
1703 link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1704 bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1705 phba->sli4_hba.link_state.number);
1706 bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1707 phba->sli4_hba.link_state.type);
1708 if (diag)
1709 bf_set(lpfc_mbx_set_diag_state_diag,
1710 &link_diag_state->u.req, 1);
1711 else
1712 bf_set(lpfc_mbx_set_diag_state_diag,
1713 &link_diag_state->u.req, 0);
1714
1715 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1716
1717 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1718 rc = 0;
1719 else
1720 rc = -ENODEV;
1721
1722link_diag_state_set_out:
1723 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1724 mempool_free(pmboxq, phba->mbox_mem_pool);
1725
1726 return rc;
1727}
1728
1729/**
1730 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
1731 * @phba: Pointer to HBA context object.
1732 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1733 *
1734 * This function is responsible for placing an sli4 port into diagnostic
1735 * loopback mode in order to perform a diagnostic loopback test.
1736 */
1737static int
1738lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1739{
1740 struct diag_mode_set *loopback_mode;
1741 uint32_t link_flags, timeout, req_len, alloc_len;
1742 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1743 LPFC_MBOXQ_t *pmboxq = NULL;
1744 int mbxstatus, i, rc = 0;
1745
1746 /* no data to return just the return code */
1747 job->reply->reply_payload_rcv_len = 0;
1748
1749 if (job->request_len < sizeof(struct fc_bsg_request) +
1750 sizeof(struct diag_mode_set)) {
1751 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1752 "3011 Received DIAG MODE request size:%d "
1753 "below the minimum size:%d\n",
1754 job->request_len,
1755 (int)(sizeof(struct fc_bsg_request) +
1756 sizeof(struct diag_mode_set)));
1757 rc = -EINVAL;
1758 goto job_error;
1759 }
1760
1761 rc = lpfc_bsg_diag_mode_enter(phba, job);
1762 if (rc)
1763 goto job_error;
1764
1765 /* bring the link to diagnostic mode */
1766 loopback_mode = (struct diag_mode_set *)
1767 job->request->rqst_data.h_vendor.vendor_cmd;
1768 link_flags = loopback_mode->type;
1769 timeout = loopback_mode->timeout * 100;
1770
1771 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
1772 if (rc)
1773 goto loopback_mode_exit;
1774
1775 /* wait for link down before proceeding */
1776 i = 0;
1777 while (phba->link_state != LPFC_LINK_DOWN) {
1778 if (i++ > timeout) {
1779 rc = -ETIMEDOUT;
1780 goto loopback_mode_exit;
1781 }
1782 msleep(10);
1783 }
1784 /* set up loopback mode */
1785 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1786 if (!pmboxq) {
1787 rc = -ENOMEM;
1788 goto loopback_mode_exit;
1789 }
1790 req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1791 sizeof(struct lpfc_sli4_cfg_mhdr));
1792 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1793 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
1794 req_len, LPFC_SLI4_MBX_EMBED);
1795 if (alloc_len != req_len) {
1796 rc = -ENOMEM;
1797 goto loopback_mode_exit;
1798 }
1799 link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1800 bf_set(lpfc_mbx_set_diag_state_link_num,
1801 &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
1802 bf_set(lpfc_mbx_set_diag_state_link_type,
1803 &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
1804 if (link_flags == INTERNAL_LOOP_BACK)
1805 bf_set(lpfc_mbx_set_diag_lpbk_type,
1806 &link_diag_loopback->u.req,
1807 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
1808 else
1809 bf_set(lpfc_mbx_set_diag_lpbk_type,
1810 &link_diag_loopback->u.req,
1811 LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
1812
1813 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1814 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1815 rc = -ENODEV;
1816 else {
1817 phba->link_flag |= LS_LOOPBACK_MODE;
1818 /* wait for the link attention interrupt */
1819 msleep(100);
1820 i = 0;
1821 while (phba->link_state != LPFC_HBA_READY) {
1822 if (i++ > timeout) {
1823 rc = -ETIMEDOUT;
1824 break;
1825 }
1826 msleep(10);
1602 } 1827 }
1603 lpfc_destroy_vport_work_array(phba, vports);
1604 } else {
1605 shost = lpfc_shost_from_vport(phba->pport);
1606 scsi_unblock_requests(shost);
1607 } 1828 }
1608 1829
1830loopback_mode_exit:
1831 lpfc_bsg_diag_mode_exit(phba);
1832
1609 /* 1833 /*
1610 * Let SLI layer release mboxq if mbox command completed after timeout. 1834 * Let SLI layer release mboxq if mbox command completed after timeout.
1611 */ 1835 */
1612 if (mbxstatus != MBX_TIMEOUT) 1836 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1613 mempool_free(pmboxq, phba->mbox_mem_pool); 1837 mempool_free(pmboxq, phba->mbox_mem_pool);
1614 1838
1615job_error: 1839job_error:
@@ -1622,6 +1846,234 @@ job_error:
1622} 1846}
1623 1847
1624/** 1848/**
1849 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
1850 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1851 *
1852 * This function is responsible for responding to check and dispatch bsg diag
1853 * command from the user to proper driver action routines.
1854 */
1855static int
1856lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
1857{
1858 struct Scsi_Host *shost;
1859 struct lpfc_vport *vport;
1860 struct lpfc_hba *phba;
1861 int rc;
1862
1863 shost = job->shost;
1864 if (!shost)
1865 return -ENODEV;
1866 vport = (struct lpfc_vport *)job->shost->hostdata;
1867 if (!vport)
1868 return -ENODEV;
1869 phba = vport->phba;
1870 if (!phba)
1871 return -ENODEV;
1872
1873 if (phba->sli_rev < LPFC_SLI_REV4)
1874 rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
1875 else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1876 LPFC_SLI_INTF_IF_TYPE_2)
1877 rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
1878 else
1879 rc = -ENODEV;
1880
1881 return rc;
1882
1883}
1884
1885/**
1886 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
1887 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
1888 *
1889 * This function is responsible for responding to check and dispatch bsg diag
1890 * command from the user to proper driver action routines.
1891 */
1892static int
1893lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
1894{
1895 struct Scsi_Host *shost;
1896 struct lpfc_vport *vport;
1897 struct lpfc_hba *phba;
1898 int rc;
1899
1900 shost = job->shost;
1901 if (!shost)
1902 return -ENODEV;
1903 vport = (struct lpfc_vport *)job->shost->hostdata;
1904 if (!vport)
1905 return -ENODEV;
1906 phba = vport->phba;
1907 if (!phba)
1908 return -ENODEV;
1909
1910 if (phba->sli_rev < LPFC_SLI_REV4)
1911 return -ENODEV;
1912 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
1913 LPFC_SLI_INTF_IF_TYPE_2)
1914 return -ENODEV;
1915
1916 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
1917
1918 if (!rc)
1919 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
1920
1921 return rc;
1922}
1923
1924/**
1925 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
1926 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
1927 *
1928 * This function is to perform SLI4 diag link test request from the user
1929 * applicaiton.
1930 */
1931static int
1932lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
1933{
1934 struct Scsi_Host *shost;
1935 struct lpfc_vport *vport;
1936 struct lpfc_hba *phba;
1937 LPFC_MBOXQ_t *pmboxq;
1938 struct sli4_link_diag *link_diag_test_cmd;
1939 uint32_t req_len, alloc_len;
1940 uint32_t timeout;
1941 struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
1942 union lpfc_sli4_cfg_shdr *shdr;
1943 uint32_t shdr_status, shdr_add_status;
1944 struct diag_status *diag_status_reply;
1945 int mbxstatus, rc = 0;
1946
1947 shost = job->shost;
1948 if (!shost) {
1949 rc = -ENODEV;
1950 goto job_error;
1951 }
1952 vport = (struct lpfc_vport *)job->shost->hostdata;
1953 if (!vport) {
1954 rc = -ENODEV;
1955 goto job_error;
1956 }
1957 phba = vport->phba;
1958 if (!phba) {
1959 rc = -ENODEV;
1960 goto job_error;
1961 }
1962
1963 if (phba->sli_rev < LPFC_SLI_REV4) {
1964 rc = -ENODEV;
1965 goto job_error;
1966 }
1967 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
1968 LPFC_SLI_INTF_IF_TYPE_2) {
1969 rc = -ENODEV;
1970 goto job_error;
1971 }
1972
1973 if (job->request_len < sizeof(struct fc_bsg_request) +
1974 sizeof(struct sli4_link_diag)) {
1975 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1976 "3013 Received LINK DIAG TEST request "
1977 " size:%d below the minimum size:%d\n",
1978 job->request_len,
1979 (int)(sizeof(struct fc_bsg_request) +
1980 sizeof(struct sli4_link_diag)));
1981 rc = -EINVAL;
1982 goto job_error;
1983 }
1984
1985 rc = lpfc_bsg_diag_mode_enter(phba, job);
1986 if (rc)
1987 goto job_error;
1988
1989 link_diag_test_cmd = (struct sli4_link_diag *)
1990 job->request->rqst_data.h_vendor.vendor_cmd;
1991 timeout = link_diag_test_cmd->timeout * 100;
1992
1993 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
1994
1995 if (rc)
1996 goto job_error;
1997
1998 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1999 if (!pmboxq) {
2000 rc = -ENOMEM;
2001 goto link_diag_test_exit;
2002 }
2003
2004 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2005 sizeof(struct lpfc_sli4_cfg_mhdr));
2006 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2007 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2008 req_len, LPFC_SLI4_MBX_EMBED);
2009 if (alloc_len != req_len) {
2010 rc = -ENOMEM;
2011 goto link_diag_test_exit;
2012 }
2013 run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2014 bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2015 phba->sli4_hba.link_state.number);
2016 bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2017 phba->sli4_hba.link_state.type);
2018 bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2019 link_diag_test_cmd->test_id);
2020 bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2021 link_diag_test_cmd->loops);
2022 bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2023 link_diag_test_cmd->test_version);
2024 bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2025 link_diag_test_cmd->error_action);
2026
2027 mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2028
2029 shdr = (union lpfc_sli4_cfg_shdr *)
2030 &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2031 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2032 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2033 if (shdr_status || shdr_add_status || mbxstatus) {
2034 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2035 "3010 Run link diag test mailbox failed with "
2036 "mbx_status x%x status x%x, add_status x%x\n",
2037 mbxstatus, shdr_status, shdr_add_status);
2038 }
2039
2040 diag_status_reply = (struct diag_status *)
2041 job->reply->reply_data.vendor_reply.vendor_rsp;
2042
2043 if (job->reply_len <
2044 sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
2045 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2046 "3012 Received Run link diag test reply "
2047 "below minimum size (%d): reply_len:%d\n",
2048 (int)(sizeof(struct fc_bsg_request) +
2049 sizeof(struct diag_status)),
2050 job->reply_len);
2051 rc = -EINVAL;
2052 goto job_error;
2053 }
2054
2055 diag_status_reply->mbox_status = mbxstatus;
2056 diag_status_reply->shdr_status = shdr_status;
2057 diag_status_reply->shdr_add_status = shdr_add_status;
2058
2059link_diag_test_exit:
2060 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2061
2062 if (pmboxq)
2063 mempool_free(pmboxq, phba->mbox_mem_pool);
2064
2065 lpfc_bsg_diag_mode_exit(phba);
2066
2067job_error:
2068 /* make error code available to userspace */
2069 job->reply->result = rc;
2070 /* complete the job back to userspace if no error */
2071 if (rc == 0)
2072 job->job_done(job);
2073 return rc;
2074}
2075
2076/**
1625 * lpfcdiag_loop_self_reg - obtains a remote port login id 2077 * lpfcdiag_loop_self_reg - obtains a remote port login id
1626 * @phba: Pointer to HBA context object 2078 * @phba: Pointer to HBA context object
1627 * @rpi: Pointer to a remote port login id 2079 * @rpi: Pointer to a remote port login id
@@ -1851,6 +2303,86 @@ err_get_xri_exit:
1851} 2303}
1852 2304
1853/** 2305/**
2306 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers
2307 * @phba: Pointer to HBA context object
2308 *
2309 * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and.
2310 * retruns the pointer to the buffer.
2311 **/
2312static struct lpfc_dmabuf *
2313lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2314{
2315 struct lpfc_dmabuf *dmabuf;
2316 struct pci_dev *pcidev = phba->pcidev;
2317
2318 /* allocate dma buffer struct */
2319 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2320 if (!dmabuf)
2321 return NULL;
2322
2323 INIT_LIST_HEAD(&dmabuf->list);
2324
2325 /* now, allocate dma buffer */
2326 dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2327 &(dmabuf->phys), GFP_KERNEL);
2328
2329 if (!dmabuf->virt) {
2330 kfree(dmabuf);
2331 return NULL;
2332 }
2333 memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
2334
2335 return dmabuf;
2336}
2337
2338/**
2339 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2340 * @phba: Pointer to HBA context object.
2341 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2342 *
2343 * This routine just simply frees a dma buffer and its associated buffer
2344 * descriptor referred by @dmabuf.
2345 **/
2346static void
2347lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2348{
2349 struct pci_dev *pcidev = phba->pcidev;
2350
2351 if (!dmabuf)
2352 return;
2353
2354 if (dmabuf->virt)
2355 dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2356 dmabuf->virt, dmabuf->phys);
2357 kfree(dmabuf);
2358 return;
2359}
2360
2361/**
2362 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page-sized dma buffers
2363 * @phba: Pointer to HBA context object.
2364 * @dmabuf_list: Pointer to a list of bsg mbox page-sized dma buffer descs.
2365 *
2366 * This routine frees all DMA buffers and their associated buffer
2367 * descriptors referred to by @dmabuf_list.
2368 **/
2369static void
2370lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2371 struct list_head *dmabuf_list)
2372{
2373 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2374
2375 if (list_empty(dmabuf_list))
2376 return;
2377
2378 list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2379 list_del_init(&dmabuf->list);
2380 lpfc_bsg_dma_page_free(phba, dmabuf);
2381 }
2382 return;
2383}
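The _safe iterator is essential here because the loop body deletes the node it is visiting; plain list_for_each_entry() would chase a next pointer inside freed memory. The pattern in isolation (generic, descriptor-only free for illustration):

/* Illustrative: delete-while-iterating requires the _safe variant. */
static void drain_list_example(struct list_head *head)
{
	struct lpfc_dmabuf *cur, *next;

	/* 'next' caches the successor before 'cur' is torn down */
	list_for_each_entry_safe(cur, next, head, list) {
		list_del_init(&cur->list);
		kfree(cur);	/* frees only the descriptor in this sketch */
	}
}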
2384
2385/**
1854 * diag_cmd_data_alloc - fills in a bde struct with dma buffers 2386 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
1855 * @phba: Pointer to HBA context object 2387 * @phba: Pointer to HBA context object
1856 * @bpl: Pointer to 64 bit bde structure 2388 * @bpl: Pointer to 64 bit bde structure
@@ -2067,7 +2599,7 @@ err_post_rxbufs_exit:
2067} 2599}
2068 2600
2069/** 2601/**
2070 * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself 2602 * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself
2071 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job 2603 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2072 * 2604 *
2073 * This function receives a user data buffer to be transmitted and received on 2605 * This function receives a user data buffer to be transmitted and received on
@@ -2086,7 +2618,7 @@ err_post_rxbufs_exit:
2086 * of loopback mode. 2618 * of loopback mode.
2087 **/ 2619 **/
2088static int 2620static int
2089lpfc_bsg_diag_test(struct fc_bsg_job *job) 2621lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
2090{ 2622{
2091 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 2623 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2092 struct lpfc_hba *phba = vport->phba; 2624 struct lpfc_hba *phba = vport->phba;
@@ -2411,7 +2943,7 @@ job_error:
2411} 2943}
2412 2944
2413/** 2945/**
2414 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler 2946 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
2415 * @phba: Pointer to HBA context object. 2947 * @phba: Pointer to HBA context object.
2416 * @pmboxq: Pointer to mailbox command. 2948 * @pmboxq: Pointer to mailbox command.
2417 * 2949 *
@@ -2422,15 +2954,13 @@ job_error:
2422 * of the mailbox. 2954 * of the mailbox.
2423 **/ 2955 **/
2424void 2956void
2425lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2957lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2426{ 2958{
2427 struct bsg_job_data *dd_data; 2959 struct bsg_job_data *dd_data;
2428 struct fc_bsg_job *job; 2960 struct fc_bsg_job *job;
2429 struct lpfc_mbx_nembed_cmd *nembed_sge;
2430 uint32_t size; 2961 uint32_t size;
2431 unsigned long flags; 2962 unsigned long flags;
2432 uint8_t *to; 2963 uint8_t *pmb, *pmb_buf;
2433 uint8_t *from;
2434 2964
2435 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2965 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2436 dd_data = pmboxq->context1; 2966 dd_data = pmboxq->context1;
@@ -2440,62 +2970,21 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2440 return; 2970 return;
2441 } 2971 }
2442 2972
2443 /* build the outgoing buffer to do an sg copy 2973 /*
2444 * the format is the response mailbox followed by any extended 2974 * The outgoing buffer is readily referred from the dma buffer,
2445 * mailbox data 2975 * just need to get header part from mailboxq structure.
2446 */ 2976 */
2447 from = (uint8_t *)&pmboxq->u.mb; 2977 pmb = (uint8_t *)&pmboxq->u.mb;
2448 to = (uint8_t *)dd_data->context_un.mbox.mb; 2978 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
2449 memcpy(to, from, sizeof(MAILBOX_t)); 2979 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
2450 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
2451 /* copy the extended data if any, count is in words */
2452 if (dd_data->context_un.mbox.outExtWLen) {
2453 from = (uint8_t *)dd_data->context_un.mbox.ext;
2454 to += sizeof(MAILBOX_t);
2455 size = dd_data->context_un.mbox.outExtWLen *
2456 sizeof(uint32_t);
2457 memcpy(to, from, size);
2458 } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
2459 from = (uint8_t *)dd_data->context_un.mbox.
2460 dmp->dma.virt;
2461 to += sizeof(MAILBOX_t);
2462 size = dd_data->context_un.mbox.dmp->size;
2463 memcpy(to, from, size);
2464 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
2465 (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
2466 from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
2467 virt;
2468 to += sizeof(MAILBOX_t);
2469 size = pmboxq->u.mb.un.varWords[5];
2470 memcpy(to, from, size);
2471 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
2472 (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
2473 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
2474 &pmboxq->u.mb.un.varWords[0];
2475
2476 from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
2477 virt;
2478 to += sizeof(MAILBOX_t);
2479 size = nembed_sge->sge[0].length;
2480 memcpy(to, from, size);
2481 } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
2482 from = (uint8_t *)dd_data->context_un.
2483 mbox.dmp->dma.virt;
2484 to += sizeof(MAILBOX_t);
2485 size = dd_data->context_un.mbox.dmp->size;
2486 memcpy(to, from, size);
2487 }
2488 }
2489 2980
2490 from = (uint8_t *)dd_data->context_un.mbox.mb;
2491 job = dd_data->context_un.mbox.set_job; 2981 job = dd_data->context_un.mbox.set_job;
2492 if (job) { 2982 if (job) {
2493 size = job->reply_payload.payload_len; 2983 size = job->reply_payload.payload_len;
2494 job->reply->reply_payload_rcv_len = 2984 job->reply->reply_payload_rcv_len =
2495 sg_copy_from_buffer(job->reply_payload.sg_list, 2985 sg_copy_from_buffer(job->reply_payload.sg_list,
2496 job->reply_payload.sg_cnt, 2986 job->reply_payload.sg_cnt,
2497 from, size); 2987 pmb_buf, size);
2498 job->reply->result = 0;
2499 /* need to hold the lock until we set job->dd_data to NULL 2988 /* need to hold the lock until we set job->dd_data to NULL
2500 * to hold off the timeout handler returning to the mid-layer 2989 * to hold off the timeout handler returning to the mid-layer
2501 * while we are still processing the job. 2990 * while we are still processing the job.
@@ -2503,28 +2992,19 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2503 job->dd_data = NULL; 2992 job->dd_data = NULL;
2504 dd_data->context_un.mbox.set_job = NULL; 2993 dd_data->context_un.mbox.set_job = NULL;
2505 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2994 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2506 job->job_done(job);
2507 } else { 2995 } else {
2508 dd_data->context_un.mbox.set_job = NULL; 2996 dd_data->context_un.mbox.set_job = NULL;
2509 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2997 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2510 } 2998 }
2511 2999
2512 kfree(dd_data->context_un.mbox.mb);
2513 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); 3000 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
2514 kfree(dd_data->context_un.mbox.ext); 3001 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
2515 if (dd_data->context_un.mbox.dmp) {
2516 dma_free_coherent(&phba->pcidev->dev,
2517 dd_data->context_un.mbox.dmp->size,
2518 dd_data->context_un.mbox.dmp->dma.virt,
2519 dd_data->context_un.mbox.dmp->dma.phys);
2520 kfree(dd_data->context_un.mbox.dmp);
2521 }
2522 if (dd_data->context_un.mbox.rxbmp) {
2523 lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
2524 dd_data->context_un.mbox.rxbmp->phys);
2525 kfree(dd_data->context_un.mbox.rxbmp);
2526 }
2527 kfree(dd_data); 3002 kfree(dd_data);
3003
3004 if (job) {
3005 job->reply->result = 0;
3006 job->job_done(job);
3007 }
2528 return; 3008 return;
2529} 3009}
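The handler clears job->dd_data while still holding ct_ev_lock, so the BSG timeout path can use a NULL dd_data as its "completion already in progress" signal. A sketch of that timeout side under this assumption (not the driver's actual timeout handler):

/* Assumed counterpart: the timeout path backs off once dd_data is NULL. */
static int lpfc_bsg_timeout_sketch(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	if (!job->dd_data) {
		/* completion owns the job now; nothing to cancel */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return 0;
	}
	/* ... otherwise it is still safe to tear the command down ... */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return -EBUSY;
}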
2530 3010
@@ -2619,6 +3099,1006 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
2619} 3099}
2620 3100
2621/** 3101/**
3102 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
3103 * @phba: Pointer to HBA context object.
3104 *
3105 * This routine cleans up and resets the BSG handling of a multi-buffer mbox
3106 * command session.
3107 **/
3108static void
3109lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3110{
3111 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3112 return;
3113
3114 /* free all memory, including dma buffers */
3115 lpfc_bsg_dma_page_list_free(phba,
3116 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3117 lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3118 /* multi-buffer write mailbox command pass-through complete */
3119 memset((char *)&phba->mbox_ext_buf_ctx, 0,
3120 sizeof(struct lpfc_mbox_ext_buf_ctx));
3121 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3122
3123 return;
3124}
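For orientation, the session states used by this file and the transitions visible in this patch (the enum itself lives in the lpfc headers; LPFC_BSG_MBOX_IDLE is assumed to be the zero value, since the reset above reaches it by memset):

/*
 * Multi-buffer mailbox session state transitions, as read from this patch:
 *
 *   IDLE -> HOST        first SLI_CONFIG buffer accepted from userspace
 *   HOST -> PORT        mailbox command issued to the port
 *   PORT -> DONE        completion handled (read sessions keep draining)
 *   PORT -> ABTS        abort requested while the command is with the port
 *   DONE/ABTS -> IDLE   lpfc_bsg_mbox_ext_session_reset()
 */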
3125
3126/**
3127 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3128 * @phba: Pointer to HBA context object.
3129 * @pmboxq: Pointer to mailbox command.
3130 *
3131 * This routine handles BSG job completion for mailbox commands with
3132 * multiple external buffers.
3133 **/
3134static struct fc_bsg_job *
3135lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3136{
3137 struct bsg_job_data *dd_data;
3138 struct fc_bsg_job *job;
3139 uint8_t *pmb, *pmb_buf;
3140 unsigned long flags;
3141 uint32_t size;
3142 int rc = 0;
3143
3144 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3145 dd_data = pmboxq->context1;
3146 /* has the job already timed out? */
3147 if (!dd_data) {
3148 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3149 job = NULL;
3150 goto job_done_out;
3151 }
3152
3153 /*
3154 * The outgoing buffer is readily referred from the dma buffer,
3155 * just need to get header part from mailboxq structure.
3156 */
3157 pmb = (uint8_t *)&pmboxq->u.mb;
3158 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3159 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3160
3161 job = dd_data->context_un.mbox.set_job;
3162 if (job) {
3163 size = job->reply_payload.payload_len;
3164 job->reply->reply_payload_rcv_len =
3165 sg_copy_from_buffer(job->reply_payload.sg_list,
3166 job->reply_payload.sg_cnt,
3167 pmb_buf, size);
3168 /* job result for success */
3169 job->reply->result = 0;
3170 job->dd_data = NULL;
3171 /* need to hold the lock until we set job->dd_data to NULL
3172 * to hold off the mid-layer timeout handler from taking
3173 * any action.
3174 */
3175 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3176 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3177 "2937 SLI_CONFIG ext-buffer maibox command "
3178 "(x%x/x%x) complete bsg job done, bsize:%d\n",
3179 phba->mbox_ext_buf_ctx.nembType,
3180 phba->mbox_ext_buf_ctx.mboxType, size);
3181 } else
3182 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3183
3184job_done_out:
3185 if (!job)
3186 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3187 "2938 SLI_CONFIG ext-buffer maibox "
3188 "command (x%x/x%x) failure, rc:x%x\n",
3189 phba->mbox_ext_buf_ctx.nembType,
3190 phba->mbox_ext_buf_ctx.mboxType, rc);
3191 /* state change */
3192 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3193 kfree(dd_data);
3194
3195 return job;
3196}
3197
3198/**
3199 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3200 * @phba: Pointer to HBA context object.
3201 * @pmboxq: Pointer to mailbox command.
3202 *
3203 * This is completion handler function for mailbox read commands with multiple
3204 * external buffers.
3205 **/
3206static void
3207lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3208{
3209 struct fc_bsg_job *job;
3210
3211 /* handle the BSG job with mailbox command */
3212 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3213 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3214
3215 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3216 "2939 SLI_CONFIG ext-buffer rd maibox command "
3217 "complete, ctxState:x%x, mbxStatus:x%x\n",
3218 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3219
3220 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3221
3222 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3223 lpfc_bsg_mbox_ext_session_reset(phba);
3224
3225 /* free base driver mailbox structure memory */
3226 mempool_free(pmboxq, phba->mbox_mem_pool);
3227
3228 /* complete the bsg job if we have it */
3229 if (job)
3230 job->job_done(job);
3231
3232 return;
3233}
3234
3235/**
3236 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3237 * @phba: Pointer to HBA context object.
3238 * @pmboxq: Pointer to mailbox command.
3239 *
3240 * This is completion handler function for mailbox write commands with multiple
3241 * external buffers.
3242 **/
3243static void
3244lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3245{
3246 struct fc_bsg_job *job;
3247
3248 /* handle the BSG job with the mailbox command */
3249 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3250 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3251
3252 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3253 "2940 SLI_CONFIG ext-buffer wr maibox command "
3254 "complete, ctxState:x%x, mbxStatus:x%x\n",
3255 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3256
3257 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3258
3259 /* free all memory, including dma buffers */
3260 mempool_free(pmboxq, phba->mbox_mem_pool);
3261 lpfc_bsg_mbox_ext_session_reset(phba);
3262
3263 /* complete the bsg job if we have it */
3264 if (job)
3265 job->job_done(job);
3266
3267 return;
3268}
3269
3270static void
3271lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3272 uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3273 struct lpfc_dmabuf *ext_dmabuf)
3274{
3275 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3276
3277 /* pointer to the start of mailbox command */
3278 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3279
3280 if (nemb_tp == nemb_mse) {
3281 if (index == 0) {
3282 sli_cfg_mbx->un.sli_config_emb0_subsys.
3283 mse[index].pa_hi =
3284 putPaddrHigh(mbx_dmabuf->phys +
3285 sizeof(MAILBOX_t));
3286 sli_cfg_mbx->un.sli_config_emb0_subsys.
3287 mse[index].pa_lo =
3288 putPaddrLow(mbx_dmabuf->phys +
3289 sizeof(MAILBOX_t));
3290 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3291 "2943 SLI_CONFIG(mse)[%d], "
3292 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3293 index,
3294 sli_cfg_mbx->un.sli_config_emb0_subsys.
3295 mse[index].buf_len,
3296 sli_cfg_mbx->un.sli_config_emb0_subsys.
3297 mse[index].pa_hi,
3298 sli_cfg_mbx->un.sli_config_emb0_subsys.
3299 mse[index].pa_lo);
3300 } else {
3301 sli_cfg_mbx->un.sli_config_emb0_subsys.
3302 mse[index].pa_hi =
3303 putPaddrHigh(ext_dmabuf->phys);
3304 sli_cfg_mbx->un.sli_config_emb0_subsys.
3305 mse[index].pa_lo =
3306 putPaddrLow(ext_dmabuf->phys);
3307 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3308 "2944 SLI_CONFIG(mse)[%d], "
3309 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3310 index,
3311 sli_cfg_mbx->un.sli_config_emb0_subsys.
3312 mse[index].buf_len,
3313 sli_cfg_mbx->un.sli_config_emb0_subsys.
3314 mse[index].pa_hi,
3315 sli_cfg_mbx->un.sli_config_emb0_subsys.
3316 mse[index].pa_lo);
3317 }
3318 } else {
3319 if (index == 0) {
3320 sli_cfg_mbx->un.sli_config_emb1_subsys.
3321 hbd[index].pa_hi =
3322 putPaddrHigh(mbx_dmabuf->phys +
3323 sizeof(MAILBOX_t));
3324 sli_cfg_mbx->un.sli_config_emb1_subsys.
3325 hbd[index].pa_lo =
3326 putPaddrLow(mbx_dmabuf->phys +
3327 sizeof(MAILBOX_t));
3328 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3329 "3007 SLI_CONFIG(hbd)[%d], "
3330 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3331 index,
3332 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3333 &sli_cfg_mbx->un.
3334 sli_config_emb1_subsys.hbd[index]),
3335 sli_cfg_mbx->un.sli_config_emb1_subsys.
3336 hbd[index].pa_hi,
3337 sli_cfg_mbx->un.sli_config_emb1_subsys.
3338 hbd[index].pa_lo);
3339
3340 } else {
3341 sli_cfg_mbx->un.sli_config_emb1_subsys.
3342 hbd[index].pa_hi =
3343 putPaddrHigh(ext_dmabuf->phys);
3344 sli_cfg_mbx->un.sli_config_emb1_subsys.
3345 hbd[index].pa_lo =
3346 putPaddrLow(ext_dmabuf->phys);
3347 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3348 "3008 SLI_CONFIG(hbd)[%d], "
3349 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3350 index,
3351 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3352 &sli_cfg_mbx->un.
3353 sli_config_emb1_subsys.hbd[index]),
3354 sli_cfg_mbx->un.sli_config_emb1_subsys.
3355 hbd[index].pa_hi,
3356 sli_cfg_mbx->un.sli_config_emb1_subsys.
3357 hbd[index].pa_lo);
3358 }
3359 }
3360 return;
3361}
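putPaddrHigh()/putPaddrLow() split the 64-bit bus address into the two 32-bit words the MSE/HBD descriptors carry. An open-coded equivalent for reference (sketch only; the lpfc macros remain the real interface):

/* Open-coded hi/lo split of a DMA address (illustrative). */
static inline uint32_t example_pa_hi(dma_addr_t pa)
{
	return (uint32_t)(((uint64_t)pa) >> 32);
}

static inline uint32_t example_pa_lo(dma_addr_t pa)
{
	return (uint32_t)((uint64_t)pa & 0xffffffffULL);
}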
3362
3363/**
3364 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
3365 * @phba: Pointer to HBA context object.
3366 * @job: Pointer to the fc_bsg_job object.
3367 * @nemb_tp: Enumerated type of the non-embedded mailbox command.
3368 * @dmabuf: Pointer to a DMA buffer descriptor.
3369 *
3370 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
3371 * non-embedded external buffers.
3372 **/
3373static int
3374lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3375 enum nemb_type nemb_tp,
3376 struct lpfc_dmabuf *dmabuf)
3377{
3378 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3379 struct dfc_mbox_req *mbox_req;
3380 struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3381 uint32_t ext_buf_cnt, ext_buf_index;
3382 struct lpfc_dmabuf *ext_dmabuf = NULL;
3383 struct bsg_job_data *dd_data = NULL;
3384 LPFC_MBOXQ_t *pmboxq = NULL;
3385 MAILBOX_t *pmb;
3386 uint8_t *pmbx;
3387 int rc, i;
3388
3389 mbox_req =
3390 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3391
3392 /* pointer to the start of mailbox command */
3393 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3394
3395 if (nemb_tp == nemb_mse) {
3396 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3397 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3398 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3399 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3400 "2945 Handled SLI_CONFIG(mse) rd, "
3401 "ext_buf_cnt(%d) out of range(%d)\n",
3402 ext_buf_cnt,
3403 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3404 rc = -ERANGE;
3405 goto job_error;
3406 }
3407 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3408 "2941 Handled SLI_CONFIG(mse) rd, "
3409 "ext_buf_cnt:%d\n", ext_buf_cnt);
3410 } else {
3411 /* sanity check on interface type for support */
3412 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3413 LPFC_SLI_INTF_IF_TYPE_2) {
3414 rc = -ENODEV;
3415 goto job_error;
3416 }
3417 /* nemb_tp == nemb_hbd */
3418 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3419 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3420 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3421 "2946 Handled SLI_CONFIG(hbd) rd, "
3422 "ext_buf_cnt(%d) out of range(%d)\n",
3423 ext_buf_cnt,
3424 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3425 rc = -ERANGE;
3426 goto job_error;
3427 }
3428 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3429 "2942 Handled SLI_CONFIG(hbd) rd, "
3430 "ext_buf_cnt:%d\n", ext_buf_cnt);
3431 }
3432
3433 /* reject a non-embedded mailbox command with no external buffer */
3434 if (ext_buf_cnt == 0) {
3435 rc = -EPERM;
3436 goto job_error;
3437 } else if (ext_buf_cnt > 1) {
3438 /* additional external read buffers */
3439 for (i = 1; i < ext_buf_cnt; i++) {
3440 ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3441 if (!ext_dmabuf) {
3442 rc = -ENOMEM;
3443 goto job_error;
3444 }
3445 list_add_tail(&ext_dmabuf->list,
3446 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3447 }
3448 }
3449
3450 /* bsg tracking structure */
3451 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3452 if (!dd_data) {
3453 rc = -ENOMEM;
3454 goto job_error;
3455 }
3456
3457 /* mailbox command structure for base driver */
3458 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3459 if (!pmboxq) {
3460 rc = -ENOMEM;
3461 goto job_error;
3462 }
3463 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3464
3465 /* for the first external buffer */
3466 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3467
3468 /* for the rest of external buffer descriptors if any */
3469 if (ext_buf_cnt > 1) {
3470 ext_buf_index = 1;
3471 list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3472 &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3473 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3474 ext_buf_index, dmabuf,
3475 curr_dmabuf);
3476 ext_buf_index++;
3477 }
3478 }
3479
3480 /* construct base driver mbox command */
3481 pmb = &pmboxq->u.mb;
3482 pmbx = (uint8_t *)dmabuf->virt;
3483 memcpy(pmb, pmbx, sizeof(*pmb));
3484 pmb->mbxOwner = OWN_HOST;
3485 pmboxq->vport = phba->pport;
3486
3487 /* multi-buffer handling context */
3488 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3489 phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3490 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3491 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3492 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3493 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3494
3495 /* callback for multi-buffer read mailbox command */
3496 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3497
3498 /* context fields to callback function */
3499 pmboxq->context1 = dd_data;
3500 dd_data->type = TYPE_MBOX;
3501 dd_data->context_un.mbox.pmboxq = pmboxq;
3502 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3503 dd_data->context_un.mbox.set_job = job;
3504 job->dd_data = dd_data;
3505
3506 /* state change */
3507 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3508
3509 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3510 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3511 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3512 "2947 Issued SLI_CONFIG ext-buffer "
3513 "maibox command, rc:x%x\n", rc);
3514 return 1;
3515 }
3516 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3517 "2948 Failed to issue SLI_CONFIG ext-buffer "
3518 "maibox command, rc:x%x\n", rc);
3519 rc = -EPIPE;
3520
3521job_error:
3522 if (pmboxq)
3523 mempool_free(pmboxq, phba->mbox_mem_pool);
3524 lpfc_bsg_dma_page_list_free(phba,
3525 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3526 kfree(dd_data);
3527 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
3528 return rc;
3529}
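Note how index 0 in the descriptor setup points at mbx_dmabuf->phys + sizeof(MAILBOX_t): the first external buffer shares the command's own DMA page. The resulting page layout (comment sketch; offsets follow from this patch):

/*
 * One BSG_MBOX_SIZE (4KB) DMA page as used for buffer index 0:
 *
 *   phys + 0                  MAILBOX_t header (copied to pmboxq->u.mb)
 *   phys + sizeof(MAILBOX_t)  first external payload, up to BSG_MBOX_SIZE
 *
 * Buffers with index >= 1 each occupy their own page from
 * lpfc_bsg_dma_page_alloc() and are addressed from offset 0.
 */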
3530
3531/**
3532 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
3533 * @phba: Pointer to HBA context object.
3534 * @job: Pointer to the fc_bsg_job object.
3535 * @nemb_tp: Enumerated type of the non-embedded mailbox command.
3536 * @dmabuf: Pointer to a DMA buffer descriptor.
3537 *
3538 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
3539 * non-embedded external buffers.
3539 **/
3540static int
3541lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3542 enum nemb_type nemb_tp,
3543 struct lpfc_dmabuf *dmabuf)
3544{
3545 struct dfc_mbox_req *mbox_req;
3546 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3547 uint32_t ext_buf_cnt;
3548 struct bsg_job_data *dd_data = NULL;
3549 LPFC_MBOXQ_t *pmboxq = NULL;
3550 MAILBOX_t *pmb;
3551 uint8_t *mbx;
3552 int rc = 0, i;
3553
3554 mbox_req =
3555 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3556
3557 /* pointer to the start of mailbox command */
3558 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3559
3560 if (nemb_tp == nemb_mse) {
3561 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3562 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3563 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3564 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3565 "2953 Handled SLI_CONFIG(mse) wr, "
3566 "ext_buf_cnt(%d) out of range(%d)\n",
3567 ext_buf_cnt,
3568 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3569 return -ERANGE;
3570 }
3571 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3572 "2949 Handled SLI_CONFIG(mse) wr, "
3573 "ext_buf_cnt:%d\n", ext_buf_cnt);
3574 } else {
3575 /* sanity check on interface type for support */
3576 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3577 LPFC_SLI_INTF_IF_TYPE_2)
3578 return -ENODEV;
3579 /* nemb_tp == nemb_hbd */
3580 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3581 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3582 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3583 "2954 Handled SLI_CONFIG(hbd) wr, "
3584 "ext_buf_cnt(%d) out of range(%d)\n",
3585 ext_buf_cnt,
3586 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3587 return -ERANGE;
3588 }
3589 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3590 "2950 Handled SLI_CONFIG(hbd) wr, "
3591 "ext_buf_cnt:%d\n", ext_buf_cnt);
3592 }
3593
3594 if (ext_buf_cnt == 0)
3595 return -EPERM;
3596
3597 /* for the first external buffer */
3598 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3599
3600 /* log the lengths of the remaining external buffers */
3601 for (i = 1; i < ext_buf_cnt; i++) {
3602 if (nemb_tp == nemb_mse)
3603 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3604 "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
3605 i, sli_cfg_mbx->un.sli_config_emb0_subsys.
3606 mse[i].buf_len);
3607 else
3608 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3609 "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
3610 i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3611 &sli_cfg_mbx->un.sli_config_emb1_subsys.
3612 hbd[i]));
3613 }
3614
3615 /* multi-buffer handling context */
3616 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3617 phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
3618 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3619 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3620 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3621 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3622
3623 if (ext_buf_cnt == 1) {
3624 /* bsg tracking structure */
3625 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3626 if (!dd_data) {
3627 rc = -ENOMEM;
3628 goto job_error;
3629 }
3630
3631 /* mailbox command structure for base driver */
3632 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3633 if (!pmboxq) {
3634 rc = -ENOMEM;
3635 goto job_error;
3636 }
3637 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3638 pmb = &pmboxq->u.mb;
3639 mbx = (uint8_t *)dmabuf->virt;
3640 memcpy(pmb, mbx, sizeof(*pmb));
3641 pmb->mbxOwner = OWN_HOST;
3642 pmboxq->vport = phba->pport;
3643
3644 /* callback for multi-buffer write mailbox command */
3645 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
3646
3647 /* context fields to callback function */
3648 pmboxq->context1 = dd_data;
3649 dd_data->type = TYPE_MBOX;
3650 dd_data->context_un.mbox.pmboxq = pmboxq;
3651 dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
3652 dd_data->context_un.mbox.set_job = job;
3653 job->dd_data = dd_data;
3654
3655 /* state change */
3656 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3657
3658 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3659 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3660 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3661 "2955 Issued SLI_CONFIG ext-buffer "
3662 "maibox command, rc:x%x\n", rc);
3663 return 1;
3664 }
3665 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3666 "2956 Failed to issue SLI_CONFIG ext-buffer "
3667 "maibox command, rc:x%x\n", rc);
3668 rc = -EPIPE;
3669 }
3670
3671job_error:
3672 if (pmboxq)
3673 mempool_free(pmboxq, phba->mbox_mem_pool);
3674 kfree(dd_data);
3675
3676 return rc;
3677}
3678
3679/**
3680 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
3681 * @phba: Pointer to HBA context object.
3682 * @job: Pointer to the fc_bsg_job object.
3683 * @dmabuf: Pointer to a DMA buffer descriptor.
3684 *
3685 * This routine handles SLI_CONFIG (0x9B) mailbox commands with non-embedded
3686 * external buffers, covering both 0x9B commands with non-embedded MSEs and
3687 * 0x9B commands with embedded subsystem 0x1 opcodes using external HBDs.
3688 **/
3689static int
3690lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3691 struct lpfc_dmabuf *dmabuf)
3692{
3693 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3694 uint32_t subsys;
3695 uint32_t opcode;
3696 int rc = SLI_CONFIG_NOT_HANDLED;
3697
3698 /* state change */
3699 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
3700
3701 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3702
3703 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3704 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3705 subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
3706 &sli_cfg_mbx->un.sli_config_emb0_subsys);
3707 opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
3708 &sli_cfg_mbx->un.sli_config_emb0_subsys);
3709 if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
3710 switch (opcode) {
3711 case FCOE_OPCODE_READ_FCF:
3712 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3713 "2957 Handled SLI_CONFIG "
3714 "subsys_fcoe, opcode:x%x\n",
3715 opcode);
3716 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3717 nemb_mse, dmabuf);
3718 break;
3719 case FCOE_OPCODE_ADD_FCF:
3720 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3721 "2958 Handled SLI_CONFIG "
3722 "subsys_fcoe, opcode:x%x\n",
3723 opcode);
3724 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
3725 nemb_mse, dmabuf);
3726 break;
3727 default:
3728 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3729 "2959 Not handled SLI_CONFIG "
3730 "subsys_fcoe, opcode:x%x\n",
3731 opcode);
3732 rc = SLI_CONFIG_NOT_HANDLED;
3733 break;
3734 }
3735 } else {
3736 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3737 "2977 Handled SLI_CONFIG "
3738 "subsys:x%d, opcode:x%x\n",
3739 subsys, opcode);
3740 rc = SLI_CONFIG_NOT_HANDLED;
3741 }
3742 } else {
3743 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
3744 &sli_cfg_mbx->un.sli_config_emb1_subsys);
3745 opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
3746 &sli_cfg_mbx->un.sli_config_emb1_subsys);
3747 if (subsys == SLI_CONFIG_SUBSYS_COMN) {
3748 switch (opcode) {
3749 case COMN_OPCODE_READ_OBJECT:
3750 case COMN_OPCODE_READ_OBJECT_LIST:
3751 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3752 "2960 Handled SLI_CONFIG "
3753 "subsys_comn, opcode:x%x\n",
3754 opcode);
3755 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3756 nemb_hbd, dmabuf);
3757 break;
3758 case COMN_OPCODE_WRITE_OBJECT:
3759 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3760 "2961 Handled SLI_CONFIG "
3761 "subsys_comn, opcode:x%x\n",
3762 opcode);
3763 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
3764 nemb_hbd, dmabuf);
3765 break;
3766 default:
3767 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3768 "2962 Not handled SLI_CONFIG "
3769 "subsys_comn, opcode:x%x\n",
3770 opcode);
3771 rc = SLI_CONFIG_NOT_HANDLED;
3772 break;
3773 }
3774 } else {
3775 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3776 "2978 Handled SLI_CONFIG "
3777 "subsys:x%d, opcode:x%x\n",
3778 subsys, opcode);
3779 rc = SLI_CONFIG_NOT_HANDLED;
3780 }
3781 }
3782 return rc;
3783}
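The dispatch above decodes the subsystem/opcode pair from different unions depending on the embedded bit. Condensed into a standalone decode step (helper name illustrative; field accessors taken from this patch):

/* Illustrative decode of an SLI_CONFIG header into subsystem/opcode. */
static void example_sli_cfg_decode(struct lpfc_sli_config_mbox *mbx,
				   uint32_t *subsys, uint32_t *opcode)
{
	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
			&mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
		/* non-embedded form: emb0 union, MSE descriptors */
		*subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
				     &mbx->un.sli_config_emb0_subsys);
		*opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
				     &mbx->un.sli_config_emb0_subsys);
	} else {
		/* embedded form: emb1 union, HBD descriptors */
		*subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
				     &mbx->un.sli_config_emb1_subsys);
		*opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
				     &mbx->un.sli_config_emb1_subsys);
	}
}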
3784
3785/**
3786 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
3787 * @phba: Pointer to HBA context object.
3788 *
3789 * This routine requests the abort of a pass-through mailbox command with
3790 * multiple external buffers due to an error condition.
3791 **/
3792static void
3793lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
3794{
3795 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
3796 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
3797 else
3798 lpfc_bsg_mbox_ext_session_reset(phba);
3799 return;
3800}
3801
3802/**
3803 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
3804 * @phba: Pointer to HBA context object.
3805 * @job: Pointer to the fc_bsg_job object.
3806 *
3807 * This routine copies the next mailbox read external buffer back to
3808 * user space through BSG.
3809 **/
3810static int
3811lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
3812{
3813 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3814 struct lpfc_dmabuf *dmabuf;
3815 uint8_t *pbuf;
3816 uint32_t size;
3817 uint32_t index;
3818
3819 index = phba->mbox_ext_buf_ctx.seqNum;
3820 phba->mbox_ext_buf_ctx.seqNum++;
3821
3822 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
3823 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3824
3825 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
3826 size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
3827 &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
3828 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3829 "2963 SLI_CONFIG (mse) ext-buffer rd get "
3830 "buffer[%d], size:%d\n", index, size);
3831 } else {
3832 size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3833 &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
3834 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3835 "2964 SLI_CONFIG (hbd) ext-buffer rd get "
3836 "buffer[%d], size:%d\n", index, size);
3837 }
3838 if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
3839 return -EPIPE;
3840 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
3841 struct lpfc_dmabuf, list);
3842 list_del_init(&dmabuf->list);
3843 pbuf = (uint8_t *)dmabuf->virt;
3844 job->reply->reply_payload_rcv_len =
3845 sg_copy_from_buffer(job->reply_payload.sg_list,
3846 job->reply_payload.sg_cnt,
3847 pbuf, size);
3848
3849 lpfc_bsg_dma_page_free(phba, dmabuf);
3850
3851 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
3852 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3853 "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
3854 "command session done\n");
3855 lpfc_bsg_mbox_ext_session_reset(phba);
3856 }
3857
3858 job->reply->result = 0;
3859 job->job_done(job);
3860
3861 return SLI_CONFIG_HANDLED;
3862}
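sg_copy_from_buffer() is what moves the kernel DMA page out through the caller's scatter-gather reply; sg_copy_to_buffer() is its mirror in the write path below. The read-side copy reduced to a one-call wrapper (illustrative only):

/* Illustrative wrapper: copy a kernel buffer into a BSG reply payload. */
static size_t example_reply_copy(struct fc_bsg_job *job, void *src, size_t len)
{
	return sg_copy_from_buffer(job->reply_payload.sg_list,
				   job->reply_payload.sg_cnt,
				   src, len);
}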
3863
3864/**
3865 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
3866 * @phba: Pointer to HBA context object.
3867 * @job: Pointer to the fc_bsg_job object.
3868 * @dmabuf: Pointer to a DMA buffer descriptor.
3869 *
3870 * This routine sets up the next mailbox write external buffer obtained
3871 * from user space through BSG.
3871 **/
3872static int
3873lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
3874 struct lpfc_dmabuf *dmabuf)
3875{
3876 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3877 struct bsg_job_data *dd_data = NULL;
3878 LPFC_MBOXQ_t *pmboxq = NULL;
3879 MAILBOX_t *pmb;
3880 enum nemb_type nemb_tp;
3881 uint8_t *pbuf;
3882 uint32_t size;
3883 uint32_t index;
3884 int rc;
3885
3886 index = phba->mbox_ext_buf_ctx.seqNum;
3887 phba->mbox_ext_buf_ctx.seqNum++;
3888 nemb_tp = phba->mbox_ext_buf_ctx.nembType;
3889
3890 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
3891 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3892
3893 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3894 if (!dd_data) {
3895 rc = -ENOMEM;
3896 goto job_error;
3897 }
3898
3899 pbuf = (uint8_t *)dmabuf->virt;
3900 size = job->request_payload.payload_len;
3901 sg_copy_to_buffer(job->request_payload.sg_list,
3902 job->request_payload.sg_cnt,
3903 pbuf, size);
3904
3905 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
3906 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3907 "2966 SLI_CONFIG (mse) ext-buffer wr set "
3908 "buffer[%d], size:%d\n",
3909 phba->mbox_ext_buf_ctx.seqNum, size);
3910
3911 } else {
3912 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3913 "2967 SLI_CONFIG (hbd) ext-buffer wr set "
3914 "buffer[%d], size:%d\n",
3915 phba->mbox_ext_buf_ctx.seqNum, size);
3916
3917 }
3918
3919 /* set up external buffer descriptor and add to external buffer list */
3920 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
3921 phba->mbox_ext_buf_ctx.mbx_dmabuf,
3922 dmabuf);
3923 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3924
3925 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
3926 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3927 "2968 SLI_CONFIG ext-buffer wr all %d "
3928 "ebuffers received\n",
3929 phba->mbox_ext_buf_ctx.numBuf);
3930 /* mailbox command structure for base driver */
3931 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3932 if (!pmboxq) {
3933 rc = -ENOMEM;
3934 goto job_error;
3935 }
3936 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3937 pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3938 pmb = &pmboxq->u.mb;
3939 memcpy(pmb, pbuf, sizeof(*pmb));
3940 pmb->mbxOwner = OWN_HOST;
3941 pmboxq->vport = phba->pport;
3942
3943 /* callback for multi-buffer write mailbox command */
3944 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
3945
3946 /* context fields to callback function */
3947 pmboxq->context1 = dd_data;
3948 dd_data->type = TYPE_MBOX;
3949 dd_data->context_un.mbox.pmboxq = pmboxq;
3950 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
3951 dd_data->context_un.mbox.set_job = job;
3952 job->dd_data = dd_data;
3953
3954 /* state change */
3955 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3956
3957 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3958 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3959 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3960 "2969 Issued SLI_CONFIG ext-buffer "
3961 "maibox command, rc:x%x\n", rc);
3962 return 1;
3963 }
3964 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3965 "2970 Failed to issue SLI_CONFIG ext-buffer "
3966 "maibox command, rc:x%x\n", rc);
3967 rc = -EPIPE;
3968 goto job_error;
3969 }
3970
3971 /* wait for additional external buffers */
3972 job->reply->result = 0;
3973 job->job_done(job);
3974 return SLI_CONFIG_HANDLED;
3975
3976job_error:
3977 lpfc_bsg_dma_page_free(phba, dmabuf);
3978 kfree(dd_data);
3979
3980 return rc;
3981}
3982
3983/**
3984 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
3985 * @phba: Pointer to HBA context object.
3986 * @job: Pointer to the fc_bsg_job object.
3987 * @dmabuf: Pointer to a DMA buffer descriptor.
3988 *
3989 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
3990 * command with multiple non-embedded external buffers.
3991 **/
3992static int
3993lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
3994 struct lpfc_dmabuf *dmabuf)
3995{
3996 int rc;
3997
3998 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3999 "2971 SLI_CONFIG buffer (type:x%x)\n",
4000 phba->mbox_ext_buf_ctx.mboxType);
4001
4002 if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4003 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4004 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4005 "2972 SLI_CONFIG rd buffer state "
4006 "mismatch:x%x\n",
4007 phba->mbox_ext_buf_ctx.state);
4008 lpfc_bsg_mbox_ext_abort(phba);
4009 return -EPIPE;
4010 }
4011 rc = lpfc_bsg_read_ebuf_get(phba, job);
4012 if (rc == SLI_CONFIG_HANDLED)
4013 lpfc_bsg_dma_page_free(phba, dmabuf);
4014 } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4015 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4016 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4017 "2973 SLI_CONFIG wr buffer state "
4018 "mismatch:x%x\n",
4019 phba->mbox_ext_buf_ctx.state);
4020 lpfc_bsg_mbox_ext_abort(phba);
4021 return -EPIPE;
4022 }
4023 rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4024 }
4025 return rc;
4026}
4027
4028/**
4029 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4030 * @phba: Pointer to HBA context object.
4031 * @job: Pointer to the fc_bsg_job object.
4032 * @dmabuf: Pointer to a DMA buffer descriptor.
4033 *
4034 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
4035 * (0x9B) mailbox commands and external buffers.
4036 **/
4037static int
4038lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
4039 struct lpfc_dmabuf *dmabuf)
4040{
4041 struct dfc_mbox_req *mbox_req;
4042 int rc;
4043
4044 mbox_req =
4045 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4046
4047 /* mbox command with/without single external buffer */
4048 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4049 return SLI_CONFIG_NOT_HANDLED;
4050
4051 /* mbox command and first external buffer */
4052 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4053 if (mbox_req->extSeqNum == 1) {
4054 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4055 "2974 SLI_CONFIG mailbox: tag:%d, "
4056 "seq:%d\n", mbox_req->extMboxTag,
4057 mbox_req->extSeqNum);
4058 rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4059 return rc;
4060 } else
4061 goto sli_cfg_ext_error;
4062 }
4063
4064 /*
4065 * handle additional external buffers
4066 */
4067
4068 /* check broken pipe conditions */
4069 if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4070 goto sli_cfg_ext_error;
4071 if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4072 goto sli_cfg_ext_error;
4073 if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4074 goto sli_cfg_ext_error;
4075
4076 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4077 "2975 SLI_CONFIG mailbox external buffer: "
4078 "extSta:x%x, tag:%d, seq:%d\n",
4079 phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4080 mbox_req->extSeqNum);
4081 rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4082 return rc;
4083
4084sli_cfg_ext_error:
4085 /* all other cases, broken pipe */
4086 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4087 "2976 SLI_CONFIG mailbox broken pipe: "
4088 "ctxSta:x%x, ctxNumBuf:%d "
4089 "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4090 phba->mbox_ext_buf_ctx.state,
4091 phba->mbox_ext_buf_ctx.numBuf,
4092 phba->mbox_ext_buf_ctx.mbxTag,
4093 phba->mbox_ext_buf_ctx.seqNum,
4094 mbox_req->extMboxTag, mbox_req->extSeqNum);
4095
4096 lpfc_bsg_mbox_ext_session_reset(phba);
4097
4098 return -EPIPE;
4099}
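The three broken-pipe checks above require a follow-on buffer to carry the open session's tag, stay within the advertised buffer count, and arrive strictly in sequence. Folded into one predicate (sketch; field names from this patch):

/* Illustrative: does this request continue the open multi-buffer session? */
static bool example_ebuf_in_sequence(struct lpfc_hba *phba,
				     struct dfc_mbox_req *req)
{
	struct lpfc_mbox_ext_buf_ctx *ctx = &phba->mbox_ext_buf_ctx;

	return req->extMboxTag == ctx->mbxTag &&    /* same session    */
	       req->extSeqNum <= ctx->numBuf &&     /* within bounds   */
	       req->extSeqNum == ctx->seqNum + 1;   /* strictly next   */
}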
4100
4101/**
2622 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app 4102 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
2623 * @phba: Pointer to HBA context object. 4103 * @phba: Pointer to HBA context object.
2624 * @mb: Pointer to a mailbox object. 4104 * @mb: Pointer to a mailbox object.
@@ -2638,22 +4118,21 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2638 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */ 4118 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
2639 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */ 4119 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
2640 /* a 4k buffer to hold the mb and extended data from/to the bsg */ 4120 /* a 4k buffer to hold the mb and extended data from/to the bsg */
2641 MAILBOX_t *mb = NULL; 4121 uint8_t *pmbx = NULL;
2642 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */ 4122 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
2643 uint32_t size; 4123 struct lpfc_dmabuf *dmabuf = NULL;
2644 struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */ 4124 struct dfc_mbox_req *mbox_req;
2645 struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
2646 struct ulp_bde64 *rxbpl = NULL;
2647 struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
2648 job->request->rqst_data.h_vendor.vendor_cmd;
2649 struct READ_EVENT_LOG_VAR *rdEventLog; 4125 struct READ_EVENT_LOG_VAR *rdEventLog;
2650 uint32_t transmit_length, receive_length, mode; 4126 uint32_t transmit_length, receive_length, mode;
4127 struct lpfc_mbx_sli4_config *sli4_config;
2651 struct lpfc_mbx_nembed_cmd *nembed_sge; 4128 struct lpfc_mbx_nembed_cmd *nembed_sge;
2652 struct mbox_header *header; 4129 struct mbox_header *header;
2653 struct ulp_bde64 *bde; 4130 struct ulp_bde64 *bde;
2654 uint8_t *ext = NULL; 4131 uint8_t *ext = NULL;
2655 int rc = 0; 4132 int rc = 0;
2656 uint8_t *from; 4133 uint8_t *from;
4134 uint32_t size;
4135
2657 4136
2658 /* in case no data is transferred */ 4137 /* in case no data is transferred */
2659 job->reply->reply_payload_rcv_len = 0; 4138 job->reply->reply_payload_rcv_len = 0;
@@ -2665,6 +4144,18 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2665 goto job_done; 4144 goto job_done;
2666 } 4145 }
2667 4146
4147 /*
4148 * Don't allow mailbox commands to be sent when blocked or when in
4149 * the middle of discovery
4150 */
4151 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4152 rc = -EAGAIN;
4153 goto job_done;
4154 }
4155
4156 mbox_req =
4157 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4158
2668 /* check if requested extended data lengths are valid */ 4159 /* check if requested extended data lengths are valid */
2669 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) || 4160 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
2670 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) { 4161 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
@@ -2672,6 +4163,32 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2672 goto job_done; 4163 goto job_done;
2673 } 4164 }
2674 4165
4166 dmabuf = lpfc_bsg_dma_page_alloc(phba);
4167 if (!dmabuf || !dmabuf->virt) {
4168 rc = -ENOMEM;
4169 goto job_done;
4170 }
4171
4172 /* Get the mailbox command or external buffer from BSG */
4173 pmbx = (uint8_t *)dmabuf->virt;
4174 size = job->request_payload.payload_len;
4175 sg_copy_to_buffer(job->request_payload.sg_list,
4176 job->request_payload.sg_cnt, pmbx, size);
4177
4178 /* Handle possible SLI_CONFIG with non-embedded payloads */
4179 if (phba->sli_rev == LPFC_SLI_REV4) {
4180 rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4181 if (rc == SLI_CONFIG_HANDLED)
4182 goto job_cont;
4183 if (rc)
4184 goto job_done;
4185 /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4186 }
4187
4188 rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4189 if (rc != 0)
4190 goto job_done; /* must be negative */
4191
2675 /* allocate our bsg tracking structure */ 4192 /* allocate our bsg tracking structure */
2676 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 4193 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
2677 if (!dd_data) { 4194 if (!dd_data) {
@@ -2681,12 +4198,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2681 goto job_done; 4198 goto job_done;
2682 } 4199 }
2683 4200
2684 mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
2685 if (!mb) {
2686 rc = -ENOMEM;
2687 goto job_done;
2688 }
2689
2690 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4201 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2691 if (!pmboxq) { 4202 if (!pmboxq) {
2692 rc = -ENOMEM; 4203 rc = -ENOMEM;
@@ -2694,17 +4205,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2694 } 4205 }
2695 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 4206 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
2696 4207
2697 size = job->request_payload.payload_len;
2698 sg_copy_to_buffer(job->request_payload.sg_list,
2699 job->request_payload.sg_cnt,
2700 mb, size);
2701
2702 rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
2703 if (rc != 0)
2704 goto job_done; /* must be negative */
2705
2706 pmb = &pmboxq->u.mb; 4208 pmb = &pmboxq->u.mb;
2707 memcpy(pmb, mb, sizeof(*pmb)); 4209 memcpy(pmb, pmbx, sizeof(*pmb));
2708 pmb->mbxOwner = OWN_HOST; 4210 pmb->mbxOwner = OWN_HOST;
2709 pmboxq->vport = vport; 4211 pmboxq->vport = vport;
2710 4212
@@ -2721,30 +4223,13 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2721 "0x%x while in stopped state.\n", 4223 "0x%x while in stopped state.\n",
2722 pmb->mbxCommand); 4224 pmb->mbxCommand);
2723 4225
2724 /* Don't allow mailbox commands to be sent when blocked
2725 * or when in the middle of discovery
2726 */
2727 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
2728 rc = -EAGAIN;
2729 goto job_done;
2730 }
2731
2732 /* extended mailbox commands will need an extended buffer */ 4226 /* extended mailbox commands will need an extended buffer */
2733 if (mbox_req->inExtWLen || mbox_req->outExtWLen) { 4227 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
2734 ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
2735 if (!ext) {
2736 rc = -ENOMEM;
2737 goto job_done;
2738 }
2739
2740 /* any data for the device? */ 4228 /* any data for the device? */
2741 if (mbox_req->inExtWLen) { 4229 if (mbox_req->inExtWLen) {
2742 from = (uint8_t *)mb; 4230 from = pmbx;
2743 from += sizeof(MAILBOX_t); 4231 ext = from + sizeof(MAILBOX_t);
2744 memcpy((uint8_t *)ext, from,
2745 mbox_req->inExtWLen * sizeof(uint32_t));
2746 } 4232 }
2747
2748 pmboxq->context2 = ext; 4233 pmboxq->context2 = ext;
2749 pmboxq->in_ext_byte_len = 4234 pmboxq->in_ext_byte_len =
2750 mbox_req->inExtWLen * sizeof(uint32_t); 4235 mbox_req->inExtWLen * sizeof(uint32_t);
@@ -2768,46 +4253,17 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2768 rc = -ERANGE; 4253 rc = -ERANGE;
2769 goto job_done; 4254 goto job_done;
2770 } 4255 }
2771
2772 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2773 if (!rxbmp) {
2774 rc = -ENOMEM;
2775 goto job_done;
2776 }
2777
2778 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2779 if (!rxbmp->virt) {
2780 rc = -ENOMEM;
2781 goto job_done;
2782 }
2783
2784 INIT_LIST_HEAD(&rxbmp->list);
2785 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2786 dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
2787 if (!dmp) {
2788 rc = -ENOMEM;
2789 goto job_done;
2790 }
2791
2792 INIT_LIST_HEAD(&dmp->dma.list);
2793 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh = 4256 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
2794 putPaddrHigh(dmp->dma.phys); 4257 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
2795 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow = 4258 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
2796 putPaddrLow(dmp->dma.phys); 4259 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
2797 4260
2798 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh = 4261 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
2799 putPaddrHigh(dmp->dma.phys + 4262 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
2800 pmb->un.varBIUdiag.un.s2. 4263 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
2801 xmit_bde64.tus.f.bdeSize);
2802 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow = 4264 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
2803 putPaddrLow(dmp->dma.phys + 4265 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
2804 pmb->un.varBIUdiag.un.s2. 4266 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
2805 xmit_bde64.tus.f.bdeSize);
2806
2807 /* copy the transmit data found in the mailbox extension area */
2808 from = (uint8_t *)mb;
2809 from += sizeof(MAILBOX_t);
2810 memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
2811 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) { 4267 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
2812 rdEventLog = &pmb->un.varRdEventLog; 4268 rdEventLog = &pmb->un.varRdEventLog;
2813 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize; 4269 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
@@ -2823,33 +4279,10 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2823 4279
2824 /* mode zero uses a bde like biu diags command */ 4280 /* mode zero uses a bde like biu diags command */
2825 if (mode == 0) { 4281 if (mode == 0) {
2826 4282 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
2827 /* rebuild the command for sli4 using our own buffers 4283 + sizeof(MAILBOX_t));
2828 * like we do for biu diags 4284 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
2829 */ 4285 + sizeof(MAILBOX_t));
2830
2831 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2832 if (!rxbmp) {
2833 rc = -ENOMEM;
2834 goto job_done;
2835 }
2836
2837 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2838 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2839 if (rxbpl) {
2840 INIT_LIST_HEAD(&rxbmp->list);
2841 dmp = diag_cmd_data_alloc(phba, rxbpl,
2842 receive_length, 0);
2843 }
2844
2845 if (!dmp) {
2846 rc = -ENOMEM;
2847 goto job_done;
2848 }
2849
2850 INIT_LIST_HEAD(&dmp->dma.list);
2851 pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
2852 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
2853 } 4286 }
2854 } else if (phba->sli_rev == LPFC_SLI_REV4) { 4287 } else if (phba->sli_rev == LPFC_SLI_REV4) {
2855 if (pmb->mbxCommand == MBX_DUMP_MEMORY) { 4288 if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
@@ -2860,36 +4293,14 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2860 /* receive length cannot be greater than mailbox 4293 /* receive length cannot be greater than mailbox
2861 * extension size 4294 * extension size
2862 */ 4295 */
2863 if ((receive_length == 0) || 4296 if (receive_length == 0) {
2864 (receive_length > MAILBOX_EXT_SIZE)) {
2865 rc = -ERANGE; 4297 rc = -ERANGE;
2866 goto job_done; 4298 goto job_done;
2867 } 4299 }
2868 4300 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
2869 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4301 + sizeof(MAILBOX_t));
2870 if (!rxbmp) { 4302 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
2871 rc = -ENOMEM; 4303 + sizeof(MAILBOX_t));
2872 goto job_done;
2873 }
2874
2875 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2876 if (!rxbmp->virt) {
2877 rc = -ENOMEM;
2878 goto job_done;
2879 }
2880
2881 INIT_LIST_HEAD(&rxbmp->list);
2882 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2883 dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
2884 0);
2885 if (!dmp) {
2886 rc = -ENOMEM;
2887 goto job_done;
2888 }
2889
2890 INIT_LIST_HEAD(&dmp->dma.list);
2891 pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
2892 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
2893 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) && 4304 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
2894 pmb->un.varUpdateCfg.co) { 4305 pmb->un.varUpdateCfg.co) {
2895 bde = (struct ulp_bde64 *)&pmb->un.varWords[4]; 4306 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
@@ -2899,102 +4310,53 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2899 rc = -ERANGE; 4310 rc = -ERANGE;
2900 goto job_done; 4311 goto job_done;
2901 } 4312 }
2902 4313 bde->addrHigh = putPaddrHigh(dmabuf->phys
2903 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4314 + sizeof(MAILBOX_t));
2904 if (!rxbmp) { 4315 bde->addrLow = putPaddrLow(dmabuf->phys
2905 rc = -ENOMEM; 4316 + sizeof(MAILBOX_t));
2906 goto job_done;
2907 }
2908
2909 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2910 if (!rxbmp->virt) {
2911 rc = -ENOMEM;
2912 goto job_done;
2913 }
2914
2915 INIT_LIST_HEAD(&rxbmp->list);
2916 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2917 dmp = diag_cmd_data_alloc(phba, rxbpl,
2918 bde->tus.f.bdeSize, 0);
2919 if (!dmp) {
2920 rc = -ENOMEM;
2921 goto job_done;
2922 }
2923
2924 INIT_LIST_HEAD(&dmp->dma.list);
2925 bde->addrHigh = putPaddrHigh(dmp->dma.phys);
2926 bde->addrLow = putPaddrLow(dmp->dma.phys);
2927
2928 /* copy the transmit data found in the mailbox
2929 * extension area
2930 */
2931 from = (uint8_t *)mb;
2932 from += sizeof(MAILBOX_t);
2933 memcpy((uint8_t *)dmp->dma.virt, from,
2934 bde->tus.f.bdeSize);
2935 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) { 4317 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
2936 /* rebuild the command for sli4 using our own buffers 4318 /* Handling non-embedded SLI_CONFIG mailbox command */
2937 * like we do for biu diags 4319 sli4_config = &pmboxq->u.mqe.un.sli4_config;
2938 */ 4320 if (!bf_get(lpfc_mbox_hdr_emb,
2939 header = (struct mbox_header *)&pmb->un.varWords[0]; 4321 &sli4_config->header.cfg_mhdr)) {
2940 nembed_sge = (struct lpfc_mbx_nembed_cmd *) 4322 /* rebuild the command for sli4 using our
2941 &pmb->un.varWords[0]; 4323 * own buffers like we do for biu diags
2942 receive_length = nembed_sge->sge[0].length; 4324 */
2943 4325 header = (struct mbox_header *)
2944 /* receive length cannot be greater than mailbox 4326 &pmb->un.varWords[0];
2945 * extension size 4327 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
2946 */ 4328 &pmb->un.varWords[0];
2947 if ((receive_length == 0) || 4329 receive_length = nembed_sge->sge[0].length;
2948 (receive_length > MAILBOX_EXT_SIZE)) { 4330
2949 rc = -ERANGE; 4331 /* receive length cannot be greater than
2950 goto job_done; 4332 * mailbox extension size
2951 } 4333 */
2952 4334 if ((receive_length == 0) ||
2953 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4335 (receive_length > MAILBOX_EXT_SIZE)) {
2954 if (!rxbmp) { 4336 rc = -ERANGE;
2955 rc = -ENOMEM; 4337 goto job_done;
2956 goto job_done; 4338 }
2957 }
2958
2959 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2960 if (!rxbmp->virt) {
2961 rc = -ENOMEM;
2962 goto job_done;
2963 }
2964 4339
2965 INIT_LIST_HEAD(&rxbmp->list); 4340 nembed_sge->sge[0].pa_hi =
2966 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 4341 putPaddrHigh(dmabuf->phys
2967 dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length, 4342 + sizeof(MAILBOX_t));
2968 0); 4343 nembed_sge->sge[0].pa_lo =
2969 if (!dmp) { 4344 putPaddrLow(dmabuf->phys
2970 rc = -ENOMEM; 4345 + sizeof(MAILBOX_t));
2971 goto job_done;
2972 } 4346 }
2973
2974 INIT_LIST_HEAD(&dmp->dma.list);
2975 nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys);
2976 nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys);
2977 /* copy the transmit data found in the mailbox
2978 * extension area
2979 */
2980 from = (uint8_t *)mb;
2981 from += sizeof(MAILBOX_t);
2982 memcpy((uint8_t *)dmp->dma.virt, from,
2983 header->cfg_mhdr.payload_length);
2984 } 4347 }
2985 } 4348 }
2986 4349
2987 dd_data->context_un.mbox.rxbmp = rxbmp; 4350 dd_data->context_un.mbox.dmabuffers = dmabuf;
2988 dd_data->context_un.mbox.dmp = dmp;
2989 4351
2990 /* setup wake call as IOCB callback */ 4352 /* setup wake call as IOCB callback */
2991 pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait; 4353 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
2992 4354
2993 /* setup context field to pass wait_queue pointer to wake function */ 4355 /* setup context field to pass wait_queue pointer to wake function */
2994 pmboxq->context1 = dd_data; 4356 pmboxq->context1 = dd_data;
2995 dd_data->type = TYPE_MBOX; 4357 dd_data->type = TYPE_MBOX;
2996 dd_data->context_un.mbox.pmboxq = pmboxq; 4358 dd_data->context_un.mbox.pmboxq = pmboxq;
2997 dd_data->context_un.mbox.mb = mb; 4359 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
2998 dd_data->context_un.mbox.set_job = job; 4360 dd_data->context_un.mbox.set_job = job;
2999 dd_data->context_un.mbox.ext = ext; 4361 dd_data->context_un.mbox.ext = ext;
3000 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset; 4362 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
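For reference: both arms of this change point the transmit BDE/SGE at the mailbox extension area, which sits immediately after the fixed MAILBOX_t header inside a single DMA buffer, and split the 64-bit physical address into the two 32-bit halves the hardware descriptor carries. A minimal sketch of that pattern, with the putPaddr helpers assumed to match the driver's own definitions:

#include <stdint.h>

/* Hedged sketch: splitting a 64-bit DMA address the way the driver's
 * putPaddrHigh/putPaddrLow helpers do (definitions assumed). */
#define putPaddrLow(addr)	((uint32_t)(((uint64_t)(addr)) & 0xffffffff))
#define putPaddrHigh(addr)	((uint32_t)(((uint64_t)(addr)) >> 32))

struct example_sge {		/* stand-in for the real SGE layout */
	uint32_t pa_lo;
	uint32_t pa_hi;
	uint32_t length;
};

/* Point an SGE at the extension area that follows the fixed-size
 * mailbox header within one DMA buffer, as the new code above does. */
static void set_ext_sge(struct example_sge *sge, uint64_t dma_phys,
			uint32_t hdr_size, uint32_t ext_len)
{
	uint64_t ext = dma_phys + hdr_size;	/* skip MAILBOX_t */

	sge->pa_hi = putPaddrHigh(ext);
	sge->pa_lo = putPaddrLow(ext);
	sge->length = ext_len;
}

Keeping the header and extension in one allocation is what lets the common exit path below release everything with the single lpfc_bsg_dma_page_free() call.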
@@ -3011,11 +4373,11 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3011 } 4373 }
3012 4374
3013 /* job finished, copy the data */ 4375 /* job finished, copy the data */
3014 memcpy(mb, pmb, sizeof(*pmb)); 4376 memcpy(pmbx, pmb, sizeof(*pmb));
3015 job->reply->reply_payload_rcv_len = 4377 job->reply->reply_payload_rcv_len =
3016 sg_copy_from_buffer(job->reply_payload.sg_list, 4378 sg_copy_from_buffer(job->reply_payload.sg_list,
3017 job->reply_payload.sg_cnt, 4379 job->reply_payload.sg_cnt,
3018 mb, size); 4380 pmbx, size);
3019 /* not waiting mbox already done */ 4381 /* not waiting mbox already done */
3020 rc = 0; 4382 rc = 0;
3021 goto job_done; 4383 goto job_done;
@@ -3027,22 +4389,12 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3027 4389
3028job_done: 4390job_done:
3029 /* common exit for error or job completed inline */ 4391 /* common exit for error or job completed inline */
3030 kfree(mb);
3031 if (pmboxq) 4392 if (pmboxq)
3032 mempool_free(pmboxq, phba->mbox_mem_pool); 4393 mempool_free(pmboxq, phba->mbox_mem_pool);
3033 kfree(ext); 4394 lpfc_bsg_dma_page_free(phba, dmabuf);
3034 if (dmp) {
3035 dma_free_coherent(&phba->pcidev->dev,
3036 dmp->size, dmp->dma.virt,
3037 dmp->dma.phys);
3038 kfree(dmp);
3039 }
3040 if (rxbmp) {
3041 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
3042 kfree(rxbmp);
3043 }
3044 kfree(dd_data); 4395 kfree(dd_data);
3045 4396
4397job_cont:
3046 return rc; 4398 return rc;
3047} 4399}
3048 4400
@@ -3055,37 +4407,28 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
3055{ 4407{
3056 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 4408 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
3057 struct lpfc_hba *phba = vport->phba; 4409 struct lpfc_hba *phba = vport->phba;
4410 struct dfc_mbox_req *mbox_req;
3058 int rc = 0; 4411 int rc = 0;
3059 4412
3060 /* in case no data is transferred */ 4413 /* mix-and-match backward compatibility */
3061 job->reply->reply_payload_rcv_len = 0; 4414 job->reply->reply_payload_rcv_len = 0;
3062 if (job->request_len < 4415 if (job->request_len <
3063 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) { 4416 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
3064 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 4417 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3065 "2737 Received MBOX_REQ request below " 4418 "2737 Mix-and-match backward compability "
3066 "minimum size\n"); 4419 "between MBOX_REQ old size:%d and "
3067 rc = -EINVAL; 4420 "new request size:%d\n",
3068 goto job_error; 4421 (int)(job->request_len -
3069 } 4422 sizeof(struct fc_bsg_request)),
3070 4423 (int)sizeof(struct dfc_mbox_req));
3071 if (job->request_payload.payload_len != BSG_MBOX_SIZE) { 4424 mbox_req = (struct dfc_mbox_req *)
3072 rc = -EINVAL; 4425 job->request->rqst_data.h_vendor.vendor_cmd;
3073 goto job_error; 4426 mbox_req->extMboxTag = 0;
3074 } 4427 mbox_req->extSeqNum = 0;
3075
3076 if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
3077 rc = -EINVAL;
3078 goto job_error;
3079 }
3080
3081 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
3082 rc = -EAGAIN;
3083 goto job_error;
3084 } 4428 }
3085 4429
3086 rc = lpfc_bsg_issue_mbox(phba, job, vport); 4430 rc = lpfc_bsg_issue_mbox(phba, job, vport);
3087 4431
3088job_error:
3089 if (rc == 0) { 4432 if (rc == 0) {
3090 /* job done */ 4433 /* job done */
3091 job->reply->result = 0; 4434 job->reply->result = 0;
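The mix-and-match check above keeps the vendor interface backward compatible: a caller built against the old, shorter dfc_mbox_req is detected by request length, and the two new trailing fields are defaulted to zero. A hedged sketch of that struct-growth pattern (names illustrative):

#include <stddef.h>
#include <stdint.h>

/* Hedged sketch: growing a request struct while staying compatible
 * with old callers. Layout mirrors dfc_mbox_req; names illustrative. */
struct mbox_req_v2 {
	uint32_t command;
	uint32_t mbOffset;
	uint32_t inExtWLen;
	uint32_t outExtWLen;
	uint32_t extMboxTag;	/* new in the extended layout */
	uint32_t extSeqNum;	/* new in the extended layout */
};

static void normalize_req(struct mbox_req_v2 *req, size_t caller_len)
{
	/* old caller sent the shorter struct: default the new fields */
	if (caller_len < sizeof(*req)) {
		req->extMboxTag = 0;
		req->extSeqNum = 0;
	}
}

Appending fields and zero-defaulting them for old callers avoids breaking an established binary interface.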
@@ -3416,10 +4759,16 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
3416 rc = lpfc_bsg_send_mgmt_rsp(job); 4759 rc = lpfc_bsg_send_mgmt_rsp(job);
3417 break; 4760 break;
3418 case LPFC_BSG_VENDOR_DIAG_MODE: 4761 case LPFC_BSG_VENDOR_DIAG_MODE:
3419 rc = lpfc_bsg_diag_mode(job); 4762 rc = lpfc_bsg_diag_loopback_mode(job);
4763 break;
4764 case LPFC_BSG_VENDOR_DIAG_MODE_END:
4765 rc = lpfc_sli4_bsg_diag_mode_end(job);
4766 break;
4767 case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
4768 rc = lpfc_bsg_diag_loopback_run(job);
3420 break; 4769 break;
3421 case LPFC_BSG_VENDOR_DIAG_TEST: 4770 case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
3422 rc = lpfc_bsg_diag_test(job); 4771 rc = lpfc_sli4_bsg_link_diag_test(job);
3423 break; 4772 break;
3424 case LPFC_BSG_VENDOR_GET_MGMT_REV: 4773 case LPFC_BSG_VENDOR_GET_MGMT_REV:
3425 rc = lpfc_bsg_get_dfc_rev(job); 4774 rc = lpfc_bsg_get_dfc_rev(job);
@@ -3538,6 +4887,8 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
3538 /* the mbox completion handler can now be run */ 4887 /* the mbox completion handler can now be run */
3539 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 4888 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3540 job->job_done(job); 4889 job->job_done(job);
4890 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4891 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
3541 break; 4892 break;
3542 case TYPE_MENLO: 4893 case TYPE_MENLO:
3543 menlo = &dd_data->context_un.menlo; 4894 menlo = &dd_data->context_un.menlo;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index b542aca6f5ae..c8c2b47ea886 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -24,15 +24,17 @@
24 * These are the vendor unique structures passed in using the bsg 24 * These are the vendor unique structures passed in using the bsg
25 * FC_BSG_HST_VENDOR message code type. 25 * FC_BSG_HST_VENDOR message code type.
26 */ 26 */
27#define LPFC_BSG_VENDOR_SET_CT_EVENT 1 27#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
28#define LPFC_BSG_VENDOR_GET_CT_EVENT 2 28#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
29#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3 29#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
30#define LPFC_BSG_VENDOR_DIAG_MODE 4 30#define LPFC_BSG_VENDOR_DIAG_MODE 4
31#define LPFC_BSG_VENDOR_DIAG_TEST 5 31#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5
32#define LPFC_BSG_VENDOR_GET_MGMT_REV 6 32#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
33#define LPFC_BSG_VENDOR_MBOX 7 33#define LPFC_BSG_VENDOR_MBOX 7
34#define LPFC_BSG_VENDOR_MENLO_CMD 8 34#define LPFC_BSG_VENDOR_MENLO_CMD 8
35#define LPFC_BSG_VENDOR_MENLO_DATA 9 35#define LPFC_BSG_VENDOR_MENLO_DATA 9
36#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
37#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
36 38
37struct set_ct_event { 39struct set_ct_event {
38 uint32_t command; 40 uint32_t command;
@@ -67,10 +69,25 @@ struct diag_mode_set {
67 uint32_t timeout; 69 uint32_t timeout;
68}; 70};
69 71
72struct sli4_link_diag {
73 uint32_t command;
74 uint32_t timeout;
75 uint32_t test_id;
76 uint32_t loops;
77 uint32_t test_version;
78 uint32_t error_action;
79};
80
70struct diag_mode_test { 81struct diag_mode_test {
71 uint32_t command; 82 uint32_t command;
72}; 83};
73 84
85struct diag_status {
86 uint32_t mbox_status;
87 uint32_t shdr_status;
88 uint32_t shdr_add_status;
89};
90
74#define LPFC_WWNN_TYPE 0 91#define LPFC_WWNN_TYPE 0
75#define LPFC_WWPN_TYPE 1 92#define LPFC_WWPN_TYPE 1
76 93
@@ -92,11 +109,15 @@ struct get_mgmt_rev_reply {
92}; 109};
93 110
94#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */ 111#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
112
113/* BSG mailbox request header */
95struct dfc_mbox_req { 114struct dfc_mbox_req {
96 uint32_t command; 115 uint32_t command;
97 uint32_t mbOffset; 116 uint32_t mbOffset;
98 uint32_t inExtWLen; 117 uint32_t inExtWLen;
99 uint32_t outExtWLen; 118 uint32_t outExtWLen;
119 uint32_t extMboxTag;
120 uint32_t extSeqNum;
100}; 121};
101 122
102/* Used for menlo command or menlo data. The xri is only used for menlo data */ 123/* Used for menlo command or menlo data. The xri is only used for menlo data */
@@ -171,7 +192,7 @@ struct lpfc_sli_config_mse {
171#define lpfc_mbox_sli_config_mse_len_WORD buf_len 192#define lpfc_mbox_sli_config_mse_len_WORD buf_len
172}; 193};
173 194
174struct lpfc_sli_config_subcmd_hbd { 195struct lpfc_sli_config_hbd {
175 uint32_t buf_len; 196 uint32_t buf_len;
176#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT 0 197#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT 0
177#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff 198#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff
@@ -194,21 +215,39 @@ struct lpfc_sli_config_hdr {
194 uint32_t reserved5; 215 uint32_t reserved5;
195}; 216};
196 217
197struct lpfc_sli_config_generic { 218struct lpfc_sli_config_emb0_subsys {
198 struct lpfc_sli_config_hdr sli_config_hdr; 219 struct lpfc_sli_config_hdr sli_config_hdr;
199#define LPFC_MBX_SLI_CONFIG_MAX_MSE 19 220#define LPFC_MBX_SLI_CONFIG_MAX_MSE 19
200 struct lpfc_sli_config_mse mse[LPFC_MBX_SLI_CONFIG_MAX_MSE]; 221 struct lpfc_sli_config_mse mse[LPFC_MBX_SLI_CONFIG_MAX_MSE];
222 uint32_t padding;
223 uint32_t word64;
224#define lpfc_emb0_subcmnd_opcode_SHIFT 0
225#define lpfc_emb0_subcmnd_opcode_MASK 0xff
226#define lpfc_emb0_subcmnd_opcode_WORD word64
227#define lpfc_emb0_subcmnd_subsys_SHIFT 8
228#define lpfc_emb0_subcmnd_subsys_MASK 0xff
229#define lpfc_emb0_subcmnd_subsys_WORD word64
230/* Subsystem FCOE (0x0C) OpCodes */
231#define SLI_CONFIG_SUBSYS_FCOE 0x0C
232#define FCOE_OPCODE_READ_FCF 0x08
233#define FCOE_OPCODE_ADD_FCF 0x09
201}; 234};
202 235
203struct lpfc_sli_config_subcmnd { 236struct lpfc_sli_config_emb1_subsys {
204 struct lpfc_sli_config_hdr sli_config_hdr; 237 struct lpfc_sli_config_hdr sli_config_hdr;
205 uint32_t word6; 238 uint32_t word6;
206#define lpfc_subcmnd_opcode_SHIFT 0 239#define lpfc_emb1_subcmnd_opcode_SHIFT 0
207#define lpfc_subcmnd_opcode_MASK 0xff 240#define lpfc_emb1_subcmnd_opcode_MASK 0xff
208#define lpfc_subcmnd_opcode_WORD word6 241#define lpfc_emb1_subcmnd_opcode_WORD word6
209#define lpfc_subcmnd_subsys_SHIFT 8 242#define lpfc_emb1_subcmnd_subsys_SHIFT 8
210#define lpfc_subcmnd_subsys_MASK 0xff 243#define lpfc_emb1_subcmnd_subsys_MASK 0xff
211#define lpfc_subcmnd_subsys_WORD word6 244#define lpfc_emb1_subcmnd_subsys_WORD word6
245/* Subsystem COMN (0x01) OpCodes */
246#define SLI_CONFIG_SUBSYS_COMN 0x01
247#define COMN_OPCODE_READ_OBJECT 0xAB
248#define COMN_OPCODE_WRITE_OBJECT 0xAC
249#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
250#define COMN_OPCODE_DELETE_OBJECT 0xAE
212 uint32_t timeout; 251 uint32_t timeout;
213 uint32_t request_length; 252 uint32_t request_length;
214 uint32_t word9; 253 uint32_t word9;
@@ -222,8 +261,8 @@ struct lpfc_sli_config_subcmnd {
222 uint32_t rd_offset; 261 uint32_t rd_offset;
223 uint32_t obj_name[26]; 262 uint32_t obj_name[26];
224 uint32_t hbd_count; 263 uint32_t hbd_count;
225#define LPFC_MBX_SLI_CONFIG_MAX_HBD 10 264#define LPFC_MBX_SLI_CONFIG_MAX_HBD 8
226 struct lpfc_sli_config_subcmd_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD]; 265 struct lpfc_sli_config_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
227}; 266};
228 267
229struct lpfc_sli_config_mbox { 268struct lpfc_sli_config_mbox {
@@ -235,7 +274,11 @@ struct lpfc_sli_config_mbox {
235#define lpfc_mqe_command_MASK 0x000000FF 274#define lpfc_mqe_command_MASK 0x000000FF
236#define lpfc_mqe_command_WORD word0 275#define lpfc_mqe_command_WORD word0
237 union { 276 union {
238 struct lpfc_sli_config_generic sli_config_generic; 277 struct lpfc_sli_config_emb0_subsys sli_config_emb0_subsys;
239 struct lpfc_sli_config_subcmnd sli_config_subcmnd; 278 struct lpfc_sli_config_emb1_subsys sli_config_emb1_subsys;
240 } un; 279 } un;
241}; 280};
281
282/* driver only */
283#define SLI_CONFIG_NOT_HANDLED 0
284#define SLI_CONFIG_HANDLED 1
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f0b332f4eedb..fc20c247f36b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -55,6 +55,8 @@ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
55void lpfc_supported_pages(struct lpfcMboxq *); 55void lpfc_supported_pages(struct lpfcMboxq *);
56void lpfc_pc_sli4_params(struct lpfcMboxq *); 56void lpfc_pc_sli4_params(struct lpfcMboxq *);
57int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *); 57int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
58int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
59 uint16_t, uint16_t, bool);
58int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *); 60int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
59struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); 61struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
60void lpfc_cleanup_rcv_buffers(struct lpfc_vport *); 62void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
@@ -171,6 +173,7 @@ void lpfc_delayed_disc_tmo(unsigned long);
171void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *); 173void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
172 174
173int lpfc_config_port_prep(struct lpfc_hba *); 175int lpfc_config_port_prep(struct lpfc_hba *);
176void lpfc_update_vport_wwn(struct lpfc_vport *vport);
174int lpfc_config_port_post(struct lpfc_hba *); 177int lpfc_config_port_post(struct lpfc_hba *);
175int lpfc_hba_down_prep(struct lpfc_hba *); 178int lpfc_hba_down_prep(struct lpfc_hba *);
176int lpfc_hba_down_post(struct lpfc_hba *); 179int lpfc_hba_down_post(struct lpfc_hba *);
@@ -365,6 +368,10 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
365 uint32_t, uint32_t); 368 uint32_t, uint32_t);
366extern struct lpfc_hbq_init *lpfc_hbq_defs[]; 369extern struct lpfc_hbq_init *lpfc_hbq_defs[];
367 370
371/* SLI4 if_type 2 externs. */
372int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
373int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
374
368/* externs BlockGuard */ 375/* externs BlockGuard */
369extern char *_dump_buf_data; 376extern char *_dump_buf_data;
370extern unsigned long _dump_buf_data_order; 377extern unsigned long _dump_buf_data_order;
@@ -429,3 +436,6 @@ void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
429void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *); 436void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
430struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t, 437struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
431 uint32_t); 438 uint32_t);
439int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
440/* functions to support SR-IOV */
441int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index d9edfd90d7ff..779b88e1469d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -352,6 +352,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
352 icmd->ulpLe = 1; 352 icmd->ulpLe = 1;
353 icmd->ulpClass = CLASS3; 353 icmd->ulpClass = CLASS3;
354 icmd->ulpContext = ndlp->nlp_rpi; 354 icmd->ulpContext = ndlp->nlp_rpi;
355 if (phba->sli_rev == LPFC_SLI_REV4)
356 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
355 357
356 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 358 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
357 /* For GEN_REQUEST64_CR, use the RPI */ 359 /* For GEN_REQUEST64_CR, use the RPI */
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index c93fca058603..ffe82d169b40 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1665,7 +1665,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
1665 /* Get fast-path complete queue information */ 1665 /* Get fast-path complete queue information */
1666 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1666 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1667 "Fast-path FCP CQ information:\n"); 1667 "Fast-path FCP CQ information:\n");
1668 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { 1668 fcp_qidx = 0;
1669 do {
1669 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1670 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1670 "Associated EQID[%02d]:\n", 1671 "Associated EQID[%02d]:\n",
1671 phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid); 1672 phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
@@ -1678,7 +1679,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
1678 phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size, 1679 phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
1679 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, 1680 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
1680 phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); 1681 phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
1681 } 1682 } while (++fcp_qidx < phba->cfg_fcp_eq_count);
1682 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); 1683 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
1683 1684
1684 /* Get mailbox queue information */ 1685 /* Get mailbox queue information */
@@ -2012,7 +2013,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2012 goto pass_check; 2013 goto pass_check;
2013 } 2014 }
2014 /* FCP complete queue */ 2015 /* FCP complete queue */
2015 for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) { 2016 qidx = 0;
2017 do {
2016 if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) { 2018 if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
2017 /* Sanity check */ 2019 /* Sanity check */
2018 rc = lpfc_idiag_que_param_check( 2020 rc = lpfc_idiag_que_param_check(
@@ -2024,7 +2026,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2024 phba->sli4_hba.fcp_cq[qidx]; 2026 phba->sli4_hba.fcp_cq[qidx];
2025 goto pass_check; 2027 goto pass_check;
2026 } 2028 }
2027 } 2029 } while (++qidx < phba->cfg_fcp_eq_count);
2028 goto error_out; 2030 goto error_out;
2029 break; 2031 break;
2030 case LPFC_IDIAG_MQ: 2032 case LPFC_IDIAG_MQ:
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e2c452467c8b..32a084534f3e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -250,7 +250,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
250 icmd->un.elsreq64.myID = vport->fc_myDID; 250 icmd->un.elsreq64.myID = vport->fc_myDID;
251 251
252 /* For ELS_REQUEST64_CR, use the VPI by default */ 252 /* For ELS_REQUEST64_CR, use the VPI by default */
253 icmd->ulpContext = vport->vpi + phba->vpi_base; 253 icmd->ulpContext = phba->vpi_ids[vport->vpi];
254 icmd->ulpCt_h = 0; 254 icmd->ulpCt_h = 0;
255 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 255 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
256 if (elscmd == ELS_CMD_ECHO) 256 if (elscmd == ELS_CMD_ECHO)
@@ -454,6 +454,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
454 rc = -ENOMEM; 454 rc = -ENOMEM;
455 goto fail_free_dmabuf; 455 goto fail_free_dmabuf;
456 } 456 }
457
457 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 458 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
458 if (!mboxq) { 459 if (!mboxq) {
459 rc = -ENOMEM; 460 rc = -ENOMEM;
@@ -6585,6 +6586,26 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6585{ 6586{
6586 struct lpfc_vport *vport; 6587 struct lpfc_vport *vport;
6587 unsigned long flags; 6588 unsigned long flags;
6589 int i;
6590
6591 /* The physical ports are always vpi 0 - translation is unnecessary. */
6592 if (vpi > 0) {
6593 /*
6594 * Translate the physical vpi to the logical vpi. The
6595 * vport stores the logical vpi.
6596 */
6597 for (i = 0; i < phba->max_vpi; i++) {
6598 if (vpi == phba->vpi_ids[i])
6599 break;
6600 }
6601
6602 if (i >= phba->max_vpi) {
6603 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
6604 "2936 Could not find Vport mapped "
6605 "to vpi %d\n", vpi);
6606 return NULL;
6607 }
6608 }
6588 6609
6589 spin_lock_irqsave(&phba->hbalock, flags); 6610 spin_lock_irqsave(&phba->hbalock, flags);
6590 list_for_each_entry(vport, &phba->port_list, listentry) { 6611 list_for_each_entry(vport, &phba->port_list, listentry) {
@@ -6641,8 +6662,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6641 vport = phba->pport; 6662 vport = phba->pport;
6642 else 6663 else
6643 vport = lpfc_find_vport_by_vpid(phba, 6664 vport = lpfc_find_vport_by_vpid(phba,
6644 icmd->unsli3.rcvsli3.vpi - phba->vpi_base); 6665 icmd->unsli3.rcvsli3.vpi);
6645 } 6666 }
6667
6646 /* If there are no BDEs associated 6668 /* If there are no BDEs associated
6647 * with this IOCB, there is nothing to do. 6669 * with this IOCB, there is nothing to do.
6648 */ 6670 */
@@ -7222,7 +7244,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
7222 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1; 7244 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
7223 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ; 7245 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
7224 /* Set the ulpContext to the vpi */ 7246 /* Set the ulpContext to the vpi */
7225 elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base; 7247 elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi];
7226 } else { 7248 } else {
7227 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */ 7249 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
7228 icmd->ulpCt_h = 1; 7250 icmd->ulpCt_h = 1;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7a35df5e2038..18d0dbfda2bc 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -881,7 +881,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
881 /* Clean up any firmware default rpi's */ 881 /* Clean up any firmware default rpi's */
882 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 882 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
883 if (mb) { 883 if (mb) {
884 lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb); 884 lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
885 mb->vport = vport; 885 mb->vport = vport;
886 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 886 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
887 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) 887 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
@@ -2690,16 +2690,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2690 2690
2691 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt, 2691 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
2692 sizeof (struct serv_parm)); 2692 sizeof (struct serv_parm));
2693 if (phba->cfg_soft_wwnn) 2693 lpfc_update_vport_wwn(vport);
2694 u64_to_wwn(phba->cfg_soft_wwnn,
2695 vport->fc_sparam.nodeName.u.wwn);
2696 if (phba->cfg_soft_wwpn)
2697 u64_to_wwn(phba->cfg_soft_wwpn,
2698 vport->fc_sparam.portName.u.wwn);
2699 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
2700 sizeof(vport->fc_nodename));
2701 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
2702 sizeof(vport->fc_portname));
2703 if (vport->port_type == LPFC_PHYSICAL_PORT) { 2694 if (vport->port_type == LPFC_PHYSICAL_PORT) {
2704 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn)); 2695 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
2705 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn)); 2696 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
@@ -3430,7 +3421,8 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3430 return; 3421 return;
3431 } 3422 }
3432 3423
3433 ndlp->nlp_rpi = mb->un.varWords[0]; 3424 if (phba->sli_rev < LPFC_SLI_REV4)
3425 ndlp->nlp_rpi = mb->un.varWords[0];
3434 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 3426 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3435 ndlp->nlp_type |= NLP_FABRIC; 3427 ndlp->nlp_type |= NLP_FABRIC;
3436 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3428 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3504,7 +3496,8 @@ out:
3504 return; 3496 return;
3505 } 3497 }
3506 3498
3507 ndlp->nlp_rpi = mb->un.varWords[0]; 3499 if (phba->sli_rev < LPFC_SLI_REV4)
3500 ndlp->nlp_rpi = mb->un.varWords[0];
3508 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 3501 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3509 ndlp->nlp_type |= NLP_FABRIC; 3502 ndlp->nlp_type |= NLP_FABRIC;
3510 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3503 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3591,7 +3584,6 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3591 if (ndlp->nlp_type & NLP_FCP_INITIATOR) 3584 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
3592 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; 3585 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
3593 3586
3594
3595 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) 3587 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
3596 fc_remote_port_rolechg(rport, rport_ids.roles); 3588 fc_remote_port_rolechg(rport, rport_ids.roles);
3597 3589
@@ -4106,11 +4098,16 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4106 struct lpfc_hba *phba = vport->phba; 4098 struct lpfc_hba *phba = vport->phba;
4107 LPFC_MBOXQ_t *mbox; 4099 LPFC_MBOXQ_t *mbox;
4108 int rc; 4100 int rc;
4101 uint16_t rpi;
4109 4102
4110 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { 4103 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4111 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4104 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4112 if (mbox) { 4105 if (mbox) {
4113 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); 4106 /* SLI4 ports require the physical rpi value. */
4107 rpi = ndlp->nlp_rpi;
4108 if (phba->sli_rev == LPFC_SLI_REV4)
4109 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4110 lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
4114 mbox->vport = vport; 4111 mbox->vport = vport;
4115 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4112 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4116 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4113 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4179,7 +4176,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
4179 4176
4180 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4177 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4181 if (mbox) { 4178 if (mbox) {
4182 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox); 4179 lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
4180 mbox);
4183 mbox->vport = vport; 4181 mbox->vport = vport;
4184 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4182 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4185 mbox->context1 = NULL; 4183 mbox->context1 = NULL;
@@ -4203,7 +4201,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
4203 4201
4204 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4202 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4205 if (mbox) { 4203 if (mbox) {
4206 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox); 4204 lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
4205 mbox);
4207 mbox->vport = vport; 4206 mbox->vport = vport;
4208 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4207 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4209 mbox->context1 = NULL; 4208 mbox->context1 = NULL;
@@ -4653,10 +4652,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
4653 if (num_sent) 4652 if (num_sent)
4654 return; 4653 return;
4655 4654
4656 /* 4655 /* Register the VPI for SLI3, NON-NPIV only. */
4657 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
4658 * continue discovery.
4659 */
4660 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4656 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4661 !(vport->fc_flag & FC_PT2PT) && 4657 !(vport->fc_flag & FC_PT2PT) &&
4662 !(vport->fc_flag & FC_RSCN_MODE) && 4658 !(vport->fc_flag & FC_RSCN_MODE) &&
@@ -4943,7 +4939,7 @@ restart_disc:
4943 if (phba->sli_rev < LPFC_SLI_REV4) { 4939 if (phba->sli_rev < LPFC_SLI_REV4) {
4944 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 4940 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
4945 lpfc_issue_reg_vpi(phba, vport); 4941 lpfc_issue_reg_vpi(phba, vport);
4946 else { /* NPIV Not enabled */ 4942 else {
4947 lpfc_issue_clear_la(phba, vport); 4943 lpfc_issue_clear_la(phba, vport);
4948 vport->port_state = LPFC_VPORT_READY; 4944 vport->port_state = LPFC_VPORT_READY;
4949 } 4945 }
@@ -5069,7 +5065,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5069 pmb->context1 = NULL; 5065 pmb->context1 = NULL;
5070 pmb->context2 = NULL; 5066 pmb->context2 = NULL;
5071 5067
5072 ndlp->nlp_rpi = mb->un.varWords[0]; 5068 if (phba->sli_rev < LPFC_SLI_REV4)
5069 ndlp->nlp_rpi = mb->un.varWords[0];
5073 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 5070 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
5074 ndlp->nlp_type |= NLP_FABRIC; 5071 ndlp->nlp_type |= NLP_FABRIC;
5075 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 5072 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -5354,6 +5351,17 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
5354 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 5351 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
5355 shost = lpfc_shost_from_vport(vports[i]); 5352 shost = lpfc_shost_from_vport(vports[i]);
5356 spin_lock_irq(shost->host_lock); 5353 spin_lock_irq(shost->host_lock);
5354 /*
5355 * If the CVL_RCVD bit is not set then we have sent the
5356 * flogi.
5357 * If dev_loss fires while we are waiting we do not want to
5358 * unreg the fcf.
5359 */
5360 if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
5361 spin_unlock_irq(shost->host_lock);
5362 ret = 1;
5363 goto out;
5364 }
5357 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { 5365 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
5358 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport && 5366 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
5359 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { 5367 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 86b6f7e6686a..9059524cf225 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -64,6 +64,8 @@
64#define SLI3_IOCB_CMD_SIZE 128 64#define SLI3_IOCB_CMD_SIZE 128
65#define SLI3_IOCB_RSP_SIZE 64 65#define SLI3_IOCB_RSP_SIZE 64
66 66
67#define LPFC_UNREG_ALL_RPIS_VPORT 0xffff
68#define LPFC_UNREG_ALL_DFLT_RPIS 0xffffffff
67 69
68/* vendor ID used in SCSI netlink calls */ 70/* vendor ID used in SCSI netlink calls */
69#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX) 71#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
@@ -903,6 +905,8 @@ struct RRQ { /* Structure is in Big Endian format */
903#define rrq_rxid_WORD rrq_exchg 905#define rrq_rxid_WORD rrq_exchg
904}; 906};
905 907
908#define LPFC_MAX_VFN_PER_PFN 255 /* Maximum VFs allowed per ARI */
909#define LPFC_DEF_VFN_PER_PFN 0 /* Default VFs due to platform limitation*/
906 910
907struct RTV_RSP { /* Structure is in Big Endian format */ 911struct RTV_RSP { /* Structure is in Big Endian format */
908 uint32_t ratov; 912 uint32_t ratov;
@@ -1199,7 +1203,9 @@ typedef struct {
1199#define PCI_DEVICE_ID_BALIUS 0xe131 1203#define PCI_DEVICE_ID_BALIUS 0xe131
1200#define PCI_DEVICE_ID_PROTEUS_PF 0xe180 1204#define PCI_DEVICE_ID_PROTEUS_PF 0xe180
1201#define PCI_DEVICE_ID_LANCER_FC 0xe200 1205#define PCI_DEVICE_ID_LANCER_FC 0xe200
1206#define PCI_DEVICE_ID_LANCER_FC_VF 0xe208
1202#define PCI_DEVICE_ID_LANCER_FCOE 0xe260 1207#define PCI_DEVICE_ID_LANCER_FCOE 0xe260
1208#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
1203#define PCI_DEVICE_ID_SAT_SMB 0xf011 1209#define PCI_DEVICE_ID_SAT_SMB 0xf011
1204#define PCI_DEVICE_ID_SAT_MID 0xf015 1210#define PCI_DEVICE_ID_SAT_MID 0xf015
1205#define PCI_DEVICE_ID_RFLY 0xf095 1211#define PCI_DEVICE_ID_RFLY 0xf095
@@ -3021,7 +3027,7 @@ typedef struct {
3021#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t)) 3027#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t))
3022#define MAILBOX_HBA_EXT_OFFSET 0x100 3028#define MAILBOX_HBA_EXT_OFFSET 0x100
3023/* max mbox xmit size is a page size for sysfs IO operations */ 3029/* max mbox xmit size is a page size for sysfs IO operations */
3024#define MAILBOX_MAX_XMIT_SIZE PAGE_SIZE 3030#define MAILBOX_SYSFS_MAX 4096
3025 3031
3026typedef union { 3032typedef union {
3027 uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/ 3033 uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 4dff668ebdad..11e26a26b5d1 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -170,6 +170,25 @@ struct lpfc_sli_intf {
170#define LPFC_PCI_FUNC3 3 170#define LPFC_PCI_FUNC3 3
171#define LPFC_PCI_FUNC4 4 171#define LPFC_PCI_FUNC4 4
172 172
173/* SLI4 interface type-2 control register offsets */
174#define LPFC_CTL_PORT_SEM_OFFSET 0x400
175#define LPFC_CTL_PORT_STA_OFFSET 0x404
176#define LPFC_CTL_PORT_CTL_OFFSET 0x408
177#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
178#define LPFC_CTL_PORT_ER2_OFFSET 0x410
179#define LPFC_CTL_PDEV_CTL_OFFSET 0x414
180
181/* Some SLI4 interface type-2 PDEV_CTL register bits */
182#define LPFC_CTL_PDEV_CTL_DRST 0x00000001
183#define LPFC_CTL_PDEV_CTL_FRST 0x00000002
184#define LPFC_CTL_PDEV_CTL_DD 0x00000004
185#define LPFC_CTL_PDEV_CTL_LC 0x00000008
186#define LPFC_CTL_PDEV_CTL_FRL_ALL 0x00
187#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE 0x10
188#define LPFC_CTL_PDEV_CTL_FRL_NIC 0x20
189
190#define LPFC_FW_DUMP_REQUEST (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
191
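These if_type-2 control registers are reached by adding the offsets above to the port's mapped control BAR. A hedged sketch of typical accessors, assuming that BAR has already been ioremap'ed (helper names hypothetical):

#include <linux/io.h>

/* Hedged sketch: reading/poking if_type-2 control registers through
 * the port's mapped control BAR (mapping and call sites assumed). */
static inline u32 port_status(void __iomem *ctl_base)
{
	return readl(ctl_base + LPFC_CTL_PORT_STA_OFFSET);
}

static inline void request_fw_dump(void __iomem *ctl_base)
{
	/* DD | FRST: capture a dump, then reset the function */
	writel(LPFC_FW_DUMP_REQUEST, ctl_base + LPFC_CTL_PDEV_CTL_OFFSET);
}

LPFC_FW_DUMP_REQUEST combines the DD and FRST bits, so a single write both requests the dump and resets the function.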
173/* Active interrupt test count */ 192/* Active interrupt test count */
174#define LPFC_ACT_INTR_CNT 4 193#define LPFC_ACT_INTR_CNT 4
175 194
@@ -210,9 +229,26 @@ struct ulp_bde64 {
210 229
211struct lpfc_sli4_flags { 230struct lpfc_sli4_flags {
212 uint32_t word0; 231 uint32_t word0;
213#define lpfc_fip_flag_SHIFT 0 232#define lpfc_idx_rsrc_rdy_SHIFT 0
214#define lpfc_fip_flag_MASK 0x00000001 233#define lpfc_idx_rsrc_rdy_MASK 0x00000001
215#define lpfc_fip_flag_WORD word0 234#define lpfc_idx_rsrc_rdy_WORD word0
235#define LPFC_IDX_RSRC_RDY 1
236#define lpfc_xri_rsrc_rdy_SHIFT 1
237#define lpfc_xri_rsrc_rdy_MASK 0x00000001
238#define lpfc_xri_rsrc_rdy_WORD word0
239#define LPFC_XRI_RSRC_RDY 1
240#define lpfc_rpi_rsrc_rdy_SHIFT 2
241#define lpfc_rpi_rsrc_rdy_MASK 0x00000001
242#define lpfc_rpi_rsrc_rdy_WORD word0
243#define LPFC_RPI_RSRC_RDY 1
244#define lpfc_vpi_rsrc_rdy_SHIFT 3
245#define lpfc_vpi_rsrc_rdy_MASK 0x00000001
246#define lpfc_vpi_rsrc_rdy_WORD word0
247#define LPFC_VPI_RSRC_RDY 1
248#define lpfc_vfi_rsrc_rdy_SHIFT 4
249#define lpfc_vfi_rsrc_rdy_MASK 0x00000001
250#define lpfc_vfi_rsrc_rdy_WORD word0
251#define LPFC_VFI_RSRC_RDY 1
216}; 252};
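Each _SHIFT/_MASK/_WORD triple above is consumed by the driver's token-pasting bf_get()/bf_set() accessors (as used earlier in the lpfc_mbox_hdr_emb test). A self-contained sketch of how such accessors work:

#include <stdint.h>

/* Sketch of lpfc-style bitfield accessors: a field name expands to its
 * _SHIFT/_MASK/_WORD companions via token pasting. */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))

struct demo_flags {
	uint32_t word0;		/* backing storage named by _WORD */
};
#define demo_rpi_rsrc_rdy_SHIFT	2
#define demo_rpi_rsrc_rdy_MASK	0x00000001
#define demo_rpi_rsrc_rdy_WORD	word0

/* Usage:
 *	struct demo_flags f = { 0 };
 *	bf_set(demo_rpi_rsrc_rdy, &f, 1);
 *	if (bf_get(demo_rpi_rsrc_rdy, &f))
 *		...;
 */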
217 253
218struct sli4_bls_rsp { 254struct sli4_bls_rsp {
@@ -739,6 +775,12 @@ union lpfc_sli4_cfg_shdr {
739#define lpfc_mbox_hdr_version_SHIFT 0 775#define lpfc_mbox_hdr_version_SHIFT 0
740#define lpfc_mbox_hdr_version_MASK 0x000000FF 776#define lpfc_mbox_hdr_version_MASK 0x000000FF
741#define lpfc_mbox_hdr_version_WORD word9 777#define lpfc_mbox_hdr_version_WORD word9
778#define lpfc_mbox_hdr_pf_num_SHIFT 16
779#define lpfc_mbox_hdr_pf_num_MASK 0x000000FF
780#define lpfc_mbox_hdr_pf_num_WORD word9
781#define lpfc_mbox_hdr_vh_num_SHIFT 24
782#define lpfc_mbox_hdr_vh_num_MASK 0x000000FF
783#define lpfc_mbox_hdr_vh_num_WORD word9
742#define LPFC_Q_CREATE_VERSION_2 2 784#define LPFC_Q_CREATE_VERSION_2 2
743#define LPFC_Q_CREATE_VERSION_1 1 785#define LPFC_Q_CREATE_VERSION_1 1
744#define LPFC_Q_CREATE_VERSION_0 0 786#define LPFC_Q_CREATE_VERSION_0 0
@@ -766,12 +808,22 @@ union lpfc_sli4_cfg_shdr {
766 } response; 808 } response;
767}; 809};
768 810
769/* Mailbox structures */ 811/* Mailbox Header structures.
812 * struct mbox_header is defined for first generation SLI4_CFG mailbox
813 * calls deployed for BE-based ports.
814 *
815 * struct sli4_mbox_header is defined for second generation SLI4
816 * ports that don't deploy the SLI4_CFG mechanism.
817 */
770struct mbox_header { 818struct mbox_header {
771 struct lpfc_sli4_cfg_mhdr cfg_mhdr; 819 struct lpfc_sli4_cfg_mhdr cfg_mhdr;
772 union lpfc_sli4_cfg_shdr cfg_shdr; 820 union lpfc_sli4_cfg_shdr cfg_shdr;
773}; 821};
774 822
823#define LPFC_EXTENT_LOCAL 0
824#define LPFC_TIMEOUT_DEFAULT 0
825#define LPFC_EXTENT_VERSION_DEFAULT 0
826
775/* Subsystem Definitions */ 827/* Subsystem Definitions */
776#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1 828#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
777#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC 829#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
@@ -794,6 +846,13 @@ struct mbox_header {
794#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A 846#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
795#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D 847#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
796#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A 848#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
849#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
850#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
851#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
852#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D
853#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0
854#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4
855#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC
797#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5 856#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
798 857
799/* FCoE Opcodes */ 858/* FCoE Opcodes */
@@ -808,6 +867,8 @@ struct mbox_header {
808#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A 867#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
809#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B 868#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
810#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10 869#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
870#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
871#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
811 872
812/* Mailbox command structures */ 873/* Mailbox command structures */
813struct eq_context { 874struct eq_context {
@@ -1210,6 +1271,187 @@ struct lpfc_mbx_mq_destroy {
1210 } u; 1271 } u;
1211}; 1272};
1212 1273
1274/* Start Gen 2 SLI4 Mailbox definitions: */
1275
1276/* Define allocate-ready Gen 2 SLI4 FCoE Resource Extent Types. */
1277#define LPFC_RSC_TYPE_FCOE_VFI 0x20
1278#define LPFC_RSC_TYPE_FCOE_VPI 0x21
1279#define LPFC_RSC_TYPE_FCOE_RPI 0x22
1280#define LPFC_RSC_TYPE_FCOE_XRI 0x23
1281
1282struct lpfc_mbx_get_rsrc_extent_info {
1283 struct mbox_header header;
1284 union {
1285 struct {
1286 uint32_t word4;
1287#define lpfc_mbx_get_rsrc_extent_info_type_SHIFT 0
1288#define lpfc_mbx_get_rsrc_extent_info_type_MASK 0x0000FFFF
1289#define lpfc_mbx_get_rsrc_extent_info_type_WORD word4
1290 } req;
1291 struct {
1292 uint32_t word4;
1293#define lpfc_mbx_get_rsrc_extent_info_cnt_SHIFT 0
1294#define lpfc_mbx_get_rsrc_extent_info_cnt_MASK 0x0000FFFF
1295#define lpfc_mbx_get_rsrc_extent_info_cnt_WORD word4
1296#define lpfc_mbx_get_rsrc_extent_info_size_SHIFT 16
1297#define lpfc_mbx_get_rsrc_extent_info_size_MASK 0x0000FFFF
1298#define lpfc_mbx_get_rsrc_extent_info_size_WORD word4
1299 } rsp;
1300 } u;
1301};
1302
1303struct lpfc_id_range {
1304 uint32_t word5;
1305#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
1306#define lpfc_mbx_rsrc_id_word4_0_MASK 0x0000FFFF
1307#define lpfc_mbx_rsrc_id_word4_0_WORD word5
1308#define lpfc_mbx_rsrc_id_word4_1_SHIFT 16
1309#define lpfc_mbx_rsrc_id_word4_1_MASK 0x0000FFFF
1310#define lpfc_mbx_rsrc_id_word4_1_WORD word5
1311};
1312
1313struct lpfc_mbx_set_link_diag_state {
1314 struct mbox_header header;
1315 union {
1316 struct {
1317 uint32_t word0;
1318#define lpfc_mbx_set_diag_state_diag_SHIFT 0
1319#define lpfc_mbx_set_diag_state_diag_MASK 0x00000001
1320#define lpfc_mbx_set_diag_state_diag_WORD word0
1321#define lpfc_mbx_set_diag_state_link_num_SHIFT 16
1322#define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F
1323#define lpfc_mbx_set_diag_state_link_num_WORD word0
1324#define lpfc_mbx_set_diag_state_link_type_SHIFT 22
1325#define lpfc_mbx_set_diag_state_link_type_MASK 0x00000003
1326#define lpfc_mbx_set_diag_state_link_type_WORD word0
1327 } req;
1328 struct {
1329 uint32_t word0;
1330 } rsp;
1331 } u;
1332};
1333
1334struct lpfc_mbx_set_link_diag_loopback {
1335 struct mbox_header header;
1336 union {
1337 struct {
1338 uint32_t word0;
1339#define lpfc_mbx_set_diag_lpbk_type_SHIFT 0
1340#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000001
1341#define lpfc_mbx_set_diag_lpbk_type_WORD word0
1342#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE 0x0
1343#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL 0x1
1344#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL 0x2
1345#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT 16
1346#define lpfc_mbx_set_diag_lpbk_link_num_MASK 0x0000003F
1347#define lpfc_mbx_set_diag_lpbk_link_num_WORD word0
1348#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT 22
1349#define lpfc_mbx_set_diag_lpbk_link_type_MASK 0x00000003
1350#define lpfc_mbx_set_diag_lpbk_link_type_WORD word0
1351 } req;
1352 struct {
1353 uint32_t word0;
1354 } rsp;
1355 } u;
1356};
1357
1358struct lpfc_mbx_run_link_diag_test {
1359 struct mbox_header header;
1360 union {
1361 struct {
1362 uint32_t word0;
1363#define lpfc_mbx_run_diag_test_link_num_SHIFT 16
1364#define lpfc_mbx_run_diag_test_link_num_MASK 0x0000003F
1365#define lpfc_mbx_run_diag_test_link_num_WORD word0
1366#define lpfc_mbx_run_diag_test_link_type_SHIFT 22
1367#define lpfc_mbx_run_diag_test_link_type_MASK 0x00000003
1368#define lpfc_mbx_run_diag_test_link_type_WORD word0
1369 uint32_t word1;
1370#define lpfc_mbx_run_diag_test_test_id_SHIFT 0
1371#define lpfc_mbx_run_diag_test_test_id_MASK 0x0000FFFF
1372#define lpfc_mbx_run_diag_test_test_id_WORD word1
1373#define lpfc_mbx_run_diag_test_loops_SHIFT 16
1374#define lpfc_mbx_run_diag_test_loops_MASK 0x0000FFFF
1375#define lpfc_mbx_run_diag_test_loops_WORD word1
1376 uint32_t word2;
1377#define lpfc_mbx_run_diag_test_test_ver_SHIFT 0
1378#define lpfc_mbx_run_diag_test_test_ver_MASK 0x0000FFFF
1379#define lpfc_mbx_run_diag_test_test_ver_WORD word2
1380#define lpfc_mbx_run_diag_test_err_act_SHIFT 16
1381#define lpfc_mbx_run_diag_test_err_act_MASK 0x000000FF
1382#define lpfc_mbx_run_diag_test_err_act_WORD word2
1383 } req;
1384 struct {
1385 uint32_t word0;
1386 } rsp;
1387 } u;
1388};
1389
1390/*
1391 * struct lpfc_mbx_alloc_rsrc_extents:
1392 * A mbox is generically 256 bytes long. An SLI4_CONFIG mailbox requires
1393 * 6 words of header + 4 words of shared subcommand header +
1394 * 1 word of Extent-Opcode-specific header = 11 words or 44 bytes total.
1395 *
1396 * An embedded version of SLI4_CONFIG therefore has 256 - 44 = 212 bytes
1397 * for extents payload.
1398 *
1399 * 212/2 (bytes per extent) = 106 extents.
1400 * 106/2 (extents per word) = 53 words.
1401 * lpfc_id_range id is statically sized to 53.
1402 *
1403 * This mailbox definition is used for ALLOC or GET_ALLOCATED
1404 * extent ranges. For ALLOC, the type and cnt are required.
1405 * For GET_ALLOCATED, only the type is required.
1406 */
1407struct lpfc_mbx_alloc_rsrc_extents {
1408 struct mbox_header header;
1409 union {
1410 struct {
1411 uint32_t word4;
1412#define lpfc_mbx_alloc_rsrc_extents_type_SHIFT 0
1413#define lpfc_mbx_alloc_rsrc_extents_type_MASK 0x0000FFFF
1414#define lpfc_mbx_alloc_rsrc_extents_type_WORD word4
1415#define lpfc_mbx_alloc_rsrc_extents_cnt_SHIFT 16
1416#define lpfc_mbx_alloc_rsrc_extents_cnt_MASK 0x0000FFFF
1417#define lpfc_mbx_alloc_rsrc_extents_cnt_WORD word4
1418 } req;
1419 struct {
1420 uint32_t word4;
1421#define lpfc_mbx_rsrc_cnt_SHIFT 0
1422#define lpfc_mbx_rsrc_cnt_MASK 0x0000FFFF
1423#define lpfc_mbx_rsrc_cnt_WORD word4
1424 struct lpfc_id_range id[53];
1425 } rsp;
1426 } u;
1427};
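The 53-entry id[] sizing follows from the arithmetic in the comment above; a small compile-time check of that arithmetic, with every value taken from the comment rather than the ABI:

/* Hedged sketch (C11): mechanical check of the sizing arithmetic in
 * the comment above; all values come from that comment. */
#define MBX_BYTES	256
#define HDR_WORDS	(6 + 4 + 1)	/* mbox + shared + opcode headers */
#define PAYLOAD_BYTES	(MBX_BYTES - HDR_WORDS * 4)	/* 212 */
#define EXTENT_IDS	(PAYLOAD_BYTES / 2)	/* 106 two-byte extent IDs */
#define ID_RANGE_WORDS	(EXTENT_IDS / 2)	/* two IDs packed per word */

_Static_assert(ID_RANGE_WORDS == 53, "lpfc_id_range id[] sizing");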
1428
1429/*
1430 * This is the non-embedded version of ALLOC or GET RSRC_EXTENTS. Word4 in this
1431 * structure shares the same SHIFT/MASK/WORD defines as word4 of the
1432 * mbx_alloc_rsrc_extents and mbx_get_alloc_rsrc_extents structures
1433 * defined above. This non-embedded structure provides for the
1434 * maximum number of extents supported by the port.
1435 */
1436struct lpfc_mbx_nembed_rsrc_extent {
1437 union lpfc_sli4_cfg_shdr cfg_shdr;
1438 uint32_t word4;
1439 struct lpfc_id_range id;
1440};
1441
1442struct lpfc_mbx_dealloc_rsrc_extents {
1443 struct mbox_header header;
1444 struct {
1445 uint32_t word4;
1446#define lpfc_mbx_dealloc_rsrc_extents_type_SHIFT 0
1447#define lpfc_mbx_dealloc_rsrc_extents_type_MASK 0x0000FFFF
1448#define lpfc_mbx_dealloc_rsrc_extents_type_WORD word4
1449 } req;
1450
1451};
1452
1453/* Start SLI4 FCoE specific mbox structures. */
1454
1213struct lpfc_mbx_post_hdr_tmpl { 1455struct lpfc_mbx_post_hdr_tmpl {
1214 struct mbox_header header; 1456 struct mbox_header header;
1215 uint32_t word10; 1457 uint32_t word10;
@@ -1229,7 +1471,7 @@ struct sli4_sge { /* SLI-4 */
1229 1471
1230 uint32_t word2; 1472 uint32_t word2;
1231#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/ 1473#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/
1232#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF 1474#define lpfc_sli4_sge_offset_MASK 0x1FFFFFFF
1233#define lpfc_sli4_sge_offset_WORD word2 1475#define lpfc_sli4_sge_offset_WORD word2
1234#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets 1476#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets
1235 this flag !! */ 1477 this flag !! */
@@ -1773,61 +2015,31 @@ struct lpfc_mbx_read_rev {
1773 2015
1774struct lpfc_mbx_read_config { 2016struct lpfc_mbx_read_config {
1775 uint32_t word1; 2017 uint32_t word1;
1776#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0 2018#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31
1777#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF 2019#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
1778#define lpfc_mbx_rd_conf_max_bbc_WORD word1 2020#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
1779#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8
1780#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF
1781#define lpfc_mbx_rd_conf_init_bbc_WORD word1
1782 uint32_t word2; 2021 uint32_t word2;
1783#define lpfc_mbx_rd_conf_nport_did_SHIFT 0
1784#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF
1785#define lpfc_mbx_rd_conf_nport_did_WORD word2
1786#define lpfc_mbx_rd_conf_topology_SHIFT 24 2022#define lpfc_mbx_rd_conf_topology_SHIFT 24
1787#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF 2023#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
1788#define lpfc_mbx_rd_conf_topology_WORD word2 2024#define lpfc_mbx_rd_conf_topology_WORD word2
1789 uint32_t word3; 2025 uint32_t rsvd_3;
1790#define lpfc_mbx_rd_conf_ao_SHIFT 0
1791#define lpfc_mbx_rd_conf_ao_MASK 0x00000001
1792#define lpfc_mbx_rd_conf_ao_WORD word3
1793#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8
1794#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F
1795#define lpfc_mbx_rd_conf_bb_scn_WORD word3
1796#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12
1797#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F
1798#define lpfc_mbx_rd_conf_cbb_scn_WORD word3
1799#define lpfc_mbx_rd_conf_mc_SHIFT 29
1800#define lpfc_mbx_rd_conf_mc_MASK 0x00000001
1801#define lpfc_mbx_rd_conf_mc_WORD word3
1802 uint32_t word4; 2026 uint32_t word4;
1803#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0 2027#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
1804#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF 2028#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
1805#define lpfc_mbx_rd_conf_e_d_tov_WORD word4 2029#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
1806 uint32_t word5; 2030 uint32_t rsvd_5;
1807#define lpfc_mbx_rd_conf_lp_tov_SHIFT 0
1808#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF
1809#define lpfc_mbx_rd_conf_lp_tov_WORD word5
1810 uint32_t word6; 2031 uint32_t word6;
1811#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0 2032#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
1812#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF 2033#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
1813#define lpfc_mbx_rd_conf_r_a_tov_WORD word6 2034#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
1814 uint32_t word7; 2035 uint32_t rsvd_7;
1815#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0 2036 uint32_t rsvd_8;
1816#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF
1817#define lpfc_mbx_rd_conf_r_t_tov_WORD word7
1818 uint32_t word8;
1819#define lpfc_mbx_rd_conf_al_tov_SHIFT 0
1820#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F
1821#define lpfc_mbx_rd_conf_al_tov_WORD word8
1822 uint32_t word9; 2037 uint32_t word9;
1823#define lpfc_mbx_rd_conf_lmt_SHIFT 0 2038#define lpfc_mbx_rd_conf_lmt_SHIFT 0
1824#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF 2039#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
1825#define lpfc_mbx_rd_conf_lmt_WORD word9 2040#define lpfc_mbx_rd_conf_lmt_WORD word9
1826 uint32_t word10; 2041 uint32_t rsvd_10;
1827#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0 2042 uint32_t rsvd_11;
1828#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF
1829#define lpfc_mbx_rd_conf_max_alpa_WORD word10
1830 uint32_t word11_rsvd;
1831 uint32_t word12; 2043 uint32_t word12;
1832#define lpfc_mbx_rd_conf_xri_base_SHIFT 0 2044#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
1833#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF 2045#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
@@ -1857,9 +2069,6 @@ struct lpfc_mbx_read_config {
1857#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF 2069#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
1858#define lpfc_mbx_rd_conf_vfi_count_WORD word15 2070#define lpfc_mbx_rd_conf_vfi_count_WORD word15
1859 uint32_t word16; 2071 uint32_t word16;
1860#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0
1861#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF
1862#define lpfc_mbx_rd_conf_fcfi_base_WORD word16
1863#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16 2072#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
1864#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF 2073#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
1865#define lpfc_mbx_rd_conf_fcfi_count_WORD word16 2074#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
@@ -2169,6 +2378,12 @@ struct lpfc_sli4_parameters {
2169#define cfg_fcoe_SHIFT 0 2378#define cfg_fcoe_SHIFT 0
2170#define cfg_fcoe_MASK 0x00000001 2379#define cfg_fcoe_MASK 0x00000001
2171#define cfg_fcoe_WORD word12 2380#define cfg_fcoe_WORD word12
2381#define cfg_ext_SHIFT 1
2382#define cfg_ext_MASK 0x00000001
2383#define cfg_ext_WORD word12
2384#define cfg_hdrr_SHIFT 2
2385#define cfg_hdrr_MASK 0x00000001
2386#define cfg_hdrr_WORD word12
2172#define cfg_phwq_SHIFT 15 2387#define cfg_phwq_SHIFT 15
2173#define cfg_phwq_MASK 0x00000001 2388#define cfg_phwq_MASK 0x00000001
2174#define cfg_phwq_WORD word12 2389#define cfg_phwq_WORD word12
@@ -2198,6 +2413,145 @@ struct lpfc_mbx_get_sli4_parameters {
2198 struct lpfc_sli4_parameters sli4_parameters; 2413 struct lpfc_sli4_parameters sli4_parameters;
2199}; 2414};
2200 2415
2416struct lpfc_rscr_desc_generic {
2417#define LPFC_RSRC_DESC_WSIZE 18
2418 uint32_t desc[LPFC_RSRC_DESC_WSIZE];
2419};
2420
2421struct lpfc_rsrc_desc_pcie {
2422 uint32_t word0;
2423#define lpfc_rsrc_desc_pcie_type_SHIFT 0
2424#define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff
2425#define lpfc_rsrc_desc_pcie_type_WORD word0
2426#define LPFC_RSRC_DESC_TYPE_PCIE 0x40
2427 uint32_t word1;
2428#define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0
2429#define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff
2430#define lpfc_rsrc_desc_pcie_pfnum_WORD word1
2431 uint32_t reserved;
2432 uint32_t word3;
2433#define lpfc_rsrc_desc_pcie_sriov_sta_SHIFT 0
2434#define lpfc_rsrc_desc_pcie_sriov_sta_MASK 0x000000ff
2435#define lpfc_rsrc_desc_pcie_sriov_sta_WORD word3
2436#define lpfc_rsrc_desc_pcie_pf_sta_SHIFT 8
2437#define lpfc_rsrc_desc_pcie_pf_sta_MASK 0x000000ff
2438#define lpfc_rsrc_desc_pcie_pf_sta_WORD word3
2439#define lpfc_rsrc_desc_pcie_pf_type_SHIFT 16
2440#define lpfc_rsrc_desc_pcie_pf_type_MASK 0x000000ff
2441#define lpfc_rsrc_desc_pcie_pf_type_WORD word3
2442 uint32_t word4;
2443#define lpfc_rsrc_desc_pcie_nr_virtfn_SHIFT 0
2444#define lpfc_rsrc_desc_pcie_nr_virtfn_MASK 0x0000ffff
2445#define lpfc_rsrc_desc_pcie_nr_virtfn_WORD word4
2446};
2447
2448struct lpfc_rsrc_desc_fcfcoe {
2449 uint32_t word0;
2450#define lpfc_rsrc_desc_fcfcoe_type_SHIFT 0
2451#define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff
2452#define lpfc_rsrc_desc_fcfcoe_type_WORD word0
2453#define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43
2454 uint32_t word1;
2455#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0
2456#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff
2457#define lpfc_rsrc_desc_fcfcoe_vfnum_WORD word1
2458#define lpfc_rsrc_desc_fcfcoe_pfnum_SHIFT 16
2459#define lpfc_rsrc_desc_fcfcoe_pfnum_MASK 0x000007ff
2460#define lpfc_rsrc_desc_fcfcoe_pfnum_WORD word1
2461 uint32_t word2;
2462#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_SHIFT 0
2463#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_MASK 0x0000ffff
2464#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_WORD word2
2465#define lpfc_rsrc_desc_fcfcoe_xri_cnt_SHIFT 16
2466#define lpfc_rsrc_desc_fcfcoe_xri_cnt_MASK 0x0000ffff
2467#define lpfc_rsrc_desc_fcfcoe_xri_cnt_WORD word2
2468 uint32_t word3;
2469#define lpfc_rsrc_desc_fcfcoe_wq_cnt_SHIFT 0
2470#define lpfc_rsrc_desc_fcfcoe_wq_cnt_MASK 0x0000ffff
2471#define lpfc_rsrc_desc_fcfcoe_wq_cnt_WORD word3
2472#define lpfc_rsrc_desc_fcfcoe_rq_cnt_SHIFT 16
2473#define lpfc_rsrc_desc_fcfcoe_rq_cnt_MASK 0x0000ffff
2474#define lpfc_rsrc_desc_fcfcoe_rq_cnt_WORD word3
2475 uint32_t word4;
2476#define lpfc_rsrc_desc_fcfcoe_cq_cnt_SHIFT 0
2477#define lpfc_rsrc_desc_fcfcoe_cq_cnt_MASK 0x0000ffff
2478#define lpfc_rsrc_desc_fcfcoe_cq_cnt_WORD word4
2479#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_SHIFT 16
2480#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_MASK 0x0000ffff
2481#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_WORD word4
2482 uint32_t word5;
2483#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_SHIFT 0
2484#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_MASK 0x0000ffff
2485#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_WORD word5
2486#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_SHIFT 16
2487#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_MASK 0x0000ffff
2488#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_WORD word5
2489 uint32_t word6;
2490 uint32_t word7;
2491 uint32_t word8;
2492 uint32_t word9;
2493 uint32_t word10;
2494 uint32_t word11;
2495 uint32_t word12;
2496 uint32_t word13;
2497#define lpfc_rsrc_desc_fcfcoe_lnk_nr_SHIFT 0
2498#define lpfc_rsrc_desc_fcfcoe_lnk_nr_MASK 0x0000003f
2499#define lpfc_rsrc_desc_fcfcoe_lnk_nr_WORD word13
2500#define lpfc_rsrc_desc_fcfcoe_lnk_tp_SHIFT 6
2501#define lpfc_rsrc_desc_fcfcoe_lnk_tp_MASK 0x00000003
2502#define lpfc_rsrc_desc_fcfcoe_lnk_tp_WORD word13
2503#define lpfc_rsrc_desc_fcfcoe_lmc_SHIFT 8
2504#define lpfc_rsrc_desc_fcfcoe_lmc_MASK 0x00000001
2505#define lpfc_rsrc_desc_fcfcoe_lmc_WORD word13
2506#define lpfc_rsrc_desc_fcfcoe_lld_SHIFT 9
2507#define lpfc_rsrc_desc_fcfcoe_lld_MASK 0x00000001
2508#define lpfc_rsrc_desc_fcfcoe_lld_WORD word13
2509#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16
2510#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff
2511#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13
2512};
2513
2514struct lpfc_func_cfg {
2515#define LPFC_RSRC_DESC_MAX_NUM 2
2516 uint32_t rsrc_desc_count;
2517 struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
2518};
2519
2520struct lpfc_mbx_get_func_cfg {
2521 struct mbox_header header;
2522#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
2523#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
2524#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
2525 struct lpfc_func_cfg func_cfg;
2526};
2527
2528struct lpfc_prof_cfg {
2529#define LPFC_RSRC_DESC_MAX_NUM 2
2530 uint32_t rsrc_desc_count;
2531 struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
2532};
2533
2534struct lpfc_mbx_get_prof_cfg {
2535 struct mbox_header header;
2536#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
2537#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
2538#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
2539 union {
2540 struct {
2541 uint32_t word10;
2542#define lpfc_mbx_get_prof_cfg_prof_id_SHIFT 0
2543#define lpfc_mbx_get_prof_cfg_prof_id_MASK 0x000000ff
2544#define lpfc_mbx_get_prof_cfg_prof_id_WORD word10
2545#define lpfc_mbx_get_prof_cfg_prof_tp_SHIFT 8
2546#define lpfc_mbx_get_prof_cfg_prof_tp_MASK 0x00000003
2547#define lpfc_mbx_get_prof_cfg_prof_tp_WORD word10
2548 } request;
2549 struct {
2550 struct lpfc_prof_cfg prof_cfg;
2551 } response;
2552 } u;
2553};
2554
2201/* Mailbox Completion Queue Error Messages */ 2555/* Mailbox Completion Queue Error Messages */
2202#define MB_CQE_STATUS_SUCCESS 0x0 2556#define MB_CQE_STATUS_SUCCESS 0x0
2203#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 2557#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
@@ -2206,6 +2560,29 @@ struct lpfc_mbx_get_sli4_parameters {
2206#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4 2560#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
2207#define MB_CQE_STATUS_DMA_FAILED 0x5 2561#define MB_CQE_STATUS_DMA_FAILED 0x5
2208 2562
2563#define LPFC_MBX_WR_CONFIG_MAX_BDE 8
2564struct lpfc_mbx_wr_object {
2565 struct mbox_header header;
2566 union {
2567 struct {
2568 uint32_t word4;
2569#define lpfc_wr_object_eof_SHIFT 31
2570#define lpfc_wr_object_eof_MASK 0x00000001
2571#define lpfc_wr_object_eof_WORD word4
2572#define lpfc_wr_object_write_length_SHIFT 0
2573#define lpfc_wr_object_write_length_MASK 0x00FFFFFF
2574#define lpfc_wr_object_write_length_WORD word4
2575 uint32_t write_offset;
2576 uint32_t object_name[26];
2577 uint32_t bde_count;
2578 struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
2579 } request;
2580 struct {
2581 uint32_t actual_write_length;
2582 } response;
2583 } u;
2584};
2585
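A hedged sketch of how the request half of this command might be populated before the mailbox is issued; lpfc_wr_object() in lpfc_sli.c does this for real, and the local variable names here are illustrative:

	struct lpfc_mbx_wr_object *wr_object = &mbox->u.mqe.un.wr_object;

	wr_object->u.request.write_offset = offset;	/* byte offset into the object */
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, chunk_len);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, last_chunk ? 1 : 0);
	wr_object->u.request.bde_count = bde_cnt;	/* <= LPFC_MBX_WR_CONFIG_MAX_BDE */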
2209/* mailbox queue entry structure */ 2586/* mailbox queue entry structure */
2210struct lpfc_mqe { 2587struct lpfc_mqe {
2211 uint32_t word0; 2588 uint32_t word0;
@@ -2241,6 +2618,9 @@ struct lpfc_mqe {
2241 struct lpfc_mbx_cq_destroy cq_destroy; 2618 struct lpfc_mbx_cq_destroy cq_destroy;
2242 struct lpfc_mbx_wq_destroy wq_destroy; 2619 struct lpfc_mbx_wq_destroy wq_destroy;
2243 struct lpfc_mbx_rq_destroy rq_destroy; 2620 struct lpfc_mbx_rq_destroy rq_destroy;
2621 struct lpfc_mbx_get_rsrc_extent_info rsrc_extent_info;
2622 struct lpfc_mbx_alloc_rsrc_extents alloc_rsrc_extents;
2623 struct lpfc_mbx_dealloc_rsrc_extents dealloc_rsrc_extents;
2244 struct lpfc_mbx_post_sgl_pages post_sgl_pages; 2624 struct lpfc_mbx_post_sgl_pages post_sgl_pages;
2245 struct lpfc_mbx_nembed_cmd nembed_cmd; 2625 struct lpfc_mbx_nembed_cmd nembed_cmd;
2246 struct lpfc_mbx_read_rev read_rev; 2626 struct lpfc_mbx_read_rev read_rev;
@@ -2252,7 +2632,13 @@ struct lpfc_mqe {
2252 struct lpfc_mbx_supp_pages supp_pages; 2632 struct lpfc_mbx_supp_pages supp_pages;
2253 struct lpfc_mbx_pc_sli4_params sli4_params; 2633 struct lpfc_mbx_pc_sli4_params sli4_params;
2254 struct lpfc_mbx_get_sli4_parameters get_sli4_parameters; 2634 struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
2635 struct lpfc_mbx_set_link_diag_state link_diag_state;
2636 struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
2637 struct lpfc_mbx_run_link_diag_test link_diag_test;
2638 struct lpfc_mbx_get_func_cfg get_func_cfg;
2639 struct lpfc_mbx_get_prof_cfg get_prof_cfg;
2255 struct lpfc_mbx_nop nop; 2640 struct lpfc_mbx_nop nop;
2641 struct lpfc_mbx_wr_object wr_object;
2256 } un; 2642 } un;
2257}; 2643};
2258 2644
@@ -2458,7 +2844,7 @@ struct lpfc_bmbx_create {
2458#define SGL_ALIGN_SZ 64 2844#define SGL_ALIGN_SZ 64
2459#define SGL_PAGE_SIZE 4096 2845#define SGL_PAGE_SIZE 4096
2460/* align SGL addr on a size boundary - adjust address up */ 2846/* align SGL addr on a size boundary - adjust address up */
2461#define NO_XRI ((uint16_t)-1) 2847#define NO_XRI 0xffff
2462 2848
2463struct wqe_common { 2849struct wqe_common {
2464 uint32_t word6; 2850 uint32_t word6;
@@ -2798,9 +3184,28 @@ union lpfc_wqe {
2798 struct gen_req64_wqe gen_req; 3184 struct gen_req64_wqe gen_req;
2799}; 3185};
2800 3186
3187#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001
3188#define LPFC_FILE_TYPE_GROUP 0xf7
3189#define LPFC_FILE_ID_GROUP 0xa2
3190struct lpfc_grp_hdr {
3191 uint32_t size;
3192 uint32_t magic_number;
3193 uint32_t word2;
3194#define lpfc_grp_hdr_file_type_SHIFT 24
3195#define lpfc_grp_hdr_file_type_MASK 0x000000FF
3196#define lpfc_grp_hdr_file_type_WORD word2
3197#define lpfc_grp_hdr_id_SHIFT 16
3198#define lpfc_grp_hdr_id_MASK 0x000000FF
3199#define lpfc_grp_hdr_id_WORD word2
3200 uint8_t rev_name[128];
3201};
3202
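This header fronts the .grp firmware image that lpfc_write_firmware() (added below in lpfc_init.c) consumes; a sketch of the sanity check the layout implies, mirroring the validation done there:

	const struct firmware *fw;	/* as returned by request_firmware() */
	struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;

	if (image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM ||
	    bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP ||
	    bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP)
		return -EINVAL;		/* not a valid group object */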
2801#define FCP_COMMAND 0x0 3203#define FCP_COMMAND 0x0
2802#define FCP_COMMAND_DATA_OUT 0x1 3204#define FCP_COMMAND_DATA_OUT 0x1
2803#define ELS_COMMAND_NON_FIP 0xC 3205#define ELS_COMMAND_NON_FIP 0xC
2804#define ELS_COMMAND_FIP 0xD 3206#define ELS_COMMAND_FIP 0xD
2805#define OTHER_COMMAND 0x8 3207#define OTHER_COMMAND 0x8
2806 3208
3209#define LPFC_FW_DUMP 1
3210#define LPFC_FW_RESET 2
3211#define LPFC_DV_RESET 3
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7dda036a1af3..148b98ddbb1d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -30,6 +30,7 @@
30#include <linux/ctype.h> 30#include <linux/ctype.h>
31#include <linux/aer.h> 31#include <linux/aer.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/firmware.h>
33 34
34#include <scsi/scsi.h> 35#include <scsi/scsi.h>
35#include <scsi/scsi_device.h> 36#include <scsi/scsi_device.h>
@@ -211,7 +212,6 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
211 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 212 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
212 if (!lpfc_vpd_data) 213 if (!lpfc_vpd_data)
213 goto out_free_mbox; 214 goto out_free_mbox;
214
215 do { 215 do {
216 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 216 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
217 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 217 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -309,6 +309,45 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
309} 309}
310 310
311/** 311/**
312 * lpfc_update_vport_wwn - Update the fc_nodename and fc_portname from
313 * the service parameters, honoring any configured soft WWNN/WWPN
314 * @vport: pointer to lpfc vport data structure.
315 *
316 * A configured soft name overrides the name read from the port.
317 *
318 * Return codes: none.
319 **/
320void
321lpfc_update_vport_wwn(struct lpfc_vport *vport)
322{
323 /* If the soft name exists then update it using the service params */
324 if (vport->phba->cfg_soft_wwnn)
325 u64_to_wwn(vport->phba->cfg_soft_wwnn,
326 vport->fc_sparam.nodeName.u.wwn);
327 if (vport->phba->cfg_soft_wwpn)
328 u64_to_wwn(vport->phba->cfg_soft_wwpn,
329 vport->fc_sparam.portName.u.wwn);
330
331 /*
332 * If the name is empty or there exists a soft name
333 * then copy the service params name, otherwise use the fc name
334 */
335 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
336 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
337 sizeof(struct lpfc_name));
338 else
339 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
340 sizeof(struct lpfc_name));
341
342 if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
343 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
344 sizeof(struct lpfc_name));
345 else
346 memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
347 sizeof(struct lpfc_name));
348}
349
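For reference, the soft names consulted above are plain 64-bit values (set through the driver's soft_wwnn/soft_wwpn attributes) that u64_to_wwn() from scsi_transport_fc.h expands into the 8-byte wire format; a trivial illustration with a made-up WWPN:

	uint8_t wwn[8];

	/* 0x10000000c9000001 becomes 10:00:00:00:c9:00:00:01 on the wire */
	u64_to_wwn(0x10000000c9000001ULL, wwn);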
350/**
312 * lpfc_config_port_post - Perform lpfc initialization after config port 351 * lpfc_config_port_post - Perform lpfc initialization after config port
313 * @phba: pointer to lpfc hba data structure. 352 * @phba: pointer to lpfc hba data structure.
314 * 353 *
@@ -377,17 +416,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
377 lpfc_mbuf_free(phba, mp->virt, mp->phys); 416 lpfc_mbuf_free(phba, mp->virt, mp->phys);
378 kfree(mp); 417 kfree(mp);
379 pmb->context1 = NULL; 418 pmb->context1 = NULL;
380 419 lpfc_update_vport_wwn(vport);
381 if (phba->cfg_soft_wwnn)
382 u64_to_wwn(phba->cfg_soft_wwnn,
383 vport->fc_sparam.nodeName.u.wwn);
384 if (phba->cfg_soft_wwpn)
385 u64_to_wwn(phba->cfg_soft_wwpn,
386 vport->fc_sparam.portName.u.wwn);
387 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
388 sizeof (struct lpfc_name));
389 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
390 sizeof (struct lpfc_name));
391 420
392 /* Update the fc_host data structures with new wwn. */ 421 /* Update the fc_host data structures with new wwn. */
393 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 422 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
@@ -573,7 +602,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
573 /* Clear all pending interrupts */ 602 /* Clear all pending interrupts */
574 writel(0xffffffff, phba->HAregaddr); 603 writel(0xffffffff, phba->HAregaddr);
575 readl(phba->HAregaddr); /* flush */ 604 readl(phba->HAregaddr); /* flush */
576
577 phba->link_state = LPFC_HBA_ERROR; 605 phba->link_state = LPFC_HBA_ERROR;
578 if (rc != MBX_BUSY) 606 if (rc != MBX_BUSY)
579 mempool_free(pmb, phba->mbox_mem_pool); 607 mempool_free(pmb, phba->mbox_mem_pool);
@@ -1755,7 +1783,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1755 && descp && descp[0] != '\0') 1783 && descp && descp[0] != '\0')
1756 return; 1784 return;
1757 1785
1758 if (phba->lmt & LMT_10Gb) 1786 if (phba->lmt & LMT_16Gb)
1787 max_speed = 16;
1788 else if (phba->lmt & LMT_10Gb)
1759 max_speed = 10; 1789 max_speed = 10;
1760 else if (phba->lmt & LMT_8Gb) 1790 else if (phba->lmt & LMT_8Gb)
1761 max_speed = 8; 1791 max_speed = 8;
@@ -1922,12 +1952,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1922 "Fibre Channel Adapter"}; 1952 "Fibre Channel Adapter"};
1923 break; 1953 break;
1924 case PCI_DEVICE_ID_LANCER_FC: 1954 case PCI_DEVICE_ID_LANCER_FC:
1925 oneConnect = 1; 1955 case PCI_DEVICE_ID_LANCER_FC_VF:
1926 m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"}; 1956 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
1927 break; 1957 break;
1928 case PCI_DEVICE_ID_LANCER_FCOE: 1958 case PCI_DEVICE_ID_LANCER_FCOE:
1959 case PCI_DEVICE_ID_LANCER_FCOE_VF:
1929 oneConnect = 1; 1960 oneConnect = 1;
1930 m = (typeof(m)){"Undefined", "PCIe", "FCoE"}; 1961 m = (typeof(m)){"OCe50100", "PCIe", "FCoE"};
1931 break; 1962 break;
1932 default: 1963 default:
1933 m = (typeof(m)){"Unknown", "", ""}; 1964 m = (typeof(m)){"Unknown", "", ""};
@@ -1936,7 +1967,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1936 1967
1937 if (mdp && mdp[0] == '\0') 1968 if (mdp && mdp[0] == '\0')
1938 snprintf(mdp, 79,"%s", m.name); 1969 snprintf(mdp, 79,"%s", m.name);
1939 /* oneConnect hba requires special processing, they are all initiators 1970 /*
1971 * oneConnect hba requires special processing, they are all initiators
1940 * and we put the port number on the end 1972 * and we put the port number on the end
1941 */ 1973 */
1942 if (descp && descp[0] == '\0') { 1974 if (descp && descp[0] == '\0') {
@@ -2656,6 +2688,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
2656 kfree(io); 2688 kfree(io);
2657 phba->total_iocbq_bufs--; 2689 phba->total_iocbq_bufs--;
2658 } 2690 }
2691
2659 spin_unlock_irq(&phba->hbalock); 2692 spin_unlock_irq(&phba->hbalock);
2660 return 0; 2693 return 0;
2661} 2694}
@@ -3612,6 +3645,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3612 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3645 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3613 "2718 Clear Virtual Link Received for VPI 0x%x" 3646 "2718 Clear Virtual Link Received for VPI 0x%x"
3614 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 3647 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
3648
3615 vport = lpfc_find_vport_by_vpid(phba, 3649 vport = lpfc_find_vport_by_vpid(phba,
3616 acqe_fip->index - phba->vpi_base); 3650 acqe_fip->index - phba->vpi_base);
3617 ndlp = lpfc_sli4_perform_vport_cvl(vport); 3651 ndlp = lpfc_sli4_perform_vport_cvl(vport);
@@ -3935,6 +3969,10 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
3935 pci_try_set_mwi(pdev); 3969 pci_try_set_mwi(pdev);
3936 pci_save_state(pdev); 3970 pci_save_state(pdev);
3937 3971
3972 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
3973 if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
3974 pdev->needs_freset = 1;
3975
3938 return 0; 3976 return 0;
3939 3977
3940out_disable_device: 3978out_disable_device:
@@ -3997,6 +4035,36 @@ lpfc_reset_hba(struct lpfc_hba *phba)
3997} 4035}
3998 4036
3999/** 4037/**
4038 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4039 * @phba: pointer to lpfc hba data structure.
4040 * @nr_vfn: number of virtual functions to be enabled.
4041 *
4042 * This function enables PCI SR-IOV virtual functions for a physical
4043 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
4044 * enable that number of virtual functions on the physical function. As
4045 * not all devices support SR-IOV, a failure return from the
4046 * pci_enable_sriov() API call is not treated as fatal for most devices.
4047 **/
4048int
4049lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4050{
4051 struct pci_dev *pdev = phba->pcidev;
4052 int rc;
4053
4054 rc = pci_enable_sriov(pdev, nr_vfn);
4055 if (rc) {
4056 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4057 "2806 Failed to enable sriov on this device "
4058 "with vfn number nr_vf:%d, rc:%d\n",
4059 nr_vfn, rc);
4060 } else
4061 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4062 "2807 Successful enable sriov on this device "
4063 "with vfn number nr_vf:%d\n", nr_vfn);
4064 return rc;
4065}
4066
4067/**
4000 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 4068 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4001 * @phba: pointer to lpfc hba data structure. 4069 * @phba: pointer to lpfc hba data structure.
4002 * 4070 *
@@ -4011,6 +4079,7 @@ static int
4011lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 4079lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4012{ 4080{
4013 struct lpfc_sli *psli; 4081 struct lpfc_sli *psli;
4082 int rc;
4014 4083
4015 /* 4084 /*
4016 * Initialize timers used by driver 4085 * Initialize timers used by driver
@@ -4085,6 +4154,23 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4085 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 4154 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4086 return -ENOMEM; 4155 return -ENOMEM;
4087 4156
4157 /*
4158 * Enable sr-iov virtual functions if supported and configured
4159 * through the module parameter.
4160 */
4161 if (phba->cfg_sriov_nr_virtfn > 0) {
4162 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4163 phba->cfg_sriov_nr_virtfn);
4164 if (rc) {
4165 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4166 "2808 Requested number of SR-IOV "
4167 "virtual functions (%d) is not "
4168 "supported\n",
4169 phba->cfg_sriov_nr_virtfn);
4170 phba->cfg_sriov_nr_virtfn = 0;
4171 }
4172 }
4173
4088 return 0; 4174 return 0;
4089} 4175}
4090 4176
@@ -4161,6 +4247,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4161 phba->fcf.redisc_wait.data = (unsigned long)phba; 4247 phba->fcf.redisc_wait.data = (unsigned long)phba;
4162 4248
4163 /* 4249 /*
4250 * Control structure for handling external multi-buffer mailbox
4251 * command pass-through.
4252 */
4253 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
4254 sizeof(struct lpfc_mbox_ext_buf_ctx));
4255 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4256
4257 /*
4164 * We need to do a READ_CONFIG mailbox command here before 4258 * We need to do a READ_CONFIG mailbox command here before
4165 * calling lpfc_get_cfgparam. For VFs this will report the 4259 * calling lpfc_get_cfgparam. For VFs this will report the
4166 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. 4260 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
@@ -4233,7 +4327,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4233 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); 4327 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4234 4328
4235 /* 4329 /*
4236 * Initialize dirver internal slow-path work queues 4330 * Initialize driver internal slow-path work queues
4237 */ 4331 */
4238 4332
4239 /* Driver internal slow-path CQ Event pool */ 4333 /* Driver internal slow-path CQ Event pool */
@@ -4249,6 +4343,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4249 /* Receive queue CQ Event work queue list */ 4343 /* Receive queue CQ Event work queue list */
4250 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 4344 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4251 4345
4346 /* Initialize extent block lists. */
4347 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
4348 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
4349 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
4350 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
4351
4252 /* Initialize the driver internal SLI layer lists. */ 4352 /* Initialize the driver internal SLI layer lists. */
4253 lpfc_sli_setup(phba); 4353 lpfc_sli_setup(phba);
4254 lpfc_sli_queue_setup(phba); 4354 lpfc_sli_queue_setup(phba);
@@ -4323,9 +4423,19 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4323 } 4423 }
4324 /* 4424 /*
4325 * Get sli4 parameters that override parameters from Port capabilities. 4425 * Get sli4 parameters that override parameters from Port capabilities.
4326 * If this call fails it is not a critical error so continue loading. 4426 * If this call fails, it isn't critical unless the SLI4 parameters come
4427 * back in conflict.
4327 */ 4428 */
4328 lpfc_get_sli4_parameters(phba, mboxq); 4429 rc = lpfc_get_sli4_parameters(phba, mboxq);
4430 if (rc) {
4431 if (phba->sli4_hba.extents_in_use &&
4432 phba->sli4_hba.rpi_hdrs_in_use) {
4433 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4434 "2999 Unsupported SLI4 Parameters "
4435 "Extents and RPI headers enabled.\n");
4436 goto out_free_bsmbx;
4437 }
4438 }
4329 mempool_free(mboxq, phba->mbox_mem_pool); 4439 mempool_free(mboxq, phba->mbox_mem_pool);
4330 /* Create all the SLI4 queues */ 4440 /* Create all the SLI4 queues */
4331 rc = lpfc_sli4_queue_create(phba); 4441 rc = lpfc_sli4_queue_create(phba);
@@ -4350,7 +4460,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4350 "1430 Failed to initialize sgl list.\n"); 4460 "1430 Failed to initialize sgl list.\n");
4351 goto out_free_sgl_list; 4461 goto out_free_sgl_list;
4352 } 4462 }
4353
4354 rc = lpfc_sli4_init_rpi_hdrs(phba); 4463 rc = lpfc_sli4_init_rpi_hdrs(phba);
4355 if (rc) { 4464 if (rc) {
4356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4465 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -4366,6 +4475,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4366 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4475 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4367 "2759 Failed allocate memory for FCF round " 4476 "2759 Failed allocate memory for FCF round "
4368 "robin failover bmask\n"); 4477 "robin failover bmask\n");
4478 rc = -ENOMEM;
4369 goto out_remove_rpi_hdrs; 4479 goto out_remove_rpi_hdrs;
4370 } 4480 }
4371 4481
@@ -4375,6 +4485,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4375 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4376 "2572 Failed allocate memory for fast-path " 4486 "2572 Failed allocate memory for fast-path "
4377 "per-EQ handle array\n"); 4487 "per-EQ handle array\n");
4488 rc = -ENOMEM;
4378 goto out_free_fcf_rr_bmask; 4489 goto out_free_fcf_rr_bmask;
4379 } 4490 }
4380 4491
@@ -4384,9 +4495,27 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4384 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4385 "2573 Failed allocate memory for msi-x " 4496 "2573 Failed allocate memory for msi-x "
4386 "interrupt vector entries\n"); 4497 "interrupt vector entries\n");
4498 rc = -ENOMEM;
4387 goto out_free_fcp_eq_hdl; 4499 goto out_free_fcp_eq_hdl;
4388 } 4500 }
4389 4501
4502 /*
4503 * Enable sr-iov virtual functions if supported and configured
4504 * through the module parameter.
4505 */
4506 if (phba->cfg_sriov_nr_virtfn > 0) {
4507 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4508 phba->cfg_sriov_nr_virtfn);
4509 if (rc) {
4510 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4511 "3020 Requested number of SR-IOV "
4512 "virtual functions (%d) is not "
4513 "supported\n",
4514 phba->cfg_sriov_nr_virtfn);
4515 phba->cfg_sriov_nr_virtfn = 0;
4516 }
4517 }
4518
4390 return rc; 4519 return rc;
4391 4520
4392out_free_fcp_eq_hdl: 4521out_free_fcp_eq_hdl:
@@ -4449,6 +4578,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4449 lpfc_sli4_cq_event_release_all(phba); 4578 lpfc_sli4_cq_event_release_all(phba);
4450 lpfc_sli4_cq_event_pool_destroy(phba); 4579 lpfc_sli4_cq_event_pool_destroy(phba);
4451 4580
4581 /* Release resource identifiers. */
4582 lpfc_sli4_dealloc_resource_identifiers(phba);
4583
4452 /* Free the bsmbx region. */ 4584 /* Free the bsmbx region. */
4453 lpfc_destroy_bootstrap_mbox(phba); 4585 lpfc_destroy_bootstrap_mbox(phba);
4454 4586
@@ -4649,6 +4781,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4649 "Unloading driver.\n", __func__); 4781 "Unloading driver.\n", __func__);
4650 goto out_free_iocbq; 4782 goto out_free_iocbq;
4651 } 4783 }
4784 iocbq_entry->sli4_lxritag = NO_XRI;
4652 iocbq_entry->sli4_xritag = NO_XRI; 4785 iocbq_entry->sli4_xritag = NO_XRI;
4653 4786
4654 spin_lock_irq(&phba->hbalock); 4787 spin_lock_irq(&phba->hbalock);
@@ -4746,7 +4879,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
4746 4879
4747 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4880 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4748 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4881 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4749 "2400 lpfc_init_sgl_list els %d.\n", 4882 "2400 ELS XRI count %d.\n",
4750 els_xri_cnt); 4883 els_xri_cnt);
4751 /* Initialize and populate the sglq list per host/VF. */ 4884 /* Initialize and populate the sglq list per host/VF. */
4752 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); 4885 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
@@ -4779,7 +4912,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
4779 phba->sli4_hba.scsi_xri_max = 4912 phba->sli4_hba.scsi_xri_max =
4780 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4913 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4781 phba->sli4_hba.scsi_xri_cnt = 0; 4914 phba->sli4_hba.scsi_xri_cnt = 0;
4782
4783 phba->sli4_hba.lpfc_scsi_psb_array = 4915 phba->sli4_hba.lpfc_scsi_psb_array =
4784 kzalloc((sizeof(struct lpfc_scsi_buf *) * 4916 kzalloc((sizeof(struct lpfc_scsi_buf *) *
4785 phba->sli4_hba.scsi_xri_max), GFP_KERNEL); 4917 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
@@ -4802,13 +4934,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
4802 goto out_free_mem; 4934 goto out_free_mem;
4803 } 4935 }
4804 4936
4805 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4806 if (sglq_entry->sli4_xritag == NO_XRI) {
4807 kfree(sglq_entry);
4808 printk(KERN_ERR "%s: failed to allocate XRI.\n"
4809 "Unloading driver.\n", __func__);
4810 goto out_free_mem;
4811 }
4812 sglq_entry->buff_type = GEN_BUFF_TYPE; 4937 sglq_entry->buff_type = GEN_BUFF_TYPE;
4813 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); 4938 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4814 if (sglq_entry->virt == NULL) { 4939 if (sglq_entry->virt == NULL) {
@@ -4857,24 +4982,20 @@ int
4857lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 4982lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4858{ 4983{
4859 int rc = 0; 4984 int rc = 0;
4860 int longs;
4861 uint16_t rpi_count;
4862 struct lpfc_rpi_hdr *rpi_hdr; 4985 struct lpfc_rpi_hdr *rpi_hdr;
4863 4986
4864 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 4987 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4865
4866 /* 4988 /*
4867 * Provision an rpi bitmask range for discovery. The total count 4989 * If the SLI4 port supports extents, posting the rpi header isn't
4868 * is the difference between max and base + 1. 4990 * required. Set the expected maximum count and let the actual value
4991 * get set when extents are fully allocated.
4869 */ 4992 */
4870 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base + 4993 if (!phba->sli4_hba.rpi_hdrs_in_use) {
4871 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4994 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
4872 4995 return rc;
4873 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG; 4996 }
4874 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), 4997 if (phba->sli4_hba.extents_in_use)
4875 GFP_KERNEL); 4998 return -EIO;
4876 if (!phba->sli4_hba.rpi_bmask)
4877 return -ENOMEM;
4878 4999
4879 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 5000 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4880 if (!rpi_hdr) { 5001 if (!rpi_hdr) {
@@ -4908,11 +5029,28 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4908 struct lpfc_rpi_hdr *rpi_hdr; 5029 struct lpfc_rpi_hdr *rpi_hdr;
4909 uint32_t rpi_count; 5030 uint32_t rpi_count;
4910 5031
5032 /*
5033 * If the SLI4 port supports extents, posting the rpi header isn't
5034 * required. Set the expected maximum count and let the actual value
5035 * get set when extents are fully allocated.
5036 */
5037 if (!phba->sli4_hba.rpi_hdrs_in_use)
5038 return NULL;
5039 if (phba->sli4_hba.extents_in_use)
5040 return NULL;
5041
5042 /* The limit on the logical index is just the max_rpi count. */
4911 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 5043 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4912 phba->sli4_hba.max_cfg_param.max_rpi - 1; 5044 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4913 5045
4914 spin_lock_irq(&phba->hbalock); 5046 spin_lock_irq(&phba->hbalock);
4915 curr_rpi_range = phba->sli4_hba.next_rpi; 5047 /*
5048 * Establish the starting RPI in this header block. The starting
5049 * rpi is normalized to a zero base because the physical rpi is
5050 * port based.
5051 */
5052 curr_rpi_range = phba->sli4_hba.next_rpi -
5053 phba->sli4_hba.max_cfg_param.rpi_base;
4916 spin_unlock_irq(&phba->hbalock); 5054 spin_unlock_irq(&phba->hbalock);
4917 5055
4918 /* 5056 /*
@@ -4925,6 +5063,8 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4925 else 5063 else
4926 rpi_count = LPFC_RPI_HDR_COUNT; 5064 rpi_count = LPFC_RPI_HDR_COUNT;
4927 5065
5066 if (!rpi_count)
5067 return NULL;
4928 /* 5068 /*
4929 * First allocate the protocol header region for the port. The 5069 * First allocate the protocol header region for the port. The
4930 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 5070 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
@@ -4957,12 +5097,14 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4957 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 5097 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4958 rpi_hdr->page_count = 1; 5098 rpi_hdr->page_count = 1;
4959 spin_lock_irq(&phba->hbalock); 5099 spin_lock_irq(&phba->hbalock);
4960 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; 5100
5101 /* The rpi_hdr stores the logical index only. */
5102 rpi_hdr->start_rpi = curr_rpi_range;
4961 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 5103 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4962 5104
4963 /* 5105 /*
4964 * The next_rpi stores the next module-64 rpi value to post 5106 * The next_rpi stores the next logical module-64 rpi value used
4965 * in any subsequent rpi memory region postings. 5107 * to post physical rpis in subsequent rpi postings.
4966 */ 5108 */
4967 phba->sli4_hba.next_rpi += rpi_count; 5109 phba->sli4_hba.next_rpi += rpi_count;
4968 spin_unlock_irq(&phba->hbalock); 5110 spin_unlock_irq(&phba->hbalock);
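A worked example of the normalization above, with hypothetical READ_CONFIG values:

	/* Say READ_CONFIG reported rpi_base = 64 and one 64-rpi header
	 * block has already been posted, so next_rpi = 128. Then
	 *
	 *	curr_rpi_range = next_rpi - rpi_base = 128 - 64 = 64
	 *
	 * i.e. this header starts at logical index 64; the port adds
	 * rpi_base back when the physical pages are posted.
	 */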
@@ -4981,15 +5123,18 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4981 * @phba: pointer to lpfc hba data structure. 5123 * @phba: pointer to lpfc hba data structure.
4982 * 5124 *
4983 * This routine is invoked to remove all memory resources allocated 5125 * This routine is invoked to remove all memory resources allocated
4984 * to support rpis. This routine presumes the caller has released all 5126 * to support rpis for SLI4 ports not supporting extents. This routine
4985 * rpis consumed by fabric or port logins and is prepared to have 5127 * presumes the caller has released all rpis consumed by fabric or port
4986 * the header pages removed. 5128 * logins and is prepared to have the header pages removed.
4987 **/ 5129 **/
4988void 5130void
4989lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 5131lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4990{ 5132{
4991 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 5133 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4992 5134
5135 if (!phba->sli4_hba.rpi_hdrs_in_use)
5136 goto exit;
5137
4993 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 5138 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4994 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 5139 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4995 list_del(&rpi_hdr->list); 5140 list_del(&rpi_hdr->list);
@@ -4998,9 +5143,9 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4998 kfree(rpi_hdr->dmabuf); 5143 kfree(rpi_hdr->dmabuf);
4999 kfree(rpi_hdr); 5144 kfree(rpi_hdr);
5000 } 5145 }
5001 5146 exit:
5002 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 5147 /* There are no rpis available to the port now. */
5003 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); 5148 phba->sli4_hba.next_rpi = 0;
5004} 5149}
5005 5150
5006/** 5151/**
@@ -5487,7 +5632,8 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5487 /* Final checks. The port status should be clean. */ 5632 /* Final checks. The port status should be clean. */
5488 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 5633 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
5489 &reg_data.word0) || 5634 &reg_data.word0) ||
5490 bf_get(lpfc_sliport_status_err, &reg_data)) { 5635 (bf_get(lpfc_sliport_status_err, &reg_data) &&
5636 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
5491 phba->work_status[0] = 5637 phba->work_status[0] =
5492 readl(phba->sli4_hba.u.if_type2. 5638 readl(phba->sli4_hba.u.if_type2.
5493 ERR1regaddr); 5639 ERR1regaddr);
@@ -5741,7 +5887,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5741{ 5887{
5742 LPFC_MBOXQ_t *pmb; 5888 LPFC_MBOXQ_t *pmb;
5743 struct lpfc_mbx_read_config *rd_config; 5889 struct lpfc_mbx_read_config *rd_config;
5744 uint32_t rc = 0; 5890 union lpfc_sli4_cfg_shdr *shdr;
5891 uint32_t shdr_status, shdr_add_status;
5892 struct lpfc_mbx_get_func_cfg *get_func_cfg;
5893 struct lpfc_rsrc_desc_fcfcoe *desc;
5894 uint32_t desc_count;
5895 int length, i, rc = 0;
5745 5896
5746 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5897 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5747 if (!pmb) { 5898 if (!pmb) {
@@ -5763,6 +5914,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5763 rc = -EIO; 5914 rc = -EIO;
5764 } else { 5915 } else {
5765 rd_config = &pmb->u.mqe.un.rd_config; 5916 rd_config = &pmb->u.mqe.un.rd_config;
5917 phba->sli4_hba.extents_in_use =
5918 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
5766 phba->sli4_hba.max_cfg_param.max_xri = 5919 phba->sli4_hba.max_cfg_param.max_xri =
5767 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 5920 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5768 phba->sli4_hba.max_cfg_param.xri_base = 5921 phba->sli4_hba.max_cfg_param.xri_base =
@@ -5781,8 +5934,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5781 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 5934 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5782 phba->sli4_hba.max_cfg_param.max_fcfi = 5935 phba->sli4_hba.max_cfg_param.max_fcfi =
5783 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 5936 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5784 phba->sli4_hba.max_cfg_param.fcfi_base =
5785 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5786 phba->sli4_hba.max_cfg_param.max_eq = 5937 phba->sli4_hba.max_cfg_param.max_eq =
5787 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 5938 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5788 phba->sli4_hba.max_cfg_param.max_rq = 5939 phba->sli4_hba.max_cfg_param.max_rq =
@@ -5800,11 +5951,13 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5800 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 5951 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5801 phba->max_vports = phba->max_vpi; 5952 phba->max_vports = phba->max_vpi;
5802 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5953 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5803 "2003 cfg params XRI(B:%d M:%d), " 5954 "2003 cfg params Extents? %d "
5955 "XRI(B:%d M:%d), "
5804 "VPI(B:%d M:%d) " 5956 "VPI(B:%d M:%d) "
5805 "VFI(B:%d M:%d) " 5957 "VFI(B:%d M:%d) "
5806 "RPI(B:%d M:%d) " 5958 "RPI(B:%d M:%d) "
5807 "FCFI(B:%d M:%d)\n", 5959 "FCFI(Count:%d)\n",
5960 phba->sli4_hba.extents_in_use,
5808 phba->sli4_hba.max_cfg_param.xri_base, 5961 phba->sli4_hba.max_cfg_param.xri_base,
5809 phba->sli4_hba.max_cfg_param.max_xri, 5962 phba->sli4_hba.max_cfg_param.max_xri,
5810 phba->sli4_hba.max_cfg_param.vpi_base, 5963 phba->sli4_hba.max_cfg_param.vpi_base,
@@ -5813,10 +5966,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5813 phba->sli4_hba.max_cfg_param.max_vfi, 5966 phba->sli4_hba.max_cfg_param.max_vfi,
5814 phba->sli4_hba.max_cfg_param.rpi_base, 5967 phba->sli4_hba.max_cfg_param.rpi_base,
5815 phba->sli4_hba.max_cfg_param.max_rpi, 5968 phba->sli4_hba.max_cfg_param.max_rpi,
5816 phba->sli4_hba.max_cfg_param.fcfi_base,
5817 phba->sli4_hba.max_cfg_param.max_fcfi); 5969 phba->sli4_hba.max_cfg_param.max_fcfi);
5818 } 5970 }
5819 mempool_free(pmb, phba->mbox_mem_pool); 5971
5972 if (rc)
5973 goto read_cfg_out;
5820 5974
5821 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 5975 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
5822 if (phba->cfg_hba_queue_depth > 5976 if (phba->cfg_hba_queue_depth >
@@ -5825,6 +5979,65 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5825 phba->cfg_hba_queue_depth = 5979 phba->cfg_hba_queue_depth =
5826 phba->sli4_hba.max_cfg_param.max_xri - 5980 phba->sli4_hba.max_cfg_param.max_xri -
5827 lpfc_sli4_get_els_iocb_cnt(phba); 5981 lpfc_sli4_get_els_iocb_cnt(phba);
5982
5983 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
5984 LPFC_SLI_INTF_IF_TYPE_2)
5985 goto read_cfg_out;
5986
5987 /* get the pf# and vf# for SLI4 if_type 2 port */
5988 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
5989 sizeof(struct lpfc_sli4_cfg_mhdr));
5990 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
5991 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
5992 length, LPFC_SLI4_MBX_EMBED);
5993
5994 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5995 shdr = (union lpfc_sli4_cfg_shdr *)
5996 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
5997 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5998 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5999 if (rc || shdr_status || shdr_add_status) {
6000 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6001 "3026 Mailbox failed , mbxCmd x%x "
6002 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6003 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6004 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6005 rc = -EIO;
6006 goto read_cfg_out;
6007 }
6008
6009 /* search for the fc_fcoe resource descriptor */
6010 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6011 desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
6012
6013 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6014 desc = (struct lpfc_rsrc_desc_fcfcoe *)
6015 &get_func_cfg->func_cfg.desc[i];
6016 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6017 bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
6018 phba->sli4_hba.iov.pf_number =
6019 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
6020 phba->sli4_hba.iov.vf_number =
6021 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
6022 break;
6023 }
6024 }
6025
6026 if (i < LPFC_RSRC_DESC_MAX_NUM)
6027 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6028 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6029 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
6030 phba->sli4_hba.iov.vf_number);
6031 else {
6032 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6033 "3028 GET_FUNCTION_CONFIG: failed to find "
6034 "Resrouce Descriptor:x%x\n",
6035 LPFC_RSRC_DESC_TYPE_FCFCOE);
6036 rc = -EIO;
6037 }
6038
6039read_cfg_out:
6040 mempool_free(pmb, phba->mbox_mem_pool);
5828 return rc; 6041 return rc;
5829} 6042}
5830 6043
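The completion test applied to GET_FUNCTION_CONFIG above is the standard idiom for SLI4 config mailboxes; in sketch form, using the same field names as the surrounding code:

	union lpfc_sli4_cfg_shdr *shdr;
	int rc;

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	/* The issue-path rc and both sub-header status words must all be
	 * zero before the response payload can be trusted.
	 */
	if (rc || bf_get(lpfc_mbox_hdr_status, &shdr->response) ||
	    bf_get(lpfc_mbox_hdr_add_status, &shdr->response))
		return -EIO;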
@@ -6229,8 +6442,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6229 phba->sli4_hba.mbx_cq = NULL; 6442 phba->sli4_hba.mbx_cq = NULL;
6230 6443
6231 /* Release FCP response complete queue */ 6444 /* Release FCP response complete queue */
6232 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6445 fcp_qidx = 0;
6446 do
6233 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 6447 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
6448 while (++fcp_qidx < phba->cfg_fcp_eq_count);
6234 kfree(phba->sli4_hba.fcp_cq); 6449 kfree(phba->sli4_hba.fcp_cq);
6235 phba->sli4_hba.fcp_cq = NULL; 6450 phba->sli4_hba.fcp_cq = NULL;
6236 6451
@@ -6353,16 +6568,24 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6353 phba->sli4_hba.sp_eq->queue_id); 6568 phba->sli4_hba.sp_eq->queue_id);
6354 6569
6355 /* Set up fast-path FCP Response Complete Queue */ 6570 /* Set up fast-path FCP Response Complete Queue */
6356 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6571 fcp_cqidx = 0;
6572 do {
6357 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6573 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6358 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6574 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6359 "0526 Fast-path FCP CQ (%d) not " 6575 "0526 Fast-path FCP CQ (%d) not "
6360 "allocated\n", fcp_cqidx); 6576 "allocated\n", fcp_cqidx);
6361 goto out_destroy_fcp_cq; 6577 goto out_destroy_fcp_cq;
6362 } 6578 }
6363 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 6579 if (phba->cfg_fcp_eq_count)
6364 phba->sli4_hba.fp_eq[fcp_cqidx], 6580 rc = lpfc_cq_create(phba,
6365 LPFC_WCQ, LPFC_FCP); 6581 phba->sli4_hba.fcp_cq[fcp_cqidx],
6582 phba->sli4_hba.fp_eq[fcp_cqidx],
6583 LPFC_WCQ, LPFC_FCP);
6584 else
6585 rc = lpfc_cq_create(phba,
6586 phba->sli4_hba.fcp_cq[fcp_cqidx],
6587 phba->sli4_hba.sp_eq,
6588 LPFC_WCQ, LPFC_FCP);
6366 if (rc) { 6589 if (rc) {
6367 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6590 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6368 "0527 Failed setup of fast-path FCP " 6591 "0527 Failed setup of fast-path FCP "
@@ -6371,12 +6594,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6371 } 6594 }
6372 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6595 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6373 "2588 FCP CQ setup: cq[%d]-id=%d, " 6596 "2588 FCP CQ setup: cq[%d]-id=%d, "
6374 "parent eq[%d]-id=%d\n", 6597 "parent %seq[%d]-id=%d\n",
6375 fcp_cqidx, 6598 fcp_cqidx,
6376 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 6599 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6600 (phba->cfg_fcp_eq_count) ? "" : "sp_",
6377 fcp_cqidx, 6601 fcp_cqidx,
6378 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); 6602 (phba->cfg_fcp_eq_count) ?
6379 } 6603 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
6604 phba->sli4_hba.sp_eq->queue_id);
6605 } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6380 6606
6381 /* 6607 /*
6382 * Set up all the Work Queues (WQs) 6608 * Set up all the Work Queues (WQs)
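Note the loop conversion in the hunk above: as a do/while, the body runs at least once even when cfg_fcp_eq_count is zero, which is what binds fcp_cq[0] to the slow-path EQ in the single-vector case; schematically (helper name hypothetical):

	/* cfg_fcp_eq_count == 0: one pass, the CQ is parented to sp_eq.
	 * cfg_fcp_eq_count == N: N passes, each CQ parented to its fp_eq.
	 */
	fcp_cqidx = 0;
	do {
		setup_one_fcp_cq(phba, fcp_cqidx);
	} while (++fcp_cqidx < phba->cfg_fcp_eq_count);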
@@ -6445,7 +6671,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6445 fcp_cq_index, 6671 fcp_cq_index,
6446 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 6672 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6447 /* Round robin FCP Work Queue's Completion Queue assignment */ 6673 /* Round robin FCP Work Queue's Completion Queue assignment */
6448 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); 6674 if (phba->cfg_fcp_eq_count)
6675 fcp_cq_index = ((fcp_cq_index + 1) %
6676 phba->cfg_fcp_eq_count);
6449 } 6677 }
6450 6678
6451 /* 6679 /*
@@ -6827,6 +7055,8 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
6827 if (rdy_chk < 1000) 7055 if (rdy_chk < 1000)
6828 break; 7056 break;
6829 } 7057 }
7058 /* delay driver action following IF_TYPE_2 function reset */
7059 msleep(100);
6830 break; 7060 break;
6831 case LPFC_SLI_INTF_IF_TYPE_1: 7061 case LPFC_SLI_INTF_IF_TYPE_1:
6832 default: 7062 default:
@@ -7419,11 +7649,15 @@ enable_msix_vectors:
7419 /* 7649 /*
7420 * Assign MSI-X vectors to interrupt handlers 7650 * Assign MSI-X vectors to interrupt handlers
7421 */ 7651 */
7422 7652 if (vectors > 1)
7423 /* The first vector must associated to slow-path handler for MQ */ 7653 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7424 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 7654 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7425 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 7655 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7426 LPFC_SP_DRIVER_HANDLER_NAME, phba); 7656 else
7657 /* All Interrupts need to be handled by one EQ */
7658 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7659 &lpfc_sli4_intr_handler, IRQF_SHARED,
7660 LPFC_DRIVER_NAME, phba);
7427 if (rc) { 7661 if (rc) {
7428 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7662 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7429 "0485 MSI-X slow-path request_irq failed " 7663 "0485 MSI-X slow-path request_irq failed "
@@ -7765,6 +7999,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7765{ 7999{
7766 int wait_cnt = 0; 8000 int wait_cnt = 0;
7767 LPFC_MBOXQ_t *mboxq; 8001 LPFC_MBOXQ_t *mboxq;
8002 struct pci_dev *pdev = phba->pcidev;
7768 8003
7769 lpfc_stop_hba_timers(phba); 8004 lpfc_stop_hba_timers(phba);
7770 phba->sli4_hba.intr_enable = 0; 8005 phba->sli4_hba.intr_enable = 0;
@@ -7804,6 +8039,10 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7804 /* Disable PCI subsystem interrupt */ 8039 /* Disable PCI subsystem interrupt */
7805 lpfc_sli4_disable_intr(phba); 8040 lpfc_sli4_disable_intr(phba);
7806 8041
8042 /* Disable SR-IOV if enabled */
8043 if (phba->cfg_sriov_nr_virtfn)
8044 pci_disable_sriov(pdev);
8045
7807 /* Stop kthread signal shall trigger work_done one more time */ 8046 /* Stop kthread signal shall trigger work_done one more time */
7808 kthread_stop(phba->worker_thread); 8047 kthread_stop(phba->worker_thread);
7809 8048
@@ -7878,6 +8117,11 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7878 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 8117 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7879 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 8118 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7880 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 8119 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
8120
8121 /* Make sure that sge_supp_len can be handled by the driver */
8122 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8123 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8124
7881 return rc; 8125 return rc;
7882} 8126}
7883 8127
@@ -7902,6 +8146,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7902 int length; 8146 int length;
7903 struct lpfc_sli4_parameters *mbx_sli4_parameters; 8147 struct lpfc_sli4_parameters *mbx_sli4_parameters;
7904 8148
8149 /*
8150 * By default, the driver assumes the SLI4 port requires RPI
8151 * header postings. The SLI4_PARAM response will correct this
8152 * assumption.
8153 */
8154 phba->sli4_hba.rpi_hdrs_in_use = 1;
8155
7905 /* Read the port's SLI4 Config Parameters */ 8156 /* Read the port's SLI4 Config Parameters */
7906 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 8157 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
7907 sizeof(struct lpfc_sli4_cfg_mhdr)); 8158 sizeof(struct lpfc_sli4_cfg_mhdr));
@@ -7938,6 +8189,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7938 mbx_sli4_parameters); 8189 mbx_sli4_parameters);
7939 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 8190 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
7940 mbx_sli4_parameters); 8191 mbx_sli4_parameters);
8192 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
8193 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
8194
8195 /* Make sure that sge_supp_len can be handled by the driver */
8196 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8197 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8198
7941 return 0; 8199 return 0;
7942} 8200}
7943 8201
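The two flags captured here steer the resource-id handling for the rest of the series; as a quick reference (the conflicting combination is the one rejected back in lpfc_sli4_driver_resource_setup):

	/* extents_in_use  rpi_hdrs_in_use  meaning
	 *       0                1         classic rpi header posting
	 *       0                0         no header posting required
	 *       1                0         extent-based resource ids
	 *       1                1         unsupported, fails with -EIO
	 */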
@@ -8173,6 +8431,10 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
8173 8431
8174 lpfc_debugfs_terminate(vport); 8432 lpfc_debugfs_terminate(vport);
8175 8433
8434 /* Disable SR-IOV if enabled */
8435 if (phba->cfg_sriov_nr_virtfn)
8436 pci_disable_sriov(pdev);
8437
8176 /* Disable interrupt */ 8438 /* Disable interrupt */
8177 lpfc_sli_disable_intr(phba); 8439 lpfc_sli_disable_intr(phba);
8178 8440
@@ -8565,6 +8827,97 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
8565} 8827}
8566 8828
8567/** 8829/**
8830 * lpfc_write_firmware - attempt to write a firmware image to the port
8831 * @phba: pointer to lpfc hba data structure.
8832 * @fw: pointer to firmware image returned from request_firmware.
8833 *
8834 * returns the number of bytes written if write is successful.
8835 * returns a negative error value if there were errors.
8836 * returns 0 if firmware matches currently active firmware on port.
8837 **/
8838int
8839lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
8840{
8841 char fwrev[32];
8842 struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
8843 struct list_head dma_buffer_list;
8844 int i, rc = 0;
8845 struct lpfc_dmabuf *dmabuf, *next;
8846 uint32_t offset = 0, temp_offset = 0;
8847
8848 INIT_LIST_HEAD(&dma_buffer_list);
8849 if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) ||
8850 (bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) ||
8851 (bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
8852 (image->size != fw->size)) {
8853 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8854 "3022 Invalid FW image found. "
8855 "Magic:%d Type:%x ID:%x\n",
8856 image->magic_number,
8857 bf_get(lpfc_grp_hdr_file_type, image),
8858 bf_get(lpfc_grp_hdr_id, image));
8859 return -EINVAL;
8860 }
8861 lpfc_decode_firmware_rev(phba, fwrev, 1);
8862 if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) {
8863 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8864 "3023 Updating Firmware. Current Version:%s "
8865 "New Version:%s\n",
8866 fwrev, image->rev_name);
8867 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
8868 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
8869 GFP_KERNEL);
8870 if (!dmabuf) {
8871 rc = -ENOMEM;
8872 goto out;
8873 }
8874 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8875 SLI4_PAGE_SIZE,
8876 &dmabuf->phys,
8877 GFP_KERNEL);
8878 if (!dmabuf->virt) {
8879 kfree(dmabuf);
8880 rc = -ENOMEM;
8881 goto out;
8882 }
8883 list_add_tail(&dmabuf->list, &dma_buffer_list);
8884 }
8885 while (offset < fw->size) {
8886 temp_offset = offset;
8887 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
8888 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
8889 /* copy the tail before temp_offset advances */
8890 memcpy(dmabuf->virt, fw->data + temp_offset,
8891 fw->size - temp_offset);
8892 temp_offset = fw->size;
8893 break;
8894 }
8895 memcpy(dmabuf->virt, fw->data + temp_offset,
8896 SLI4_PAGE_SIZE);
8897 temp_offset += SLI4_PAGE_SIZE;
8898 }
8899 rc = lpfc_wr_object(phba, &dma_buffer_list,
8900 (fw->size - offset), &offset);
8901 if (rc) {
8902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8903 "3024 Firmware update failed. "
8904 "%d\n", rc);
8905 goto out;
8906 }
8907 }
8908 rc = offset;
8909 }
8910out:
8911 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
8912 list_del(&dmabuf->list);
8913 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
8914 dmabuf->virt, dmabuf->phys);
8915 kfree(dmabuf);
8916 }
8917 return rc;
8918}
8919
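A back-of-envelope note on the chunking above: each pass of the write loop stages at most LPFC_MBX_WR_CONFIG_MAX_BDE pages, so with the 4 KiB SLI4_PAGE_SIZE used for the DMA buffers the per-command payload works out as:

	size_t chunk = LPFC_MBX_WR_CONFIG_MAX_BDE * SLI4_PAGE_SIZE;	/* 8 * 4096 = 32 KiB */
	size_t cmds = (fw->size + chunk - 1) / chunk;	/* e.g. a 1 MiB image: 32 commands */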
8920/**
8568 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 8921 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
8569 * @pdev: pointer to PCI device 8922 * @pdev: pointer to PCI device
8570 * @pid: pointer to PCI device identifier 8923 * @pid: pointer to PCI device identifier
@@ -8591,6 +8944,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8591 int error; 8944 int error;
8592 uint32_t cfg_mode, intr_mode; 8945 uint32_t cfg_mode, intr_mode;
8593 int mcnt; 8946 int mcnt;
8947 int adjusted_fcp_eq_count;
8948 int fcp_qidx;
8949 const struct firmware *fw;
8950 char file_name[16];
8594 8951
8595 /* Allocate memory for HBA structure */ 8952 /* Allocate memory for HBA structure */
8596 phba = lpfc_hba_alloc(pdev); 8953 phba = lpfc_hba_alloc(pdev);
@@ -8688,11 +9045,25 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8688 error = -ENODEV; 9045 error = -ENODEV;
8689 goto out_free_sysfs_attr; 9046 goto out_free_sysfs_attr;
8690 } 9047 }
8691 /* Default to single FCP EQ for non-MSI-X */ 9048 /* Default to single EQ for non-MSI-X */
8692 if (phba->intr_type != MSIX) 9049 if (phba->intr_type != MSIX)
8693 phba->cfg_fcp_eq_count = 1; 9050 adjusted_fcp_eq_count = 0;
8694 else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count) 9051 else if (phba->sli4_hba.msix_vec_nr <
8695 phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; 9052 phba->cfg_fcp_eq_count + 1)
9053 adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
9054 else
9055 adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
9056 /* Free unused EQs */
9057 for (fcp_qidx = adjusted_fcp_eq_count;
9058 fcp_qidx < phba->cfg_fcp_eq_count;
9059 fcp_qidx++) {
9060 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
9061 /* do not delete the first fcp_cq */
9062 if (fcp_qidx)
9063 lpfc_sli4_queue_free(
9064 phba->sli4_hba.fcp_cq[fcp_qidx]);
9065 }
9066 phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
8696 /* Set up SLI-4 HBA */ 9067 /* Set up SLI-4 HBA */
8697 if (lpfc_sli4_hba_setup(phba)) { 9068 if (lpfc_sli4_hba_setup(phba)) {
8698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8731,6 +9102,14 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8731 /* Perform post initialization setup */ 9102 /* Perform post initialization setup */
8732 lpfc_post_init_setup(phba); 9103 lpfc_post_init_setup(phba);
8733 9104
9105 /* check for firmware upgrade or downgrade */
9106 snprintf(file_name, 16, "%s.grp", phba->ModelName);
9107 error = request_firmware(&fw, file_name, &phba->pcidev->dev);
9108 if (!error) {
9109 lpfc_write_firmware(phba, fw);
9110 release_firmware(fw);
9111 }
9112
8734 /* Check if there are static vports to be created. */ 9113 /* Check if there are static vports to be created. */
8735 lpfc_create_static_vport(phba); 9114 lpfc_create_static_vport(phba);
8736 9115
@@ -9498,6 +9877,10 @@ static struct pci_device_id lpfc_id_table[] = {
9498 PCI_ANY_ID, PCI_ANY_ID, }, 9877 PCI_ANY_ID, PCI_ANY_ID, },
9499 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, 9878 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
9500 PCI_ANY_ID, PCI_ANY_ID, }, 9879 PCI_ANY_ID, PCI_ANY_ID, },
9880 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
9881 PCI_ANY_ID, PCI_ANY_ID, },
9882 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
9883 PCI_ANY_ID, PCI_ANY_ID, },
9501 { 0 } 9884 { 0 }
9502}; 9885};
9503 9886
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e6ce9033f85e..556767028353 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -610,7 +610,8 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
610 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 610 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
611 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); 611 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
612 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); 612 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
613 mb->un.varRdSparm.vpi = vpi + phba->vpi_base; 613 if (phba->sli_rev >= LPFC_SLI_REV3)
614 mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
614 615
615 /* save address for completion */ 616 /* save address for completion */
616 pmb->context1 = mp; 617 pmb->context1 = mp;
@@ -643,9 +644,10 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
643 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 644 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
644 645
645 mb->un.varUnregDID.did = did; 646 mb->un.varUnregDID.did = did;
646 if (vpi != 0xffff)
647 vpi += phba->vpi_base;
648 mb->un.varUnregDID.vpi = vpi; 647 mb->un.varUnregDID.vpi = vpi;
648 if ((vpi != 0xffff) &&
649 (phba->sli_rev == LPFC_SLI_REV4))
650 mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
649 651
650 mb->mbxCommand = MBX_UNREG_D_ID; 652 mb->mbxCommand = MBX_UNREG_D_ID;
651 mb->mbxOwner = OWN_HOST; 653 mb->mbxOwner = OWN_HOST;
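Common to the changes in this file: with extent support, the driver-side vpi/vfi/rpi values are logical indices, and translation to the port's physical identifiers happens only while a mailbox command is built; schematically:

	uint16_t logical_vpi = vport->vpi;			/* what the driver iterates over */
	uint16_t physical_vpi = phba->vpi_ids[logical_vpi];	/* what goes on the wire */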
@@ -738,12 +740,10 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
738 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 740 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
739 741
740 mb->un.varRegLogin.rpi = 0; 742 mb->un.varRegLogin.rpi = 0;
741 if (phba->sli_rev == LPFC_SLI_REV4) { 743 if (phba->sli_rev == LPFC_SLI_REV4)
742 mb->un.varRegLogin.rpi = rpi; 744 mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
743 if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR) 745 if (phba->sli_rev >= LPFC_SLI_REV3)
744 return 1; 746 mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
745 }
746 mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
747 mb->un.varRegLogin.did = did; 747 mb->un.varRegLogin.did = did;
748 mb->mbxOwner = OWN_HOST; 748 mb->mbxOwner = OWN_HOST;
749 /* Get a buffer to hold NPorts Service Parameters */ 749 /* Get a buffer to hold NPorts Service Parameters */
@@ -757,7 +757,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
757 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 757 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
758 "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, " 758 "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
759 "rpi x%x\n", vpi, did, rpi); 759 "rpi x%x\n", vpi, did, rpi);
760 return (1); 760 return 1;
761 } 761 }
762 INIT_LIST_HEAD(&mp->list); 762 INIT_LIST_HEAD(&mp->list);
763 sparam = mp->virt; 763 sparam = mp->virt;
@@ -773,7 +773,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
773 mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys); 773 mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
774 mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys); 774 mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
775 775
776 return (0); 776 return 0;
777} 777}
778 778
779/** 779/**
@@ -789,6 +789,9 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
789 * 789 *
790 * This routine prepares the mailbox command for unregistering remote port 790 * This routine prepares the mailbox command for unregistering remote port
791 * login. 791 * login.
792 *
793 * For SLI4 ports, the rpi passed to this function must be the physical
794 * rpi value, not the logical index.
792 **/ 795 **/
793void 796void
794lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, 797lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
@@ -799,9 +802,10 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
799 mb = &pmb->u.mb; 802 mb = &pmb->u.mb;
800 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 803 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
801 804
802 mb->un.varUnregLogin.rpi = (uint16_t) rpi; 805 mb->un.varUnregLogin.rpi = rpi;
803 mb->un.varUnregLogin.rsvd1 = 0; 806 mb->un.varUnregLogin.rsvd1 = 0;
804 mb->un.varUnregLogin.vpi = vpi + phba->vpi_base; 807 if (phba->sli_rev >= LPFC_SLI_REV3)
808 mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
805 809
806 mb->mbxCommand = MBX_UNREG_LOGIN; 810 mb->mbxCommand = MBX_UNREG_LOGIN;
807 mb->mbxOwner = OWN_HOST; 811 mb->mbxOwner = OWN_HOST;
@@ -825,9 +829,16 @@ lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
825 829
826 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 830 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
827 if (mbox) { 831 if (mbox) {
828 lpfc_unreg_login(phba, vport->vpi, 832 /*
829 vport->vpi + phba->vpi_base, mbox); 833 * For SLI4 functions, the rpi field is overloaded for
830 mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000 ; 834 * the vport context unreg all. This routine passes
835 * the physical vpi (phba->vpi_ids[vpi]) in the rpi slot
836 * and then sets rsvd1 to 0x4000 so the port unregisters
837 * all logins on the vpi.
838 */
839 lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
840 mbox);
841 mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
831 mbox->vport = vport; 842 mbox->vport = vport;
832 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 843 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
833 mbox->context1 = NULL; 844 mbox->context1 = NULL;
@@ -865,9 +876,13 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
865 if ((phba->sli_rev == LPFC_SLI_REV4) && 876 if ((phba->sli_rev == LPFC_SLI_REV4) &&
866 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) 877 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
867 mb->un.varRegVpi.upd = 1; 878 mb->un.varRegVpi.upd = 1;
868 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base; 879
880 mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
869 mb->un.varRegVpi.sid = vport->fc_myDID; 881 mb->un.varRegVpi.sid = vport->fc_myDID;
870 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; 882 if (phba->sli_rev == LPFC_SLI_REV4)
883 mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
884 else
885 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
871 memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname, 886 memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
872 sizeof(struct lpfc_name)); 887 sizeof(struct lpfc_name));
873 mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]); 888 mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
@@ -901,10 +916,10 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
916 MAILBOX_t *mb = &pmb->u.mb;
917 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
918
904 if (phba->sli_rev < LPFC_SLI_REV4)
905 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
906 else
907 mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base;
919 if (phba->sli_rev == LPFC_SLI_REV3)
920 mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
921 else if (phba->sli_rev >= LPFC_SLI_REV4)
922 mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
923
924 mb->mbxCommand = MBX_UNREG_VPI;
925 mb->mbxOwner = OWN_HOST;
@@ -1735,12 +1750,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1750 return length;
1751 }
1752
1738 /* Setup for the none-embedded mbox command */
1753 /* Setup for the non-embedded mbox command */
1754 pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
1755 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1756 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1757 /* Allocate record for keeping SGE virtual addresses */
1743 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1758 mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1759 GFP_KERNEL);
1760 if (!mbox->sge_array) {
1761 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
@@ -1790,12 +1805,87 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1805 /* The sub-header is in DMA memory, which needs endian converstion */
1806 if (cfg_shdr)
1807 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1808 sizeof(union lpfc_sli4_cfg_shdr));
1794
1809 return alloc_len;
1810}
1811
1812/**
1813 * lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent.
1814 * @phba: pointer to lpfc hba data structure.
1815 * @mbox: pointer to an allocated lpfc mbox resource.
1816 * @exts_count: the number of extents, if required, to allocate.
1817 * @rsrc_type: the resource extent type.
1818 * @emb: true if LPFC_SLI4_MBX_EMBED. false if LPFC_SLI4_MBX_NEMBED.
1819 *
1820 * This routine completes the subcommand header for SLI4 resource extent
1821 * mailbox commands. It is called after lpfc_sli4_config. The caller must
1822 * pass an allocated mailbox and the attributes required to initialize the
1823 * mailbox correctly.
1824 *
1825 * Return: 0 if successful, 1 otherwise.
1826 **/
1827int
1828lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1829 uint16_t exts_count, uint16_t rsrc_type, bool emb)
1830{
1831 uint8_t opcode = 0;
1832 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
1833 void *virtaddr = NULL;
1834
1835 /* Set up SLI4 ioctl command header fields */
1836 if (emb == LPFC_SLI4_MBX_NEMBED) {
1837 /* Get the first SGE entry from the non-embedded DMA memory */
1838 virtaddr = mbox->sge_array->addr[0];
1839 if (virtaddr == NULL)
1840 return 1;
1841 n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
1842 }
1843
1844 /*
1845 * The resource type is common to all extent Opcodes and resides in the
1846 * same position.
1847 */
1848 if (emb == LPFC_SLI4_MBX_EMBED)
1849 bf_set(lpfc_mbx_alloc_rsrc_extents_type,
1850 &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
1851 rsrc_type);
1852 else {
1853 /* This is DMA data. Byteswap is required. */
1854 bf_set(lpfc_mbx_alloc_rsrc_extents_type,
1855 n_rsrc_extnt, rsrc_type);
1856 lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
1857 &n_rsrc_extnt->word4,
1858 sizeof(uint32_t));
1859 }
1860
1861 /* Complete the initialization for the particular Opcode. */
1862 opcode = lpfc_sli4_mbox_opcode_get(phba, mbox);
1863 switch (opcode) {
1864 case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
1865 if (emb == LPFC_SLI4_MBX_EMBED)
1866 bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
1867 &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
1868 exts_count);
1869 else
1870 bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
1871 n_rsrc_extnt, exts_count);
1872 break;
1873 case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
1874 case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
1875 case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
1876 /* Initialization is complete. */
1877 break;
1878 default:
1879 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1880 "2929 Resource Extent Opcode x%x is "
1881 "unsupported\n", opcode);
1882 return 1;
1883 }
1884
1885 return 0;
1886}
1887
1888/**
1889 * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
1890 * @phba: pointer to lpfc hba data structure.
1891 * @mbox: pointer to lpfc mbox command.
@@ -1939,9 +2029,12 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2029 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
2030 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
2031 bf_set(lpfc_init_vfi_vp, init_vfi, 1);
1942 bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
1943 bf_set(lpfc_init_vpi_vpi, init_vfi, vport->vpi + vport->phba->vpi_base);
1944 bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
2032 bf_set(lpfc_init_vfi_vfi, init_vfi,
2033 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2034 bf_set(lpfc_init_vpi_vpi, init_vfi,
2035 vport->phba->vpi_ids[vport->vpi]);
2036 bf_set(lpfc_init_vfi_fcfi, init_vfi,
2037 vport->phba->fcf.fcfi);
2038}
2039
2040/**
@@ -1964,9 +2057,10 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
2057 reg_vfi = &mbox->u.mqe.un.reg_vfi;
2058 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
2059 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
1967 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
2060 bf_set(lpfc_reg_vfi_vfi, reg_vfi,
2061 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2062 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
1969 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
2063 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
2064 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
2065 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
2066 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
@@ -1997,9 +2091,9 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
2091 memset(mbox, 0, sizeof(*mbox));
2092 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
2093 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
2000 vpi + phba->vpi_base);
2094 phba->vpi_ids[vpi]);
2095 bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
2002 phba->pport->vfi + phba->vfi_base);
2096 phba->sli4_hba.vfi_ids[phba->pport->vfi]);
2097}
2098
2099/**
@@ -2019,7 +2113,7 @@ lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2113 memset(mbox, 0, sizeof(*mbox));
2114 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
2115 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
2022 vport->vfi + vport->phba->vfi_base);
2116 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2117}
2118
2119/**
@@ -2131,12 +2225,14 @@ lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
2225void
2226lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
2227{
2228 struct lpfc_hba *phba = ndlp->phba;
2229 struct lpfc_mbx_resume_rpi *resume_rpi;
2230
2231 memset(mbox, 0, sizeof(*mbox));
2232 resume_rpi = &mbox->u.mqe.un.resume_rpi;
2233 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
2139 bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi);
2234 bf_set(lpfc_resume_rpi_index, resume_rpi,
2235 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2236 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
2237 resume_rpi->event_tag = ndlp->phba->fc_eventTag;
2238}
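The pattern running through lpfc_mbox.c above is the same in every hunk: the old physical-id arithmetic (vport->vpi + phba->vpi_base, vfi + vfi_base, and friends) is replaced by a lookup through a per-type id table (phba->vpi_ids[], phba->sli4_hba.vfi_ids[], rpi_ids[]). With resource extents the ids granted by the port need not be contiguous, so the base-plus-offset arithmetic no longer holds. A minimal standalone sketch of the indirection (hypothetical values, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* hypothetical physical ids granted in two discontiguous extents */
        uint16_t vpi_ids[] = { 100, 101, 102, 103, 640, 641, 642, 643 };
        uint16_t logical = 5;

        /* old: physical = logical + base (assumes one contiguous range) */
        /* new: physical = vpi_ids[logical] (works for any id layout)    */
        printf("logical vpi %u -> physical vpi %u\n",
               logical, vpi_ids[logical]);
        return 0;
    }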
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index cbb48ee8b0bb..10d5b5e41499 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -62,7 +62,6 @@ int
62lpfc_mem_alloc(struct lpfc_hba *phba, int align)
63{
64 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
65 int longs;
65 int i;
66
67 if (phba->sli_rev == LPFC_SLI_REV4)
@@ -138,17 +137,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
137 phba->lpfc_hrb_pool = NULL;
138 phba->lpfc_drb_pool = NULL;
139 }
141 /* vpi zero is reserved for the physical port so add 1 to max */
142 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
143 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
144 if (!phba->vpi_bmask)
145 goto fail_free_dbq_pool;
140
141 return 0;
148
149 fail_free_dbq_pool:
150 pci_pool_destroy(phba->lpfc_drb_pool);
151 phba->lpfc_drb_pool = NULL;
142 fail_free_hrb_pool:
143 pci_pool_destroy(phba->lpfc_hrb_pool);
144 phba->lpfc_hrb_pool = NULL;
@@ -191,9 +181,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
181 int i;
182 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
183
194 /* Free VPI bitmask memory */
195 kfree(phba->vpi_bmask);
196
184 /* Free HBQ pools */
185 lpfc_sli_hbqbuf_free_all(phba);
186 if (phba->lpfc_drb_pool)
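The vpi bitmask teardown removed above pairs with the allocation this series moves into lpfc_sli_hba_setup() (see the lpfc_sli.c hunk further down). The new sizing expression there, (max_vpi + BITS_PER_LONG) / BITS_PER_LONG, is algebraically the same round-up as the old ((max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG, still reserving max_vpi + 1 bits because vpi zero belongs to the physical port. A quick standalone check, assuming 64-bit longs:

    #include <stdio.h>
    #define BITS_PER_LONG 64	/* assumption for this sketch */

    int main(void)
    {
        unsigned int max_vpi = 100;	/* hypothetical READ_CONFIG value */
        unsigned int old_longs =
            ((max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
        unsigned int new_longs = (max_vpi + BITS_PER_LONG) / BITS_PER_LONG;

        /* both round max_vpi + 1 bits up to whole longs: prints "2 2" */
        printf("%u %u\n", old_longs, new_longs);
        return 0;
    }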
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 0d92d4205ea6..2ddd02f7c603 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -350,11 +350,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
350 ndlp->nlp_maxframe =
351 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
352
353 /*
354 * Need to unreg_login if we are already in one of these states and
355 * change to NPR state. This will block the port until after the ACC
356 * completes and the reg_login is issued and completed.
357 */
353 /* no need to reg_login if we are already in one of these states */
354 switch (ndlp->nlp_state) {
355 case NLP_STE_NPR_NODE:
356 if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
@@ -363,9 +359,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
359 case NLP_STE_PRLI_ISSUE:
360 case NLP_STE_UNMAPPED_NODE:
361 case NLP_STE_MAPPED_NODE:
366 lpfc_unreg_rpi(vport, ndlp);
367 ndlp->nlp_prev_state = ndlp->nlp_state;
368 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
362 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
363 return 1;
364 }
365
366 if ((vport->fc_flag & FC_PT2PT) &&
@@ -657,6 +652,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
652 lpfc_unreg_rpi(vport, ndlp);
653 return 0;
654}
655
656/**
657 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
658 * @phba : Pointer to lpfc_hba structure.
@@ -1399,8 +1395,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1395 if (mb->mbxStatus) {
1396 /* RegLogin failed */
1397 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1402 "0246 RegLogin failed Data: x%x x%x x%x\n",
1403 did, mb->mbxStatus, vport->port_state);
1398 "0246 RegLogin failed Data: x%x x%x x%x x%x "
1399 "x%x\n",
1400 did, mb->mbxStatus, vport->port_state,
1401 mb->un.varRegLogin.vpi,
1402 mb->un.varRegLogin.rpi);
1403 /*
1404 * If RegLogin failed due to lack of HBA resources do not
1405 * retry discovery.
@@ -1424,7 +1423,10 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1423 return ndlp->nlp_state;
1424 }
1425
1427 ndlp->nlp_rpi = mb->un.varWords[0];
1426 /* SLI4 ports have preallocated logical rpis. */
1427 if (vport->phba->sli_rev < LPFC_SLI_REV4)
1428 ndlp->nlp_rpi = mb->un.varWords[0];
1429
1430 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1431
1432 /* Only if we are not a fabric nport do we issue PRLI */
@@ -2025,7 +2027,9 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
2027 MAILBOX_t *mb = &pmb->u.mb;
2028
2029 if (!mb->mbxStatus) {
2028 ndlp->nlp_rpi = mb->un.varWords[0];
2030 /* SLI4 ports have preallocated logical rpis. */
2031 if (vport->phba->sli_rev < LPFC_SLI_REV4)
2032 ndlp->nlp_rpi = mb->un.varWords[0];
2033 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
2034 } else {
2035 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
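Both completion paths above now guard the ndlp->nlp_rpi assignment: on SLI4 the rpi was preallocated as a logical index, and varWords[0] of the REG_LOGIN completion carries a physical id that must not overwrite it. A standalone analogue of the guard, with hypothetical values:

    #include <stdio.h>

    enum { LPFC_SLI_REV3 = 3, LPFC_SLI_REV4 = 4 };

    int main(void)
    {
        int sli_rev = LPFC_SLI_REV4;
        unsigned int nlp_rpi = 7;		/* preallocated logical rpi */
        unsigned int mbox_word0 = 0x1234;	/* physical rpi in varWords[0] */

        /* SLI4 ports have preallocated logical rpis; only older
         * revisions take the rpi from the mailbox completion. */
        if (sli_rev < LPFC_SLI_REV4)
            nlp_rpi = mbox_word0;

        printf("nlp_rpi = 0x%x\n", nlp_rpi);	/* stays 7 on SLI4 */
        return 0;
    }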
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 84e4481b2406..3ccc97496ebf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -743,7 +743,14 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
743 if (bcnt == 0)
744 continue;
745 /* Now, post the SCSI buffer list sgls as a block */
746 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
746 if (!phba->sli4_hba.extents_in_use)
747 status = lpfc_sli4_post_scsi_sgl_block(phba,
748 &sblist,
749 bcnt);
750 else
751 status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
752 &sblist,
753 bcnt);
754 /* Reset SCSI buffer count for next round of posting */
755 bcnt = 0;
756 while (!list_empty(&sblist)) {
@@ -787,7 +794,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
794 dma_addr_t pdma_phys_fcp_cmd;
795 dma_addr_t pdma_phys_fcp_rsp;
796 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
790 uint16_t iotag, last_xritag = NO_XRI;
797 uint16_t iotag, last_xritag = NO_XRI, lxri = 0;
798 int status = 0, index;
799 int bcnt;
800 int non_sequential_xri = 0;
@@ -823,13 +830,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
830 break;
831 }
832
826 psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
827 if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
833 lxri = lpfc_sli4_next_xritag(phba);
834 if (lxri == NO_XRI) {
835 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
836 psb->data, psb->dma_handle);
837 kfree(psb);
838 break;
839 }
840 psb->cur_iocbq.sli4_lxritag = lxri;
841 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
842 if (last_xritag != NO_XRI
843 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
844 non_sequential_xri = 1;
@@ -861,6 +870,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
870 */
871 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
872 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
873 sgl->word2 = le32_to_cpu(sgl->word2);
874 bf_set(lpfc_sli4_sge_last, sgl, 0);
875 sgl->word2 = cpu_to_le32(sgl->word2);
876 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
@@ -869,6 +879,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
879 /* Setup the physical region for the FCP RSP */
880 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
881 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
882 sgl->word2 = le32_to_cpu(sgl->word2);
883 bf_set(lpfc_sli4_sge_last, sgl, 1);
884 sgl->word2 = cpu_to_le32(sgl->word2);
885 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
@@ -914,7 +925,21 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
925 }
926 }
927 if (bcnt) {
917 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
928 if (!phba->sli4_hba.extents_in_use)
929 status = lpfc_sli4_post_scsi_sgl_block(phba,
930 &sblist,
931 bcnt);
932 else
933 status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
934 &sblist,
935 bcnt);
936
937 if (status) {
938 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
939 "3021 SCSI SGL post error %d\n",
940 status);
941 bcnt = 0;
942 }
943 /* Reset SCSI buffer count for next round of posting */
944 while (!list_empty(&sblist)) {
945 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
@@ -2081,6 +2106,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
2106 dma_len = sg_dma_len(sgel);
2107 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2108 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2109 sgl->word2 = le32_to_cpu(sgl->word2);
2110 if ((num_bde + 1) == nseg)
2111 bf_set(lpfc_sli4_sge_last, sgl, 1);
2112 else
@@ -2794,6 +2820,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2820 * of the scsi_cmnd request_buffer
2821 */
2822 piocbq->iocb.ulpContext = pnode->nlp_rpi;
2823 if (phba->sli_rev == LPFC_SLI_REV4)
2824 piocbq->iocb.ulpContext =
2825 phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
2826 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
2827 piocbq->iocb.ulpFCP2Rcvy = 1;
2828 else
@@ -2807,7 +2836,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2836}
2837
2838/**
2810 * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit
2839 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
2840 * @vport: The virtual port for which this call is being executed.
2841 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2842 * @lun: Logical unit number.
@@ -2851,6 +2880,10 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2880 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
2881 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
2882 piocb->ulpContext = ndlp->nlp_rpi;
2883 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
2884 piocb->ulpContext =
2885 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
2886 }
2887 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2888 piocb->ulpFCP2Rcvy = 1;
2889 }
@@ -3405,9 +3438,10 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3438
3439 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3440 "0702 Issue %s to TGT %d LUN %d "
3408 "rpi x%x nlp_flag x%x\n",
3441 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
3442 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
3410 pnode->nlp_rpi, pnode->nlp_flag);
3443 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
3444 iocbq->iocb_flag);
3445
3446 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
3447 iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -3419,10 +3453,12 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3453 ret = FAILED;
3454 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3455 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3422 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
3456 "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
3457 "iocb_flag x%x\n",
3458 lpfc_taskmgmt_name(task_mgmt_cmd),
3459 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
3425 iocbqrsp->iocb.un.ulpWord[4]);
3460 iocbqrsp->iocb.un.ulpWord[4],
3461 iocbq->iocb_flag);
3462 } else if (status == IOCB_BUSY)
3463 ret = FAILED;
3464 else
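The recurring one-line addition in this file, sgl->word2 = le32_to_cpu(sgl->word2) before each bf_set(), fixes a read-modify-write on a word that lives in DMA memory in little-endian order: the field macros operate on CPU byte order, so the word has to be converted in, modified, and converted back out, or the update is wrong on big-endian hosts. A standalone sketch of the convert-modify-convert pattern (a hand-rolled swap standing in for the kernel helpers):

    #include <stdint.h>
    #include <stdio.h>

    /* toy stand-in for le32_to_cpu/cpu_to_le32 on a big-endian CPU */
    static uint32_t swap32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
               ((v << 8) & 0x00ff0000u) | (v << 24);
    }

    int main(void)
    {
        uint32_t word2 = swap32(0x00000001u);	/* little-endian storage */

        uint32_t cpu = swap32(word2);	/* le32_to_cpu: convert in     */
        cpu |= 1u << 31;		/* bf_set(...last...) analogue */
        word2 = swap32(cpu);		/* cpu_to_le32: convert back   */

        printf("word2 = 0x%08x\n", word2);
        return 0;
    }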
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fd5835e1c039..98999bbd8cbf 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -65,6 +65,9 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
65 struct lpfc_iocbq *);
66static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
67 struct hbq_dmabuf *);
68static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
69 struct lpfc_cqe *);
70
71static IOCB_t *
72lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
73{
@@ -456,7 +459,6 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
459 struct lpfc_iocbq * iocbq = NULL;
460
461 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
459
462 if (iocbq)
463 phba->iocb_cnt++;
464 if (phba->iocb_cnt > phba->iocb_max)
@@ -479,13 +481,10 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
481static struct lpfc_sglq *
482__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
483{
482 uint16_t adj_xri;
484 struct lpfc_sglq *sglq;
484 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
485 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
486 return NULL;
487 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
488 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
485
486 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
487 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
488 return sglq;
489}
490
@@ -504,12 +503,9 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
503struct lpfc_sglq *
504__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
505{
507 uint16_t adj_xri;
506 struct lpfc_sglq *sglq;
509 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
510 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
511 return NULL;
512 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
507
508 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
509 return sglq;
510}
511
@@ -532,7 +528,6 @@ static int
528__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
529 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
530{
535 uint16_t adj_xri;
531 struct lpfc_node_rrq *rrq;
532 int empty;
533 uint32_t did = 0;
@@ -553,21 +548,19 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
548 /*
549 * set the active bit even if there is no mem available.
550 */
556 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
557
551 if (NLP_CHK_FREE_REQ(ndlp))
552 goto out;
553
554 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
555 goto out;
556
564 if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
557 if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
558 goto out;
559
560 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
561 if (rrq) {
562 rrq->send_rrq = send_rrq;
570 rrq->xritag = xritag;
563 rrq->xritag = phba->sli4_hba.xri_ids[xritag];
564 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
565 rrq->ndlp = ndlp;
566 rrq->nlp_DID = ndlp->nlp_DID;
@@ -603,7 +596,6 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
596 uint16_t xritag,
597 struct lpfc_node_rrq *rrq)
598{
606 uint16_t adj_xri;
599 struct lpfc_nodelist *ndlp = NULL;
600
601 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
@@ -619,8 +611,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
611 if (!ndlp)
612 goto out;
613
622 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
623 if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
614 if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
615 rrq->send_rrq = 0;
616 rrq->xritag = 0;
617 rrq->rrq_stop_time = 0;
@@ -796,12 +787,9 @@ int
787lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
788 uint16_t xritag)
789{
799 uint16_t adj_xri;
800
801 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
790 if (!ndlp)
791 return 0;
804 if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
792 if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
793 return 1;
794 else
795 return 0;
@@ -841,7 +829,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
829 * @piocb: Pointer to the iocbq.
830 *
831 * This function is called with hbalock held. This function
843 * Gets a new driver sglq object from the sglq list. If the
832 * gets a new driver sglq object from the sglq list. If the
833 * list is not empty then it is successful, it returns pointer to the newly
834 * allocated sglq object else it returns NULL.
835 **/
@@ -851,7 +839,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
839 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
840 struct lpfc_sglq *sglq = NULL;
841 struct lpfc_sglq *start_sglq = NULL;
854 uint16_t adj_xri;
842 struct lpfc_scsi_buf *lpfc_cmd;
843 struct lpfc_nodelist *ndlp;
844 int found = 0;
@@ -870,8 +857,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
857 while (!found) {
858 if (!sglq)
859 return NULL;
873 adj_xri = sglq->sli4_xritag -
874 phba->sli4_hba.max_cfg_param.xri_base;
860 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
861 /* This xri has an rrq outstanding for this DID.
862 * put it back in the list and get another xri.
@@ -888,7 +873,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
873 }
874 sglq->ndlp = ndlp;
875 found = 1;
891 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
876 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
877 sglq->state = SGL_ALLOCATED;
878 }
879 return sglq;
@@ -944,7 +929,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
929 if (iocbq->sli4_xritag == NO_XRI)
930 sglq = NULL;
931 else
947 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
932 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
933
934 if (sglq) {
935 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
936 (sglq->state != SGL_XRI_ABORTED)) {
@@ -971,6 +957,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
957 * Clean all volatile data fields, preserve iotag and node struct.
958 */
959 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
960 iocbq->sli4_lxritag = NO_XRI;
961 iocbq->sli4_xritag = NO_XRI;
962 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
963}
@@ -2113,7 +2100,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2100 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2101 !pmb->u.mb.mbxStatus) {
2102 rpi = pmb->u.mb.un.varWords[0];
2116 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
2103 vpi = pmb->u.mb.un.varRegLogin.vpi;
2104 lpfc_unreg_login(phba, vpi, rpi, pmb);
2105 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2106 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -3881,8 +3868,10 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3868 list_del_init(&phba->sli4_hba.els_cq->list);
3869 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3870 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3884 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
3871 qindx = 0;
3872 do
3873 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3874 while (++qindx < phba->cfg_fcp_eq_count);
3875 spin_unlock_irq(&phba->hbalock);
3876
3877 /* Now physically reset the device */
@@ -4318,6 +4307,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4307 continue;
4308 } else if (rc)
4309 break;
4310
4311 phba->link_state = LPFC_INIT_MBX_CMDS;
4312 lpfc_config_port(phba, pmb);
4313 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -4421,7 +4411,8 @@ int
4411lpfc_sli_hba_setup(struct lpfc_hba *phba)
4412{
4413 uint32_t rc;
4424 int mode = 3;
4414 int mode = 3, i;
4415 int longs;
4416
4417 switch (lpfc_sli_mode) {
4418 case 2:
@@ -4491,6 +4482,35 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
4482 if (rc)
4483 goto lpfc_sli_hba_setup_error;
4484
4485 /* Initialize VPIs. */
4486 if (phba->sli_rev == LPFC_SLI_REV3) {
4487 /*
4488 * The VPI bitmask and physical ID array are allocated
4489 * and initialized once only - at driver load. A port
4490 * reset doesn't need to reinitialize this memory.
4491 */
4492 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4493 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4494 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4495 GFP_KERNEL);
4496 if (!phba->vpi_bmask) {
4497 rc = -ENOMEM;
4498 goto lpfc_sli_hba_setup_error;
4499 }
4500
4501 phba->vpi_ids = kzalloc(
4502 (phba->max_vpi+1) * sizeof(uint16_t),
4503 GFP_KERNEL);
4504 if (!phba->vpi_ids) {
4505 kfree(phba->vpi_bmask);
4506 rc = -ENOMEM;
4507 goto lpfc_sli_hba_setup_error;
4508 }
4509 for (i = 0; i < phba->max_vpi; i++)
4510 phba->vpi_ids[i] = i;
4511 }
4512 }
4513
4514 /* Init HBQs */
4515 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4516 rc = lpfc_sli_hbq_setup(phba);
@@ -4677,9 +4697,11 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4697
4698 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4699 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4680 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4700 fcp_eqidx = 0;
4701 do
4702 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4703 LPFC_QUEUE_REARM);
4704 while (++fcp_eqidx < phba->cfg_fcp_eq_count);
4705 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4706 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4707 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
@@ -4687,6 +4709,803 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4709}
4710
4711/**
4712 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
4713 * @phba: Pointer to HBA context object.
4714 * @type: The resource extent type.
4715 *
4716 * This function reads the available extent count and size for the given resource type.
4717 **/
4718static int
4719lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4720 uint16_t *extnt_count, uint16_t *extnt_size)
4721{
4722 int rc = 0;
4723 uint32_t length;
4724 uint32_t mbox_tmo;
4725 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
4726 LPFC_MBOXQ_t *mbox;
4727
4728 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4729 if (!mbox)
4730 return -ENOMEM;
4731
4732 /* Find out how many extents are available for this resource type */
4733 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
4734 sizeof(struct lpfc_sli4_cfg_mhdr));
4735 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
4736 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
4737 length, LPFC_SLI4_MBX_EMBED);
4738
4739 /* Send an extents count of 0 - the GET doesn't use it. */
4740 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
4741 LPFC_SLI4_MBX_EMBED);
4742 if (unlikely(rc)) {
4743 rc = -EIO;
4744 goto err_exit;
4745 }
4746
4747 if (!phba->sli4_hba.intr_enable)
4748 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4749 else {
4750 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
4751 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
4752 }
4753 if (unlikely(rc)) {
4754 rc = -EIO;
4755 goto err_exit;
4756 }
4757
4758 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
4759 if (bf_get(lpfc_mbox_hdr_status,
4760 &rsrc_info->header.cfg_shdr.response)) {
4761 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4762 "2930 Failed to get resource extents "
4763 "Status 0x%x Add'l Status 0x%x\n",
4764 bf_get(lpfc_mbox_hdr_status,
4765 &rsrc_info->header.cfg_shdr.response),
4766 bf_get(lpfc_mbox_hdr_add_status,
4767 &rsrc_info->header.cfg_shdr.response));
4768 rc = -EIO;
4769 goto err_exit;
4770 }
4771
4772 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
4773 &rsrc_info->u.rsp);
4774 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
4775 &rsrc_info->u.rsp);
4776 err_exit:
4777 mempool_free(mbox, phba->mbox_mem_pool);
4778 return rc;
4779}
4780
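The issue path in this routine repeats throughout the new extent code: before interrupts are enabled the mailbox is driven by polling, otherwise it is issued synchronously with the SLI4 config timeout from lpfc_mbox_tmo_val(). A standalone analogue of the branch (toy functions, not the driver API):

    #include <stdbool.h>
    #include <stdio.h>

    static int issue_poll(void)    { return 0; }	/* MBX_POLL analogue */
    static int issue_wait(int tmo) { (void)tmo; return 0; }

    int main(void)
    {
        bool intr_enable = false;	/* early init: interrupts off */
        int tmo = 60;			/* hypothetical timeout, seconds */

        int rc = intr_enable ? issue_wait(tmo) : issue_poll();
        printf("rc=%d via %s\n", rc, intr_enable ? "wait" : "poll");
        return 0;
    }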
4781/**
4782 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
4783 * @phba: Pointer to HBA context object.
4784 * @type: The extent type to check.
4785 *
4786 * This function reads the current available extents from the port and checks
4787 * if the extent count or extent size has changed since the last access.
4788 * Callers use this routine post port reset to understand if there is a
4789 * extent reprovisioning requirement.
4790 *
4791 * Returns:
4792 * -Error: error indicates problem.
4793 * 1: Extent count or size has changed.
4794 * 0: No changes.
4795 **/
4796static int
4797lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
4798{
4799 uint16_t curr_ext_cnt, rsrc_ext_cnt;
4800 uint16_t size_diff, rsrc_ext_size;
4801 int rc = 0;
4802 struct lpfc_rsrc_blks *rsrc_entry;
4803 struct list_head *rsrc_blk_list = NULL;
4804
4805 size_diff = 0;
4806 curr_ext_cnt = 0;
4807 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
4808 &rsrc_ext_cnt,
4809 &rsrc_ext_size);
4810 if (unlikely(rc))
4811 return -EIO;
4812
4813 switch (type) {
4814 case LPFC_RSC_TYPE_FCOE_RPI:
4815 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
4816 break;
4817 case LPFC_RSC_TYPE_FCOE_VPI:
4818 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
4819 break;
4820 case LPFC_RSC_TYPE_FCOE_XRI:
4821 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
4822 break;
4823 case LPFC_RSC_TYPE_FCOE_VFI:
4824 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
4825 break;
4826 default:
4827 break;
4828 }
4829
4830 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
4831 curr_ext_cnt++;
4832 if (rsrc_entry->rsrc_size != rsrc_ext_size)
4833 size_diff++;
4834 }
4835
4836 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
4837 rc = 1;
4838
4839 return rc;
4840}
4841
4842/**
4843 * lpfc_sli4_cfg_post_extnts - Issue the resource extent allocation request.
4844 * @phba: Pointer to HBA context object.
4845 * @extnt_cnt: number of available extents.
4846 * @type: the extent type (rpi, xri, vfi, vpi).
4847 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
4848 * @mbox: pointer to the caller's allocated mailbox structure.
4849 *
4850 * This function executes the extents allocation request. It also
4851 * takes care of the amount of memory needed to allocate or get the
4852 * allocated extents. It is the caller's responsibility to evaluate
4853 * the response.
4854 *
4855 * Returns:
4856 * -Error: Error value describes the condition found.
4857 * 0: if successful
4858 **/
4859static int
4860lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
4861 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
4862{
4863 int rc = 0;
4864 uint32_t req_len;
4865 uint32_t emb_len;
4866 uint32_t alloc_len, mbox_tmo;
4867
4868 /* Calculate the total requested length of the dma memory */
4869 req_len = *extnt_cnt * sizeof(uint16_t);
4870
4871 /*
4872 * Calculate the size of an embedded mailbox. The uint32_t
4873 * accounts for extents-specific word.
4874 */
4875 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
4876 sizeof(uint32_t);
4877
4878 /*
4879 * Presume the allocation and response will fit into an embedded
4880 * mailbox. If not true, reconfigure to a non-embedded mailbox.
4881 */
4882 *emb = LPFC_SLI4_MBX_EMBED;
4883 if (req_len > emb_len) {
4884 req_len = *extnt_cnt * sizeof(uint16_t) +
4885 sizeof(union lpfc_sli4_cfg_shdr) +
4886 sizeof(uint32_t);
4887 *emb = LPFC_SLI4_MBX_NEMBED;
4888 }
4889
4890 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
4891 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
4892 req_len, *emb);
4893 if (alloc_len < req_len) {
4894 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4895 "9000 Allocated DMA memory size (x%x) is "
4896 "less than the requested DMA memory "
4897 "size (x%x)\n", alloc_len, req_len);
4898 return -ENOMEM;
4899 }
4900 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb);
4901 if (unlikely(rc))
4902 return -EIO;
4903
4904 if (!phba->sli4_hba.intr_enable)
4905 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4906 else {
4907 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
4908 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
4909 }
4910
4911 if (unlikely(rc))
4912 rc = -EIO;
4913 return rc;
4914}
4915
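lpfc_sli4_cfg_post_extnts first sizes the id payload against the space left in the embedded mailbox (the MAILBOX_t minus the header, minus the one extents-specific word); only when the requested ids overflow that does it rebuild the request as non-embedded with the SGE header overhead added back in. A standalone sketch of the decision with hypothetical sizes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* hypothetical sizes standing in for the driver's structures */
        const uint32_t mailbox = 256, header = 32, extent_word = 4;
        uint32_t emb_len = mailbox - header - extent_word;

        uint16_t extnt_cnt = 200;	/* ids requested */
        uint32_t req_len = extnt_cnt * (uint32_t)sizeof(uint16_t);
        int embedded = req_len <= emb_len;

        if (!embedded)	/* non-embedded: add shared header + extents word */
            req_len += header + extent_word;

        printf("%s, req_len=%u\n",
               embedded ? "embedded" : "non-embedded", req_len);
        return 0;
    }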
4916/**
4917 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
4918 * @phba: Pointer to HBA context object.
4919 * @type: The resource extent type to allocate.
4920 *
4921 * This function allocates the number of elements for the specified
4922 * resource type.
4923 **/
4924static int
4925lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
4926{
4927 bool emb = false;
4928 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
4929 uint16_t rsrc_id, rsrc_start, j, k;
4930 uint16_t *ids;
4931 int i, rc;
4932 unsigned long longs;
4933 unsigned long *bmask;
4934 struct lpfc_rsrc_blks *rsrc_blks;
4935 LPFC_MBOXQ_t *mbox;
4936 uint32_t length;
4937 struct lpfc_id_range *id_array = NULL;
4938 void *virtaddr = NULL;
4939 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
4940 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
4941 struct list_head *ext_blk_list;
4942
4943 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
4944 &rsrc_cnt,
4945 &rsrc_size);
4946 if (unlikely(rc))
4947 return -EIO;
4948
4949 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
4950 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4951 "3009 No available Resource Extents "
4952 "for resource type 0x%x: Count: 0x%x, "
4953 "Size 0x%x\n", type, rsrc_cnt,
4954 rsrc_size);
4955 return -ENOMEM;
4956 }
4957
4958 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT,
4959 "2903 Available Resource Extents "
4960 "for resource type 0x%x: Count: 0x%x, "
4961 "Size 0x%x\n", type, rsrc_cnt,
4962 rsrc_size);
4963
4964 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4965 if (!mbox)
4966 return -ENOMEM;
4967
4968 rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox);
4969 if (unlikely(rc)) {
4970 rc = -EIO;
4971 goto err_exit;
4972 }
4973
4974 /*
4975 * Figure out where the response is located. Then get local pointers
4976 * to the response data. The port does not guarantee to respond with
4977 * the full extent count requested, so update the local variable with
4978 * the count actually allocated by the port.
4979 */
4980 if (emb == LPFC_SLI4_MBX_EMBED) {
4981 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
4982 id_array = &rsrc_ext->u.rsp.id[0];
4983 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
4984 } else {
4985 virtaddr = mbox->sge_array->addr[0];
4986 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
4987 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
4988 id_array = &n_rsrc->id;
4989 }
4990
4991 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4992 rsrc_id_cnt = rsrc_cnt * rsrc_size;
4993
4994 /*
4995 * Based on the resource size and count, correct the base and max
4996 * resource values.
4997 */
4998 length = sizeof(struct lpfc_rsrc_blks);
4999 switch (type) {
5000 case LPFC_RSC_TYPE_FCOE_RPI:
5001 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5002 sizeof(unsigned long),
5003 GFP_KERNEL);
5004 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5005 rc = -ENOMEM;
5006 goto err_exit;
5007 }
5008 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5009 sizeof(uint16_t),
5010 GFP_KERNEL);
5011 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5012 kfree(phba->sli4_hba.rpi_bmask);
5013 rc = -ENOMEM;
5014 goto err_exit;
5015 }
5016
5017 /*
5018 * The next_rpi was initialized with the maximum available
5019 * count but the port may allocate a smaller number. Catch
5020 * that case and update the next_rpi.
5021 */
5022 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5023
5024 /* Initialize local ptrs for common extent processing later. */
5025 bmask = phba->sli4_hba.rpi_bmask;
5026 ids = phba->sli4_hba.rpi_ids;
5027 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5028 break;
5029 case LPFC_RSC_TYPE_FCOE_VPI:
5030 phba->vpi_bmask = kzalloc(longs *
5031 sizeof(unsigned long),
5032 GFP_KERNEL);
5033 if (unlikely(!phba->vpi_bmask)) {
5034 rc = -ENOMEM;
5035 goto err_exit;
5036 }
5037 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5038 sizeof(uint16_t),
5039 GFP_KERNEL);
5040 if (unlikely(!phba->vpi_ids)) {
5041 kfree(phba->vpi_bmask);
5042 rc = -ENOMEM;
5043 goto err_exit;
5044 }
5045
5046 /* Initialize local ptrs for common extent processing later. */
5047 bmask = phba->vpi_bmask;
5048 ids = phba->vpi_ids;
5049 ext_blk_list = &phba->lpfc_vpi_blk_list;
5050 break;
5051 case LPFC_RSC_TYPE_FCOE_XRI:
5052 phba->sli4_hba.xri_bmask = kzalloc(longs *
5053 sizeof(unsigned long),
5054 GFP_KERNEL);
5055 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5056 rc = -ENOMEM;
5057 goto err_exit;
5058 }
5059 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5060 sizeof(uint16_t),
5061 GFP_KERNEL);
5062 if (unlikely(!phba->sli4_hba.xri_ids)) {
5063 kfree(phba->sli4_hba.xri_bmask);
5064 rc = -ENOMEM;
5065 goto err_exit;
5066 }
5067
5068 /* Initialize local ptrs for common extent processing later. */
5069 bmask = phba->sli4_hba.xri_bmask;
5070 ids = phba->sli4_hba.xri_ids;
5071 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5072 break;
5073 case LPFC_RSC_TYPE_FCOE_VFI:
5074 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5075 sizeof(unsigned long),
5076 GFP_KERNEL);
5077 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5078 rc = -ENOMEM;
5079 goto err_exit;
5080 }
5081 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5082 sizeof(uint16_t),
5083 GFP_KERNEL);
5084 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5085 kfree(phba->sli4_hba.vfi_bmask);
5086 rc = -ENOMEM;
5087 goto err_exit;
5088 }
5089
5090 /* Initialize local ptrs for common extent processing later. */
5091 bmask = phba->sli4_hba.vfi_bmask;
5092 ids = phba->sli4_hba.vfi_ids;
5093 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5094 break;
5095 default:
5096 /* Unsupported Opcode. Fail call. */
5097 id_array = NULL;
5098 bmask = NULL;
5099 ids = NULL;
5100 ext_blk_list = NULL;
5101 goto err_exit;
5102 }
5103
5104 /*
5105 * Complete initializing the extent configuration with the
5106 * allocated ids assigned to this function. The bitmask serves
5107 * as an index into the array and manages the available ids. The
5108 * array just stores the ids communicated to the port via the wqes.
5109 */
5110 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5111 if ((i % 2) == 0)
5112 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5113 &id_array[k]);
5114 else
5115 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5116 &id_array[k]);
5117
5118 rsrc_blks = kzalloc(length, GFP_KERNEL);
5119 if (unlikely(!rsrc_blks)) {
5120 rc = -ENOMEM;
5121 kfree(bmask);
5122 kfree(ids);
5123 goto err_exit;
5124 }
5125 rsrc_blks->rsrc_start = rsrc_id;
5126 rsrc_blks->rsrc_size = rsrc_size;
5127 list_add_tail(&rsrc_blks->list, ext_blk_list);
5128 rsrc_start = rsrc_id;
5129 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5130 phba->sli4_hba.scsi_xri_start = rsrc_start +
5131 lpfc_sli4_get_els_iocb_cnt(phba);
5132
5133 while (rsrc_id < (rsrc_start + rsrc_size)) {
5134 ids[j] = rsrc_id;
5135 rsrc_id++;
5136 j++;
5137 }
5138 /* Entire word processed. Get next word.*/
5139 if ((i % 2) == 1)
5140 k++;
5141 }
5142 err_exit:
5143 lpfc_sli4_mbox_cmd_free(phba, mbox);
5144 return rc;
5145}
5146
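The id-walk at the end of lpfc_sli4_alloc_extent deserves a note: each 32-bit word of the response packs two 16-bit extent base ids, so even i reads one halfword, odd i reads the other, and k only advances after both halves of a word are consumed; every base id then expands into rsrc_size consecutive ids. A standalone sketch of the same walk (assuming the even-indexed id sits in the low halfword):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* two extent base ids packed into each 32-bit response word */
        uint32_t id_array[] = { (200u << 16) | 100u, (400u << 16) | 300u };
        uint16_t rsrc_cnt = 4, rsrc_size = 2, ids[8];

        for (int i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
            uint16_t base = (i % 2 == 0) ?
                (uint16_t)id_array[k] : (uint16_t)(id_array[k] >> 16);
            for (uint16_t id = base; id < base + rsrc_size; id++)
                ids[j++] = id;
            if (i % 2 == 1)
                k++;	/* both halves consumed; next word */
        }

        for (int n = 0; n < 8; n++)
            printf("%u ", ids[n]);	/* 100 101 200 201 300 301 400 401 */
        printf("\n");
        return 0;
    }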
5147/**
5148 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5149 * @phba: Pointer to HBA context object.
5150 * @type: the extent's type.
5151 *
5152 * This function deallocates all extents of a particular resource type.
5153 * SLI4 does not allow for deallocating a particular extent range. It
5154 * is the caller's responsibility to release all kernel memory resources.
5155 **/
5156static int
5157lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5158{
5159 int rc;
5160 uint32_t length, mbox_tmo = 0;
5161 LPFC_MBOXQ_t *mbox;
5162 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5163 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5164
5165 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5166 if (!mbox)
5167 return -ENOMEM;
5168
5169 /*
5170 * This function sends an embedded mailbox because it only sends
5171 * the resource type. All extents of this type are released by the
5172 * port.
5173 */
5174 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5175 sizeof(struct lpfc_sli4_cfg_mhdr));
5176 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5177 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5178 length, LPFC_SLI4_MBX_EMBED);
5179
5180 /* Send an extents count of 0 - the dealloc doesn't use it. */
5181 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5182 LPFC_SLI4_MBX_EMBED);
5183 if (unlikely(rc)) {
5184 rc = -EIO;
5185 goto out_free_mbox;
5186 }
5187 if (!phba->sli4_hba.intr_enable)
5188 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5189 else {
5190 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5191 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5192 }
5193 if (unlikely(rc)) {
5194 rc = -EIO;
5195 goto out_free_mbox;
5196 }
5197
5198 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5199 if (bf_get(lpfc_mbox_hdr_status,
5200 &dealloc_rsrc->header.cfg_shdr.response)) {
5201 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5202 "2919 Failed to release resource extents "
5203 "for type %d - Status 0x%x Add'l Status 0x%x. "
5204 "Resource memory not released.\n",
5205 type,
5206 bf_get(lpfc_mbox_hdr_status,
5207 &dealloc_rsrc->header.cfg_shdr.response),
5208 bf_get(lpfc_mbox_hdr_add_status,
5209 &dealloc_rsrc->header.cfg_shdr.response));
5210 rc = -EIO;
5211 goto out_free_mbox;
5212 }
5213
5214 /* Release kernel memory resources for the specific type. */
5215 switch (type) {
5216 case LPFC_RSC_TYPE_FCOE_VPI:
5217 kfree(phba->vpi_bmask);
5218 kfree(phba->vpi_ids);
5219 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5220 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5221 &phba->lpfc_vpi_blk_list, list) {
5222 list_del_init(&rsrc_blk->list);
5223 kfree(rsrc_blk);
5224 }
5225 break;
5226 case LPFC_RSC_TYPE_FCOE_XRI:
5227 kfree(phba->sli4_hba.xri_bmask);
5228 kfree(phba->sli4_hba.xri_ids);
5229 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5230 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5231 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5232 list_del_init(&rsrc_blk->list);
5233 kfree(rsrc_blk);
5234 }
5235 break;
5236 case LPFC_RSC_TYPE_FCOE_VFI:
5237 kfree(phba->sli4_hba.vfi_bmask);
5238 kfree(phba->sli4_hba.vfi_ids);
5239 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5240 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5241 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5242 list_del_init(&rsrc_blk->list);
5243 kfree(rsrc_blk);
5244 }
5245 break;
5246 case LPFC_RSC_TYPE_FCOE_RPI:
5247 /* RPI bitmask and physical id array are cleaned up earlier. */
5248 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5249 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5250 list_del_init(&rsrc_blk->list);
5251 kfree(rsrc_blk);
5252 }
5253 break;
5254 default:
5255 break;
5256 }
5257
5258 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5259
5260 out_free_mbox:
5261 mempool_free(mbox, phba->mbox_mem_pool);
5262 return rc;
5263}
5264
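Each teardown case above walks its block list with list_for_each_entry_safe, the variant that caches the next node before the loop body runs, which is what makes the list_del_init plus kfree inside the loop safe. A minimal standalone analogue with a hand-rolled list:

    #include <stdio.h>
    #include <stdlib.h>

    struct blk { int id; struct blk *next; };

    int main(void)
    {
        struct blk *head = NULL;

        /* build a short list */
        for (int i = 0; i < 3; i++) {
            struct blk *b = malloc(sizeof(*b));
            if (!b)
                return 1;
            b->id = i;
            b->next = head;
            head = b;
        }

        /* "safe" walk: fetch next before freeing the current node */
        for (struct blk *cur = head, *next; cur; cur = next) {
            next = cur->next;
            printf("freeing blk %d\n", cur->id);
            free(cur);
        }
        return 0;
    }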
5265/**
5266 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5267 * @phba: Pointer to HBA context object.
5268 *
5269 * This function allocates all SLI4 resource identifiers.
5270 **/
5271int
5272lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5273{
5274 int i, rc, error = 0;
5275 uint16_t count, base;
5276 unsigned long longs;
5277
5278 if (phba->sli4_hba.extents_in_use) {
5279 /*
5280 * The port supports resource extents. The XRI, VPI, VFI, RPI
5281 * resource extent count must be read and allocated before
5282 * provisioning the resource id arrays.
5283 */
5284 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5285 LPFC_IDX_RSRC_RDY) {
5286 /*
5287 * Extent-based resources are set - the driver could
5288 * be in a port reset. Figure out if any corrective
5289 * actions need to be taken.
5290 */
5291 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5292 LPFC_RSC_TYPE_FCOE_VFI);
5293 if (rc != 0)
5294 error++;
5295 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5296 LPFC_RSC_TYPE_FCOE_VPI);
5297 if (rc != 0)
5298 error++;
5299 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5300 LPFC_RSC_TYPE_FCOE_XRI);
5301 if (rc != 0)
5302 error++;
5303 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5304 LPFC_RSC_TYPE_FCOE_RPI);
5305 if (rc != 0)
5306 error++;
5307
5308 /*
5309 * It's possible that the number of resources
5310 * provided to this port instance changed between
5311 * resets. Detect this condition and reallocate
5312 * resources. Otherwise, there is no action.
5313 */
5314 if (error) {
5315 lpfc_printf_log(phba, KERN_INFO,
5316 LOG_MBOX | LOG_INIT,
5317 "2931 Detected extent resource "
5318 "change. Reallocating all "
5319 "extents.\n");
5320 rc = lpfc_sli4_dealloc_extent(phba,
5321 LPFC_RSC_TYPE_FCOE_VFI);
5322 rc = lpfc_sli4_dealloc_extent(phba,
5323 LPFC_RSC_TYPE_FCOE_VPI);
5324 rc = lpfc_sli4_dealloc_extent(phba,
5325 LPFC_RSC_TYPE_FCOE_XRI);
5326 rc = lpfc_sli4_dealloc_extent(phba,
5327 LPFC_RSC_TYPE_FCOE_RPI);
5328 } else
5329 return 0;
5330 }
5331
5332 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5333 if (unlikely(rc))
5334 goto err_exit;
5335
5336 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5337 if (unlikely(rc))
5338 goto err_exit;
5339
5340 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5341 if (unlikely(rc))
5342 goto err_exit;
5343
5344 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5345 if (unlikely(rc))
5346 goto err_exit;
5347 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5348 LPFC_IDX_RSRC_RDY);
5349 return rc;
5350 } else {
5351 /*
5352 * The port does not support resource extents. The XRI, VPI,
5353 * VFI, RPI resource ids were determined from READ_CONFIG.
5354 * Just allocate the bitmasks and provision the resource id
5355 * arrays. If a port reset is active, the resources don't
5356 * need any action - just exit.
5357 */
5358 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5359 LPFC_IDX_RSRC_RDY)
5360 return 0;
5361
5362 /* RPIs. */
5363 count = phba->sli4_hba.max_cfg_param.max_rpi;
5364 base = phba->sli4_hba.max_cfg_param.rpi_base;
5365 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5366 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5367 sizeof(unsigned long),
5368 GFP_KERNEL);
5369 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5370 rc = -ENOMEM;
5371 goto err_exit;
5372 }
5373 phba->sli4_hba.rpi_ids = kzalloc(count *
5374 sizeof(uint16_t),
5375 GFP_KERNEL);
5376 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5377 rc = -ENOMEM;
5378 goto free_rpi_bmask;
5379 }
5380
5381 for (i = 0; i < count; i++)
5382 phba->sli4_hba.rpi_ids[i] = base + i;
5383
5384 /* VPIs. */
5385 count = phba->sli4_hba.max_cfg_param.max_vpi;
5386 base = phba->sli4_hba.max_cfg_param.vpi_base;
5387 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5388 phba->vpi_bmask = kzalloc(longs *
5389 sizeof(unsigned long),
5390 GFP_KERNEL);
5391 if (unlikely(!phba->vpi_bmask)) {
5392 rc = -ENOMEM;
5393 goto free_rpi_ids;
5394 }
5395 phba->vpi_ids = kzalloc(count *
5396 sizeof(uint16_t),
5397 GFP_KERNEL);
5398 if (unlikely(!phba->vpi_ids)) {
5399 rc = -ENOMEM;
5400 goto free_vpi_bmask;
5401 }
5402
5403 for (i = 0; i < count; i++)
5404 phba->vpi_ids[i] = base + i;
5405
5406 /* XRIs. */
5407 count = phba->sli4_hba.max_cfg_param.max_xri;
5408 base = phba->sli4_hba.max_cfg_param.xri_base;
5409 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5410 phba->sli4_hba.xri_bmask = kzalloc(longs *
5411 sizeof(unsigned long),
5412 GFP_KERNEL);
5413 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5414 rc = -ENOMEM;
5415 goto free_vpi_ids;
5416 }
5417 phba->sli4_hba.xri_ids = kzalloc(count *
5418 sizeof(uint16_t),
5419 GFP_KERNEL);
5420 if (unlikely(!phba->sli4_hba.xri_ids)) {
5421 rc = -ENOMEM;
5422 goto free_xri_bmask;
5423 }
5424
5425 for (i = 0; i < count; i++)
5426 phba->sli4_hba.xri_ids[i] = base + i;
5427
5428 /* VFIs. */
5429 count = phba->sli4_hba.max_cfg_param.max_vfi;
5430 base = phba->sli4_hba.max_cfg_param.vfi_base;
5431 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5432 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5433 sizeof(unsigned long),
5434 GFP_KERNEL);
5435 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5436 rc = -ENOMEM;
5437 goto free_xri_ids;
5438 }
5439 phba->sli4_hba.vfi_ids = kzalloc(count *
5440 sizeof(uint16_t),
5441 GFP_KERNEL);
5442 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5443 rc = -ENOMEM;
5444 goto free_vfi_bmask;
5445 }
5446
5447 for (i = 0; i < count; i++)
5448 phba->sli4_hba.vfi_ids[i] = base + i;
5449
5450 /*
5451 * Mark all resources ready. An HBA reset doesn't need
5452 * to reset the initialization.
5453 */
5454 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5455 LPFC_IDX_RSRC_RDY);
5456 return 0;
5457 }
5458
5459 free_vfi_bmask:
5460 kfree(phba->sli4_hba.vfi_bmask);
5461 free_xri_ids:
5462 kfree(phba->sli4_hba.xri_ids);
5463 free_xri_bmask:
5464 kfree(phba->sli4_hba.xri_bmask);
5465 free_vpi_ids:
5466 kfree(phba->vpi_ids);
5467 free_vpi_bmask:
5468 kfree(phba->vpi_bmask);
5469 free_rpi_ids:
5470 kfree(phba->sli4_hba.rpi_ids);
5471 free_rpi_bmask:
5472 kfree(phba->sli4_hba.rpi_bmask);
5473 err_exit:
5474 return rc;
5475}
5476
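The non-extent branch above repeats one provisioning pattern per resource type: size a bitmask in whole unsigned longs, allocate a parallel id array, and seed it with base + i. A hedged user-space sketch of that pattern (names are hypothetical; calloc stands in for kzalloc, and the explicit unwinding for the goto chain):

```c
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

struct rsrc_ids {
	unsigned long *bmask;	/* one bit per resource, 0 = free */
	uint16_t *ids;		/* logical index -> physical id */
};

static int rsrc_ids_init(struct rsrc_ids *r, uint16_t count, uint16_t base)
{
	/* Same rounding as the driver: ceil(count / BITS_PER_LONG). */
	size_t longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
	uint16_t i;

	r->bmask = calloc(longs, sizeof(unsigned long));
	if (!r->bmask)
		return -1;
	r->ids = calloc(count, sizeof(uint16_t));
	if (!r->ids) {
		free(r->bmask);	/* unwind the earlier allocation */
		return -1;
	}
	/* Physical ids are contiguous from the port-reported base. */
	for (i = 0; i < count; i++)
		r->ids[i] = base + i;
	return 0;
}

int main(void)
{
	struct rsrc_ids r;

	return rsrc_ids_init(&r, 100, 4096);
}
```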
5477/**
 5478 * lpfc_sli4_dealloc_resource_identifiers - Release all SLI4 resource identifiers.
 5479 * @phba: Pointer to HBA context object.
 5480 *
 5481 * This function releases all SLI4 resource identifiers, whether the port
 5482 * uses resource extents or the fixed ranges reported by READ_CONFIG.
5483 **/
5484int
5485lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5486{
5487 if (phba->sli4_hba.extents_in_use) {
5488 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5489 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5490 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5491 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5492 } else {
5493 kfree(phba->vpi_bmask);
5494 kfree(phba->vpi_ids);
5495 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5496 kfree(phba->sli4_hba.xri_bmask);
5497 kfree(phba->sli4_hba.xri_ids);
5498 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5499 kfree(phba->sli4_hba.vfi_bmask);
5500 kfree(phba->sli4_hba.vfi_ids);
5501 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5502 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5503 }
5504
5505 return 0;
5506}
5507
5508/**
 4690 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 5509 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
4691 * @phba: Pointer to HBA context object. 5510 * @phba: Pointer to HBA context object.
4692 * 5511 *
@@ -4708,10 +5527,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4708 struct lpfc_vport *vport = phba->pport; 5527 struct lpfc_vport *vport = phba->pport;
4709 struct lpfc_dmabuf *mp; 5528 struct lpfc_dmabuf *mp;
4710 5529
4711 /*
4712 * TODO: Why does this routine execute these task in a different
4713 * order from probe?
4714 */
4715 /* Perform a PCI function reset to start from clean */ 5530 /* Perform a PCI function reset to start from clean */
4716 rc = lpfc_pci_function_reset(phba); 5531 rc = lpfc_pci_function_reset(phba);
4717 if (unlikely(rc)) 5532 if (unlikely(rc))
@@ -4740,7 +5555,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4740 * to read FCoE param config regions 5555 * to read FCoE param config regions
4741 */ 5556 */
4742 if (lpfc_sli4_read_fcoe_params(phba, mboxq)) 5557 if (lpfc_sli4_read_fcoe_params(phba, mboxq))
4743 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5558 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
4744 "2570 Failed to read FCoE parameters\n"); 5559 "2570 Failed to read FCoE parameters\n");
4745 5560
4746 /* Issue READ_REV to collect vpd and FW information. */ 5561 /* Issue READ_REV to collect vpd and FW information. */
@@ -4873,6 +5688,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4873 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 5688 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
4874 spin_unlock_irq(&phba->hbalock); 5689 spin_unlock_irq(&phba->hbalock);
4875 5690
5691 /*
 5692 * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
 5693 * calls depend on these resources to complete port setup.
5694 */
5695 rc = lpfc_sli4_alloc_resource_identifiers(phba);
5696 if (rc) {
5697 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5698 "2920 Failed to alloc Resource IDs "
5699 "rc = x%x\n", rc);
5700 goto out_free_mbox;
5701 }
5702
4876 /* Read the port's service parameters. */ 5703 /* Read the port's service parameters. */
4877 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 5704 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
4878 if (rc) { 5705 if (rc) {
@@ -4906,35 +5733,37 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4906 goto out_free_mbox; 5733 goto out_free_mbox;
4907 } 5734 }
4908 5735
4909 if (phba->cfg_soft_wwnn) 5736 lpfc_update_vport_wwn(vport);
4910 u64_to_wwn(phba->cfg_soft_wwnn,
4911 vport->fc_sparam.nodeName.u.wwn);
4912 if (phba->cfg_soft_wwpn)
4913 u64_to_wwn(phba->cfg_soft_wwpn,
4914 vport->fc_sparam.portName.u.wwn);
4915 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
4916 sizeof(struct lpfc_name));
4917 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
4918 sizeof(struct lpfc_name));
4919 5737
4920 /* Update the fc_host data structures with new wwn. */ 5738 /* Update the fc_host data structures with new wwn. */
4921 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 5739 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4922 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 5740 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4923 5741
4924 /* Register SGL pool to the device using non-embedded mailbox command */ 5742 /* Register SGL pool to the device using non-embedded mailbox command */
4925 rc = lpfc_sli4_post_sgl_list(phba); 5743 if (!phba->sli4_hba.extents_in_use) {
4926 if (unlikely(rc)) { 5744 rc = lpfc_sli4_post_els_sgl_list(phba);
4927 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5745 if (unlikely(rc)) {
4928 "0582 Error %d during sgl post operation\n", 5746 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4929 rc); 5747 "0582 Error %d during els sgl post "
4930 rc = -ENODEV; 5748 "operation\n", rc);
4931 goto out_free_mbox; 5749 rc = -ENODEV;
5750 goto out_free_mbox;
5751 }
5752 } else {
5753 rc = lpfc_sli4_post_els_sgl_list_ext(phba);
5754 if (unlikely(rc)) {
5755 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5756 "2560 Error %d during els sgl post "
5757 "operation\n", rc);
5758 rc = -ENODEV;
5759 goto out_free_mbox;
5760 }
4932 } 5761 }
4933 5762
4934 /* Register SCSI SGL pool to the device */ 5763 /* Register SCSI SGL pool to the device */
4935 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 5764 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4936 if (unlikely(rc)) { 5765 if (unlikely(rc)) {
4937 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 5766 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4938 "0383 Error %d during scsi sgl post " 5767 "0383 Error %d during scsi sgl post "
4939 "operation\n", rc); 5768 "operation\n", rc);
4940 /* Some Scsi buffers were moved to the abort scsi list */ 5769 /* Some Scsi buffers were moved to the abort scsi list */
@@ -5747,10 +6576,15 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5747 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 6576 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
5748 sizeof(struct lpfc_mcqe)); 6577 sizeof(struct lpfc_mcqe));
5749 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 6578 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
5750 6579 /*
5751 /* Prefix the mailbox status with range x4000 to note SLI4 status. */ 6580 * When the CQE status indicates a failure and the mailbox status
6581 * indicates success then copy the CQE status into the mailbox status
6582 * (and prefix it with x4000).
6583 */
5752 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 6584 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
5753 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status); 6585 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
6586 bf_set(lpfc_mqe_status, mb,
6587 (LPFC_MBX_ERROR_RANGE | mcqe_status));
5754 rc = MBXERR_ERROR; 6588 rc = MBXERR_ERROR;
5755 } else 6589 } else
5756 lpfc_sli4_swap_str(phba, mboxq); 6590 lpfc_sli4_swap_str(phba, mboxq);
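The rewritten check above merges the CQE status into the mailbox status only when the MQE still reports success, so a genuine mailbox error is never overwritten; the x4000 prefix marks the result as CQE-sourced. A small sketch of that merge rule (the constants and plain integers below are simplified stand-ins for the bf_get/bf_set bit-field accessors):

```c
#include <stdint.h>
#include <stdio.h>

#define MBX_SUCCESS_SK		0x0000	/* hypothetical stand-ins */
#define MB_CQE_STATUS_OK_SK	0x0000
#define MBX_ERROR_RANGE_SK	0x4000	/* marks a CQE-sourced status */

/* Fold a completion-queue status into the mailbox status without
 * clobbering an error the mailbox itself already reported. */
static uint16_t merge_status(uint16_t mqe_status, uint16_t mcqe_status)
{
	if (mcqe_status != MB_CQE_STATUS_OK_SK &&
	    mqe_status == MBX_SUCCESS_SK)
		return MBX_ERROR_RANGE_SK | mcqe_status;
	return mqe_status;
}

int main(void)
{
	printf("0x%04x\n", merge_status(MBX_SUCCESS_SK, 0x2)); /* 0x4002 */
	printf("0x%04x\n", merge_status(0x0011, 0x2));	       /* kept */
	return 0;
}
```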
@@ -5819,7 +6653,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5819 else 6653 else
5820 rc = -EIO; 6654 rc = -EIO;
5821 if (rc != MBX_SUCCESS) 6655 if (rc != MBX_SUCCESS)
5822 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6656 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
5823 "(%d):2541 Mailbox command x%x " 6657 "(%d):2541 Mailbox command x%x "
5824 "(x%x) cannot issue Data: x%x x%x\n", 6658 "(x%x) cannot issue Data: x%x x%x\n",
5825 mboxq->vport ? mboxq->vport->vpi : 0, 6659 mboxq->vport ? mboxq->vport->vpi : 0,
@@ -6307,6 +7141,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
6307 sgl->addr_hi = bpl->addrHigh; 7141 sgl->addr_hi = bpl->addrHigh;
6308 sgl->addr_lo = bpl->addrLow; 7142 sgl->addr_lo = bpl->addrLow;
6309 7143
7144 sgl->word2 = le32_to_cpu(sgl->word2);
6310 if ((i+1) == numBdes) 7145 if ((i+1) == numBdes)
6311 bf_set(lpfc_sli4_sge_last, sgl, 1); 7146 bf_set(lpfc_sli4_sge_last, sgl, 1);
6312 else 7147 else
@@ -6343,6 +7178,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
6343 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 7178 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
6344 sgl->addr_lo = 7179 sgl->addr_lo =
6345 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 7180 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
7181 sgl->word2 = le32_to_cpu(sgl->word2);
6346 bf_set(lpfc_sli4_sge_last, sgl, 1); 7182 bf_set(lpfc_sli4_sge_last, sgl, 1);
6347 sgl->word2 = cpu_to_le32(sgl->word2); 7183 sgl->word2 = cpu_to_le32(sgl->word2);
6348 sgl->sge_len = 7184 sgl->sge_len =
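The two added le32_to_cpu() lines make word2 a proper read-modify-write: the descriptor word lives in little-endian layout, so it is brought into CPU order, the bit-fields are edited, and it is converted back before the hardware sees it. A hedged, portable sketch of the same round-trip (the helpers are simplified stand-ins for the kernel's byte-order API):

```c
#include <stdint.h>
#include <stdio.h>

/* Hedged stand-ins for the le32 helpers: identity on little-endian
 * hosts, a byte swap on big-endian ones - which is exactly why a
 * missing conversion goes unnoticed on x86. */
static uint32_t bswap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00) |
	       ((v << 8) & 0x00ff0000) | (v << 24);
}

static int host_is_le(void)
{
	const uint16_t probe = 1;

	return *(const uint8_t *)&probe == 1;
}

static uint32_t le32_to_cpu_sk(uint32_t v) { return host_is_le() ? v : bswap32(v); }
static uint32_t cpu_to_le32_sk(uint32_t v) { return host_is_le() ? v : bswap32(v); }

#define SGE_LAST_BIT (1u << 31)	/* hypothetical 'last sge' flag position */

/* The fixed pattern: convert to CPU order, edit the bit-field, then
 * convert back before the device reads the word. */
static uint32_t set_last_sge(uint32_t word2_le, int last)
{
	uint32_t w = le32_to_cpu_sk(word2_le);

	if (last)
		w |= SGE_LAST_BIT;
	else
		w &= ~SGE_LAST_BIT;
	return cpu_to_le32_sk(w);
}

int main(void)
{
	printf("0x%08x\n", set_last_sge(0, 1));
	return 0;
}
```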
@@ -6474,7 +7310,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6474 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 7310 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
6475 >> LPFC_FIP_ELS_ID_SHIFT); 7311 >> LPFC_FIP_ELS_ID_SHIFT);
6476 } 7312 }
6477 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, ndlp->nlp_rpi); 7313 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
7314 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
6478 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 7315 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
6479 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 7316 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
6480 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 7317 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
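The substitution above is the heart of the extents rework: the WQE must carry the port-assigned physical id, so the driver's logical index is pushed through the rpi_ids[] map at the last moment. A hedged sketch of that translation with a bounds check (the array and sentinel names are illustrative, not the lpfc structures):

```c
#include <stdint.h>

#define NO_ID_SK 0xFFFF		/* hypothetical 'invalid id' sentinel */

/* Logical indices are dense and start at 0; physical ids start
 * wherever the port provisioned them. The ids[] table bridges the
 * two name spaces. */
static uint16_t logical_to_phys(const uint16_t *ids, uint16_t count,
				uint16_t logical)
{
	if (logical >= count)
		return NO_ID_SK;
	return ids[logical];
}

int main(void)
{
	uint16_t rpi_ids[4] = { 4096, 4097, 4098, 4099 };

	return logical_to_phys(rpi_ids, 4, 2) == 4098 ? 0 : 1;
}
```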
@@ -6623,14 +7460,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6623 iocbq->iocb.ulpContext); 7460 iocbq->iocb.ulpContext);
6624 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 7461 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
6625 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7462 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
6626 iocbq->vport->vpi + phba->vpi_base); 7463 phba->vpi_ids[iocbq->vport->vpi]);
6627 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 7464 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
6628 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 7465 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
6629 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 7466 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
6630 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 7467 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
6631 LPFC_WQE_LENLOC_WORD3); 7468 LPFC_WQE_LENLOC_WORD3);
6632 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 7469 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
6633 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, ndlp->nlp_rpi); 7470 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
7471 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
6634 command_type = OTHER_COMMAND; 7472 command_type = OTHER_COMMAND;
6635 break; 7473 break;
6636 case CMD_CLOSE_XRI_CN: 7474 case CMD_CLOSE_XRI_CN:
@@ -6729,6 +7567,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6729 return IOCB_ERROR; 7567 return IOCB_ERROR;
6730 break; 7568 break;
6731 } 7569 }
7570
6732 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 7571 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
6733 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 7572 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
6734 wqe->generic.wqe_com.abort_tag = abort_tag; 7573 wqe->generic.wqe_com.abort_tag = abort_tag;
@@ -6776,7 +7615,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6776 return IOCB_BUSY; 7615 return IOCB_BUSY;
6777 } 7616 }
6778 } else { 7617 } else {
6779 sglq = __lpfc_sli_get_sglq(phba, piocb); 7618 sglq = __lpfc_sli_get_sglq(phba, piocb);
6780 if (!sglq) { 7619 if (!sglq) {
6781 if (!(flag & SLI_IOCB_RET_IOCB)) { 7620 if (!(flag & SLI_IOCB_RET_IOCB)) {
6782 __lpfc_sli_ringtx_put(phba, 7621 __lpfc_sli_ringtx_put(phba,
@@ -6789,11 +7628,11 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6789 } 7628 }
6790 } 7629 }
6791 } else if (piocb->iocb_flag & LPFC_IO_FCP) { 7630 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6792 sglq = NULL; /* These IO's already have an XRI and 7631 /* These IO's already have an XRI and a mapped sgl. */
6793 * a mapped sgl. 7632 sglq = NULL;
6794 */
6795 } else { 7633 } else {
6796 /* This is a continuation of a commandi,(CX) so this 7634 /*
 7635 * This is a continuation of a command (CX), so this
6797 * sglq is on the active list 7636 * sglq is on the active list
6798 */ 7637 */
6799 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag); 7638 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
@@ -6802,8 +7641,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6802 } 7641 }
6803 7642
6804 if (sglq) { 7643 if (sglq) {
7644 piocb->sli4_lxritag = sglq->sli4_lxritag;
6805 piocb->sli4_xritag = sglq->sli4_xritag; 7645 piocb->sli4_xritag = sglq->sli4_xritag;
6806
6807 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) 7646 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
6808 return IOCB_ERROR; 7647 return IOCB_ERROR;
6809 } 7648 }
@@ -9799,7 +10638,12 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
9799 break; 10638 break;
9800 case LPFC_WCQ: 10639 case LPFC_WCQ:
9801 while ((cqe = lpfc_sli4_cq_get(cq))) { 10640 while ((cqe = lpfc_sli4_cq_get(cq))) {
9802 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe); 10641 if (cq->subtype == LPFC_FCP)
10642 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
10643 cqe);
10644 else
10645 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
10646 cqe);
9803 if (!(++ecount % LPFC_GET_QE_REL_INT)) 10647 if (!(++ecount % LPFC_GET_QE_REL_INT))
9804 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 10648 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
9805 } 10649 }
@@ -11446,6 +12290,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
11446 LPFC_MBOXQ_t *mbox; 12290 LPFC_MBOXQ_t *mbox;
11447 int rc; 12291 int rc;
11448 uint32_t shdr_status, shdr_add_status; 12292 uint32_t shdr_status, shdr_add_status;
12293 uint32_t mbox_tmo;
11449 union lpfc_sli4_cfg_shdr *shdr; 12294 union lpfc_sli4_cfg_shdr *shdr;
11450 12295
11451 if (xritag == NO_XRI) { 12296 if (xritag == NO_XRI) {
@@ -11479,8 +12324,10 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
11479 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 12324 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
11480 if (!phba->sli4_hba.intr_enable) 12325 if (!phba->sli4_hba.intr_enable)
11481 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12326 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
11482 else 12327 else {
11483 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 12328 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
12329 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12330 }
11484 /* The IOCTL status is embedded in the mailbox subheader. */ 12331 /* The IOCTL status is embedded in the mailbox subheader. */
11485 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 12332 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
11486 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12333 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -11498,6 +12345,76 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
11498} 12345}
11499 12346
11500/** 12347/**
 12348 * lpfc_sli4_alloc_xri - Get an available xri in the port's range
 12349 * @phba: pointer to lpfc hba data structure.
 12350 *
 12351 * This routine is invoked to allocate the next available logical xri
 12352 * from the driver's xri bitmask. Because the index is logical, the
 12353 * search starts at zero each time; callers translate the logical index
 12354 * to a physical xri through the xri_ids array.
 12355 *
 12356 * Return codes
 12357 * the allocated logical xri - successful
 12358 * NO_XRI - otherwise.
 12359 *
 12360 */
12361uint16_t
12362lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
12363{
12364 unsigned long xri;
12365
12366 /*
12367 * Fetch the next logical xri. Because this index is logical,
12368 * the driver starts at 0 each time.
12369 */
12370 spin_lock_irq(&phba->hbalock);
12371 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
12372 phba->sli4_hba.max_cfg_param.max_xri, 0);
12373 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
12374 spin_unlock_irq(&phba->hbalock);
12375 return NO_XRI;
12376 } else {
12377 set_bit(xri, phba->sli4_hba.xri_bmask);
12378 phba->sli4_hba.max_cfg_param.xri_used++;
12379 phba->sli4_hba.xri_count++;
12380 }
12381
12382 spin_unlock_irq(&phba->hbalock);
12383 return xri;
12384}
12385
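lpfc_sli4_alloc_xri() above is a first-fit bitmap allocator: find the first clear bit under the lock, set it, and bump the usage counters. The search itself, reduced to a hedged user-space sketch (a plain loop stands in for find_next_zero_bit; no locking shown):

```c
#include <limits.h>
#include <stddef.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

/* Return the index of the first clear bit below 'limit', marking it
 * used, or 'limit' itself when the map is full (the caller treats
 * that as NO_XRI). */
static size_t alloc_first_zero(unsigned long *bmask, size_t limit)
{
	size_t i;

	for (i = 0; i < limit; i++) {
		unsigned long mask = 1UL << (i % BITS_PER_LONG);

		if (!(bmask[i / BITS_PER_LONG] & mask)) {
			bmask[i / BITS_PER_LONG] |= mask;
			return i;
		}
	}
	return limit;	/* exhausted */
}

int main(void)
{
	unsigned long bmask[2] = { 0 };

	return alloc_first_zero(bmask, 100) == 0 ? 0 : 1;
}
```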
12386/**
 12387 * __lpfc_sli4_free_xri - Release an xri for reuse.
 12388 * @phba: pointer to lpfc hba data structure.
 12389 *
 12390 * This routine is invoked to release an xri to the pool of available
 12391 * xris maintained by the driver. The caller must hold the hbalock.
12392 **/
12393void
12394__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
12395{
12396 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
12397 phba->sli4_hba.xri_count--;
12398 phba->sli4_hba.max_cfg_param.xri_used--;
12399 }
12400}
12401
12402/**
12403 * lpfc_sli4_free_xri - Release an xri for reuse.
12404 * @phba: pointer to lpfc hba data structure.
12405 *
 12406 * This routine is invoked to release an xri to the pool of
 12407 * available xris maintained by the driver.
12408 **/
12409void
12410lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
12411{
12412 spin_lock_irq(&phba->hbalock);
12413 __lpfc_sli4_free_xri(phba, xri);
12414 spin_unlock_irq(&phba->hbalock);
12415}
12416
12417/**
11501 * lpfc_sli4_next_xritag - Get an xritag for the io 12418 * lpfc_sli4_next_xritag - Get an xritag for the io
11502 * @phba: Pointer to HBA context object. 12419 * @phba: Pointer to HBA context object.
11503 * 12420 *
@@ -11510,30 +12427,23 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
11510uint16_t 12427uint16_t
11511lpfc_sli4_next_xritag(struct lpfc_hba *phba) 12428lpfc_sli4_next_xritag(struct lpfc_hba *phba)
11512{ 12429{
11513 uint16_t xritag; 12430 uint16_t xri_index;
11514 12431
11515 spin_lock_irq(&phba->hbalock); 12432 xri_index = lpfc_sli4_alloc_xri(phba);
11516 xritag = phba->sli4_hba.next_xri; 12433 if (xri_index != NO_XRI)
11517 if ((xritag != (uint16_t) -1) && xritag < 12434 return xri_index;
11518 (phba->sli4_hba.max_cfg_param.max_xri 12435
11519 + phba->sli4_hba.max_cfg_param.xri_base)) { 12436 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11520 phba->sli4_hba.next_xri++;
11521 phba->sli4_hba.max_cfg_param.xri_used++;
11522 spin_unlock_irq(&phba->hbalock);
11523 return xritag;
11524 }
11525 spin_unlock_irq(&phba->hbalock);
11526 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11527 "2004 Failed to allocate XRI.last XRITAG is %d" 12437 "2004 Failed to allocate XRI.last XRITAG is %d"
11528 " Max XRI is %d, Used XRI is %d\n", 12438 " Max XRI is %d, Used XRI is %d\n",
11529 phba->sli4_hba.next_xri, 12439 xri_index,
11530 phba->sli4_hba.max_cfg_param.max_xri, 12440 phba->sli4_hba.max_cfg_param.max_xri,
11531 phba->sli4_hba.max_cfg_param.xri_used); 12441 phba->sli4_hba.max_cfg_param.xri_used);
11532 return -1; 12442 return NO_XRI;
11533} 12443}
11534 12444
11535/** 12445/**
11536 * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware. 12446 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
11537 * @phba: pointer to lpfc hba data structure. 12447 * @phba: pointer to lpfc hba data structure.
11538 * 12448 *
11539 * This routine is invoked to post a block of driver's sgl pages to the 12449 * This routine is invoked to post a block of driver's sgl pages to the
@@ -11542,7 +12452,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
11542 * stopped. 12452 * stopped.
11543 **/ 12453 **/
11544int 12454int
11545lpfc_sli4_post_sgl_list(struct lpfc_hba *phba) 12455lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
11546{ 12456{
11547 struct lpfc_sglq *sglq_entry; 12457 struct lpfc_sglq *sglq_entry;
11548 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 12458 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
@@ -11551,7 +12461,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11551 LPFC_MBOXQ_t *mbox; 12461 LPFC_MBOXQ_t *mbox;
11552 uint32_t reqlen, alloclen, pg_pairs; 12462 uint32_t reqlen, alloclen, pg_pairs;
11553 uint32_t mbox_tmo; 12463 uint32_t mbox_tmo;
11554 uint16_t xritag_start = 0; 12464 uint16_t xritag_start = 0, lxri = 0;
11555 int els_xri_cnt, rc = 0; 12465 int els_xri_cnt, rc = 0;
11556 uint32_t shdr_status, shdr_add_status; 12466 uint32_t shdr_status, shdr_add_status;
11557 union lpfc_sli4_cfg_shdr *shdr; 12467 union lpfc_sli4_cfg_shdr *shdr;
@@ -11568,11 +12478,8 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11568 return -ENOMEM; 12478 return -ENOMEM;
11569 } 12479 }
11570 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12480 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11571 if (!mbox) { 12481 if (!mbox)
11572 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11573 "2560 Failed to allocate mbox cmd memory\n");
11574 return -ENOMEM; 12482 return -ENOMEM;
11575 }
11576 12483
11577 /* Allocate DMA memory and set up the non-embedded mailbox command */ 12484 /* Allocate DMA memory and set up the non-embedded mailbox command */
11578 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12485 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -11587,15 +12494,30 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11587 lpfc_sli4_mbox_cmd_free(phba, mbox); 12494 lpfc_sli4_mbox_cmd_free(phba, mbox);
11588 return -ENOMEM; 12495 return -ENOMEM;
11589 } 12496 }
11590 /* Get the first SGE entry from the non-embedded DMA memory */
11591 viraddr = mbox->sge_array->addr[0];
11592
11593 /* Set up the SGL pages in the non-embedded DMA pages */ 12497 /* Set up the SGL pages in the non-embedded DMA pages */
12498 viraddr = mbox->sge_array->addr[0];
11594 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 12499 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
11595 sgl_pg_pairs = &sgl->sgl_pg_pairs; 12500 sgl_pg_pairs = &sgl->sgl_pg_pairs;
11596 12501
11597 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) { 12502 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
11598 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs]; 12503 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
12504
12505 /*
12506 * Assign the sglq a physical xri only if the driver has not
12507 * initialized those resources. A port reset only needs
12508 * the sglq's posted.
12509 */
12510 if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
12511 LPFC_XRI_RSRC_RDY) {
12512 lxri = lpfc_sli4_next_xritag(phba);
12513 if (lxri == NO_XRI) {
12514 lpfc_sli4_mbox_cmd_free(phba, mbox);
12515 return -ENOMEM;
12516 }
12517 sglq_entry->sli4_lxritag = lxri;
12518 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
12519 }
12520
11599 /* Set up the sge entry */ 12521 /* Set up the sge entry */
11600 sgl_pg_pairs->sgl_pg0_addr_lo = 12522 sgl_pg_pairs->sgl_pg0_addr_lo =
11601 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 12523 cpu_to_le32(putPaddrLow(sglq_entry->phys));
@@ -11605,16 +12527,17 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11605 cpu_to_le32(putPaddrLow(0)); 12527 cpu_to_le32(putPaddrLow(0));
11606 sgl_pg_pairs->sgl_pg1_addr_hi = 12528 sgl_pg_pairs->sgl_pg1_addr_hi =
11607 cpu_to_le32(putPaddrHigh(0)); 12529 cpu_to_le32(putPaddrHigh(0));
12530
11608 /* Keep the first xritag on the list */ 12531 /* Keep the first xritag on the list */
11609 if (pg_pairs == 0) 12532 if (pg_pairs == 0)
11610 xritag_start = sglq_entry->sli4_xritag; 12533 xritag_start = sglq_entry->sli4_xritag;
11611 sgl_pg_pairs++; 12534 sgl_pg_pairs++;
11612 } 12535 }
12536
12537 /* Complete initialization and perform endian conversion. */
11613 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 12538 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
11614 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt); 12539 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
11615 /* Perform endian conversion if necessary */
11616 sgl->word0 = cpu_to_le32(sgl->word0); 12540 sgl->word0 = cpu_to_le32(sgl->word0);
11617
11618 if (!phba->sli4_hba.intr_enable) 12541 if (!phba->sli4_hba.intr_enable)
11619 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12542 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
11620 else { 12543 else {
@@ -11633,6 +12556,181 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11633 shdr_status, shdr_add_status, rc); 12556 shdr_status, shdr_add_status, rc);
11634 rc = -ENXIO; 12557 rc = -ENXIO;
11635 } 12558 }
12559
12560 if (rc == 0)
12561 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
12562 LPFC_XRI_RSRC_RDY);
12563 return rc;
12564}
12565
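Each loop iteration above writes one posting entry, which is nothing more than a 64-bit DMA address split into little-endian 32-bit halves, with the second page pair left zero for single-page SGLs. A hedged sketch of filling such an entry (the struct layout and helpers are illustrative counterparts of sgl_page_pairs and putPaddrLow/High; a little-endian host is assumed):

```c
#include <stdint.h>

/* One posting entry: low/high halves of up to two page addresses. */
struct sgl_pg_pair_sk {
	uint32_t pg0_lo, pg0_hi;
	uint32_t pg1_lo, pg1_hi;
};

/* Identity on the little-endian host assumed here. */
static uint32_t cpu_to_le32_sk(uint32_t v) { return v; }

static void fill_pg_pair(struct sgl_pg_pair_sk *p, uint64_t pg0_phys)
{
	p->pg0_lo = cpu_to_le32_sk((uint32_t)pg0_phys);
	p->pg0_hi = cpu_to_le32_sk((uint32_t)(pg0_phys >> 32));
	p->pg1_lo = cpu_to_le32_sk(0);	/* single-page SGL: unused */
	p->pg1_hi = cpu_to_le32_sk(0);
}

int main(void)
{
	struct sgl_pg_pair_sk p;

	fill_pg_pair(&p, 0x123456789ULL);
	return (p.pg0_lo == 0x23456789u && p.pg0_hi == 0x1u) ? 0 : 1;
}
```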
12566/**
12567 * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
12568 * @phba: pointer to lpfc hba data structure.
12569 *
 12570 * This routine is invoked to post a block of the driver's ELS sgl pages to
 12571 * the HBA using non-embedded mailbox commands. No lock is held. This routine
12572 * is only called when the driver is loading and after all IO has been
12573 * stopped.
12574 **/
12575int
12576lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
12577{
12578 struct lpfc_sglq *sglq_entry;
12579 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
12580 struct sgl_page_pairs *sgl_pg_pairs;
12581 void *viraddr;
12582 LPFC_MBOXQ_t *mbox;
12583 uint32_t reqlen, alloclen, index;
12584 uint32_t mbox_tmo;
12585 uint16_t rsrc_start, rsrc_size, els_xri_cnt;
12586 uint16_t xritag_start = 0, lxri = 0;
12587 struct lpfc_rsrc_blks *rsrc_blk;
12588 int cnt, ttl_cnt, rc = 0;
12589 int loop_cnt;
12590 uint32_t shdr_status, shdr_add_status;
12591 union lpfc_sli4_cfg_shdr *shdr;
12592
12593 /* The number of sgls to be posted */
12594 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
12595
12596 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
12597 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
12598 if (reqlen > SLI4_PAGE_SIZE) {
12599 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12600 "2989 Block sgl registration required DMA "
12601 "size (%d) great than a page\n", reqlen);
12602 return -ENOMEM;
12603 }
12604
12605 cnt = 0;
12606 ttl_cnt = 0;
12607 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
12608 list) {
12609 rsrc_start = rsrc_blk->rsrc_start;
12610 rsrc_size = rsrc_blk->rsrc_size;
12611
12612 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12613 "3014 Working ELS Extent start %d, cnt %d\n",
12614 rsrc_start, rsrc_size);
12615
12616 loop_cnt = min(els_xri_cnt, rsrc_size);
12617 if (ttl_cnt + loop_cnt >= els_xri_cnt) {
12618 loop_cnt = els_xri_cnt - ttl_cnt;
12619 ttl_cnt = els_xri_cnt;
12620 }
12621
12622 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12623 if (!mbox)
12624 return -ENOMEM;
12625 /*
12626 * Allocate DMA memory and set up the non-embedded mailbox
12627 * command.
12628 */
12629 alloclen = lpfc_sli4_config(phba, mbox,
12630 LPFC_MBOX_SUBSYSTEM_FCOE,
12631 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
12632 reqlen, LPFC_SLI4_MBX_NEMBED);
12633 if (alloclen < reqlen) {
12634 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12635 "2987 Allocated DMA memory size (%d) "
12636 "is less than the requested DMA memory "
12637 "size (%d)\n", alloclen, reqlen);
12638 lpfc_sli4_mbox_cmd_free(phba, mbox);
12639 return -ENOMEM;
12640 }
12641
12642 /* Set up the SGL pages in the non-embedded DMA pages */
12643 viraddr = mbox->sge_array->addr[0];
12644 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
12645 sgl_pg_pairs = &sgl->sgl_pg_pairs;
12646
12647 /*
12648 * The starting resource may not begin at zero. Control
12649 * the loop variants via the block resource parameters,
12650 * but handle the sge pointers with a zero-based index
12651 * that doesn't get reset per loop pass.
12652 */
12653 for (index = rsrc_start;
12654 index < rsrc_start + loop_cnt;
12655 index++) {
12656 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
12657
12658 /*
12659 * Assign the sglq a physical xri only if the driver
12660 * has not initialized those resources. A port reset
12661 * only needs the sglq's posted.
12662 */
12663 if (bf_get(lpfc_xri_rsrc_rdy,
12664 &phba->sli4_hba.sli4_flags) !=
12665 LPFC_XRI_RSRC_RDY) {
12666 lxri = lpfc_sli4_next_xritag(phba);
12667 if (lxri == NO_XRI) {
12668 lpfc_sli4_mbox_cmd_free(phba, mbox);
12669 rc = -ENOMEM;
12670 goto err_exit;
12671 }
12672 sglq_entry->sli4_lxritag = lxri;
12673 sglq_entry->sli4_xritag =
12674 phba->sli4_hba.xri_ids[lxri];
12675 }
12676
12677 /* Set up the sge entry */
12678 sgl_pg_pairs->sgl_pg0_addr_lo =
12679 cpu_to_le32(putPaddrLow(sglq_entry->phys));
12680 sgl_pg_pairs->sgl_pg0_addr_hi =
12681 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
12682 sgl_pg_pairs->sgl_pg1_addr_lo =
12683 cpu_to_le32(putPaddrLow(0));
12684 sgl_pg_pairs->sgl_pg1_addr_hi =
12685 cpu_to_le32(putPaddrHigh(0));
12686
12687 /* Track the starting physical XRI for the mailbox. */
12688 if (index == rsrc_start)
12689 xritag_start = sglq_entry->sli4_xritag;
12690 sgl_pg_pairs++;
12691 cnt++;
12692 }
12693
12694 /* Complete initialization and perform endian conversion. */
12695 rsrc_blk->rsrc_used += loop_cnt;
12696 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
12697 bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
12698 sgl->word0 = cpu_to_le32(sgl->word0);
12699
12700 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12701 "3015 Post ELS Extent SGL, start %d, "
12702 "cnt %d, used %d\n",
12703 xritag_start, loop_cnt, rsrc_blk->rsrc_used);
12704 if (!phba->sli4_hba.intr_enable)
12705 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12706 else {
12707 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
12708 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12709 }
12710 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
12711 shdr_status = bf_get(lpfc_mbox_hdr_status,
12712 &shdr->response);
12713 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
12714 &shdr->response);
12715 if (rc != MBX_TIMEOUT)
12716 lpfc_sli4_mbox_cmd_free(phba, mbox);
12717 if (shdr_status || shdr_add_status || rc) {
12718 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12719 "2988 POST_SGL_BLOCK mailbox "
12720 "command failed status x%x "
12721 "add_status x%x mbx status x%x\n",
12722 shdr_status, shdr_add_status, rc);
12723 rc = -ENXIO;
12724 goto err_exit;
12725 }
12726 if (ttl_cnt >= els_xri_cnt)
12727 break;
12728 }
12729
12730 err_exit:
12731 if (rc == 0)
12732 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
12733 LPFC_XRI_RSRC_RDY);
11636 return rc; 12734 return rc;
11637} 12735}
11638 12736
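The extent variant above cannot post everything in one command because physical XRIs are only contiguous within one extent block, so each pass is clamped to min(remaining, block size) and the outer loop stops once the running total covers the request. The control flow, reduced to a hedged sketch:

```c
#include <stdio.h>

/* Walk extent blocks of the given sizes, posting at most 'want'
 * items in contiguous per-block chunks; returns the number of
 * mailbox passes that would be issued. */
static int post_in_extents(const int *block_size, int nblocks, int want)
{
	int ttl = 0, passes = 0, b;

	for (b = 0; b < nblocks && ttl < want; b++) {
		int chunk = block_size[b];

		if (ttl + chunk > want)
			chunk = want - ttl;	/* last, partial pass */
		/* ...build and issue one mailbox for 'chunk' items... */
		ttl += chunk;
		passes++;
	}
	return passes;
}

int main(void)
{
	int blocks[] = { 16, 16, 64 };

	printf("%d\n", post_in_extents(blocks, 3, 40));	/* 3 passes */
	return 0;
}
```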
@@ -11693,6 +12791,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
11693 lpfc_sli4_mbox_cmd_free(phba, mbox); 12791 lpfc_sli4_mbox_cmd_free(phba, mbox);
11694 return -ENOMEM; 12792 return -ENOMEM;
11695 } 12793 }
12794
11696 /* Get the first SGE entry from the non-embedded DMA memory */ 12795 /* Get the first SGE entry from the non-embedded DMA memory */
11697 viraddr = mbox->sge_array->addr[0]; 12796 viraddr = mbox->sge_array->addr[0];
11698 12797
@@ -11748,6 +12847,169 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
11748} 12847}
11749 12848
11750/** 12849/**
12850 * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
12851 * @phba: pointer to lpfc hba data structure.
12852 * @sblist: pointer to scsi buffer list.
 12853 * @cnt: number of scsi buffers on the list.
 12854 *
 12855 * This routine is invoked to post a block of @cnt scsi sgl pages from a
 12856 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
 12857 * No lock is held.
12858 *
12859 **/
12860int
12861lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
12862 int cnt)
12863{
12864 struct lpfc_scsi_buf *psb = NULL;
12865 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
12866 struct sgl_page_pairs *sgl_pg_pairs;
12867 void *viraddr;
12868 LPFC_MBOXQ_t *mbox;
12869 uint32_t reqlen, alloclen, pg_pairs;
12870 uint32_t mbox_tmo;
12871 uint16_t xri_start = 0, scsi_xri_start;
12872 uint16_t rsrc_range;
12873 int rc = 0, avail_cnt;
12874 uint32_t shdr_status, shdr_add_status;
12875 dma_addr_t pdma_phys_bpl1;
12876 union lpfc_sli4_cfg_shdr *shdr;
12877 struct lpfc_rsrc_blks *rsrc_blk;
12878 uint32_t xri_cnt = 0;
12879
12880 /* Calculate the total requested length of the dma memory */
12881 reqlen = cnt * sizeof(struct sgl_page_pairs) +
12882 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
12883 if (reqlen > SLI4_PAGE_SIZE) {
12884 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12885 "2932 Block sgl registration required DMA "
12886 "size (%d) great than a page\n", reqlen);
12887 return -ENOMEM;
12888 }
12889
12890 /*
12891 * The use of extents requires the driver to post the sgl headers
12892 * in multiple postings to meet the contiguous resource assignment.
12893 */
12894 psb = list_prepare_entry(psb, sblist, list);
12895 scsi_xri_start = phba->sli4_hba.scsi_xri_start;
12896 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
12897 list) {
12898 rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
12899 if (rsrc_range < scsi_xri_start)
12900 continue;
12901 else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
12902 continue;
12903 else
12904 avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
12905
12906 reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
12907 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
12908 /*
12909 * Allocate DMA memory and set up the non-embedded mailbox
12910 * command. The mbox is used to post an SGL page per loop
12911 * but the DMA memory has a use-once semantic so the mailbox
12912 * is used and freed per loop pass.
12913 */
12914 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12915 if (!mbox) {
12916 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12917 "2933 Failed to allocate mbox cmd "
12918 "memory\n");
12919 return -ENOMEM;
12920 }
12921 alloclen = lpfc_sli4_config(phba, mbox,
12922 LPFC_MBOX_SUBSYSTEM_FCOE,
12923 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
12924 reqlen,
12925 LPFC_SLI4_MBX_NEMBED);
12926 if (alloclen < reqlen) {
12927 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12928 "2934 Allocated DMA memory size (%d) "
12929 "is less than the requested DMA memory "
12930 "size (%d)\n", alloclen, reqlen);
12931 lpfc_sli4_mbox_cmd_free(phba, mbox);
12932 return -ENOMEM;
12933 }
12934
12935 /* Get the first SGE entry from the non-embedded DMA memory */
12936 viraddr = mbox->sge_array->addr[0];
12937
12938 /* Set up the SGL pages in the non-embedded DMA pages */
12939 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
12940 sgl_pg_pairs = &sgl->sgl_pg_pairs;
12941
12942 /* pg_pairs tracks posted SGEs per loop iteration. */
12943 pg_pairs = 0;
12944 list_for_each_entry_continue(psb, sblist, list) {
12945 /* Set up the sge entry */
12946 sgl_pg_pairs->sgl_pg0_addr_lo =
12947 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
12948 sgl_pg_pairs->sgl_pg0_addr_hi =
12949 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
12950 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
12951 pdma_phys_bpl1 = psb->dma_phys_bpl +
12952 SGL_PAGE_SIZE;
12953 else
12954 pdma_phys_bpl1 = 0;
12955 sgl_pg_pairs->sgl_pg1_addr_lo =
12956 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
12957 sgl_pg_pairs->sgl_pg1_addr_hi =
12958 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
12959 /* Keep the first xri for this extent. */
12960 if (pg_pairs == 0)
12961 xri_start = psb->cur_iocbq.sli4_xritag;
12962 sgl_pg_pairs++;
12963 pg_pairs++;
12964 xri_cnt++;
12965
12966 /*
12967 * Track two exit conditions - the loop has constructed
12968 * all of the caller's SGE pairs or all available
12969 * resource IDs in this extent are consumed.
12970 */
12971 if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
12972 break;
12973 }
12974 rsrc_blk->rsrc_used += pg_pairs;
12975 bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
12976 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
12977
12978 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12979 "3016 Post SCSI Extent SGL, start %d, cnt %d "
12980 "blk use %d\n",
12981 xri_start, pg_pairs, rsrc_blk->rsrc_used);
12982 /* Perform endian conversion if necessary */
12983 sgl->word0 = cpu_to_le32(sgl->word0);
12984 if (!phba->sli4_hba.intr_enable)
12985 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12986 else {
12987 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
12988 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12989 }
12990 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
12991 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12992 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
12993 &shdr->response);
12994 if (rc != MBX_TIMEOUT)
12995 lpfc_sli4_mbox_cmd_free(phba, mbox);
12996 if (shdr_status || shdr_add_status || rc) {
12997 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12998 "2935 POST_SGL_BLOCK mailbox command "
12999 "failed status x%x add_status x%x "
13000 "mbx status x%x\n",
13001 shdr_status, shdr_add_status, rc);
13002 return -ENXIO;
13003 }
13004
13005 /* Post only what is requested. */
13006 if (xri_cnt >= cnt)
13007 break;
13008 }
13009 return rc;
13010}
13011
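One detail worth noticing above: psb is primed once with list_prepare_entry() and each per-extent pass uses list_for_each_entry_continue(), so the walk over @sblist resumes where the previous extent left off rather than restarting. A user-space sketch of the same resumable-cursor idea on a plain linked list (types hypothetical):

```c
#include <stddef.h>

struct buf {
	int id;
	struct buf *next;
};

/* Consume up to 'chunk' buffers starting at *cursor, then leave
 * *cursor at the first unconsumed node so the next call resumes
 * there - the moral equivalent of list_for_each_entry_continue(). */
static int consume_chunk(struct buf **cursor, int chunk)
{
	int done = 0;

	while (*cursor && done < chunk) {
		/* ...post (*cursor)->id within the current extent... */
		*cursor = (*cursor)->next;
		done++;
	}
	return done;
}

int main(void)
{
	struct buf b2 = { 2, NULL }, b1 = { 1, &b2 };
	struct buf *cur = &b1;
	int first = consume_chunk(&cur, 1);	/* consumes id 1 */
	int second = consume_chunk(&cur, 5);	/* resumes at id 2 */

	return (first == 1 && second == 1) ? 0 : 1;
}
```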
13012/**
11751 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 13013 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
11752 * @phba: pointer to lpfc_hba struct that the frame was received on 13014 * @phba: pointer to lpfc_hba struct that the frame was received on
11753 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13015 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
@@ -12137,6 +13399,28 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
12137} 13399}
12138 13400
12139/** 13401/**
13402 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
13403 * @phba: Pointer to HBA context object.
13404 * @xri: xri id in transaction.
13405 *
 13406 * This function validates that the xri maps to the known range of XRIs
 13407 * allocated and used by the driver.
13408 **/
13409static uint16_t
13410lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
13411 uint16_t xri)
13412{
13413 int i;
13414
13415 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
13416 if (xri == phba->sli4_hba.xri_ids[i])
13417 return i;
13418 }
13419 return NO_XRI;
13420}
13421
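lpfc_sli4_xri_inrange() is the reverse of the xri_ids[] translation: a linear scan from a physical id back to its logical index. Since logical index 0 is a valid result, callers that need "found at 0" distinguished from "not found" should compare against the NO_XRI sentinel rather than test the return value for truth; a hedged sketch of that usage:

```c
#include <stdint.h>

#define NO_XRI_SK 0xFFFF	/* stand-in for the driver's NO_XRI */

static uint16_t phys_to_logical(const uint16_t *ids, uint16_t count,
				uint16_t phys)
{
	uint16_t i;

	for (i = 0; i < count; i++)
		if (ids[i] == phys)
			return i;
	return NO_XRI_SK;
}

int main(void)
{
	uint16_t ids[3] = { 100, 101, 102 };

	/* Compare against the sentinel: index 0 is a legitimate hit. */
	return phys_to_logical(ids, 3, 100) != NO_XRI_SK ? 0 : 1;
}
```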
13422
13423/**
12140 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 13424 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
12141 * @phba: Pointer to HBA context object. 13425 * @phba: Pointer to HBA context object.
12142 * @fc_hdr: pointer to a FC frame header. 13426 * @fc_hdr: pointer to a FC frame header.
@@ -12169,9 +13453,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
12169 "SID:x%x\n", oxid, sid); 13453 "SID:x%x\n", oxid, sid);
12170 return; 13454 return;
12171 } 13455 }
12172 if (rxid >= phba->sli4_hba.max_cfg_param.xri_base 13456 if (lpfc_sli4_xri_inrange(phba, rxid))
12173 && rxid <= (phba->sli4_hba.max_cfg_param.max_xri
12174 + phba->sli4_hba.max_cfg_param.xri_base))
12175 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); 13457 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
12176 13458
12177 /* Allocate buffer for rsp iocb */ 13459 /* Allocate buffer for rsp iocb */
@@ -12194,12 +13476,13 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
12194 icmd->ulpBdeCount = 0; 13476 icmd->ulpBdeCount = 0;
12195 icmd->ulpLe = 1; 13477 icmd->ulpLe = 1;
12196 icmd->ulpClass = CLASS3; 13478 icmd->ulpClass = CLASS3;
12197 icmd->ulpContext = ndlp->nlp_rpi; 13479 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
12198 ctiocb->context1 = ndlp; 13480 ctiocb->context1 = ndlp;
12199 13481
12200 ctiocb->iocb_cmpl = NULL; 13482 ctiocb->iocb_cmpl = NULL;
12201 ctiocb->vport = phba->pport; 13483 ctiocb->vport = phba->pport;
12202 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 13484 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
13485 ctiocb->sli4_lxritag = NO_XRI;
12203 ctiocb->sli4_xritag = NO_XRI; 13486 ctiocb->sli4_xritag = NO_XRI;
12204 13487
12205 /* If the oxid maps to the FCP XRI range or if it is out of range, 13488 /* If the oxid maps to the FCP XRI range or if it is out of range,
@@ -12380,8 +13663,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
12380 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 13663 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
12381 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 13664 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
12382 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); 13665 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
12383 first_iocbq->iocb.unsli3.rcvsli3.vpi = 13666 /* iocbq is prepped for internal consumption. Logical vpi. */
12384 vport->vpi + vport->phba->vpi_base; 13667 first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi;
12385 /* put the first buffer into the first IOCBq */ 13668 /* put the first buffer into the first IOCBq */
12386 first_iocbq->context2 = &seq_dmabuf->dbuf; 13669 first_iocbq->context2 = &seq_dmabuf->dbuf;
12387 first_iocbq->context3 = NULL; 13670 first_iocbq->context3 = NULL;
@@ -12461,7 +13744,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
12461 &phba->sli.ring[LPFC_ELS_RING], 13744 &phba->sli.ring[LPFC_ELS_RING],
12462 iocbq, fc_hdr->fh_r_ctl, 13745 iocbq, fc_hdr->fh_r_ctl,
12463 fc_hdr->fh_type)) 13746 fc_hdr->fh_type))
12464 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13747 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12465 "2540 Ring %d handler: unexpected Rctl " 13748 "2540 Ring %d handler: unexpected Rctl "
12466 "x%x Type x%x received\n", 13749 "x%x Type x%x received\n",
12467 LPFC_ELS_RING, 13750 LPFC_ELS_RING,
@@ -12558,9 +13841,24 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
12558{ 13841{
12559 struct lpfc_rpi_hdr *rpi_page; 13842 struct lpfc_rpi_hdr *rpi_page;
12560 uint32_t rc = 0; 13843 uint32_t rc = 0;
13844 uint16_t lrpi = 0;
13845
13846 /* SLI4 ports that support extents do not require RPI headers. */
13847 if (!phba->sli4_hba.rpi_hdrs_in_use)
13848 goto exit;
13849 if (phba->sli4_hba.extents_in_use)
13850 return -EIO;
12561 13851
12562 /* Post all rpi memory regions to the port. */
12563 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 13852 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
13853 /*
13854 * Assign the rpi headers a physical rpi only if the driver
13855 * has not initialized those resources. A port reset only
13856 * needs the headers posted.
13857 */
13858 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
13859 LPFC_RPI_RSRC_RDY)
13860 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
13861
12564 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 13862 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
12565 if (rc != MBX_SUCCESS) { 13863 if (rc != MBX_SUCCESS) {
12566 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13864 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12571,6 +13869,9 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
12571 } 13869 }
12572 } 13870 }
12573 13871
13872 exit:
13873 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
13874 LPFC_RPI_RSRC_RDY);
12574 return rc; 13875 return rc;
12575} 13876}
12576 13877
@@ -12594,10 +13895,15 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
12594 LPFC_MBOXQ_t *mboxq; 13895 LPFC_MBOXQ_t *mboxq;
12595 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 13896 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
12596 uint32_t rc = 0; 13897 uint32_t rc = 0;
12597 uint32_t mbox_tmo;
12598 uint32_t shdr_status, shdr_add_status; 13898 uint32_t shdr_status, shdr_add_status;
12599 union lpfc_sli4_cfg_shdr *shdr; 13899 union lpfc_sli4_cfg_shdr *shdr;
12600 13900
13901 /* SLI4 ports that support extents do not require RPI headers. */
13902 if (!phba->sli4_hba.rpi_hdrs_in_use)
13903 return rc;
13904 if (phba->sli4_hba.extents_in_use)
13905 return -EIO;
13906
12601 /* The port is notified of the header region via a mailbox command. */ 13907 /* The port is notified of the header region via a mailbox command. */
12602 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13908 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12603 if (!mboxq) { 13909 if (!mboxq) {
@@ -12609,16 +13915,19 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
12609 13915
12610 /* Post all rpi memory regions to the port. */ 13916 /* Post all rpi memory regions to the port. */
12611 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 13917 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
12612 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
12613 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 13918 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
12614 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 13919 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
12615 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 13920 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
12616 sizeof(struct lpfc_sli4_cfg_mhdr), 13921 sizeof(struct lpfc_sli4_cfg_mhdr),
12617 LPFC_SLI4_MBX_EMBED); 13922 LPFC_SLI4_MBX_EMBED);
12618 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 13923
12619 hdr_tmpl, rpi_page->page_count); 13924
13925 /* Post the physical rpi to the port for this rpi header. */
12620 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 13926 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
12621 rpi_page->start_rpi); 13927 rpi_page->start_rpi);
13928 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
13929 hdr_tmpl, rpi_page->page_count);
13930
12622 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 13931 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
12623 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 13932 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
12624 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13933 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -12653,22 +13962,21 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
12653int 13962int
12654lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 13963lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
12655{ 13964{
12656 int rpi; 13965 unsigned long rpi;
12657 uint16_t max_rpi, rpi_base, rpi_limit; 13966 uint16_t max_rpi, rpi_limit;
12658 uint16_t rpi_remaining; 13967 uint16_t rpi_remaining, lrpi = 0;
12659 struct lpfc_rpi_hdr *rpi_hdr; 13968 struct lpfc_rpi_hdr *rpi_hdr;
12660 13969
12661 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 13970 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
12662 rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
12663 rpi_limit = phba->sli4_hba.next_rpi; 13971 rpi_limit = phba->sli4_hba.next_rpi;
12664 13972
12665 /* 13973 /*
12666 * The valid rpi range is not guaranteed to be zero-based. Start 13974 * Fetch the next logical rpi. Because this index is logical,
12667 * the search at the rpi_base as reported by the port. 13975 * the driver starts at 0 each time.
12668 */ 13976 */
12669 spin_lock_irq(&phba->hbalock); 13977 spin_lock_irq(&phba->hbalock);
12670 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base); 13978 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
12671 if (rpi >= rpi_limit || rpi < rpi_base) 13979 if (rpi >= rpi_limit)
12672 rpi = LPFC_RPI_ALLOC_ERROR; 13980 rpi = LPFC_RPI_ALLOC_ERROR;
12673 else { 13981 else {
12674 set_bit(rpi, phba->sli4_hba.rpi_bmask); 13982 set_bit(rpi, phba->sli4_hba.rpi_bmask);
@@ -12678,7 +13986,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
12678 13986
12679 /* 13987 /*
12680 * Don't try to allocate more rpi header regions if the device limit 13988 * Don't try to allocate more rpi header regions if the device limit
12681 * on available rpis max has been exhausted. 13989 * has been exhausted.
12682 */ 13990 */
12683 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 13991 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
12684 (phba->sli4_hba.rpi_count >= max_rpi)) { 13992 (phba->sli4_hba.rpi_count >= max_rpi)) {
@@ -12687,13 +13995,21 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
12687 } 13995 }
12688 13996
12689 /* 13997 /*
13998 * RPI header postings are not required for SLI4 ports capable of
13999 * extents.
14000 */
14001 if (!phba->sli4_hba.rpi_hdrs_in_use) {
14002 spin_unlock_irq(&phba->hbalock);
14003 return rpi;
14004 }
14005
14006 /*
12690 * If the driver is running low on rpi resources, allocate another 14007 * If the driver is running low on rpi resources, allocate another
12691 * page now. Note that the next_rpi value is used because 14008 * page now. Note that the next_rpi value is used because
12692 * it represents how many are actually in use whereas max_rpi notes 14009 * it represents how many are actually in use whereas max_rpi notes
12693 * how many are supported max by the device. 14010 * how many are supported max by the device.
12694 */ 14011 */
12695 rpi_remaining = phba->sli4_hba.next_rpi - rpi_base - 14012 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
12696 phba->sli4_hba.rpi_count;
12697 spin_unlock_irq(&phba->hbalock); 14013 spin_unlock_irq(&phba->hbalock);
12698 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 14014 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
12699 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 14015 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
@@ -12702,6 +14018,8 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
12702 "2002 Error Could not grow rpi " 14018 "2002 Error Could not grow rpi "
12703 "count\n"); 14019 "count\n");
12704 } else { 14020 } else {
14021 lrpi = rpi_hdr->start_rpi;
14022 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
12705 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); 14023 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
12706 } 14024 }
12707 } 14025 }
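The tail of the allocator above implements an opportunistic growth policy: after each successful allocation it checks how much headroom remains and, below a low-water mark, posts another rpi header page before callers can run dry. The policy, reduced to a hedged sketch (threshold and page size are illustrative):

```c
#include <stdbool.h>

#define LOW_WATER_MARK_SK 32	/* hypothetical threshold */

static bool should_grow(int provisioned, int used)
{
	return (provisioned - used) < LOW_WATER_MARK_SK;
}

/* Hand out one id; grow capacity by one 'page' of ids whenever the
 * free headroom drops below the watermark. */
static int alloc_id(int *provisioned, int *used, int page)
{
	int id = (*used)++;

	if (should_grow(*provisioned, *used))
		*provisioned += page;
	return id;
}

int main(void)
{
	int provisioned = 64, used = 0;

	while (used < 200)
		alloc_id(&provisioned, &used, 64);
	return provisioned >= used ? 0 : 1;
}
```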
@@ -12751,6 +14069,8 @@ void
12751lpfc_sli4_remove_rpis(struct lpfc_hba *phba) 14069lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
12752{ 14070{
12753 kfree(phba->sli4_hba.rpi_bmask); 14071 kfree(phba->sli4_hba.rpi_bmask);
14072 kfree(phba->sli4_hba.rpi_ids);
14073 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
12754} 14074}
12755 14075
12756/** 14076/**
@@ -13490,6 +14810,96 @@ out:
13490} 14810}
13491 14811
13492/** 14812/**
14813 * lpfc_wr_object - write an object to the firmware
14814 * @phba: HBA structure that indicates port to create a queue on.
14815 * @dmabuf_list: list of dmabufs to write to the port.
14816 * @size: the total byte value of the objects to write to the port.
14817 * @offset: the current offset to be used to start the transfer.
14818 *
14819 * This routine will create a wr_object mailbox command to send to the port.
 14820 * The mailbox command will be constructed using the dma buffers described in
 14821 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 14822 * BDEs as the embedded mailbox can support. The @offset variable will be
14823 * used to indicate the starting offset of the transfer and will also return
14824 * the offset after the write object mailbox has completed. @size is used to
14825 * determine the end of the object and whether the eof bit should be set.
14826 *
 14827 * Return 0 if successful; @offset will contain the new offset to use
14828 * for the next write.
14829 * Return negative value for error cases.
14830 **/
14831int
14832lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
14833 uint32_t size, uint32_t *offset)
14834{
14835 struct lpfc_mbx_wr_object *wr_object;
14836 LPFC_MBOXQ_t *mbox;
14837 int rc = 0, i = 0;
14838 uint32_t shdr_status, shdr_add_status;
14839 uint32_t mbox_tmo;
14840 union lpfc_sli4_cfg_shdr *shdr;
14841 struct lpfc_dmabuf *dmabuf;
14842 uint32_t written = 0;
14843
14844 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14845 if (!mbox)
14846 return -ENOMEM;
14847
14848 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14849 LPFC_MBOX_OPCODE_WRITE_OBJECT,
14850 sizeof(struct lpfc_mbx_wr_object) -
14851 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
14852
14853 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
14854 wr_object->u.request.write_offset = *offset;
14855 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
14856 wr_object->u.request.object_name[0] =
14857 cpu_to_le32(wr_object->u.request.object_name[0]);
14858 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
14859 list_for_each_entry(dmabuf, dmabuf_list, list) {
14860 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
14861 break;
14862 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
14863 wr_object->u.request.bde[i].addrHigh =
14864 putPaddrHigh(dmabuf->phys);
14865 if (written + SLI4_PAGE_SIZE >= size) {
14866 wr_object->u.request.bde[i].tus.f.bdeSize =
14867 (size - written);
14868 written += (size - written);
14869 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
14870 } else {
14871 wr_object->u.request.bde[i].tus.f.bdeSize =
14872 SLI4_PAGE_SIZE;
14873 written += SLI4_PAGE_SIZE;
14874 }
14875 i++;
14876 }
14877 wr_object->u.request.bde_count = i;
14878 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
14879 if (!phba->sli4_hba.intr_enable)
14880 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14881 else {
14882 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
14883 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
14884 }
14885 /* The IOCTL status is embedded in the mailbox subheader. */
14886 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
14887 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14888 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14889 if (rc != MBX_TIMEOUT)
14890 mempool_free(mbox, phba->mbox_mem_pool);
14891 if (shdr_status || shdr_add_status || rc) {
14892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14893 "3025 Write Object mailbox failed with "
14894 "status x%x add_status x%x, mbx status x%x\n",
14895 shdr_status, shdr_add_status, rc);
14896 rc = -ENXIO;
14897 } else
14898 *offset += wr_object->u.response.actual_write_length;
14899 return rc;
14900}
14901
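lpfc_wr_object() above writes at most LPFC_MBX_WR_CONFIG_MAX_BDE pages per mailbox and hands the advanced offset back through @offset, so a caller is expected to loop until @size bytes have gone out. A rough, self-contained sketch of that calling pattern; write_chunk() and both constants stand in for the real mailbox call and are assumptions, not driver API:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096
#define MAX_BDE 8                        /* hypothetical per-mailbox cap */

/* Stand-in for lpfc_wr_object(): consumes up to MAX_BDE pages starting
 * at *offset, advances *offset, returns 0 or a negative error. */
static int write_chunk(uint32_t size, uint32_t *offset)
{
    uint32_t remain = size - *offset;
    uint32_t chunk = remain < MAX_BDE * PAGE_SZ ? remain : MAX_BDE * PAGE_SZ;

    *offset += chunk;                    /* real code adds actual_write_length */
    return 0;
}

int main(void)
{
    uint32_t size = 100000, offset = 0;  /* hypothetical image size */

    while (offset < size) {
        if (write_chunk(size, &offset) < 0) {
            fprintf(stderr, "write object failed\n");
            return 1;
        }
        printf("offset now %u of %u\n", offset, size);
    }
    return 0;
}
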
14902/**
13493 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. 14903 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
13494 * @vport: pointer to vport data structure. 14904 * @vport: pointer to vport data structure.
13495 * 14905 *
@@ -13644,7 +15054,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
13644 * never happen 15054 * never happen
13645 */ 15055 */
13646 sglq = __lpfc_clear_active_sglq(phba, 15056 sglq = __lpfc_clear_active_sglq(phba,
13647 sglq->sli4_xritag); 15057 sglq->sli4_lxritag);
13648 spin_unlock_irqrestore(&phba->hbalock, iflags); 15058 spin_unlock_irqrestore(&phba->hbalock, iflags);
13649 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15059 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13650 "2823 txq empty and txq_cnt is %d\n ", 15060 "2823 txq empty and txq_cnt is %d\n ",
@@ -13656,6 +15066,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
13656 /* The xri and iocb resources secured, 15066 /* The xri and iocb resources secured,
13657 * attempt to issue request 15067 * attempt to issue request
13658 */ 15068 */
15069 piocbq->sli4_lxritag = sglq->sli4_lxritag;
13659 piocbq->sli4_xritag = sglq->sli4_xritag; 15070 piocbq->sli4_xritag = sglq->sli4_xritag;
13660 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq)) 15071 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
13661 fail_msg = "to convert bpl to sgl"; 15072 fail_msg = "to convert bpl to sgl";
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 453577c21c14..a0075b0af142 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -52,6 +52,7 @@ struct lpfc_iocbq {
52 struct list_head clist; 52 struct list_head clist;
53 struct list_head dlist; 53 struct list_head dlist;
54 uint16_t iotag; /* pre-assigned IO tag */ 54 uint16_t iotag; /* pre-assigned IO tag */
55 uint16_t sli4_lxritag; /* logical pre-assigned XRI. */
55 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 56 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
56 struct lpfc_cq_event cq_event; 57 struct lpfc_cq_event cq_event;
57 58
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 1a3cbf88f2ce..4b1703554a26 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -310,7 +310,6 @@ struct lpfc_max_cfg_param {
310 uint16_t vfi_base; 310 uint16_t vfi_base;
311 uint16_t vfi_used; 311 uint16_t vfi_used;
312 uint16_t max_fcfi; 312 uint16_t max_fcfi;
313 uint16_t fcfi_base;
314 uint16_t fcfi_used; 313 uint16_t fcfi_used;
315 uint16_t max_eq; 314 uint16_t max_eq;
316 uint16_t max_rq; 315 uint16_t max_rq;
@@ -365,6 +364,11 @@ struct lpfc_pc_sli4_params {
365 uint8_t rqv; 364 uint8_t rqv;
366}; 365};
367 366
367struct lpfc_iov {
368 uint32_t pf_number;
369 uint32_t vf_number;
370};
371
368/* SLI4 HBA data structure entries */ 372/* SLI4 HBA data structure entries */
369struct lpfc_sli4_hba { 373struct lpfc_sli4_hba {
370 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for 374 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -444,10 +448,13 @@ struct lpfc_sli4_hba {
444 uint32_t intr_enable; 448 uint32_t intr_enable;
445 struct lpfc_bmbx bmbx; 449 struct lpfc_bmbx bmbx;
446 struct lpfc_max_cfg_param max_cfg_param; 450 struct lpfc_max_cfg_param max_cfg_param;
451 uint16_t extents_in_use; /* must allocate resource extents. */
452 uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
447 uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */ 453 uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
448 uint16_t next_rpi; 454 uint16_t next_rpi;
449 uint16_t scsi_xri_max; 455 uint16_t scsi_xri_max;
450 uint16_t scsi_xri_cnt; 456 uint16_t scsi_xri_cnt;
457 uint16_t scsi_xri_start;
451 struct list_head lpfc_free_sgl_list; 458 struct list_head lpfc_free_sgl_list;
452 struct list_head lpfc_sgl_list; 459 struct list_head lpfc_sgl_list;
453 struct lpfc_sglq **lpfc_els_sgl_array; 460 struct lpfc_sglq **lpfc_els_sgl_array;
@@ -458,7 +465,17 @@ struct lpfc_sli4_hba {
458 struct lpfc_sglq **lpfc_sglq_active_list; 465 struct lpfc_sglq **lpfc_sglq_active_list;
459 struct list_head lpfc_rpi_hdr_list; 466 struct list_head lpfc_rpi_hdr_list;
460 unsigned long *rpi_bmask; 467 unsigned long *rpi_bmask;
468 uint16_t *rpi_ids;
461 uint16_t rpi_count; 469 uint16_t rpi_count;
470 struct list_head lpfc_rpi_blk_list;
471 unsigned long *xri_bmask;
472 uint16_t *xri_ids;
473 uint16_t xri_count;
474 struct list_head lpfc_xri_blk_list;
475 unsigned long *vfi_bmask;
476 uint16_t *vfi_ids;
477 uint16_t vfi_count;
478 struct list_head lpfc_vfi_blk_list;
462 struct lpfc_sli4_flags sli4_flags; 479 struct lpfc_sli4_flags sli4_flags;
463 struct list_head sp_queue_event; 480 struct list_head sp_queue_event;
464 struct list_head sp_cqe_event_pool; 481 struct list_head sp_cqe_event_pool;
@@ -467,6 +484,7 @@ struct lpfc_sli4_hba {
467 struct list_head sp_els_xri_aborted_work_queue; 484 struct list_head sp_els_xri_aborted_work_queue;
468 struct list_head sp_unsol_work_queue; 485 struct list_head sp_unsol_work_queue;
469 struct lpfc_sli4_link link_state; 486 struct lpfc_sli4_link link_state;
487 struct lpfc_iov iov;
470 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ 488 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
471 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ 489 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
472}; 490};
@@ -490,6 +508,7 @@ struct lpfc_sglq {
490 enum lpfc_sgl_state state; 508 enum lpfc_sgl_state state;
491 struct lpfc_nodelist *ndlp; /* ndlp associated with IO */ 509 struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
492 uint16_t iotag; /* pre-assigned IO tag */ 510 uint16_t iotag; /* pre-assigned IO tag */
511 uint16_t sli4_lxritag; /* logical pre-assigned xri. */
493 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 512 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
494 struct sli4_sge *sgl; /* pre-assigned SGL */ 513 struct sli4_sge *sgl; /* pre-assigned SGL */
495 void *virt; /* virtual address. */ 514 void *virt; /* virtual address. */
@@ -504,6 +523,13 @@ struct lpfc_rpi_hdr {
504 uint32_t start_rpi; 523 uint32_t start_rpi;
505}; 524};
506 525
526struct lpfc_rsrc_blks {
527 struct list_head list;
528 uint16_t rsrc_start;
529 uint16_t rsrc_size;
530 uint16_t rsrc_used;
531};
532
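
The new struct lpfc_rsrc_blks tracks one port-assigned extent per node: where it starts, how big it is, and how much has been used. A small illustrative sketch of handing out ids across such a block list; the field names and block sizes here are hypothetical, and the real driver walks a kernel list_head rather than a hand-rolled singly linked list:

#include <stdint.h>
#include <stdio.h>

struct rsrc_blk {
    struct rsrc_blk *next;
    uint16_t start, size, used;
};

/* Take the next free id from the first block that still has room,
 * mirroring the rsrc_start/rsrc_size/rsrc_used bookkeeping above. */
static int alloc_from_blocks(struct rsrc_blk *head)
{
    for (struct rsrc_blk *b = head; b; b = b->next)
        if (b->used < b->size)
            return b->start + b->used++;
    return -1;                           /* all extents exhausted */
}

int main(void)
{
    struct rsrc_blk b2 = { NULL, 200, 2, 0 };   /* second hypothetical extent */
    struct rsrc_blk b1 = { &b2, 100, 2, 0 };    /* first hypothetical extent */

    for (int i = 0; i < 5; i++)
        printf("id %d\n", alloc_from_blocks(&b1));
    return 0;
}
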
507/* 533/*
508 * SLI4 specific function prototypes 534 * SLI4 specific function prototypes
509 */ 535 */
@@ -543,8 +569,11 @@ int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
543int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); 569int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
544uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); 570uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
545int lpfc_sli4_post_async_mbox(struct lpfc_hba *); 571int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
546int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba); 572int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba);
573int lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba);
547int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); 574int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
575int lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *, struct list_head *,
576 int);
548struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); 577struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
549struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *); 578struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
550void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); 579void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 30ba5440c67a..1feb551a57bc 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -83,7 +83,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
83static int 83static int
84lpfc_alloc_vpi(struct lpfc_hba *phba) 84lpfc_alloc_vpi(struct lpfc_hba *phba)
85{ 85{
86 int vpi; 86 unsigned long vpi;
87 87
88 spin_lock_irq(&phba->hbalock); 88 spin_lock_irq(&phba->hbalock);
89 /* Start at bit 1 because vpi zero is reserved for the physical port */ 89 /* Start at bit 1 because vpi zero is reserved for the physical port */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 046dcc672ec1..7370c084b178 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
33/* 33/*
34 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
35 */ 35 */
36#define MEGASAS_VERSION "00.00.05.34-rc1" 36#define MEGASAS_VERSION "00.00.05.38-rc1"
37#define MEGASAS_RELDATE "Feb. 24, 2011" 37#define MEGASAS_RELDATE "May. 11, 2011"
38#define MEGASAS_EXT_VERSION "Thu. Feb. 24 17:00:00 PDT 2011" 38#define MEGASAS_EXT_VERSION "Wed. May. 11 17:00:00 PDT 2011"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
@@ -76,8 +76,8 @@
76#define MFI_STATE_READY 0xB0000000 76#define MFI_STATE_READY 0xB0000000
77#define MFI_STATE_OPERATIONAL 0xC0000000 77#define MFI_STATE_OPERATIONAL 0xC0000000
78#define MFI_STATE_FAULT 0xF0000000 78#define MFI_STATE_FAULT 0xF0000000
79#define MFI_RESET_REQUIRED 0x00000001 79#define MFI_RESET_REQUIRED 0x00000001
80 80#define MFI_RESET_ADAPTER 0x00000002
81#define MEGAMFI_FRAME_SIZE 64 81#define MEGAMFI_FRAME_SIZE 64
82 82
83/* 83/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 89c623ebadbc..2d8cdce7b2f5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * FILE: megaraid_sas_base.c
21 * Version : v00.00.05.34-rc1 21 * Version : v00.00.05.38-rc1
22 * 22 *
23 * Authors: LSI Corporation 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 24 * Sreenivas Bagalkote
@@ -437,15 +437,18 @@ megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
437static int 437static int
438megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) 438megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
439{ 439{
440 u32 status; 440 u32 status, mfiStatus = 0;
441
441 /* 442 /*
442 * Check if it is our interrupt 443 * Check if it is our interrupt
443 */ 444 */
444 status = readl(&regs->outbound_intr_status); 445 status = readl(&regs->outbound_intr_status);
445 446
446 if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) { 447 if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
447 return 0; 448 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
448 } 449
450 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
451 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
449 452
450 /* 453 /*
451 * Clear the interrupt by writing back the same value 454 * Clear the interrupt by writing back the same value
@@ -455,8 +458,9 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
455 /* Dummy readl to force pci flush */ 458 /* Dummy readl to force pci flush */
456 readl(&regs->outbound_doorbell_clear); 459 readl(&regs->outbound_doorbell_clear);
457 460
458 return 1; 461 return mfiStatus;
459} 462}
463
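megasas_clear_intr_ppc() now decodes the raw status word into an mfiStatus bitmask instead of returning a plain 0/1, so the caller can distinguish a reply interrupt from a firmware state change signalled through the doorbell. A standalone sketch of the same decode step; the bit values below are made up for illustration and are not the MFI register definitions:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit values, for illustration only. */
#define REPLY_INTR     0x20000000
#define DOORBELL_INTR  0x00000004
#define FLAG_REPLY     0x1
#define FLAG_FW_CHANGE 0x2

/* Fold a raw status word into driver-level flags, as the reworked
 * clear-interrupt routine now does instead of returning 0 or 1. */
static uint32_t decode_intr(uint32_t status)
{
    uint32_t mfi = 0;

    if (status & REPLY_INTR)
        mfi |= FLAG_REPLY;
    if (status & DOORBELL_INTR)
        mfi |= FLAG_FW_CHANGE;
    return mfi;
}

int main(void)
{
    printf("flags 0x%x\n", decode_intr(REPLY_INTR | DOORBELL_INTR));
    return 0;
}
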
460/** 464/**
461 * megasas_fire_cmd_ppc - Sends command to the FW 465 * megasas_fire_cmd_ppc - Sends command to the FW
462 * @frame_phys_addr : Physical address of cmd 466 * @frame_phys_addr : Physical address of cmd
@@ -477,17 +481,6 @@ megasas_fire_cmd_ppc(struct megasas_instance *instance,
477} 481}
478 482
479/** 483/**
480 * megasas_adp_reset_ppc - For controller reset
481 * @regs: MFI register set
482 */
483static int
484megasas_adp_reset_ppc(struct megasas_instance *instance,
485 struct megasas_register_set __iomem *regs)
486{
487 return 0;
488}
489
490/**
491 * megasas_check_reset_ppc - For controller reset check 484 * megasas_check_reset_ppc - For controller reset check
492 * @regs: MFI register set 485 * @regs: MFI register set
493 */ 486 */
@@ -495,8 +488,12 @@ static int
495megasas_check_reset_ppc(struct megasas_instance *instance, 488megasas_check_reset_ppc(struct megasas_instance *instance,
496 struct megasas_register_set __iomem *regs) 489 struct megasas_register_set __iomem *regs)
497{ 490{
491 if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
492 return 1;
493
498 return 0; 494 return 0;
499} 495}
496
500static struct megasas_instance_template megasas_instance_template_ppc = { 497static struct megasas_instance_template megasas_instance_template_ppc = {
501 498
502 .fire_cmd = megasas_fire_cmd_ppc, 499 .fire_cmd = megasas_fire_cmd_ppc,
@@ -504,7 +501,7 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
504 .disable_intr = megasas_disable_intr_ppc, 501 .disable_intr = megasas_disable_intr_ppc,
505 .clear_intr = megasas_clear_intr_ppc, 502 .clear_intr = megasas_clear_intr_ppc,
506 .read_fw_status_reg = megasas_read_fw_status_reg_ppc, 503 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
507 .adp_reset = megasas_adp_reset_ppc, 504 .adp_reset = megasas_adp_reset_xscale,
508 .check_reset = megasas_check_reset_ppc, 505 .check_reset = megasas_check_reset_ppc,
509 .service_isr = megasas_isr, 506 .service_isr = megasas_isr,
510 .tasklet = megasas_complete_cmd_dpc, 507 .tasklet = megasas_complete_cmd_dpc,
@@ -620,6 +617,9 @@ static int
620megasas_check_reset_skinny(struct megasas_instance *instance, 617megasas_check_reset_skinny(struct megasas_instance *instance,
621 struct megasas_register_set __iomem *regs) 618 struct megasas_register_set __iomem *regs)
622{ 619{
620 if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
621 return 1;
622
623 return 0; 623 return 0;
624} 624}
625 625
@@ -3454,7 +3454,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
3454{ 3454{
3455 u32 max_sectors_1; 3455 u32 max_sectors_1;
3456 u32 max_sectors_2; 3456 u32 max_sectors_2;
3457 u32 tmp_sectors; 3457 u32 tmp_sectors, msix_enable;
3458 struct megasas_register_set __iomem *reg_set; 3458 struct megasas_register_set __iomem *reg_set;
3459 struct megasas_ctrl_info *ctrl_info; 3459 struct megasas_ctrl_info *ctrl_info;
3460 unsigned long bar_list; 3460 unsigned long bar_list;
@@ -3507,6 +3507,13 @@ static int megasas_init_fw(struct megasas_instance *instance)
3507 if (megasas_transition_to_ready(instance)) 3507 if (megasas_transition_to_ready(instance))
3508 goto fail_ready_state; 3508 goto fail_ready_state;
3509 3509
3510 /* Check if MSI-X is supported while in ready state */
3511 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
3512 0x4000000) >> 0x1a;
3513 if (msix_enable && !msix_disable &&
3514 !pci_enable_msix(instance->pdev, &instance->msixentry, 1))
3515 instance->msi_flag = 1;
3516
3510 /* Get operational params, sge flags, send init cmd to controller */ 3517 /* Get operational params, sge flags, send init cmd to controller */
3511 if (instance->instancet->init_adapter(instance)) 3518 if (instance->instancet->init_adapter(instance))
3512 goto fail_init_adapter; 3519 goto fail_init_adapter;
@@ -4076,14 +4083,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4076 else 4083 else
4077 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 4084 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
4078 4085
4079 /* Try to enable MSI-X */
4080 if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) &&
4081 (instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) &&
4082 (instance->pdev->device != PCI_DEVICE_ID_LSI_VERDE_ZCR) &&
4083 !msix_disable && !pci_enable_msix(instance->pdev,
4084 &instance->msixentry, 1))
4085 instance->msi_flag = 1;
4086
4087 /* 4086 /*
4088 * Initialize MFI Firmware 4087 * Initialize MFI Firmware
4089 */ 4088 */
@@ -4116,6 +4115,14 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4116 megasas_mgmt_info.max_index++; 4115 megasas_mgmt_info.max_index++;
4117 4116
4118 /* 4117 /*
4118 * Register with SCSI mid-layer
4119 */
4120 if (megasas_io_attach(instance))
4121 goto fail_io_attach;
4122
4123 instance->unload = 0;
4124
4125 /*
4119 * Initiate AEN (Asynchronous Event Notification) 4126 * Initiate AEN (Asynchronous Event Notification)
4120 */ 4127 */
4121 if (megasas_start_aen(instance)) { 4128 if (megasas_start_aen(instance)) {
@@ -4123,13 +4130,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4123 goto fail_start_aen; 4130 goto fail_start_aen;
4124 } 4131 }
4125 4132
4126 /*
4127 * Register with SCSI mid-layer
4128 */
4129 if (megasas_io_attach(instance))
4130 goto fail_io_attach;
4131
4132 instance->unload = 0;
4133 return 0; 4133 return 0;
4134 4134
4135 fail_start_aen: 4135 fail_start_aen:
@@ -4332,10 +4332,6 @@ megasas_resume(struct pci_dev *pdev)
4332 if (megasas_set_dma_mask(pdev)) 4332 if (megasas_set_dma_mask(pdev))
4333 goto fail_set_dma_mask; 4333 goto fail_set_dma_mask;
4334 4334
4335 /* Now re-enable MSI-X */
4336 if (instance->msi_flag)
4337 pci_enable_msix(instance->pdev, &instance->msixentry, 1);
4338
4339 /* 4335 /*
4340 * Initialize MFI Firmware 4336 * Initialize MFI Firmware
4341 */ 4337 */
@@ -4348,6 +4344,10 @@ megasas_resume(struct pci_dev *pdev)
4348 if (megasas_transition_to_ready(instance)) 4344 if (megasas_transition_to_ready(instance))
4349 goto fail_ready_state; 4345 goto fail_ready_state;
4350 4346
4347 /* Now re-enable MSI-X */
4348 if (instance->msi_flag)
4349 pci_enable_msix(instance->pdev, &instance->msixentry, 1);
4350
4351 switch (instance->pdev->device) { 4351 switch (instance->pdev->device) {
4352 case PCI_DEVICE_ID_LSI_FUSION: 4352 case PCI_DEVICE_ID_LSI_FUSION:
4353 { 4353 {
@@ -4384,12 +4384,6 @@ megasas_resume(struct pci_dev *pdev)
4384 4384
4385 instance->instancet->enable_intr(instance->reg_set); 4385 instance->instancet->enable_intr(instance->reg_set);
4386 4386
4387 /*
4388 * Initiate AEN (Asynchronous Event Notification)
4389 */
4390 if (megasas_start_aen(instance))
4391 printk(KERN_ERR "megasas: Start AEN failed\n");
4392
4393 /* Initialize the cmd completion timer */ 4387 /* Initialize the cmd completion timer */
4394 if (poll_mode_io) 4388 if (poll_mode_io)
4395 megasas_start_timer(instance, &instance->io_completion_timer, 4389 megasas_start_timer(instance, &instance->io_completion_timer,
@@ -4397,6 +4391,12 @@ megasas_resume(struct pci_dev *pdev)
4397 MEGASAS_COMPLETION_TIMER_INTERVAL); 4391 MEGASAS_COMPLETION_TIMER_INTERVAL);
4398 instance->unload = 0; 4392 instance->unload = 0;
4399 4393
4394 /*
4395 * Initiate AEN (Asynchronous Event Notification)
4396 */
4397 if (megasas_start_aen(instance))
4398 printk(KERN_ERR "megasas: Start AEN failed\n");
4399
4400 return 0; 4400 return 0;
4401 4401
4402fail_irq: 4402fail_irq:
@@ -4527,6 +4527,11 @@ static void megasas_shutdown(struct pci_dev *pdev)
4527 instance->unload = 1; 4527 instance->unload = 1;
4528 megasas_flush_cache(instance); 4528 megasas_flush_cache(instance);
4529 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 4529 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
4530 instance->instancet->disable_intr(instance->reg_set);
4531 free_irq(instance->msi_flag ? instance->msixentry.vector :
4532 instance->pdev->irq, instance);
4533 if (instance->msi_flag)
4534 pci_disable_msix(instance->pdev);
4530} 4535}
4531 4536
4532/** 4537/**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 145a8cffb1fa..f13e7abd345a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -696,22 +696,6 @@ fail_get_cmd:
696} 696}
697 697
698/* 698/*
699 * megasas_return_cmd_for_smid - Returns a cmd_fusion for a SMID
700 * @instance: Adapter soft state
701 *
702 */
703void
704megasas_return_cmd_for_smid(struct megasas_instance *instance, u16 smid)
705{
706 struct fusion_context *fusion;
707 struct megasas_cmd_fusion *cmd;
708
709 fusion = instance->ctrl_context;
710 cmd = fusion->cmd_list[smid - 1];
711 megasas_return_cmd_fusion(instance, cmd);
712}
713
714/*
715 * megasas_get_ld_map_info - Returns FW's ld_map structure 699 * megasas_get_ld_map_info - Returns FW's ld_map structure
716 * @instance: Adapter soft state 700 * @instance: Adapter soft state
717 * @pend: Pend the command or not 701 * @pend: Pend the command or not
@@ -1153,7 +1137,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1153 u64 start_blk = io_info->pdBlock; 1137 u64 start_blk = io_info->pdBlock;
1154 u8 *cdb = io_request->CDB.CDB32; 1138 u8 *cdb = io_request->CDB.CDB32;
1155 u32 num_blocks = io_info->numBlocks; 1139 u32 num_blocks = io_info->numBlocks;
1156 u8 opcode, flagvals, groupnum, control; 1140 u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
1157 1141
1158 /* Check if T10 PI (DIF) is enabled for this LD */ 1142 /* Check if T10 PI (DIF) is enabled for this LD */
1159 ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr); 1143 ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
@@ -1235,7 +1219,46 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1235 cdb[8] = (u8)(num_blocks & 0xff); 1219 cdb[8] = (u8)(num_blocks & 0xff);
1236 cdb[7] = (u8)((num_blocks >> 8) & 0xff); 1220 cdb[7] = (u8)((num_blocks >> 8) & 0xff);
1237 1221
1222 io_request->IoFlags = 10; /* Specify 10-byte cdb */
1238 cdb_len = 10; 1223 cdb_len = 10;
1224 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
1225 /* Convert to 16 byte CDB for large LBA's */
1226 switch (cdb_len) {
1227 case 6:
1228 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
1229 control = cdb[5];
1230 break;
1231 case 10:
1232 opcode =
1233 cdb[0] == READ_10 ? READ_16 : WRITE_16;
1234 flagvals = cdb[1];
1235 groupnum = cdb[6];
1236 control = cdb[9];
1237 break;
1238 case 12:
1239 opcode =
1240 cdb[0] == READ_12 ? READ_16 : WRITE_16;
1241 flagvals = cdb[1];
1242 groupnum = cdb[10];
1243 control = cdb[11];
1244 break;
1245 }
1246
1247 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1248
1249 cdb[0] = opcode;
1250 cdb[1] = flagvals;
1251 cdb[14] = groupnum;
1252 cdb[15] = control;
1253
1254 /* Transfer length */
1255 cdb[13] = (u8)(num_blocks & 0xff);
1256 cdb[12] = (u8)((num_blocks >> 8) & 0xff);
1257 cdb[11] = (u8)((num_blocks >> 16) & 0xff);
1258 cdb[10] = (u8)((num_blocks >> 24) & 0xff);
1259
1260 io_request->IoFlags = 16; /* Specify 16-byte cdb */
1261 cdb_len = 16;
1239 } 1262 }
1240 1263
1241 /* Normal case, just load LBA here */ 1264 /* Normal case, just load LBA here */
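The new else-if branch above promotes 6-, 10- and 12-byte CDBs to READ_16/WRITE_16 once the start block no longer fits in 32 bits: bytes 10-13 carry the big-endian transfer length, and the LBA itself is loaded afterwards in the "normal case" path. A compilable sketch of the resulting 16-byte layout, independent of the driver structures and assuming bytes 2-9 hold the big-endian LBA as SCSI READ(16) defines:

#include <stdint.h>
#include <stdio.h>

#define READ_16 0x88                     /* SCSI READ(16) opcode */

/* Pack a 64-bit LBA and 32-bit block count into a 16-byte CDB:
 * bytes 2-9 LBA, bytes 10-13 length, both big-endian. */
static void build_read16(uint8_t cdb[16], uint64_t lba, uint32_t nblk)
{
    for (int i = 0; i < 16; i++)
        cdb[i] = 0;
    cdb[0] = READ_16;
    for (int i = 0; i < 8; i++)
        cdb[2 + i] = (uint8_t)(lba >> (8 * (7 - i)));
    for (int i = 0; i < 4; i++)
        cdb[10 + i] = (uint8_t)(nblk >> (8 * (3 - i)));
}

int main(void)
{
    uint8_t cdb[16];

    build_read16(cdb, 0x100000000ULL, 8);   /* LBA above 32 bits */
    for (int i = 0; i < 16; i++)
        printf("%02x ", cdb[i]);
    printf("\n");
    return 0;
}
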
@@ -2026,17 +2049,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2026 struct fusion_context *fusion; 2049 struct fusion_context *fusion;
2027 struct megasas_cmd *cmd_mfi; 2050 struct megasas_cmd *cmd_mfi;
2028 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2051 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2029 u32 host_diag, abs_state; 2052 u32 host_diag, abs_state, status_reg, reset_adapter;
2030 2053
2031 instance = (struct megasas_instance *)shost->hostdata; 2054 instance = (struct megasas_instance *)shost->hostdata;
2032 fusion = instance->ctrl_context; 2055 fusion = instance->ctrl_context;
2033 2056
2034 mutex_lock(&instance->reset_mutex);
2035 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
2036 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
2037 instance->instancet->disable_intr(instance->reg_set);
2038 msleep(1000);
2039
2040 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { 2057 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
2041 printk(KERN_WARNING "megaraid_sas: Hardware critical error, " 2058 printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
2042 "returning FAILED.\n"); 2059 "returning FAILED.\n");
@@ -2044,6 +2061,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2044 goto out; 2061 goto out;
2045 } 2062 }
2046 2063
2064 mutex_lock(&instance->reset_mutex);
2065 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
2066 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
2067 instance->instancet->disable_intr(instance->reg_set);
2068 msleep(1000);
2069
2047 /* First try waiting for commands to complete */ 2070 /* First try waiting for commands to complete */
2048 if (megasas_wait_for_outstanding_fusion(instance)) { 2071 if (megasas_wait_for_outstanding_fusion(instance)) {
2049 printk(KERN_WARNING "megaraid_sas: resetting fusion " 2072 printk(KERN_WARNING "megaraid_sas: resetting fusion "
@@ -2060,7 +2083,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2060 } 2083 }
2061 } 2084 }
2062 2085
2063 if (instance->disableOnlineCtrlReset == 1) { 2086 status_reg = instance->instancet->read_fw_status_reg(
2087 instance->reg_set);
2088 abs_state = status_reg & MFI_STATE_MASK;
2089 reset_adapter = status_reg & MFI_RESET_ADAPTER;
2090 if (instance->disableOnlineCtrlReset ||
2091 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2064 /* Reset not supported, kill adapter */ 2092 /* Reset not supported, kill adapter */
2065 printk(KERN_WARNING "megaraid_sas: Reset not supported" 2093 printk(KERN_WARNING "megaraid_sas: Reset not supported"
2066 ", killing adapter.\n"); 2094 ", killing adapter.\n");
@@ -2089,6 +2117,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2089 2117
2090 /* Check that the diag write enable (DRWE) bit is on */ 2118 /* Check that the diag write enable (DRWE) bit is on */
2091 host_diag = readl(&instance->reg_set->fusion_host_diag); 2119 host_diag = readl(&instance->reg_set->fusion_host_diag);
2120 retry = 0;
2092 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { 2121 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2093 msleep(100); 2122 msleep(100);
2094 host_diag = 2123 host_diag =
@@ -2126,7 +2155,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2126 2155
2127 abs_state = 2156 abs_state =
2128 instance->instancet->read_fw_status_reg( 2157 instance->instancet->read_fw_status_reg(
2129 instance->reg_set); 2158 instance->reg_set) & MFI_STATE_MASK;
2130 retry = 0; 2159 retry = 0;
2131 2160
2132 while ((abs_state <= MFI_STATE_FW_INIT) && 2161 while ((abs_state <= MFI_STATE_FW_INIT) &&
@@ -2134,7 +2163,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2134 msleep(100); 2163 msleep(100);
2135 abs_state = 2164 abs_state =
2136 instance->instancet->read_fw_status_reg( 2165 instance->instancet->read_fw_status_reg(
2137 instance->reg_set); 2166 instance->reg_set) & MFI_STATE_MASK;
2138 } 2167 }
2139 if (abs_state <= MFI_STATE_FW_INIT) { 2168 if (abs_state <= MFI_STATE_FW_INIT) {
2140 printk(KERN_WARNING "megaraid_sas: firmware " 2169 printk(KERN_WARNING "megaraid_sas: firmware "
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 2a3c05f6db8b..dcc289c25459 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,11 +69,11 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "08.100.00.01" 72#define MPT2SAS_DRIVER_VERSION "08.100.00.02"
73#define MPT2SAS_MAJOR_VERSION 08 73#define MPT2SAS_MAJOR_VERSION 08
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 00 75#define MPT2SAS_BUILD_VERSION 00
76#define MPT2SAS_RELEASE_VERSION 01 76#define MPT2SAS_RELEASE_VERSION 02
77 77
78/* 78/*
79 * Set MPT2SAS_SG_DEPTH value based on user input. 79 * Set MPT2SAS_SG_DEPTH value based on user input.
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index f12e02358d6d..a7dbc6825f5f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -113,6 +113,7 @@ struct sense_info {
113}; 113};
114 114
115 115
116#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC)
116#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF) 117#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
117 118
118/** 119/**
@@ -121,6 +122,7 @@ struct sense_info {
121 * @work: work object (ioc->fault_reset_work_q) 122 * @work: work object (ioc->fault_reset_work_q)
122 * @cancel_pending_work: flag set during reset handling 123 * @cancel_pending_work: flag set during reset handling
123 * @ioc: per adapter object 124 * @ioc: per adapter object
125 * @device_handle: device handle
124 * @VF_ID: virtual function id 126 * @VF_ID: virtual function id
125 * @VP_ID: virtual port id 127 * @VP_ID: virtual port id
126 * @ignore: flag meaning this event has been marked to ignore 128 * @ignore: flag meaning this event has been marked to ignore
@@ -134,6 +136,7 @@ struct fw_event_work {
134 u8 cancel_pending_work; 136 u8 cancel_pending_work;
135 struct delayed_work delayed_work; 137 struct delayed_work delayed_work;
136 struct MPT2SAS_ADAPTER *ioc; 138 struct MPT2SAS_ADAPTER *ioc;
139 u16 device_handle;
137 u8 VF_ID; 140 u8 VF_ID;
138 u8 VP_ID; 141 u8 VP_ID;
139 u8 ignore; 142 u8 ignore;
@@ -3499,6 +3502,7 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
3499 3502
3500 switch (prot_type) { 3503 switch (prot_type) {
3501 case SCSI_PROT_DIF_TYPE1: 3504 case SCSI_PROT_DIF_TYPE1:
3505 case SCSI_PROT_DIF_TYPE2:
3502 3506
3503 /* 3507 /*
3504 * enable ref/guard checking 3508 * enable ref/guard checking
@@ -3511,13 +3515,6 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
3511 cpu_to_be32(scsi_get_lba(scmd)); 3515 cpu_to_be32(scsi_get_lba(scmd));
3512 break; 3516 break;
3513 3517
3514 case SCSI_PROT_DIF_TYPE2:
3515
3516 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3517 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
3518 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
3519 break;
3520
3521 case SCSI_PROT_DIF_TYPE3: 3518 case SCSI_PROT_DIF_TYPE3:
3522 3519
3523 /* 3520 /*
@@ -4047,17 +4044,75 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4047#endif 4044#endif
4048 4045
4049/** 4046/**
4050 * _scsih_smart_predicted_fault - illuminate Fault LED 4047 * _scsih_turn_on_fault_led - illuminate Fault LED
4051 * @ioc: per adapter object 4048 * @ioc: per adapter object
4052 * @handle: device handle 4049 * @handle: device handle
4050 * Context: process
4053 * 4051 *
4054 * Return nothing. 4052 * Return nothing.
4055 */ 4053 */
4056static void 4054static void
4057_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle) 4055_scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4058{ 4056{
4059 Mpi2SepReply_t mpi_reply; 4057 Mpi2SepReply_t mpi_reply;
4060 Mpi2SepRequest_t mpi_request; 4058 Mpi2SepRequest_t mpi_request;
4059
4060 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
4061 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
4062 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
4063 mpi_request.SlotStatus =
4064 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
4065 mpi_request.DevHandle = cpu_to_le16(handle);
4066 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
4067 if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
4068 &mpi_request)) != 0) {
4069 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
4070 __FILE__, __LINE__, __func__);
4071 return;
4072 }
4073
4074 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
4075 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: "
4076 "ioc_status (0x%04x), loginfo(0x%08x)\n", ioc->name,
4077 le16_to_cpu(mpi_reply.IOCStatus),
4078 le32_to_cpu(mpi_reply.IOCLogInfo)));
4079 return;
4080 }
4081}
4082
4083/**
4084 * _scsih_send_event_to_turn_on_fault_led - fire delayed event
4085 * @ioc: per adapter object
4086 * @handle: device handle
4087 * Context: interrupt.
4088 *
4089 * Return nothing.
4090 */
4091static void
4092_scsih_send_event_to_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4093{
4094 struct fw_event_work *fw_event;
4095
4096 fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
4097 if (!fw_event)
4098 return;
4099 fw_event->event = MPT2SAS_TURN_ON_FAULT_LED;
4100 fw_event->device_handle = handle;
4101 fw_event->ioc = ioc;
4102 _scsih_fw_event_add(ioc, fw_event);
4103}
4104
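The rework above splits SMART fault handling in two: the interrupt path only queues an MPT2SAS_TURN_ON_FAULT_LED fw_event carrying the device handle, and the blocking SCSI enclosure processor request runs later from the firmware-event work handler in process context. A toy model of that deferral — the fixed-size queue and single-threaded "worker" are simplifications, not the driver's workqueue machinery:

#include <stdint.h>
#include <stdio.h>

struct fw_event { uint16_t event, handle; };

#define TURN_ON_FAULT_LED 0xFFFC   /* mirrors MPT2SAS_TURN_ON_FAULT_LED */

static struct fw_event queue[8];
static int head;

static void irq_path(uint16_t handle)        /* cheap, never blocks */
{
    queue[head++] = (struct fw_event){ TURN_ON_FAULT_LED, handle };
}

static void worker(void)                     /* may sleep, issues the I/O */
{
    for (int i = 0; i < head; i++)
        if (queue[i].event == TURN_ON_FAULT_LED)
            printf("SEP write-status for handle 0x%x\n", queue[i].handle);
    head = 0;
}

int main(void)
{
    irq_path(0x0009);                        /* hypothetical device handle */
    worker();
    return 0;
}
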
4105/**
4106 * _scsih_smart_predicted_fault - process smart errors
4107 * @ioc: per adapter object
4108 * @handle: device handle
4109 * Context: interrupt.
4110 *
4111 * Return nothing.
4112 */
4113static void
4114_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4115{
4061 struct scsi_target *starget; 4116 struct scsi_target *starget;
4062 struct MPT2SAS_TARGET *sas_target_priv_data; 4117 struct MPT2SAS_TARGET *sas_target_priv_data;
4063 Mpi2EventNotificationReply_t *event_reply; 4118 Mpi2EventNotificationReply_t *event_reply;
@@ -4084,30 +4139,8 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4084 starget_printk(KERN_WARNING, starget, "predicted fault\n"); 4139 starget_printk(KERN_WARNING, starget, "predicted fault\n");
4085 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4140 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4086 4141
4087 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) { 4142 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
4088 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); 4143 _scsih_send_event_to_turn_on_fault_led(ioc, handle);
4089 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
4090 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
4091 mpi_request.SlotStatus =
4092 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
4093 mpi_request.DevHandle = cpu_to_le16(handle);
4094 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
4095 if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
4096 &mpi_request)) != 0) {
4097 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
4098 ioc->name, __FILE__, __LINE__, __func__);
4099 return;
4100 }
4101
4102 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
4103 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
4104 "enclosure_processor: ioc_status (0x%04x), "
4105 "loginfo(0x%08x)\n", ioc->name,
4106 le16_to_cpu(mpi_reply.IOCStatus),
4107 le32_to_cpu(mpi_reply.IOCLogInfo)));
4108 return;
4109 }
4110 }
4111 4144
4112 /* insert into event log */ 4145 /* insert into event log */
4113 sz = offsetof(Mpi2EventNotificationReply_t, EventData) + 4146 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
@@ -6753,6 +6786,9 @@ _firmware_event_work(struct work_struct *work)
6753 } 6786 }
6754 6787
6755 switch (fw_event->event) { 6788 switch (fw_event->event) {
6789 case MPT2SAS_TURN_ON_FAULT_LED:
6790 _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
6791 break;
6756 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 6792 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6757 _scsih_sas_topology_change_event(ioc, fw_event); 6793 _scsih_sas_topology_change_event(ioc, fw_event);
6758 break; 6794 break;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 58f5be4740e9..de0b1a704fb5 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -4698,12 +4698,14 @@ static int __os_scsi_tape_open(struct inode * inode, struct file * filp)
4698 break; 4698 break;
4699 4699
4700 if ((SRpnt->sense[2] & 0x0f) == UNIT_ATTENTION) { 4700 if ((SRpnt->sense[2] & 0x0f) == UNIT_ATTENTION) {
4701 int j;
4702
4701 STp->pos_unknown = 0; 4703 STp->pos_unknown = 0;
4702 STp->partition = STp->new_partition = 0; 4704 STp->partition = STp->new_partition = 0;
4703 if (STp->can_partitions) 4705 if (STp->can_partitions)
4704 STp->nbr_partitions = 1; /* This guess will be updated later if necessary */ 4706 STp->nbr_partitions = 1; /* This guess will be updated later if necessary */
4705 for (i=0; i < ST_NBR_PARTITIONS; i++) { 4707 for (j = 0; j < ST_NBR_PARTITIONS; j++) {
4706 STps = &(STp->ps[i]); 4708 STps = &(STp->ps[j]);
4707 STps->rw = ST_IDLE; 4709 STps->rw = ST_IDLE;
4708 STps->eof = ST_NOEOF; 4710 STps->eof = ST_NOEOF;
4709 STps->at_sm = 0; 4711 STps->at_sm = 0;
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
index 0339ff03a535..252523d7847e 100644
--- a/drivers/scsi/qla4xxx/Makefile
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -1,5 +1,5 @@
1qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \ 1qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
2 ql4_nx.o ql4_nvram.o ql4_dbg.o 2 ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o
3 3
4obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o 4obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
5 5
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
new file mode 100644
index 000000000000..864d018631c0
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -0,0 +1,69 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include "ql4_def.h"
9#include "ql4_glbl.h"
10#include "ql4_dbg.h"
11
12/* Scsi_Host attributes. */
13static ssize_t
14qla4xxx_fw_version_show(struct device *dev,
15 struct device_attribute *attr, char *buf)
16{
17 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
18
19 if (is_qla8022(ha))
20 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
21 ha->firmware_version[0],
22 ha->firmware_version[1],
23 ha->patch_number, ha->build_number);
24 else
25 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
26 ha->firmware_version[0],
27 ha->firmware_version[1],
28 ha->patch_number, ha->build_number);
29}
30
31static ssize_t
32qla4xxx_serial_num_show(struct device *dev, struct device_attribute *attr,
33 char *buf)
34{
35 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
36 return snprintf(buf, PAGE_SIZE, "%s\n", ha->serial_number);
37}
38
39static ssize_t
40qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr,
41 char *buf)
42{
43 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
44 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->iscsi_major,
45 ha->iscsi_minor);
46}
47
48static ssize_t
49qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
50 char *buf)
51{
52 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
53 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
54 ha->bootload_major, ha->bootload_minor,
55 ha->bootload_patch, ha->bootload_build);
56}
57
58static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
59static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
60static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
61static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL);
62
63struct device_attribute *qla4xxx_host_attrs[] = {
64 &dev_attr_fw_version,
65 &dev_attr_serial_num,
66 &dev_attr_iscsi_version,
67 &dev_attr_optrom_version,
68 NULL,
69};
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 4757878d59dd..473c5c872b39 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -115,7 +115,7 @@
115#define INVALID_ENTRY 0xFFFF 115#define INVALID_ENTRY 0xFFFF
116#define MAX_CMDS_TO_RISC 1024 116#define MAX_CMDS_TO_RISC 1024
117#define MAX_SRBS MAX_CMDS_TO_RISC 117#define MAX_SRBS MAX_CMDS_TO_RISC
118#define MBOX_AEN_REG_COUNT 5 118#define MBOX_AEN_REG_COUNT 8
119#define MAX_INIT_RETRIES 5 119#define MAX_INIT_RETRIES 5
120 120
121/* 121/*
@@ -368,7 +368,6 @@ struct scsi_qla_host {
368#define AF_INIT_DONE 1 /* 0x00000002 */ 368#define AF_INIT_DONE 1 /* 0x00000002 */
369#define AF_MBOX_COMMAND 2 /* 0x00000004 */ 369#define AF_MBOX_COMMAND 2 /* 0x00000004 */
370#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */ 370#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
371#define AF_DPC_SCHEDULED 5 /* 0x00000020 */
372#define AF_INTERRUPTS_ON 6 /* 0x00000040 */ 371#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
373#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */ 372#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
374#define AF_LINK_UP 8 /* 0x00000100 */ 373#define AF_LINK_UP 8 /* 0x00000100 */
@@ -584,6 +583,14 @@ struct scsi_qla_host {
584 uint32_t nx_reset_timeout; 583 uint32_t nx_reset_timeout;
585 584
586 struct completion mbx_intr_comp; 585 struct completion mbx_intr_comp;
586
587 /* --- From About Firmware --- */
588 uint16_t iscsi_major;
589 uint16_t iscsi_minor;
590 uint16_t bootload_major;
591 uint16_t bootload_minor;
592 uint16_t bootload_patch;
593 uint16_t bootload_build;
587}; 594};
588 595
589static inline int is_ipv4_enabled(struct scsi_qla_host *ha) 596static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 31e2bf97198c..01082aa77098 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -690,6 +690,29 @@ struct mbx_sys_info {
690 uint8_t reserved[12]; /* 34-3f */ 690 uint8_t reserved[12]; /* 34-3f */
691}; 691};
692 692
693struct about_fw_info {
694 uint16_t fw_major; /* 00 - 01 */
695 uint16_t fw_minor; /* 02 - 03 */
696 uint16_t fw_patch; /* 04 - 05 */
697 uint16_t fw_build; /* 06 - 07 */
698 uint8_t fw_build_date[16]; /* 08 - 17 ASCII String */
699 uint8_t fw_build_time[16]; /* 18 - 27 ASCII String */
700 uint8_t fw_build_user[16]; /* 28 - 37 ASCII String */
701 uint16_t fw_load_source; /* 38 - 39 */
702 /* 1 = Flash Primary,
703 2 = Flash Secondary,
704 3 = Host Download
705 */
706 uint8_t reserved1[6]; /* 3A - 3F */
707 uint16_t iscsi_major; /* 40 - 41 */
708 uint16_t iscsi_minor; /* 42 - 43 */
709 uint16_t bootload_major; /* 44 - 45 */
710 uint16_t bootload_minor; /* 46 - 47 */
711 uint16_t bootload_patch; /* 48 - 49 */
712 uint16_t bootload_build; /* 4A - 4B */
713 uint8_t reserved2[180]; /* 4C - FF */
714};
715
693struct crash_record { 716struct crash_record {
694 uint16_t fw_major_version; /* 00 - 01 */ 717 uint16_t fw_major_version; /* 00 - 01 */
695 uint16_t fw_minor_version; /* 02 - 03 */ 718 uint16_t fw_minor_version; /* 02 - 03 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index cc53e3fbd78c..a53a256c1f8d 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -61,7 +61,7 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
61int qla4xxx_add_sess(struct ddb_entry *); 61int qla4xxx_add_sess(struct ddb_entry *);
62void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry); 62void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
63int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha); 63int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha);
64int qla4xxx_get_fw_version(struct scsi_qla_host * ha); 64int qla4xxx_about_firmware(struct scsi_qla_host *ha);
65void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha, 65void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
66 uint32_t intr_status); 66 uint32_t intr_status);
67int qla4xxx_init_rings(struct scsi_qla_host *ha); 67int qla4xxx_init_rings(struct scsi_qla_host *ha);
@@ -139,4 +139,5 @@ extern int ql4xextended_error_logging;
139extern int ql4xdontresethba; 139extern int ql4xdontresethba;
140extern int ql4xenablemsix; 140extern int ql4xenablemsix;
141 141
142extern struct device_attribute *qla4xxx_host_attrs[];
142#endif /* _QLA4x_GBL_H */ 143#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 48e2241ddaf4..42ed5db2d530 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1275,7 +1275,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
1275 if (ha->isp_ops->start_firmware(ha) == QLA_ERROR) 1275 if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
1276 goto exit_init_hba; 1276 goto exit_init_hba;
1277 1277
1278 if (qla4xxx_get_fw_version(ha) == QLA_ERROR) 1278 if (qla4xxx_about_firmware(ha) == QLA_ERROR)
1279 goto exit_init_hba; 1279 goto exit_init_hba;
1280 1280
1281 if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR) 1281 if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 2f40ac761cd4..0e72921c752d 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -25,9 +25,14 @@ static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
25 25
26 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 26 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
27 sense_len = le16_to_cpu(sts_entry->senseDataByteCnt); 27 sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
28 if (sense_len == 0) 28 if (sense_len == 0) {
29 DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%d: %s:"
30 " sense len 0\n", ha->host_no,
31 cmd->device->channel, cmd->device->id,
32 cmd->device->lun, __func__));
33 ha->status_srb = NULL;
29 return; 34 return;
30 35 }
31 /* Save total available sense length, 36 /* Save total available sense length,
32 * not to exceed cmd's sense buffer size */ 37 * not to exceed cmd's sense buffer size */
33 sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE); 38 sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
@@ -541,6 +546,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
541 case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED: /* Connection mode */ 546 case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED: /* Connection mode */
542 case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR: 547 case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
543 case MBOX_ASTS_SUBNET_STATE_CHANGE: 548 case MBOX_ASTS_SUBNET_STATE_CHANGE:
549 case MBOX_ASTS_DUPLICATE_IP:
544 /* No action */ 550 /* No action */
545 DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no, 551 DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
546 mbox_status)); 552 mbox_status));
@@ -593,11 +599,13 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
593 mbox_sts[i]; 599 mbox_sts[i];
594 600
595 /* print debug message */ 601 /* print debug message */
596 DEBUG2(printk("scsi%ld: AEN[%d] %04x queued" 602 DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
597 " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n", 603 "mb1:0x%x mb2:0x%x mb3:0x%x "
598 ha->host_no, ha->aen_in, mbox_sts[0], 604 "mb4:0x%x mb5:0x%x\n",
599 mbox_sts[1], mbox_sts[2], mbox_sts[3], 605 ha->host_no, ha->aen_in,
600 mbox_sts[4])); 606 mbox_sts[0], mbox_sts[1],
607 mbox_sts[2], mbox_sts[3],
608 mbox_sts[4], mbox_sts[5]));
601 609
602 /* advance pointer */ 610 /* advance pointer */
603 ha->aen_in++; 611 ha->aen_in++;
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index d78b58dc5011..fce8289e9752 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -86,22 +86,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
86 msleep(10); 86 msleep(10);
87 } 87 }
88 88
89 /* To prevent overwriting mailbox registers for a command that has
90 * not yet been serviced, check to see if an active command
91 * (AEN, IOCB, etc.) is interrupting, then service it.
92 * -----------------------------------------------------------------
93 */
94 spin_lock_irqsave(&ha->hardware_lock, flags); 89 spin_lock_irqsave(&ha->hardware_lock, flags);
95 90
96 if (!is_qla8022(ha)) {
97 intr_status = readl(&ha->reg->ctrl_status);
98 if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
99 /* Service existing interrupt */
100 ha->isp_ops->interrupt_service_routine(ha, intr_status);
101 clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
102 }
103 }
104
105 ha->mbox_status_count = outCount; 91 ha->mbox_status_count = outCount;
106 for (i = 0; i < outCount; i++) 92 for (i = 0; i < outCount; i++)
107 ha->mbox_status[i] = 0; 93 ha->mbox_status[i] = 0;
@@ -1057,38 +1043,65 @@ int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
1057} 1043}
1058 1044
1059/** 1045/**
1060 * qla4xxx_get_fw_version - gets firmware version 1046 * qla4xxx_about_firmware - gets FW, iscsi draft and boot loader version
1061 * @ha: Pointer to host adapter structure. 1047 * @ha: Pointer to host adapter structure.
1062 * 1048 *
1063 * Retrieves the firmware version on HBA. In QLA4010, mailboxes 2 & 3 may 1049 * Retrieves the FW version, iSCSI draft version & bootloader version of HBA.
1064 * hold an address for data. Make sure that we write 0 to those mailboxes, 1050 * Mailboxes 2 & 3 may hold an address for data. Make sure that we write 0 to
1065 * if unused. 1051 * those mailboxes, if unused.
1066 **/ 1052 **/
1067int qla4xxx_get_fw_version(struct scsi_qla_host * ha) 1053int qla4xxx_about_firmware(struct scsi_qla_host *ha)
1068{ 1054{
1055 struct about_fw_info *about_fw = NULL;
1056 dma_addr_t about_fw_dma;
1069 uint32_t mbox_cmd[MBOX_REG_COUNT]; 1057 uint32_t mbox_cmd[MBOX_REG_COUNT];
1070 uint32_t mbox_sts[MBOX_REG_COUNT]; 1058 uint32_t mbox_sts[MBOX_REG_COUNT];
1059 int status = QLA_ERROR;
1060
1061 about_fw = dma_alloc_coherent(&ha->pdev->dev,
1062 sizeof(struct about_fw_info),
1063 &about_fw_dma, GFP_KERNEL);
1064 if (!about_fw) {
1065 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
1066 "for about_fw\n", __func__));
1067 return status;
1068 }
1071 1069
1072 /* Get firmware version. */ 1070 memset(about_fw, 0, sizeof(struct about_fw_info));
1073 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 1071 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1074 memset(&mbox_sts, 0, sizeof(mbox_sts)); 1072 memset(&mbox_sts, 0, sizeof(mbox_sts));
1075 1073
1076 mbox_cmd[0] = MBOX_CMD_ABOUT_FW; 1074 mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
1077 1075 mbox_cmd[2] = LSDW(about_fw_dma);
1078 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) != 1076 mbox_cmd[3] = MSDW(about_fw_dma);
1079 QLA_SUCCESS) { 1077 mbox_cmd[4] = sizeof(struct about_fw_info);
1080 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ " 1078
1081 "status %04X\n", ha->host_no, __func__, mbox_sts[0])); 1079 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
1082 return QLA_ERROR; 1080 &mbox_cmd[0], &mbox_sts[0]);
1081 if (status != QLA_SUCCESS) {
1082 DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_ABOUT_FW "
1083 "failed w/ status %04X\n", __func__,
1084 mbox_sts[0]));
1085 goto exit_about_fw;
1083 } 1086 }
1084 1087
1085 /* Save firmware version information. */ 1088 /* Save version information. */
1086 ha->firmware_version[0] = mbox_sts[1]; 1089 ha->firmware_version[0] = le16_to_cpu(about_fw->fw_major);
1087 ha->firmware_version[1] = mbox_sts[2]; 1090 ha->firmware_version[1] = le16_to_cpu(about_fw->fw_minor);
1088 ha->patch_number = mbox_sts[3]; 1091 ha->patch_number = le16_to_cpu(about_fw->fw_patch);
1089 ha->build_number = mbox_sts[4]; 1092 ha->build_number = le16_to_cpu(about_fw->fw_build);
1093 ha->iscsi_major = le16_to_cpu(about_fw->iscsi_major);
1094 ha->iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
1095 ha->bootload_major = le16_to_cpu(about_fw->bootload_major);
1096 ha->bootload_minor = le16_to_cpu(about_fw->bootload_minor);
1097 ha->bootload_patch = le16_to_cpu(about_fw->bootload_patch);
1098 ha->bootload_build = le16_to_cpu(about_fw->bootload_build);
1099 status = QLA_SUCCESS;
1090 1100
1091 return QLA_SUCCESS; 1101exit_about_fw:
1102 dma_free_coherent(&ha->pdev->dev, sizeof(struct about_fw_info),
1103 about_fw, about_fw_dma);
1104 return status;
1092} 1105}
1093 1106
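The reworked qla4xxx_about_firmware() moves the version data out of the mailbox status registers into a DMA-coherent about_fw_info buffer — address in mailboxes 2/3, length in mailbox 4 — and byte-swaps each field with le16_to_cpu() on the way out. A tiny sketch of just the little-endian decode step; the offsets follow the about_fw_info layout shown earlier, and the sample values are invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a little-endian 16-bit word out of a raw firmware blob,
 * the portable equivalent of le16_to_cpu() on the struct fields. */
static uint16_t get_le16(const uint8_t *p)
{
    return (uint16_t)(p[0] | p[1] << 8);
}

int main(void)
{
    uint8_t blob[256];

    memset(blob, 0, sizeof(blob));
    blob[0] = 5;                 /* hypothetical fw_major at offset 0x00 */
    blob[2] = 2;                 /* hypothetical fw_minor at offset 0x02 */

    printf("fw %u.%02u\n", get_le16(&blob[0]), get_le16(&blob[2]));
    return 0;
}
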
1094static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, 1107static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 03e522b2fe0b..fdfe27b38698 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -964,12 +964,26 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
 964 /* Halt all the individual PEGs and other blocks of the ISP */ 964 /* Halt all the individual PEGs and other blocks of the ISP */
965 qla4_8xxx_rom_lock(ha); 965 qla4_8xxx_rom_lock(ha);
966 966
967 /* mask all niu interrupts */ 967 /* disable all I2Q */
968 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
969 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
970 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
971 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
972 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
973 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
974
975 /* disable all niu interrupts */
968 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); 976 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
969 /* disable xge rx/tx */ 977 /* disable xge rx/tx */
970 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); 978 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
971 /* disable xg1 rx/tx */ 979 /* disable xg1 rx/tx */
972 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); 980 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
981 /* disable sideband mac */
982 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
983 /* disable ap0 mac */
984 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
985 /* disable ap1 mac */
986 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
973 987
974 /* halt sre */ 988 /* halt sre */
975 val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); 989 val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
@@ -984,6 +998,7 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
984 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); 998 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
985 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); 999 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
986 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); 1000 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
1001 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
987 1002
988 /* halt pegs */ 1003 /* halt pegs */
989 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); 1004 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
@@ -991,9 +1006,9 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
991 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); 1006 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
992 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); 1007 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
993 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); 1008 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
1009 msleep(5);
994 1010
995 /* big hammer */ 1011 /* big hammer */
996 msleep(1000);
997 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 1012 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
998 /* don't reset CAM block on reset */ 1013 /* don't reset CAM block on reset */
999 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); 1014 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index c22f2a764d9d..f2364ec59f03 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -124,6 +124,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
124 .sg_tablesize = SG_ALL, 124 .sg_tablesize = SG_ALL,
125 125
126 .max_sectors = 0xFFFF, 126 .max_sectors = 0xFFFF,
127 .shost_attrs = qla4xxx_host_attrs,
127}; 128};
128 129
129static struct iscsi_transport qla4xxx_iscsi_transport = { 130static struct iscsi_transport qla4xxx_iscsi_transport = {
@@ -412,8 +413,7 @@ void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
412 413
413static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha, 414static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
414 struct ddb_entry *ddb_entry, 415 struct ddb_entry *ddb_entry,
415 struct scsi_cmnd *cmd, 416 struct scsi_cmnd *cmd)
416 void (*done)(struct scsi_cmnd *))
417{ 417{
418 struct srb *srb; 418 struct srb *srb;
419 419
@@ -427,7 +427,6 @@ static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
427 srb->cmd = cmd; 427 srb->cmd = cmd;
428 srb->flags = 0; 428 srb->flags = 0;
429 CMD_SP(cmd) = (void *)srb; 429 CMD_SP(cmd) = (void *)srb;
430 cmd->scsi_done = done;
431 430
432 return srb; 431 return srb;
433} 432}
@@ -458,9 +457,8 @@ void qla4xxx_srb_compl(struct kref *ref)
458 457
459/** 458/**
460 * qla4xxx_queuecommand - scsi layer issues scsi command to driver. 459 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
460 * @host: scsi host
461 * @cmd: Pointer to Linux's SCSI command structure 461 * @cmd: Pointer to Linux's SCSI command structure
462 * @done_fn: Function that the driver calls to notify the SCSI mid-layer
463 * that the command has been processed.
464 * 462 *
465 * Remarks: 463 * Remarks:
466 * This routine is invoked by Linux to send a SCSI command to the driver. 464 * This routine is invoked by Linux to send a SCSI command to the driver.
@@ -470,10 +468,9 @@ void qla4xxx_srb_compl(struct kref *ref)
 470 * completion handling). Unfortunately, it sometimes calls the scheduler 468 * completion handling). Unfortunately, it sometimes calls the scheduler
471 * in interrupt context which is a big NO! NO!. 469 * in interrupt context which is a big NO! NO!.
472 **/ 470 **/
473static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd, 471static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
474 void (*done)(struct scsi_cmnd *))
475{ 472{
476 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 473 struct scsi_qla_host *ha = to_qla_host(host);
477 struct ddb_entry *ddb_entry = cmd->device->hostdata; 474 struct ddb_entry *ddb_entry = cmd->device->hostdata;
478 struct iscsi_cls_session *sess = ddb_entry->sess; 475 struct iscsi_cls_session *sess = ddb_entry->sess;
479 struct srb *srb; 476 struct srb *srb;
@@ -515,37 +512,29 @@ static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd,
515 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) 512 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
516 goto qc_host_busy; 513 goto qc_host_busy;
517 514
518 spin_unlock_irq(ha->host->host_lock); 515 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
519
520 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd, done);
521 if (!srb) 516 if (!srb)
522 goto qc_host_busy_lock; 517 goto qc_host_busy;
523 518
524 rval = qla4xxx_send_command_to_isp(ha, srb); 519 rval = qla4xxx_send_command_to_isp(ha, srb);
525 if (rval != QLA_SUCCESS) 520 if (rval != QLA_SUCCESS)
526 goto qc_host_busy_free_sp; 521 goto qc_host_busy_free_sp;
527 522
528 spin_lock_irq(ha->host->host_lock);
529 return 0; 523 return 0;
530 524
531qc_host_busy_free_sp: 525qc_host_busy_free_sp:
532 qla4xxx_srb_free_dma(ha, srb); 526 qla4xxx_srb_free_dma(ha, srb);
533 mempool_free(srb, ha->srb_mempool); 527 mempool_free(srb, ha->srb_mempool);
534 528
535qc_host_busy_lock:
536 spin_lock_irq(ha->host->host_lock);
537
538qc_host_busy: 529qc_host_busy:
539 return SCSI_MLQUEUE_HOST_BUSY; 530 return SCSI_MLQUEUE_HOST_BUSY;
540 531
541qc_fail_command: 532qc_fail_command:
542 done(cmd); 533 cmd->scsi_done(cmd);
543 534
544 return 0; 535 return 0;
545} 536}
546 537
547static DEF_SCSI_QCMD(qla4xxx_queuecommand)
548
549/** 538/**
550 * qla4xxx_mem_free - frees memory allocated to adapter 539 * qla4xxx_mem_free - frees memory allocated to adapter
551 * @ha: Pointer to host adapter structure. 540 * @ha: Pointer to host adapter structure.
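Dropping the _lck suffix and the DEF_SCSI_QCMD() wrapper converts the driver to the lock-less queuecommand convention: the midlayer no longer holds host_lock across the call, which is why the explicit unlock/relock around srb allocation disappears and scsi_done comes from the command itself. For reference, the removed wrapper expands to roughly the following (a sketch of the scsi_host.h macro of that era, not driver code):

    #define DEF_SCSI_QCMD(func_name)                                  \
        int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd) \
        {                                                             \
            unsigned long irq_flags;                                  \
            int rc;                                                   \
            spin_lock_irqsave(shost->host_lock, irq_flags);           \
            scsi_cmd_get_serial(shost, cmd);                          \
            rc = func_name##_lck(cmd, cmd->scsi_done);                \
            spin_unlock_irqrestore(shost->host_lock, irq_flags);      \
            return rc;                                                \
        }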
@@ -679,7 +668,27 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
679 if (ha->seconds_since_last_heartbeat == 2) { 668 if (ha->seconds_since_last_heartbeat == 2) {
680 ha->seconds_since_last_heartbeat = 0; 669 ha->seconds_since_last_heartbeat = 0;
681 halt_status = qla4_8xxx_rd_32(ha, 670 halt_status = qla4_8xxx_rd_32(ha,
682 QLA82XX_PEG_HALT_STATUS1); 671 QLA82XX_PEG_HALT_STATUS1);
672
673 ql4_printk(KERN_INFO, ha,
674 "scsi(%ld): %s, Dumping hw/fw registers:\n "
675 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
676 " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
677 " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
678 " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
679 ha->host_no, __func__, halt_status,
680 qla4_8xxx_rd_32(ha,
681 QLA82XX_PEG_HALT_STATUS2),
682 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
683 0x3c),
684 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
685 0x3c),
686 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
687 0x3c),
688 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
689 0x3c),
690 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
691 0x3c));
683 692
684 /* Since we cannot change dev_state in interrupt 693 /* Since we cannot change dev_state in interrupt
685 * context, set appropriate DPC flag then wakeup 694 * context, set appropriate DPC flag then wakeup
@@ -715,7 +724,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
715 /* don't poll if reset is going on */ 724 /* don't poll if reset is going on */
716 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 725 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
717 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 726 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
718 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags))) { 727 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
719 if (dev_state == QLA82XX_DEV_NEED_RESET && 728 if (dev_state == QLA82XX_DEV_NEED_RESET &&
720 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 729 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
721 if (!ql4xdontresethba) { 730 if (!ql4xdontresethba) {
@@ -839,7 +848,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
839 } 848 }
840 849
841 /* Wakeup the dpc routine for this adapter, if needed. */ 850 /* Wakeup the dpc routine for this adapter, if needed. */
842 if ((start_dpc || 851 if (start_dpc ||
843 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 852 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
844 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) || 853 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
845 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) || 854 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
@@ -849,9 +858,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
849 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || 858 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
850 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 859 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
851 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 860 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
852 test_bit(DPC_AEN, &ha->dpc_flags)) && 861 test_bit(DPC_AEN, &ha->dpc_flags)) {
853 !test_bit(AF_DPC_SCHEDULED, &ha->flags) &&
854 ha->dpc_thread) {
855 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" 862 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
856 " - dpc flags = 0x%lx\n", 863 " - dpc flags = 0x%lx\n",
857 ha->host_no, __func__, ha->dpc_flags)); 864 ha->host_no, __func__, ha->dpc_flags));
@@ -1241,11 +1248,8 @@ static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
1241 1248
1242void qla4xxx_wake_dpc(struct scsi_qla_host *ha) 1249void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
1243{ 1250{
1244 if (ha->dpc_thread && 1251 if (ha->dpc_thread)
1245 !test_bit(AF_DPC_SCHEDULED, &ha->flags)) {
1246 set_bit(AF_DPC_SCHEDULED, &ha->flags);
1247 queue_work(ha->dpc_thread, &ha->dpc_work); 1252 queue_work(ha->dpc_thread, &ha->dpc_work);
1248 }
1249} 1253}
1250 1254
1251/** 1255/**
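The AF_DPC_SCHEDULED flag was redundant bookkeeping: queue_work() atomically tests and sets the work item's PENDING bit, so calling it while dpc_work is still queued is a harmless no-op. The simplified wakeup relies on exactly that (illustration; the return value is normally ignored here):

    if (ha->dpc_thread)
        queue_work(ha->dpc_thread, &ha->dpc_work); /* no-op if already pending */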
@@ -1272,12 +1276,12 @@ static void qla4xxx_do_dpc(struct work_struct *work)
1272 1276
1273 /* Initialization not yet finished. Don't do anything yet. */ 1277 /* Initialization not yet finished. Don't do anything yet. */
1274 if (!test_bit(AF_INIT_DONE, &ha->flags)) 1278 if (!test_bit(AF_INIT_DONE, &ha->flags))
1275 goto do_dpc_exit; 1279 return;
1276 1280
1277 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 1281 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1278 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n", 1282 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
1279 ha->host_no, __func__, ha->flags)); 1283 ha->host_no, __func__, ha->flags));
1280 goto do_dpc_exit; 1284 return;
1281 } 1285 }
1282 1286
1283 if (is_qla8022(ha)) { 1287 if (is_qla8022(ha)) {
@@ -1384,8 +1388,6 @@ dpc_post_reset_ha:
1384 } 1388 }
1385 } 1389 }
1386 1390
1387do_dpc_exit:
1388 clear_bit(AF_DPC_SCHEDULED, &ha->flags);
1389} 1391}
1390 1392
1391/** 1393/**
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 603155769407..610492877253 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k6" 8#define QLA4XXX_DRIVER_VERSION "5.02.00-k7"
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index abea2cf05c2e..a4b9cdbaaa0b 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -50,6 +50,8 @@
50#define BUS_RESET_SETTLE_TIME (10) 50#define BUS_RESET_SETTLE_TIME (10)
51#define HOST_RESET_SETTLE_TIME (10) 51#define HOST_RESET_SETTLE_TIME (10)
52 52
53static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
54
53/* called with shost->host_lock held */ 55/* called with shost->host_lock held */
54void scsi_eh_wakeup(struct Scsi_Host *shost) 56void scsi_eh_wakeup(struct Scsi_Host *shost)
55{ 57{
@@ -947,6 +949,48 @@ retry_tur:
947} 949}
948 950
949/** 951/**
952 * scsi_eh_test_devices - check if devices are responding from error recovery.
953 * @cmd_list: scsi commands in error recovery.
954 * @work_q: queue for commands which still need more error recovery
955 * @done_q: queue for commands which are finished
956 * @try_stu: boolean on if a STU command should be tried in addition to TUR.
957 *
 958 * Description:
959 * Tests if devices are in a working state. Commands to devices now in
960 * a working state are sent to the done_q while commands to devices which
961 * are still failing to respond are returned to the work_q for more
962 * processing.
963 **/
964static int scsi_eh_test_devices(struct list_head *cmd_list,
965 struct list_head *work_q,
966 struct list_head *done_q, int try_stu)
967{
968 struct scsi_cmnd *scmd, *next;
969 struct scsi_device *sdev;
970 int finish_cmds;
971
972 while (!list_empty(cmd_list)) {
973 scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
974 sdev = scmd->device;
975
976 finish_cmds = !scsi_device_online(scmd->device) ||
977 (try_stu && !scsi_eh_try_stu(scmd) &&
978 !scsi_eh_tur(scmd)) ||
979 !scsi_eh_tur(scmd);
980
981 list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
982 if (scmd->device == sdev) {
983 if (finish_cmds)
984 scsi_eh_finish_cmd(scmd, done_q);
985 else
986 list_move_tail(&scmd->eh_entry, work_q);
987 }
988 }
989 return list_empty(work_q);
990}
991
992
993/**
950 * scsi_eh_abort_cmds - abort pending commands. 994 * scsi_eh_abort_cmds - abort pending commands.
951 * @work_q: &list_head for pending commands. 995 * @work_q: &list_head for pending commands.
952 * @done_q: &list_head for processed commands. 996 * @done_q: &list_head for processed commands.
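The helper lets each abort/reset path probe a device once and then dispose of every command queued to that device in one sweep. Callers collect candidates on a private list and finish with a single call (a sketch of the pattern; reset_covered() is a hypothetical predicate standing in for each caller's own test):

    LIST_HEAD(check_list);

    list_for_each_entry_safe(scmd, next, work_q, eh_entry)
        if (reset_covered(scmd))    /* hypothetical: cmd was covered by the reset */
            list_move_tail(&scmd->eh_entry, &check_list);

    return scsi_eh_test_devices(&check_list, work_q, done_q, 0);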
@@ -962,6 +1006,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
962 struct list_head *done_q) 1006 struct list_head *done_q)
963{ 1007{
964 struct scsi_cmnd *scmd, *next; 1008 struct scsi_cmnd *scmd, *next;
1009 LIST_HEAD(check_list);
965 int rtn; 1010 int rtn;
966 1011
967 list_for_each_entry_safe(scmd, next, work_q, eh_entry) { 1012 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
@@ -973,11 +1018,10 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
973 rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd); 1018 rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd);
974 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { 1019 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
975 scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD; 1020 scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
976 if (!scsi_device_online(scmd->device) || 1021 if (rtn == FAST_IO_FAIL)
977 rtn == FAST_IO_FAIL ||
978 !scsi_eh_tur(scmd)) {
979 scsi_eh_finish_cmd(scmd, done_q); 1022 scsi_eh_finish_cmd(scmd, done_q);
980 } 1023 else
1024 list_move_tail(&scmd->eh_entry, &check_list);
981 } else 1025 } else
982 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting" 1026 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
983 " cmd failed:" 1027 " cmd failed:"
@@ -986,7 +1030,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
986 scmd)); 1030 scmd));
987 } 1031 }
988 1032
989 return list_empty(work_q); 1033 return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
990} 1034}
991 1035
992/** 1036/**
@@ -1137,6 +1181,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
1137 struct list_head *done_q) 1181 struct list_head *done_q)
1138{ 1182{
1139 LIST_HEAD(tmp_list); 1183 LIST_HEAD(tmp_list);
1184 LIST_HEAD(check_list);
1140 1185
1141 list_splice_init(work_q, &tmp_list); 1186 list_splice_init(work_q, &tmp_list);
1142 1187
@@ -1161,9 +1206,9 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
1161 if (scmd_id(scmd) != id) 1206 if (scmd_id(scmd) != id)
1162 continue; 1207 continue;
1163 1208
1164 if ((rtn == SUCCESS || rtn == FAST_IO_FAIL) 1209 if (rtn == SUCCESS)
1165 && (!scsi_device_online(scmd->device) || 1210 list_move_tail(&scmd->eh_entry, &check_list);
1166 rtn == FAST_IO_FAIL || !scsi_eh_tur(scmd))) 1211 else if (rtn == FAST_IO_FAIL)
1167 scsi_eh_finish_cmd(scmd, done_q); 1212 scsi_eh_finish_cmd(scmd, done_q);
1168 else 1213 else
1169 /* push back on work queue for further processing */ 1214 /* push back on work queue for further processing */
@@ -1171,7 +1216,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
1171 } 1216 }
1172 } 1217 }
1173 1218
1174 return list_empty(work_q); 1219 return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
1175} 1220}
1176 1221
1177/** 1222/**
@@ -1185,6 +1230,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1185 struct list_head *done_q) 1230 struct list_head *done_q)
1186{ 1231{
1187 struct scsi_cmnd *scmd, *chan_scmd, *next; 1232 struct scsi_cmnd *scmd, *chan_scmd, *next;
1233 LIST_HEAD(check_list);
1188 unsigned int channel; 1234 unsigned int channel;
1189 int rtn; 1235 int rtn;
1190 1236
@@ -1216,12 +1262,14 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1216 rtn = scsi_try_bus_reset(chan_scmd); 1262 rtn = scsi_try_bus_reset(chan_scmd);
1217 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { 1263 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
1218 list_for_each_entry_safe(scmd, next, work_q, eh_entry) { 1264 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1219 if (channel == scmd_channel(scmd)) 1265 if (channel == scmd_channel(scmd)) {
1220 if (!scsi_device_online(scmd->device) || 1266 if (rtn == FAST_IO_FAIL)
1221 rtn == FAST_IO_FAIL ||
1222 !scsi_eh_tur(scmd))
1223 scsi_eh_finish_cmd(scmd, 1267 scsi_eh_finish_cmd(scmd,
1224 done_q); 1268 done_q);
1269 else
1270 list_move_tail(&scmd->eh_entry,
1271 &check_list);
1272 }
1225 } 1273 }
1226 } else { 1274 } else {
1227 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST" 1275 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
@@ -1230,7 +1278,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1230 channel)); 1278 channel));
1231 } 1279 }
1232 } 1280 }
1233 return list_empty(work_q); 1281 return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
1234} 1282}
1235 1283
1236/** 1284/**
@@ -1242,6 +1290,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
1242 struct list_head *done_q) 1290 struct list_head *done_q)
1243{ 1291{
1244 struct scsi_cmnd *scmd, *next; 1292 struct scsi_cmnd *scmd, *next;
1293 LIST_HEAD(check_list);
1245 int rtn; 1294 int rtn;
1246 1295
1247 if (!list_empty(work_q)) { 1296 if (!list_empty(work_q)) {
@@ -1252,12 +1301,10 @@ static int scsi_eh_host_reset(struct list_head *work_q,
1252 , current->comm)); 1301 , current->comm));
1253 1302
1254 rtn = scsi_try_host_reset(scmd); 1303 rtn = scsi_try_host_reset(scmd);
1255 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { 1304 if (rtn == SUCCESS) {
1305 list_splice_init(work_q, &check_list);
1306 } else if (rtn == FAST_IO_FAIL) {
1256 list_for_each_entry_safe(scmd, next, work_q, eh_entry) { 1307 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1257 if (!scsi_device_online(scmd->device) ||
1258 rtn == FAST_IO_FAIL ||
1259 (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
1260 !scsi_eh_tur(scmd))
1261 scsi_eh_finish_cmd(scmd, done_q); 1308 scsi_eh_finish_cmd(scmd, done_q);
1262 } 1309 }
1263 } else { 1310 } else {
@@ -1266,7 +1313,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
1266 current->comm)); 1313 current->comm));
1267 } 1314 }
1268 } 1315 }
1269 return list_empty(work_q); 1316 return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
1270} 1317}
1271 1318
1272/** 1319/**
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index f46855cd853d..ad747dc337da 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -381,11 +381,6 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
381 return err; 381 return err;
382} 382}
383 383
384/**
385 * proc_scsi_show - show contents of /proc/scsi/scsi (attached devices)
386 * @s: output goes here
387 * @p: not used
388 */
389static int always_match(struct device *dev, void *data) 384static int always_match(struct device *dev, void *data)
390{ 385{
391 return 1; 386 return 1;
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index b587289cfacb..2bea4f0b684a 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -59,6 +59,10 @@ scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
59 trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", 59 trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
60 (unsigned long long)lba, (unsigned long long)txlen, 60 (unsigned long long)lba, (unsigned long long)txlen,
61 cdb[1] >> 5); 61 cdb[1] >> 5);
62
63 if (cdb[0] == WRITE_SAME)
64 trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
65
62 trace_seq_putc(p, 0); 66 trace_seq_putc(p, 0);
63 67
64 return ret; 68 return ret;
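In WRITE SAME the UNMAP flag sits in bit 3 of CDB byte 1 (below ANCHOR, above PBDATA), so the new trace output is a one-bit extraction (worked example with an assumed CDB byte):

    unsigned char cdb1 = 0x08;              /* hypothetical: UNMAP=1, protect=0 */
    unsigned int unmap = (cdb1 >> 3) & 1;   /* -> 1 */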
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bd0806e64e85..953773cb26d9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -490,7 +490,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
490 unsigned int max_blocks = 0; 490 unsigned int max_blocks = 0;
491 491
492 q->limits.discard_zeroes_data = sdkp->lbprz; 492 q->limits.discard_zeroes_data = sdkp->lbprz;
493 q->limits.discard_alignment = sdkp->unmap_alignment; 493 q->limits.discard_alignment = sdkp->unmap_alignment *
494 logical_block_size;
494 q->limits.discard_granularity = 495 q->limits.discard_granularity =
495 max(sdkp->physical_block_size, 496 max(sdkp->physical_block_size,
496 sdkp->unmap_granularity * logical_block_size); 497 sdkp->unmap_granularity * logical_block_size);
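unmap_alignment comes from the Block Limits VPD page in units of logical blocks, while queue_limits.discard_alignment is expressed in bytes, so it needs the same scaling discard_granularity already gets one line below. Worked example (assumed values):

    /* device reports an alignment of 8 logical blocks, 512-byte sectors */
    q->limits.discard_alignment = 8 * 512;  /* = 4096 bytes; was wrongly just 8 */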
@@ -2021,16 +2022,26 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2021 2022
2022 int dbd; 2023 int dbd;
2023 int modepage; 2024 int modepage;
2025 int first_len;
2024 struct scsi_mode_data data; 2026 struct scsi_mode_data data;
2025 struct scsi_sense_hdr sshdr; 2027 struct scsi_sense_hdr sshdr;
2026 int old_wce = sdkp->WCE; 2028 int old_wce = sdkp->WCE;
2027 int old_rcd = sdkp->RCD; 2029 int old_rcd = sdkp->RCD;
2028 int old_dpofua = sdkp->DPOFUA; 2030 int old_dpofua = sdkp->DPOFUA;
2029 2031
2030 if (sdp->skip_ms_page_8) 2032 first_len = 4;
2031 goto defaults; 2033 if (sdp->skip_ms_page_8) {
2032 2034 if (sdp->type == TYPE_RBC)
2033 if (sdp->type == TYPE_RBC) { 2035 goto defaults;
2036 else {
2037 if (sdp->skip_ms_page_3f)
2038 goto defaults;
2039 modepage = 0x3F;
2040 if (sdp->use_192_bytes_for_3f)
2041 first_len = 192;
2042 dbd = 0;
2043 }
2044 } else if (sdp->type == TYPE_RBC) {
2034 modepage = 6; 2045 modepage = 6;
2035 dbd = 8; 2046 dbd = 8;
2036 } else { 2047 } else {
@@ -2039,13 +2050,15 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2039 } 2050 }
2040 2051
2041 /* cautiously ask */ 2052 /* cautiously ask */
2042 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, 4, &data, &sshdr); 2053 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len,
2054 &data, &sshdr);
2043 2055
2044 if (!scsi_status_is_good(res)) 2056 if (!scsi_status_is_good(res))
2045 goto bad_sense; 2057 goto bad_sense;
2046 2058
2047 if (!data.header_length) { 2059 if (!data.header_length) {
2048 modepage = 6; 2060 modepage = 6;
2061 first_len = 0;
2049 sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n"); 2062 sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
2050 } 2063 }
2051 2064
@@ -2058,30 +2071,61 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2058 */ 2071 */
2059 if (len < 3) 2072 if (len < 3)
2060 goto bad_sense; 2073 goto bad_sense;
2061 if (len > 20) 2074 else if (len > SD_BUF_SIZE) {
2062 len = 20; 2075 sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
2063 2076 "data from %d to %d bytes\n", len, SD_BUF_SIZE);
2064 /* Take headers and block descriptors into account */ 2077 len = SD_BUF_SIZE;
2065 len += data.header_length + data.block_descriptor_length; 2078 }
2066 if (len > SD_BUF_SIZE) 2079 if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
2067 goto bad_sense; 2080 len = 192;
2068 2081
2069 /* Get the data */ 2082 /* Get the data */
2070 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); 2083 if (len > first_len)
2084 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len,
2085 &data, &sshdr);
2071 2086
2072 if (scsi_status_is_good(res)) { 2087 if (scsi_status_is_good(res)) {
2073 int offset = data.header_length + data.block_descriptor_length; 2088 int offset = data.header_length + data.block_descriptor_length;
2074 2089
2075 if (offset >= SD_BUF_SIZE - 2) { 2090 while (offset < len) {
2076 sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n"); 2091 u8 page_code = buffer[offset] & 0x3F;
2077 goto defaults; 2092 u8 spf = buffer[offset] & 0x40;
2093
2094 if (page_code == 8 || page_code == 6) {
2095 /* We're interested only in the first 3 bytes.
2096 */
2097 if (len - offset <= 2) {
2098 sd_printk(KERN_ERR, sdkp, "Incomplete "
2099 "mode parameter data\n");
2100 goto defaults;
2101 } else {
2102 modepage = page_code;
2103 goto Page_found;
2104 }
2105 } else {
2106 /* Go to the next page */
2107 if (spf && len - offset > 3)
2108 offset += 4 + (buffer[offset+2] << 8) +
2109 buffer[offset+3];
2110 else if (!spf && len - offset > 1)
2111 offset += 2 + buffer[offset+1];
2112 else {
2113 sd_printk(KERN_ERR, sdkp, "Incomplete "
2114 "mode parameter data\n");
2115 goto defaults;
2116 }
2117 }
2078 } 2118 }
2079 2119
2080 if ((buffer[offset] & 0x3f) != modepage) { 2120 if (modepage == 0x3F) {
2121 sd_printk(KERN_ERR, sdkp, "No Caching mode page "
2122 "present\n");
2123 goto defaults;
2124 } else if ((buffer[offset] & 0x3f) != modepage) {
2081 sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); 2125 sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
2082 goto defaults; 2126 goto defaults;
2083 } 2127 }
2084 2128 Page_found:
2085 if (modepage == 8) { 2129 if (modepage == 8) {
2086 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); 2130 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
2087 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); 2131 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
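For devices that cannot answer a direct page-8 MODE SENSE, the code now requests page 0x3F (return all pages) and walks the reply looking for the caching page. SPC defines two header forms: page_0 format (SPF=0) is 2 bytes plus an 8-bit PAGE LENGTH, subpage format (SPF=1) is a 4-byte header with a 16-bit length. A condensed version of the walk above (the original's bounds checks omitted for brevity):

    while (offset < len) {
        u8   page = buffer[offset] & 0x3f;
        bool spf  = buffer[offset] & 0x40;

        if (page == 8 || page == 6)         /* Caching page or RBC device page */
            break;
        if (spf)
            offset += 4 + (buffer[offset + 2] << 8) + buffer[offset + 3];
        else
            offset += 2 + buffer[offset + 1];
    }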
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index 9f4b58b7daad..7e22b737dfd8 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -307,7 +307,7 @@ static inline int find_and_clear_bit_16(unsigned long *field)
307 "0: bsfw %1,%w0\n\t" 307 "0: bsfw %1,%w0\n\t"
308 "btr %0,%1\n\t" 308 "btr %0,%1\n\t"
309 "jnc 0b" 309 "jnc 0b"
310 : "=&r" (rv), "=m" (*field) :); 310 : "=&r" (rv), "+m" (*field) :);
311 311
312 return rv; 312 return rv;
313} 313}
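The constraint change matters because the bsfw/btr pair both reads and writes *field: with "=m" the compiler may treat the slot as write-only and discard or reorder the prior store the loop depends on. "+m" declares a read-write memory operand, the standard form for read-modify-write asm:

    asm("0: bsfw %1,%w0\n\t"        /* find first set bit */
        "btr %0,%1\n\t"             /* clear it in memory */
        "jnc 0b"
        : "=&r" (rv), "+m" (*field));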
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index fbd96b29530d..de35c3ad8a69 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -80,6 +80,15 @@ config SPI_BFIN
80 help 80 help
81 This is the SPI controller master driver for Blackfin 5xx processor. 81 This is the SPI controller master driver for Blackfin 5xx processor.
82 82
83config SPI_BFIN_SPORT
84 tristate "SPI bus via Blackfin SPORT"
85 depends on BLACKFIN
86 help
87 Enable support for a SPI bus via the Blackfin SPORT peripheral.
88
89 This driver can also be built as a module. If so, the module
90 will be called spi_bfin_sport.
91
83config SPI_AU1550 92config SPI_AU1550
84 tristate "Au1550/Au12x0 SPI Controller" 93 tristate "Au1550/Au12x0 SPI Controller"
85 depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL 94 depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index fd2fc5f6505f..0f8c69b6b19e 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_SPI_ALTERA) += spi_altera.o
13obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o 13obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o
14obj-$(CONFIG_SPI_ATH79) += ath79_spi.o 14obj-$(CONFIG_SPI_ATH79) += ath79_spi.o
15obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o 15obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
16obj-$(CONFIG_SPI_BFIN_SPORT) += spi_bfin_sport.o
16obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o 17obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o 18obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o 19obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
diff --git a/drivers/spi/spi_bfin_sport.c b/drivers/spi/spi_bfin_sport.c
new file mode 100644
index 000000000000..e557ff617b11
--- /dev/null
+++ b/drivers/spi/spi_bfin_sport.c
@@ -0,0 +1,952 @@
1/*
2 * SPI bus via the Blackfin SPORT peripheral
3 *
4 * Enter bugs at http://blackfin.uclinux.org/
5 *
6 * Copyright 2009-2011 Analog Devices Inc.
7 *
8 * Licensed under the GPL-2 or later.
9 */
10
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/delay.h>
14#include <linux/device.h>
15#include <linux/gpio.h>
16#include <linux/io.h>
17#include <linux/ioport.h>
18#include <linux/irq.h>
19#include <linux/errno.h>
20#include <linux/interrupt.h>
21#include <linux/platform_device.h>
22#include <linux/spi/spi.h>
23#include <linux/workqueue.h>
24
25#include <asm/portmux.h>
26#include <asm/bfin5xx_spi.h>
27#include <asm/blackfin.h>
28#include <asm/bfin_sport.h>
29#include <asm/cacheflush.h>
30
31#define DRV_NAME "bfin-sport-spi"
32#define DRV_DESC "SPI bus via the Blackfin SPORT"
33
34MODULE_AUTHOR("Cliff Cai");
35MODULE_DESCRIPTION(DRV_DESC);
36MODULE_LICENSE("GPL");
37MODULE_ALIAS("platform:bfin-sport-spi");
38
39enum bfin_sport_spi_state {
40 START_STATE,
41 RUNNING_STATE,
42 DONE_STATE,
43 ERROR_STATE,
44};
45
46struct bfin_sport_spi_master_data;
47
48struct bfin_sport_transfer_ops {
49 void (*write) (struct bfin_sport_spi_master_data *);
50 void (*read) (struct bfin_sport_spi_master_data *);
51 void (*duplex) (struct bfin_sport_spi_master_data *);
52};
53
54struct bfin_sport_spi_master_data {
55 /* Driver model hookup */
56 struct device *dev;
57
58 /* SPI framework hookup */
59 struct spi_master *master;
60
61 /* Regs base of SPI controller */
62 struct sport_register __iomem *regs;
63 int err_irq;
64
65 /* Pin request list */
66 u16 *pin_req;
67
68 /* Driver message queue */
69 struct workqueue_struct *workqueue;
70 struct work_struct pump_messages;
71 spinlock_t lock;
72 struct list_head queue;
73 int busy;
74 bool run;
75
76 /* Message Transfer pump */
77 struct tasklet_struct pump_transfers;
78
79 /* Current message transfer state info */
80 enum bfin_sport_spi_state state;
81 struct spi_message *cur_msg;
82 struct spi_transfer *cur_transfer;
83 struct bfin_sport_spi_slave_data *cur_chip;
84 union {
85 void *tx;
86 u8 *tx8;
87 u16 *tx16;
88 };
89 void *tx_end;
90 union {
91 void *rx;
92 u8 *rx8;
93 u16 *rx16;
94 };
95 void *rx_end;
96
97 int cs_change;
98 struct bfin_sport_transfer_ops *ops;
99};
100
101struct bfin_sport_spi_slave_data {
102 u16 ctl_reg;
103 u16 baud;
104 u16 cs_chg_udelay; /* Some devices require > 255usec delay */
105 u32 cs_gpio;
106 u16 idle_tx_val;
107 struct bfin_sport_transfer_ops *ops;
108};
109
110static void
111bfin_sport_spi_enable(struct bfin_sport_spi_master_data *drv_data)
112{
113 bfin_write_or(&drv_data->regs->tcr1, TSPEN);
114 bfin_write_or(&drv_data->regs->rcr1, TSPEN);
115 SSYNC();
116}
117
118static void
119bfin_sport_spi_disable(struct bfin_sport_spi_master_data *drv_data)
120{
121 bfin_write_and(&drv_data->regs->tcr1, ~TSPEN);
122 bfin_write_and(&drv_data->regs->rcr1, ~TSPEN);
123 SSYNC();
124}
125
 126/* Calculate the SPI_BAUD register value based on input HZ */
127static u16
128bfin_sport_hz_to_spi_baud(u32 speed_hz)
129{
130 u_long clk, sclk = get_sclk();
131 int div = (sclk / (2 * speed_hz)) - 1;
132
133 if (div < 0)
134 div = 0;
135
136 clk = sclk / (2 * (div + 1));
137
138 if (clk > speed_hz)
139 div++;
140
141 return div;
142}
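/* Worked example (assumed clocks): with sclk = 100 MHz and speed_hz = 15 MHz,
 * div = 100M / 30M - 1 = 2 and clk = 100M / 6 = 16.7 MHz; that overshoots the
 * request, so div is bumped to 3 and the SPORT clocks at 100M / 8 = 12.5 MHz.
 * The divider therefore always rounds the resulting clock down. */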
143
144/* Chip select operation functions for cs_change flag */
145static void
146bfin_sport_spi_cs_active(struct bfin_sport_spi_slave_data *chip)
147{
148 gpio_direction_output(chip->cs_gpio, 0);
149}
150
151static void
152bfin_sport_spi_cs_deactive(struct bfin_sport_spi_slave_data *chip)
153{
154 gpio_direction_output(chip->cs_gpio, 1);
155 /* Move delay here for consistency */
156 if (chip->cs_chg_udelay)
157 udelay(chip->cs_chg_udelay);
158}
159
160static void
161bfin_sport_spi_stat_poll_complete(struct bfin_sport_spi_master_data *drv_data)
162{
163 unsigned long timeout = jiffies + HZ;
164 while (!(bfin_read(&drv_data->regs->stat) & RXNE)) {
165 if (!time_before(jiffies, timeout))
166 break;
167 }
168}
169
170static void
171bfin_sport_spi_u8_writer(struct bfin_sport_spi_master_data *drv_data)
172{
173 u16 dummy;
174
175 while (drv_data->tx < drv_data->tx_end) {
176 bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
177 bfin_sport_spi_stat_poll_complete(drv_data);
178 dummy = bfin_read(&drv_data->regs->rx16);
179 }
180}
181
182static void
183bfin_sport_spi_u8_reader(struct bfin_sport_spi_master_data *drv_data)
184{
185 u16 tx_val = drv_data->cur_chip->idle_tx_val;
186
187 while (drv_data->rx < drv_data->rx_end) {
188 bfin_write(&drv_data->regs->tx16, tx_val);
189 bfin_sport_spi_stat_poll_complete(drv_data);
190 *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
191 }
192}
193
194static void
195bfin_sport_spi_u8_duplex(struct bfin_sport_spi_master_data *drv_data)
196{
197 while (drv_data->rx < drv_data->rx_end) {
198 bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
199 bfin_sport_spi_stat_poll_complete(drv_data);
200 *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
201 }
202}
203
204static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u8 = {
205 .write = bfin_sport_spi_u8_writer,
206 .read = bfin_sport_spi_u8_reader,
207 .duplex = bfin_sport_spi_u8_duplex,
208};
209
210static void
211bfin_sport_spi_u16_writer(struct bfin_sport_spi_master_data *drv_data)
212{
213 u16 dummy;
214
215 while (drv_data->tx < drv_data->tx_end) {
216 bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
217 bfin_sport_spi_stat_poll_complete(drv_data);
218 dummy = bfin_read(&drv_data->regs->rx16);
219 }
220}
221
222static void
223bfin_sport_spi_u16_reader(struct bfin_sport_spi_master_data *drv_data)
224{
225 u16 tx_val = drv_data->cur_chip->idle_tx_val;
226
227 while (drv_data->rx < drv_data->rx_end) {
228 bfin_write(&drv_data->regs->tx16, tx_val);
229 bfin_sport_spi_stat_poll_complete(drv_data);
230 *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
231 }
232}
233
234static void
235bfin_sport_spi_u16_duplex(struct bfin_sport_spi_master_data *drv_data)
236{
237 while (drv_data->rx < drv_data->rx_end) {
238 bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
239 bfin_sport_spi_stat_poll_complete(drv_data);
240 *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
241 }
242}
243
244static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u16 = {
245 .write = bfin_sport_spi_u16_writer,
246 .read = bfin_sport_spi_u16_reader,
247 .duplex = bfin_sport_spi_u16_duplex,
248};
249
250/* stop controller and re-config current chip */
251static void
252bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data)
253{
254 struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
255 unsigned int bits = (drv_data->ops == &bfin_sport_transfer_ops_u8 ? 7 : 15);
256
257 bfin_sport_spi_disable(drv_data);
258 dev_dbg(drv_data->dev, "restoring spi ctl state\n");
259
260 bfin_write(&drv_data->regs->tcr1, chip->ctl_reg);
261 bfin_write(&drv_data->regs->tcr2, bits);
262 bfin_write(&drv_data->regs->tclkdiv, chip->baud);
263 bfin_write(&drv_data->regs->tfsdiv, bits);
264 SSYNC();
265
266 bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS));
267 bfin_write(&drv_data->regs->rcr2, bits);
268 SSYNC();
269
270 bfin_sport_spi_cs_active(chip);
271}
272
273/* test if there is more transfer to be done */
274static enum bfin_sport_spi_state
275bfin_sport_spi_next_transfer(struct bfin_sport_spi_master_data *drv_data)
276{
277 struct spi_message *msg = drv_data->cur_msg;
278 struct spi_transfer *trans = drv_data->cur_transfer;
279
280 /* Move to next transfer */
281 if (trans->transfer_list.next != &msg->transfers) {
282 drv_data->cur_transfer =
283 list_entry(trans->transfer_list.next,
284 struct spi_transfer, transfer_list);
285 return RUNNING_STATE;
286 }
287
288 return DONE_STATE;
289}
290
291/*
292 * caller already set message->status;
293 * dma and pio irqs are blocked give finished message back
294 */
295static void
296bfin_sport_spi_giveback(struct bfin_sport_spi_master_data *drv_data)
297{
298 struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
299 unsigned long flags;
300 struct spi_message *msg;
301
302 spin_lock_irqsave(&drv_data->lock, flags);
303 msg = drv_data->cur_msg;
304 drv_data->state = START_STATE;
305 drv_data->cur_msg = NULL;
306 drv_data->cur_transfer = NULL;
307 drv_data->cur_chip = NULL;
308 queue_work(drv_data->workqueue, &drv_data->pump_messages);
309 spin_unlock_irqrestore(&drv_data->lock, flags);
310
311 if (!drv_data->cs_change)
312 bfin_sport_spi_cs_deactive(chip);
313
314 if (msg->complete)
315 msg->complete(msg->context);
316}
317
318static irqreturn_t
319sport_err_handler(int irq, void *dev_id)
320{
321 struct bfin_sport_spi_master_data *drv_data = dev_id;
322 u16 status;
323
324 dev_dbg(drv_data->dev, "%s enter\n", __func__);
325 status = bfin_read(&drv_data->regs->stat) & (TOVF | TUVF | ROVF | RUVF);
326
327 if (status) {
328 bfin_write(&drv_data->regs->stat, status);
329 SSYNC();
330
331 bfin_sport_spi_disable(drv_data);
332 dev_err(drv_data->dev, "status error:%s%s%s%s\n",
333 status & TOVF ? " TOVF" : "",
334 status & TUVF ? " TUVF" : "",
335 status & ROVF ? " ROVF" : "",
336 status & RUVF ? " RUVF" : "");
337 }
338
339 return IRQ_HANDLED;
340}
341
342static void
343bfin_sport_spi_pump_transfers(unsigned long data)
344{
345 struct bfin_sport_spi_master_data *drv_data = (void *)data;
346 struct spi_message *message = NULL;
347 struct spi_transfer *transfer = NULL;
348 struct spi_transfer *previous = NULL;
349 struct bfin_sport_spi_slave_data *chip = NULL;
350 unsigned int bits_per_word;
351 u32 tranf_success = 1;
352 u32 transfer_speed;
353 u8 full_duplex = 0;
354
355 /* Get current state information */
356 message = drv_data->cur_msg;
357 transfer = drv_data->cur_transfer;
358 chip = drv_data->cur_chip;
359
360 if (transfer->speed_hz)
361 transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz);
362 else
363 transfer_speed = chip->baud;
364 bfin_write(&drv_data->regs->tclkdiv, transfer_speed);
365 SSYNC();
366
367 /*
368 * if msg is error or done, report it back using complete() callback
369 */
370
371 /* Handle for abort */
372 if (drv_data->state == ERROR_STATE) {
373 dev_dbg(drv_data->dev, "transfer: we've hit an error\n");
374 message->status = -EIO;
375 bfin_sport_spi_giveback(drv_data);
376 return;
377 }
378
379 /* Handle end of message */
380 if (drv_data->state == DONE_STATE) {
381 dev_dbg(drv_data->dev, "transfer: all done!\n");
382 message->status = 0;
383 bfin_sport_spi_giveback(drv_data);
384 return;
385 }
386
387 /* Delay if requested at end of transfer */
388 if (drv_data->state == RUNNING_STATE) {
389 dev_dbg(drv_data->dev, "transfer: still running ...\n");
390 previous = list_entry(transfer->transfer_list.prev,
391 struct spi_transfer, transfer_list);
392 if (previous->delay_usecs)
393 udelay(previous->delay_usecs);
394 }
395
396 if (transfer->len == 0) {
397 /* Move to next transfer of this msg */
398 drv_data->state = bfin_sport_spi_next_transfer(drv_data);
399 /* Schedule next transfer tasklet */
400 tasklet_schedule(&drv_data->pump_transfers);
401 }
402
403 if (transfer->tx_buf != NULL) {
404 drv_data->tx = (void *)transfer->tx_buf;
405 drv_data->tx_end = drv_data->tx + transfer->len;
406 dev_dbg(drv_data->dev, "tx_buf is %p, tx_end is %p\n",
407 transfer->tx_buf, drv_data->tx_end);
408 } else
409 drv_data->tx = NULL;
410
411 if (transfer->rx_buf != NULL) {
412 full_duplex = transfer->tx_buf != NULL;
413 drv_data->rx = transfer->rx_buf;
414 drv_data->rx_end = drv_data->rx + transfer->len;
415 dev_dbg(drv_data->dev, "rx_buf is %p, rx_end is %p\n",
416 transfer->rx_buf, drv_data->rx_end);
417 } else
418 drv_data->rx = NULL;
419
420 drv_data->cs_change = transfer->cs_change;
421
422 /* Bits per word setup */
423 bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
424 if (bits_per_word == 8)
425 drv_data->ops = &bfin_sport_transfer_ops_u8;
426 else
427 drv_data->ops = &bfin_sport_transfer_ops_u16;
428
429 drv_data->state = RUNNING_STATE;
430
431 if (drv_data->cs_change)
432 bfin_sport_spi_cs_active(chip);
433
434 dev_dbg(drv_data->dev,
435 "now pumping a transfer: width is %d, len is %d\n",
436 bits_per_word, transfer->len);
437
438 /* PIO mode write then read */
439 dev_dbg(drv_data->dev, "doing IO transfer\n");
440
441 bfin_sport_spi_enable(drv_data);
442 if (full_duplex) {
443 /* full duplex mode */
444 BUG_ON((drv_data->tx_end - drv_data->tx) !=
445 (drv_data->rx_end - drv_data->rx));
446 drv_data->ops->duplex(drv_data);
447
448 if (drv_data->tx != drv_data->tx_end)
449 tranf_success = 0;
450 } else if (drv_data->tx != NULL) {
451 /* write only half duplex */
452
453 drv_data->ops->write(drv_data);
454
455 if (drv_data->tx != drv_data->tx_end)
456 tranf_success = 0;
457 } else if (drv_data->rx != NULL) {
458 /* read only half duplex */
459
460 drv_data->ops->read(drv_data);
461 if (drv_data->rx != drv_data->rx_end)
462 tranf_success = 0;
463 }
464 bfin_sport_spi_disable(drv_data);
465
466 if (!tranf_success) {
467 dev_dbg(drv_data->dev, "IO write error!\n");
468 drv_data->state = ERROR_STATE;
469 } else {
 470 /* Update total bytes transferred */
471 message->actual_length += transfer->len;
472 /* Move to next transfer of this msg */
473 drv_data->state = bfin_sport_spi_next_transfer(drv_data);
474 if (drv_data->cs_change)
475 bfin_sport_spi_cs_deactive(chip);
476 }
477
478 /* Schedule next transfer tasklet */
479 tasklet_schedule(&drv_data->pump_transfers);
480}
481
482/* pop a msg from queue and kick off real transfer */
483static void
484bfin_sport_spi_pump_messages(struct work_struct *work)
485{
486 struct bfin_sport_spi_master_data *drv_data;
487 unsigned long flags;
488 struct spi_message *next_msg;
489
490 drv_data = container_of(work, struct bfin_sport_spi_master_data, pump_messages);
491
492 /* Lock queue and check for queue work */
493 spin_lock_irqsave(&drv_data->lock, flags);
494 if (list_empty(&drv_data->queue) || !drv_data->run) {
495 /* pumper kicked off but no work to do */
496 drv_data->busy = 0;
497 spin_unlock_irqrestore(&drv_data->lock, flags);
498 return;
499 }
500
501 /* Make sure we are not already running a message */
502 if (drv_data->cur_msg) {
503 spin_unlock_irqrestore(&drv_data->lock, flags);
504 return;
505 }
506
507 /* Extract head of queue */
508 next_msg = list_entry(drv_data->queue.next,
509 struct spi_message, queue);
510
511 drv_data->cur_msg = next_msg;
512
513 /* Setup the SSP using the per chip configuration */
514 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
515
516 list_del_init(&drv_data->cur_msg->queue);
517
518 /* Initialize message state */
519 drv_data->cur_msg->state = START_STATE;
520 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
521 struct spi_transfer, transfer_list);
522 bfin_sport_spi_restore_state(drv_data);
523 dev_dbg(drv_data->dev, "got a message to pump, "
524 "state is set to: baud %d, cs_gpio %i, ctl 0x%x\n",
525 drv_data->cur_chip->baud, drv_data->cur_chip->cs_gpio,
526 drv_data->cur_chip->ctl_reg);
527
528 dev_dbg(drv_data->dev,
529 "the first transfer len is %d\n",
530 drv_data->cur_transfer->len);
531
532 /* Mark as busy and launch transfers */
533 tasklet_schedule(&drv_data->pump_transfers);
534
535 drv_data->busy = 1;
536 spin_unlock_irqrestore(&drv_data->lock, flags);
537}
538
539/*
540 * got a msg to transfer, queue it in drv_data->queue.
541 * And kick off message pumper
542 */
543static int
544bfin_sport_spi_transfer(struct spi_device *spi, struct spi_message *msg)
545{
546 struct bfin_sport_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
547 unsigned long flags;
548
549 spin_lock_irqsave(&drv_data->lock, flags);
550
551 if (!drv_data->run) {
552 spin_unlock_irqrestore(&drv_data->lock, flags);
553 return -ESHUTDOWN;
554 }
555
556 msg->actual_length = 0;
557 msg->status = -EINPROGRESS;
558 msg->state = START_STATE;
559
560 dev_dbg(&spi->dev, "adding an msg in transfer()\n");
561 list_add_tail(&msg->queue, &drv_data->queue);
562
563 if (drv_data->run && !drv_data->busy)
564 queue_work(drv_data->workqueue, &drv_data->pump_messages);
565
566 spin_unlock_irqrestore(&drv_data->lock, flags);
567
568 return 0;
569}
570
571/* Called every time common spi devices change state */
572static int
573bfin_sport_spi_setup(struct spi_device *spi)
574{
575 struct bfin_sport_spi_slave_data *chip, *first = NULL;
576 int ret;
577
578 /* Only alloc (or use chip_info) on first setup */
579 chip = spi_get_ctldata(spi);
580 if (chip == NULL) {
581 struct bfin5xx_spi_chip *chip_info;
582
583 chip = first = kzalloc(sizeof(*chip), GFP_KERNEL);
584 if (!chip)
585 return -ENOMEM;
586
587 /* platform chip_info isn't required */
588 chip_info = spi->controller_data;
589 if (chip_info) {
590 /*
 591 * DITFS and TDTYPE are the only things we don't set, but
592 * they probably shouldn't be changed by people.
593 */
594 if (chip_info->ctl_reg || chip_info->enable_dma) {
595 ret = -EINVAL;
596 dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields");
597 goto error;
598 }
599 chip->cs_chg_udelay = chip_info->cs_chg_udelay;
600 chip->idle_tx_val = chip_info->idle_tx_val;
601 spi->bits_per_word = chip_info->bits_per_word;
602 }
603 }
604
605 if (spi->bits_per_word != 8 && spi->bits_per_word != 16) {
606 ret = -EINVAL;
607 goto error;
608 }
609
 610 /* translate the common SPI framework settings into our registers;
 611 * the following configuration is the same for tx and rx.
612 */
613
614 if (spi->mode & SPI_CPHA)
615 chip->ctl_reg &= ~TCKFE;
616 else
617 chip->ctl_reg |= TCKFE;
618
619 if (spi->mode & SPI_LSB_FIRST)
620 chip->ctl_reg |= TLSBIT;
621 else
622 chip->ctl_reg &= ~TLSBIT;
623
624 /* Sport in master mode */
625 chip->ctl_reg |= ITCLK | ITFS | TFSR | LATFS | LTFS;
626
627 chip->baud = bfin_sport_hz_to_spi_baud(spi->max_speed_hz);
628
629 chip->cs_gpio = spi->chip_select;
630 ret = gpio_request(chip->cs_gpio, spi->modalias);
631 if (ret)
632 goto error;
633
634 dev_dbg(&spi->dev, "setup spi chip %s, width is %d\n",
635 spi->modalias, spi->bits_per_word);
636 dev_dbg(&spi->dev, "ctl_reg is 0x%x, GPIO is %i\n",
637 chip->ctl_reg, spi->chip_select);
638
639 spi_set_ctldata(spi, chip);
640
641 bfin_sport_spi_cs_deactive(chip);
642
643 return ret;
644
645 error:
646 kfree(first);
647 return ret;
648}
649
650/*
651 * callback for spi framework.
652 * clean driver specific data
653 */
654static void
655bfin_sport_spi_cleanup(struct spi_device *spi)
656{
657 struct bfin_sport_spi_slave_data *chip = spi_get_ctldata(spi);
658
659 if (!chip)
660 return;
661
662 gpio_free(chip->cs_gpio);
663
664 kfree(chip);
665}
666
667static int
668bfin_sport_spi_init_queue(struct bfin_sport_spi_master_data *drv_data)
669{
670 INIT_LIST_HEAD(&drv_data->queue);
671 spin_lock_init(&drv_data->lock);
672
673 drv_data->run = false;
674 drv_data->busy = 0;
675
676 /* init transfer tasklet */
677 tasklet_init(&drv_data->pump_transfers,
678 bfin_sport_spi_pump_transfers, (unsigned long)drv_data);
679
680 /* init messages workqueue */
681 INIT_WORK(&drv_data->pump_messages, bfin_sport_spi_pump_messages);
682 drv_data->workqueue =
683 create_singlethread_workqueue(dev_name(drv_data->master->dev.parent));
684 if (drv_data->workqueue == NULL)
685 return -EBUSY;
686
687 return 0;
688}
689
690static int
691bfin_sport_spi_start_queue(struct bfin_sport_spi_master_data *drv_data)
692{
693 unsigned long flags;
694
695 spin_lock_irqsave(&drv_data->lock, flags);
696
697 if (drv_data->run || drv_data->busy) {
698 spin_unlock_irqrestore(&drv_data->lock, flags);
699 return -EBUSY;
700 }
701
702 drv_data->run = true;
703 drv_data->cur_msg = NULL;
704 drv_data->cur_transfer = NULL;
705 drv_data->cur_chip = NULL;
706 spin_unlock_irqrestore(&drv_data->lock, flags);
707
708 queue_work(drv_data->workqueue, &drv_data->pump_messages);
709
710 return 0;
711}
712
713static inline int
714bfin_sport_spi_stop_queue(struct bfin_sport_spi_master_data *drv_data)
715{
716 unsigned long flags;
717 unsigned limit = 500;
718 int status = 0;
719
720 spin_lock_irqsave(&drv_data->lock, flags);
721
722 /*
723 * This is a bit lame, but is optimized for the common execution path.
724 * A wait_queue on the drv_data->busy could be used, but then the common
725 * execution path (pump_messages) would be required to call wake_up or
726 * friends on every SPI message. Do this instead
727 */
728 drv_data->run = false;
729 while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
730 spin_unlock_irqrestore(&drv_data->lock, flags);
731 msleep(10);
732 spin_lock_irqsave(&drv_data->lock, flags);
733 }
734
735 if (!list_empty(&drv_data->queue) || drv_data->busy)
736 status = -EBUSY;
737
738 spin_unlock_irqrestore(&drv_data->lock, flags);
739
740 return status;
741}
742
743static inline int
744bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data)
745{
746 int status;
747
748 status = bfin_sport_spi_stop_queue(drv_data);
749 if (status)
750 return status;
751
752 destroy_workqueue(drv_data->workqueue);
753
754 return 0;
755}
756
757static int __devinit
758bfin_sport_spi_probe(struct platform_device *pdev)
759{
760 struct device *dev = &pdev->dev;
761 struct bfin5xx_spi_master *platform_info;
762 struct spi_master *master;
763 struct resource *res, *ires;
764 struct bfin_sport_spi_master_data *drv_data;
765 int status;
766
767 platform_info = dev->platform_data;
768
769 /* Allocate master with space for drv_data */
770 master = spi_alloc_master(dev, sizeof(*master) + 16);
771 if (!master) {
772 dev_err(dev, "cannot alloc spi_master\n");
773 return -ENOMEM;
774 }
775
776 drv_data = spi_master_get_devdata(master);
777 drv_data->master = master;
778 drv_data->dev = dev;
779 drv_data->pin_req = platform_info->pin_req;
780
781 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
782 master->bus_num = pdev->id;
783 master->num_chipselect = platform_info->num_chipselect;
784 master->cleanup = bfin_sport_spi_cleanup;
785 master->setup = bfin_sport_spi_setup;
786 master->transfer = bfin_sport_spi_transfer;
787
788 /* Find and map our resources */
789 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
790 if (res == NULL) {
791 dev_err(dev, "cannot get IORESOURCE_MEM\n");
792 status = -ENOENT;
793 goto out_error_get_res;
794 }
795
796 drv_data->regs = ioremap(res->start, resource_size(res));
797 if (drv_data->regs == NULL) {
798 dev_err(dev, "cannot map registers\n");
799 status = -ENXIO;
800 goto out_error_ioremap;
801 }
802
803 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
804 if (!ires) {
805 dev_err(dev, "cannot get IORESOURCE_IRQ\n");
806 status = -ENODEV;
807 goto out_error_get_ires;
808 }
809 drv_data->err_irq = ires->start;
810
 811 /* Initialize and start the queue */
812 status = bfin_sport_spi_init_queue(drv_data);
813 if (status) {
814 dev_err(dev, "problem initializing queue\n");
815 goto out_error_queue_alloc;
816 }
817
818 status = bfin_sport_spi_start_queue(drv_data);
819 if (status) {
820 dev_err(dev, "problem starting queue\n");
821 goto out_error_queue_alloc;
822 }
823
824 status = request_irq(drv_data->err_irq, sport_err_handler,
825 0, "sport_spi_err", drv_data);
826 if (status) {
827 dev_err(dev, "unable to request sport err irq\n");
828 goto out_error_irq;
829 }
830
831 status = peripheral_request_list(drv_data->pin_req, DRV_NAME);
832 if (status) {
833 dev_err(dev, "requesting peripherals failed\n");
834 goto out_error_peripheral;
835 }
836
837 /* Register with the SPI framework */
838 platform_set_drvdata(pdev, drv_data);
839 status = spi_register_master(master);
840 if (status) {
841 dev_err(dev, "problem registering spi master\n");
842 goto out_error_master;
843 }
844
845 dev_info(dev, "%s, regs_base@%p\n", DRV_DESC, drv_data->regs);
846 return 0;
847
848 out_error_master:
849 peripheral_free_list(drv_data->pin_req);
850 out_error_peripheral:
851 free_irq(drv_data->err_irq, drv_data);
852 out_error_irq:
853 out_error_queue_alloc:
854 bfin_sport_spi_destroy_queue(drv_data);
855 out_error_get_ires:
856 iounmap(drv_data->regs);
857 out_error_ioremap:
858 out_error_get_res:
859 spi_master_put(master);
860
861 return status;
862}
863
864/* stop hardware and remove the driver */
865static int __devexit
866bfin_sport_spi_remove(struct platform_device *pdev)
867{
868 struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
869 int status = 0;
870
871 if (!drv_data)
872 return 0;
873
874 /* Remove the queue */
875 status = bfin_sport_spi_destroy_queue(drv_data);
876 if (status)
877 return status;
878
879 /* Disable the SSP at the peripheral and SOC level */
880 bfin_sport_spi_disable(drv_data);
881
882 /* Disconnect from the SPI framework */
883 spi_unregister_master(drv_data->master);
884
885 peripheral_free_list(drv_data->pin_req);
886
887 /* Prevent double remove */
888 platform_set_drvdata(pdev, NULL);
889
890 return 0;
891}
892
893#ifdef CONFIG_PM
894static int
895bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state)
896{
897 struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
898 int status;
899
900 status = bfin_sport_spi_stop_queue(drv_data);
901 if (status)
902 return status;
903
904 /* stop hardware */
905 bfin_sport_spi_disable(drv_data);
906
907 return status;
908}
909
910static int
911bfin_sport_spi_resume(struct platform_device *pdev)
912{
913 struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
914 int status;
915
916 /* Enable the SPI interface */
917 bfin_sport_spi_enable(drv_data);
918
919 /* Start the queue running */
920 status = bfin_sport_spi_start_queue(drv_data);
921 if (status)
922 dev_err(drv_data->dev, "problem resuming queue\n");
923
924 return status;
925}
926#else
927# define bfin_sport_spi_suspend NULL
928# define bfin_sport_spi_resume NULL
929#endif
930
931static struct platform_driver bfin_sport_spi_driver = {
932 .driver = {
933 .name = DRV_NAME,
934 .owner = THIS_MODULE,
935 },
936 .probe = bfin_sport_spi_probe,
937 .remove = __devexit_p(bfin_sport_spi_remove),
938 .suspend = bfin_sport_spi_suspend,
939 .resume = bfin_sport_spi_resume,
940};
941
942static int __init bfin_sport_spi_init(void)
943{
944 return platform_driver_register(&bfin_sport_spi_driver);
945}
946module_init(bfin_sport_spi_init);
947
948static void __exit bfin_sport_spi_exit(void)
949{
950 platform_driver_unregister(&bfin_sport_spi_driver);
951}
952module_exit(bfin_sport_spi_exit);
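Since this controller drives chip select through a plain GPIO (chip->cs_gpio is taken straight from spi->chip_select in bfin_sport_spi_setup()), a board hooks devices up by passing a GPIO number in the chip_select slot (hypothetical board code; GPIO_PF10, the speed, and the bus number are made-up values):

    static struct spi_board_info board_spi_devices[] __initdata = {
        {
            .modalias     = "spidev",    /* any protocol driver */
            .max_speed_hz = 5000000,
            .bus_num      = 0,           /* matches the platform device id */
            .chip_select  = GPIO_PF10,   /* CS pin, claimed via gpio_request() */
        },
    };

    spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices));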
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c
index a3938958147c..32a40876532f 100644
--- a/drivers/spi/tle62x0.c
+++ b/drivers/spi/tle62x0.c
@@ -283,7 +283,7 @@ static int __devinit tle62x0_probe(struct spi_device *spi)
283 return 0; 283 return 0;
284 284
285 err_gpios: 285 err_gpios:
286 for (; ptr > 0; ptr--) 286 while (--ptr >= 0)
287 device_remove_file(&spi->dev, gpio_attrs[ptr]); 287 device_remove_file(&spi->dev, gpio_attrs[ptr]);
288 288
289 device_remove_file(&spi->dev, &dev_attr_status_show); 289 device_remove_file(&spi->dev, &dev_attr_status_show);
@@ -301,6 +301,7 @@ static int __devexit tle62x0_remove(struct spi_device *spi)
301 for (ptr = 0; ptr < st->nr_gpio; ptr++) 301 for (ptr = 0; ptr < st->nr_gpio; ptr++)
302 device_remove_file(&spi->dev, gpio_attrs[ptr]); 302 device_remove_file(&spi->dev, gpio_attrs[ptr]);
303 303
304 device_remove_file(&spi->dev, &dev_attr_status_show);
304 kfree(st); 305 kfree(st);
305 return 0; 306 return 0;
306} 307}
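With ptr holding the index whose device_create_file() failed, attributes 0..ptr-1 are the ones that actually exist; the old loop removed indexes ptr..1, leaking index 0 and touching the never-created index ptr. The replacement is the standard unwind idiom:

    /* undo exactly the attributes created before the failure at index ptr */
    while (--ptr >= 0)
        device_remove_file(&spi->dev, gpio_attrs[ptr]);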
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index aed4e464d31c..dee2a2c909f5 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -31,7 +31,7 @@
31#include <scsi/scsi_host.h> 31#include <scsi/scsi_host.h>
32#include <scsi/scsi_device.h> 32#include <scsi/scsi_device.h>
33#include <scsi/scsi_cmnd.h> 33#include <scsi/scsi_cmnd.h>
34#include <scsi/libsas.h> /* For TASK_ATTR_* */ 34#include <scsi/scsi_tcq.h>
35 35
36#include <target/target_core_base.h> 36#include <target/target_core_base.h>
37#include <target/target_core_transport.h> 37#include <target/target_core_transport.h>
@@ -95,17 +95,17 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
95 if (sc->device->tagged_supported) { 95 if (sc->device->tagged_supported) {
96 switch (sc->tag) { 96 switch (sc->tag) {
97 case HEAD_OF_QUEUE_TAG: 97 case HEAD_OF_QUEUE_TAG:
98 sam_task_attr = TASK_ATTR_HOQ; 98 sam_task_attr = MSG_HEAD_TAG;
99 break; 99 break;
100 case ORDERED_QUEUE_TAG: 100 case ORDERED_QUEUE_TAG:
101 sam_task_attr = TASK_ATTR_ORDERED; 101 sam_task_attr = MSG_ORDERED_TAG;
102 break; 102 break;
103 default: 103 default:
104 sam_task_attr = TASK_ATTR_SIMPLE; 104 sam_task_attr = MSG_SIMPLE_TAG;
105 break; 105 break;
106 } 106 }
107 } else 107 } else
108 sam_task_attr = TASK_ATTR_SIMPLE; 108 sam_task_attr = MSG_SIMPLE_TAG;
109 109
110 /* 110 /*
111 * Initialize struct se_cmd descriptor from target_core_mod infrastructure 111 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
@@ -379,7 +379,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
379 * Initialize struct se_cmd descriptor from target_core_mod infrastructure 379 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
380 */ 380 */
381 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0, 381 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
382 DMA_NONE, TASK_ATTR_SIMPLE, 382 DMA_NONE, MSG_SIMPLE_TAG,
383 &tl_cmd->tl_sense_buf[0]); 383 &tl_cmd->tl_sense_buf[0]);
384 /* 384 /*
385 * Allocate the LUN_RESET TMR 385 * Allocate the LUN_RESET TMR
@@ -939,18 +939,6 @@ static u16 tcm_loop_get_fabric_sense_len(void)
939 return 0; 939 return 0;
940} 940}
941 941
942static u64 tcm_loop_pack_lun(unsigned int lun)
943{
944 u64 result;
945
946 /* LSB of lun into byte 1 big-endian */
947 result = ((lun & 0xff) << 8);
948 /* use flat space addressing method */
949 result |= 0x40 | ((lun >> 8) & 0x3f);
950
951 return cpu_to_le64(result);
952}
953
954static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba) 942static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
955{ 943{
956 switch (tl_hba->tl_proto_id) { 944 switch (tl_hba->tl_proto_id) {
@@ -1481,7 +1469,6 @@ static int tcm_loop_register_configfs(void)
1481 fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len; 1469 fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len;
1482 fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len; 1470 fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len;
1483 fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove; 1471 fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove;
1484 fabric->tf_ops.pack_lun = &tcm_loop_pack_lun;
1485 1472
1486 tf_cg = &fabric->tf_group; 1473 tf_cg = &fabric->tf_group;
1487 /* 1474 /*
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index a5f44a6e6e1d..ee6fad979b50 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -497,10 +497,6 @@ static int target_fabric_tf_ops_check(
 		printk(KERN_ERR "Missing tfo->is_state_remove()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->pack_lun)) {
-		printk(KERN_ERR "Missing tfo->pack_lun()\n");
-		return -EINVAL;
-	}
 	/*
 	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
 	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d25e20829012..8407f9ca2b31 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -38,6 +38,7 @@
 #include <net/sock.h>
 #include <net/tcp.h>
 #include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_device.h>
@@ -150,13 +151,13 @@ out:
 
 	{
 		struct se_device *dev = se_lun->lun_se_dev;
-		spin_lock(&dev->stats_lock);
+		spin_lock_irq(&dev->stats_lock);
 		dev->num_cmds++;
 		if (se_cmd->data_direction == DMA_TO_DEVICE)
 			dev->write_bytes += se_cmd->data_length;
 		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
 			dev->read_bytes += se_cmd->data_length;
-		spin_unlock(&dev->stats_lock);
+		spin_unlock_irq(&dev->stats_lock);
 	}
 
 	/*
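Aside: switching stats_lock to the _irq lock variants matters because a spinlock taken in process context without masking local interrupts can self-deadlock if an interrupt handler on the same CPU spins on that same lock. That these counters are also reachable from interrupt-driven completion paths is an inference from the change itself, not stated in the patch. A module-style sketch of the rule (illustrative names, not the driver's code):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(stats_lock);
    static unsigned long num_cmds;

    /* Process context: mask local interrupts while holding the lock,
     * or an IRQ taking stats_lock on this CPU would spin forever. */
    static void bump_stats(void)
    {
        spin_lock_irq(&stats_lock);
        num_cmds++;
        spin_unlock_irq(&stats_lock);
    }

    /* Interrupt context: IRQs are already off, plain spin_lock suffices. */
    static void bump_stats_irq(void)
    {
        spin_lock(&stats_lock);
        num_cmds++;
        spin_unlock(&stats_lock);
    }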
@@ -658,8 +659,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
 	struct se_session *se_sess = SE_SESS(se_cmd);
 	struct se_task *se_task;
 	unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
-	u32 cdb_offset = 0, lun_count = 0, offset = 8;
-	u64 i, lun;
+	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
 
 	list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
 		break;
@@ -675,15 +675,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
 	 * a $FABRIC_MOD. In that case, report LUN=0 only.
 	 */
 	if (!(se_sess)) {
-		lun = 0;
-		buf[offset++] = ((lun >> 56) & 0xff);
-		buf[offset++] = ((lun >> 48) & 0xff);
-		buf[offset++] = ((lun >> 40) & 0xff);
-		buf[offset++] = ((lun >> 32) & 0xff);
-		buf[offset++] = ((lun >> 24) & 0xff);
-		buf[offset++] = ((lun >> 16) & 0xff);
-		buf[offset++] = ((lun >> 8) & 0xff);
-		buf[offset++] = (lun & 0xff);
+		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
 		lun_count = 1;
 		goto done;
 	}
@@ -703,15 +695,8 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
 		if ((cdb_offset + 8) >= se_cmd->data_length)
 			continue;
 
-		lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
-		buf[offset++] = ((lun >> 56) & 0xff);
-		buf[offset++] = ((lun >> 48) & 0xff);
-		buf[offset++] = ((lun >> 40) & 0xff);
-		buf[offset++] = ((lun >> 32) & 0xff);
-		buf[offset++] = ((lun >> 24) & 0xff);
-		buf[offset++] = ((lun >> 16) & 0xff);
-		buf[offset++] = ((lun >> 8) & 0xff);
-		buf[offset++] = (lun & 0xff);
+		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+		offset += 8;
 		cdb_offset += 8;
 	}
 	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
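Aside: both REPORT LUNS hunks above drop the open-coded byte shifting (and the per-fabric pack_lun() hook) in favour of the SCSI midlayer's common int_to_scsilun() helper. The standalone program below mirrors that helper's encoding; the loop body is paraphrased from the midlayer and is an assumption of this sketch. Each 16-bit LUN level is stored big-endian, so a LUN below 256 lands in byte 1 with byte 0 zero:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* Mirrors the kernel's int_to_scsilun() encoding (assumed here). */
    static void encode_lun(unsigned int lun, uint8_t out[8])
    {
        int i;

        memset(out, 0, 8);
        for (i = 0; i < 8; i += 2) {
            out[i] = (lun >> 8) & 0xFF;
            out[i + 1] = lun & 0xFF;
            lun >>= 16;
        }
    }

    int main(void)
    {
        uint8_t buf[8];
        int i;

        encode_lun(5, buf);
        for (i = 0; i < 8; i++)
            printf("%02x ", buf[i]);
        printf("\n");   /* prints: 00 05 00 00 00 00 00 00 */
        return 0;
    }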
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7ff6a35f26ac..331d423fd0e0 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -41,7 +41,7 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_host.h>
-#include <scsi/libsas.h> /* For TASK_ATTR_* */
+#include <scsi/scsi_tcq.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_device.h>
@@ -911,7 +911,7 @@ static int pscsi_do_task(struct se_task *task)
 	 * descriptor
 	 */
 	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
-			(task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ),
+			(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
 			pscsi_req_done);
 
 	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 4a109835e420..59b8b9c5ad72 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -55,7 +55,8 @@ struct se_tmr_req *core_tmr_alloc_req(
 {
 	struct se_tmr_req *tmr;
 
-	tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL);
+	tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
+					GFP_ATOMIC : GFP_KERNEL);
 	if (!(tmr)) {
 		printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
 		return ERR_PTR(-ENOMEM);
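Aside: the allocation now picks its GFP flags from the calling context. GFP_KERNEL may sleep and is therefore only legal in process context; GFP_ATOMIC never sleeps but can fail under memory pressure, so callers must handle NULL. A minimal sketch of the idiom (the helper name is this sketch's own):

    #include <linux/hardirq.h>
    #include <linux/slab.h>

    /* Choose a sleeping or non-sleeping allocation based on context. */
    static void *alloc_any_context(size_t size)
    {
        return kzalloc(size, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
    }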
@@ -398,9 +399,9 @@ int core_tmr_lun_reset(
 		printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
 	}
 
-	spin_lock(&dev->stats_lock);
+	spin_lock_irq(&dev->stats_lock);
 	dev->num_resets++;
-	spin_unlock(&dev->stats_lock);
+	spin_unlock_irq(&dev->stats_lock);
 
 	DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
 		(preempt_and_abort_list) ? "Preempt" : "TMR",
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b9d3501bdd91..4dafeb8b5638 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -42,7 +42,7 @@
 #include <net/tcp.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
-#include <scsi/libsas.h> /* For TASK_ATTR_* */
+#include <scsi/scsi_tcq.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_device.h>
@@ -762,7 +762,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 	transport_all_task_dev_remove_state(cmd);
 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
 
-	transport_free_dev_tasks(cmd);
 
 check_lun:
 	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
@@ -1075,7 +1074,7 @@ static inline int transport_add_task_check_sam_attr(
 	 * head of the struct se_device->execute_task_list, and task_prev
 	 * after that for each subsequent task
 	 */
-	if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) {
+	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
 		list_add(&task->t_execute_list,
 				(task_prev != NULL) ?
 				&task_prev->t_execute_list :
@@ -1195,6 +1194,7 @@ transport_get_task_from_execute_queue(struct se_device *dev)
 			break;
 
 		list_del(&task->t_execute_list);
+		atomic_set(&task->task_execute_queue, 0);
 		atomic_dec(&dev->execute_tasks);
 
 		return task;
@@ -1210,8 +1210,14 @@ void transport_remove_task_from_execute_queue(
 {
 	unsigned long flags;
 
+	if (atomic_read(&task->task_execute_queue) == 0) {
+		dump_stack();
+		return;
+	}
+
 	spin_lock_irqsave(&dev->execute_task_lock, flags);
 	list_del(&task->t_execute_list);
+	atomic_set(&task->task_execute_queue, 0);
 	atomic_dec(&dev->execute_tasks);
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
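Aside: the new atomic task_execute_queue flag turns a second removal into a diagnosable no-op (with a stack trace) instead of a list corruption. A generic sketch of the pattern; types and names are this sketch's own, and locking around the list itself is omitted for brevity:

    #include <linux/kernel.h>
    #include <linux/list.h>
    #include <linux/atomic.h>

    struct queued_item {
        struct list_head entry;
        atomic_t on_queue;      /* 1 while linked on the queue */
    };

    /* A double remove prints a stack trace and returns harmlessly. */
    static void item_remove(struct queued_item *it)
    {
        if (atomic_read(&it->on_queue) == 0) {
            dump_stack();
            return;
        }
        list_del(&it->entry);
        atomic_set(&it->on_queue, 0);
    }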
@@ -1867,7 +1873,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
 	if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
 		return 0;
 
-	if (cmd->sam_task_attr == TASK_ATTR_ACA) {
+	if (cmd->sam_task_attr == MSG_ACA_TAG) {
 		DEBUG_STA("SAM Task Attribute ACA"
 			" emulation is not supported\n");
 		return -1;
@@ -2058,6 +2064,13 @@ int transport_generic_handle_tmr(
 }
 EXPORT_SYMBOL(transport_generic_handle_tmr);
 
+void transport_generic_free_cmd_intr(
+	struct se_cmd *cmd)
+{
+	transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
+}
+EXPORT_SYMBOL(transport_generic_free_cmd_intr);
+
 static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 {
 	struct se_task *task, *task_tmp;
@@ -2504,7 +2517,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
 	 * to allow the passed struct se_cmd list of tasks to the front of the list.
 	 */
-	if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
 		atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
 		smp_mb__after_atomic_inc();
 		DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
@@ -2512,7 +2525,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 			T_TASK(cmd)->t_task_cdb[0],
 			cmd->se_ordered_id);
 		return 1;
-	} else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
 		spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
 		list_add_tail(&cmd->se_ordered_list,
 				&SE_DEV(cmd)->ordered_cmd_list);
@@ -3411,7 +3424,7 @@ static int transport_generic_cmd_sequencer(
 		 * See spc4r17 section 5.3
 		 */
 		if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
-			cmd->sam_task_attr = TASK_ATTR_HOQ;
+			cmd->sam_task_attr = MSG_HEAD_TAG;
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
 		break;
 	case READ_BUFFER:
@@ -3619,7 +3632,7 @@ static int transport_generic_cmd_sequencer(
 		 * See spc4r17 section 5.3
 		 */
 		if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
-			cmd->sam_task_attr = TASK_ATTR_HOQ;
+			cmd->sam_task_attr = MSG_HEAD_TAG;
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
 		break;
 	default:
@@ -3777,21 +3790,21 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 	struct se_cmd *cmd_p, *cmd_tmp;
 	int new_active_tasks = 0;
 
-	if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) {
+	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
 		atomic_dec(&dev->simple_cmds);
 		smp_mb__after_atomic_dec();
 		dev->dev_cur_ordered_id++;
 		DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
 			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
 			cmd->se_ordered_id);
-	} else if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
 		atomic_dec(&dev->dev_hoq_count);
 		smp_mb__after_atomic_dec();
 		dev->dev_cur_ordered_id++;
 		DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
 			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
 			cmd->se_ordered_id);
-	} else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
 		spin_lock(&dev->ordered_cmd_lock);
 		list_del(&cmd->se_ordered_list);
 		atomic_dec(&dev->dev_ordered_sync);
@@ -3824,7 +3837,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 		new_active_tasks++;
 
 		spin_lock(&dev->delayed_cmd_lock);
-		if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED)
+		if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
 			break;
 	}
 	spin_unlock(&dev->delayed_cmd_lock);
@@ -4776,18 +4789,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 			sg_end_cur->page_link &= ~0x02;
 
 			sg_chain(sg_head, task_sg_num, sg_head_cur);
-			sg_count += (task->task_sg_num + 1);
-		} else
 			sg_count += task->task_sg_num;
+			task_sg_num = (task->task_sg_num + 1);
+		} else {
+			sg_chain(sg_head, task_sg_num, sg_head_cur);
+			sg_count += task->task_sg_num;
+			task_sg_num = task->task_sg_num;
+		}
 
 		sg_head = sg_head_cur;
 		sg_link = sg_link_cur;
-		task_sg_num = task->task_sg_num;
 		continue;
 	}
 	sg_head = sg_first = &task->task_sg[0];
 	sg_link = &task->task_sg[task->task_sg_num];
-	task_sg_num = task->task_sg_num;
 	/*
 	 * Check for single task..
 	 */
@@ -4798,9 +4813,12 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 	 */
 	sg_end = &task->task_sg[task->task_sg_num - 1];
 	sg_end->page_link &= ~0x02;
-	sg_count += (task->task_sg_num + 1);
-	} else
 	sg_count += task->task_sg_num;
+		task_sg_num = (task->task_sg_num + 1);
+	} else {
+		sg_count += task->task_sg_num;
+		task_sg_num = task->task_sg_num;
+	}
 	}
 	/*
 	 * Setup the starting pointer and total t_tasks_sg_linked_no including
@@ -4809,21 +4827,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 	T_TASK(cmd)->t_tasks_sg_chained = sg_first;
 	T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
 
-	DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and"
-		" t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained,
+	DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and"
+		" t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained,
 		T_TASK(cmd)->t_tasks_sg_chained_no);
 
 	for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
 			T_TASK(cmd)->t_tasks_sg_chained_no, i) {
 
-		DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n",
-			sg, sg_page(sg), sg->length, sg->offset);
+		DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
+			i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
 		if (sg_is_chain(sg))
 			DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
 		if (sg_is_last(sg))
 			DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
 	}
-
 }
 EXPORT_SYMBOL(transport_do_task_sg_chain);
 
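Aside: the reworked accounting in the two hunks above follows from how scatterlist chaining works: sg_chain() consumes the last slot of the previous table as a chain link, so a table that is chained onto another one contributes task_sg_num + 1 entries, while the final table contributes exactly task_sg_num. A minimal sketch of the call, assuming the first table was allocated with one spare slot for the link:

    #include <linux/scatterlist.h>

    /* first[] must have been sized first_nents + 1: the extra slot
     * becomes the chain link, which is why a chained table counts
     * one more entry than it has data segments. */
    static void chain_tables(struct scatterlist *first,
                             unsigned int first_nents,
                             struct scatterlist *second)
    {
        sg_chain(first, first_nents + 1, second);
    }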
@@ -5297,6 +5314,8 @@ void transport_generic_free_cmd(
 	if (wait_for_tasks && cmd->transport_wait_for_tasks)
 		cmd->transport_wait_for_tasks(cmd, 0, 0);
 
+	transport_free_dev_tasks(cmd);
+
 	transport_generic_remove(cmd, release_to_pool,
 			session_reinstatement);
 	}
@@ -6132,6 +6151,9 @@ get_cmd:
 	case TRANSPORT_REMOVE:
 		transport_generic_remove(cmd, 1, 0);
 		break;
+	case TRANSPORT_FREE_CMD_INTR:
+		transport_generic_free_cmd(cmd, 0, 1, 0);
+		break;
 	case TRANSPORT_PROCESS_TMR:
 		transport_generic_do_tmr(cmd);
 		break;
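Aside: TRANSPORT_FREE_CMD_INTR lets interrupt-context callers hand the final, potentially sleeping teardown to the target core's processing thread. The same shape can be built from stock primitives; a hedged workqueue-based sketch (the target core uses its own command queue rather than a workqueue, and all names here are illustrative):

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct my_cmd {
        struct work_struct free_work;
        /* ... command payload ... */
    };

    /* Runs later in process context, where sleeping teardown is legal. */
    static void my_cmd_free_work(struct work_struct *work)
    {
        struct my_cmd *cmd = container_of(work, struct my_cmd, free_work);

        kfree(cmd);
    }

    /* Safe from interrupt context: only queues the deferred free. */
    static void my_cmd_free_intr(struct my_cmd *cmd)
    {
        INIT_WORK(&cmd->free_work, my_cmd_free_work);
        schedule_work(&cmd->free_work);
    }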
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 49e51778f733..c056a1132ae1 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -35,6 +35,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
 #include <scsi/libfc.h>
 #include <scsi/fc_encode.h>
 
@@ -592,8 +593,25 @@ static void ft_send_cmd(struct ft_cmd *cmd)
 	case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
 		goto err;	/* TBD not supported by tcm_fc yet */
 	}
+	/*
+	 * Locate the SAM Task Attr from fc_pri_ta
+	 */
+	switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
+	case FCP_PTA_HEADQ:
+		task_attr = MSG_HEAD_TAG;
+		break;
+	case FCP_PTA_ORDERED:
+		task_attr = MSG_ORDERED_TAG;
+		break;
+	case FCP_PTA_ACA:
+		task_attr = MSG_ACA_TAG;
+		break;
+	case FCP_PTA_SIMPLE: /* Fallthrough */
+	default:
+		task_attr = MSG_SIMPLE_TAG;
+	}
+
 
-	/* FCP_PTA_ maps 1:1 to TASK_ATTR_ */
-	task_attr = fcp->fc_pri_ta & FCP_PTA_MASK;
 	data_len = ntohl(fcp->fc_dl);
 	cmd->cdb = fcp->fc_cdb;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index fcdbbffe88cc..84e868c255dd 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -519,13 +519,6 @@ static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
 	return tpg->index;
 }
 
-static u64 ft_pack_lun(unsigned int index)
-{
-	WARN_ON(index >= 256);
-	/* Caller wants this byte-swapped */
-	return cpu_to_le64((index & 0xff) << 8);
-}
-
 static struct target_core_fabric_ops ft_fabric_ops = {
 	.get_fabric_name = ft_get_fabric_name,
 	.get_fabric_proto_ident = fc_get_fabric_proto_ident,
@@ -564,7 +557,6 @@ static struct target_core_fabric_ops ft_fabric_ops = {
 	.get_fabric_sense_len = ft_get_fabric_sense_len,
 	.set_fabric_sense_len = ft_set_fabric_sense_len,
 	.is_state_remove = ft_is_state_remove,
-	.pack_lun = ft_pack_lun,
 	/*
 	 * Setup function pointers for generic logic in
 	 * target_core_fabric_configfs.c
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index fc6f2a5bde01..0b1c82ad6805 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -499,7 +499,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 	dev_set_drvdata(hwmon->device, hwmon);
 	result = device_create_file(hwmon->device, &dev_attr_name);
 	if (result)
-		goto unregister_hwmon_device;
+		goto free_mem;
 
 register_sys_interface:
 	tz->hwmon = hwmon;
@@ -513,7 +513,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 	sysfs_attr_init(&tz->temp_input.attr.attr);
 	result = device_create_file(hwmon->device, &tz->temp_input.attr);
 	if (result)
-		goto unregister_hwmon_device;
+		goto unregister_name;
 
 	if (tz->ops->get_crit_temp) {
 		unsigned long temperature;
@@ -527,7 +527,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 			result = device_create_file(hwmon->device,
 						    &tz->temp_crit.attr);
 			if (result)
-				goto unregister_hwmon_device;
+				goto unregister_input;
 		}
 	}
 
@@ -539,9 +539,9 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 
 	return 0;
 
-unregister_hwmon_device:
-	device_remove_file(hwmon->device, &tz->temp_crit.attr);
+unregister_input:
 	device_remove_file(hwmon->device, &tz->temp_input.attr);
+unregister_name:
 	if (new_hwmon_device) {
 		device_remove_file(hwmon->device, &dev_attr_name);
 		hwmon_device_unregister(hwmon->device);
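Aside: the thermal fix above is a classic goto-unwind repair: each failure jumps to a label that undoes only the steps that had already succeeded, in reverse order, so no cleanup ever runs for a step that never happened. A standalone illustration of the idiom (the names and resources are this sketch's own, not the driver's):

    #include <stdio.h>
    #include <stdlib.h>

    /* Each label undoes only what already succeeded, in reverse order. */
    static int setup(void)
    {
        char *a, *b;
        FILE *f;

        a = malloc(16);
        if (!a)
            goto fail;
        b = malloc(16);
        if (!b)
            goto free_a;
        f = fopen("/tmp/example", "w");
        if (!f)
            goto free_b;

        fclose(f);
        free(b);
        free(a);
        return 0;

    free_b:
        free(b);
    free_a:
        free(a);
    fail:
        return -1;
    }

    int main(void)
    {
        return setup() ? 1 : 0;
    }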
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 652bdac8ce8e..6d5d6e679fc7 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1420,7 +1420,7 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
 	port->flags = UPF_BOOT_AUTOCONF;
 	port->ops = &atmel_pops;
 	port->fifosize = 1;
-	port->line = pdev->id;
+	port->line = data->num;
 	port->dev = &pdev->dev;
 	port->mapbase = pdev->resource[0].start;
 	port->irq = pdev->resource[1].start;
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 660b80a75cac..1102ce65a3a9 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -348,11 +348,50 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 	return rc;
 }
 
+static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev)
+{
+	return pdev->class == PCI_CLASS_SERIAL_USB_EHCI &&
+		pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		pdev->device == 0x1E26;
+}
+
+static void ehci_enable_xhci_companion(void)
+{
+	struct pci_dev *companion = NULL;
+
+	/* The xHCI and EHCI controllers are not on the same PCI slot */
+	for_each_pci_dev(companion) {
+		if (!usb_is_intel_switchable_xhci(companion))
+			continue;
+		usb_enable_xhci_ports(companion);
+		return;
+	}
+}
+
 static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
 {
 	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
+	/* The BIOS on systems with the Intel Panther Point chipset may or may
+	 * not support xHCI natively.  That means that during system resume, it
+	 * may switch the ports back to EHCI so that users can use their
+	 * keyboard to select a kernel from GRUB after resume from hibernate.
+	 *
+	 * The BIOS is supposed to remember whether the OS had xHCI ports
+	 * enabled before resume, and switch the ports back to xHCI when the
+	 * BIOS/OS semaphore is written, but we all know we can't trust BIOS
+	 * writers.
+	 *
+	 * Unconditionally switch the ports back to xHCI after a system resume.
+	 * We can't tell whether the EHCI or xHCI controller will be resumed
+	 * first, so we have to do the port switchover in both drivers.  Writing
+	 * a '1' to the port switchover registers should have no effect if the
+	 * port was already switched over.
+	 */
+	if (usb_is_intel_switchable_ehci(pdev))
+		ehci_enable_xhci_companion();
+
 	// maybe restore FLADJ
 
 	if (time_before(jiffies, ehci->next_statechange))
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index f16c59d5f487..fd930618c28f 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -69,6 +69,9 @@
 #define NB_PIF0_PWRDOWN_0	0x01100012
 #define NB_PIF0_PWRDOWN_1	0x01100013
 
+#define USB_INTEL_XUSB2PR	0xD0
+#define USB_INTEL_USB3_PSSEN	0xD8
+
 static struct amd_chipset_info {
 	struct pci_dev	*nb_dev;
 	struct pci_dev	*smbus_dev;
@@ -673,6 +676,64 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done,
 	return -ETIMEDOUT;
 }
 
+bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
+{
+	return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
+		pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI;
+}
+EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci);
+
+/*
+ * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
+ * share some number of ports.  These ports can be switched between either
+ * controller.  Not all of the ports under the EHCI host controller may be
+ * switchable.
+ *
+ * The ports should be switched over to xHCI before PCI probes for any device
+ * start.  This avoids active devices under EHCI being disconnected during the
+ * port switchover, which could cause loss of data on USB storage devices, or
+ * failed boot when the root file system is on a USB mass storage device and is
+ * enumerated under EHCI first.
+ *
+ * We write into the xHC's PCI configuration space in some Intel-specific
+ * registers to switch the ports over.  The USB 3.0 terminations and the USB
+ * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
+ * terminations before switching the USB 2.0 wires over, so that USB 3.0
+ * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
+ */
+void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+{
+	u32	ports_available;
+
+	ports_available = 0xffffffff;
+	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
+	 * Register, to turn on SuperSpeed terminations for all
+	 * available ports.
+	 */
+	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+			cpu_to_le32(ports_available));
+
+	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+			&ports_available);
+	dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
+			"under xHCI: 0x%x\n", ports_available);
+
+	ports_available = 0xffffffff;
+	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
+	 * switch the USB 2.0 power and data lines over to the xHCI
+	 * host.
+	 */
+	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+			cpu_to_le32(ports_available));
+
+	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+			&ports_available);
+	dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
+			"to xHCI: 0x%x\n", ports_available);
+}
+EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
+
 /**
  * PCI Quirks for xHCI.
  *
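Aside: usb_enable_xhci_ports() writes all-ones to each switchover register and then reads it back; since not every port under EHCI is switchable (per the comment above), the readback is what tells the driver which bits the hardware actually accepted. A minimal sketch of that write-then-verify shape; the helper name and the latching behaviour are assumptions drawn from the comment and debug prints above:

    #include <linux/pci.h>

    /* Write-then-verify a vendor-specific config-space register: the
     * readback shows which bits the hardware actually latched. */
    static void set_and_verify(struct pci_dev *pdev, int reg)
    {
        u32 bits = 0xffffffff;

        pci_write_config_dword(pdev, reg, bits);
        pci_read_config_dword(pdev, reg, &bits);
        dev_dbg(&pdev->dev, "bits accepted at 0x%x: 0x%x\n", reg, bits);
    }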
@@ -732,6 +793,8 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
 		writel(XHCI_LEGACY_DISABLE_SMI,
 				base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
 
+	if (usb_is_intel_switchable_xhci(pdev))
+		usb_enable_xhci_ports(pdev);
 hc_init:
 	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
 
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 6ae9f78e9938..b1002a8ef96f 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -8,6 +8,8 @@ int usb_amd_find_chipset_info(void);
 void usb_amd_dev_put(void);
 void usb_amd_quirk_pll_disable(void);
 void usb_amd_quirk_pll_enable(void);
+bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);
+void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
 #else
 static inline void usb_amd_quirk_pll_disable(void) {}
 static inline void usb_amd_quirk_pll_enable(void) {}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index cbc4d491e626..c408e9f6a707 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -118,6 +118,12 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
 	/* AMD PLL quirk */
 	if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
 		xhci->quirks |= XHCI_AMD_PLL_FIX;
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+			pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
+		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+		xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
+		xhci->limit_active_eps = 64;
+	}
 
 	/* Make sure the HC is halted. */
 	retval = xhci_halt(xhci);
@@ -242,8 +248,28 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 	int retval = 0;
 
+	/* The BIOS on systems with the Intel Panther Point chipset may or may
+	 * not support xHCI natively.  That means that during system resume, it
+	 * may switch the ports back to EHCI so that users can use their
+	 * keyboard to select a kernel from GRUB after resume from hibernate.
+	 *
+	 * The BIOS is supposed to remember whether the OS had xHCI ports
+	 * enabled before resume, and switch the ports back to xHCI when the
+	 * BIOS/OS semaphore is written, but we all know we can't trust BIOS
+	 * writers.
+	 *
+	 * Unconditionally switch the ports back to xHCI after a system resume.
+	 * We can't tell whether the EHCI or xHCI controller will be resumed
+	 * first, so we have to do the port switchover in both drivers.  Writing
+	 * a '1' to the port switchover registers should have no effect if the
+	 * port was already switched over.
+	 */
+	if (usb_is_intel_switchable_xhci(pdev))
+		usb_enable_xhci_ports(pdev);
+
 	retval = xhci_resume(xhci, hibernated);
 	return retval;
 }
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 237a765f8d18..cc1485bfed38 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -167,12 +167,6 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 		next = ring->dequeue;
 	}
 	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
-	if (ring == xhci->event_ring)
-		xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
-	else if (ring == xhci->cmd_ring)
-		xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
-	else
-		xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
 }
 
 /*
@@ -248,12 +242,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		next = ring->enqueue;
 	}
 	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
-	if (ring == xhci->event_ring)
-		xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
-	else if (ring == xhci->cmd_ring)
-		xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
-	else
-		xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
 }
 
 /*
@@ -636,13 +624,11 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 		}
 	}
 	usb_hcd_unlink_urb_from_ep(hcd, urb);
-	xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
 
 	spin_unlock(&xhci->lock);
 	usb_hcd_giveback_urb(hcd, urb, status);
 	xhci_urb_free_priv(xhci, urb_priv);
 	spin_lock(&xhci->lock);
-	xhci_dbg(xhci, "%s URB given back\n", adjective);
 	}
 }
 
@@ -692,6 +678,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 
 	if (list_empty(&ep->cancelled_td_list)) {
 		xhci_stop_watchdog_timer_in_irq(xhci, ep);
+		ep->stopped_td = NULL;
+		ep->stopped_trb = NULL;
 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 		return;
 	}
@@ -1093,8 +1081,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		complete(&xhci->addr_dev);
 		break;
 	case TRB_TYPE(TRB_DISABLE_SLOT):
-		if (xhci->devs[slot_id])
+		if (xhci->devs[slot_id]) {
+			if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
+				/* Delete default control endpoint resources */
+				xhci_free_device_endpoint_resources(xhci,
+						xhci->devs[slot_id], true);
 			xhci_free_virt_device(xhci, slot_id);
+		}
 		break;
 	case TRB_TYPE(TRB_CONFIG_EP):
 		virt_dev = xhci->devs[slot_id];
@@ -1630,7 +1623,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1630 "without IOC set??\n"); 1623 "without IOC set??\n");
1631 *status = -ESHUTDOWN; 1624 *status = -ESHUTDOWN;
1632 } else { 1625 } else {
1633 xhci_dbg(xhci, "Successful control transfer!\n");
1634 *status = 0; 1626 *status = 0;
1635 } 1627 }
1636 break; 1628 break;
@@ -1727,7 +1719,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1727 switch (trb_comp_code) { 1719 switch (trb_comp_code) {
1728 case COMP_SUCCESS: 1720 case COMP_SUCCESS:
1729 frame->status = 0; 1721 frame->status = 0;
1730 xhci_dbg(xhci, "Successful isoc transfer!\n");
1731 break; 1722 break;
1732 case COMP_SHORT_TX: 1723 case COMP_SHORT_TX:
1733 frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ? 1724 frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
@@ -1837,12 +1828,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
1837 else 1828 else
1838 *status = 0; 1829 *status = 0;
1839 } else { 1830 } else {
1840 if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
1841 xhci_dbg(xhci, "Successful bulk "
1842 "transfer!\n");
1843 else
1844 xhci_dbg(xhci, "Successful interrupt "
1845 "transfer!\n");
1846 *status = 0; 1831 *status = 0;
1847 } 1832 }
1848 break; 1833 break;
@@ -1856,11 +1841,12 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
1856 /* Others already handled above */ 1841 /* Others already handled above */
1857 break; 1842 break;
1858 } 1843 }
1859 xhci_dbg(xhci, "ep %#x - asked for %d bytes, " 1844 if (trb_comp_code == COMP_SHORT_TX)
1860 "%d bytes untransferred\n", 1845 xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
1861 td->urb->ep->desc.bEndpointAddress, 1846 "%d bytes untransferred\n",
1862 td->urb->transfer_buffer_length, 1847 td->urb->ep->desc.bEndpointAddress,
1863 TRB_LEN(le32_to_cpu(event->transfer_len))); 1848 td->urb->transfer_buffer_length,
1849 TRB_LEN(le32_to_cpu(event->transfer_len)));
1864 /* Fast path - was this the last TRB in the TD for this URB? */ 1850 /* Fast path - was this the last TRB in the TD for this URB? */
1865 if (event_trb == td->last_trb) { 1851 if (event_trb == td->last_trb) {
1866 if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { 1852 if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
@@ -1954,7 +1940,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 
 	/* Endpoint ID is 1 based, our index is zero based */
 	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
-	xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
 	ep = &xdev->eps[ep_index];
 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
@@ -2081,6 +2066,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	if (!event_seg) {
 		if (!ep->skip ||
 				!usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+			/* Some host controllers give a spurious
+			 * successful event after a short transfer.
+			 * Ignore it.
+			 */
+			if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+					ep_ring->last_td_was_short) {
+				ep_ring->last_td_was_short = false;
+				ret = 0;
+				goto cleanup;
+			}
 			/* HC is busted, give up! */
 			xhci_err(xhci,
 				"ERROR Transfer event TRB DMA ptr not "
@@ -2091,6 +2086,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			ret = skip_isoc_td(xhci, td, event, ep, &status);
 			goto cleanup;
 		}
+		if (trb_comp_code == COMP_SHORT_TX)
+			ep_ring->last_td_was_short = true;
+		else
+			ep_ring->last_td_was_short = false;
 
 		if (ep->skip) {
 			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
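Aside: the two hunks above implement a small two-state filter: remember whether the previous TD ended short, and if the controller then reports a success event that matches no queued TRB, swallow exactly one such event instead of declaring the host broken. A standalone model of that logic (the event names and the matches_queued_td flag are this sketch's own):

    #include <stdio.h>
    #include <stdbool.h>

    enum comp { COMP_SUCCESS_EVT, COMP_SHORT_TX_EVT };

    static bool last_td_was_short;

    /* Returns true when a spurious success event should be dropped. */
    static bool should_ignore(enum comp code, bool matches_queued_td)
    {
        if (!matches_queued_td && last_td_was_short) {
            last_td_was_short = false;
            return true;        /* swallow exactly one event */
        }
        last_td_was_short = (code == COMP_SHORT_TX_EVT);
        return false;
    }

    int main(void)
    {
        printf("%d\n", should_ignore(COMP_SHORT_TX_EVT, true));  /* 0 */
        printf("%d\n", should_ignore(COMP_SUCCESS_EVT, false));  /* 1 */
        return 0;
    }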
@@ -2149,9 +2148,15 @@ cleanup:
 		xhci_urb_free_priv(xhci, urb_priv);
 
 		usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
-		xhci_dbg(xhci, "Giveback URB %p, len = %d, "
-				"status = %d\n",
-				urb, urb->actual_length, status);
+		if ((urb->actual_length != urb->transfer_buffer_length &&
+					(urb->transfer_flags &
+					 URB_SHORT_NOT_OK)) ||
+				status != 0)
+			xhci_dbg(xhci, "Giveback URB %p, len = %d, "
+					"expected = %x, status = %d\n",
+					urb, urb->actual_length,
+					urb->transfer_buffer_length,
+					status);
 		spin_unlock(&xhci->lock);
 		usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
 		spin_lock(&xhci->lock);
@@ -2180,7 +2185,6 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
 	int update_ptrs = 1;
 	int ret;
 
-	xhci_dbg(xhci, "In %s\n", __func__);
 	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
 		xhci->error_bitmask |= 1 << 1;
 		return 0;
@@ -2193,7 +2197,6 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
 		xhci->error_bitmask |= 1 << 2;
 		return 0;
 	}
-	xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
 
 	/*
 	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
@@ -2203,20 +2206,14 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
 	/* FIXME: Handle more event types. */
 	switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
 	case TRB_TYPE(TRB_COMPLETION):
-		xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
 		handle_cmd_completion(xhci, &event->event_cmd);
-		xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
 		break;
 	case TRB_TYPE(TRB_PORT_STATUS):
-		xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
 		handle_port_status(xhci, event);
-		xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
 		update_ptrs = 0;
 		break;
 	case TRB_TYPE(TRB_TRANSFER):
-		xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
 		ret = handle_tx_event(xhci, &event->trans_event);
-		xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
 		if (ret < 0)
 			xhci->error_bitmask |= 1 << 9;
 		else
@@ -2273,16 +2270,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
 		spin_unlock(&xhci->lock);
 		return IRQ_NONE;
 	}
-	xhci_dbg(xhci, "op reg status = %08x\n", status);
-	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
-	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
-			(unsigned long long)
-			xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
-			lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
-			upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
-			(unsigned int) le32_to_cpu(trb->link.intr_target),
-			(unsigned int) le32_to_cpu(trb->link.control));
-
 	if (status & STS_FATAL) {
 		xhci_warn(xhci, "WARNING: Host System Error\n");
 		xhci_halt(xhci);
@@ -2397,7 +2384,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
 {
 	/* Make sure the endpoint has been added to xHC schedule */
-	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
 	switch (ep_state) {
 	case EP_STATE_DISABLED:
 		/*
@@ -2434,7 +2420,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		struct xhci_ring *ring = ep_ring;
 		union xhci_trb *next;
 
-		xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
 		next = ring->enqueue;
 
 		while (last_trb(xhci, ring, ring->enq_seg, next)) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 8f2a56ece44f..d9660eb97eb9 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1314,8 +1314,10 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	if (ret <= 0)
 		return ret;
 	xhci = hcd_to_xhci(hcd);
-	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		return -ENODEV;
 
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
 	drop_flag = xhci_get_endpoint_flag(&ep->desc);
 	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
 		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
@@ -1401,6 +1403,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		return ret;
 	}
 	xhci = hcd_to_xhci(hcd);
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		return -ENODEV;
 
 	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
 	last_ctx = xhci_last_valid_endpoint(added_ctxs);
@@ -1578,6 +1582,113 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
 	return ret;
 }
 
+static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	struct xhci_input_control_ctx *ctrl_ctx;
+	u32 valid_add_flags;
+	u32 valid_drop_flags;
+
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+	/* Ignore the slot flag (bit 0), and the default control endpoint flag
+	 * (bit 1).  The default control endpoint is added during the Address
+	 * Device command and is never removed until the slot is disabled.
+	 */
+	valid_add_flags = ctrl_ctx->add_flags >> 2;
+	valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+
+	/* Use hweight32 to count the number of ones in the add flags, or
+	 * number of endpoints added.  Don't count endpoints that are changed
+	 * (both added and dropped).
+	 */
+	return hweight32(valid_add_flags) -
+		hweight32(valid_add_flags & valid_drop_flags);
+}
+
+static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	struct xhci_input_control_ctx *ctrl_ctx;
+	u32 valid_add_flags;
+	u32 valid_drop_flags;
+
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+	valid_add_flags = ctrl_ctx->add_flags >> 2;
+	valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+
+	return hweight32(valid_drop_flags) -
+		hweight32(valid_add_flags & valid_drop_flags);
+}
+
+/*
+ * We need to reserve the new number of endpoints before the configure endpoint
+ * command completes.  We can't subtract the dropped endpoints from the number
+ * of active endpoints until the command completes because we can oversubscribe
+ * the host in this case:
+ *
+ *  - the first configure endpoint command drops more endpoints than it adds
+ *  - a second configure endpoint command that adds more endpoints is queued
+ *  - the first configure endpoint command fails, so the config is unchanged
+ *  - the second command may succeed, even though there isn't enough resources
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	u32 added_eps;
+
+	added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
+		xhci_dbg(xhci, "Not enough ep ctxs: "
+				"%u active, need to add %u, limit is %u.\n",
+				xhci->num_active_eps, added_eps,
+				xhci->limit_active_eps);
+		return -ENOMEM;
+	}
+	xhci->num_active_eps += added_eps;
+	xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
+			xhci->num_active_eps);
+	return 0;
+}
+
+/*
+ * The configure endpoint was failed by the xHC for some other reason, so we
+ * need to revert the resources that failed configuration would have used.
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_free_host_resources(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	u32 num_failed_eps;
+
+	num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+	xhci->num_active_eps -= num_failed_eps;
+	xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
+			num_failed_eps,
+			xhci->num_active_eps);
+}
+
+/*
+ * Now that the command has completed, clean up the active endpoint count by
+ * subtracting out the endpoints that were dropped (but not changed).
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	u32 num_dropped_eps;
+
+	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
+	xhci->num_active_eps -= num_dropped_eps;
+	if (num_dropped_eps)
+		xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
+				num_dropped_eps,
+				xhci->num_active_eps);
+}
+
 /* Issue a configure endpoint command or evaluate context command
  * and wait for it to finish.
  */
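Aside: the endpoint bookkeeping above leans on hweight32(), a population count. Bits 0 and 1 (the slot flag and the default control endpoint) are masked off, then the number of added endpoints is counted minus those that are both added and dropped, i.e. merely changed. A standalone rework using the compiler's popcount builtin:

    #include <stdio.h>
    #include <stdint.h>

    /* Count net-new endpoints from add/drop flag masks, ignoring the
     * slot flag (bit 0) and default control endpoint flag (bit 1). */
    static unsigned int count_new_endpoints(uint32_t add_flags,
                                            uint32_t drop_flags)
    {
        uint32_t add = add_flags >> 2;
        uint32_t drop = drop_flags >> 2;

        return __builtin_popcount(add) - __builtin_popcount(add & drop);
    }

    int main(void)
    {
        /* add eps at bits 3 and 4, drop bit 4 (changed): net one new */
        uint32_t add = (1u << 3) | (1u << 4);
        uint32_t drop = (1u << 4);

        printf("%u\n", count_new_endpoints(add, drop)); /* prints 1 */
        return 0;
    }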
@@ -1598,6 +1709,15 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	virt_dev = xhci->devs[udev->slot_id];
 	if (command) {
 		in_ctx = command->in_ctx;
+		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+				xhci_reserve_host_resources(xhci, in_ctx)) {
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			xhci_warn(xhci, "Not enough host resources, "
+					"active endpoint contexts = %u\n",
+					xhci->num_active_eps);
+			return -ENOMEM;
+		}
+
 		cmd_completion = command->completion;
 		cmd_status = &command->status;
 		command->command_trb = xhci->cmd_ring->enqueue;
@@ -1613,6 +1733,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
 	} else {
 		in_ctx = virt_dev->in_ctx;
+		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+				xhci_reserve_host_resources(xhci, in_ctx)) {
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			xhci_warn(xhci, "Not enough host resources, "
+					"active endpoint contexts = %u\n",
+					xhci->num_active_eps);
+			return -ENOMEM;
+		}
 		cmd_completion = &virt_dev->cmd_completion;
 		cmd_status = &virt_dev->cmd_status;
 	}
@@ -1627,6 +1755,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	if (ret < 0) {
 		if (command)
 			list_del(&command->cmd_list);
+		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+			xhci_free_host_resources(xhci, in_ctx);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
 		return -ENOMEM;
@@ -1649,8 +1779,22 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	}
 
 	if (!ctx_change)
-		return xhci_configure_endpoint_result(xhci, udev, cmd_status);
-	return xhci_evaluate_context_result(xhci, udev, cmd_status);
+		ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
+	else
+		ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
+
+	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+		spin_lock_irqsave(&xhci->lock, flags);
+		/* If the command failed, remove the reserved resources.
+		 * Otherwise, clean up the estimate to include dropped eps.
+		 */
+		if (ret)
+			xhci_free_host_resources(xhci, in_ctx);
+		else
+			xhci_finish_resource_reservation(xhci, in_ctx);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+	}
+	return ret;
 }
 
 /* Called after one or more calls to xhci_add_endpoint() or
@@ -1676,6 +1820,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	if (ret <= 0)
 		return ret;
 	xhci = hcd_to_xhci(hcd);
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		return -ENODEV;
 
 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
 	virt_dev = xhci->devs[udev->slot_id];
@@ -2266,6 +2412,34 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
2266} 2412}
2267 2413
2268/* 2414/*
2415 * Deletes endpoint resources for endpoints that were active before a Reset
2416 * Device command, or a Disable Slot command. The Reset Device command leaves
2417 * the control endpoint intact, whereas the Disable Slot command deletes it.
2418 *
2419 * Must be called with xhci->lock held.
2420 */
2421void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
2422 struct xhci_virt_device *virt_dev, bool drop_control_ep)
2423{
2424 int i;
2425 unsigned int num_dropped_eps = 0;
2426 unsigned int drop_flags = 0;
2427
2428 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
2429 if (virt_dev->eps[i].ring) {
2430 drop_flags |= 1 << i;
2431 num_dropped_eps++;
2432 }
2433 }
2434 xhci->num_active_eps -= num_dropped_eps;
2435 if (num_dropped_eps)
2436 xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
2437 "%u now active.\n",
2438 num_dropped_eps, drop_flags,
2439 xhci->num_active_eps);
2440}
2441
2442/*
2269 * This submits a Reset Device Command, which will set the device state to 0, 2443 * This submits a Reset Device Command, which will set the device state to 0,
2270 * set the device address to 0, and disable all the endpoints except the default 2444 * set the device address to 0, and disable all the endpoints except the default
2271 * control endpoint. The USB core should come back and call 2445 * control endpoint. The USB core should come back and call
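In xhci_free_device_endpoint_resources() above, endpoint context index i maps to bit (1 << i) of the drop flags, with index 0 reserved for the default control endpoint that a Reset Device command leaves intact. A minimal standalone model of the loop, with a hypothetical bitmask standing in for the per-endpoint ring pointers:

#include <stdio.h>

int main(void)
{
	/* bit i set = endpoint context i still has a ring allocated */
	unsigned int has_ring = (1 << 0) | (1 << 2) | (1 << 5);
	unsigned int drop_flags = 0, num_dropped = 0;
	int drop_control_ep = 0;	/* Reset Device keeps ep0 */
	int i;

	for (i = drop_control_ep ? 0 : 1; i < 31; i++) {
		if (has_ring & (1 << i)) {
			drop_flags |= 1 << i;
			num_dropped++;
		}
	}
	printf("dropped %u ep ctxs, flags = 0x%x\n", num_dropped, drop_flags);
	return 0;
}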
@@ -2406,6 +2580,14 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
2406 goto command_cleanup; 2580 goto command_cleanup;
2407 } 2581 }
2408 2582
2583 /* Free up host controller endpoint resources */
2584 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2585 spin_lock_irqsave(&xhci->lock, flags);
2586 /* Don't delete the default control endpoint resources */
2587 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
2588 spin_unlock_irqrestore(&xhci->lock, flags);
2589 }
2590
2409 /* Everything but endpoint 0 is disabled, so free or cache the rings. */ 2591 /* Everything but endpoint 0 is disabled, so free or cache the rings. */
2410 last_freed_endpoint = 1; 2592 last_freed_endpoint = 1;
2411 for (i = 1; i < 31; ++i) { 2593 for (i = 1; i < 31; ++i) {
@@ -2479,6 +2661,27 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
2479} 2661}
2480 2662
2481/* 2663/*
2664 * Checks if we have enough host controller resources for the default control
2665 * endpoint.
2666 *
2667 * Must be called with xhci->lock held.
2668 */
2669static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
2670{
2671 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
2672 xhci_dbg(xhci, "Not enough ep ctxs: "
2673 "%u active, need to add 1, limit is %u.\n",
2674 xhci->num_active_eps, xhci->limit_active_eps);
2675 return -ENOMEM;
2676 }
2677 xhci->num_active_eps += 1;
2678 xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
2679 xhci->num_active_eps);
2680 return 0;
2681}
2682
2683
2684/*
2482 * Returns 0 if the xHC ran out of device slots, the Enable Slot command 2685 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
2483 * timed out, or allocating memory failed. Returns 1 on success. 2686 * timed out, or allocating memory failed. Returns 1 on success.
2484 */ 2687 */
@@ -2513,24 +2716,39 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
2513 xhci_err(xhci, "Error while assigning device slot ID\n"); 2716 xhci_err(xhci, "Error while assigning device slot ID\n");
2514 return 0; 2717 return 0;
2515 } 2718 }
2516 /* xhci_alloc_virt_device() does not touch rings; no need to lock. 2719
2517 * Use GFP_NOIO, since this function can be called from 2720 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2721 spin_lock_irqsave(&xhci->lock, flags);
2722 ret = xhci_reserve_host_control_ep_resources(xhci);
2723 if (ret) {
2724 spin_unlock_irqrestore(&xhci->lock, flags);
2725 xhci_warn(xhci, "Not enough host resources, "
2726 "active endpoint contexts = %u\n",
2727 xhci->num_active_eps);
2728 goto disable_slot;
2729 }
2730 spin_unlock_irqrestore(&xhci->lock, flags);
2731 }
2732 /* Use GFP_NOIO, since this function can be called from
2518 * xhci_discover_or_reset_device(), which may be called as part of 2733 * xhci_discover_or_reset_device(), which may be called as part of
2519 * mass storage driver error handling. 2734 * mass storage driver error handling.
2520 */ 2735 */
2521 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) { 2736 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
2522 /* Disable slot, if we can do it without mem alloc */
2523 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); 2737 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
2524 spin_lock_irqsave(&xhci->lock, flags); 2738 goto disable_slot;
2525 if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
2526 xhci_ring_cmd_db(xhci);
2527 spin_unlock_irqrestore(&xhci->lock, flags);
2528 return 0;
2529 } 2739 }
2530 udev->slot_id = xhci->slot_id; 2740 udev->slot_id = xhci->slot_id;
2531 /* Is this a LS or FS device under a HS hub? */ 2741 /* Is this a LS or FS device under a HS hub? */
2532	/* Hub or peripheral? */ 2742	/* Hub or peripheral? */
2533 return 1; 2743 return 1;
2744
2745disable_slot:
2746 /* Disable slot, if we can do it without mem alloc */
2747 spin_lock_irqsave(&xhci->lock, flags);
2748 if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
2749 xhci_ring_cmd_db(xhci);
2750 spin_unlock_irqrestore(&xhci->lock, flags);
2751 return 0;
2534} 2752}
2535 2753
2536/* 2754/*
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e12db7cfb9bb..ac0196e7fcf1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1123,6 +1123,7 @@ struct xhci_ring {
1123 */ 1123 */
1124 u32 cycle_state; 1124 u32 cycle_state;
1125 unsigned int stream_id; 1125 unsigned int stream_id;
1126 bool last_td_was_short;
1126}; 1127};
1127 1128
1128struct xhci_erst_entry { 1129struct xhci_erst_entry {
@@ -1290,6 +1291,19 @@ struct xhci_hcd {
1290#define XHCI_RESET_EP_QUIRK (1 << 1) 1291#define XHCI_RESET_EP_QUIRK (1 << 1)
1291#define XHCI_NEC_HOST (1 << 2) 1292#define XHCI_NEC_HOST (1 << 2)
1292#define XHCI_AMD_PLL_FIX (1 << 3) 1293#define XHCI_AMD_PLL_FIX (1 << 3)
1294#define XHCI_SPURIOUS_SUCCESS (1 << 4)
1295/*
1296 * Certain Intel host controllers have a limit to the number of endpoint
1297 * contexts they can handle. Ideally, they would signal that they can't handle
1298 * anymore endpoint contexts by returning a Resource Error for the Configure
1299 * Endpoint command, but they don't. Instead they expect software to keep track
1300 * of the number of active endpoints for them, across configure endpoint
1301 * commands, reset device commands, disable slot commands, and address device
1302 * commands.
1303 */
1304#define XHCI_EP_LIMIT_QUIRK (1 << 5)
1305 unsigned int num_active_eps;
1306 unsigned int limit_active_eps;
1293 /* There are two roothubs to keep track of bus suspend info for */ 1307 /* There are two roothubs to keep track of bus suspend info for */
1294 struct xhci_bus_state bus_state[2]; 1308 struct xhci_bus_state bus_state[2];
1295 /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */ 1309 /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
@@ -1338,9 +1352,6 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
1338static inline void xhci_writel(struct xhci_hcd *xhci, 1352static inline void xhci_writel(struct xhci_hcd *xhci,
1339 const unsigned int val, __le32 __iomem *regs) 1353 const unsigned int val, __le32 __iomem *regs)
1340{ 1354{
1341 xhci_dbg(xhci,
1342 "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
1343 regs, val);
1344 writel(val, regs); 1355 writel(val, regs);
1345} 1356}
1346 1357
@@ -1368,9 +1379,6 @@ static inline void xhci_write_64(struct xhci_hcd *xhci,
1368 u32 val_lo = lower_32_bits(val); 1379 u32 val_lo = lower_32_bits(val);
1369 u32 val_hi = upper_32_bits(val); 1380 u32 val_hi = upper_32_bits(val);
1370 1381
1371 xhci_dbg(xhci,
1372 "`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n",
1373 regs, (long unsigned int) val);
1374 writel(val_lo, ptr); 1382 writel(val_lo, ptr);
1375 writel(val_hi, ptr + 1); 1383 writel(val_hi, ptr + 1);
1376} 1384}
@@ -1439,6 +1447,8 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
1439void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci, 1447void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
1440 struct xhci_ep_ctx *ep_ctx, 1448 struct xhci_ep_ctx *ep_ctx,
1441 struct xhci_virt_ep *ep); 1449 struct xhci_virt_ep *ep);
1450void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
1451 struct xhci_virt_device *virt_dev, bool drop_control_ep);
1442struct xhci_ring *xhci_dma_to_transfer_ring( 1452struct xhci_ring *xhci_dma_to_transfer_ring(
1443 struct xhci_virt_ep *ep, 1453 struct xhci_virt_ep *ep,
1444 u64 address); 1454 u64 address);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 2f7c76a85e53..e224a92baa16 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -144,7 +144,7 @@ static void handle_tx(struct vhost_net *net)
144 } 144 }
145 145
146 mutex_lock(&vq->mutex); 146 mutex_lock(&vq->mutex);
147 vhost_disable_notify(vq); 147 vhost_disable_notify(&net->dev, vq);
148 148
149 if (wmem < sock->sk->sk_sndbuf / 2) 149 if (wmem < sock->sk->sk_sndbuf / 2)
150 tx_poll_stop(net); 150 tx_poll_stop(net);
@@ -166,8 +166,8 @@ static void handle_tx(struct vhost_net *net)
166 set_bit(SOCK_ASYNC_NOSPACE, &sock->flags); 166 set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
167 break; 167 break;
168 } 168 }
169 if (unlikely(vhost_enable_notify(vq))) { 169 if (unlikely(vhost_enable_notify(&net->dev, vq))) {
170 vhost_disable_notify(vq); 170 vhost_disable_notify(&net->dev, vq);
171 continue; 171 continue;
172 } 172 }
173 break; 173 break;
@@ -315,7 +315,7 @@ static void handle_rx(struct vhost_net *net)
315 return; 315 return;
316 316
317 mutex_lock(&vq->mutex); 317 mutex_lock(&vq->mutex);
318 vhost_disable_notify(vq); 318 vhost_disable_notify(&net->dev, vq);
319 vhost_hlen = vq->vhost_hlen; 319 vhost_hlen = vq->vhost_hlen;
320 sock_hlen = vq->sock_hlen; 320 sock_hlen = vq->sock_hlen;
321 321
@@ -334,10 +334,10 @@ static void handle_rx(struct vhost_net *net)
334 break; 334 break;
335 /* OK, now we need to know about added descriptors. */ 335 /* OK, now we need to know about added descriptors. */
336 if (!headcount) { 336 if (!headcount) {
337 if (unlikely(vhost_enable_notify(vq))) { 337 if (unlikely(vhost_enable_notify(&net->dev, vq))) {
338 /* They have slipped one in as we were 338 /* They have slipped one in as we were
339 * doing that: check again. */ 339 * doing that: check again. */
340 vhost_disable_notify(vq); 340 vhost_disable_notify(&net->dev, vq);
341 continue; 341 continue;
342 } 342 }
343 /* Nothing new? Wait for eventfd to tell us 343 /* Nothing new? Wait for eventfd to tell us
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 099f30230d06..734e1d74ad80 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -49,7 +49,7 @@ static void handle_vq(struct vhost_test *n)
49 return; 49 return;
50 50
51 mutex_lock(&vq->mutex); 51 mutex_lock(&vq->mutex);
52 vhost_disable_notify(vq); 52 vhost_disable_notify(&n->dev, vq);
53 53
54 for (;;) { 54 for (;;) {
55 head = vhost_get_vq_desc(&n->dev, vq, vq->iov, 55 head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
@@ -61,8 +61,8 @@ static void handle_vq(struct vhost_test *n)
61 break; 61 break;
62 /* Nothing new? Wait for eventfd to tell us they refilled. */ 62 /* Nothing new? Wait for eventfd to tell us they refilled. */
63 if (head == vq->num) { 63 if (head == vq->num) {
64 if (unlikely(vhost_enable_notify(vq))) { 64 if (unlikely(vhost_enable_notify(&n->dev, vq))) {
65 vhost_disable_notify(vq); 65 vhost_disable_notify(&n->dev, vq);
66 continue; 66 continue;
67 } 67 }
68 break; 68 break;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 7aa4eea930f1..ea966b356352 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -37,6 +37,9 @@ enum {
37 VHOST_MEMORY_F_LOG = 0x1, 37 VHOST_MEMORY_F_LOG = 0x1,
38}; 38};
39 39
40#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
41#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
42
40static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh, 43static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
41 poll_table *pt) 44 poll_table *pt)
42{ 45{
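These two macros reuse the padding word at the end of each ring: used_event (written by the guest) sits just past avail->ring[num], and avail_event (written by vhost) just past used->ring[num]. That placement is also why the access checks below grow by two bytes when VIRTIO_RING_F_EVENT_IDX is negotiated. A standalone sketch of the byte offsets, with local copies of the vring layout:

/* Where the event words land for a queue of 'num' entries; the structs
 * mirror the split-ring layout from the virtio spec. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct vring_avail { uint16_t flags; uint16_t idx; uint16_t ring[]; };
struct vring_used_elem { uint32_t id; uint32_t len; };
struct vring_used { uint16_t flags; uint16_t idx;
		    struct vring_used_elem ring[]; };

int main(void)
{
	unsigned int num = 256;		/* queue size */
	size_t used_event = sizeof(struct vring_avail)
			  + num * sizeof(uint16_t);
	size_t avail_event = sizeof(struct vring_used)
			   + num * sizeof(struct vring_used_elem);

	printf("used_event  at avail + %zu bytes\n", used_event);
	printf("avail_event at used  + %zu bytes\n", avail_event);
	return 0;
}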
@@ -161,6 +164,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
161 vq->last_avail_idx = 0; 164 vq->last_avail_idx = 0;
162 vq->avail_idx = 0; 165 vq->avail_idx = 0;
163 vq->last_used_idx = 0; 166 vq->last_used_idx = 0;
167 vq->signalled_used = 0;
168 vq->signalled_used_valid = false;
164 vq->used_flags = 0; 169 vq->used_flags = 0;
165 vq->log_used = false; 170 vq->log_used = false;
166 vq->log_addr = -1ull; 171 vq->log_addr = -1ull;
@@ -489,16 +494,17 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
489 return 1; 494 return 1;
490} 495}
491 496
492static int vq_access_ok(unsigned int num, 497static int vq_access_ok(struct vhost_dev *d, unsigned int num,
493 struct vring_desc __user *desc, 498 struct vring_desc __user *desc,
494 struct vring_avail __user *avail, 499 struct vring_avail __user *avail,
495 struct vring_used __user *used) 500 struct vring_used __user *used)
496{ 501{
502 size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
497 return access_ok(VERIFY_READ, desc, num * sizeof *desc) && 503 return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
498 access_ok(VERIFY_READ, avail, 504 access_ok(VERIFY_READ, avail,
499 sizeof *avail + num * sizeof *avail->ring) && 505 sizeof *avail + num * sizeof *avail->ring + s) &&
500 access_ok(VERIFY_WRITE, used, 506 access_ok(VERIFY_WRITE, used,
501 sizeof *used + num * sizeof *used->ring); 507 sizeof *used + num * sizeof *used->ring + s);
502} 508}
503 509
504/* Can we log writes? */ 510/* Can we log writes? */
@@ -514,9 +520,11 @@ int vhost_log_access_ok(struct vhost_dev *dev)
514 520
515/* Verify access for write logging. */ 521/* Verify access for write logging. */
516/* Caller should have vq mutex and device mutex */ 522/* Caller should have vq mutex and device mutex */
517static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base) 523static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
524 void __user *log_base)
518{ 525{
519 struct vhost_memory *mp; 526 struct vhost_memory *mp;
527 size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
520 528
521 mp = rcu_dereference_protected(vq->dev->memory, 529 mp = rcu_dereference_protected(vq->dev->memory,
522 lockdep_is_held(&vq->mutex)); 530 lockdep_is_held(&vq->mutex));
@@ -524,15 +532,15 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
524 vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) && 532 vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
525 (!vq->log_used || log_access_ok(log_base, vq->log_addr, 533 (!vq->log_used || log_access_ok(log_base, vq->log_addr,
526 sizeof *vq->used + 534 sizeof *vq->used +
527 vq->num * sizeof *vq->used->ring)); 535 vq->num * sizeof *vq->used->ring + s));
528} 536}
529 537
530/* Can we start vq? */ 538/* Can we start vq? */
531/* Caller should have vq mutex and device mutex */ 539/* Caller should have vq mutex and device mutex */
532int vhost_vq_access_ok(struct vhost_virtqueue *vq) 540int vhost_vq_access_ok(struct vhost_virtqueue *vq)
533{ 541{
534 return vq_access_ok(vq->num, vq->desc, vq->avail, vq->used) && 542 return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
535 vq_log_access_ok(vq, vq->log_base); 543 vq_log_access_ok(vq->dev, vq, vq->log_base);
536} 544}
537 545
538static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) 546static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
@@ -577,6 +585,7 @@ static int init_used(struct vhost_virtqueue *vq,
577 585
578 if (r) 586 if (r)
579 return r; 587 return r;
588 vq->signalled_used_valid = false;
580 return get_user(vq->last_used_idx, &used->idx); 589 return get_user(vq->last_used_idx, &used->idx);
581} 590}
582 591
@@ -674,7 +683,7 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
674 * If it is not, we don't as size might not have been setup. 683 * If it is not, we don't as size might not have been setup.
675 * We will verify when backend is configured. */ 684 * We will verify when backend is configured. */
676 if (vq->private_data) { 685 if (vq->private_data) {
677 if (!vq_access_ok(vq->num, 686 if (!vq_access_ok(d, vq->num,
678 (void __user *)(unsigned long)a.desc_user_addr, 687 (void __user *)(unsigned long)a.desc_user_addr,
679 (void __user *)(unsigned long)a.avail_user_addr, 688 (void __user *)(unsigned long)a.avail_user_addr,
680 (void __user *)(unsigned long)a.used_user_addr)) { 689 (void __user *)(unsigned long)a.used_user_addr)) {
@@ -818,7 +827,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
818 vq = d->vqs + i; 827 vq = d->vqs + i;
819 mutex_lock(&vq->mutex); 828 mutex_lock(&vq->mutex);
820 /* If ring is inactive, will check when it's enabled. */ 829 /* If ring is inactive, will check when it's enabled. */
821 if (vq->private_data && !vq_log_access_ok(vq, base)) 830 if (vq->private_data && !vq_log_access_ok(d, vq, base))
822 r = -EFAULT; 831 r = -EFAULT;
823 else 832 else
824 vq->log_base = base; 833 vq->log_base = base;
@@ -1219,6 +1228,10 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1219 1228
1220 /* On success, increment avail index. */ 1229 /* On success, increment avail index. */
1221 vq->last_avail_idx++; 1230 vq->last_avail_idx++;
1231
1232 /* Assume notifications from guest are disabled at this point,
1233 * if they aren't, we would need to update the avail_event index. */
1234 BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
1222 return head; 1235 return head;
1223} 1236}
1224 1237
@@ -1267,6 +1280,12 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
1267 eventfd_signal(vq->log_ctx, 1); 1280 eventfd_signal(vq->log_ctx, 1);
1268 } 1281 }
1269 vq->last_used_idx++; 1282 vq->last_used_idx++;
1283 /* If the driver never bothers to signal in a very long while,
1284 * used index might wrap around. If that happens, invalidate
1285 * signalled_used index we stored. TODO: make sure driver
1286 * signals at least once in 2^16 and remove this. */
1287 if (unlikely(vq->last_used_idx == vq->signalled_used))
1288 vq->signalled_used_valid = false;
1270 return 0; 1289 return 0;
1271} 1290}
1272 1291
@@ -1275,6 +1294,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1275 unsigned count) 1294 unsigned count)
1276{ 1295{
1277 struct vring_used_elem __user *used; 1296 struct vring_used_elem __user *used;
1297 u16 old, new;
1278 int start; 1298 int start;
1279 1299
1280 start = vq->last_used_idx % vq->num; 1300 start = vq->last_used_idx % vq->num;
@@ -1292,7 +1312,14 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1292 ((void __user *)used - (void __user *)vq->used), 1312 ((void __user *)used - (void __user *)vq->used),
1293 count * sizeof *used); 1313 count * sizeof *used);
1294 } 1314 }
1295 vq->last_used_idx += count; 1315 old = vq->last_used_idx;
1316 new = (vq->last_used_idx += count);
1317 /* If the driver never bothers to signal in a very long while,
1318 * used index might wrap around. If that happens, invalidate
1319 * signalled_used index we stored. TODO: make sure driver
1320 * signals at least once in 2^16 and remove this. */
1321 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
1322 vq->signalled_used_valid = false;
1296 return 0; 1323 return 0;
1297} 1324}
1298 1325
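The unsigned 16-bit comparison above asks whether the stored signalled_used falls inside the half-open window (old, new] that this call just published; if it does, the cached value is stale and must be invalidated. A standalone check, including a wrapped window:

/* Standalone version of the 16-bit wrap test in __vhost_add_used_n(). */
#include <stdint.h>
#include <stdio.h>

static int in_window(uint16_t old, uint16_t new, uint16_t signalled_used)
{
	/* true iff signalled_used is in (old, new], modulo 2^16 */
	return (uint16_t)(new - signalled_used) < (uint16_t)(new - old);
}

int main(void)
{
	/* last_used_idx advanced from 0xfffe to 0x0002 (wrapped); we last
	 * signalled at 0x0000 -- inside the window, so invalidate. */
	printf("%d\n", in_window(0xfffe, 0x0002, 0x0000));	/* 1 */
	/* signalled at 0xfff0, outside the window -- still valid. */
	printf("%d\n", in_window(0xfffe, 0x0002, 0xfff0));	/* 0 */
	return 0;
}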
@@ -1331,29 +1358,47 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1331 return r; 1358 return r;
1332} 1359}
1333 1360
1334/* This actually signals the guest, using eventfd. */ 1361static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1335void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1336{ 1362{
1337 __u16 flags; 1363 __u16 old, new, event;
1338 1364 bool v;
1339 /* Flush out used index updates. This is paired 1365 /* Flush out used index updates. This is paired
1340 * with the barrier that the Guest executes when enabling 1366 * with the barrier that the Guest executes when enabling
1341 * interrupts. */ 1367 * interrupts. */
1342 smp_mb(); 1368 smp_mb();
1343 1369
1344 if (__get_user(flags, &vq->avail->flags)) { 1370 if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1345 vq_err(vq, "Failed to get flags"); 1371 unlikely(vq->avail_idx == vq->last_avail_idx))
1346 return; 1372 return true;
1373
1374 if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1375 __u16 flags;
1376 if (__get_user(flags, &vq->avail->flags)) {
1377 vq_err(vq, "Failed to get flags");
1378 return true;
1379 }
1380 return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
1347 } 1381 }
1382 old = vq->signalled_used;
1383 v = vq->signalled_used_valid;
1384 new = vq->signalled_used = vq->last_used_idx;
1385 vq->signalled_used_valid = true;
1348 1386
1349 /* If they don't want an interrupt, don't signal, unless empty. */ 1387 if (unlikely(!v))
1350 if ((flags & VRING_AVAIL_F_NO_INTERRUPT) && 1388 return true;
1351 (vq->avail_idx != vq->last_avail_idx ||
1352 !vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY)))
1353 return;
1354 1389
1390 if (get_user(event, vhost_used_event(vq))) {
1391 vq_err(vq, "Failed to get used event idx");
1392 return true;
1393 }
1394 return vring_need_event(event, new, old);
1395}
1396
1397/* This actually signals the guest, using eventfd. */
1398void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1399{
1355	/* Signal the Guest, tell them we used something up. */ 1400	/* Signal the Guest, tell them we used something up. */
1356 if (vq->call_ctx) 1401 if (vq->call_ctx && vhost_notify(dev, vq))
1357 eventfd_signal(vq->call_ctx, 1); 1402 eventfd_signal(vq->call_ctx, 1);
1358} 1403}
1359 1404
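With VIRTIO_RING_F_EVENT_IDX, vhost_notify() signals only when the just-published window (old, new] steps past the index the guest advertised, a test done by vring_need_event(); the same helper later drives the guest-side kick decision in virtqueue_kick(). A standalone demo, with the helper written out to mirror the macro from include/linux/virtio_ring.h:

#include <stdint.h>
#include <stdio.h>

/* Signal iff event_idx + 1 lies in the window (old_idx, new_idx],
 * all arithmetic modulo 2^16. */
static int vring_need_event(uint16_t event_idx, uint16_t new_idx,
			    uint16_t old_idx)
{
	return (uint16_t)(new_idx - event_idx - 1) <
	       (uint16_t)(new_idx - old_idx);
}

int main(void)
{
	/* Guest asked to be signalled once the used index passes 10. */
	printf("%d\n", vring_need_event(10, 11, 9));	/* 1: crossed it */
	printf("%d\n", vring_need_event(10, 10, 9));	/* 0: not yet */
	printf("%d\n", vring_need_event(10, 12, 11));	/* 0: already past */
	return 0;
}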
@@ -1376,7 +1421,7 @@ void vhost_add_used_and_signal_n(struct vhost_dev *dev,
1376} 1421}
1377 1422
1378/* OK, now we need to know about added descriptors. */ 1423/* OK, now we need to know about added descriptors. */
1379bool vhost_enable_notify(struct vhost_virtqueue *vq) 1424bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1380{ 1425{
1381 u16 avail_idx; 1426 u16 avail_idx;
1382 int r; 1427 int r;
@@ -1384,11 +1429,34 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
1384 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) 1429 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1385 return false; 1430 return false;
1386 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; 1431 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
1387 r = put_user(vq->used_flags, &vq->used->flags); 1432 if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1388 if (r) { 1433 r = put_user(vq->used_flags, &vq->used->flags);
1389 vq_err(vq, "Failed to enable notification at %p: %d\n", 1434 if (r) {
1390 &vq->used->flags, r); 1435 vq_err(vq, "Failed to enable notification at %p: %d\n",
1391 return false; 1436 &vq->used->flags, r);
1437 return false;
1438 }
1439 } else {
1440 r = put_user(vq->avail_idx, vhost_avail_event(vq));
1441 if (r) {
1442 vq_err(vq, "Failed to update avail event index at %p: %d\n",
1443 vhost_avail_event(vq), r);
1444 return false;
1445 }
1446 }
1447 if (unlikely(vq->log_used)) {
1448 void __user *used;
1449 /* Make sure data is seen before log. */
1450 smp_wmb();
1451 used = vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX) ?
1452 vhost_avail_event(vq) : &vq->used->flags;
1453 /* Log used flags or event index entry write. Both are 16 bit
1454 * fields. */
1455 log_write(vq->log_base, vq->log_addr +
1456 (used - (void __user *)vq->used),
1457 sizeof(u16));
1458 if (vq->log_ctx)
1459 eventfd_signal(vq->log_ctx, 1);
1392 } 1460 }
1393 /* They could have slipped one in as we were doing that: make 1461 /* They could have slipped one in as we were doing that: make
1394 * sure it's written, then check again. */ 1462 * sure it's written, then check again. */
@@ -1404,15 +1472,17 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
1404} 1472}
1405 1473
1406/* We don't need to be notified again. */ 1474/* We don't need to be notified again. */
1407void vhost_disable_notify(struct vhost_virtqueue *vq) 1475void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1408{ 1476{
1409 int r; 1477 int r;
1410 1478
1411 if (vq->used_flags & VRING_USED_F_NO_NOTIFY) 1479 if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1412 return; 1480 return;
1413 vq->used_flags |= VRING_USED_F_NO_NOTIFY; 1481 vq->used_flags |= VRING_USED_F_NO_NOTIFY;
1414 r = put_user(vq->used_flags, &vq->used->flags); 1482 if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1415 if (r) 1483 r = put_user(vq->used_flags, &vq->used->flags);
1416 vq_err(vq, "Failed to enable notification at %p: %d\n", 1484 if (r)
1417 &vq->used->flags, r); 1485 vq_err(vq, "Failed to disable notification at %p: %d\n",
1486 &vq->used->flags, r);
1487 }
1418} 1488}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index b3363ae38518..8e03379dd30f 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -84,6 +84,12 @@ struct vhost_virtqueue {
84 /* Used flags */ 84 /* Used flags */
85 u16 used_flags; 85 u16 used_flags;
86 86
87 /* Last used index value we have signalled on */
88 u16 signalled_used;
89
90 /* Whether signalled_used above is still valid */
91 bool signalled_used_valid;
92
87 /* Log writes to used structure. */ 93 /* Log writes to used structure. */
88 bool log_used; 94 bool log_used;
89 u64 log_addr; 95 u64 log_addr;
@@ -149,8 +155,8 @@ void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
149void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *, 155void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
150 struct vring_used_elem *heads, unsigned count); 156 struct vring_used_elem *heads, unsigned count);
151void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *); 157void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
152void vhost_disable_notify(struct vhost_virtqueue *); 158void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
153bool vhost_enable_notify(struct vhost_virtqueue *); 159bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
154 160
155int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, 161int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
156 unsigned int log_num, u64 len); 162 unsigned int log_num, u64 len);
@@ -162,11 +168,12 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
162 } while (0) 168 } while (0)
163 169
164enum { 170enum {
165 VHOST_FEATURES = (1 << VIRTIO_F_NOTIFY_ON_EMPTY) | 171 VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
166 (1 << VIRTIO_RING_F_INDIRECT_DESC) | 172 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
167 (1 << VHOST_F_LOG_ALL) | 173 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
168 (1 << VHOST_NET_F_VIRTIO_NET_HDR) | 174 (1ULL << VHOST_F_LOG_ALL) |
169 (1 << VIRTIO_NET_F_MRG_RXBUF), 175 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
176 (1ULL << VIRTIO_NET_F_MRG_RXBUF),
170}; 177};
171 178
172static inline int vhost_has_feature(struct vhost_dev *dev, int bit) 179static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
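The switch from 1 << to 1ULL << in VHOST_FEATURES widens the feature mask to 64 bits: with a plain int constant, a shift count of 32 or more is undefined behaviour, so the wider type keeps the mask well-defined as feature bits climb. A two-line standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned long long mask = 1ULL << 32;	/* well-defined: bit 32 set */
	printf("1ULL << 32 = 0x%llx\n", mask);
	/* '1 << 32' would be undefined behaviour with a 32-bit int
	 * (shift count >= width of the promoted type). */
	return 0;
}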
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 0f1da45ba47d..e058ace2a4ad 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -40,9 +40,6 @@ struct virtio_balloon
40 /* Waiting for host to ack the pages we released. */ 40 /* Waiting for host to ack the pages we released. */
41 struct completion acked; 41 struct completion acked;
42 42
43 /* Do we have to tell Host *before* we reuse pages? */
44 bool tell_host_first;
45
46 /* The pages we've told the Host we're not using. */ 43 /* The pages we've told the Host we're not using. */
47 unsigned int num_pages; 44 unsigned int num_pages;
48 struct list_head pages; 45 struct list_head pages;
@@ -151,13 +148,14 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
151 vb->num_pages--; 148 vb->num_pages--;
152 } 149 }
153 150
154 if (vb->tell_host_first) { 151
155 tell_host(vb, vb->deflate_vq); 152 /*
156 release_pages_by_pfn(vb->pfns, vb->num_pfns); 153 * Note that if
157 } else { 154 * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
158 release_pages_by_pfn(vb->pfns, vb->num_pfns); 155 * is true, we *have* to do it in this order
159 tell_host(vb, vb->deflate_vq); 156 */
160 } 157 tell_host(vb, vb->deflate_vq);
158 release_pages_by_pfn(vb->pfns, vb->num_pfns);
161} 159}
162 160
163static inline void update_stat(struct virtio_balloon *vb, int idx, 161static inline void update_stat(struct virtio_balloon *vb, int idx,
@@ -325,9 +323,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
325 goto out_del_vqs; 323 goto out_del_vqs;
326 } 324 }
327 325
328 vb->tell_host_first
329 = virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
330
331 return 0; 326 return 0;
332 327
333out_del_vqs: 328out_del_vqs:
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index b0043fb26a4d..68b9136847af 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -82,6 +82,9 @@ struct vring_virtqueue
82 /* Host supports indirect buffers */ 82 /* Host supports indirect buffers */
83 bool indirect; 83 bool indirect;
84 84
85 /* Host publishes avail event idx */
86 bool event;
87
85 /* Number of free buffers */ 88 /* Number of free buffers */
86 unsigned int num_free; 89 unsigned int num_free;
87 /* Head of free buffer list. */ 90 /* Head of free buffer list. */
@@ -237,18 +240,22 @@ EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
237void virtqueue_kick(struct virtqueue *_vq) 240void virtqueue_kick(struct virtqueue *_vq)
238{ 241{
239 struct vring_virtqueue *vq = to_vvq(_vq); 242 struct vring_virtqueue *vq = to_vvq(_vq);
243 u16 new, old;
240 START_USE(vq); 244 START_USE(vq);
241 /* Descriptors and available array need to be set before we expose the 245 /* Descriptors and available array need to be set before we expose the
242 * new available array entries. */ 246 * new available array entries. */
243 virtio_wmb(); 247 virtio_wmb();
244 248
245 vq->vring.avail->idx += vq->num_added; 249 old = vq->vring.avail->idx;
250 new = vq->vring.avail->idx = old + vq->num_added;
246 vq->num_added = 0; 251 vq->num_added = 0;
247 252
248 /* Need to update avail index before checking if we should notify */ 253 /* Need to update avail index before checking if we should notify */
249 virtio_mb(); 254 virtio_mb();
250 255
251 if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY)) 256 if (vq->event ?
257 vring_need_event(vring_avail_event(&vq->vring), new, old) :
258 !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
252 /* Prod other side to tell it about changes. */ 259 /* Prod other side to tell it about changes. */
253 vq->notify(&vq->vq); 260 vq->notify(&vq->vq);
254 261
@@ -324,6 +331,14 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
324 ret = vq->data[i]; 331 ret = vq->data[i];
325 detach_buf(vq, i); 332 detach_buf(vq, i);
326 vq->last_used_idx++; 333 vq->last_used_idx++;
334 /* If we expect an interrupt for the next entry, tell host
335 * by writing event index and flush out the write before
336 * the read in the next get_buf call. */
337 if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
338 vring_used_event(&vq->vring) = vq->last_used_idx;
339 virtio_mb();
340 }
341
327 END_USE(vq); 342 END_USE(vq);
328 return ret; 343 return ret;
329} 344}
@@ -345,7 +360,11 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
345 360
346 /* We optimistically turn back on interrupts, then check if there was 361 /* We optimistically turn back on interrupts, then check if there was
347 * more to do. */ 362 * more to do. */
363 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
364 * either clear the flags bit or point the event index at the next
365 * entry. Always do both to keep code simple. */
348 vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; 366 vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
367 vring_used_event(&vq->vring) = vq->last_used_idx;
349 virtio_mb(); 368 virtio_mb();
350 if (unlikely(more_used(vq))) { 369 if (unlikely(more_used(vq))) {
351 END_USE(vq); 370 END_USE(vq);
@@ -357,6 +376,33 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
357} 376}
358EXPORT_SYMBOL_GPL(virtqueue_enable_cb); 377EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
359 378
379bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
380{
381 struct vring_virtqueue *vq = to_vvq(_vq);
382 u16 bufs;
383
384 START_USE(vq);
385
386 /* We optimistically turn back on interrupts, then check if there was
387 * more to do. */
388 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
389 * either clear the flags bit or point the event index at the next
390 * entry. Always do both to keep code simple. */
391 vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
392 /* TODO: tune this threshold */
393 bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
394 vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
395 virtio_mb();
396 if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
397 END_USE(vq);
398 return false;
399 }
400
401 END_USE(vq);
402 return true;
403}
404EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
405
360void *virtqueue_detach_unused_buf(struct virtqueue *_vq) 406void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
361{ 407{
362 struct vring_virtqueue *vq = to_vvq(_vq); 408 struct vring_virtqueue *vq = to_vvq(_vq);
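virtqueue_enable_cb_delayed() above is the batching variant of virtqueue_enable_cb(): instead of asking for an interrupt on the very next used entry, it points used_event roughly 3/4 of the way through the buffers still outstanding, so a single interrupt can cover many completions. A standalone model of the threshold arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t avail_idx = 140, last_used_idx = 100;
	/* buffers the device still owes us, modulo 2^16 */
	uint16_t outstanding = (uint16_t)(avail_idx - last_used_idx);
	uint16_t bufs = outstanding * 3 / 4;	/* kernel's tunable threshold */

	/* interrupt requested once the used idx reaches last_used + bufs */
	printf("outstanding=%u, used_event set to %u\n",
	       outstanding, (uint16_t)(last_used_idx + bufs));
	return 0;
}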
@@ -438,6 +484,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
438#endif 484#endif
439 485
440 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); 486 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
487 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
441 488
442 /* No callback? Tell other side not to bother us. */ 489 /* No callback? Tell other side not to bother us. */
443 if (!callback) 490 if (!callback)
@@ -472,6 +519,8 @@ void vring_transport_features(struct virtio_device *vdev)
472 switch (i) { 519 switch (i) {
473 case VIRTIO_RING_F_INDIRECT_DESC: 520 case VIRTIO_RING_F_INDIRECT_DESC:
474 break; 521 break;
522 case VIRTIO_RING_F_EVENT_IDX:
523 break;
475 default: 524 default:
476 /* We don't understand this bit. */ 525 /* We don't understand this bit. */
477 clear_bit(i, vdev->features); 526 clear_bit(i, vdev->features);