aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/acpi/processor_idle.c12
-rw-r--r--drivers/acpi/scan.c16
-rw-r--r--drivers/acpi/video.c4
-rw-r--r--drivers/amba/bus.c9
-rw-r--r--drivers/ata/Kconfig59
-rw-r--r--drivers/ata/Makefile8
-rw-r--r--drivers/ata/ahci.c451
-rw-r--r--drivers/ata/ata_generic.c16
-rw-r--r--drivers/ata/ata_piix.c71
-rw-r--r--drivers/ata/libata-acpi.c165
-rw-r--r--drivers/ata/libata-core.c1329
-rw-r--r--drivers/ata/libata-eh.c922
-rw-r--r--drivers/ata/libata-pmp.c1191
-rw-r--r--drivers/ata/libata-scsi.c496
-rw-r--r--drivers/ata/libata-sff.c69
-rw-r--r--drivers/ata/libata.h41
-rw-r--r--drivers/ata/pata_acpi.c395
-rw-r--r--drivers/ata/pata_ali.c17
-rw-r--r--drivers/ata/pata_amd.c43
-rw-r--r--drivers/ata/pata_artop.c20
-rw-r--r--drivers/ata/pata_at32.c441
-rw-r--r--drivers/ata/pata_atiixp.c15
-rw-r--r--drivers/ata/pata_bf54x.c1627
-rw-r--r--drivers/ata/pata_cmd640.c4
-rw-r--r--drivers/ata/pata_cmd64x.c43
-rw-r--r--drivers/ata/pata_cs5520.c47
-rw-r--r--drivers/ata/pata_cs5530.c4
-rw-r--r--drivers/ata/pata_cs5535.c4
-rw-r--r--drivers/ata/pata_cypress.c4
-rw-r--r--drivers/ata/pata_efar.c11
-rw-r--r--drivers/ata/pata_hpt366.c4
-rw-r--r--drivers/ata/pata_hpt37x.c28
-rw-r--r--drivers/ata/pata_hpt3x2n.c11
-rw-r--r--drivers/ata/pata_hpt3x3.c10
-rw-r--r--drivers/ata/pata_icside.c39
-rw-r--r--drivers/ata/pata_isapnp.c8
-rw-r--r--drivers/ata/pata_it8213.c11
-rw-r--r--drivers/ata/pata_it821x.c17
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c24
-rw-r--r--drivers/ata/pata_jmicron.c24
-rw-r--r--drivers/ata/pata_legacy.c27
-rw-r--r--drivers/ata/pata_marvell.c12
-rw-r--r--drivers/ata/pata_mpc52xx.c9
-rw-r--r--drivers/ata/pata_mpiix.c25
-rw-r--r--drivers/ata/pata_netcell.c5
-rw-r--r--drivers/ata/pata_ns87410.c11
-rw-r--r--drivers/ata/pata_ns87415.c467
-rw-r--r--drivers/ata/pata_oldpiix.c11
-rw-r--r--drivers/ata/pata_opti.c11
-rw-r--r--drivers/ata/pata_optidma.c26
-rw-r--r--drivers/ata/pata_pcmcia.c16
-rw-r--r--drivers/ata/pata_pdc2027x.c114
-rw-r--r--drivers/ata/pata_pdc202xx_old.c23
-rw-r--r--drivers/ata/pata_platform.c16
-rw-r--r--drivers/ata/pata_qdi.c15
-rw-r--r--drivers/ata/pata_radisys.c4
-rw-r--r--drivers/ata/pata_rz1000.c13
-rw-r--r--drivers/ata/pata_sc1200.c4
-rw-r--r--drivers/ata/pata_scc.c66
-rw-r--r--drivers/ata/pata_serverworks.c8
-rw-r--r--drivers/ata/pata_sil680.c11
-rw-r--r--drivers/ata/pata_sis.c33
-rw-r--r--drivers/ata/pata_sl82c105.c11
-rw-r--r--drivers/ata/pata_triflex.c11
-rw-r--r--drivers/ata/pata_via.c16
-rw-r--r--drivers/ata/pata_winbond.c13
-rw-r--r--drivers/ata/pdc_adma.c103
-rw-r--r--drivers/ata/sata_inic162x.c34
-rw-r--r--drivers/ata/sata_mv.c68
-rw-r--r--drivers/ata/sata_nv.c53
-rw-r--r--drivers/ata/sata_promise.c27
-rw-r--r--drivers/ata/sata_qstor.c17
-rw-r--r--drivers/ata/sata_sil.c53
-rw-r--r--drivers/ata/sata_sil24.c341
-rw-r--r--drivers/ata/sata_sis.c2
-rw-r--r--drivers/ata/sata_svw.c14
-rw-r--r--drivers/ata/sata_sx4.c25
-rw-r--r--drivers/ata/sata_uli.c16
-rw-r--r--drivers/ata/sata_via.c35
-rw-r--r--drivers/ata/sata_vsc.c16
-rw-r--r--drivers/base/Kconfig8
-rw-r--r--drivers/base/base.h2
-rw-r--r--drivers/base/bus.c116
-rw-r--r--drivers/base/class.c60
-rw-r--r--drivers/base/core.c108
-rw-r--r--drivers/base/firmware_class.c14
-rw-r--r--drivers/base/memory.c3
-rw-r--r--drivers/base/platform.c26
-rw-r--r--drivers/base/power/Makefile2
-rw-r--r--drivers/base/power/main.c344
-rw-r--r--drivers/base/power/power.h38
-rw-r--r--drivers/base/power/resume.c149
-rw-r--r--drivers/base/power/suspend.c210
-rw-r--r--drivers/base/sys.c73
-rw-r--r--drivers/block/Kconfig3
-rw-r--r--drivers/char/dsp56k.c4
-rw-r--r--drivers/char/ip2/ip2main.c12
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c6
-rw-r--r--drivers/char/istallion.c8
-rw-r--r--drivers/char/lp.c5
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c5
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c5
-rw-r--r--drivers/char/pty.c9
-rw-r--r--drivers/char/raw.c5
-rw-r--r--drivers/char/snsc.c3
-rw-r--r--drivers/char/stallion.c7
-rw-r--r--drivers/char/tipar.c6
-rw-r--r--drivers/char/viotape.c10
-rw-r--r--drivers/cpufreq/Kconfig27
-rw-r--r--drivers/cpufreq/cpufreq.c36
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c19
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c22
-rw-r--r--drivers/cpufreq/cpufreq_stats.c18
-rw-r--r--drivers/edac/edac_mc_sysfs.c3
-rw-r--r--drivers/eisa/eisa-bus.c9
-rw-r--r--drivers/firewire/fw-device.c11
-rw-r--r--drivers/firmware/dmi-id.c62
-rw-r--r--drivers/firmware/edd.c4
-rw-r--r--drivers/firmware/efivars.c4
-rw-r--r--drivers/i2c/busses/Kconfig23
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c2
-rw-r--r--drivers/i2c/busses/i2c-au1550.c11
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c16
-rw-r--r--drivers/i2c/busses/i2c-davinci.c586
-rw-r--r--drivers/i2c/busses/i2c-i801.c5
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c9
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c8
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c83
-rw-r--r--drivers/i2c/busses/i2c-stub.c79
-rw-r--r--drivers/i2c/chips/pcf8574.c14
-rw-r--r--drivers/i2c/chips/tps65010.c299
-rw-r--r--drivers/i2c/i2c-core.c58
-rw-r--r--drivers/i2c/i2c-dev.c20
-rw-r--r--drivers/ide/Kconfig16
-rw-r--r--drivers/ide/arm/icside.c45
-rw-r--r--drivers/ide/cris/ide-cris.c8
-rw-r--r--drivers/ide/ide-acpi.c1
-rw-r--r--drivers/ide/ide-dma.c28
-rw-r--r--drivers/ide/ide-io.c10
-rw-r--r--drivers/ide/ide-iops.c133
-rw-r--r--drivers/ide/ide-lib.c78
-rw-r--r--drivers/ide/ide-probe.c7
-rw-r--r--drivers/ide/ide.c19
-rw-r--r--drivers/ide/legacy/ide_platform.c2
-rw-r--r--drivers/ide/mips/au1xxx-ide.c28
-rw-r--r--drivers/ide/pci/aec62xx.c12
-rw-r--r--drivers/ide/pci/alim15x3.c58
-rw-r--r--drivers/ide/pci/amd74xx.c26
-rw-r--r--drivers/ide/pci/atiixp.c33
-rw-r--r--drivers/ide/pci/cmd64x.c9
-rw-r--r--drivers/ide/pci/cs5520.c32
-rw-r--r--drivers/ide/pci/cs5530.c50
-rw-r--r--drivers/ide/pci/cs5535.c38
-rw-r--r--drivers/ide/pci/hpt34x.c9
-rw-r--r--drivers/ide/pci/hpt366.c18
-rw-r--r--drivers/ide/pci/it8213.c34
-rw-r--r--drivers/ide/pci/it821x.c90
-rw-r--r--drivers/ide/pci/jmicron.c15
-rw-r--r--drivers/ide/pci/pdc202xx_new.c24
-rw-r--r--drivers/ide/pci/pdc202xx_old.c9
-rw-r--r--drivers/ide/pci/piix.c46
-rw-r--r--drivers/ide/pci/sc1200.c54
-rw-r--r--drivers/ide/pci/scc_pata.c28
-rw-r--r--drivers/ide/pci/serverworks.c14
-rw-r--r--drivers/ide/pci/sgiioc4.c24
-rw-r--r--drivers/ide/pci/siimage.c29
-rw-r--r--drivers/ide/pci/sis5513.c16
-rw-r--r--drivers/ide/pci/sl82c105.c23
-rw-r--r--drivers/ide/pci/slc90e66.c18
-rw-r--r--drivers/ide/pci/tc86c001.c9
-rw-r--r--drivers/ide/pci/triflex.c10
-rw-r--r--drivers/ide/pci/via82cxxx.c29
-rw-r--r--drivers/ide/ppc/pmac.c323
-rw-r--r--drivers/ieee1394/nodemgr.c17
-rw-r--r--drivers/infiniband/core/sysfs.c9
-rw-r--r--drivers/input/input.c62
-rw-r--r--drivers/input/keyboard/atakbd.c157
-rw-r--r--drivers/input/misc/pcspkr.c3
-rw-r--r--drivers/input/mouse/atarimouse.c18
-rw-r--r--drivers/input/serio/serio.c11
-rw-r--r--drivers/isdn/hisax/avm_pci.c224
-rw-r--r--drivers/isdn/hisax/bkm_a8.c8
-rw-r--r--drivers/isdn/hisax/diva.c513
-rw-r--r--drivers/isdn/hisax/elsa.c494
-rw-r--r--drivers/isdn/hisax/sedlbauer.c276
-rw-r--r--drivers/isdn/hisax/telespci.c8
-rw-r--r--drivers/isdn/hisax/w6692.c7
-rw-r--r--drivers/isdn/hysdn/hysdn_init.c274
-rw-r--r--drivers/kvm/Kconfig1
-rw-r--r--drivers/kvm/Makefile2
-rw-r--r--drivers/kvm/i8259.c450
-rw-r--r--drivers/kvm/ioapic.c388
-rw-r--r--drivers/kvm/irq.c98
-rw-r--r--drivers/kvm/irq.h165
-rw-r--r--drivers/kvm/kvm.h201
-rw-r--r--drivers/kvm/kvm_main.c1486
-rw-r--r--drivers/kvm/kvm_svm.h3
-rw-r--r--drivers/kvm/lapic.c1064
-rw-r--r--drivers/kvm/mmu.c51
-rw-r--r--drivers/kvm/paging_tmpl.h84
-rw-r--r--drivers/kvm/svm.c1046
-rw-r--r--drivers/kvm/vmx.c1034
-rw-r--r--drivers/kvm/vmx.h73
-rw-r--r--drivers/kvm/x86_emulate.c411
-rw-r--r--drivers/kvm/x86_emulate.h20
-rw-r--r--drivers/md/dm-emc.c2
-rw-r--r--drivers/md/md.c3
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c5
-rw-r--r--drivers/media/video/bt8xx/bttv-i2c.c7
-rw-r--r--drivers/media/video/cx23885/cx23885-i2c.c7
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c10
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-core.c7
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-sysfs.c4
-rw-r--r--drivers/media/video/saa7134/saa7134-i2c.c7
-rw-r--r--drivers/media/video/usbvision/usbvision-i2c.c6
-rw-r--r--drivers/media/video/videobuf-core.c2
-rw-r--r--drivers/media/video/videobuf-dma-sg.c2
-rw-r--r--drivers/media/video/videobuf-vmalloc.c2
-rw-r--r--drivers/media/video/w9968cf.c11
-rw-r--r--drivers/misc/tifm_core.c9
-rw-r--r--drivers/mmc/core/bus.c21
-rw-r--r--drivers/mmc/core/host.c2
-rw-r--r--drivers/mtd/Kconfig8
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c38
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c2
-rw-r--r--drivers/mtd/chips/jedec_probe.c37
-rw-r--r--drivers/mtd/devices/Kconfig25
-rw-r--r--drivers/mtd/devices/Makefile1
-rw-r--r--drivers/mtd/devices/at91_dataflash26.c485
-rw-r--r--drivers/mtd/devices/docprobe.c4
-rw-r--r--drivers/mtd/devices/m25p80.c271
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c17
-rw-r--r--drivers/mtd/devices/pmc551.c27
-rw-r--r--drivers/mtd/inftlmount.c3
-rw-r--r--drivers/mtd/maps/Kconfig43
-rw-r--r--drivers/mtd/maps/Makefile6
-rw-r--r--drivers/mtd/maps/alchemy-flash.c14
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c298
-rw-r--r--drivers/mtd/maps/lubbock-flash.c168
-rw-r--r--drivers/mtd/maps/mainstone-flash.c180
-rw-r--r--drivers/mtd/maps/nettel.c65
-rw-r--r--drivers/mtd/maps/ocelot.c175
-rw-r--r--drivers/mtd/maps/physmap_of.c1
-rw-r--r--drivers/mtd/maps/pmcmsp-flash.c22
-rw-r--r--drivers/mtd/maps/pmcmsp-ramroot.c1
-rw-r--r--drivers/mtd/maps/pq2fads.c88
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c200
-rw-r--r--drivers/mtd/maps/tqm834x.c286
-rw-r--r--drivers/mtd/mtd_blkdevs.c7
-rw-r--r--drivers/mtd/mtdchar.c3
-rw-r--r--drivers/mtd/mtdconcat.c2
-rw-r--r--drivers/mtd/mtdcore.c2
-rw-r--r--drivers/mtd/mtdcore.h11
-rw-r--r--drivers/mtd/mtdoops.c376
-rw-r--r--drivers/mtd/nand/Kconfig31
-rw-r--r--drivers/mtd/nand/Makefile2
-rw-r--r--drivers/mtd/nand/alauda.c742
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c788
-rw-r--r--drivers/mtd/nand/cafe_nand.c51
-rw-r--r--drivers/mtd/nand/diskonchip.c2
-rw-r--r--drivers/mtd/nand/excite_nandflash.c1
-rw-r--r--drivers/mtd/nand/nand_base.c9
-rw-r--r--drivers/mtd/nand/nand_ids.c1
-rw-r--r--drivers/mtd/nand/nandsim.c8
-rw-r--r--drivers/mtd/nand/ndfc.c8
-rw-r--r--drivers/mtd/nand/s3c2410.c4
-rw-r--r--drivers/mtd/onenand/Kconfig23
-rw-r--r--drivers/mtd/onenand/Makefile3
-rw-r--r--drivers/mtd/onenand/onenand_base.c665
-rw-r--r--drivers/mtd/onenand/onenand_sim.c495
-rw-r--r--drivers/mtd/rfd_ftl.c8
-rw-r--r--drivers/mtd/ubi/scan.c17
-rw-r--r--drivers/net/atarilance.c2
-rw-r--r--drivers/net/bnx2.c16
-rw-r--r--drivers/net/ibmveth.c2
-rw-r--r--drivers/net/macmace.c6
-rw-r--r--drivers/net/mv643xx_eth.c2
-rw-r--r--drivers/net/mvme147.c1
-rw-r--r--drivers/net/sky2.c114
-rw-r--r--drivers/net/sky2.h2
-rw-r--r--drivers/net/tg3.c31
-rw-r--r--drivers/net/wireless/b43/phy.c1
-rw-r--r--drivers/net/wireless/b43/pio.h1
-rw-r--r--drivers/net/wireless/b43/sysfs.c5
-rw-r--r--drivers/pci/hotplug.c28
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c2
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c74
-rw-r--r--drivers/pci/hotplug/ibmphp_hpc.c57
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c60
-rw-r--r--drivers/pci/hotplug/pciehp_core.c24
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c20
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c203
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c24
-rw-r--r--drivers/pci/hotplug/rpadlpar_sysfs.c6
-rw-r--r--drivers/pci/msi.c2
-rw-r--r--drivers/pci/pci-driver.c6
-rw-r--r--drivers/pci/pci.c20
-rw-r--r--drivers/pci/pci.h9
-rw-r--r--drivers/pci/pcie/Kconfig9
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c9
-rw-r--r--drivers/pci/probe.c53
-rw-r--r--drivers/pci/proc.c7
-rw-r--r--drivers/pci/quirks.c43
-rw-r--r--drivers/pci/setup-bus.c5
-rw-r--r--drivers/pci/setup-irq.c2
-rw-r--r--drivers/pcmcia/cs.c10
-rw-r--r--drivers/pcmcia/ds.c26
-rw-r--r--drivers/pcmcia/pxa2xx_mainstone.c2
-rw-r--r--drivers/pcmcia/pxa2xx_sharpsl.c2
-rw-r--r--drivers/power/power_supply.h3
-rw-r--r--drivers/power/power_supply_sysfs.c17
-rw-r--r--drivers/rtc/rtc-sh.c51
-rw-r--r--drivers/s390/block/dasd_int.h3
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/con3215.c3
-rw-r--r--drivers/s390/char/con3270.c3
-rw-r--r--drivers/s390/char/sclp.c5
-rw-r--r--drivers/s390/char/tape_3590.c26
-rw-r--r--drivers/s390/char/tty3270.c9
-rw-r--r--drivers/s390/char/tty3270.h16
-rw-r--r--drivers/s390/char/vmwatchdog.c4
-rw-r--r--drivers/s390/char/zcore.c7
-rw-r--r--drivers/s390/cio/ccwgroup.c70
-rw-r--r--drivers/s390/cio/chp.c30
-rw-r--r--drivers/s390/cio/cio.c5
-rw-r--r--drivers/s390/cio/cmf.c232
-rw-r--r--drivers/s390/cio/css.c98
-rw-r--r--drivers/s390/cio/css.h3
-rw-r--r--drivers/s390/cio/device.c105
-rw-r--r--drivers/s390/cio/device.h1
-rw-r--r--drivers/s390/cio/device_fsm.c147
-rw-r--r--drivers/s390/cio/device_ops.c241
-rw-r--r--drivers/s390/cio/qdio.c39
-rw-r--r--drivers/s390/crypto/ap_bus.c19
-rw-r--r--drivers/s390/crypto/zcrypt_mono.c4
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c9
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.h45
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c10
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c10
-rw-r--r--drivers/s390/scsi/zfcp_erp.c18
-rw-r--r--drivers/scsi/ipr.c19
-rw-r--r--drivers/scsi/libsas/sas_ata.c11
-rw-r--r--drivers/scsi/scsi_sysfs.c9
-rw-r--r--drivers/serial/sh-sci.c39
-rw-r--r--drivers/serial/sh-sci.h34
-rw-r--r--drivers/sh/Makefile4
-rw-r--r--drivers/sh/maple/Makefile3
-rw-r--r--drivers/sh/maple/maple.c735
-rw-r--r--drivers/spi/spi.c7
-rw-r--r--drivers/ssb/main.c1
-rw-r--r--drivers/usb/Makefile22
-rw-r--r--drivers/usb/atm/cxacru.c43
-rw-r--r--drivers/usb/atm/speedtch.c3
-rw-r--r--drivers/usb/atm/ueagle-atm.c1384
-rw-r--r--drivers/usb/class/usblp.c116
-rw-r--r--drivers/usb/core/config.c24
-rw-r--r--drivers/usb/core/devio.c83
-rw-r--r--drivers/usb/core/driver.c119
-rw-r--r--drivers/usb/core/endpoint.c1
-rw-r--r--drivers/usb/core/generic.c26
-rw-r--r--drivers/usb/core/hcd.c718
-rw-r--r--drivers/usb/core/hcd.h46
-rw-r--r--drivers/usb/core/hub.c276
-rw-r--r--drivers/usb/core/message.c78
-rw-r--r--drivers/usb/core/quirks.c81
-rw-r--r--drivers/usb/core/sysfs.c50
-rw-r--r--drivers/usb/core/urb.c106
-rw-r--r--drivers/usb/core/usb.c41
-rw-r--r--drivers/usb/core/usb.h5
-rw-r--r--drivers/usb/gadget/Kconfig26
-rw-r--r--drivers/usb/gadget/Makefile1
-rw-r--r--drivers/usb/gadget/amd5536udc.c9
-rw-r--r--drivers/usb/gadget/at91_udc.c2
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c2077
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.h352
-rw-r--r--drivers/usb/gadget/config.c2
-rw-r--r--drivers/usb/gadget/dummy_hcd.c93
-rw-r--r--drivers/usb/gadget/epautoconf.c2
-rw-r--r--drivers/usb/gadget/ether.c155
-rw-r--r--drivers/usb/gadget/file_storage.c249
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.c13
-rw-r--r--drivers/usb/gadget/gmidi.c82
-rw-r--r--drivers/usb/gadget/goku_udc.c2
-rw-r--r--drivers/usb/gadget/inode.c46
-rw-r--r--drivers/usb/gadget/lh7a40x_udc.h2
-rw-r--r--drivers/usb/gadget/m66592-udc.c2
-rw-r--r--drivers/usb/gadget/net2280.c2
-rw-r--r--drivers/usb/gadget/omap_udc.c12
-rw-r--r--drivers/usb/gadget/pxa2xx_udc.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c2
-rw-r--r--drivers/usb/gadget/serial.c174
-rw-r--r--drivers/usb/gadget/usbstring.c2
-rw-r--r--drivers/usb/gadget/zero.c239
-rw-r--r--drivers/usb/host/Kconfig13
-rw-r--r--drivers/usb/host/ehci-au1xxx.c6
-rw-r--r--drivers/usb/host/ehci-hcd.c22
-rw-r--r--drivers/usb/host/ehci-pci.c5
-rw-r--r--drivers/usb/host/ehci-ppc-soc.c6
-rw-r--r--drivers/usb/host/ehci-ps3.c2
-rw-r--r--drivers/usb/host/ehci-q.c99
-rw-r--r--drivers/usb/host/ehci-sched.c47
-rw-r--r--drivers/usb/host/isp116x-hcd.c61
-rw-r--r--drivers/usb/host/ohci-dbg.c8
-rw-r--r--drivers/usb/host/ohci-hcd.c234
-rw-r--r--drivers/usb/host/ohci-mem.c1
-rw-r--r--drivers/usb/host/ohci-pci.c22
-rw-r--r--drivers/usb/host/ohci-ppc-of.c5
-rw-r--r--drivers/usb/host/ohci-ppc-soc.c5
-rw-r--r--drivers/usb/host/ohci-q.c187
-rw-r--r--drivers/usb/host/ohci-ssb.c247
-rw-r--r--drivers/usb/host/ohci.h39
-rw-r--r--drivers/usb/host/r8a66597-hcd.c172
-rw-r--r--drivers/usb/host/sl811-hcd.c74
-rw-r--r--drivers/usb/host/u132-hcd.c370
-rw-r--r--drivers/usb/host/uhci-debug.c4
-rw-r--r--drivers/usb/host/uhci-hcd.h16
-rw-r--r--drivers/usb/host/uhci-q.c70
-rw-r--r--drivers/usb/misc/adutux.c3
-rw-r--r--drivers/usb/misc/berry_charge.c7
-rw-r--r--drivers/usb/misc/ftdi-elan.c8
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c200
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.h130
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c60
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.c354
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.h1315
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_struct.h144
-rw-r--r--drivers/usb/mon/mon_bin.c42
-rw-r--r--drivers/usb/mon/mon_main.c25
-rw-r--r--drivers/usb/mon/mon_text.c72
-rw-r--r--drivers/usb/mon/usb_mon.h2
-rw-r--r--drivers/usb/serial/Kconfig10
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/ark3116.c5
-rw-r--r--drivers/usb/serial/bus.c16
-rw-r--r--drivers/usb/serial/ch341.c354
-rw-r--r--drivers/usb/serial/cp2101.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c5
-rw-r--r--drivers/usb/serial/funsoft.c21
-rw-r--r--drivers/usb/serial/ipaq.c3
-rw-r--r--drivers/usb/serial/kl5kusb105.c28
-rw-r--r--drivers/usb/serial/kobil_sct.c144
-rw-r--r--drivers/usb/serial/mct_u232.c28
-rw-r--r--drivers/usb/serial/oti6858.c2
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h1
-rw-r--r--drivers/usb/serial/safe_serial.c11
-rw-r--r--drivers/usb/serial/usb-serial.c42
-rw-r--r--drivers/usb/serial/visor.c64
-rw-r--r--drivers/usb/storage/initializers.c14
-rw-r--r--drivers/usb/storage/initializers.h3
-rw-r--r--drivers/usb/storage/shuttle_usbat.c3
-rw-r--r--drivers/usb/storage/unusual_devs.h34
-rw-r--r--drivers/usb/storage/usb.c4
-rw-r--r--drivers/usb/usb-skeleton.c1
-rw-r--r--drivers/video/backlight/hp680_bl.c4
-rw-r--r--drivers/video/output.c29
-rw-r--r--drivers/video/pvr2fb.c4
-rw-r--r--drivers/w1/w1.c19
461 files changed, 29112 insertions, 13681 deletions
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index feab124d8e..cbfc81579c 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -194,7 +194,7 @@ int acpi_bus_set_power(acpi_handle handle, int state)
194 194
195 if (!device->flags.power_manageable) { 195 if (!device->flags.power_manageable) {
196 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n", 196 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
197 device->dev.kobj.name)); 197 kobject_name(&device->dev.kobj)));
198 return -ENODEV; 198 return -ENODEV;
199 } 199 }
200 /* 200 /*
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 1e8287b4f4..1f6fb38de0 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -276,21 +276,12 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
276 276
277static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) 277static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
278{ 278{
279#ifdef CONFIG_GENERIC_CLOCKEVENTS
280 unsigned long reason; 279 unsigned long reason;
281 280
282 reason = pr->power.timer_broadcast_on_state < INT_MAX ? 281 reason = pr->power.timer_broadcast_on_state < INT_MAX ?
283 CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; 282 CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
284 283
285 clockevents_notify(reason, &pr->id); 284 clockevents_notify(reason, &pr->id);
286#else
287 cpumask_t mask = cpumask_of_cpu(pr->id);
288
289 if (pr->power.timer_broadcast_on_state < INT_MAX)
290 on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
291 else
292 on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
293#endif
294} 285}
295 286
296/* Power(C) State timer broadcast control */ 287/* Power(C) State timer broadcast control */
@@ -298,8 +289,6 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr,
298 struct acpi_processor_cx *cx, 289 struct acpi_processor_cx *cx,
299 int broadcast) 290 int broadcast)
300{ 291{
301#ifdef CONFIG_GENERIC_CLOCKEVENTS
302
303 int state = cx - pr->power.states; 292 int state = cx - pr->power.states;
304 293
305 if (state >= pr->power.timer_broadcast_on_state) { 294 if (state >= pr->power.timer_broadcast_on_state) {
@@ -309,7 +298,6 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr,
309 CLOCK_EVT_NOTIFY_BROADCAST_EXIT; 298 CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
310 clockevents_notify(reason, &pr->id); 299 clockevents_notify(reason, &pr->id);
311 } 300 }
312#endif
313} 301}
314 302
315#else 303#else
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 64620d6687..5b4d462117 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -319,16 +319,18 @@ static int acpi_bus_match(struct device *dev, struct device_driver *drv)
319 return !acpi_match_device_ids(acpi_dev, acpi_drv->ids); 319 return !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
320} 320}
321 321
322static int acpi_device_uevent(struct device *dev, char **envp, int num_envp, 322static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
323 char *buffer, int buffer_size)
324{ 323{
325 struct acpi_device *acpi_dev = to_acpi_device(dev); 324 struct acpi_device *acpi_dev = to_acpi_device(dev);
325 int len;
326 326
327 strcpy(buffer, "MODALIAS="); 327 if (add_uevent_var(env, "MODALIAS="))
328 if (create_modalias(acpi_dev, buffer + 9, buffer_size - 9) > 0) { 328 return -ENOMEM;
329 envp[0] = buffer; 329 len = create_modalias(acpi_dev, &env->buf[env->buflen - 1],
330 envp[1] = NULL; 330 sizeof(env->buf) - env->buflen);
331 } 331 if (len >= (sizeof(env->buf) - env->buflen))
332 return -ENOMEM;
333 env->buflen += len;
332 return 0; 334 return 0;
333} 335}
334 336
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index d05891f162..b8a2095cb5 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -316,7 +316,7 @@ static int acpi_video_output_get(struct output_device *od)
316{ 316{
317 unsigned long state; 317 unsigned long state;
318 struct acpi_video_device *vd = 318 struct acpi_video_device *vd =
319 (struct acpi_video_device *)class_get_devdata(&od->class_dev); 319 (struct acpi_video_device *)dev_get_drvdata(&od->dev);
320 acpi_video_device_get_state(vd, &state); 320 acpi_video_device_get_state(vd, &state);
321 return (int)state; 321 return (int)state;
322} 322}
@@ -325,7 +325,7 @@ static int acpi_video_output_set(struct output_device *od)
325{ 325{
326 unsigned long state = od->request_state; 326 unsigned long state = od->request_state;
327 struct acpi_video_device *vd= 327 struct acpi_video_device *vd=
328 (struct acpi_video_device *)class_get_devdata(&od->class_dev); 328 (struct acpi_video_device *)dev_get_drvdata(&od->dev);
329 return acpi_video_device_set_state(vd, state); 329 return acpi_video_device_set_state(vd, state);
330} 330}
331 331
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 268e301775..6b94fb7be5 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -44,15 +44,12 @@ static int amba_match(struct device *dev, struct device_driver *drv)
44} 44}
45 45
46#ifdef CONFIG_HOTPLUG 46#ifdef CONFIG_HOTPLUG
47static int amba_uevent(struct device *dev, char **envp, int nr_env, char *buf, int bufsz) 47static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
48{ 48{
49 struct amba_device *pcdev = to_amba_device(dev); 49 struct amba_device *pcdev = to_amba_device(dev);
50 int retval = 0, i = 0, len = 0; 50 int retval = 0;
51 51
52 retval = add_uevent_var(envp, nr_env, &i, 52 retval = add_uevent_var(env, "AMBA_ID=%08x", pcdev->periphid);
53 buf, bufsz, &len,
54 "AMBA_ID=%08x", pcdev->periphid);
55 envp[i] = NULL;
56 return retval; 53 return retval;
57} 54}
58#else 55#else
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index d8046a113c..4672066167 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -173,6 +173,15 @@ config SATA_INIC162X
173 help 173 help
174 This option enables support for Initio 162x Serial ATA. 174 This option enables support for Initio 162x Serial ATA.
175 175
176config PATA_ACPI
177 tristate "ACPI firmware driver for PATA"
178 depends on ATA_ACPI
179 help
180 This option enables an ACPI method driver which drives
181 motherboard PATA controller interfaces through the ACPI
182 firmware in the BIOS. This driver can sometimes handle
183 otherwise unsupported hardware.
184
176config PATA_ALI 185config PATA_ALI
177 tristate "ALi PATA support (Experimental)" 186 tristate "ALi PATA support (Experimental)"
178 depends on PCI && EXPERIMENTAL 187 depends on PCI && EXPERIMENTAL
@@ -192,16 +201,25 @@ config PATA_AMD
192 If unsure, say N. 201 If unsure, say N.
193 202
194config PATA_ARTOP 203config PATA_ARTOP
195 tristate "ARTOP 6210/6260 PATA support (Experimental)" 204 tristate "ARTOP 6210/6260 PATA support"
196 depends on PCI && EXPERIMENTAL 205 depends on PCI
197 help 206 help
198 This option enables support for ARTOP PATA controllers. 207 This option enables support for ARTOP PATA controllers.
199 208
200 If unsure, say N. 209 If unsure, say N.
201 210
211config PATA_AT32
212 tristate "Atmel AVR32 PATA support (Experimental)"
213 depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL
214 help
215 This option enables support for the IDE devices on the
216 Atmel AT32AP platform.
217
218 If unsure, say N.
219
202config PATA_ATIIXP 220config PATA_ATIIXP
203 tristate "ATI PATA support (Experimental)" 221 tristate "ATI PATA support"
204 depends on PCI && EXPERIMENTAL 222 depends on PCI
205 help 223 help
206 This option enables support for the ATI ATA interfaces 224 This option enables support for the ATI ATA interfaces
207 found on the many ATI chipsets. 225 found on the many ATI chipsets.
@@ -219,8 +237,8 @@ config PATA_CMD640_PCI
219 If unsure, say N. 237 If unsure, say N.
220 238
221config PATA_CMD64X 239config PATA_CMD64X
222 tristate "CMD64x PATA support (Very Experimental)" 240 tristate "CMD64x PATA support"
223 depends on PCI&& EXPERIMENTAL 241 depends on PCI
224 help 242 help
225 This option enables support for the CMD64x series chips 243 This option enables support for the CMD64x series chips
226 except for the CMD640. 244 except for the CMD640.
@@ -282,8 +300,8 @@ config ATA_GENERIC
282 If unsure, say N. 300 If unsure, say N.
283 301
284config PATA_HPT366 302config PATA_HPT366
285 tristate "HPT 366/368 PATA support (Experimental)" 303 tristate "HPT 366/368 PATA support"
286 depends on PCI && EXPERIMENTAL 304 depends on PCI
287 help 305 help
288 This option enables support for the HPT 366 and 368 306 This option enables support for the HPT 366 and 368
289 PATA controllers via the new ATA layer. 307 PATA controllers via the new ATA layer.
@@ -432,6 +450,15 @@ config PATA_NS87410
432 450
433 If unsure, say N. 451 If unsure, say N.
434 452
453config PATA_NS87415
454 tristate "Nat Semi NS87415 PATA support (Experimental)"
455 depends on PCI && EXPERIMENTAL
456 help
457 This option enables support for the National Semiconductor
458 NS87415 PCI-IDE controller.
459
460 If unsure, say N.
461
435config PATA_OPTI 462config PATA_OPTI
436 tristate "OPTI621/6215 PATA support (Very Experimental)" 463 tristate "OPTI621/6215 PATA support (Very Experimental)"
437 depends on PCI && EXPERIMENTAL 464 depends on PCI && EXPERIMENTAL
@@ -596,4 +623,20 @@ config PATA_SCC
596 623
597 If unsure, say N. 624 If unsure, say N.
598 625
626config PATA_BF54X
627 tristate "Blackfin 54x ATAPI support"
628 depends on BF542 || BF548 || BF549
629 help
630 This option enables support for the built-in ATAPI controller on
631 Blackfin 54x family chips.
632
633 If unsure, say N.
634
635config PATA_BF54X_DMA
636 bool "DMA mode"
637 depends on PATA_BF54X
638 default y
639 help
640 Enable DMA mode for Blackfin ATAPI controller.
641
599endif # ATA 642endif # ATA
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 8149c68ac2..2a63645003 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
21obj-$(CONFIG_PATA_ALI) += pata_ali.o 21obj-$(CONFIG_PATA_ALI) += pata_ali.o
22obj-$(CONFIG_PATA_AMD) += pata_amd.o 22obj-$(CONFIG_PATA_AMD) += pata_amd.o
23obj-$(CONFIG_PATA_ARTOP) += pata_artop.o 23obj-$(CONFIG_PATA_ARTOP) += pata_artop.o
24obj-$(CONFIG_PATA_AT32) += pata_at32.o
24obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o 25obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o
25obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o 26obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o
26obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o 27obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o
@@ -39,6 +40,7 @@ obj-$(CONFIG_PATA_IT8213) += pata_it8213.o
39obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o 40obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
40obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o 41obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o
41obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o 42obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
43obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o
42obj-$(CONFIG_PATA_OPTI) += pata_opti.o 44obj-$(CONFIG_PATA_OPTI) += pata_opti.o
43obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o 45obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
44obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o 46obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
@@ -61,12 +63,16 @@ obj-$(CONFIG_PATA_SIS) += pata_sis.o
61obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o 63obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
62obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o 64obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o
63obj-$(CONFIG_PATA_SCC) += pata_scc.o 65obj-$(CONFIG_PATA_SCC) += pata_scc.o
66obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
64obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o 67obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
65obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o 68obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
69# Should be last but two libata driver
70obj-$(CONFIG_PATA_ACPI) += pata_acpi.o
66# Should be last but one libata driver 71# Should be last but one libata driver
67obj-$(CONFIG_ATA_GENERIC) += ata_generic.o 72obj-$(CONFIG_ATA_GENERIC) += ata_generic.o
68# Should be last libata driver 73# Should be last libata driver
69obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o 74obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o
70 75
71libata-objs := libata-core.o libata-scsi.o libata-sff.o libata-eh.o 76libata-objs := libata-core.o libata-scsi.o libata-sff.o libata-eh.o \
77 libata-pmp.o
72libata-$(CONFIG_ATA_ACPI) += libata-acpi.o 78libata-$(CONFIG_ATA_ACPI) += libata-acpi.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index c16820325d..10bc3f64c4 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -46,7 +46,7 @@
46#include <linux/libata.h> 46#include <linux/libata.h>
47 47
48#define DRV_NAME "ahci" 48#define DRV_NAME "ahci"
49#define DRV_VERSION "2.3" 49#define DRV_VERSION "3.0"
50 50
51 51
52enum { 52enum {
@@ -77,11 +77,10 @@ enum {
77 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */ 77 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
78 78
79 board_ahci = 0, 79 board_ahci = 0,
80 board_ahci_pi = 1, 80 board_ahci_vt8251 = 1,
81 board_ahci_vt8251 = 2, 81 board_ahci_ign_iferr = 2,
82 board_ahci_ign_iferr = 3, 82 board_ahci_sb600 = 3,
83 board_ahci_sb600 = 4, 83 board_ahci_mv = 4,
84 board_ahci_mv = 5,
85 84
86 /* global controller registers */ 85 /* global controller registers */
87 HOST_CAP = 0x00, /* host capabilities */ 86 HOST_CAP = 0x00, /* host capabilities */
@@ -97,6 +96,7 @@ enum {
97 96
98 /* HOST_CAP bits */ 97 /* HOST_CAP bits */
99 HOST_CAP_SSC = (1 << 14), /* Slumber capable */ 98 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
99 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
100 HOST_CAP_CLO = (1 << 24), /* Command List Override support */ 100 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
101 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ 101 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
102 HOST_CAP_SNTF = (1 << 29), /* SNotification register */ 102 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
@@ -144,7 +144,8 @@ enum {
144 PORT_IRQ_IF_ERR | 144 PORT_IRQ_IF_ERR |
145 PORT_IRQ_CONNECT | 145 PORT_IRQ_CONNECT |
146 PORT_IRQ_PHYRDY | 146 PORT_IRQ_PHYRDY |
147 PORT_IRQ_UNK_FIS, 147 PORT_IRQ_UNK_FIS |
148 PORT_IRQ_BAD_PMP,
148 PORT_IRQ_ERROR = PORT_IRQ_FREEZE | 149 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
149 PORT_IRQ_TF_ERR | 150 PORT_IRQ_TF_ERR |
150 PORT_IRQ_HBUS_DATA_ERR, 151 PORT_IRQ_HBUS_DATA_ERR,
@@ -154,6 +155,7 @@ enum {
154 155
155 /* PORT_CMD bits */ 156 /* PORT_CMD bits */
156 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ 157 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
158 PORT_CMD_PMP = (1 << 17), /* PMP attached */
157 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ 159 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
158 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ 160 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
159 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ 161 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
@@ -167,19 +169,22 @@ enum {
167 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */ 169 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
168 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */ 170 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
169 171
172 /* hpriv->flags bits */
173 AHCI_HFLAG_NO_NCQ = (1 << 0),
174 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
175 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
176 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
177 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
178 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
179 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
180
170 /* ap->flags bits */ 181 /* ap->flags bits */
171 AHCI_FLAG_NO_NCQ = (1 << 24), 182 AHCI_FLAG_NO_HOTPLUG = (1 << 24), /* ignore PxSERR.DIAG.N */
172 AHCI_FLAG_IGN_IRQ_IF_ERR = (1 << 25), /* ignore IRQ_IF_ERR */
173 AHCI_FLAG_HONOR_PI = (1 << 26), /* honor PORTS_IMPL */
174 AHCI_FLAG_IGN_SERR_INTERNAL = (1 << 27), /* ignore SERR_INTERNAL */
175 AHCI_FLAG_32BIT_ONLY = (1 << 28), /* force 32bit */
176 AHCI_FLAG_MV_PATA = (1 << 29), /* PATA port */
177 AHCI_FLAG_NO_MSI = (1 << 30), /* no PCI MSI */
178 183
179 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 184 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
180 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | 185 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
181 ATA_FLAG_SKIP_D2H_BSY | 186 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
182 ATA_FLAG_ACPI_SATA, 187 AHCI_LFLAG_COMMON = ATA_LFLAG_SKIP_D2H_BSY,
183}; 188};
184 189
185struct ahci_cmd_hdr { 190struct ahci_cmd_hdr {
@@ -198,6 +203,7 @@ struct ahci_sg {
198}; 203};
199 204
200struct ahci_host_priv { 205struct ahci_host_priv {
206 unsigned int flags; /* AHCI_HFLAG_* */
201 u32 cap; /* cap to use */ 207 u32 cap; /* cap to use */
202 u32 port_map; /* port map to use */ 208 u32 port_map; /* port map to use */
203 u32 saved_cap; /* saved initial cap */ 209 u32 saved_cap; /* saved initial cap */
@@ -205,6 +211,7 @@ struct ahci_host_priv {
205}; 211};
206 212
207struct ahci_port_priv { 213struct ahci_port_priv {
214 struct ata_link *active_link;
208 struct ahci_cmd_hdr *cmd_slot; 215 struct ahci_cmd_hdr *cmd_slot;
209 dma_addr_t cmd_slot_dma; 216 dma_addr_t cmd_slot_dma;
210 void *cmd_tbl; 217 void *cmd_tbl;
@@ -215,6 +222,7 @@ struct ahci_port_priv {
215 unsigned int ncq_saw_d2h:1; 222 unsigned int ncq_saw_d2h:1;
216 unsigned int ncq_saw_dmas:1; 223 unsigned int ncq_saw_dmas:1;
217 unsigned int ncq_saw_sdb:1; 224 unsigned int ncq_saw_sdb:1;
225 u32 intr_mask; /* interrupts to enable */
218}; 226};
219 227
220static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 228static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
@@ -229,6 +237,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc);
229static u8 ahci_check_status(struct ata_port *ap); 237static u8 ahci_check_status(struct ata_port *ap);
230static void ahci_freeze(struct ata_port *ap); 238static void ahci_freeze(struct ata_port *ap);
231static void ahci_thaw(struct ata_port *ap); 239static void ahci_thaw(struct ata_port *ap);
240static void ahci_pmp_attach(struct ata_port *ap);
241static void ahci_pmp_detach(struct ata_port *ap);
232static void ahci_error_handler(struct ata_port *ap); 242static void ahci_error_handler(struct ata_port *ap);
233static void ahci_vt8251_error_handler(struct ata_port *ap); 243static void ahci_vt8251_error_handler(struct ata_port *ap);
234static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); 244static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
@@ -262,20 +272,17 @@ static struct scsi_host_template ahci_sht = {
262}; 272};
263 273
264static const struct ata_port_operations ahci_ops = { 274static const struct ata_port_operations ahci_ops = {
265 .port_disable = ata_port_disable,
266
267 .check_status = ahci_check_status, 275 .check_status = ahci_check_status,
268 .check_altstatus = ahci_check_status, 276 .check_altstatus = ahci_check_status,
269 .dev_select = ata_noop_dev_select, 277 .dev_select = ata_noop_dev_select,
270 278
271 .tf_read = ahci_tf_read, 279 .tf_read = ahci_tf_read,
272 280
281 .qc_defer = sata_pmp_qc_defer_cmd_switch,
273 .qc_prep = ahci_qc_prep, 282 .qc_prep = ahci_qc_prep,
274 .qc_issue = ahci_qc_issue, 283 .qc_issue = ahci_qc_issue,
275 284
276 .irq_clear = ahci_irq_clear, 285 .irq_clear = ahci_irq_clear,
277 .irq_on = ata_dummy_irq_on,
278 .irq_ack = ata_dummy_irq_ack,
279 286
280 .scr_read = ahci_scr_read, 287 .scr_read = ahci_scr_read,
281 .scr_write = ahci_scr_write, 288 .scr_write = ahci_scr_write,
@@ -286,6 +293,9 @@ static const struct ata_port_operations ahci_ops = {
286 .error_handler = ahci_error_handler, 293 .error_handler = ahci_error_handler,
287 .post_internal_cmd = ahci_post_internal_cmd, 294 .post_internal_cmd = ahci_post_internal_cmd,
288 295
296 .pmp_attach = ahci_pmp_attach,
297 .pmp_detach = ahci_pmp_detach,
298
289#ifdef CONFIG_PM 299#ifdef CONFIG_PM
290 .port_suspend = ahci_port_suspend, 300 .port_suspend = ahci_port_suspend,
291 .port_resume = ahci_port_resume, 301 .port_resume = ahci_port_resume,
@@ -296,20 +306,17 @@ static const struct ata_port_operations ahci_ops = {
296}; 306};
297 307
298static const struct ata_port_operations ahci_vt8251_ops = { 308static const struct ata_port_operations ahci_vt8251_ops = {
299 .port_disable = ata_port_disable,
300
301 .check_status = ahci_check_status, 309 .check_status = ahci_check_status,
302 .check_altstatus = ahci_check_status, 310 .check_altstatus = ahci_check_status,
303 .dev_select = ata_noop_dev_select, 311 .dev_select = ata_noop_dev_select,
304 312
305 .tf_read = ahci_tf_read, 313 .tf_read = ahci_tf_read,
306 314
315 .qc_defer = sata_pmp_qc_defer_cmd_switch,
307 .qc_prep = ahci_qc_prep, 316 .qc_prep = ahci_qc_prep,
308 .qc_issue = ahci_qc_issue, 317 .qc_issue = ahci_qc_issue,
309 318
310 .irq_clear = ahci_irq_clear, 319 .irq_clear = ahci_irq_clear,
311 .irq_on = ata_dummy_irq_on,
312 .irq_ack = ata_dummy_irq_ack,
313 320
314 .scr_read = ahci_scr_read, 321 .scr_read = ahci_scr_read,
315 .scr_write = ahci_scr_write, 322 .scr_write = ahci_scr_write,
@@ -320,6 +327,9 @@ static const struct ata_port_operations ahci_vt8251_ops = {
320 .error_handler = ahci_vt8251_error_handler, 327 .error_handler = ahci_vt8251_error_handler,
321 .post_internal_cmd = ahci_post_internal_cmd, 328 .post_internal_cmd = ahci_post_internal_cmd,
322 329
330 .pmp_attach = ahci_pmp_attach,
331 .pmp_detach = ahci_pmp_detach,
332
323#ifdef CONFIG_PM 333#ifdef CONFIG_PM
324 .port_suspend = ahci_port_suspend, 334 .port_suspend = ahci_port_suspend,
325 .port_resume = ahci_port_resume, 335 .port_resume = ahci_port_resume,
@@ -329,53 +339,52 @@ static const struct ata_port_operations ahci_vt8251_ops = {
329 .port_stop = ahci_port_stop, 339 .port_stop = ahci_port_stop,
330}; 340};
331 341
342#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
343
332static const struct ata_port_info ahci_port_info[] = { 344static const struct ata_port_info ahci_port_info[] = {
333 /* board_ahci */ 345 /* board_ahci */
334 { 346 {
335 .flags = AHCI_FLAG_COMMON, 347 .flags = AHCI_FLAG_COMMON,
336 .pio_mask = 0x1f, /* pio0-4 */ 348 .link_flags = AHCI_LFLAG_COMMON,
337 .udma_mask = ATA_UDMA6,
338 .port_ops = &ahci_ops,
339 },
340 /* board_ahci_pi */
341 {
342 .flags = AHCI_FLAG_COMMON | AHCI_FLAG_HONOR_PI,
343 .pio_mask = 0x1f, /* pio0-4 */ 349 .pio_mask = 0x1f, /* pio0-4 */
344 .udma_mask = ATA_UDMA6, 350 .udma_mask = ATA_UDMA6,
345 .port_ops = &ahci_ops, 351 .port_ops = &ahci_ops,
346 }, 352 },
347 /* board_ahci_vt8251 */ 353 /* board_ahci_vt8251 */
348 { 354 {
349 .flags = AHCI_FLAG_COMMON | ATA_FLAG_HRST_TO_RESUME | 355 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
350 AHCI_FLAG_NO_NCQ, 356 .flags = AHCI_FLAG_COMMON,
357 .link_flags = AHCI_LFLAG_COMMON | ATA_LFLAG_HRST_TO_RESUME,
351 .pio_mask = 0x1f, /* pio0-4 */ 358 .pio_mask = 0x1f, /* pio0-4 */
352 .udma_mask = ATA_UDMA6, 359 .udma_mask = ATA_UDMA6,
353 .port_ops = &ahci_vt8251_ops, 360 .port_ops = &ahci_vt8251_ops,
354 }, 361 },
355 /* board_ahci_ign_iferr */ 362 /* board_ahci_ign_iferr */
356 { 363 {
357 .flags = AHCI_FLAG_COMMON | AHCI_FLAG_IGN_IRQ_IF_ERR, 364 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
365 .flags = AHCI_FLAG_COMMON,
366 .link_flags = AHCI_LFLAG_COMMON,
358 .pio_mask = 0x1f, /* pio0-4 */ 367 .pio_mask = 0x1f, /* pio0-4 */
359 .udma_mask = ATA_UDMA6, 368 .udma_mask = ATA_UDMA6,
360 .port_ops = &ahci_ops, 369 .port_ops = &ahci_ops,
361 }, 370 },
362 /* board_ahci_sb600 */ 371 /* board_ahci_sb600 */
363 { 372 {
364 .flags = AHCI_FLAG_COMMON | 373 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
365 AHCI_FLAG_IGN_SERR_INTERNAL | 374 AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_PMP),
366 AHCI_FLAG_32BIT_ONLY, 375 .flags = AHCI_FLAG_COMMON,
376 .link_flags = AHCI_LFLAG_COMMON,
367 .pio_mask = 0x1f, /* pio0-4 */ 377 .pio_mask = 0x1f, /* pio0-4 */
368 .udma_mask = ATA_UDMA6, 378 .udma_mask = ATA_UDMA6,
369 .port_ops = &ahci_ops, 379 .port_ops = &ahci_ops,
370 }, 380 },
371 /* board_ahci_mv */ 381 /* board_ahci_mv */
372 { 382 {
373 .sht = &ahci_sht, 383 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
384 AHCI_HFLAG_MV_PATA),
374 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 385 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
375 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | 386 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
376 ATA_FLAG_SKIP_D2H_BSY | AHCI_FLAG_HONOR_PI | 387 .link_flags = AHCI_LFLAG_COMMON,
377 AHCI_FLAG_NO_NCQ | AHCI_FLAG_NO_MSI |
378 AHCI_FLAG_MV_PATA,
379 .pio_mask = 0x1f, /* pio0-4 */ 388 .pio_mask = 0x1f, /* pio0-4 */
380 .udma_mask = ATA_UDMA6, 389 .udma_mask = ATA_UDMA6,
381 .port_ops = &ahci_ops, 390 .port_ops = &ahci_ops,
@@ -394,23 +403,25 @@ static const struct pci_device_id ahci_pci_tbl[] = {
394 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */ 403 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
395 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */ 404 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
396 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */ 405 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
397 { PCI_VDEVICE(INTEL, 0x2821), board_ahci_pi }, /* ICH8 */ 406 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
398 { PCI_VDEVICE(INTEL, 0x2822), board_ahci_pi }, /* ICH8 */ 407 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
399 { PCI_VDEVICE(INTEL, 0x2824), board_ahci_pi }, /* ICH8 */ 408 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
400 { PCI_VDEVICE(INTEL, 0x2829), board_ahci_pi }, /* ICH8M */ 409 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
401 { PCI_VDEVICE(INTEL, 0x282a), board_ahci_pi }, /* ICH8M */ 410 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
402 { PCI_VDEVICE(INTEL, 0x2922), board_ahci_pi }, /* ICH9 */ 411 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
403 { PCI_VDEVICE(INTEL, 0x2923), board_ahci_pi }, /* ICH9 */ 412 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
404 { PCI_VDEVICE(INTEL, 0x2924), board_ahci_pi }, /* ICH9 */ 413 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
405 { PCI_VDEVICE(INTEL, 0x2925), board_ahci_pi }, /* ICH9 */ 414 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
406 { PCI_VDEVICE(INTEL, 0x2927), board_ahci_pi }, /* ICH9 */ 415 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
407 { PCI_VDEVICE(INTEL, 0x2929), board_ahci_pi }, /* ICH9M */ 416 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
408 { PCI_VDEVICE(INTEL, 0x292a), board_ahci_pi }, /* ICH9M */ 417 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
409 { PCI_VDEVICE(INTEL, 0x292b), board_ahci_pi }, /* ICH9M */ 418 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
410 { PCI_VDEVICE(INTEL, 0x292c), board_ahci_pi }, /* ICH9M */ 419 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
411 { PCI_VDEVICE(INTEL, 0x292f), board_ahci_pi }, /* ICH9M */ 420 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
412 { PCI_VDEVICE(INTEL, 0x294d), board_ahci_pi }, /* ICH9 */ 421 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
413 { PCI_VDEVICE(INTEL, 0x294e), board_ahci_pi }, /* ICH9M */ 422 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
423 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
424 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
414 425
415 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 426 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
416 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 427 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -474,6 +485,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
474 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */ 485 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
475 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */ 486 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
476 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */ 487 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
488 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
489 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
490 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
491 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
492 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
493 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
494 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
495 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
477 496
478 /* SiS */ 497 /* SiS */
479 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */ 498 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
@@ -524,7 +543,6 @@ static inline void __iomem *ahci_port_base(struct ata_port *ap)
524/** 543/**
525 * ahci_save_initial_config - Save and fixup initial config values 544 * ahci_save_initial_config - Save and fixup initial config values
526 * @pdev: target PCI device 545 * @pdev: target PCI device
527 * @pi: associated ATA port info
528 * @hpriv: host private area to store config values 546 * @hpriv: host private area to store config values
529 * 547 *
530 * Some registers containing configuration info might be setup by 548 * Some registers containing configuration info might be setup by
@@ -538,7 +556,6 @@ static inline void __iomem *ahci_port_base(struct ata_port *ap)
538 * None. 556 * None.
539 */ 557 */
540static void ahci_save_initial_config(struct pci_dev *pdev, 558static void ahci_save_initial_config(struct pci_dev *pdev,
541 const struct ata_port_info *pi,
542 struct ahci_host_priv *hpriv) 559 struct ahci_host_priv *hpriv)
543{ 560{
544 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; 561 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
@@ -552,26 +569,22 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
552 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL); 569 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
553 570
554 /* some chips have errata preventing 64bit use */ 571 /* some chips have errata preventing 64bit use */
555 if ((cap & HOST_CAP_64) && (pi->flags & AHCI_FLAG_32BIT_ONLY)) { 572 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
556 dev_printk(KERN_INFO, &pdev->dev, 573 dev_printk(KERN_INFO, &pdev->dev,
557 "controller can't do 64bit DMA, forcing 32bit\n"); 574 "controller can't do 64bit DMA, forcing 32bit\n");
558 cap &= ~HOST_CAP_64; 575 cap &= ~HOST_CAP_64;
559 } 576 }
560 577
561 if ((cap & HOST_CAP_NCQ) && (pi->flags & AHCI_FLAG_NO_NCQ)) { 578 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
562 dev_printk(KERN_INFO, &pdev->dev, 579 dev_printk(KERN_INFO, &pdev->dev,
563 "controller can't do NCQ, turning off CAP_NCQ\n"); 580 "controller can't do NCQ, turning off CAP_NCQ\n");
564 cap &= ~HOST_CAP_NCQ; 581 cap &= ~HOST_CAP_NCQ;
565 } 582 }
566 583
567 /* fixup zero port_map */ 584 if ((cap && HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
568 if (!port_map) { 585 dev_printk(KERN_INFO, &pdev->dev,
569 port_map = (1 << ahci_nr_ports(cap)) - 1; 586 "controller can't do PMP, turning off CAP_PMP\n");
570 dev_printk(KERN_WARNING, &pdev->dev, 587 cap &= ~HOST_CAP_PMP;
571 "PORTS_IMPL is zero, forcing 0x%x\n", port_map);
572
573 /* write the fixed up value to the PI register */
574 hpriv->saved_port_map = port_map;
575 } 588 }
576 589
577 /* 590 /*
@@ -579,7 +592,7 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
579 * is asserted through the standard AHCI port 592 * is asserted through the standard AHCI port
580 * presence register, as bit 4 (counting from 0) 593 * presence register, as bit 4 (counting from 0)
581 */ 594 */
582 if (pi->flags & AHCI_FLAG_MV_PATA) { 595 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
583 dev_printk(KERN_ERR, &pdev->dev, 596 dev_printk(KERN_ERR, &pdev->dev,
584 "MV_AHCI HACK: port_map %x -> %x\n", 597 "MV_AHCI HACK: port_map %x -> %x\n",
585 hpriv->port_map, 598 hpriv->port_map,
@@ -589,7 +602,7 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
589 } 602 }
590 603
591 /* cross check port_map and cap.n_ports */ 604 /* cross check port_map and cap.n_ports */
592 if (pi->flags & AHCI_FLAG_HONOR_PI) { 605 if (port_map) {
593 u32 tmp_port_map = port_map; 606 u32 tmp_port_map = port_map;
594 int n_ports = ahci_nr_ports(cap); 607 int n_ports = ahci_nr_ports(cap);
595 608
@@ -600,17 +613,26 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
600 } 613 }
601 } 614 }
602 615
603 /* Whine if inconsistent. No need to update cap. 616 /* If n_ports and port_map are inconsistent, whine and
604 * port_map is used to determine number of ports. 617 * clear port_map and let it be generated from n_ports.
605 */ 618 */
606 if (n_ports || tmp_port_map) 619 if (n_ports || tmp_port_map) {
607 dev_printk(KERN_WARNING, &pdev->dev, 620 dev_printk(KERN_WARNING, &pdev->dev,
608 "nr_ports (%u) and implemented port map " 621 "nr_ports (%u) and implemented port map "
609 "(0x%x) don't match\n", 622 "(0x%x) don't match, using nr_ports\n",
610 ahci_nr_ports(cap), port_map); 623 ahci_nr_ports(cap), port_map);
611 } else { 624 port_map = 0;
612 /* fabricate port_map from cap.nr_ports */ 625 }
626 }
627
628 /* fabricate port_map from cap.nr_ports */
629 if (!port_map) {
613 port_map = (1 << ahci_nr_ports(cap)) - 1; 630 port_map = (1 << ahci_nr_ports(cap)) - 1;
631 dev_printk(KERN_WARNING, &pdev->dev,
632 "forcing PORTS_IMPL to 0x%x\n", port_map);
633
634 /* write the fixed up value to the PI register */
635 hpriv->saved_port_map = port_map;
614 } 636 }
615 637
616 /* record values to use during operation */ 638 /* record values to use during operation */
@@ -836,8 +858,14 @@ static int ahci_reset_controller(struct ata_host *host)
836 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 858 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
837 u32 tmp; 859 u32 tmp;
838 860
839 /* global controller reset */ 861 /* we must be in AHCI mode, before using anything
862 * AHCI-specific, such as HOST_RESET.
863 */
840 tmp = readl(mmio + HOST_CTL); 864 tmp = readl(mmio + HOST_CTL);
865 if (!(tmp & HOST_AHCI_EN))
866 writel(tmp | HOST_AHCI_EN, mmio + HOST_CTL);
867
868 /* global controller reset */
841 if ((tmp & HOST_RESET) == 0) { 869 if ((tmp & HOST_RESET) == 0) {
842 writel(tmp | HOST_RESET, mmio + HOST_CTL); 870 writel(tmp | HOST_RESET, mmio + HOST_CTL);
843 readl(mmio + HOST_CTL); /* flush */ 871 readl(mmio + HOST_CTL); /* flush */
@@ -904,13 +932,14 @@ static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
904 932
905static void ahci_init_controller(struct ata_host *host) 933static void ahci_init_controller(struct ata_host *host)
906{ 934{
935 struct ahci_host_priv *hpriv = host->private_data;
907 struct pci_dev *pdev = to_pci_dev(host->dev); 936 struct pci_dev *pdev = to_pci_dev(host->dev);
908 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 937 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
909 int i; 938 int i;
910 void __iomem *port_mmio; 939 void __iomem *port_mmio;
911 u32 tmp; 940 u32 tmp;
912 941
913 if (host->ports[0]->flags & AHCI_FLAG_MV_PATA) { 942 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
914 port_mmio = __ahci_port_base(host, 4); 943 port_mmio = __ahci_port_base(host, 4);
915 944
916 writel(0, port_mmio + PORT_IRQ_MASK); 945 writel(0, port_mmio + PORT_IRQ_MASK);
@@ -1042,9 +1071,10 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1042 return 0; 1071 return 0;
1043} 1072}
1044 1073
1045static int ahci_do_softreset(struct ata_port *ap, unsigned int *class, 1074static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1046 int pmp, unsigned long deadline) 1075 int pmp, unsigned long deadline)
1047{ 1076{
1077 struct ata_port *ap = link->ap;
1048 const char *reason = NULL; 1078 const char *reason = NULL;
1049 unsigned long now, msecs; 1079 unsigned long now, msecs;
1050 struct ata_taskfile tf; 1080 struct ata_taskfile tf;
@@ -1052,7 +1082,7 @@ static int ahci_do_softreset(struct ata_port *ap, unsigned int *class,
1052 1082
1053 DPRINTK("ENTER\n"); 1083 DPRINTK("ENTER\n");
1054 1084
1055 if (ata_port_offline(ap)) { 1085 if (ata_link_offline(link)) {
1056 DPRINTK("PHY reports no device\n"); 1086 DPRINTK("PHY reports no device\n");
1057 *class = ATA_DEV_NONE; 1087 *class = ATA_DEV_NONE;
1058 return 0; 1088 return 0;
@@ -1061,10 +1091,10 @@ static int ahci_do_softreset(struct ata_port *ap, unsigned int *class,
1061 /* prepare for SRST (AHCI-1.1 10.4.1) */ 1091 /* prepare for SRST (AHCI-1.1 10.4.1) */
1062 rc = ahci_kick_engine(ap, 1); 1092 rc = ahci_kick_engine(ap, 1);
1063 if (rc) 1093 if (rc)
1064 ata_port_printk(ap, KERN_WARNING, 1094 ata_link_printk(link, KERN_WARNING,
1065 "failed to reset engine (errno=%d)", rc); 1095 "failed to reset engine (errno=%d)", rc);
1066 1096
1067 ata_tf_init(ap->device, &tf); 1097 ata_tf_init(link->device, &tf);
1068 1098
1069 /* issue the first D2H Register FIS */ 1099 /* issue the first D2H Register FIS */
1070 msecs = 0; 1100 msecs = 0;
@@ -1109,19 +1139,25 @@ static int ahci_do_softreset(struct ata_port *ap, unsigned int *class,
1109 return 0; 1139 return 0;
1110 1140
1111 fail: 1141 fail:
1112 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason); 1142 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1113 return rc; 1143 return rc;
1114} 1144}
1115 1145
1116static int ahci_softreset(struct ata_port *ap, unsigned int *class, 1146static int ahci_softreset(struct ata_link *link, unsigned int *class,
1117 unsigned long deadline) 1147 unsigned long deadline)
1118{ 1148{
1119 return ahci_do_softreset(ap, class, 0, deadline); 1149 int pmp = 0;
1150
1151 if (link->ap->flags & ATA_FLAG_PMP)
1152 pmp = SATA_PMP_CTRL_PORT;
1153
1154 return ahci_do_softreset(link, class, pmp, deadline);
1120} 1155}
1121 1156
1122static int ahci_hardreset(struct ata_port *ap, unsigned int *class, 1157static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1123 unsigned long deadline) 1158 unsigned long deadline)
1124{ 1159{
1160 struct ata_port *ap = link->ap;
1125 struct ahci_port_priv *pp = ap->private_data; 1161 struct ahci_port_priv *pp = ap->private_data;
1126 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; 1162 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1127 struct ata_taskfile tf; 1163 struct ata_taskfile tf;
@@ -1132,26 +1168,27 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class,
1132 ahci_stop_engine(ap); 1168 ahci_stop_engine(ap);
1133 1169
1134 /* clear D2H reception area to properly wait for D2H FIS */ 1170 /* clear D2H reception area to properly wait for D2H FIS */
1135 ata_tf_init(ap->device, &tf); 1171 ata_tf_init(link->device, &tf);
1136 tf.command = 0x80; 1172 tf.command = 0x80;
1137 ata_tf_to_fis(&tf, 0, 0, d2h_fis); 1173 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1138 1174
1139 rc = sata_std_hardreset(ap, class, deadline); 1175 rc = sata_std_hardreset(link, class, deadline);
1140 1176
1141 ahci_start_engine(ap); 1177 ahci_start_engine(ap);
1142 1178
1143 if (rc == 0 && ata_port_online(ap)) 1179 if (rc == 0 && ata_link_online(link))
1144 *class = ahci_dev_classify(ap); 1180 *class = ahci_dev_classify(ap);
1145 if (*class == ATA_DEV_UNKNOWN) 1181 if (rc != -EAGAIN && *class == ATA_DEV_UNKNOWN)
1146 *class = ATA_DEV_NONE; 1182 *class = ATA_DEV_NONE;
1147 1183
1148 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); 1184 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1149 return rc; 1185 return rc;
1150} 1186}
1151 1187
1152static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class, 1188static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1153 unsigned long deadline) 1189 unsigned long deadline)
1154{ 1190{
1191 struct ata_port *ap = link->ap;
1155 u32 serror; 1192 u32 serror;
1156 int rc; 1193 int rc;
1157 1194
@@ -1159,7 +1196,7 @@ static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class,
1159 1196
1160 ahci_stop_engine(ap); 1197 ahci_stop_engine(ap);
1161 1198
1162 rc = sata_port_hardreset(ap, sata_ehc_deb_timing(&ap->eh_context), 1199 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1163 deadline); 1200 deadline);
1164 1201
1165 /* vt8251 needs SError cleared for the port to operate */ 1202 /* vt8251 needs SError cleared for the port to operate */
@@ -1176,12 +1213,13 @@ static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class,
1176 return rc ?: -EAGAIN; 1213 return rc ?: -EAGAIN;
1177} 1214}
1178 1215
1179static void ahci_postreset(struct ata_port *ap, unsigned int *class) 1216static void ahci_postreset(struct ata_link *link, unsigned int *class)
1180{ 1217{
1218 struct ata_port *ap = link->ap;
1181 void __iomem *port_mmio = ahci_port_base(ap); 1219 void __iomem *port_mmio = ahci_port_base(ap);
1182 u32 new_tmp, tmp; 1220 u32 new_tmp, tmp;
1183 1221
1184 ata_std_postreset(ap, class); 1222 ata_std_postreset(link, class);
1185 1223
1186 /* Make sure port's ATAPI bit is set appropriately */ 1224 /* Make sure port's ATAPI bit is set appropriately */
1187 new_tmp = tmp = readl(port_mmio + PORT_CMD); 1225 new_tmp = tmp = readl(port_mmio + PORT_CMD);
@@ -1195,6 +1233,12 @@ static void ahci_postreset(struct ata_port *ap, unsigned int *class)
1195 } 1233 }
1196} 1234}
1197 1235
1236static int ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
1237 unsigned long deadline)
1238{
1239 return ahci_do_softreset(link, class, link->pmp, deadline);
1240}
1241
1198static u8 ahci_check_status(struct ata_port *ap) 1242static u8 ahci_check_status(struct ata_port *ap)
1199{ 1243{
1200 void __iomem *mmio = ap->ioaddr.cmd_addr; 1244 void __iomem *mmio = ap->ioaddr.cmd_addr;
@@ -1253,7 +1297,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
1253 */ 1297 */
1254 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; 1298 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
1255 1299
1256 ata_tf_to_fis(&qc->tf, 0, 1, cmd_tbl); 1300 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
1257 if (is_atapi) { 1301 if (is_atapi) {
1258 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); 1302 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
1259 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); 1303 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
@@ -1266,7 +1310,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
1266 /* 1310 /*
1267 * Fill in command slot information. 1311 * Fill in command slot information.
1268 */ 1312 */
1269 opts = cmd_fis_len | n_elem << 16; 1313 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
1270 if (qc->tf.flags & ATA_TFLAG_WRITE) 1314 if (qc->tf.flags & ATA_TFLAG_WRITE)
1271 opts |= AHCI_CMD_WRITE; 1315 opts |= AHCI_CMD_WRITE;
1272 if (is_atapi) 1316 if (is_atapi)
@@ -1277,66 +1321,87 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
1277 1321
1278static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) 1322static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1279{ 1323{
1324 struct ahci_host_priv *hpriv = ap->host->private_data;
1280 struct ahci_port_priv *pp = ap->private_data; 1325 struct ahci_port_priv *pp = ap->private_data;
1281 struct ata_eh_info *ehi = &ap->eh_info; 1326 struct ata_eh_info *host_ehi = &ap->link.eh_info;
1282 unsigned int err_mask = 0, action = 0; 1327 struct ata_link *link = NULL;
1283 struct ata_queued_cmd *qc; 1328 struct ata_queued_cmd *active_qc;
1329 struct ata_eh_info *active_ehi;
1284 u32 serror; 1330 u32 serror;
1285 1331
1286 ata_ehi_clear_desc(ehi); 1332 /* determine active link */
1333 ata_port_for_each_link(link, ap)
1334 if (ata_link_active(link))
1335 break;
1336 if (!link)
1337 link = &ap->link;
1338
1339 active_qc = ata_qc_from_tag(ap, link->active_tag);
1340 active_ehi = &link->eh_info;
1341
1342 /* record irq stat */
1343 ata_ehi_clear_desc(host_ehi);
1344 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
1287 1345
1288 /* AHCI needs SError cleared; otherwise, it might lock up */ 1346 /* AHCI needs SError cleared; otherwise, it might lock up */
1289 ahci_scr_read(ap, SCR_ERROR, &serror); 1347 ahci_scr_read(ap, SCR_ERROR, &serror);
1290 ahci_scr_write(ap, SCR_ERROR, serror); 1348 ahci_scr_write(ap, SCR_ERROR, serror);
1291 1349 host_ehi->serror |= serror;
1292 /* analyze @irq_stat */
1293 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
1294 1350
1295 /* some controllers set IRQ_IF_ERR on device errors, ignore it */ 1351 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
1296 if (ap->flags & AHCI_FLAG_IGN_IRQ_IF_ERR) 1352 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
1297 irq_stat &= ~PORT_IRQ_IF_ERR; 1353 irq_stat &= ~PORT_IRQ_IF_ERR;
1298 1354
1299 if (irq_stat & PORT_IRQ_TF_ERR) { 1355 if (irq_stat & PORT_IRQ_TF_ERR) {
1300 err_mask |= AC_ERR_DEV; 1356 /* If qc is active, charge it; otherwise, the active
1301 if (ap->flags & AHCI_FLAG_IGN_SERR_INTERNAL) 1357 * link. There's no active qc on NCQ errors. It will
1302 serror &= ~SERR_INTERNAL; 1358 * be determined by EH by reading log page 10h.
1359 */
1360 if (active_qc)
1361 active_qc->err_mask |= AC_ERR_DEV;
1362 else
1363 active_ehi->err_mask |= AC_ERR_DEV;
1364
1365 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
1366 host_ehi->serror &= ~SERR_INTERNAL;
1367 }
1368
1369 if (irq_stat & PORT_IRQ_UNK_FIS) {
1370 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
1371
1372 active_ehi->err_mask |= AC_ERR_HSM;
1373 active_ehi->action |= ATA_EH_SOFTRESET;
1374 ata_ehi_push_desc(active_ehi,
1375 "unknown FIS %08x %08x %08x %08x" ,
1376 unk[0], unk[1], unk[2], unk[3]);
1377 }
1378
1379 if (ap->nr_pmp_links && (irq_stat & PORT_IRQ_BAD_PMP)) {
1380 active_ehi->err_mask |= AC_ERR_HSM;
1381 active_ehi->action |= ATA_EH_SOFTRESET;
1382 ata_ehi_push_desc(active_ehi, "incorrect PMP");
1303 } 1383 }
1304 1384
1305 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) { 1385 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
1306 err_mask |= AC_ERR_HOST_BUS; 1386 host_ehi->err_mask |= AC_ERR_HOST_BUS;
1307 action |= ATA_EH_SOFTRESET; 1387 host_ehi->action |= ATA_EH_SOFTRESET;
1388 ata_ehi_push_desc(host_ehi, "host bus error");
1308 } 1389 }
1309 1390
1310 if (irq_stat & PORT_IRQ_IF_ERR) { 1391 if (irq_stat & PORT_IRQ_IF_ERR) {
1311 err_mask |= AC_ERR_ATA_BUS; 1392 host_ehi->err_mask |= AC_ERR_ATA_BUS;
1312 action |= ATA_EH_SOFTRESET; 1393 host_ehi->action |= ATA_EH_SOFTRESET;
1313 ata_ehi_push_desc(ehi, "interface fatal error"); 1394 ata_ehi_push_desc(host_ehi, "interface fatal error");
1314 } 1395 }
1315 1396
1316 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) { 1397 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
1317 ata_ehi_hotplugged(ehi); 1398 ata_ehi_hotplugged(host_ehi);
1318 ata_ehi_push_desc(ehi, "%s", irq_stat & PORT_IRQ_CONNECT ? 1399 ata_ehi_push_desc(host_ehi, "%s",
1400 irq_stat & PORT_IRQ_CONNECT ?
1319 "connection status changed" : "PHY RDY changed"); 1401 "connection status changed" : "PHY RDY changed");
1320 } 1402 }
1321 1403
1322 if (irq_stat & PORT_IRQ_UNK_FIS) {
1323 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
1324
1325 err_mask |= AC_ERR_HSM;
1326 action |= ATA_EH_SOFTRESET;
1327 ata_ehi_push_desc(ehi, "unknown FIS %08x %08x %08x %08x",
1328 unk[0], unk[1], unk[2], unk[3]);
1329 }
1330
1331 /* okay, let's hand over to EH */ 1404 /* okay, let's hand over to EH */
1332 ehi->serror |= serror;
1333 ehi->action |= action;
1334
1335 qc = ata_qc_from_tag(ap, ap->active_tag);
1336 if (qc)
1337 qc->err_mask |= err_mask;
1338 else
1339 ehi->err_mask |= err_mask;
1340 1405
1341 if (irq_stat & PORT_IRQ_FREEZE) 1406 if (irq_stat & PORT_IRQ_FREEZE)
1342 ata_port_freeze(ap); 1407 ata_port_freeze(ap);
@@ -1347,25 +1412,64 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1347static void ahci_port_intr(struct ata_port *ap) 1412static void ahci_port_intr(struct ata_port *ap)
1348{ 1413{
1349 void __iomem *port_mmio = ap->ioaddr.cmd_addr; 1414 void __iomem *port_mmio = ap->ioaddr.cmd_addr;
1350 struct ata_eh_info *ehi = &ap->eh_info; 1415 struct ata_eh_info *ehi = &ap->link.eh_info;
1351 struct ahci_port_priv *pp = ap->private_data; 1416 struct ahci_port_priv *pp = ap->private_data;
1417 struct ahci_host_priv *hpriv = ap->host->private_data;
1418 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
1352 u32 status, qc_active; 1419 u32 status, qc_active;
1353 int rc, known_irq = 0; 1420 int rc, known_irq = 0;
1354 1421
1355 status = readl(port_mmio + PORT_IRQ_STAT); 1422 status = readl(port_mmio + PORT_IRQ_STAT);
1356 writel(status, port_mmio + PORT_IRQ_STAT); 1423 writel(status, port_mmio + PORT_IRQ_STAT);
1357 1424
1425 /* ignore BAD_PMP while resetting */
1426 if (unlikely(resetting))
1427 status &= ~PORT_IRQ_BAD_PMP;
1428
1358 if (unlikely(status & PORT_IRQ_ERROR)) { 1429 if (unlikely(status & PORT_IRQ_ERROR)) {
1359 ahci_error_intr(ap, status); 1430 ahci_error_intr(ap, status);
1360 return; 1431 return;
1361 } 1432 }
1362 1433
1363 if (ap->sactive) 1434 if (status & PORT_IRQ_SDB_FIS) {
1435 /* If SNotification is available, leave notification
1436 * handling to sata_async_notification(). If not,
1437 * emulate it by snooping SDB FIS RX area.
1438 *
1439 * Snooping FIS RX area is probably cheaper than
1440 * poking SNotification but some constrollers which
1441 * implement SNotification, ICH9 for example, don't
1442 * store AN SDB FIS into receive area.
1443 */
1444 if (hpriv->cap & HOST_CAP_SNTF)
1445 sata_async_notification(ap);
1446 else {
1447 /* If the 'N' bit in word 0 of the FIS is set,
1448 * we just received asynchronous notification.
1449 * Tell libata about it.
1450 */
1451 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
1452 u32 f0 = le32_to_cpu(f[0]);
1453
1454 if (f0 & (1 << 15))
1455 sata_async_notification(ap);
1456 }
1457 }
1458
1459 /* pp->active_link is valid iff any command is in flight */
1460 if (ap->qc_active && pp->active_link->sactive)
1364 qc_active = readl(port_mmio + PORT_SCR_ACT); 1461 qc_active = readl(port_mmio + PORT_SCR_ACT);
1365 else 1462 else
1366 qc_active = readl(port_mmio + PORT_CMD_ISSUE); 1463 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
1367 1464
1368 rc = ata_qc_complete_multiple(ap, qc_active, NULL); 1465 rc = ata_qc_complete_multiple(ap, qc_active, NULL);
1466
1467 /* If resetting, spurious or invalid completions are expected,
1468 * return unconditionally.
1469 */
1470 if (resetting)
1471 return;
1472
1369 if (rc > 0) 1473 if (rc > 0)
1370 return; 1474 return;
1371 if (rc < 0) { 1475 if (rc < 0) {
@@ -1380,7 +1484,7 @@ static void ahci_port_intr(struct ata_port *ap)
1380 /* if !NCQ, ignore. No modern ATA device has broken HSM 1484 /* if !NCQ, ignore. No modern ATA device has broken HSM
1381 * implementation for non-NCQ commands. 1485 * implementation for non-NCQ commands.
1382 */ 1486 */
1383 if (!ap->sactive) 1487 if (!ap->link.sactive)
1384 return; 1488 return;
1385 1489
1386 if (status & PORT_IRQ_D2H_REG_FIS) { 1490 if (status & PORT_IRQ_D2H_REG_FIS) {
@@ -1433,7 +1537,7 @@ static void ahci_port_intr(struct ata_port *ap)
1433 if (!known_irq) 1537 if (!known_irq)
1434 ata_port_printk(ap, KERN_INFO, "spurious interrupt " 1538 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
1435 "(irq_stat 0x%x active_tag 0x%x sactive 0x%x)\n", 1539 "(irq_stat 0x%x active_tag 0x%x sactive 0x%x)\n",
1436 status, ap->active_tag, ap->sactive); 1540 status, ap->link.active_tag, ap->link.sactive);
1437} 1541}
1438 1542
1439static void ahci_irq_clear(struct ata_port *ap) 1543static void ahci_irq_clear(struct ata_port *ap)
@@ -1498,6 +1602,13 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1498{ 1602{
1499 struct ata_port *ap = qc->ap; 1603 struct ata_port *ap = qc->ap;
1500 void __iomem *port_mmio = ahci_port_base(ap); 1604 void __iomem *port_mmio = ahci_port_base(ap);
1605 struct ahci_port_priv *pp = ap->private_data;
1606
1607 /* Keep track of the currently active link. It will be used
1608 * in completion path to determine whether NCQ phase is in
1609 * progress.
1610 */
1611 pp->active_link = qc->dev->link;
1501 1612
1502 if (qc->tf.protocol == ATA_PROT_NCQ) 1613 if (qc->tf.protocol == ATA_PROT_NCQ)
1503 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); 1614 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
@@ -1520,6 +1631,7 @@ static void ahci_thaw(struct ata_port *ap)
1520 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR]; 1631 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1521 void __iomem *port_mmio = ahci_port_base(ap); 1632 void __iomem *port_mmio = ahci_port_base(ap);
1522 u32 tmp; 1633 u32 tmp;
1634 struct ahci_port_priv *pp = ap->private_data;
1523 1635
1524 /* clear IRQ */ 1636 /* clear IRQ */
1525 tmp = readl(port_mmio + PORT_IRQ_STAT); 1637 tmp = readl(port_mmio + PORT_IRQ_STAT);
@@ -1527,7 +1639,7 @@ static void ahci_thaw(struct ata_port *ap)
1527 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT); 1639 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
1528 1640
1529 /* turn IRQ back on */ 1641 /* turn IRQ back on */
1530 writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK); 1642 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1531} 1643}
1532 1644
1533static void ahci_error_handler(struct ata_port *ap) 1645static void ahci_error_handler(struct ata_port *ap)
@@ -1539,8 +1651,10 @@ static void ahci_error_handler(struct ata_port *ap)
1539 } 1651 }
1540 1652
1541 /* perform recovery */ 1653 /* perform recovery */
1542 ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_hardreset, 1654 sata_pmp_do_eh(ap, ata_std_prereset, ahci_softreset,
1543 ahci_postreset); 1655 ahci_hardreset, ahci_postreset,
1656 sata_pmp_std_prereset, ahci_pmp_softreset,
1657 sata_pmp_std_hardreset, sata_pmp_std_postreset);
1544} 1658}
1545 1659
1546static void ahci_vt8251_error_handler(struct ata_port *ap) 1660static void ahci_vt8251_error_handler(struct ata_port *ap)
@@ -1565,11 +1679,44 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1565 ahci_kick_engine(ap, 1); 1679 ahci_kick_engine(ap, 1);
1566} 1680}
1567 1681
1682static void ahci_pmp_attach(struct ata_port *ap)
1683{
1684 void __iomem *port_mmio = ahci_port_base(ap);
1685 struct ahci_port_priv *pp = ap->private_data;
1686 u32 cmd;
1687
1688 cmd = readl(port_mmio + PORT_CMD);
1689 cmd |= PORT_CMD_PMP;
1690 writel(cmd, port_mmio + PORT_CMD);
1691
1692 pp->intr_mask |= PORT_IRQ_BAD_PMP;
1693 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1694}
1695
1696static void ahci_pmp_detach(struct ata_port *ap)
1697{
1698 void __iomem *port_mmio = ahci_port_base(ap);
1699 struct ahci_port_priv *pp = ap->private_data;
1700 u32 cmd;
1701
1702 cmd = readl(port_mmio + PORT_CMD);
1703 cmd &= ~PORT_CMD_PMP;
1704 writel(cmd, port_mmio + PORT_CMD);
1705
1706 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
1707 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1708}
1709
1568static int ahci_port_resume(struct ata_port *ap) 1710static int ahci_port_resume(struct ata_port *ap)
1569{ 1711{
1570 ahci_power_up(ap); 1712 ahci_power_up(ap);
1571 ahci_start_port(ap); 1713 ahci_start_port(ap);
1572 1714
1715 if (ap->nr_pmp_links)
1716 ahci_pmp_attach(ap);
1717 else
1718 ahci_pmp_detach(ap);
1719
1573 return 0; 1720 return 0;
1574} 1721}
1575 1722
@@ -1681,6 +1828,12 @@ static int ahci_port_start(struct ata_port *ap)
1681 pp->cmd_tbl = mem; 1828 pp->cmd_tbl = mem;
1682 pp->cmd_tbl_dma = mem_dma; 1829 pp->cmd_tbl_dma = mem_dma;
1683 1830
1831 /*
1832 * Save off initial list of interrupts to be enabled.
1833 * This could be changed later
1834 */
1835 pp->intr_mask = DEF_PORT_IRQ;
1836
1684 ap->private_data = pp; 1837 ap->private_data = pp;
1685 1838
1686 /* engage engines, captain */ 1839 /* engage engines, captain */
@@ -1830,20 +1983,24 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1830 if (rc) 1983 if (rc)
1831 return rc; 1984 return rc;
1832 1985
1833 if ((pi.flags & AHCI_FLAG_NO_MSI) || pci_enable_msi(pdev))
1834 pci_intx(pdev, 1);
1835
1836 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); 1986 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
1837 if (!hpriv) 1987 if (!hpriv)
1838 return -ENOMEM; 1988 return -ENOMEM;
1989 hpriv->flags |= (unsigned long)pi.private_data;
1990
1991 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
1992 pci_intx(pdev, 1);
1839 1993
1840 /* save initial config */ 1994 /* save initial config */
1841 ahci_save_initial_config(pdev, &pi, hpriv); 1995 ahci_save_initial_config(pdev, hpriv);
1842 1996
1843 /* prepare host */ 1997 /* prepare host */
1844 if (hpriv->cap & HOST_CAP_NCQ) 1998 if (hpriv->cap & HOST_CAP_NCQ)
1845 pi.flags |= ATA_FLAG_NCQ; 1999 pi.flags |= ATA_FLAG_NCQ;
1846 2000
2001 if (hpriv->cap & HOST_CAP_PMP)
2002 pi.flags |= ATA_FLAG_PMP;
2003
1847 host = ata_host_alloc_pinfo(&pdev->dev, ppi, fls(hpriv->port_map)); 2004 host = ata_host_alloc_pinfo(&pdev->dev, ppi, fls(hpriv->port_map));
1848 if (!host) 2005 if (!host)
1849 return -ENOMEM; 2006 return -ENOMEM;
@@ -1854,6 +2011,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1854 struct ata_port *ap = host->ports[i]; 2011 struct ata_port *ap = host->ports[i];
1855 void __iomem *port_mmio = ahci_port_base(ap); 2012 void __iomem *port_mmio = ahci_port_base(ap);
1856 2013
2014 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
2015 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
2016 0x100 + ap->port_no * 0x80, "port");
2017
1857 /* standard SATA port setup */ 2018 /* standard SATA port setup */
1858 if (hpriv->port_map & (1 << i)) 2019 if (hpriv->port_map & (1 << i))
1859 ap->ioaddr.cmd_addr = port_mmio; 2020 ap->ioaddr.cmd_addr = port_mmio;
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 9454669547..90329982be 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -34,7 +34,7 @@
34 34
35/** 35/**
36 * generic_set_mode - mode setting 36 * generic_set_mode - mode setting
37 * @ap: interface to set up 37 * @link: link to set up
38 * @unused: returned device on error 38 * @unused: returned device on error
39 * 39 *
40 * Use a non standard set_mode function. We don't want to be tuned. 40 * Use a non standard set_mode function. We don't want to be tuned.
@@ -43,24 +43,24 @@
43 * and respect them. 43 * and respect them.
44 */ 44 */
45 45
46static int generic_set_mode(struct ata_port *ap, struct ata_device **unused) 46static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
47{ 47{
48 struct ata_port *ap = link->ap;
48 int dma_enabled = 0; 49 int dma_enabled = 0;
49 int i; 50 struct ata_device *dev;
50 51
51 /* Bits 5 and 6 indicate if DMA is active on master/slave */ 52 /* Bits 5 and 6 indicate if DMA is active on master/slave */
52 if (ap->ioaddr.bmdma_addr) 53 if (ap->ioaddr.bmdma_addr)
53 dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 54 dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
54 55
55 for (i = 0; i < ATA_MAX_DEVICES; i++) { 56 ata_link_for_each_dev(dev, link) {
56 struct ata_device *dev = &ap->device[i];
57 if (ata_dev_enabled(dev)) { 57 if (ata_dev_enabled(dev)) {
58 /* We don't really care */ 58 /* We don't really care */
59 dev->pio_mode = XFER_PIO_0; 59 dev->pio_mode = XFER_PIO_0;
60 dev->dma_mode = XFER_MW_DMA_0; 60 dev->dma_mode = XFER_MW_DMA_0;
61 /* We do need the right mode information for DMA or PIO 61 /* We do need the right mode information for DMA or PIO
62 and this comes from the current configuration flags */ 62 and this comes from the current configuration flags */
63 if (dma_enabled & (1 << (5 + i))) { 63 if (dma_enabled & (1 << (5 + dev->devno))) {
64 ata_id_to_dma_mode(dev, XFER_MW_DMA_0); 64 ata_id_to_dma_mode(dev, XFER_MW_DMA_0);
65 dev->flags &= ~ATA_DFLAG_PIO; 65 dev->flags &= ~ATA_DFLAG_PIO;
66 } else { 66 } else {
@@ -95,7 +95,6 @@ static struct scsi_host_template generic_sht = {
95static struct ata_port_operations generic_port_ops = { 95static struct ata_port_operations generic_port_ops = {
96 .set_mode = generic_set_mode, 96 .set_mode = generic_set_mode,
97 97
98 .port_disable = ata_port_disable,
99 .tf_load = ata_tf_load, 98 .tf_load = ata_tf_load,
100 .tf_read = ata_tf_read, 99 .tf_read = ata_tf_read,
101 .check_status = ata_check_status, 100 .check_status = ata_check_status,
@@ -121,9 +120,8 @@ static struct ata_port_operations generic_port_ops = {
121 .irq_handler = ata_interrupt, 120 .irq_handler = ata_interrupt,
122 .irq_clear = ata_bmdma_irq_clear, 121 .irq_clear = ata_bmdma_irq_clear,
123 .irq_on = ata_irq_on, 122 .irq_on = ata_irq_on,
124 .irq_ack = ata_irq_ack,
125 123
126 .port_start = ata_port_start, 124 .port_start = ata_sff_port_start,
127}; 125};
128 126
129static int all_generic_ide; /* Set to claim all devices */ 127static int all_generic_ide; /* Set to claim all devices */
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 92c2d5082b..e783e678ac 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -123,7 +123,6 @@ enum {
123 ich_pata_33 = 1, /* ICH up to UDMA 33 only */ 123 ich_pata_33 = 1, /* ICH up to UDMA 33 only */
124 ich_pata_66 = 2, /* ICH up to 66 Mhz */ 124 ich_pata_66 = 2, /* ICH up to 66 Mhz */
125 ich_pata_100 = 3, /* ICH up to UDMA 100 */ 125 ich_pata_100 = 3, /* ICH up to UDMA 100 */
126 ich_pata_133 = 4, /* ICH up to UDMA 133 */
127 ich5_sata = 5, 126 ich5_sata = 5,
128 ich6_sata = 6, 127 ich6_sata = 6,
129 ich6_sata_ahci = 7, 128 ich6_sata_ahci = 7,
@@ -199,7 +198,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
199 { 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 198 { 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
200 { 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 199 { 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
201 /* Intel ICH5 */ 200 /* Intel ICH5 */
202 { 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 }, 201 { 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
203 /* C-ICH (i810E2) */ 202 /* C-ICH (i810E2) */
204 { 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 203 { 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
205 /* ESB (855GME/875P + 6300ESB) UDMA 100 */ 204 /* ESB (855GME/875P + 6300ESB) UDMA 100 */
@@ -207,7 +206,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
207 /* ICH6 (and 6) (i915) UDMA 100 */ 206 /* ICH6 (and 6) (i915) UDMA 100 */
208 { 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 207 { 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
209 /* ICH7/7-R (i945, i975) UDMA 100*/ 208 /* ICH7/7-R (i945, i975) UDMA 100*/
210 { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 }, 209 { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
211 { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 210 { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
212 /* ICH8 Mobile PATA Controller */ 211 /* ICH8 Mobile PATA Controller */
213 { 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 212 { 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
@@ -290,7 +289,6 @@ static struct scsi_host_template piix_sht = {
290}; 289};
291 290
292static const struct ata_port_operations piix_pata_ops = { 291static const struct ata_port_operations piix_pata_ops = {
293 .port_disable = ata_port_disable,
294 .set_piomode = piix_set_piomode, 292 .set_piomode = piix_set_piomode,
295 .set_dmamode = piix_set_dmamode, 293 .set_dmamode = piix_set_dmamode,
296 .mode_filter = ata_pci_default_filter, 294 .mode_filter = ata_pci_default_filter,
@@ -318,13 +316,11 @@ static const struct ata_port_operations piix_pata_ops = {
318 .irq_handler = ata_interrupt, 316 .irq_handler = ata_interrupt,
319 .irq_clear = ata_bmdma_irq_clear, 317 .irq_clear = ata_bmdma_irq_clear,
320 .irq_on = ata_irq_on, 318 .irq_on = ata_irq_on,
321 .irq_ack = ata_irq_ack,
322 319
323 .port_start = ata_port_start, 320 .port_start = ata_port_start,
324}; 321};
325 322
326static const struct ata_port_operations ich_pata_ops = { 323static const struct ata_port_operations ich_pata_ops = {
327 .port_disable = ata_port_disable,
328 .set_piomode = piix_set_piomode, 324 .set_piomode = piix_set_piomode,
329 .set_dmamode = ich_set_dmamode, 325 .set_dmamode = ich_set_dmamode,
330 .mode_filter = ata_pci_default_filter, 326 .mode_filter = ata_pci_default_filter,
@@ -352,14 +348,11 @@ static const struct ata_port_operations ich_pata_ops = {
352 .irq_handler = ata_interrupt, 348 .irq_handler = ata_interrupt,
353 .irq_clear = ata_bmdma_irq_clear, 349 .irq_clear = ata_bmdma_irq_clear,
354 .irq_on = ata_irq_on, 350 .irq_on = ata_irq_on,
355 .irq_ack = ata_irq_ack,
356 351
357 .port_start = ata_port_start, 352 .port_start = ata_port_start,
358}; 353};
359 354
360static const struct ata_port_operations piix_sata_ops = { 355static const struct ata_port_operations piix_sata_ops = {
361 .port_disable = ata_port_disable,
362
363 .tf_load = ata_tf_load, 356 .tf_load = ata_tf_load,
364 .tf_read = ata_tf_read, 357 .tf_read = ata_tf_read,
365 .check_status = ata_check_status, 358 .check_status = ata_check_status,
@@ -382,7 +375,6 @@ static const struct ata_port_operations piix_sata_ops = {
382 .irq_handler = ata_interrupt, 375 .irq_handler = ata_interrupt,
383 .irq_clear = ata_bmdma_irq_clear, 376 .irq_clear = ata_bmdma_irq_clear,
384 .irq_on = ata_irq_on, 377 .irq_on = ata_irq_on,
385 .irq_ack = ata_irq_ack,
386 378
387 .port_start = ata_port_start, 379 .port_start = ata_port_start,
388}; 380};
@@ -445,15 +437,15 @@ static const struct piix_map_db ich8_map_db = {
445}; 437};
446 438
447static const struct piix_map_db tolapai_map_db = { 439static const struct piix_map_db tolapai_map_db = {
448 .mask = 0x3, 440 .mask = 0x3,
449 .port_enable = 0x3, 441 .port_enable = 0x3,
450 .map = { 442 .map = {
451 /* PM PS SM SS MAP */ 443 /* PM PS SM SS MAP */
452 { P0, NA, P1, NA }, /* 00b */ 444 { P0, NA, P1, NA }, /* 00b */
453 { RV, RV, RV, RV }, /* 01b */ 445 { RV, RV, RV, RV }, /* 01b */
454 { RV, RV, RV, RV }, /* 10b */ 446 { RV, RV, RV, RV }, /* 10b */
455 { RV, RV, RV, RV }, 447 { RV, RV, RV, RV },
456 }, 448 },
457}; 449};
458 450
459static const struct piix_map_db *piix_map_db_table[] = { 451static const struct piix_map_db *piix_map_db_table[] = {
@@ -466,7 +458,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
466}; 458};
467 459
468static struct ata_port_info piix_port_info[] = { 460static struct ata_port_info piix_port_info[] = {
469 /* piix_pata_33: 0: PIIX4 at 33MHz */ 461 [piix_pata_33] = /* PIIX4 at 33MHz */
470 { 462 {
471 .sht = &piix_sht, 463 .sht = &piix_sht,
472 .flags = PIIX_PATA_FLAGS, 464 .flags = PIIX_PATA_FLAGS,
@@ -476,7 +468,7 @@ static struct ata_port_info piix_port_info[] = {
476 .port_ops = &piix_pata_ops, 468 .port_ops = &piix_pata_ops,
477 }, 469 },
478 470
479 /* ich_pata_33: 1 ICH0 - ICH at 33Mhz*/ 471 [ich_pata_33] = /* ICH0 - ICH at 33Mhz*/
480 { 472 {
481 .sht = &piix_sht, 473 .sht = &piix_sht,
482 .flags = PIIX_PATA_FLAGS, 474 .flags = PIIX_PATA_FLAGS,
@@ -485,7 +477,8 @@ static struct ata_port_info piix_port_info[] = {
485 .udma_mask = ATA_UDMA2, /* UDMA33 */ 477 .udma_mask = ATA_UDMA2, /* UDMA33 */
486 .port_ops = &ich_pata_ops, 478 .port_ops = &ich_pata_ops,
487 }, 479 },
488 /* ich_pata_66: 2 ICH controllers up to 66MHz */ 480
481 [ich_pata_66] = /* ICH controllers up to 66MHz */
489 { 482 {
490 .sht = &piix_sht, 483 .sht = &piix_sht,
491 .flags = PIIX_PATA_FLAGS, 484 .flags = PIIX_PATA_FLAGS,
@@ -495,7 +488,7 @@ static struct ata_port_info piix_port_info[] = {
495 .port_ops = &ich_pata_ops, 488 .port_ops = &ich_pata_ops,
496 }, 489 },
497 490
498 /* ich_pata_100: 3 */ 491 [ich_pata_100] =
499 { 492 {
500 .sht = &piix_sht, 493 .sht = &piix_sht,
501 .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR, 494 .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
@@ -505,17 +498,7 @@ static struct ata_port_info piix_port_info[] = {
505 .port_ops = &ich_pata_ops, 498 .port_ops = &ich_pata_ops,
506 }, 499 },
507 500
508 /* ich_pata_133: 4 ICH with full UDMA6 */ 501 [ich5_sata] =
509 {
510 .sht = &piix_sht,
511 .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
512 .pio_mask = 0x1f, /* pio 0-4 */
513 .mwdma_mask = 0x06, /* Check: maybe 0x07 */
514 .udma_mask = ATA_UDMA6, /* UDMA133 */
515 .port_ops = &ich_pata_ops,
516 },
517
518 /* ich5_sata: 5 */
519 { 502 {
520 .sht = &piix_sht, 503 .sht = &piix_sht,
521 .flags = PIIX_SATA_FLAGS, 504 .flags = PIIX_SATA_FLAGS,
@@ -525,7 +508,7 @@ static struct ata_port_info piix_port_info[] = {
525 .port_ops = &piix_sata_ops, 508 .port_ops = &piix_sata_ops,
526 }, 509 },
527 510
528 /* ich6_sata: 6 */ 511 [ich6_sata] =
529 { 512 {
530 .sht = &piix_sht, 513 .sht = &piix_sht,
531 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR, 514 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR,
@@ -535,7 +518,7 @@ static struct ata_port_info piix_port_info[] = {
535 .port_ops = &piix_sata_ops, 518 .port_ops = &piix_sata_ops,
536 }, 519 },
537 520
538 /* ich6_sata_ahci: 7 */ 521 [ich6_sata_ahci] =
539 { 522 {
540 .sht = &piix_sht, 523 .sht = &piix_sht,
541 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR | 524 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
@@ -546,7 +529,7 @@ static struct ata_port_info piix_port_info[] = {
546 .port_ops = &piix_sata_ops, 529 .port_ops = &piix_sata_ops,
547 }, 530 },
548 531
549 /* ich6m_sata_ahci: 8 */ 532 [ich6m_sata_ahci] =
550 { 533 {
551 .sht = &piix_sht, 534 .sht = &piix_sht,
552 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR | 535 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
@@ -557,7 +540,7 @@ static struct ata_port_info piix_port_info[] = {
557 .port_ops = &piix_sata_ops, 540 .port_ops = &piix_sata_ops,
558 }, 541 },
559 542
560 /* ich8_sata_ahci: 9 */ 543 [ich8_sata_ahci] =
561 { 544 {
562 .sht = &piix_sht, 545 .sht = &piix_sht,
563 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR | 546 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
@@ -568,7 +551,7 @@ static struct ata_port_info piix_port_info[] = {
568 .port_ops = &piix_sata_ops, 551 .port_ops = &piix_sata_ops,
569 }, 552 },
570 553
571 /* piix_pata_mwdma: 10: PIIX3 MWDMA only */ 554 [piix_pata_mwdma] = /* PIIX3 MWDMA only */
572 { 555 {
573 .sht = &piix_sht, 556 .sht = &piix_sht,
574 .flags = PIIX_PATA_FLAGS, 557 .flags = PIIX_PATA_FLAGS,
@@ -577,7 +560,7 @@ static struct ata_port_info piix_port_info[] = {
577 .port_ops = &piix_pata_ops, 560 .port_ops = &piix_pata_ops,
578 }, 561 },
579 562
580 /* tolapai_sata_ahci: 11: */ 563 [tolapai_sata_ahci] =
581 { 564 {
582 .sht = &piix_sht, 565 .sht = &piix_sht,
583 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR | 566 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
@@ -615,6 +598,7 @@ static const struct ich_laptop ich_laptop[] = {
615 { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */ 598 { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */
616 { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */ 599 { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */
617 { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ 600 { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */
601 { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */
618 { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ 602 { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */
619 /* end marker */ 603 /* end marker */
620 { 0, } 604 { 0, }
@@ -657,19 +641,20 @@ static int ich_pata_cable_detect(struct ata_port *ap)
657 641
658/** 642/**
659 * piix_pata_prereset - prereset for PATA host controller 643 * piix_pata_prereset - prereset for PATA host controller
660 * @ap: Target port 644 * @link: Target link
661 * @deadline: deadline jiffies for the operation 645 * @deadline: deadline jiffies for the operation
662 * 646 *
663 * LOCKING: 647 * LOCKING:
664 * None (inherited from caller). 648 * None (inherited from caller).
665 */ 649 */
666static int piix_pata_prereset(struct ata_port *ap, unsigned long deadline) 650static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
667{ 651{
652 struct ata_port *ap = link->ap;
668 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 653 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
669 654
670 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) 655 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no]))
671 return -ENOENT; 656 return -ENOENT;
672 return ata_std_prereset(ap, deadline); 657 return ata_std_prereset(link, deadline);
673} 658}
674 659
675static void piix_pata_error_handler(struct ata_port *ap) 660static void piix_pata_error_handler(struct ata_port *ap)
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index c059f78ad9..3f75335890 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -14,6 +14,7 @@
14#include <linux/acpi.h> 14#include <linux/acpi.h>
15#include <linux/libata.h> 15#include <linux/libata.h>
16#include <linux/pci.h> 16#include <linux/pci.h>
17#include <scsi/scsi_device.h>
17#include "libata.h" 18#include "libata.h"
18 19
19#include <acpi/acpi_bus.h> 20#include <acpi/acpi_bus.h>
@@ -40,11 +41,40 @@ static int is_pci_dev(struct device *dev)
40 return (dev->bus == &pci_bus_type); 41 return (dev->bus == &pci_bus_type);
41} 42}
42 43
43static void ata_acpi_associate_sata_port(struct ata_port *ap) 44/**
45 * ata_acpi_associate_sata_port - associate SATA port with ACPI objects
46 * @ap: target SATA port
47 *
48 * Look up ACPI objects associated with @ap and initialize acpi_handle
49 * fields of @ap, the port and devices accordingly.
50 *
51 * LOCKING:
52 * EH context.
53 *
54 * RETURNS:
55 * 0 on success, -errno on failure.
56 */
57void ata_acpi_associate_sata_port(struct ata_port *ap)
44{ 58{
45 acpi_integer adr = SATA_ADR(ap->port_no, NO_PORT_MULT); 59 WARN_ON(!(ap->flags & ATA_FLAG_ACPI_SATA));
60
61 if (!ap->nr_pmp_links) {
62 acpi_integer adr = SATA_ADR(ap->port_no, NO_PORT_MULT);
63
64 ap->link.device->acpi_handle =
65 acpi_get_child(ap->host->acpi_handle, adr);
66 } else {
67 struct ata_link *link;
68
69 ap->link.device->acpi_handle = NULL;
46 70
47 ap->device->acpi_handle = acpi_get_child(ap->host->acpi_handle, adr); 71 ata_port_for_each_link(link, ap) {
72 acpi_integer adr = SATA_ADR(ap->port_no, link->pmp);
73
74 link->device->acpi_handle =
75 acpi_get_child(ap->host->acpi_handle, adr);
76 }
77 }
48} 78}
49 79
50static void ata_acpi_associate_ide_port(struct ata_port *ap) 80static void ata_acpi_associate_ide_port(struct ata_port *ap)
@@ -60,12 +90,53 @@ static void ata_acpi_associate_ide_port(struct ata_port *ap)
60 max_devices++; 90 max_devices++;
61 91
62 for (i = 0; i < max_devices; i++) { 92 for (i = 0; i < max_devices; i++) {
63 struct ata_device *dev = &ap->device[i]; 93 struct ata_device *dev = &ap->link.device[i];
64 94
65 dev->acpi_handle = acpi_get_child(ap->acpi_handle, i); 95 dev->acpi_handle = acpi_get_child(ap->acpi_handle, i);
66 } 96 }
67} 97}
68 98
99static void ata_acpi_handle_hotplug (struct ata_port *ap, struct kobject *kobj,
100 u32 event)
101{
102 char event_string[12];
103 char *envp[] = { event_string, NULL };
104 struct ata_eh_info *ehi = &ap->link.eh_info;
105
106 if (event == 0 || event == 1) {
107 unsigned long flags;
108 spin_lock_irqsave(ap->lock, flags);
109 ata_ehi_clear_desc(ehi);
110 ata_ehi_push_desc(ehi, "ACPI event");
111 ata_ehi_hotplugged(ehi);
112 ata_port_freeze(ap);
113 spin_unlock_irqrestore(ap->lock, flags);
114 }
115
116 if (kobj) {
117 sprintf(event_string, "BAY_EVENT=%d", event);
118 kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
119 }
120}
121
122static void ata_acpi_dev_notify(acpi_handle handle, u32 event, void *data)
123{
124 struct ata_device *dev = data;
125 struct kobject *kobj = NULL;
126
127 if (dev->sdev)
128 kobj = &dev->sdev->sdev_gendev.kobj;
129
130 ata_acpi_handle_hotplug (dev->link->ap, kobj, event);
131}
132
133static void ata_acpi_ap_notify(acpi_handle handle, u32 event, void *data)
134{
135 struct ata_port *ap = data;
136
137 ata_acpi_handle_hotplug (ap, &ap->dev->kobj, event);
138}
139
69/** 140/**
70 * ata_acpi_associate - associate ATA host with ACPI objects 141 * ata_acpi_associate - associate ATA host with ACPI objects
71 * @host: target ATA host 142 * @host: target ATA host
@@ -81,7 +152,7 @@ static void ata_acpi_associate_ide_port(struct ata_port *ap)
81 */ 152 */
82void ata_acpi_associate(struct ata_host *host) 153void ata_acpi_associate(struct ata_host *host)
83{ 154{
84 int i; 155 int i, j;
85 156
86 if (!is_pci_dev(host->dev) || libata_noacpi) 157 if (!is_pci_dev(host->dev) || libata_noacpi)
87 return; 158 return;
@@ -97,6 +168,22 @@ void ata_acpi_associate(struct ata_host *host)
97 ata_acpi_associate_sata_port(ap); 168 ata_acpi_associate_sata_port(ap);
98 else 169 else
99 ata_acpi_associate_ide_port(ap); 170 ata_acpi_associate_ide_port(ap);
171
172 if (ap->acpi_handle)
173 acpi_install_notify_handler (ap->acpi_handle,
174 ACPI_SYSTEM_NOTIFY,
175 ata_acpi_ap_notify,
176 ap);
177
178 for (j = 0; j < ata_link_max_devices(&ap->link); j++) {
179 struct ata_device *dev = &ap->link.device[j];
180
181 if (dev->acpi_handle)
182 acpi_install_notify_handler (dev->acpi_handle,
183 ACPI_SYSTEM_NOTIFY,
184 ata_acpi_dev_notify,
185 dev);
186 }
100 } 187 }
101} 188}
102 189
@@ -113,7 +200,7 @@ void ata_acpi_associate(struct ata_host *host)
113 * RETURNS: 200 * RETURNS:
114 * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure. 201 * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure.
115 */ 202 */
116static int ata_acpi_gtm(const struct ata_port *ap, struct ata_acpi_gtm *gtm) 203int ata_acpi_gtm(const struct ata_port *ap, struct ata_acpi_gtm *gtm)
117{ 204{
118 struct acpi_buffer output = { .length = ACPI_ALLOCATE_BUFFER }; 205 struct acpi_buffer output = { .length = ACPI_ALLOCATE_BUFFER };
119 union acpi_object *out_obj; 206 union acpi_object *out_obj;
@@ -157,6 +244,8 @@ static int ata_acpi_gtm(const struct ata_port *ap, struct ata_acpi_gtm *gtm)
157 return rc; 244 return rc;
158} 245}
159 246
247EXPORT_SYMBOL_GPL(ata_acpi_gtm);
248
160/** 249/**
161 * ata_acpi_stm - execute _STM 250 * ata_acpi_stm - execute _STM
162 * @ap: target ATA port 251 * @ap: target ATA port
@@ -170,7 +259,7 @@ static int ata_acpi_gtm(const struct ata_port *ap, struct ata_acpi_gtm *gtm)
170 * RETURNS: 259 * RETURNS:
171 * 0 on success, -ENOENT if _STM doesn't exist, -errno on failure. 260 * 0 on success, -ENOENT if _STM doesn't exist, -errno on failure.
172 */ 261 */
173static int ata_acpi_stm(const struct ata_port *ap, struct ata_acpi_gtm *stm) 262int ata_acpi_stm(const struct ata_port *ap, struct ata_acpi_gtm *stm)
174{ 263{
175 acpi_status status; 264 acpi_status status;
176 struct acpi_object_list input; 265 struct acpi_object_list input;
@@ -182,10 +271,10 @@ static int ata_acpi_stm(const struct ata_port *ap, struct ata_acpi_gtm *stm)
182 /* Buffers for id may need byteswapping ? */ 271 /* Buffers for id may need byteswapping ? */
183 in_params[1].type = ACPI_TYPE_BUFFER; 272 in_params[1].type = ACPI_TYPE_BUFFER;
184 in_params[1].buffer.length = 512; 273 in_params[1].buffer.length = 512;
185 in_params[1].buffer.pointer = (u8 *)ap->device[0].id; 274 in_params[1].buffer.pointer = (u8 *)ap->link.device[0].id;
186 in_params[2].type = ACPI_TYPE_BUFFER; 275 in_params[2].type = ACPI_TYPE_BUFFER;
187 in_params[2].buffer.length = 512; 276 in_params[2].buffer.length = 512;
188 in_params[2].buffer.pointer = (u8 *)ap->device[1].id; 277 in_params[2].buffer.pointer = (u8 *)ap->link.device[1].id;
189 278
190 input.count = 3; 279 input.count = 3;
191 input.pointer = in_params; 280 input.pointer = in_params;
@@ -202,6 +291,8 @@ static int ata_acpi_stm(const struct ata_port *ap, struct ata_acpi_gtm *stm)
202 return 0; 291 return 0;
203} 292}
204 293
294EXPORT_SYMBOL_GPL(ata_acpi_stm);
295
205/** 296/**
206 * ata_dev_get_GTF - get the drive bootup default taskfile settings 297 * ata_dev_get_GTF - get the drive bootup default taskfile settings
207 * @dev: target ATA device 298 * @dev: target ATA device
@@ -226,7 +317,7 @@ static int ata_acpi_stm(const struct ata_port *ap, struct ata_acpi_gtm *stm)
226static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf, 317static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf,
227 void **ptr_to_free) 318 void **ptr_to_free)
228{ 319{
229 struct ata_port *ap = dev->ap; 320 struct ata_port *ap = dev->link->ap;
230 acpi_status status; 321 acpi_status status;
231 struct acpi_buffer output; 322 struct acpi_buffer output;
232 union acpi_object *out_obj; 323 union acpi_object *out_obj;
@@ -296,6 +387,44 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf,
296} 387}
297 388
298/** 389/**
390 * ata_acpi_cbl_80wire - Check for 80 wire cable
391 * @ap: Port to check
392 *
393 * Return 1 if the ACPI mode data for this port indicates the BIOS selected
394 * an 80wire mode.
395 */
396
397int ata_acpi_cbl_80wire(struct ata_port *ap)
398{
399 struct ata_acpi_gtm gtm;
400 int valid = 0;
401
402 /* No _GTM data, no information */
403 if (ata_acpi_gtm(ap, &gtm) < 0)
404 return 0;
405
406 /* Split timing, DMA enabled */
407 if ((gtm.flags & 0x11) == 0x11 && gtm.drive[0].dma < 55)
408 valid |= 1;
409 if ((gtm.flags & 0x14) == 0x14 && gtm.drive[1].dma < 55)
410 valid |= 2;
411 /* Shared timing, DMA enabled */
412 if ((gtm.flags & 0x11) == 0x01 && gtm.drive[0].dma < 55)
413 valid |= 1;
414 if ((gtm.flags & 0x14) == 0x04 && gtm.drive[0].dma < 55)
415 valid |= 2;
416
417 /* Drive check */
418 if ((valid & 1) && ata_dev_enabled(&ap->link.device[0]))
419 return 1;
420 if ((valid & 2) && ata_dev_enabled(&ap->link.device[1]))
421 return 1;
422 return 0;
423}
424
425EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire);
426
427/**
299 * taskfile_load_raw - send taskfile registers to host controller 428 * taskfile_load_raw - send taskfile registers to host controller
300 * @dev: target ATA device 429 * @dev: target ATA device
301 * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7) 430 * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7)
@@ -320,7 +449,7 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf,
320static int taskfile_load_raw(struct ata_device *dev, 449static int taskfile_load_raw(struct ata_device *dev,
321 const struct ata_acpi_gtf *gtf) 450 const struct ata_acpi_gtf *gtf)
322{ 451{
323 struct ata_port *ap = dev->ap; 452 struct ata_port *ap = dev->link->ap;
324 struct ata_taskfile tf, rtf; 453 struct ata_taskfile tf, rtf;
325 unsigned int err_mask; 454 unsigned int err_mask;
326 455
@@ -349,7 +478,7 @@ static int taskfile_load_raw(struct ata_device *dev,
349 tf.lbal, tf.lbam, tf.lbah, tf.device); 478 tf.lbal, tf.lbam, tf.lbah, tf.device);
350 479
351 rtf = tf; 480 rtf = tf;
352 err_mask = ata_exec_internal(dev, &rtf, NULL, DMA_NONE, NULL, 0); 481 err_mask = ata_exec_internal(dev, &rtf, NULL, DMA_NONE, NULL, 0, 0);
353 if (err_mask) { 482 if (err_mask) {
354 ata_dev_printk(dev, KERN_ERR, 483 ata_dev_printk(dev, KERN_ERR,
355 "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x failed " 484 "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x failed "
@@ -424,7 +553,7 @@ static int ata_acpi_exec_tfs(struct ata_device *dev)
424 */ 553 */
425static int ata_acpi_push_id(struct ata_device *dev) 554static int ata_acpi_push_id(struct ata_device *dev)
426{ 555{
427 struct ata_port *ap = dev->ap; 556 struct ata_port *ap = dev->link->ap;
428 int err; 557 int err;
429 acpi_status status; 558 acpi_status status;
430 struct acpi_object_list input; 559 struct acpi_object_list input;
@@ -508,7 +637,7 @@ int ata_acpi_on_suspend(struct ata_port *ap)
508 */ 637 */
509void ata_acpi_on_resume(struct ata_port *ap) 638void ata_acpi_on_resume(struct ata_port *ap)
510{ 639{
511 int i; 640 struct ata_device *dev;
512 641
513 if (ap->acpi_handle && (ap->pflags & ATA_PFLAG_GTM_VALID)) { 642 if (ap->acpi_handle && (ap->pflags & ATA_PFLAG_GTM_VALID)) {
514 BUG_ON(ap->flags & ATA_FLAG_ACPI_SATA); 643 BUG_ON(ap->flags & ATA_FLAG_ACPI_SATA);
@@ -518,8 +647,8 @@ void ata_acpi_on_resume(struct ata_port *ap)
518 } 647 }
519 648
520 /* schedule _GTF */ 649 /* schedule _GTF */
521 for (i = 0; i < ATA_MAX_DEVICES; i++) 650 ata_link_for_each_dev(dev, &ap->link)
522 ap->device[i].flags |= ATA_DFLAG_ACPI_PENDING; 651 dev->flags |= ATA_DFLAG_ACPI_PENDING;
523} 652}
524 653
525/** 654/**
@@ -538,8 +667,8 @@ void ata_acpi_on_resume(struct ata_port *ap)
538 */ 667 */
539int ata_acpi_on_devcfg(struct ata_device *dev) 668int ata_acpi_on_devcfg(struct ata_device *dev)
540{ 669{
541 struct ata_port *ap = dev->ap; 670 struct ata_port *ap = dev->link->ap;
542 struct ata_eh_context *ehc = &ap->eh_context; 671 struct ata_eh_context *ehc = &ap->link.eh_context;
543 int acpi_sata = ap->flags & ATA_FLAG_ACPI_SATA; 672 int acpi_sata = ap->flags & ATA_FLAG_ACPI_SATA;
544 int rc; 673 int rc;
545 674
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 772be09b46..b05384a8c3 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -59,8 +59,6 @@
59 59
60#include "libata.h" 60#include "libata.h"
61 61
62#define DRV_VERSION "2.21" /* must be exactly four chars */
63
64 62
65/* debounce timing parameters in msecs { interval, duration, timeout } */ 63/* debounce timing parameters in msecs { interval, duration, timeout } */
66const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; 64const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
@@ -70,6 +68,7 @@ const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
70static unsigned int ata_dev_init_params(struct ata_device *dev, 68static unsigned int ata_dev_init_params(struct ata_device *dev,
71 u16 heads, u16 sectors); 69 u16 heads, u16 sectors);
72static unsigned int ata_dev_set_xfermode(struct ata_device *dev); 70static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
71static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
73static void ata_dev_xfermask(struct ata_device *dev); 72static void ata_dev_xfermask(struct ata_device *dev);
74static unsigned long ata_dev_blacklisted(const struct ata_device *dev); 73static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
75 74
@@ -86,6 +85,10 @@ int atapi_dmadir = 0;
86module_param(atapi_dmadir, int, 0444); 85module_param(atapi_dmadir, int, 0444);
87MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); 86MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
88 87
88int atapi_passthru16 = 1;
89module_param(atapi_passthru16, int, 0444);
90MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
91
89int libata_fua = 0; 92int libata_fua = 0;
90module_param_named(fua, libata_fua, int, 0444); 93module_param_named(fua, libata_fua, int, 0444);
91MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); 94MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
@@ -94,13 +97,17 @@ static int ata_ignore_hpa = 0;
94module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); 97module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
95MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)"); 98MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
96 99
100static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
101module_param_named(dma, libata_dma_mask, int, 0444);
102MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
103
97static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ; 104static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
98module_param(ata_probe_timeout, int, 0444); 105module_param(ata_probe_timeout, int, 0444);
99MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)"); 106MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
100 107
101int libata_noacpi = 1; 108int libata_noacpi = 0;
102module_param_named(noacpi, libata_noacpi, int, 0444); 109module_param_named(noacpi, libata_noacpi, int, 0444);
103MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set"); 110MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
104 111
105MODULE_AUTHOR("Jeff Garzik"); 112MODULE_AUTHOR("Jeff Garzik");
106MODULE_DESCRIPTION("Library module for ATA devices"); 113MODULE_DESCRIPTION("Library module for ATA devices");
@@ -235,7 +242,7 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
235 if (dev->flags & ATA_DFLAG_PIO) { 242 if (dev->flags & ATA_DFLAG_PIO) {
236 tf->protocol = ATA_PROT_PIO; 243 tf->protocol = ATA_PROT_PIO;
237 index = dev->multi_count ? 0 : 8; 244 index = dev->multi_count ? 0 : 8;
238 } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) { 245 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
239 /* Unable to use DMA due to host limitation */ 246 /* Unable to use DMA due to host limitation */
240 tf->protocol = ATA_PROT_PIO; 247 tf->protocol = ATA_PROT_PIO;
241 index = dev->multi_count ? 0 : 8; 248 index = dev->multi_count ? 0 : 8;
@@ -604,7 +611,7 @@ static const char *sata_spd_string(unsigned int spd)
604void ata_dev_disable(struct ata_device *dev) 611void ata_dev_disable(struct ata_device *dev)
605{ 612{
606 if (ata_dev_enabled(dev)) { 613 if (ata_dev_enabled(dev)) {
607 if (ata_msg_drv(dev->ap)) 614 if (ata_msg_drv(dev->link->ap))
608 ata_dev_printk(dev, KERN_WARNING, "disabled\n"); 615 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
609 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | 616 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
610 ATA_DNXFER_QUIET); 617 ATA_DNXFER_QUIET);
@@ -667,37 +674,57 @@ static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
667 * None. 674 * None.
668 * 675 *
669 * RETURNS: 676 * RETURNS:
670 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN 677 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
671 * the event of failure. 678 * %ATA_DEV_UNKNOWN the event of failure.
672 */ 679 */
673
674unsigned int ata_dev_classify(const struct ata_taskfile *tf) 680unsigned int ata_dev_classify(const struct ata_taskfile *tf)
675{ 681{
676 /* Apple's open source Darwin code hints that some devices only 682 /* Apple's open source Darwin code hints that some devices only
677 * put a proper signature into the LBA mid/high registers, 683 * put a proper signature into the LBA mid/high registers,
678 * So, we only check those. It's sufficient for uniqueness. 684 * So, we only check those. It's sufficient for uniqueness.
685 *
686 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
687 * signatures for ATA and ATAPI devices attached on SerialATA,
688 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
689 * spec has never mentioned about using different signatures
690 * for ATA/ATAPI devices. Then, Serial ATA II: Port
691 * Multiplier specification began to use 0x69/0x96 to identify
692 * port multpliers and 0x3c/0xc3 to identify SEMB device.
693 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
694 * 0x69/0x96 shortly and described them as reserved for
695 * SerialATA.
696 *
697 * We follow the current spec and consider that 0x69/0x96
698 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
679 */ 699 */
680 700 if ((tf->lbam == 0) && (tf->lbah == 0)) {
681 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
682 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
683 DPRINTK("found ATA device by sig\n"); 701 DPRINTK("found ATA device by sig\n");
684 return ATA_DEV_ATA; 702 return ATA_DEV_ATA;
685 } 703 }
686 704
687 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) || 705 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
688 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
689 DPRINTK("found ATAPI device by sig\n"); 706 DPRINTK("found ATAPI device by sig\n");
690 return ATA_DEV_ATAPI; 707 return ATA_DEV_ATAPI;
691 } 708 }
692 709
710 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
711 DPRINTK("found PMP device by sig\n");
712 return ATA_DEV_PMP;
713 }
714
715 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
716 printk("ata: SEMB device ignored\n");
717 return ATA_DEV_SEMB_UNSUP; /* not yet */
718 }
719
693 DPRINTK("unknown device\n"); 720 DPRINTK("unknown device\n");
694 return ATA_DEV_UNKNOWN; 721 return ATA_DEV_UNKNOWN;
695} 722}
696 723
697/** 724/**
698 * ata_dev_try_classify - Parse returned ATA device signature 725 * ata_dev_try_classify - Parse returned ATA device signature
699 * @ap: ATA channel to examine 726 * @dev: ATA device to classify (starting at zero)
700 * @device: Device to examine (starting at zero) 727 * @present: device seems present
701 * @r_err: Value of error register on completion 728 * @r_err: Value of error register on completion
702 * 729 *
703 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, 730 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
@@ -715,15 +742,15 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
715 * RETURNS: 742 * RETURNS:
716 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE. 743 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
717 */ 744 */
718 745unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
719unsigned int 746 u8 *r_err)
720ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
721{ 747{
748 struct ata_port *ap = dev->link->ap;
722 struct ata_taskfile tf; 749 struct ata_taskfile tf;
723 unsigned int class; 750 unsigned int class;
724 u8 err; 751 u8 err;
725 752
726 ap->ops->dev_select(ap, device); 753 ap->ops->dev_select(ap, dev->devno);
727 754
728 memset(&tf, 0, sizeof(tf)); 755 memset(&tf, 0, sizeof(tf));
729 756
@@ -733,12 +760,12 @@ ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
733 *r_err = err; 760 *r_err = err;
734 761
735 /* see if device passed diags: if master then continue and warn later */ 762 /* see if device passed diags: if master then continue and warn later */
736 if (err == 0 && device == 0) 763 if (err == 0 && dev->devno == 0)
737 /* diagnostic fail : do nothing _YET_ */ 764 /* diagnostic fail : do nothing _YET_ */
738 ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC; 765 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
739 else if (err == 1) 766 else if (err == 1)
740 /* do nothing */ ; 767 /* do nothing */ ;
741 else if ((device == 0) && (err == 0x81)) 768 else if ((dev->devno == 0) && (err == 0x81))
742 /* do nothing */ ; 769 /* do nothing */ ;
743 else 770 else
744 return ATA_DEV_NONE; 771 return ATA_DEV_NONE;
@@ -746,10 +773,20 @@ ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
746 /* determine if device is ATA or ATAPI */ 773 /* determine if device is ATA or ATAPI */
747 class = ata_dev_classify(&tf); 774 class = ata_dev_classify(&tf);
748 775
749 if (class == ATA_DEV_UNKNOWN) 776 if (class == ATA_DEV_UNKNOWN) {
750 return ATA_DEV_NONE; 777 /* If the device failed diagnostic, it's likely to
751 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) 778 * have reported incorrect device signature too.
752 return ATA_DEV_NONE; 779 * Assume ATA device if the device seems present but
780 * device signature is invalid with diagnostic
781 * failure.
782 */
783 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
784 class = ATA_DEV_ATA;
785 else
786 class = ATA_DEV_NONE;
787 } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
788 class = ATA_DEV_NONE;
789
753 return class; 790 return class;
754} 791}
755 792
@@ -816,6 +853,21 @@ void ata_id_c_string(const u16 *id, unsigned char *s,
816 *p = '\0'; 853 *p = '\0';
817} 854}
818 855
856static u64 ata_id_n_sectors(const u16 *id)
857{
858 if (ata_id_has_lba(id)) {
859 if (ata_id_has_lba48(id))
860 return ata_id_u64(id, 100);
861 else
862 return ata_id_u32(id, 60);
863 } else {
864 if (ata_id_current_chs_valid(id))
865 return ata_id_u32(id, 57);
866 else
867 return id[1] * id[3] * id[6];
868 }
869}
870
819static u64 ata_tf_to_lba48(struct ata_taskfile *tf) 871static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
820{ 872{
821 u64 sectors = 0; 873 u64 sectors = 0;
@@ -843,129 +895,110 @@ static u64 ata_tf_to_lba(struct ata_taskfile *tf)
843} 895}
844 896
845/** 897/**
846 * ata_read_native_max_address_ext - LBA48 native max query 898 * ata_read_native_max_address - Read native max address
847 * @dev: Device to query 899 * @dev: target device
900 * @max_sectors: out parameter for the result native max address
848 * 901 *
849 * Perform an LBA48 size query upon the device in question. Return the 902 * Perform an LBA48 or LBA28 native size query upon the device in
850 * actual LBA48 size or zero if the command fails. 903 * question.
851 */
852
853static u64 ata_read_native_max_address_ext(struct ata_device *dev)
854{
855 unsigned int err;
856 struct ata_taskfile tf;
857
858 ata_tf_init(dev, &tf);
859
860 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
861 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
862 tf.protocol |= ATA_PROT_NODATA;
863 tf.device |= 0x40;
864
865 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
866 if (err)
867 return 0;
868
869 return ata_tf_to_lba48(&tf);
870}
871
872/**
873 * ata_read_native_max_address - LBA28 native max query
874 * @dev: Device to query
875 * 904 *
876 * Performa an LBA28 size query upon the device in question. Return the 905 * RETURNS:
877 * actual LBA28 size or zero if the command fails. 906 * 0 on success, -EACCES if command is aborted by the drive.
907 * -EIO on other errors.
878 */ 908 */
879 909static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
880static u64 ata_read_native_max_address(struct ata_device *dev)
881{ 910{
882 unsigned int err; 911 unsigned int err_mask;
883 struct ata_taskfile tf; 912 struct ata_taskfile tf;
913 int lba48 = ata_id_has_lba48(dev->id);
884 914
885 ata_tf_init(dev, &tf); 915 ata_tf_init(dev, &tf);
886 916
887 tf.command = ATA_CMD_READ_NATIVE_MAX; 917 /* always clear all address registers */
888 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 918 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
919
920 if (lba48) {
921 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
922 tf.flags |= ATA_TFLAG_LBA48;
923 } else
924 tf.command = ATA_CMD_READ_NATIVE_MAX;
925
889 tf.protocol |= ATA_PROT_NODATA; 926 tf.protocol |= ATA_PROT_NODATA;
890 tf.device |= 0x40; 927 tf.device |= ATA_LBA;
891 928
892 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); 929 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
893 if (err) 930 if (err_mask) {
894 return 0; 931 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
932 "max address (err_mask=0x%x)\n", err_mask);
933 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
934 return -EACCES;
935 return -EIO;
936 }
895 937
896 return ata_tf_to_lba(&tf); 938 if (lba48)
939 *max_sectors = ata_tf_to_lba48(&tf);
940 else
941 *max_sectors = ata_tf_to_lba(&tf);
942 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
943 (*max_sectors)--;
944 return 0;
897} 945}
898 946
899/** 947/**
900 * ata_set_native_max_address_ext - LBA48 native max set 948 * ata_set_max_sectors - Set max sectors
901 * @dev: Device to query 949 * @dev: target device
902 * @new_sectors: new max sectors value to set for the device 950 * @new_sectors: new max sectors value to set for the device
903 * 951 *
904 * Perform an LBA48 size set max upon the device in question. Return the 952 * Set max sectors of @dev to @new_sectors.
905 * actual LBA48 size or zero if the command fails. 953 *
954 * RETURNS:
955 * 0 on success, -EACCES if command is aborted or denied (due to
956 * previous non-volatile SET_MAX) by the drive. -EIO on other
957 * errors.
906 */ 958 */
907 959static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
908static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
909{ 960{
910 unsigned int err; 961 unsigned int err_mask;
911 struct ata_taskfile tf; 962 struct ata_taskfile tf;
963 int lba48 = ata_id_has_lba48(dev->id);
912 964
913 new_sectors--; 965 new_sectors--;
914 966
915 ata_tf_init(dev, &tf); 967 ata_tf_init(dev, &tf);
916 968
917 tf.command = ATA_CMD_SET_MAX_EXT; 969 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
918 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
919 tf.protocol |= ATA_PROT_NODATA;
920 tf.device |= 0x40;
921
922 tf.lbal = (new_sectors >> 0) & 0xff;
923 tf.lbam = (new_sectors >> 8) & 0xff;
924 tf.lbah = (new_sectors >> 16) & 0xff;
925
926 tf.hob_lbal = (new_sectors >> 24) & 0xff;
927 tf.hob_lbam = (new_sectors >> 32) & 0xff;
928 tf.hob_lbah = (new_sectors >> 40) & 0xff;
929
930 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
931 if (err)
932 return 0;
933
934 return ata_tf_to_lba48(&tf);
935}
936
937/**
938 * ata_set_native_max_address - LBA28 native max set
939 * @dev: Device to query
940 * @new_sectors: new max sectors value to set for the device
941 *
942 * Perform an LBA28 size set max upon the device in question. Return the
943 * actual LBA28 size or zero if the command fails.
944 */
945 970
946static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors) 971 if (lba48) {
947{ 972 tf.command = ATA_CMD_SET_MAX_EXT;
948 unsigned int err; 973 tf.flags |= ATA_TFLAG_LBA48;
949 struct ata_taskfile tf;
950 974
951 new_sectors--; 975 tf.hob_lbal = (new_sectors >> 24) & 0xff;
976 tf.hob_lbam = (new_sectors >> 32) & 0xff;
977 tf.hob_lbah = (new_sectors >> 40) & 0xff;
978 } else {
979 tf.command = ATA_CMD_SET_MAX;
952 980
953 ata_tf_init(dev, &tf); 981 tf.device |= (new_sectors >> 24) & 0xf;
982 }
954 983
955 tf.command = ATA_CMD_SET_MAX;
956 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
957 tf.protocol |= ATA_PROT_NODATA; 984 tf.protocol |= ATA_PROT_NODATA;
985 tf.device |= ATA_LBA;
958 986
959 tf.lbal = (new_sectors >> 0) & 0xff; 987 tf.lbal = (new_sectors >> 0) & 0xff;
960 tf.lbam = (new_sectors >> 8) & 0xff; 988 tf.lbam = (new_sectors >> 8) & 0xff;
961 tf.lbah = (new_sectors >> 16) & 0xff; 989 tf.lbah = (new_sectors >> 16) & 0xff;
962 tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;
963 990
964 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); 991 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
965 if (err) 992 if (err_mask) {
966 return 0; 993 ata_dev_printk(dev, KERN_WARNING, "failed to set "
994 "max address (err_mask=0x%x)\n", err_mask);
995 if (err_mask == AC_ERR_DEV &&
996 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
997 return -EACCES;
998 return -EIO;
999 }
967 1000
968 return ata_tf_to_lba(&tf); 1001 return 0;
969} 1002}
970 1003
971/** 1004/**
@@ -975,60 +1008,93 @@ static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
975 * Read the size of an LBA28 or LBA48 disk with HPA features and resize 1008 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
976 * it if required to the full size of the media. The caller must check 1009 * it if required to the full size of the media. The caller must check
977 * the drive has the HPA feature set enabled. 1010 * the drive has the HPA feature set enabled.
1011 *
1012 * RETURNS:
1013 * 0 on success, -errno on failure.
978 */ 1014 */
979 1015static int ata_hpa_resize(struct ata_device *dev)
980static u64 ata_hpa_resize(struct ata_device *dev)
981{ 1016{
982 u64 sectors = dev->n_sectors; 1017 struct ata_eh_context *ehc = &dev->link->eh_context;
983 u64 hpa_sectors; 1018 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1019 u64 sectors = ata_id_n_sectors(dev->id);
1020 u64 native_sectors;
1021 int rc;
984 1022
985 if (ata_id_has_lba48(dev->id)) 1023 /* do we need to do it? */
986 hpa_sectors = ata_read_native_max_address_ext(dev); 1024 if (dev->class != ATA_DEV_ATA ||
987 else 1025 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
988 hpa_sectors = ata_read_native_max_address(dev); 1026 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1027 return 0;
989 1028
990 if (hpa_sectors > sectors) { 1029 /* read native max address */
991 ata_dev_printk(dev, KERN_INFO, 1030 rc = ata_read_native_max_address(dev, &native_sectors);
992 "Host Protected Area detected:\n" 1031 if (rc) {
993 "\tcurrent size: %lld sectors\n" 1032 /* If HPA isn't going to be unlocked, skip HPA
994 "\tnative size: %lld sectors\n", 1033 * resizing from the next try.
995 (long long)sectors, (long long)hpa_sectors); 1034 */
996 1035 if (!ata_ignore_hpa) {
997 if (ata_ignore_hpa) { 1036 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
998 if (ata_id_has_lba48(dev->id)) 1037 "broken, will skip HPA handling\n");
999 hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors); 1038 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1000 else 1039
1001 hpa_sectors = ata_set_native_max_address(dev, 1040 /* we can continue if device aborted the command */
1002 hpa_sectors); 1041 if (rc == -EACCES)
1003 1042 rc = 0;
1004 if (hpa_sectors) {
1005 ata_dev_printk(dev, KERN_INFO, "native size "
1006 "increased to %lld sectors\n",
1007 (long long)hpa_sectors);
1008 return hpa_sectors;
1009 }
1010 } 1043 }
1011 } else if (hpa_sectors < sectors)
1012 ata_dev_printk(dev, KERN_WARNING, "%s 1: hpa sectors (%lld) "
1013 "is smaller than sectors (%lld)\n", __FUNCTION__,
1014 (long long)hpa_sectors, (long long)sectors);
1015 1044
1016 return sectors; 1045 return rc;
1017} 1046 }
1018 1047
1019static u64 ata_id_n_sectors(const u16 *id) 1048 /* nothing to do? */
1020{ 1049 if (native_sectors <= sectors || !ata_ignore_hpa) {
1021 if (ata_id_has_lba(id)) { 1050 if (!print_info || native_sectors == sectors)
1022 if (ata_id_has_lba48(id)) 1051 return 0;
1023 return ata_id_u64(id, 100); 1052
1024 else 1053 if (native_sectors > sectors)
1025 return ata_id_u32(id, 60); 1054 ata_dev_printk(dev, KERN_INFO,
1026 } else { 1055 "HPA detected: current %llu, native %llu\n",
1027 if (ata_id_current_chs_valid(id)) 1056 (unsigned long long)sectors,
1028 return ata_id_u32(id, 57); 1057 (unsigned long long)native_sectors);
1029 else 1058 else if (native_sectors < sectors)
1030 return id[1] * id[3] * id[6]; 1059 ata_dev_printk(dev, KERN_WARNING,
1060 "native sectors (%llu) is smaller than "
1061 "sectors (%llu)\n",
1062 (unsigned long long)native_sectors,
1063 (unsigned long long)sectors);
1064 return 0;
1031 } 1065 }
1066
1067 /* let's unlock HPA */
1068 rc = ata_set_max_sectors(dev, native_sectors);
1069 if (rc == -EACCES) {
1070 /* if device aborted the command, skip HPA resizing */
1071 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1072 "(%llu -> %llu), skipping HPA handling\n",
1073 (unsigned long long)sectors,
1074 (unsigned long long)native_sectors);
1075 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1076 return 0;
1077 } else if (rc)
1078 return rc;
1079
1080 /* re-read IDENTIFY data */
1081 rc = ata_dev_reread_id(dev, 0);
1082 if (rc) {
1083 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1084 "data after HPA resizing\n");
1085 return rc;
1086 }
1087
1088 if (print_info) {
1089 u64 new_sectors = ata_id_n_sectors(dev->id);
1090 ata_dev_printk(dev, KERN_INFO,
1091 "HPA unlocked: %llu -> %llu, native %llu\n",
1092 (unsigned long long)sectors,
1093 (unsigned long long)new_sectors,
1094 (unsigned long long)native_sectors);
1095 }
1096
1097 return 0;
1032} 1098}
1033 1099
1034/** 1100/**
@@ -1150,7 +1216,7 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
1150 ap->ops->dev_select(ap, device); 1216 ap->ops->dev_select(ap, device);
1151 1217
1152 if (wait) { 1218 if (wait) {
1153 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI) 1219 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1154 msleep(150); 1220 msleep(150);
1155 ata_wait_idle(ap); 1221 ata_wait_idle(ap);
1156 } 1222 }
@@ -1328,6 +1394,7 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1328 * @dma_dir: Data tranfer direction of the command 1394 * @dma_dir: Data tranfer direction of the command
1329 * @sg: sg list for the data buffer of the command 1395 * @sg: sg list for the data buffer of the command
1330 * @n_elem: Number of sg entries 1396 * @n_elem: Number of sg entries
1397 * @timeout: Timeout in msecs (0 for default)
1331 * 1398 *
1332 * Executes libata internal command with timeout. @tf contains 1399 * Executes libata internal command with timeout. @tf contains
1333 * command on entry and result on return. Timeout and error 1400 * command on entry and result on return. Timeout and error
@@ -1344,13 +1411,15 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1344unsigned ata_exec_internal_sg(struct ata_device *dev, 1411unsigned ata_exec_internal_sg(struct ata_device *dev,
1345 struct ata_taskfile *tf, const u8 *cdb, 1412 struct ata_taskfile *tf, const u8 *cdb,
1346 int dma_dir, struct scatterlist *sg, 1413 int dma_dir, struct scatterlist *sg,
1347 unsigned int n_elem) 1414 unsigned int n_elem, unsigned long timeout)
1348{ 1415{
1349 struct ata_port *ap = dev->ap; 1416 struct ata_link *link = dev->link;
1417 struct ata_port *ap = link->ap;
1350 u8 command = tf->command; 1418 u8 command = tf->command;
1351 struct ata_queued_cmd *qc; 1419 struct ata_queued_cmd *qc;
1352 unsigned int tag, preempted_tag; 1420 unsigned int tag, preempted_tag;
1353 u32 preempted_sactive, preempted_qc_active; 1421 u32 preempted_sactive, preempted_qc_active;
1422 int preempted_nr_active_links;
1354 DECLARE_COMPLETION_ONSTACK(wait); 1423 DECLARE_COMPLETION_ONSTACK(wait);
1355 unsigned long flags; 1424 unsigned long flags;
1356 unsigned int err_mask; 1425 unsigned int err_mask;
@@ -1386,12 +1455,14 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1386 qc->dev = dev; 1455 qc->dev = dev;
1387 ata_qc_reinit(qc); 1456 ata_qc_reinit(qc);
1388 1457
1389 preempted_tag = ap->active_tag; 1458 preempted_tag = link->active_tag;
1390 preempted_sactive = ap->sactive; 1459 preempted_sactive = link->sactive;
1391 preempted_qc_active = ap->qc_active; 1460 preempted_qc_active = ap->qc_active;
1392 ap->active_tag = ATA_TAG_POISON; 1461 preempted_nr_active_links = ap->nr_active_links;
1393 ap->sactive = 0; 1462 link->active_tag = ATA_TAG_POISON;
1463 link->sactive = 0;
1394 ap->qc_active = 0; 1464 ap->qc_active = 0;
1465 ap->nr_active_links = 0;
1395 1466
1396 /* prepare & issue qc */ 1467 /* prepare & issue qc */
1397 qc->tf = *tf; 1468 qc->tf = *tf;
@@ -1416,7 +1487,10 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1416 1487
1417 spin_unlock_irqrestore(ap->lock, flags); 1488 spin_unlock_irqrestore(ap->lock, flags);
1418 1489
1419 rc = wait_for_completion_timeout(&wait, ata_probe_timeout); 1490 if (!timeout)
1491 timeout = ata_probe_timeout * 1000 / HZ;
1492
1493 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1420 1494
1421 ata_port_flush_task(ap); 1495 ata_port_flush_task(ap);
1422 1496
@@ -1467,9 +1541,10 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1467 err_mask = qc->err_mask; 1541 err_mask = qc->err_mask;
1468 1542
1469 ata_qc_free(qc); 1543 ata_qc_free(qc);
1470 ap->active_tag = preempted_tag; 1544 link->active_tag = preempted_tag;
1471 ap->sactive = preempted_sactive; 1545 link->sactive = preempted_sactive;
1472 ap->qc_active = preempted_qc_active; 1546 ap->qc_active = preempted_qc_active;
1547 ap->nr_active_links = preempted_nr_active_links;
1473 1548
1474 /* XXX - Some LLDDs (sata_mv) disable port on command failure. 1549 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1475 * Until those drivers are fixed, we detect the condition 1550 * Until those drivers are fixed, we detect the condition
@@ -1500,6 +1575,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1500 * @dma_dir: Data tranfer direction of the command 1575 * @dma_dir: Data tranfer direction of the command
1501 * @buf: Data buffer of the command 1576 * @buf: Data buffer of the command
1502 * @buflen: Length of data buffer 1577 * @buflen: Length of data buffer
1578 * @timeout: Timeout in msecs (0 for default)
1503 * 1579 *
1504 * Wrapper around ata_exec_internal_sg() which takes simple 1580 * Wrapper around ata_exec_internal_sg() which takes simple
1505 * buffer instead of sg list. 1581 * buffer instead of sg list.
@@ -1512,7 +1588,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1512 */ 1588 */
1513unsigned ata_exec_internal(struct ata_device *dev, 1589unsigned ata_exec_internal(struct ata_device *dev,
1514 struct ata_taskfile *tf, const u8 *cdb, 1590 struct ata_taskfile *tf, const u8 *cdb,
1515 int dma_dir, void *buf, unsigned int buflen) 1591 int dma_dir, void *buf, unsigned int buflen,
1592 unsigned long timeout)
1516{ 1593{
1517 struct scatterlist *psg = NULL, sg; 1594 struct scatterlist *psg = NULL, sg;
1518 unsigned int n_elem = 0; 1595 unsigned int n_elem = 0;
@@ -1524,7 +1601,8 @@ unsigned ata_exec_internal(struct ata_device *dev,
1524 n_elem++; 1601 n_elem++;
1525 } 1602 }
1526 1603
1527 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem); 1604 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1605 timeout);
1528} 1606}
1529 1607
1530/** 1608/**
@@ -1551,7 +1629,7 @@ unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1551 tf.flags |= ATA_TFLAG_DEVICE; 1629 tf.flags |= ATA_TFLAG_DEVICE;
1552 tf.protocol = ATA_PROT_NODATA; 1630 tf.protocol = ATA_PROT_NODATA;
1553 1631
1554 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); 1632 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1555} 1633}
1556 1634
1557/** 1635/**
@@ -1566,7 +1644,7 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1566{ 1644{
1567 /* Controller doesn't support IORDY. Probably a pointless check 1645 /* Controller doesn't support IORDY. Probably a pointless check
1568 as the caller should know this */ 1646 as the caller should know this */
1569 if (adev->ap->flags & ATA_FLAG_NO_IORDY) 1647 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1570 return 0; 1648 return 0;
1571 /* PIO3 and higher it is mandatory */ 1649 /* PIO3 and higher it is mandatory */
1572 if (adev->pio_mode > XFER_PIO_2) 1650 if (adev->pio_mode > XFER_PIO_2)
@@ -1613,6 +1691,9 @@ static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1613 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS 1691 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1614 * for pre-ATA4 drives. 1692 * for pre-ATA4 drives.
1615 * 1693 *
1694 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1695 * now we abort if we hit that case.
1696 *
1616 * LOCKING: 1697 * LOCKING:
1617 * Kernel thread context (may sleep) 1698 * Kernel thread context (may sleep)
1618 * 1699 *
@@ -1622,7 +1703,7 @@ static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1622int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, 1703int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1623 unsigned int flags, u16 *id) 1704 unsigned int flags, u16 *id)
1624{ 1705{
1625 struct ata_port *ap = dev->ap; 1706 struct ata_port *ap = dev->link->ap;
1626 unsigned int class = *p_class; 1707 unsigned int class = *p_class;
1627 struct ata_taskfile tf; 1708 struct ata_taskfile tf;
1628 unsigned int err_mask = 0; 1709 unsigned int err_mask = 0;
@@ -1663,7 +1744,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1663 tf.flags |= ATA_TFLAG_POLLING; 1744 tf.flags |= ATA_TFLAG_POLLING;
1664 1745
1665 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 1746 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1666 id, sizeof(id[0]) * ATA_ID_WORDS); 1747 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1667 if (err_mask) { 1748 if (err_mask) {
1668 if (err_mask & AC_ERR_NODEV_HINT) { 1749 if (err_mask & AC_ERR_NODEV_HINT) {
1669 DPRINTK("ata%u.%d: NODEV after polling detection\n", 1750 DPRINTK("ata%u.%d: NODEV after polling detection\n",
@@ -1722,7 +1803,8 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1722 tf.feature = SETFEATURES_SPINUP; 1803 tf.feature = SETFEATURES_SPINUP;
1723 tf.protocol = ATA_PROT_NODATA; 1804 tf.protocol = ATA_PROT_NODATA;
1724 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1805 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1725 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); 1806 err_mask = ata_exec_internal(dev, &tf, NULL,
1807 DMA_NONE, NULL, 0, 0);
1726 if (err_mask && id[2] != 0x738c) { 1808 if (err_mask && id[2] != 0x738c) {
1727 rc = -EIO; 1809 rc = -EIO;
1728 reason = "SPINUP failed"; 1810 reason = "SPINUP failed";
@@ -1740,10 +1822,13 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1740 /* 1822 /*
1741 * The exact sequence expected by certain pre-ATA4 drives is: 1823 * The exact sequence expected by certain pre-ATA4 drives is:
1742 * SRST RESET 1824 * SRST RESET
1743 * IDENTIFY 1825 * IDENTIFY (optional in early ATA)
1744 * INITIALIZE DEVICE PARAMETERS 1826 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1745 * anything else.. 1827 * anything else..
1746 * Some drives were very specific about that exact sequence. 1828 * Some drives were very specific about that exact sequence.
1829 *
1830 * Note that ATA4 says lba is mandatory so the second check
1831 * shoud never trigger.
1747 */ 1832 */
1748 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 1833 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1749 err_mask = ata_dev_init_params(dev, id[3], id[6]); 1834 err_mask = ata_dev_init_params(dev, id[3], id[6]);
@@ -1774,13 +1859,14 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1774 1859
1775static inline u8 ata_dev_knobble(struct ata_device *dev) 1860static inline u8 ata_dev_knobble(struct ata_device *dev)
1776{ 1861{
1777 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 1862 struct ata_port *ap = dev->link->ap;
1863 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1778} 1864}
1779 1865
1780static void ata_dev_config_ncq(struct ata_device *dev, 1866static void ata_dev_config_ncq(struct ata_device *dev,
1781 char *desc, size_t desc_sz) 1867 char *desc, size_t desc_sz)
1782{ 1868{
1783 struct ata_port *ap = dev->ap; 1869 struct ata_port *ap = dev->link->ap;
1784 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); 1870 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1785 1871
1786 if (!ata_id_has_ncq(dev->id)) { 1872 if (!ata_id_has_ncq(dev->id)) {
@@ -1817,8 +1903,8 @@ static void ata_dev_config_ncq(struct ata_device *dev,
1817 */ 1903 */
1818int ata_dev_configure(struct ata_device *dev) 1904int ata_dev_configure(struct ata_device *dev)
1819{ 1905{
1820 struct ata_port *ap = dev->ap; 1906 struct ata_port *ap = dev->link->ap;
1821 struct ata_eh_context *ehc = &ap->eh_context; 1907 struct ata_eh_context *ehc = &dev->link->eh_context;
1822 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 1908 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1823 const u16 *id = dev->id; 1909 const u16 *id = dev->id;
1824 unsigned int xfer_mask; 1910 unsigned int xfer_mask;
@@ -1844,6 +1930,11 @@ int ata_dev_configure(struct ata_device *dev)
1844 if (rc) 1930 if (rc)
1845 return rc; 1931 return rc;
1846 1932
1933 /* massage HPA, do it early as it might change IDENTIFY data */
1934 rc = ata_hpa_resize(dev);
1935 if (rc)
1936 return rc;
1937
1847 /* print device capabilities */ 1938 /* print device capabilities */
1848 if (ata_msg_probe(ap)) 1939 if (ata_msg_probe(ap))
1849 ata_dev_printk(dev, KERN_DEBUG, 1940 ata_dev_printk(dev, KERN_DEBUG,
@@ -1911,10 +2002,6 @@ int ata_dev_configure(struct ata_device *dev)
1911 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2002 dev->flags |= ATA_DFLAG_FLUSH_EXT;
1912 } 2003 }
1913 2004
1914 if (!(dev->horkage & ATA_HORKAGE_BROKEN_HPA) &&
1915 ata_id_hpa_enabled(dev->id))
1916 dev->n_sectors = ata_hpa_resize(dev);
1917
1918 /* config NCQ */ 2005 /* config NCQ */
1919 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2006 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1920 2007
@@ -1963,7 +2050,9 @@ int ata_dev_configure(struct ata_device *dev)
1963 2050
1964 /* ATAPI-specific feature tests */ 2051 /* ATAPI-specific feature tests */
1965 else if (dev->class == ATA_DEV_ATAPI) { 2052 else if (dev->class == ATA_DEV_ATAPI) {
1966 char *cdb_intr_string = ""; 2053 const char *cdb_intr_string = "";
2054 const char *atapi_an_string = "";
2055 u32 sntf;
1967 2056
1968 rc = atapi_cdb_len(id); 2057 rc = atapi_cdb_len(id);
1969 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2058 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
@@ -1975,6 +2064,28 @@ int ata_dev_configure(struct ata_device *dev)
1975 } 2064 }
1976 dev->cdb_len = (unsigned int) rc; 2065 dev->cdb_len = (unsigned int) rc;
1977 2066
2067 /* Enable ATAPI AN if both the host and device have
2068 * the support. If PMP is attached, SNTF is required
2069 * to enable ATAPI AN to discern between PHY status
2070 * changed notifications and ATAPI ANs.
2071 */
2072 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2073 (!ap->nr_pmp_links ||
2074 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2075 unsigned int err_mask;
2076
2077 /* issue SET feature command to turn this on */
2078 err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
2079 if (err_mask)
2080 ata_dev_printk(dev, KERN_ERR,
2081 "failed to enable ATAPI AN "
2082 "(err_mask=0x%x)\n", err_mask);
2083 else {
2084 dev->flags |= ATA_DFLAG_AN;
2085 atapi_an_string = ", ATAPI AN";
2086 }
2087 }
2088
1978 if (ata_id_cdb_intr(dev->id)) { 2089 if (ata_id_cdb_intr(dev->id)) {
1979 dev->flags |= ATA_DFLAG_CDB_INTR; 2090 dev->flags |= ATA_DFLAG_CDB_INTR;
1980 cdb_intr_string = ", CDB intr"; 2091 cdb_intr_string = ", CDB intr";
@@ -1983,10 +2094,10 @@ int ata_dev_configure(struct ata_device *dev)
1983 /* print device info to dmesg */ 2094 /* print device info to dmesg */
1984 if (ata_msg_drv(ap) && print_info) 2095 if (ata_msg_drv(ap) && print_info)
1985 ata_dev_printk(dev, KERN_INFO, 2096 ata_dev_printk(dev, KERN_INFO,
1986 "ATAPI: %s, %s, max %s%s\n", 2097 "ATAPI: %s, %s, max %s%s%s\n",
1987 modelbuf, fwrevbuf, 2098 modelbuf, fwrevbuf,
1988 ata_mode_string(xfer_mask), 2099 ata_mode_string(xfer_mask),
1989 cdb_intr_string); 2100 cdb_intr_string, atapi_an_string);
1990 } 2101 }
1991 2102
1992 /* determine max_sectors */ 2103 /* determine max_sectors */
@@ -2103,21 +2214,19 @@ int ata_bus_probe(struct ata_port *ap)
2103{ 2214{
2104 unsigned int classes[ATA_MAX_DEVICES]; 2215 unsigned int classes[ATA_MAX_DEVICES];
2105 int tries[ATA_MAX_DEVICES]; 2216 int tries[ATA_MAX_DEVICES];
2106 int i, rc; 2217 int rc;
2107 struct ata_device *dev; 2218 struct ata_device *dev;
2108 2219
2109 ata_port_probe(ap); 2220 ata_port_probe(ap);
2110 2221
2111 for (i = 0; i < ATA_MAX_DEVICES; i++) 2222 ata_link_for_each_dev(dev, &ap->link)
2112 tries[i] = ATA_PROBE_MAX_TRIES; 2223 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2113 2224
2114 retry: 2225 retry:
2115 /* reset and determine device classes */ 2226 /* reset and determine device classes */
2116 ap->ops->phy_reset(ap); 2227 ap->ops->phy_reset(ap);
2117 2228
2118 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2229 ata_link_for_each_dev(dev, &ap->link) {
2119 dev = &ap->device[i];
2120
2121 if (!(ap->flags & ATA_FLAG_DISABLED) && 2230 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2122 dev->class != ATA_DEV_UNKNOWN) 2231 dev->class != ATA_DEV_UNKNOWN)
2123 classes[dev->devno] = dev->class; 2232 classes[dev->devno] = dev->class;
@@ -2132,18 +2241,16 @@ int ata_bus_probe(struct ata_port *ap)
2132 /* after the reset the device state is PIO 0 and the controller 2241 /* after the reset the device state is PIO 0 and the controller
2133 state is undefined. Record the mode */ 2242 state is undefined. Record the mode */
2134 2243
2135 for (i = 0; i < ATA_MAX_DEVICES; i++) 2244 ata_link_for_each_dev(dev, &ap->link)
2136 ap->device[i].pio_mode = XFER_PIO_0; 2245 dev->pio_mode = XFER_PIO_0;
2137 2246
2138 /* read IDENTIFY page and configure devices. We have to do the identify 2247 /* read IDENTIFY page and configure devices. We have to do the identify
2139 specific sequence bass-ackwards so that PDIAG- is released by 2248 specific sequence bass-ackwards so that PDIAG- is released by
2140 the slave device */ 2249 the slave device */
2141 2250
2142 for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) { 2251 ata_link_for_each_dev(dev, &ap->link) {
2143 dev = &ap->device[i]; 2252 if (tries[dev->devno])
2144 2253 dev->class = classes[dev->devno];
2145 if (tries[i])
2146 dev->class = classes[i];
2147 2254
2148 if (!ata_dev_enabled(dev)) 2255 if (!ata_dev_enabled(dev))
2149 continue; 2256 continue;
@@ -2158,33 +2265,42 @@ int ata_bus_probe(struct ata_port *ap)
2158 if (ap->ops->cable_detect) 2265 if (ap->ops->cable_detect)
2159 ap->cbl = ap->ops->cable_detect(ap); 2266 ap->cbl = ap->ops->cable_detect(ap);
2160 2267
2268 /* We may have SATA bridge glue hiding here irrespective of the
2269 reported cable types and sensed types */
2270 ata_link_for_each_dev(dev, &ap->link) {
2271 if (!ata_dev_enabled(dev))
2272 continue;
2273 /* SATA drives indicate we have a bridge. We don't know which
2274 end of the link the bridge is which is a problem */
2275 if (ata_id_is_sata(dev->id))
2276 ap->cbl = ATA_CBL_SATA;
2277 }
2278
2161 /* After the identify sequence we can now set up the devices. We do 2279 /* After the identify sequence we can now set up the devices. We do
2162 this in the normal order so that the user doesn't get confused */ 2280 this in the normal order so that the user doesn't get confused */
2163 2281
2164 for(i = 0; i < ATA_MAX_DEVICES; i++) { 2282 ata_link_for_each_dev(dev, &ap->link) {
2165 dev = &ap->device[i];
2166 if (!ata_dev_enabled(dev)) 2283 if (!ata_dev_enabled(dev))
2167 continue; 2284 continue;
2168 2285
2169 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO; 2286 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2170 rc = ata_dev_configure(dev); 2287 rc = ata_dev_configure(dev);
2171 ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2288 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2172 if (rc) 2289 if (rc)
2173 goto fail; 2290 goto fail;
2174 } 2291 }
2175 2292
2176 /* configure transfer mode */ 2293 /* configure transfer mode */
2177 rc = ata_set_mode(ap, &dev); 2294 rc = ata_set_mode(&ap->link, &dev);
2178 if (rc) 2295 if (rc)
2179 goto fail; 2296 goto fail;
2180 2297
2181 for (i = 0; i < ATA_MAX_DEVICES; i++) 2298 ata_link_for_each_dev(dev, &ap->link)
2182 if (ata_dev_enabled(&ap->device[i])) 2299 if (ata_dev_enabled(dev))
2183 return 0; 2300 return 0;
2184 2301
2185 /* no device present, disable port */ 2302 /* no device present, disable port */
2186 ata_port_disable(ap); 2303 ata_port_disable(ap);
2187 ap->ops->port_disable(ap);
2188 return -ENODEV; 2304 return -ENODEV;
2189 2305
2190 fail: 2306 fail:
@@ -2204,7 +2320,7 @@ int ata_bus_probe(struct ata_port *ap)
2204 /* This is the last chance, better to slow 2320 /* This is the last chance, better to slow
2205 * down than lose it. 2321 * down than lose it.
2206 */ 2322 */
2207 sata_down_spd_limit(ap); 2323 sata_down_spd_limit(&ap->link);
2208 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2324 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2209 } 2325 }
2210 } 2326 }
@@ -2233,28 +2349,28 @@ void ata_port_probe(struct ata_port *ap)
2233 2349
2234/** 2350/**
2235 * sata_print_link_status - Print SATA link status 2351 * sata_print_link_status - Print SATA link status
2236 * @ap: SATA port to printk link status about 2352 * @link: SATA link to printk link status about
2237 * 2353 *
2238 * This function prints link speed and status of a SATA link. 2354 * This function prints link speed and status of a SATA link.
2239 * 2355 *
2240 * LOCKING: 2356 * LOCKING:
2241 * None. 2357 * None.
2242 */ 2358 */
2243void sata_print_link_status(struct ata_port *ap) 2359void sata_print_link_status(struct ata_link *link)
2244{ 2360{
2245 u32 sstatus, scontrol, tmp; 2361 u32 sstatus, scontrol, tmp;
2246 2362
2247 if (sata_scr_read(ap, SCR_STATUS, &sstatus)) 2363 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2248 return; 2364 return;
2249 sata_scr_read(ap, SCR_CONTROL, &scontrol); 2365 sata_scr_read(link, SCR_CONTROL, &scontrol);
2250 2366
2251 if (ata_port_online(ap)) { 2367 if (ata_link_online(link)) {
2252 tmp = (sstatus >> 4) & 0xf; 2368 tmp = (sstatus >> 4) & 0xf;
2253 ata_port_printk(ap, KERN_INFO, 2369 ata_link_printk(link, KERN_INFO,
2254 "SATA link up %s (SStatus %X SControl %X)\n", 2370 "SATA link up %s (SStatus %X SControl %X)\n",
2255 sata_spd_string(tmp), sstatus, scontrol); 2371 sata_spd_string(tmp), sstatus, scontrol);
2256 } else { 2372 } else {
2257 ata_port_printk(ap, KERN_INFO, 2373 ata_link_printk(link, KERN_INFO,
2258 "SATA link down (SStatus %X SControl %X)\n", 2374 "SATA link down (SStatus %X SControl %X)\n",
2259 sstatus, scontrol); 2375 sstatus, scontrol);
2260 } 2376 }
@@ -2274,32 +2390,33 @@ void sata_print_link_status(struct ata_port *ap)
2274 */ 2390 */
2275void __sata_phy_reset(struct ata_port *ap) 2391void __sata_phy_reset(struct ata_port *ap)
2276{ 2392{
2277 u32 sstatus; 2393 struct ata_link *link = &ap->link;
2278 unsigned long timeout = jiffies + (HZ * 5); 2394 unsigned long timeout = jiffies + (HZ * 5);
2395 u32 sstatus;
2279 2396
2280 if (ap->flags & ATA_FLAG_SATA_RESET) { 2397 if (ap->flags & ATA_FLAG_SATA_RESET) {
2281 /* issue phy wake/reset */ 2398 /* issue phy wake/reset */
2282 sata_scr_write_flush(ap, SCR_CONTROL, 0x301); 2399 sata_scr_write_flush(link, SCR_CONTROL, 0x301);
2283 /* Couldn't find anything in SATA I/II specs, but 2400 /* Couldn't find anything in SATA I/II specs, but
2284 * AHCI-1.1 10.4.2 says at least 1 ms. */ 2401 * AHCI-1.1 10.4.2 says at least 1 ms. */
2285 mdelay(1); 2402 mdelay(1);
2286 } 2403 }
2287 /* phy wake/clear reset */ 2404 /* phy wake/clear reset */
2288 sata_scr_write_flush(ap, SCR_CONTROL, 0x300); 2405 sata_scr_write_flush(link, SCR_CONTROL, 0x300);
2289 2406
2290 /* wait for phy to become ready, if necessary */ 2407 /* wait for phy to become ready, if necessary */
2291 do { 2408 do {
2292 msleep(200); 2409 msleep(200);
2293 sata_scr_read(ap, SCR_STATUS, &sstatus); 2410 sata_scr_read(link, SCR_STATUS, &sstatus);
2294 if ((sstatus & 0xf) != 1) 2411 if ((sstatus & 0xf) != 1)
2295 break; 2412 break;
2296 } while (time_before(jiffies, timeout)); 2413 } while (time_before(jiffies, timeout));
2297 2414
2298 /* print link status */ 2415 /* print link status */
2299 sata_print_link_status(ap); 2416 sata_print_link_status(link);
2300 2417
2301 /* TODO: phy layer with polling, timeouts, etc. */ 2418 /* TODO: phy layer with polling, timeouts, etc. */
2302 if (!ata_port_offline(ap)) 2419 if (!ata_link_offline(link))
2303 ata_port_probe(ap); 2420 ata_port_probe(ap);
2304 else 2421 else
2305 ata_port_disable(ap); 2422 ata_port_disable(ap);
@@ -2344,8 +2461,8 @@ void sata_phy_reset(struct ata_port *ap)
2344 2461
2345struct ata_device *ata_dev_pair(struct ata_device *adev) 2462struct ata_device *ata_dev_pair(struct ata_device *adev)
2346{ 2463{
2347 struct ata_port *ap = adev->ap; 2464 struct ata_link *link = adev->link;
2348 struct ata_device *pair = &ap->device[1 - adev->devno]; 2465 struct ata_device *pair = &link->device[1 - adev->devno];
2349 if (!ata_dev_enabled(pair)) 2466 if (!ata_dev_enabled(pair))
2350 return NULL; 2467 return NULL;
2351 return pair; 2468 return pair;
@@ -2366,16 +2483,16 @@ struct ata_device *ata_dev_pair(struct ata_device *adev)
2366 2483
2367void ata_port_disable(struct ata_port *ap) 2484void ata_port_disable(struct ata_port *ap)
2368{ 2485{
2369 ap->device[0].class = ATA_DEV_NONE; 2486 ap->link.device[0].class = ATA_DEV_NONE;
2370 ap->device[1].class = ATA_DEV_NONE; 2487 ap->link.device[1].class = ATA_DEV_NONE;
2371 ap->flags |= ATA_FLAG_DISABLED; 2488 ap->flags |= ATA_FLAG_DISABLED;
2372} 2489}
2373 2490
2374/** 2491/**
2375 * sata_down_spd_limit - adjust SATA spd limit downward 2492 * sata_down_spd_limit - adjust SATA spd limit downward
2376 * @ap: Port to adjust SATA spd limit for 2493 * @link: Link to adjust SATA spd limit for
2377 * 2494 *
2378 * Adjust SATA spd limit of @ap downward. Note that this 2495 * Adjust SATA spd limit of @link downward. Note that this
2379 * function only adjusts the limit. The change must be applied 2496 * function only adjusts the limit. The change must be applied
2380 * using sata_set_spd(). 2497 * using sata_set_spd().
2381 * 2498 *
@@ -2385,24 +2502,24 @@ void ata_port_disable(struct ata_port *ap)
2385 * RETURNS: 2502 * RETURNS:
2386 * 0 on success, negative errno on failure 2503 * 0 on success, negative errno on failure
2387 */ 2504 */
2388int sata_down_spd_limit(struct ata_port *ap) 2505int sata_down_spd_limit(struct ata_link *link)
2389{ 2506{
2390 u32 sstatus, spd, mask; 2507 u32 sstatus, spd, mask;
2391 int rc, highbit; 2508 int rc, highbit;
2392 2509
2393 if (!sata_scr_valid(ap)) 2510 if (!sata_scr_valid(link))
2394 return -EOPNOTSUPP; 2511 return -EOPNOTSUPP;
2395 2512
2396 /* If SCR can be read, use it to determine the current SPD. 2513 /* If SCR can be read, use it to determine the current SPD.
2397 * If not, use cached value in ap->sata_spd. 2514 * If not, use cached value in link->sata_spd.
2398 */ 2515 */
2399 rc = sata_scr_read(ap, SCR_STATUS, &sstatus); 2516 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2400 if (rc == 0) 2517 if (rc == 0)
2401 spd = (sstatus >> 4) & 0xf; 2518 spd = (sstatus >> 4) & 0xf;
2402 else 2519 else
2403 spd = ap->sata_spd; 2520 spd = link->sata_spd;
2404 2521
2405 mask = ap->sata_spd_limit; 2522 mask = link->sata_spd_limit;
2406 if (mask <= 1) 2523 if (mask <= 1)
2407 return -EINVAL; 2524 return -EINVAL;
2408 2525
@@ -2422,22 +2539,22 @@ int sata_down_spd_limit(struct ata_port *ap)
2422 if (!mask) 2539 if (!mask)
2423 return -EINVAL; 2540 return -EINVAL;
2424 2541
2425 ap->sata_spd_limit = mask; 2542 link->sata_spd_limit = mask;
2426 2543
2427 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n", 2544 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2428 sata_spd_string(fls(mask))); 2545 sata_spd_string(fls(mask)));
2429 2546
2430 return 0; 2547 return 0;
2431} 2548}
2432 2549
2433static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol) 2550static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2434{ 2551{
2435 u32 spd, limit; 2552 u32 spd, limit;
2436 2553
2437 if (ap->sata_spd_limit == UINT_MAX) 2554 if (link->sata_spd_limit == UINT_MAX)
2438 limit = 0; 2555 limit = 0;
2439 else 2556 else
2440 limit = fls(ap->sata_spd_limit); 2557 limit = fls(link->sata_spd_limit);
2441 2558
2442 spd = (*scontrol >> 4) & 0xf; 2559 spd = (*scontrol >> 4) & 0xf;
2443 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4); 2560 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
@@ -2447,10 +2564,10 @@ static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
2447 2564
2448/** 2565/**
2449 * sata_set_spd_needed - is SATA spd configuration needed 2566 * sata_set_spd_needed - is SATA spd configuration needed
2450 * @ap: Port in question 2567 * @link: Link in question
2451 * 2568 *
2452 * Test whether the spd limit in SControl matches 2569 * Test whether the spd limit in SControl matches
2453 * @ap->sata_spd_limit. This function is used to determine 2570 * @link->sata_spd_limit. This function is used to determine
2454 * whether hardreset is necessary to apply SATA spd 2571 * whether hardreset is necessary to apply SATA spd
2455 * configuration. 2572 * configuration.
2456 * 2573 *
@@ -2460,21 +2577,21 @@ static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
2460 * RETURNS: 2577 * RETURNS:
2461 * 1 if SATA spd configuration is needed, 0 otherwise. 2578 * 1 if SATA spd configuration is needed, 0 otherwise.
2462 */ 2579 */
2463int sata_set_spd_needed(struct ata_port *ap) 2580int sata_set_spd_needed(struct ata_link *link)
2464{ 2581{
2465 u32 scontrol; 2582 u32 scontrol;
2466 2583
2467 if (sata_scr_read(ap, SCR_CONTROL, &scontrol)) 2584 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2468 return 0; 2585 return 0;
2469 2586
2470 return __sata_set_spd_needed(ap, &scontrol); 2587 return __sata_set_spd_needed(link, &scontrol);
2471} 2588}
2472 2589
2473/** 2590/**
2474 * sata_set_spd - set SATA spd according to spd limit 2591 * sata_set_spd - set SATA spd according to spd limit
2475 * @ap: Port to set SATA spd for 2592 * @link: Link to set SATA spd for
2476 * 2593 *
2477 * Set SATA spd of @ap according to sata_spd_limit. 2594 * Set SATA spd of @link according to sata_spd_limit.
2478 * 2595 *
2479 * LOCKING: 2596 * LOCKING:
2480 * Inherited from caller. 2597 * Inherited from caller.
@@ -2483,18 +2600,18 @@ int sata_set_spd_needed(struct ata_port *ap)
2483 * 0 if spd doesn't need to be changed, 1 if spd has been 2600 * 0 if spd doesn't need to be changed, 1 if spd has been
2484 * changed. Negative errno if SCR registers are inaccessible. 2601 * changed. Negative errno if SCR registers are inaccessible.
2485 */ 2602 */
2486int sata_set_spd(struct ata_port *ap) 2603int sata_set_spd(struct ata_link *link)
2487{ 2604{
2488 u32 scontrol; 2605 u32 scontrol;
2489 int rc; 2606 int rc;
2490 2607
2491 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) 2608 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2492 return rc; 2609 return rc;
2493 2610
2494 if (!__sata_set_spd_needed(ap, &scontrol)) 2611 if (!__sata_set_spd_needed(link, &scontrol))
2495 return 0; 2612 return 0;
2496 2613
2497 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) 2614 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2498 return rc; 2615 return rc;
2499 2616
2500 return 1; 2617 return 1;
@@ -2749,7 +2866,7 @@ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2749 2866
2750static int ata_dev_set_mode(struct ata_device *dev) 2867static int ata_dev_set_mode(struct ata_device *dev)
2751{ 2868{
2752 struct ata_eh_context *ehc = &dev->ap->eh_context; 2869 struct ata_eh_context *ehc = &dev->link->eh_context;
2753 unsigned int err_mask; 2870 unsigned int err_mask;
2754 int rc; 2871 int rc;
2755 2872
@@ -2761,7 +2878,11 @@ static int ata_dev_set_mode(struct ata_device *dev)
2761 /* Old CFA may refuse this command, which is just fine */ 2878 /* Old CFA may refuse this command, which is just fine */
2762 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id)) 2879 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2763 err_mask &= ~AC_ERR_DEV; 2880 err_mask &= ~AC_ERR_DEV;
2764 2881 /* Some very old devices and some bad newer ones fail any kind of
2882 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
2883 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
2884 dev->pio_mode <= XFER_PIO_2)
2885 err_mask &= ~AC_ERR_DEV;
2765 if (err_mask) { 2886 if (err_mask) {
2766 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " 2887 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2767 "(err_mask=0x%x)\n", err_mask); 2888 "(err_mask=0x%x)\n", err_mask);
@@ -2769,7 +2890,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
2769 } 2890 }
2770 2891
2771 ehc->i.flags |= ATA_EHI_POST_SETMODE; 2892 ehc->i.flags |= ATA_EHI_POST_SETMODE;
2772 rc = ata_dev_revalidate(dev, 0); 2893 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
2773 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 2894 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2774 if (rc) 2895 if (rc)
2775 return rc; 2896 return rc;
@@ -2784,7 +2905,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
2784 2905
2785/** 2906/**
2786 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER 2907 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
2787 * @ap: port on which timings will be programmed 2908 * @link: link on which timings will be programmed
2788 * @r_failed_dev: out paramter for failed device 2909 * @r_failed_dev: out paramter for failed device
2789 * 2910 *
2790 * Standard implementation of the function used to tune and set 2911 * Standard implementation of the function used to tune and set
@@ -2799,25 +2920,36 @@ static int ata_dev_set_mode(struct ata_device *dev)
2799 * 0 on success, negative errno otherwise 2920 * 0 on success, negative errno otherwise
2800 */ 2921 */
2801 2922
2802int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) 2923int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2803{ 2924{
2925 struct ata_port *ap = link->ap;
2804 struct ata_device *dev; 2926 struct ata_device *dev;
2805 int i, rc = 0, used_dma = 0, found = 0; 2927 int rc = 0, used_dma = 0, found = 0;
2806
2807 2928
2808 /* step 1: calculate xfer_mask */ 2929 /* step 1: calculate xfer_mask */
2809 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2930 ata_link_for_each_dev(dev, link) {
2810 unsigned int pio_mask, dma_mask; 2931 unsigned int pio_mask, dma_mask;
2811 2932 unsigned int mode_mask;
2812 dev = &ap->device[i];
2813 2933
2814 if (!ata_dev_enabled(dev)) 2934 if (!ata_dev_enabled(dev))
2815 continue; 2935 continue;
2816 2936
2937 mode_mask = ATA_DMA_MASK_ATA;
2938 if (dev->class == ATA_DEV_ATAPI)
2939 mode_mask = ATA_DMA_MASK_ATAPI;
2940 else if (ata_id_is_cfa(dev->id))
2941 mode_mask = ATA_DMA_MASK_CFA;
2942
2817 ata_dev_xfermask(dev); 2943 ata_dev_xfermask(dev);
2818 2944
2819 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 2945 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2820 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); 2946 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2947
2948 if (libata_dma_mask & mode_mask)
2949 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2950 else
2951 dma_mask = 0;
2952
2821 dev->pio_mode = ata_xfer_mask2mode(pio_mask); 2953 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2822 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 2954 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2823 2955
@@ -2829,8 +2961,7 @@ int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2829 goto out; 2961 goto out;
2830 2962
2831 /* step 2: always set host PIO timings */ 2963 /* step 2: always set host PIO timings */
2832 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2964 ata_link_for_each_dev(dev, link) {
2833 dev = &ap->device[i];
2834 if (!ata_dev_enabled(dev)) 2965 if (!ata_dev_enabled(dev))
2835 continue; 2966 continue;
2836 2967
@@ -2847,9 +2978,7 @@ int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2847 } 2978 }
2848 2979
2849 /* step 3: set host DMA timings */ 2980 /* step 3: set host DMA timings */
2850 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2981 ata_link_for_each_dev(dev, link) {
2851 dev = &ap->device[i];
2852
2853 if (!ata_dev_enabled(dev) || !dev->dma_mode) 2982 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2854 continue; 2983 continue;
2855 2984
@@ -2860,9 +2989,7 @@ int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2860 } 2989 }
2861 2990
2862 /* step 4: update devices' xfer mode */ 2991 /* step 4: update devices' xfer mode */
2863 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2992 ata_link_for_each_dev(dev, link) {
2864 dev = &ap->device[i];
2865
2866 /* don't update suspended devices' xfer mode */ 2993 /* don't update suspended devices' xfer mode */
2867 if (!ata_dev_enabled(dev)) 2994 if (!ata_dev_enabled(dev))
2868 continue; 2995 continue;
@@ -2886,7 +3013,7 @@ int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2886 3013
2887/** 3014/**
2888 * ata_set_mode - Program timings and issue SET FEATURES - XFER 3015 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2889 * @ap: port on which timings will be programmed 3016 * @link: link on which timings will be programmed
2890 * @r_failed_dev: out paramter for failed device 3017 * @r_failed_dev: out paramter for failed device
2891 * 3018 *
2892 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3019 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
@@ -2899,12 +3026,14 @@ int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2899 * RETURNS: 3026 * RETURNS:
2900 * 0 on success, negative errno otherwise 3027 * 0 on success, negative errno otherwise
2901 */ 3028 */
2902int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) 3029int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2903{ 3030{
3031 struct ata_port *ap = link->ap;
3032
2904 /* has private set_mode? */ 3033 /* has private set_mode? */
2905 if (ap->ops->set_mode) 3034 if (ap->ops->set_mode)
2906 return ap->ops->set_mode(ap, r_failed_dev); 3035 return ap->ops->set_mode(link, r_failed_dev);
2907 return ata_do_set_mode(ap, r_failed_dev); 3036 return ata_do_set_mode(link, r_failed_dev);
2908} 3037}
2909 3038
2910/** 3039/**
@@ -3007,7 +3136,7 @@ int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3007 3136
3008 if (!(status & ATA_BUSY)) 3137 if (!(status & ATA_BUSY))
3009 return 0; 3138 return 0;
3010 if (!ata_port_online(ap) && status == 0xff) 3139 if (!ata_link_online(&ap->link) && status == 0xff)
3011 return -ENODEV; 3140 return -ENODEV;
3012 if (time_after(now, deadline)) 3141 if (time_after(now, deadline))
3013 return -EBUSY; 3142 return -EBUSY;
@@ -3088,6 +3217,8 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3088 unsigned long deadline) 3217 unsigned long deadline)
3089{ 3218{
3090 struct ata_ioports *ioaddr = &ap->ioaddr; 3219 struct ata_ioports *ioaddr = &ap->ioaddr;
3220 struct ata_device *dev;
3221 int i = 0;
3091 3222
3092 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); 3223 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3093 3224
@@ -3098,6 +3229,25 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3098 udelay(20); /* FIXME: flush */ 3229 udelay(20); /* FIXME: flush */
3099 iowrite8(ap->ctl, ioaddr->ctl_addr); 3230 iowrite8(ap->ctl, ioaddr->ctl_addr);
3100 3231
3232 /* If we issued an SRST then an ATA drive (not ATAPI)
3233 * may have changed configuration and be in PIO0 timing. If
3234 * we did a hard reset (or are coming from power on) this is
3235 * true for ATA or ATAPI. Until we've set a suitable controller
3236 * mode we should not touch the bus as we may be talking too fast.
3237 */
3238
3239 ata_link_for_each_dev(dev, &ap->link)
3240 dev->pio_mode = XFER_PIO_0;
3241
3242 /* If the controller has a pio mode setup function then use
3243 it to set the chipset to rights. Don't touch the DMA setup
3244 as that will be dealt with when revalidating */
3245 if (ap->ops->set_piomode) {
3246 ata_link_for_each_dev(dev, &ap->link)
3247 if (devmask & (1 << i++))
3248 ap->ops->set_piomode(ap, dev);
3249 }
3250
3101 /* spec mandates ">= 2ms" before checking status. 3251 /* spec mandates ">= 2ms" before checking status.
3102 * We wait 150ms, because that was the magic delay used for 3252 * We wait 150ms, because that was the magic delay used for
3103 * ATAPI devices in Hale Landis's ATADRVR, for the period of time 3253 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
@@ -3142,6 +3292,7 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3142 3292
3143void ata_bus_reset(struct ata_port *ap) 3293void ata_bus_reset(struct ata_port *ap)
3144{ 3294{
3295 struct ata_device *device = ap->link.device;
3145 struct ata_ioports *ioaddr = &ap->ioaddr; 3296 struct ata_ioports *ioaddr = &ap->ioaddr;
3146 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; 3297 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3147 u8 err; 3298 u8 err;
@@ -3177,19 +3328,19 @@ void ata_bus_reset(struct ata_port *ap)
3177 /* 3328 /*
3178 * determine by signature whether we have ATA or ATAPI devices 3329 * determine by signature whether we have ATA or ATAPI devices
3179 */ 3330 */
3180 ap->device[0].class = ata_dev_try_classify(ap, 0, &err); 3331 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3181 if ((slave_possible) && (err != 0x81)) 3332 if ((slave_possible) && (err != 0x81))
3182 ap->device[1].class = ata_dev_try_classify(ap, 1, &err); 3333 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3183 3334
3184 /* is double-select really necessary? */ 3335 /* is double-select really necessary? */
3185 if (ap->device[1].class != ATA_DEV_NONE) 3336 if (device[1].class != ATA_DEV_NONE)
3186 ap->ops->dev_select(ap, 1); 3337 ap->ops->dev_select(ap, 1);
3187 if (ap->device[0].class != ATA_DEV_NONE) 3338 if (device[0].class != ATA_DEV_NONE)
3188 ap->ops->dev_select(ap, 0); 3339 ap->ops->dev_select(ap, 0);
3189 3340
3190 /* if no devices were detected, disable this port */ 3341 /* if no devices were detected, disable this port */
3191 if ((ap->device[0].class == ATA_DEV_NONE) && 3342 if ((device[0].class == ATA_DEV_NONE) &&
3192 (ap->device[1].class == ATA_DEV_NONE)) 3343 (device[1].class == ATA_DEV_NONE))
3193 goto err_out; 3344 goto err_out;
3194 3345
3195 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) { 3346 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
@@ -3202,18 +3353,18 @@ void ata_bus_reset(struct ata_port *ap)
3202 3353
3203err_out: 3354err_out:
3204 ata_port_printk(ap, KERN_ERR, "disabling port\n"); 3355 ata_port_printk(ap, KERN_ERR, "disabling port\n");
3205 ap->ops->port_disable(ap); 3356 ata_port_disable(ap);
3206 3357
3207 DPRINTK("EXIT\n"); 3358 DPRINTK("EXIT\n");
3208} 3359}
3209 3360
3210/** 3361/**
3211 * sata_phy_debounce - debounce SATA phy status 3362 * sata_link_debounce - debounce SATA phy status
3212 * @ap: ATA port to debounce SATA phy status for 3363 * @link: ATA link to debounce SATA phy status for
3213 * @params: timing parameters { interval, duratinon, timeout } in msec 3364 * @params: timing parameters { interval, duratinon, timeout } in msec
3214 * @deadline: deadline jiffies for the operation 3365 * @deadline: deadline jiffies for the operation
3215 * 3366 *
3216 * Make sure SStatus of @ap reaches stable state, determined by 3367* Make sure SStatus of @link reaches stable state, determined by
3217 * holding the same value where DET is not 1 for @duration polled 3368 * holding the same value where DET is not 1 for @duration polled
3218 * every @interval, before @timeout. Timeout constraints the 3369 * every @interval, before @timeout. Timeout constraints the
3219 * beginning of the stable state. Because DET gets stuck at 1 on 3370 * beginning of the stable state. Because DET gets stuck at 1 on
@@ -3229,8 +3380,8 @@ err_out:
3229 * RETURNS: 3380 * RETURNS:
3230 * 0 on success, -errno on failure. 3381 * 0 on success, -errno on failure.
3231 */ 3382 */
3232int sata_phy_debounce(struct ata_port *ap, const unsigned long *params, 3383int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3233 unsigned long deadline) 3384 unsigned long deadline)
3234{ 3385{
3235 unsigned long interval_msec = params[0]; 3386 unsigned long interval_msec = params[0];
3236 unsigned long duration = msecs_to_jiffies(params[1]); 3387 unsigned long duration = msecs_to_jiffies(params[1]);
@@ -3242,7 +3393,7 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
3242 if (time_before(t, deadline)) 3393 if (time_before(t, deadline))
3243 deadline = t; 3394 deadline = t;
3244 3395
3245 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur))) 3396 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3246 return rc; 3397 return rc;
3247 cur &= 0xf; 3398 cur &= 0xf;
3248 3399
@@ -3251,7 +3402,7 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
3251 3402
3252 while (1) { 3403 while (1) {
3253 msleep(interval_msec); 3404 msleep(interval_msec);
3254 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur))) 3405 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3255 return rc; 3406 return rc;
3256 cur &= 0xf; 3407 cur &= 0xf;
3257 3408
@@ -3277,12 +3428,12 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
3277} 3428}
3278 3429
3279/** 3430/**
3280 * sata_phy_resume - resume SATA phy 3431 * sata_link_resume - resume SATA link
3281 * @ap: ATA port to resume SATA phy for 3432 * @link: ATA link to resume SATA
3282 * @params: timing parameters { interval, duratinon, timeout } in msec 3433 * @params: timing parameters { interval, duratinon, timeout } in msec
3283 * @deadline: deadline jiffies for the operation 3434 * @deadline: deadline jiffies for the operation
3284 * 3435 *
3285 * Resume SATA phy of @ap and debounce it. 3436 * Resume SATA phy @link and debounce it.
3286 * 3437 *
3287 * LOCKING: 3438 * LOCKING:
3288 * Kernel thread context (may sleep) 3439 * Kernel thread context (may sleep)
@@ -3290,18 +3441,18 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
3290 * RETURNS: 3441 * RETURNS:
3291 * 0 on success, -errno on failure. 3442 * 0 on success, -errno on failure.
3292 */ 3443 */
3293int sata_phy_resume(struct ata_port *ap, const unsigned long *params, 3444int sata_link_resume(struct ata_link *link, const unsigned long *params,
3294 unsigned long deadline) 3445 unsigned long deadline)
3295{ 3446{
3296 u32 scontrol; 3447 u32 scontrol;
3297 int rc; 3448 int rc;
3298 3449
3299 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) 3450 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3300 return rc; 3451 return rc;
3301 3452
3302 scontrol = (scontrol & 0x0f0) | 0x300; 3453 scontrol = (scontrol & 0x0f0) | 0x300;
3303 3454
3304 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) 3455 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3305 return rc; 3456 return rc;
3306 3457
3307 /* Some PHYs react badly if SStatus is pounded immediately 3458 /* Some PHYs react badly if SStatus is pounded immediately
@@ -3309,15 +3460,15 @@ int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
3309 */ 3460 */
3310 msleep(200); 3461 msleep(200);
3311 3462
3312 return sata_phy_debounce(ap, params, deadline); 3463 return sata_link_debounce(link, params, deadline);
3313} 3464}
3314 3465
3315/** 3466/**
3316 * ata_std_prereset - prepare for reset 3467 * ata_std_prereset - prepare for reset
3317 * @ap: ATA port to be reset 3468 * @link: ATA link to be reset
3318 * @deadline: deadline jiffies for the operation 3469 * @deadline: deadline jiffies for the operation
3319 * 3470 *
3320 * @ap is about to be reset. Initialize it. Failure from 3471 * @link is about to be reset. Initialize it. Failure from
3321 * prereset makes libata abort whole reset sequence and give up 3472 * prereset makes libata abort whole reset sequence and give up
3322 * that port, so prereset should be best-effort. It does its 3473 * that port, so prereset should be best-effort. It does its
3323 * best to prepare for reset sequence but if things go wrong, it 3474 * best to prepare for reset sequence but if things go wrong, it
@@ -3329,37 +3480,44 @@ int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
3329 * RETURNS: 3480 * RETURNS:
3330 * 0 on success, -errno otherwise. 3481 * 0 on success, -errno otherwise.
3331 */ 3482 */
3332int ata_std_prereset(struct ata_port *ap, unsigned long deadline) 3483int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3333{ 3484{
3334 struct ata_eh_context *ehc = &ap->eh_context; 3485 struct ata_port *ap = link->ap;
3486 struct ata_eh_context *ehc = &link->eh_context;
3335 const unsigned long *timing = sata_ehc_deb_timing(ehc); 3487 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3336 int rc; 3488 int rc;
3337 3489
3338 /* handle link resume */ 3490 /* handle link resume */
3339 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) && 3491 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3340 (ap->flags & ATA_FLAG_HRST_TO_RESUME)) 3492 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3493 ehc->i.action |= ATA_EH_HARDRESET;
3494
3495 /* Some PMPs don't work with only SRST, force hardreset if PMP
3496 * is supported.
3497 */
3498 if (ap->flags & ATA_FLAG_PMP)
3341 ehc->i.action |= ATA_EH_HARDRESET; 3499 ehc->i.action |= ATA_EH_HARDRESET;
3342 3500
3343 /* if we're about to do hardreset, nothing more to do */ 3501 /* if we're about to do hardreset, nothing more to do */
3344 if (ehc->i.action & ATA_EH_HARDRESET) 3502 if (ehc->i.action & ATA_EH_HARDRESET)
3345 return 0; 3503 return 0;
3346 3504
3347 /* if SATA, resume phy */ 3505 /* if SATA, resume link */
3348 if (ap->flags & ATA_FLAG_SATA) { 3506 if (ap->flags & ATA_FLAG_SATA) {
3349 rc = sata_phy_resume(ap, timing, deadline); 3507 rc = sata_link_resume(link, timing, deadline);
3350 /* whine about phy resume failure but proceed */ 3508 /* whine about phy resume failure but proceed */
3351 if (rc && rc != -EOPNOTSUPP) 3509 if (rc && rc != -EOPNOTSUPP)
3352 ata_port_printk(ap, KERN_WARNING, "failed to resume " 3510 ata_link_printk(link, KERN_WARNING, "failed to resume "
3353 "link for reset (errno=%d)\n", rc); 3511 "link for reset (errno=%d)\n", rc);
3354 } 3512 }
3355 3513
3356 /* Wait for !BSY if the controller can wait for the first D2H 3514 /* Wait for !BSY if the controller can wait for the first D2H
3357 * Reg FIS and we don't know that no device is attached. 3515 * Reg FIS and we don't know that no device is attached.
3358 */ 3516 */
3359 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) { 3517 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3360 rc = ata_wait_ready(ap, deadline); 3518 rc = ata_wait_ready(ap, deadline);
3361 if (rc && rc != -ENODEV) { 3519 if (rc && rc != -ENODEV) {
3362 ata_port_printk(ap, KERN_WARNING, "device not ready " 3520 ata_link_printk(link, KERN_WARNING, "device not ready "
3363 "(errno=%d), forcing hardreset\n", rc); 3521 "(errno=%d), forcing hardreset\n", rc);
3364 ehc->i.action |= ATA_EH_HARDRESET; 3522 ehc->i.action |= ATA_EH_HARDRESET;
3365 } 3523 }
@@ -3370,7 +3528,7 @@ int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
3370 3528
3371/** 3529/**
3372 * ata_std_softreset - reset host port via ATA SRST 3530 * ata_std_softreset - reset host port via ATA SRST
3373 * @ap: port to reset 3531 * @link: ATA link to reset
3374 * @classes: resulting classes of attached devices 3532 * @classes: resulting classes of attached devices
3375 * @deadline: deadline jiffies for the operation 3533 * @deadline: deadline jiffies for the operation
3376 * 3534 *
@@ -3382,9 +3540,10 @@ int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
3382 * RETURNS: 3540 * RETURNS:
3383 * 0 on success, -errno otherwise. 3541 * 0 on success, -errno otherwise.
3384 */ 3542 */
3385int ata_std_softreset(struct ata_port *ap, unsigned int *classes, 3543int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3386 unsigned long deadline) 3544 unsigned long deadline)
3387{ 3545{
3546 struct ata_port *ap = link->ap;
3388 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; 3547 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3389 unsigned int devmask = 0; 3548 unsigned int devmask = 0;
3390 int rc; 3549 int rc;
@@ -3392,7 +3551,7 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
3392 3551
3393 DPRINTK("ENTER\n"); 3552 DPRINTK("ENTER\n");
3394 3553
3395 if (ata_port_offline(ap)) { 3554 if (ata_link_offline(link)) {
3396 classes[0] = ATA_DEV_NONE; 3555 classes[0] = ATA_DEV_NONE;
3397 goto out; 3556 goto out;
3398 } 3557 }
@@ -3410,15 +3569,17 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
3410 DPRINTK("about to softreset, devmask=%x\n", devmask); 3569 DPRINTK("about to softreset, devmask=%x\n", devmask);
3411 rc = ata_bus_softreset(ap, devmask, deadline); 3570 rc = ata_bus_softreset(ap, devmask, deadline);
3412 /* if link is occupied, -ENODEV too is an error */ 3571 /* if link is occupied, -ENODEV too is an error */
3413 if (rc && (rc != -ENODEV || sata_scr_valid(ap))) { 3572 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3414 ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc); 3573 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3415 return rc; 3574 return rc;
3416 } 3575 }
3417 3576
3418 /* determine by signature whether we have ATA or ATAPI devices */ 3577 /* determine by signature whether we have ATA or ATAPI devices */
3419 classes[0] = ata_dev_try_classify(ap, 0, &err); 3578 classes[0] = ata_dev_try_classify(&link->device[0],
3579 devmask & (1 << 0), &err);
3420 if (slave_possible && err != 0x81) 3580 if (slave_possible && err != 0x81)
3421 classes[1] = ata_dev_try_classify(ap, 1, &err); 3581 classes[1] = ata_dev_try_classify(&link->device[1],
3582 devmask & (1 << 1), &err);
3422 3583
3423 out: 3584 out:
3424 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); 3585 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
@@ -3426,12 +3587,12 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
3426} 3587}
3427 3588
3428/** 3589/**
3429 * sata_port_hardreset - reset port via SATA phy reset 3590 * sata_link_hardreset - reset link via SATA phy reset
3430 * @ap: port to reset 3591 * @link: link to reset
3431 * @timing: timing parameters { interval, duratinon, timeout } in msec 3592 * @timing: timing parameters { interval, duratinon, timeout } in msec
3432 * @deadline: deadline jiffies for the operation 3593 * @deadline: deadline jiffies for the operation
3433 * 3594 *
3434 * SATA phy-reset host port using DET bits of SControl register. 3595 * SATA phy-reset @link using DET bits of SControl register.
3435 * 3596 *
3436 * LOCKING: 3597 * LOCKING:
3437 * Kernel thread context (may sleep) 3598 * Kernel thread context (may sleep)
@@ -3439,7 +3600,7 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
3439 * RETURNS: 3600 * RETURNS:
3440 * 0 on success, -errno otherwise. 3601 * 0 on success, -errno otherwise.
3441 */ 3602 */
3442int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing, 3603int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3443 unsigned long deadline) 3604 unsigned long deadline)
3444{ 3605{
3445 u32 scontrol; 3606 u32 scontrol;
@@ -3447,30 +3608,30 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
3447 3608
3448 DPRINTK("ENTER\n"); 3609 DPRINTK("ENTER\n");
3449 3610
3450 if (sata_set_spd_needed(ap)) { 3611 if (sata_set_spd_needed(link)) {
3451 /* SATA spec says nothing about how to reconfigure 3612 /* SATA spec says nothing about how to reconfigure
3452 * spd. To be on the safe side, turn off phy during 3613 * spd. To be on the safe side, turn off phy during
3453 * reconfiguration. This works for at least ICH7 AHCI 3614 * reconfiguration. This works for at least ICH7 AHCI
3454 * and Sil3124. 3615 * and Sil3124.
3455 */ 3616 */
3456 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) 3617 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3457 goto out; 3618 goto out;
3458 3619
3459 scontrol = (scontrol & 0x0f0) | 0x304; 3620 scontrol = (scontrol & 0x0f0) | 0x304;
3460 3621
3461 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) 3622 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3462 goto out; 3623 goto out;
3463 3624
3464 sata_set_spd(ap); 3625 sata_set_spd(link);
3465 } 3626 }
3466 3627
3467 /* issue phy wake/reset */ 3628 /* issue phy wake/reset */
3468 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) 3629 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3469 goto out; 3630 goto out;
3470 3631
3471 scontrol = (scontrol & 0x0f0) | 0x301; 3632 scontrol = (scontrol & 0x0f0) | 0x301;
3472 3633
3473 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol))) 3634 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3474 goto out; 3635 goto out;
3475 3636
3476 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 3637 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
@@ -3478,8 +3639,8 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
3478 */ 3639 */
3479 msleep(1); 3640 msleep(1);
3480 3641
3481 /* bring phy back */ 3642 /* bring link back */
3482 rc = sata_phy_resume(ap, timing, deadline); 3643 rc = sata_link_resume(link, timing, deadline);
3483 out: 3644 out:
3484 DPRINTK("EXIT, rc=%d\n", rc); 3645 DPRINTK("EXIT, rc=%d\n", rc);
3485 return rc; 3646 return rc;
@@ -3487,7 +3648,7 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
3487 3648
3488/** 3649/**
3489 * sata_std_hardreset - reset host port via SATA phy reset 3650 * sata_std_hardreset - reset host port via SATA phy reset
3490 * @ap: port to reset 3651 * @link: link to reset
3491 * @class: resulting class of attached device 3652 * @class: resulting class of attached device
3492 * @deadline: deadline jiffies for the operation 3653 * @deadline: deadline jiffies for the operation
3493 * 3654 *
@@ -3500,24 +3661,25 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
3500 * RETURNS: 3661 * RETURNS:
3501 * 0 on success, -errno otherwise. 3662 * 0 on success, -errno otherwise.
3502 */ 3663 */
3503int sata_std_hardreset(struct ata_port *ap, unsigned int *class, 3664int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3504 unsigned long deadline) 3665 unsigned long deadline)
3505{ 3666{
3506 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context); 3667 struct ata_port *ap = link->ap;
3668 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3507 int rc; 3669 int rc;
3508 3670
3509 DPRINTK("ENTER\n"); 3671 DPRINTK("ENTER\n");
3510 3672
3511 /* do hardreset */ 3673 /* do hardreset */
3512 rc = sata_port_hardreset(ap, timing, deadline); 3674 rc = sata_link_hardreset(link, timing, deadline);
3513 if (rc) { 3675 if (rc) {
3514 ata_port_printk(ap, KERN_ERR, 3676 ata_link_printk(link, KERN_ERR,
3515 "COMRESET failed (errno=%d)\n", rc); 3677 "COMRESET failed (errno=%d)\n", rc);
3516 return rc; 3678 return rc;
3517 } 3679 }
3518 3680
3519 /* TODO: phy layer with polling, timeouts, etc. */ 3681 /* TODO: phy layer with polling, timeouts, etc. */
3520 if (ata_port_offline(ap)) { 3682 if (ata_link_offline(link)) {
3521 *class = ATA_DEV_NONE; 3683 *class = ATA_DEV_NONE;
3522 DPRINTK("EXIT, link offline\n"); 3684 DPRINTK("EXIT, link offline\n");
3523 return 0; 3685 return 0;
@@ -3526,17 +3688,27 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
3526 /* wait a while before checking status, see SRST for more info */ 3688 /* wait a while before checking status, see SRST for more info */
3527 msleep(150); 3689 msleep(150);
3528 3690
3691 /* If PMP is supported, we have to do follow-up SRST. Note
3692 * that some PMPs don't send D2H Reg FIS after hardreset at
3693 * all if the first port is empty. Wait for it just for a
3694 * second and request follow-up SRST.
3695 */
3696 if (ap->flags & ATA_FLAG_PMP) {
3697 ata_wait_ready(ap, jiffies + HZ);
3698 return -EAGAIN;
3699 }
3700
3529 rc = ata_wait_ready(ap, deadline); 3701 rc = ata_wait_ready(ap, deadline);
3530 /* link occupied, -ENODEV too is an error */ 3702 /* link occupied, -ENODEV too is an error */
3531 if (rc) { 3703 if (rc) {
3532 ata_port_printk(ap, KERN_ERR, 3704 ata_link_printk(link, KERN_ERR,
3533 "COMRESET failed (errno=%d)\n", rc); 3705 "COMRESET failed (errno=%d)\n", rc);
3534 return rc; 3706 return rc;
3535 } 3707 }
3536 3708
3537 ap->ops->dev_select(ap, 0); /* probably unnecessary */ 3709 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3538 3710
3539 *class = ata_dev_try_classify(ap, 0, NULL); 3711 *class = ata_dev_try_classify(link->device, 1, NULL);
3540 3712
3541 DPRINTK("EXIT, class=%u\n", *class); 3713 DPRINTK("EXIT, class=%u\n", *class);
3542 return 0; 3714 return 0;
@@ -3544,7 +3716,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
3544 3716
3545/** 3717/**
3546 * ata_std_postreset - standard postreset callback 3718 * ata_std_postreset - standard postreset callback
3547 * @ap: the target ata_port 3719 * @link: the target ata_link
3548 * @classes: classes of attached devices 3720 * @classes: classes of attached devices
3549 * 3721 *
3550 * This function is invoked after a successful reset. Note that 3722 * This function is invoked after a successful reset. Note that
@@ -3554,18 +3726,19 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
3554 * LOCKING: 3726 * LOCKING:
3555 * Kernel thread context (may sleep) 3727 * Kernel thread context (may sleep)
3556 */ 3728 */
3557void ata_std_postreset(struct ata_port *ap, unsigned int *classes) 3729void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3558{ 3730{
3731 struct ata_port *ap = link->ap;
3559 u32 serror; 3732 u32 serror;
3560 3733
3561 DPRINTK("ENTER\n"); 3734 DPRINTK("ENTER\n");
3562 3735
3563 /* print link status */ 3736 /* print link status */
3564 sata_print_link_status(ap); 3737 sata_print_link_status(link);
3565 3738
3566 /* clear SError */ 3739 /* clear SError */
3567 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0) 3740 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3568 sata_scr_write(ap, SCR_ERROR, serror); 3741 sata_scr_write(link, SCR_ERROR, serror);
3569 3742
3570 /* is double-select really necessary? */ 3743 /* is double-select really necessary? */
3571 if (classes[0] != ATA_DEV_NONE) 3744 if (classes[0] != ATA_DEV_NONE)
@@ -3652,7 +3825,7 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3652int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) 3825int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3653{ 3826{
3654 unsigned int class = dev->class; 3827 unsigned int class = dev->class;
3655 u16 *id = (void *)dev->ap->sector_buf; 3828 u16 *id = (void *)dev->link->ap->sector_buf;
3656 int rc; 3829 int rc;
3657 3830
3658 /* read ID data */ 3831 /* read ID data */
@@ -3671,6 +3844,7 @@ int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3671/** 3844/**
3672 * ata_dev_revalidate - Revalidate ATA device 3845 * ata_dev_revalidate - Revalidate ATA device
3673 * @dev: device to revalidate 3846 * @dev: device to revalidate
3847 * @new_class: new class code
3674 * @readid_flags: read ID flags 3848 * @readid_flags: read ID flags
3675 * 3849 *
3676 * Re-read IDENTIFY page, make sure @dev is still attached to the 3850 * Re-read IDENTIFY page, make sure @dev is still attached to the
@@ -3682,7 +3856,8 @@ int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3682 * RETURNS: 3856 * RETURNS:
3683 * 0 on success, negative errno otherwise 3857 * 0 on success, negative errno otherwise
3684 */ 3858 */
3685int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags) 3859int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3860 unsigned int readid_flags)
3686{ 3861{
3687 u64 n_sectors = dev->n_sectors; 3862 u64 n_sectors = dev->n_sectors;
3688 int rc; 3863 int rc;
@@ -3690,6 +3865,15 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3690 if (!ata_dev_enabled(dev)) 3865 if (!ata_dev_enabled(dev))
3691 return -ENODEV; 3866 return -ENODEV;
3692 3867
3868 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3869 if (ata_class_enabled(new_class) &&
3870 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3871 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3872 dev->class, new_class);
3873 rc = -ENODEV;
3874 goto fail;
3875 }
3876
3693 /* re-read ID */ 3877 /* re-read ID */
3694 rc = ata_dev_reread_id(dev, readid_flags); 3878 rc = ata_dev_reread_id(dev, readid_flags);
3695 if (rc) 3879 if (rc)
@@ -3763,6 +3947,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
3763 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */ 3947 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
3764 { "IOMEGA ZIP 250 ATAPI Floppy", 3948 { "IOMEGA ZIP 250 ATAPI Floppy",
3765 NULL, ATA_HORKAGE_NODMA }, 3949 NULL, ATA_HORKAGE_NODMA },
3950 /* Odd clown on sil3726/4726 PMPs */
3951 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
3952 ATA_HORKAGE_SKIP_PM },
3766 3953
3767 /* Weird ATAPI devices */ 3954 /* Weird ATAPI devices */
3768 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 3955 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
@@ -3775,16 +3962,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
3775 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 3962 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3776 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 3963 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
3777 /* NCQ is broken */ 3964 /* NCQ is broken */
3778 { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ }, 3965 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
3779 { "Maxtor 6B200M0", "BANC1BM0", ATA_HORKAGE_NONCQ },
3780 { "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ },
3781 { "Maxtor 7B250S0", "BANC1B70", ATA_HORKAGE_NONCQ, },
3782 { "Maxtor 7B300S0", "BANC1B70", ATA_HORKAGE_NONCQ },
3783 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, 3966 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
3784 { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI", 3967 { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ },
3785 ATA_HORKAGE_NONCQ }, 3968 { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ },
3786 /* NCQ hard hangs device under heavier load, needs hard power cycle */ 3969 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
3787 { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ }, 3970
3788 /* Blacklist entries taken from Silicon Image 3124/3132 3971 /* Blacklist entries taken from Silicon Image 3124/3132
3789 Windows driver .inf file - also several Linux problem reports */ 3972 Windows driver .inf file - also several Linux problem reports */
3790 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 3973 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
@@ -3793,11 +3976,16 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
3793 /* Drives which do spurious command completion */ 3976 /* Drives which do spurious command completion */
3794 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, }, 3977 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
3795 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, }, 3978 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
3979 { "HDT722516DLA380", "V43OA96A", ATA_HORKAGE_NONCQ, },
3796 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, }, 3980 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
3797 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, 3981 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
3982 { "WDC WD3200AAJS-00RYA0", "12.01B01", ATA_HORKAGE_NONCQ, },
3798 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, }, 3983 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
3984 { "ST9120822AS", "3.CLF", ATA_HORKAGE_NONCQ, },
3799 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, }, 3985 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
3800 { "ST3160812AS", "3.AD", ATA_HORKAGE_NONCQ, }, 3986 { "ST9160821AS", "3.ALD", ATA_HORKAGE_NONCQ, },
3987 { "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ, },
3988 { "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ, },
3801 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, }, 3989 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
3802 3990
3803 /* devices which puke on READ_NATIVE_MAX */ 3991 /* devices which puke on READ_NATIVE_MAX */
@@ -3806,10 +3994,31 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
3806 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, 3994 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3807 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, 3995 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
3808 3996
3997 /* Devices which report 1 sector over size HPA */
3998 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
3999 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4000
3809 /* End Marker */ 4001 /* End Marker */
3810 { } 4002 { }
3811}; 4003};
3812 4004
4005int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4006{
4007 const char *p;
4008 int len;
4009
4010 /*
4011 * check for trailing wildcard: *\0
4012 */
4013 p = strchr(patt, wildchar);
4014 if (p && ((*(p + 1)) == 0))
4015 len = p - patt;
4016 else
4017 len = strlen(name);
4018
4019 return strncmp(patt, name, len);
4020}
4021
3813static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4022static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
3814{ 4023{
3815 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4024 unsigned char model_num[ATA_ID_PROD_LEN + 1];
@@ -3820,10 +4029,10 @@ static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
3820 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4029 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3821 4030
3822 while (ad->model_num) { 4031 while (ad->model_num) {
3823 if (!strcmp(ad->model_num, model_num)) { 4032 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
3824 if (ad->model_rev == NULL) 4033 if (ad->model_rev == NULL)
3825 return ad->horkage; 4034 return ad->horkage;
3826 if (!strcmp(ad->model_rev, model_rev)) 4035 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
3827 return ad->horkage; 4036 return ad->horkage;
3828 } 4037 }
3829 ad++; 4038 ad++;
@@ -3837,7 +4046,7 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
3837 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) 4046 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3838 * if the LLDD handles only interrupts in the HSM_ST_LAST state. 4047 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3839 */ 4048 */
3840 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) && 4049 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
3841 (dev->flags & ATA_DFLAG_CDB_INTR)) 4050 (dev->flags & ATA_DFLAG_CDB_INTR))
3842 return 1; 4051 return 1;
3843 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 4052 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
@@ -3857,7 +4066,8 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
3857 */ 4066 */
3858static void ata_dev_xfermask(struct ata_device *dev) 4067static void ata_dev_xfermask(struct ata_device *dev)
3859{ 4068{
3860 struct ata_port *ap = dev->ap; 4069 struct ata_link *link = dev->link;
4070 struct ata_port *ap = link->ap;
3861 struct ata_host *host = ap->host; 4071 struct ata_host *host = ap->host;
3862 unsigned long xfer_mask; 4072 unsigned long xfer_mask;
3863 4073
@@ -3955,7 +4165,43 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3955 tf.protocol = ATA_PROT_NODATA; 4165 tf.protocol = ATA_PROT_NODATA;
3956 tf.nsect = dev->xfer_mode; 4166 tf.nsect = dev->xfer_mode;
3957 4167
3958 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); 4168 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4169
4170 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4171 return err_mask;
4172}
4173
4174/**
4175 * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
4176 * @dev: Device to which command will be sent
4177 * @enable: Whether to enable or disable the feature
4178 *
4179 * Issue SET FEATURES - SATA FEATURES command to device @dev
4180 * on port @ap with sector count set to indicate Asynchronous
4181 * Notification feature
4182 *
4183 * LOCKING:
4184 * PCI/etc. bus probe sem.
4185 *
4186 * RETURNS:
4187 * 0 on success, AC_ERR_* mask otherwise.
4188 */
4189static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
4190{
4191 struct ata_taskfile tf;
4192 unsigned int err_mask;
4193
4194 /* set up set-features taskfile */
4195 DPRINTK("set features - SATA features\n");
4196
4197 ata_tf_init(dev, &tf);
4198 tf.command = ATA_CMD_SET_FEATURES;
4199 tf.feature = enable;
4200 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4201 tf.protocol = ATA_PROT_NODATA;
4202 tf.nsect = SATA_AN;
4203
4204 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3959 4205
3960 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4206 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3961 return err_mask; 4207 return err_mask;
@@ -3993,7 +4239,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
3993 tf.nsect = sectors; 4239 tf.nsect = sectors;
3994 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 4240 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3995 4241
3996 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); 4242 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3997 /* A clean abort indicates an original or just out of spec drive 4243 /* A clean abort indicates an original or just out of spec drive
3998 and we should continue as we issue the setup based on the 4244 and we should continue as we issue the setup based on the
3999 drive reported working geometry */ 4245 drive reported working geometry */
@@ -4207,6 +4453,36 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4207} 4453}
4208 4454
4209/** 4455/**
4456 * ata_std_qc_defer - Check whether a qc needs to be deferred
4457 * @qc: ATA command in question
4458 *
4459 * Non-NCQ commands cannot run with any other command, NCQ or
4460 * not. As upper layer only knows the queue depth, we are
4461 * responsible for maintaining exclusion. This function checks
4462 * whether a new command @qc can be issued.
4463 *
4464 * LOCKING:
4465 * spin_lock_irqsave(host lock)
4466 *
4467 * RETURNS:
4468 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4469 */
4470int ata_std_qc_defer(struct ata_queued_cmd *qc)
4471{
4472 struct ata_link *link = qc->dev->link;
4473
4474 if (qc->tf.protocol == ATA_PROT_NCQ) {
4475 if (!ata_tag_valid(link->active_tag))
4476 return 0;
4477 } else {
4478 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4479 return 0;
4480 }
4481
4482 return ATA_DEFER_LINK;
4483}
4484
4485/**
4210 * ata_qc_prep - Prepare taskfile for submission 4486 * ata_qc_prep - Prepare taskfile for submission
4211 * @qc: Metadata associated with taskfile to be prepared 4487 * @qc: Metadata associated with taskfile to be prepared
4212 * 4488 *
@@ -4482,7 +4758,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
4482void ata_data_xfer(struct ata_device *adev, unsigned char *buf, 4758void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4483 unsigned int buflen, int write_data) 4759 unsigned int buflen, int write_data)
4484{ 4760{
4485 struct ata_port *ap = adev->ap; 4761 struct ata_port *ap = adev->link->ap;
4486 unsigned int words = buflen >> 1; 4762 unsigned int words = buflen >> 1;
4487 4763
4488 /* Transfer multiple of 2 bytes */ 4764 /* Transfer multiple of 2 bytes */
@@ -4611,6 +4887,8 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
4611 ata_pio_sector(qc); 4887 ata_pio_sector(qc);
4612 } else 4888 } else
4613 ata_pio_sector(qc); 4889 ata_pio_sector(qc);
4890
4891 ata_altstatus(qc->ap); /* flush */
4614} 4892}
4615 4893
4616/** 4894/**
@@ -4785,6 +5063,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4785 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes); 5063 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
4786 5064
4787 __atapi_pio_bytes(qc, bytes); 5065 __atapi_pio_bytes(qc, bytes);
5066 ata_altstatus(ap); /* flush */
4788 5067
4789 return; 5068 return;
4790 5069
@@ -4956,7 +5235,6 @@ fsm_start:
4956 */ 5235 */
4957 ap->hsm_task_state = HSM_ST; 5236 ap->hsm_task_state = HSM_ST;
4958 ata_pio_sectors(qc); 5237 ata_pio_sectors(qc);
4959 ata_altstatus(ap); /* flush */
4960 } else 5238 } else
4961 /* send CDB */ 5239 /* send CDB */
4962 atapi_send_cdb(ap, qc); 5240 atapi_send_cdb(ap, qc);
@@ -5037,7 +5315,6 @@ fsm_start:
5037 5315
5038 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { 5316 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5039 ata_pio_sectors(qc); 5317 ata_pio_sectors(qc);
5040 ata_altstatus(ap);
5041 status = ata_wait_idle(ap); 5318 status = ata_wait_idle(ap);
5042 } 5319 }
5043 5320
@@ -5057,13 +5334,11 @@ fsm_start:
5057 if (ap->hsm_task_state == HSM_ST_LAST && 5334 if (ap->hsm_task_state == HSM_ST_LAST &&
5058 (!(qc->tf.flags & ATA_TFLAG_WRITE))) { 5335 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5059 /* all data read */ 5336 /* all data read */
5060 ata_altstatus(ap);
5061 status = ata_wait_idle(ap); 5337 status = ata_wait_idle(ap);
5062 goto fsm_start; 5338 goto fsm_start;
5063 } 5339 }
5064 } 5340 }
5065 5341
5066 ata_altstatus(ap); /* flush */
5067 poll_next = 1; 5342 poll_next = 1;
5068 break; 5343 break;
5069 5344
@@ -5188,7 +5463,7 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5188 5463
5189struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) 5464struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5190{ 5465{
5191 struct ata_port *ap = dev->ap; 5466 struct ata_port *ap = dev->link->ap;
5192 struct ata_queued_cmd *qc; 5467 struct ata_queued_cmd *qc;
5193 5468
5194 qc = ata_qc_new(ap); 5469 qc = ata_qc_new(ap);
@@ -5231,6 +5506,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
5231void __ata_qc_complete(struct ata_queued_cmd *qc) 5506void __ata_qc_complete(struct ata_queued_cmd *qc)
5232{ 5507{
5233 struct ata_port *ap = qc->ap; 5508 struct ata_port *ap = qc->ap;
5509 struct ata_link *link = qc->dev->link;
5234 5510
5235 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 5511 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5236 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); 5512 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
@@ -5239,10 +5515,19 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
5239 ata_sg_clean(qc); 5515 ata_sg_clean(qc);
5240 5516
5241 /* command should be marked inactive atomically with qc completion */ 5517 /* command should be marked inactive atomically with qc completion */
5242 if (qc->tf.protocol == ATA_PROT_NCQ) 5518 if (qc->tf.protocol == ATA_PROT_NCQ) {
5243 ap->sactive &= ~(1 << qc->tag); 5519 link->sactive &= ~(1 << qc->tag);
5244 else 5520 if (!link->sactive)
5245 ap->active_tag = ATA_TAG_POISON; 5521 ap->nr_active_links--;
5522 } else {
5523 link->active_tag = ATA_TAG_POISON;
5524 ap->nr_active_links--;
5525 }
5526
5527 /* clear exclusive status */
5528 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5529 ap->excl_link == link))
5530 ap->excl_link = NULL;
5246 5531
5247 /* atapi: mark qc as inactive to prevent the interrupt handler 5532 /* atapi: mark qc as inactive to prevent the interrupt handler
5248 * from completing the command twice later, before the error handler 5533 * from completing the command twice later, before the error handler
@@ -5411,19 +5696,25 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5411void ata_qc_issue(struct ata_queued_cmd *qc) 5696void ata_qc_issue(struct ata_queued_cmd *qc)
5412{ 5697{
5413 struct ata_port *ap = qc->ap; 5698 struct ata_port *ap = qc->ap;
5699 struct ata_link *link = qc->dev->link;
5414 5700
5415 /* Make sure only one non-NCQ command is outstanding. The 5701 /* Make sure only one non-NCQ command is outstanding. The
5416 * check is skipped for old EH because it reuses active qc to 5702 * check is skipped for old EH because it reuses active qc to
5417 * request ATAPI sense. 5703 * request ATAPI sense.
5418 */ 5704 */
5419 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag)); 5705 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5420 5706
5421 if (qc->tf.protocol == ATA_PROT_NCQ) { 5707 if (qc->tf.protocol == ATA_PROT_NCQ) {
5422 WARN_ON(ap->sactive & (1 << qc->tag)); 5708 WARN_ON(link->sactive & (1 << qc->tag));
5423 ap->sactive |= 1 << qc->tag; 5709
5710 if (!link->sactive)
5711 ap->nr_active_links++;
5712 link->sactive |= 1 << qc->tag;
5424 } else { 5713 } else {
5425 WARN_ON(ap->sactive); 5714 WARN_ON(link->sactive);
5426 ap->active_tag = qc->tag; 5715
5716 ap->nr_active_links++;
5717 link->active_tag = qc->tag;
5427 } 5718 }
5428 5719
5429 qc->flags |= ATA_QCFLAG_ACTIVE; 5720 qc->flags |= ATA_QCFLAG_ACTIVE;
@@ -5606,7 +5897,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5606inline unsigned int ata_host_intr (struct ata_port *ap, 5897inline unsigned int ata_host_intr (struct ata_port *ap,
5607 struct ata_queued_cmd *qc) 5898 struct ata_queued_cmd *qc)
5608{ 5899{
5609 struct ata_eh_info *ehi = &ap->eh_info; 5900 struct ata_eh_info *ehi = &ap->link.eh_info;
5610 u8 status, host_stat = 0; 5901 u8 status, host_stat = 0;
5611 5902
5612 VPRINTK("ata%u: protocol %d task_state %d\n", 5903 VPRINTK("ata%u: protocol %d task_state %d\n",
@@ -5680,7 +5971,8 @@ idle_irq:
5680 5971
5681#ifdef ATA_IRQ_TRAP 5972#ifdef ATA_IRQ_TRAP
5682 if ((ap->stats.idle_irq % 1000) == 0) { 5973 if ((ap->stats.idle_irq % 1000) == 0) {
5683 ap->ops->irq_ack(ap, 0); /* debug trap */ 5974 ata_chk_status(ap);
5975 ap->ops->irq_clear(ap);
5684 ata_port_printk(ap, KERN_WARNING, "irq trap\n"); 5976 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5685 return 1; 5977 return 1;
5686 } 5978 }
@@ -5721,7 +6013,7 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance)
5721 !(ap->flags & ATA_FLAG_DISABLED)) { 6013 !(ap->flags & ATA_FLAG_DISABLED)) {
5722 struct ata_queued_cmd *qc; 6014 struct ata_queued_cmd *qc;
5723 6015
5724 qc = ata_qc_from_tag(ap, ap->active_tag); 6016 qc = ata_qc_from_tag(ap, ap->link.active_tag);
5725 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && 6017 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5726 (qc->flags & ATA_QCFLAG_ACTIVE)) 6018 (qc->flags & ATA_QCFLAG_ACTIVE))
5727 handled |= ata_host_intr(ap, qc); 6019 handled |= ata_host_intr(ap, qc);
@@ -5735,9 +6027,9 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance)
5735 6027
5736/** 6028/**
5737 * sata_scr_valid - test whether SCRs are accessible 6029 * sata_scr_valid - test whether SCRs are accessible
5738 * @ap: ATA port to test SCR accessibility for 6030 * @link: ATA link to test SCR accessibility for
5739 * 6031 *
5740 * Test whether SCRs are accessible for @ap. 6032 * Test whether SCRs are accessible for @link.
5741 * 6033 *
5742 * LOCKING: 6034 * LOCKING:
5743 * None. 6035 * None.
@@ -5745,60 +6037,74 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance)
5745 * RETURNS: 6037 * RETURNS:
5746 * 1 if SCRs are accessible, 0 otherwise. 6038 * 1 if SCRs are accessible, 0 otherwise.
5747 */ 6039 */
5748int sata_scr_valid(struct ata_port *ap) 6040int sata_scr_valid(struct ata_link *link)
5749{ 6041{
6042 struct ata_port *ap = link->ap;
6043
5750 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 6044 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5751} 6045}
5752 6046
5753/** 6047/**
5754 * sata_scr_read - read SCR register of the specified port 6048 * sata_scr_read - read SCR register of the specified port
5755 * @ap: ATA port to read SCR for 6049 * @link: ATA link to read SCR for
5756 * @reg: SCR to read 6050 * @reg: SCR to read
5757 * @val: Place to store read value 6051 * @val: Place to store read value
5758 * 6052 *
5759 * Read SCR register @reg of @ap into *@val. This function is 6053 * Read SCR register @reg of @link into *@val. This function is
5760 * guaranteed to succeed if the cable type of the port is SATA 6054 * guaranteed to succeed if @link is ap->link, the cable type of
5761 * and the port implements ->scr_read. 6055 * the port is SATA and the port implements ->scr_read.
5762 * 6056 *
5763 * LOCKING: 6057 * LOCKING:
5764 * None. 6058 * None if @link is ap->link. Kernel thread context otherwise.
5765 * 6059 *
5766 * RETURNS: 6060 * RETURNS:
5767 * 0 on success, negative errno on failure. 6061 * 0 on success, negative errno on failure.
5768 */ 6062 */
5769int sata_scr_read(struct ata_port *ap, int reg, u32 *val) 6063int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5770{ 6064{
5771 if (sata_scr_valid(ap)) 6065 if (ata_is_host_link(link)) {
5772 return ap->ops->scr_read(ap, reg, val); 6066 struct ata_port *ap = link->ap;
5773 return -EOPNOTSUPP; 6067
6068 if (sata_scr_valid(link))
6069 return ap->ops->scr_read(ap, reg, val);
6070 return -EOPNOTSUPP;
6071 }
6072
6073 return sata_pmp_scr_read(link, reg, val);
5774} 6074}
5775 6075
5776/** 6076/**
5777 * sata_scr_write - write SCR register of the specified port 6077 * sata_scr_write - write SCR register of the specified port
5778 * @ap: ATA port to write SCR for 6078 * @link: ATA link to write SCR for
5779 * @reg: SCR to write 6079 * @reg: SCR to write
5780 * @val: value to write 6080 * @val: value to write
5781 * 6081 *
5782 * Write @val to SCR register @reg of @ap. This function is 6082 * Write @val to SCR register @reg of @link. This function is
5783 * guaranteed to succeed if the cable type of the port is SATA 6083 * guaranteed to succeed if @link is ap->link, the cable type of
5784 * and the port implements ->scr_read. 6084 * the port is SATA and the port implements ->scr_read.
5785 * 6085 *
5786 * LOCKING: 6086 * LOCKING:
5787 * None. 6087 * None if @link is ap->link. Kernel thread context otherwise.
5788 * 6088 *
5789 * RETURNS: 6089 * RETURNS:
5790 * 0 on success, negative errno on failure. 6090 * 0 on success, negative errno on failure.
5791 */ 6091 */
5792int sata_scr_write(struct ata_port *ap, int reg, u32 val) 6092int sata_scr_write(struct ata_link *link, int reg, u32 val)
5793{ 6093{
5794 if (sata_scr_valid(ap)) 6094 if (ata_is_host_link(link)) {
5795 return ap->ops->scr_write(ap, reg, val); 6095 struct ata_port *ap = link->ap;
5796 return -EOPNOTSUPP; 6096
6097 if (sata_scr_valid(link))
6098 return ap->ops->scr_write(ap, reg, val);
6099 return -EOPNOTSUPP;
6100 }
6101
6102 return sata_pmp_scr_write(link, reg, val);
5797} 6103}
5798 6104
5799/** 6105/**
5800 * sata_scr_write_flush - write SCR register of the specified port and flush 6106 * sata_scr_write_flush - write SCR register of the specified port and flush
5801 * @ap: ATA port to write SCR for 6107 * @link: ATA link to write SCR for
5802 * @reg: SCR to write 6108 * @reg: SCR to write
5803 * @val: value to write 6109 * @val: value to write
5804 * 6110 *
@@ -5806,31 +6112,36 @@ int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5806 * function performs flush after writing to the register. 6112 * function performs flush after writing to the register.
5807 * 6113 *
5808 * LOCKING: 6114 * LOCKING:
5809 * None. 6115 * None if @link is ap->link. Kernel thread context otherwise.
5810 * 6116 *
5811 * RETURNS: 6117 * RETURNS:
5812 * 0 on success, negative errno on failure. 6118 * 0 on success, negative errno on failure.
5813 */ 6119 */
5814int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val) 6120int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5815{ 6121{
5816 int rc; 6122 if (ata_is_host_link(link)) {
6123 struct ata_port *ap = link->ap;
6124 int rc;
5817 6125
5818 if (sata_scr_valid(ap)) { 6126 if (sata_scr_valid(link)) {
5819 rc = ap->ops->scr_write(ap, reg, val); 6127 rc = ap->ops->scr_write(ap, reg, val);
5820 if (rc == 0) 6128 if (rc == 0)
5821 rc = ap->ops->scr_read(ap, reg, &val); 6129 rc = ap->ops->scr_read(ap, reg, &val);
5822 return rc; 6130 return rc;
6131 }
6132 return -EOPNOTSUPP;
5823 } 6133 }
5824 return -EOPNOTSUPP; 6134
6135 return sata_pmp_scr_write(link, reg, val);
5825} 6136}
5826 6137
5827/** 6138/**
5828 * ata_port_online - test whether the given port is online 6139 * ata_link_online - test whether the given link is online
5829 * @ap: ATA port to test 6140 * @link: ATA link to test
5830 * 6141 *
5831 * Test whether @ap is online. Note that this function returns 0 6142 * Test whether @link is online. Note that this function returns
5832 * if online status of @ap cannot be obtained, so 6143 * 0 if online status of @link cannot be obtained, so
5833 * ata_port_online(ap) != !ata_port_offline(ap). 6144 * ata_link_online(link) != !ata_link_offline(link).
5834 * 6145 *
5835 * LOCKING: 6146 * LOCKING:
5836 * None. 6147 * None.
@@ -5838,22 +6149,23 @@ int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5838 * RETURNS: 6149 * RETURNS:
5839 * 1 if the port online status is available and online. 6150 * 1 if the port online status is available and online.
5840 */ 6151 */
5841int ata_port_online(struct ata_port *ap) 6152int ata_link_online(struct ata_link *link)
5842{ 6153{
5843 u32 sstatus; 6154 u32 sstatus;
5844 6155
5845 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3) 6156 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6157 (sstatus & 0xf) == 0x3)
5846 return 1; 6158 return 1;
5847 return 0; 6159 return 0;
5848} 6160}
5849 6161
5850/** 6162/**
5851 * ata_port_offline - test whether the given port is offline 6163 * ata_link_offline - test whether the given link is offline
5852 * @ap: ATA port to test 6164 * @link: ATA link to test
5853 * 6165 *
5854 * Test whether @ap is offline. Note that this function returns 6166 * Test whether @link is offline. Note that this function
5855 * 0 if offline status of @ap cannot be obtained, so 6167 * returns 0 if offline status of @link cannot be obtained, so
5856 * ata_port_online(ap) != !ata_port_offline(ap). 6168 * ata_link_online(link) != !ata_link_offline(link).
5857 * 6169 *
5858 * LOCKING: 6170 * LOCKING:
5859 * None. 6171 * None.
@@ -5861,11 +6173,12 @@ int ata_port_online(struct ata_port *ap)
5861 * RETURNS: 6173 * RETURNS:
5862 * 1 if the port offline status is available and offline. 6174 * 1 if the port offline status is available and offline.
5863 */ 6175 */
5864int ata_port_offline(struct ata_port *ap) 6176int ata_link_offline(struct ata_link *link)
5865{ 6177{
5866 u32 sstatus; 6178 u32 sstatus;
5867 6179
5868 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3) 6180 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6181 (sstatus & 0xf) != 0x3)
5869 return 1; 6182 return 1;
5870 return 0; 6183 return 0;
5871} 6184}
@@ -5883,6 +6196,10 @@ int ata_flush_cache(struct ata_device *dev)
5883 else 6196 else
5884 cmd = ATA_CMD_FLUSH; 6197 cmd = ATA_CMD_FLUSH;
5885 6198
6199 /* This is wrong. On a failed flush we get back the LBA of the lost
6200 sector and we should (assuming it wasn't aborted as unknown) issue
6201 a further flush command to continue the writeback until it
6202 does not error */
5886 err_mask = ata_do_simple_cmd(dev, cmd); 6203 err_mask = ata_do_simple_cmd(dev, cmd);
5887 if (err_mask) { 6204 if (err_mask) {
5888 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n"); 6205 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
@@ -5902,6 +6219,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5902 6219
5903 for (i = 0; i < host->n_ports; i++) { 6220 for (i = 0; i < host->n_ports; i++) {
5904 struct ata_port *ap = host->ports[i]; 6221 struct ata_port *ap = host->ports[i];
6222 struct ata_link *link;
5905 6223
5906 /* Previous resume operation might still be in 6224 /* Previous resume operation might still be in
5907 * progress. Wait for PM_PENDING to clear. 6225 * progress. Wait for PM_PENDING to clear.
@@ -5921,8 +6239,10 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5921 } 6239 }
5922 6240
5923 ap->pflags |= ATA_PFLAG_PM_PENDING; 6241 ap->pflags |= ATA_PFLAG_PM_PENDING;
5924 ap->eh_info.action |= action; 6242 __ata_port_for_each_link(link, ap) {
5925 ap->eh_info.flags |= ehi_flags; 6243 link->eh_info.action |= action;
6244 link->eh_info.flags |= ehi_flags;
6245 }
5926 6246
5927 ata_port_schedule_eh(ap); 6247 ata_port_schedule_eh(ap);
5928 6248
@@ -6026,12 +6346,13 @@ int ata_port_start(struct ata_port *ap)
6026 */ 6346 */
6027void ata_dev_init(struct ata_device *dev) 6347void ata_dev_init(struct ata_device *dev)
6028{ 6348{
6029 struct ata_port *ap = dev->ap; 6349 struct ata_link *link = dev->link;
6350 struct ata_port *ap = link->ap;
6030 unsigned long flags; 6351 unsigned long flags;
6031 6352
6032 /* SATA spd limit is bound to the first device */ 6353 /* SATA spd limit is bound to the first device */
6033 ap->sata_spd_limit = ap->hw_sata_spd_limit; 6354 link->sata_spd_limit = link->hw_sata_spd_limit;
6034 ap->sata_spd = 0; 6355 link->sata_spd = 0;
6035 6356
6036 /* High bits of dev->flags are used to record warm plug 6357 /* High bits of dev->flags are used to record warm plug
6037 * requests which occur asynchronously. Synchronize using 6358 * requests which occur asynchronously. Synchronize using
@@ -6050,6 +6371,70 @@ void ata_dev_init(struct ata_device *dev)
6050} 6371}
6051 6372
6052/** 6373/**
6374 * ata_link_init - Initialize an ata_link structure
6375 * @ap: ATA port link is attached to
6376 * @link: Link structure to initialize
6377 * @pmp: Port multiplier port number
6378 *
6379 * Initialize @link.
6380 *
6381 * LOCKING:
6382 * Kernel thread context (may sleep)
6383 */
6384void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
6385{
6386 int i;
6387
6388 /* clear everything except for devices */
6389 memset(link, 0, offsetof(struct ata_link, device[0]));
6390
6391 link->ap = ap;
6392 link->pmp = pmp;
6393 link->active_tag = ATA_TAG_POISON;
6394 link->hw_sata_spd_limit = UINT_MAX;
6395
6396 /* can't use iterator, ap isn't initialized yet */
6397 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6398 struct ata_device *dev = &link->device[i];
6399
6400 dev->link = link;
6401 dev->devno = dev - link->device;
6402 ata_dev_init(dev);
6403 }
6404}
6405
6406/**
6407 * sata_link_init_spd - Initialize link->sata_spd_limit
6408 * @link: Link to configure sata_spd_limit for
6409 *
6410 * Initialize @link->[hw_]sata_spd_limit to the currently
6411 * configured value.
6412 *
6413 * LOCKING:
6414 * Kernel thread context (may sleep).
6415 *
6416 * RETURNS:
6417 * 0 on success, -errno on failure.
6418 */
6419int sata_link_init_spd(struct ata_link *link)
6420{
6421 u32 scontrol, spd;
6422 int rc;
6423
6424 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6425 if (rc)
6426 return rc;
6427
6428 spd = (scontrol >> 4) & 0xf;
6429 if (spd)
6430 link->hw_sata_spd_limit &= (1 << spd) - 1;
6431
6432 link->sata_spd_limit = link->hw_sata_spd_limit;
6433
6434 return 0;
6435}
6436
6437/**
6053 * ata_port_alloc - allocate and initialize basic ATA port resources 6438 * ata_port_alloc - allocate and initialize basic ATA port resources
6054 * @host: ATA host this allocated port belongs to 6439 * @host: ATA host this allocated port belongs to
6055 * 6440 *
@@ -6064,7 +6449,6 @@ void ata_dev_init(struct ata_device *dev)
6064struct ata_port *ata_port_alloc(struct ata_host *host) 6449struct ata_port *ata_port_alloc(struct ata_host *host)
6065{ 6450{
6066 struct ata_port *ap; 6451 struct ata_port *ap;
6067 unsigned int i;
6068 6452
6069 DPRINTK("ENTER\n"); 6453 DPRINTK("ENTER\n");
6070 6454
@@ -6079,9 +6463,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
6079 ap->ctl = ATA_DEVCTL_OBS; 6463 ap->ctl = ATA_DEVCTL_OBS;
6080 ap->host = host; 6464 ap->host = host;
6081 ap->dev = host->dev; 6465 ap->dev = host->dev;
6082
6083 ap->hw_sata_spd_limit = UINT_MAX;
6084 ap->active_tag = ATA_TAG_POISON;
6085 ap->last_ctl = 0xFF; 6466 ap->last_ctl = 0xFF;
6086 6467
6087#if defined(ATA_VERBOSE_DEBUG) 6468#if defined(ATA_VERBOSE_DEBUG)
@@ -6104,12 +6485,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
6104 6485
6105 ap->cbl = ATA_CBL_NONE; 6486 ap->cbl = ATA_CBL_NONE;
6106 6487
6107 for (i = 0; i < ATA_MAX_DEVICES; i++) { 6488 ata_link_init(ap, &ap->link, 0);
6108 struct ata_device *dev = &ap->device[i];
6109 dev->ap = ap;
6110 dev->devno = i;
6111 ata_dev_init(dev);
6112 }
6113 6489
6114#ifdef ATA_IRQ_TRAP 6490#ifdef ATA_IRQ_TRAP
6115 ap->stats.unhandled_irq = 1; 6491 ap->stats.unhandled_irq = 1;
@@ -6145,6 +6521,7 @@ static void ata_host_release(struct device *gendev, void *res)
6145 if (ap->scsi_host) 6521 if (ap->scsi_host)
6146 scsi_host_put(ap->scsi_host); 6522 scsi_host_put(ap->scsi_host);
6147 6523
6524 kfree(ap->pmp_link);
6148 kfree(ap); 6525 kfree(ap);
6149 host->ports[i] = NULL; 6526 host->ports[i] = NULL;
6150 } 6527 }
@@ -6255,6 +6632,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6255 ap->mwdma_mask = pi->mwdma_mask; 6632 ap->mwdma_mask = pi->mwdma_mask;
6256 ap->udma_mask = pi->udma_mask; 6633 ap->udma_mask = pi->udma_mask;
6257 ap->flags |= pi->flags; 6634 ap->flags |= pi->flags;
6635 ap->link.flags |= pi->link_flags;
6258 ap->ops = pi->port_ops; 6636 ap->ops = pi->port_ops;
6259 6637
6260 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) 6638 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
@@ -6390,8 +6768,6 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6390 /* set cable, sata_spd_limit and report */ 6768 /* set cable, sata_spd_limit and report */
6391 for (i = 0; i < host->n_ports; i++) { 6769 for (i = 0; i < host->n_ports; i++) {
6392 struct ata_port *ap = host->ports[i]; 6770 struct ata_port *ap = host->ports[i];
6393 int irq_line;
6394 u32 scontrol;
6395 unsigned long xfer_mask; 6771 unsigned long xfer_mask;
6396 6772
6397 /* set SATA cable type if still unset */ 6773 /* set SATA cable type if still unset */
@@ -6399,32 +6775,20 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6399 ap->cbl = ATA_CBL_SATA; 6775 ap->cbl = ATA_CBL_SATA;
6400 6776
6401 /* init sata_spd_limit to the current value */ 6777 /* init sata_spd_limit to the current value */
6402 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) { 6778 sata_link_init_spd(&ap->link);
6403 int spd = (scontrol >> 4) & 0xf;
6404 if (spd)
6405 ap->hw_sata_spd_limit &= (1 << spd) - 1;
6406 }
6407 ap->sata_spd_limit = ap->hw_sata_spd_limit;
6408
6409 /* report the secondary IRQ for second channel legacy */
6410 irq_line = host->irq;
6411 if (i == 1 && host->irq2)
6412 irq_line = host->irq2;
6413 6779
6780 /* print per-port info to dmesg */
6414 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 6781 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6415 ap->udma_mask); 6782 ap->udma_mask);
6416 6783
6417 /* print per-port info to dmesg */ 6784 if (!ata_port_is_dummy(ap)) {
6418 if (!ata_port_is_dummy(ap)) 6785 ata_port_printk(ap, KERN_INFO,
6419 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p " 6786 "%cATA max %s %s\n",
6420 "ctl 0x%p bmdma 0x%p irq %d\n",
6421 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P', 6787 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6422 ata_mode_string(xfer_mask), 6788 ata_mode_string(xfer_mask),
6423 ap->ioaddr.cmd_addr, 6789 ap->link.eh_info.desc);
6424 ap->ioaddr.ctl_addr, 6790 ata_ehi_clear_desc(&ap->link.eh_info);
6425 ap->ioaddr.bmdma_addr, 6791 } else
6426 irq_line);
6427 else
6428 ata_port_printk(ap, KERN_INFO, "DUMMY\n"); 6792 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6429 } 6793 }
6430 6794
@@ -6436,7 +6800,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6436 6800
6437 /* probe */ 6801 /* probe */
6438 if (ap->ops->error_handler) { 6802 if (ap->ops->error_handler) {
6439 struct ata_eh_info *ehi = &ap->eh_info; 6803 struct ata_eh_info *ehi = &ap->link.eh_info;
6440 unsigned long flags; 6804 unsigned long flags;
6441 6805
6442 ata_port_probe(ap); 6806 ata_port_probe(ap);
@@ -6444,7 +6808,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6444 /* kick EH for boot probing */ 6808 /* kick EH for boot probing */
6445 spin_lock_irqsave(ap->lock, flags); 6809 spin_lock_irqsave(ap->lock, flags);
6446 6810
6447 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1; 6811 ehi->probe_mask =
6812 (1 << ata_link_max_devices(&ap->link)) - 1;
6448 ehi->action |= ATA_EH_SOFTRESET; 6813 ehi->action |= ATA_EH_SOFTRESET;
6449 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 6814 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6450 6815
@@ -6506,7 +6871,7 @@ int ata_host_activate(struct ata_host *host, int irq,
6506 irq_handler_t irq_handler, unsigned long irq_flags, 6871 irq_handler_t irq_handler, unsigned long irq_flags,
6507 struct scsi_host_template *sht) 6872 struct scsi_host_template *sht)
6508{ 6873{
6509 int rc; 6874 int i, rc;
6510 6875
6511 rc = ata_host_start(host); 6876 rc = ata_host_start(host);
6512 if (rc) 6877 if (rc)
@@ -6517,8 +6882,8 @@ int ata_host_activate(struct ata_host *host, int irq,
6517 if (rc) 6882 if (rc)
6518 return rc; 6883 return rc;
6519 6884
6520 /* Used to print device info at probe */ 6885 for (i = 0; i < host->n_ports; i++)
6521 host->irq = irq; 6886 ata_port_desc(host->ports[i], "irq %d", irq);
6522 6887
6523 rc = ata_host_register(host, sht); 6888 rc = ata_host_register(host, sht);
6524 /* if failed, just free the IRQ and leave ports alone */ 6889 /* if failed, just free the IRQ and leave ports alone */
@@ -6542,7 +6907,8 @@ int ata_host_activate(struct ata_host *host, int irq,
6542void ata_port_detach(struct ata_port *ap) 6907void ata_port_detach(struct ata_port *ap)
6543{ 6908{
6544 unsigned long flags; 6909 unsigned long flags;
6545 int i; 6910 struct ata_link *link;
6911 struct ata_device *dev;
6546 6912
6547 if (!ap->ops->error_handler) 6913 if (!ap->ops->error_handler)
6548 goto skip_eh; 6914 goto skip_eh;
@@ -6559,8 +6925,10 @@ void ata_port_detach(struct ata_port *ap)
6559 */ 6925 */
6560 spin_lock_irqsave(ap->lock, flags); 6926 spin_lock_irqsave(ap->lock, flags);
6561 6927
6562 for (i = 0; i < ATA_MAX_DEVICES; i++) 6928 ata_port_for_each_link(link, ap) {
6563 ata_dev_disable(&ap->device[i]); 6929 ata_link_for_each_dev(dev, link)
6930 ata_dev_disable(dev);
6931 }
6564 6932
6565 spin_unlock_irqrestore(ap->lock, flags); 6933 spin_unlock_irqrestore(ap->lock, flags);
6566 6934
@@ -6639,7 +7007,7 @@ void ata_std_ports(struct ata_ioports *ioaddr)
6639 */ 7007 */
6640void ata_pci_remove_one(struct pci_dev *pdev) 7008void ata_pci_remove_one(struct pci_dev *pdev)
6641{ 7009{
6642 struct device *dev = pci_dev_to_dev(pdev); 7010 struct device *dev = &pdev->dev;
6643 struct ata_host *host = dev_get_drvdata(dev); 7011 struct ata_host *host = dev_get_drvdata(dev);
6644 7012
6645 ata_host_detach(host); 7013 ata_host_detach(host);
@@ -6847,7 +7215,6 @@ static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6847} 7215}
6848 7216
6849const struct ata_port_operations ata_dummy_port_ops = { 7217const struct ata_port_operations ata_dummy_port_ops = {
6850 .port_disable = ata_port_disable,
6851 .check_status = ata_dummy_check_status, 7218 .check_status = ata_dummy_check_status,
6852 .check_altstatus = ata_dummy_check_status, 7219 .check_altstatus = ata_dummy_check_status,
6853 .dev_select = ata_noop_dev_select, 7220 .dev_select = ata_noop_dev_select,
@@ -6909,6 +7276,7 @@ EXPORT_SYMBOL_GPL(ata_interrupt);
6909EXPORT_SYMBOL_GPL(ata_do_set_mode); 7276EXPORT_SYMBOL_GPL(ata_do_set_mode);
6910EXPORT_SYMBOL_GPL(ata_data_xfer); 7277EXPORT_SYMBOL_GPL(ata_data_xfer);
6911EXPORT_SYMBOL_GPL(ata_data_xfer_noirq); 7278EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
7279EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6912EXPORT_SYMBOL_GPL(ata_qc_prep); 7280EXPORT_SYMBOL_GPL(ata_qc_prep);
6913EXPORT_SYMBOL_GPL(ata_dumb_qc_prep); 7281EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
6914EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 7282EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
@@ -6925,14 +7293,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6925EXPORT_SYMBOL_GPL(ata_port_probe); 7293EXPORT_SYMBOL_GPL(ata_port_probe);
6926EXPORT_SYMBOL_GPL(ata_dev_disable); 7294EXPORT_SYMBOL_GPL(ata_dev_disable);
6927EXPORT_SYMBOL_GPL(sata_set_spd); 7295EXPORT_SYMBOL_GPL(sata_set_spd);
6928EXPORT_SYMBOL_GPL(sata_phy_debounce); 7296EXPORT_SYMBOL_GPL(sata_link_debounce);
6929EXPORT_SYMBOL_GPL(sata_phy_resume); 7297EXPORT_SYMBOL_GPL(sata_link_resume);
6930EXPORT_SYMBOL_GPL(sata_phy_reset); 7298EXPORT_SYMBOL_GPL(sata_phy_reset);
6931EXPORT_SYMBOL_GPL(__sata_phy_reset); 7299EXPORT_SYMBOL_GPL(__sata_phy_reset);
6932EXPORT_SYMBOL_GPL(ata_bus_reset); 7300EXPORT_SYMBOL_GPL(ata_bus_reset);
6933EXPORT_SYMBOL_GPL(ata_std_prereset); 7301EXPORT_SYMBOL_GPL(ata_std_prereset);
6934EXPORT_SYMBOL_GPL(ata_std_softreset); 7302EXPORT_SYMBOL_GPL(ata_std_softreset);
6935EXPORT_SYMBOL_GPL(sata_port_hardreset); 7303EXPORT_SYMBOL_GPL(sata_link_hardreset);
6936EXPORT_SYMBOL_GPL(sata_std_hardreset); 7304EXPORT_SYMBOL_GPL(sata_std_hardreset);
6937EXPORT_SYMBOL_GPL(ata_std_postreset); 7305EXPORT_SYMBOL_GPL(ata_std_postreset);
6938EXPORT_SYMBOL_GPL(ata_dev_classify); 7306EXPORT_SYMBOL_GPL(ata_dev_classify);
@@ -6953,8 +7321,8 @@ EXPORT_SYMBOL_GPL(sata_scr_valid);
6953EXPORT_SYMBOL_GPL(sata_scr_read); 7321EXPORT_SYMBOL_GPL(sata_scr_read);
6954EXPORT_SYMBOL_GPL(sata_scr_write); 7322EXPORT_SYMBOL_GPL(sata_scr_write);
6955EXPORT_SYMBOL_GPL(sata_scr_write_flush); 7323EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6956EXPORT_SYMBOL_GPL(ata_port_online); 7324EXPORT_SYMBOL_GPL(ata_link_online);
6957EXPORT_SYMBOL_GPL(ata_port_offline); 7325EXPORT_SYMBOL_GPL(ata_link_offline);
6958#ifdef CONFIG_PM 7326#ifdef CONFIG_PM
6959EXPORT_SYMBOL_GPL(ata_host_suspend); 7327EXPORT_SYMBOL_GPL(ata_host_suspend);
6960EXPORT_SYMBOL_GPL(ata_host_resume); 7328EXPORT_SYMBOL_GPL(ata_host_resume);
@@ -6985,22 +7353,31 @@ EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6985EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); 7353EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6986#endif /* CONFIG_PCI */ 7354#endif /* CONFIG_PCI */
6987 7355
7356EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
7357EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
7358EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
7359EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
7360EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
7361
6988EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 7362EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6989EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 7363EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6990EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 7364EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7365EXPORT_SYMBOL_GPL(ata_port_desc);
7366#ifdef CONFIG_PCI
7367EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7368#endif /* CONFIG_PCI */
6991EXPORT_SYMBOL_GPL(ata_eng_timeout); 7369EXPORT_SYMBOL_GPL(ata_eng_timeout);
6992EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 7370EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7371EXPORT_SYMBOL_GPL(ata_link_abort);
6993EXPORT_SYMBOL_GPL(ata_port_abort); 7372EXPORT_SYMBOL_GPL(ata_port_abort);
6994EXPORT_SYMBOL_GPL(ata_port_freeze); 7373EXPORT_SYMBOL_GPL(ata_port_freeze);
7374EXPORT_SYMBOL_GPL(sata_async_notification);
6995EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 7375EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6996EXPORT_SYMBOL_GPL(ata_eh_thaw_port); 7376EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6997EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 7377EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6998EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 7378EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6999EXPORT_SYMBOL_GPL(ata_do_eh); 7379EXPORT_SYMBOL_GPL(ata_do_eh);
7000EXPORT_SYMBOL_GPL(ata_irq_on); 7380EXPORT_SYMBOL_GPL(ata_irq_on);
7001EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
7002EXPORT_SYMBOL_GPL(ata_irq_ack);
7003EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
7004EXPORT_SYMBOL_GPL(ata_dev_try_classify); 7381EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7005 7382
7006EXPORT_SYMBOL_GPL(ata_cable_40wire); 7383EXPORT_SYMBOL_GPL(ata_cable_40wire);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index ac6ceed4bb..2eaa39fc65 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -33,6 +33,7 @@
33 */ 33 */
34 34
35#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/pci.h>
36#include <scsi/scsi.h> 37#include <scsi/scsi.h>
37#include <scsi/scsi_host.h> 38#include <scsi/scsi_host.h>
38#include <scsi/scsi_eh.h> 39#include <scsi/scsi_eh.h>
@@ -74,7 +75,6 @@ static const unsigned long ata_eh_reset_timeouts[] = {
74}; 75};
75 76
76static void __ata_port_freeze(struct ata_port *ap); 77static void __ata_port_freeze(struct ata_port *ap);
77static void ata_eh_finish(struct ata_port *ap);
78#ifdef CONFIG_PM 78#ifdef CONFIG_PM
79static void ata_eh_handle_port_suspend(struct ata_port *ap); 79static void ata_eh_handle_port_suspend(struct ata_port *ap);
80static void ata_eh_handle_port_resume(struct ata_port *ap); 80static void ata_eh_handle_port_resume(struct ata_port *ap);
@@ -151,6 +151,73 @@ void ata_ehi_clear_desc(struct ata_eh_info *ehi)
151 ehi->desc_len = 0; 151 ehi->desc_len = 0;
152} 152}
153 153
154/**
155 * ata_port_desc - append port description
156 * @ap: target ATA port
157 * @fmt: printf format string
158 *
159 * Format string according to @fmt and append it to port
160 * description. If port description is not empty, " " is added
161 * in-between. This function is to be used while initializing
162 * ata_host. The description is printed on host registration.
163 *
164 * LOCKING:
165 * None.
166 */
167void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
168{
169 va_list args;
170
171 WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
172
173 if (ap->link.eh_info.desc_len)
174 __ata_ehi_push_desc(&ap->link.eh_info, " ");
175
176 va_start(args, fmt);
177 __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
178 va_end(args);
179}
180
181#ifdef CONFIG_PCI
182
183/**
184 * ata_port_pbar_desc - append PCI BAR description
185 * @ap: target ATA port
186 * @bar: target PCI BAR
187 * @offset: offset into PCI BAR
188 * @name: name of the area
189 *
190 * If @offset is negative, this function formats a string which
191 * contains the name, address, size and type of the BAR and
192 * appends it to the port description. If @offset is zero or
193 * positive, only name and offsetted address is appended.
194 *
195 * LOCKING:
196 * None.
197 */
198void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
199 const char *name)
200{
201 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
202 char *type = "";
203 unsigned long long start, len;
204
205 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
206 type = "m";
207 else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
208 type = "i";
209
210 start = (unsigned long long)pci_resource_start(pdev, bar);
211 len = (unsigned long long)pci_resource_len(pdev, bar);
212
213 if (offset < 0)
214 ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
215 else
216 ata_port_desc(ap, "%s 0x%llx", name, start + offset);
217}
218
219#endif /* CONFIG_PCI */
220
154static void ata_ering_record(struct ata_ering *ering, int is_io, 221static void ata_ering_record(struct ata_ering *ering, int is_io,
155 unsigned int err_mask) 222 unsigned int err_mask)
156{ 223{
@@ -195,28 +262,29 @@ static int ata_ering_map(struct ata_ering *ering,
195 262
196static unsigned int ata_eh_dev_action(struct ata_device *dev) 263static unsigned int ata_eh_dev_action(struct ata_device *dev)
197{ 264{
198 struct ata_eh_context *ehc = &dev->ap->eh_context; 265 struct ata_eh_context *ehc = &dev->link->eh_context;
199 266
200 return ehc->i.action | ehc->i.dev_action[dev->devno]; 267 return ehc->i.action | ehc->i.dev_action[dev->devno];
201} 268}
202 269
203static void ata_eh_clear_action(struct ata_device *dev, 270static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
204 struct ata_eh_info *ehi, unsigned int action) 271 struct ata_eh_info *ehi, unsigned int action)
205{ 272{
206 int i; 273 struct ata_device *tdev;
207 274
208 if (!dev) { 275 if (!dev) {
209 ehi->action &= ~action; 276 ehi->action &= ~action;
210 for (i = 0; i < ATA_MAX_DEVICES; i++) 277 ata_link_for_each_dev(tdev, link)
211 ehi->dev_action[i] &= ~action; 278 ehi->dev_action[tdev->devno] &= ~action;
212 } else { 279 } else {
213 /* doesn't make sense for port-wide EH actions */ 280 /* doesn't make sense for port-wide EH actions */
214 WARN_ON(!(action & ATA_EH_PERDEV_MASK)); 281 WARN_ON(!(action & ATA_EH_PERDEV_MASK));
215 282
216 /* break ehi->action into ehi->dev_action */ 283 /* break ehi->action into ehi->dev_action */
217 if (ehi->action & action) { 284 if (ehi->action & action) {
218 for (i = 0; i < ATA_MAX_DEVICES; i++) 285 ata_link_for_each_dev(tdev, link)
219 ehi->dev_action[i] |= ehi->action & action; 286 ehi->dev_action[tdev->devno] |=
287 ehi->action & action;
220 ehi->action &= ~action; 288 ehi->action &= ~action;
221 } 289 }
222 290
@@ -261,7 +329,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
261 329
262 ret = EH_HANDLED; 330 ret = EH_HANDLED;
263 spin_lock_irqsave(ap->lock, flags); 331 spin_lock_irqsave(ap->lock, flags);
264 qc = ata_qc_from_tag(ap, ap->active_tag); 332 qc = ata_qc_from_tag(ap, ap->link.active_tag);
265 if (qc) { 333 if (qc) {
266 WARN_ON(qc->scsicmd != cmd); 334 WARN_ON(qc->scsicmd != cmd);
267 qc->flags |= ATA_QCFLAG_EH_SCHEDULED; 335 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
@@ -290,7 +358,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
290void ata_scsi_error(struct Scsi_Host *host) 358void ata_scsi_error(struct Scsi_Host *host)
291{ 359{
292 struct ata_port *ap = ata_shost_to_port(host); 360 struct ata_port *ap = ata_shost_to_port(host);
293 int i, repeat_cnt = ATA_EH_MAX_REPEAT; 361 int i;
294 unsigned long flags; 362 unsigned long flags;
295 363
296 DPRINTK("ENTER\n"); 364 DPRINTK("ENTER\n");
@@ -356,12 +424,17 @@ void ata_scsi_error(struct Scsi_Host *host)
356 __ata_port_freeze(ap); 424 __ata_port_freeze(ap);
357 425
358 spin_unlock_irqrestore(ap->lock, flags); 426 spin_unlock_irqrestore(ap->lock, flags);
427
428 /* initialize eh_tries */
429 ap->eh_tries = ATA_EH_MAX_TRIES;
359 } else 430 } else
360 spin_unlock_wait(ap->lock); 431 spin_unlock_wait(ap->lock);
361 432
362 repeat: 433 repeat:
363 /* invoke error handler */ 434 /* invoke error handler */
364 if (ap->ops->error_handler) { 435 if (ap->ops->error_handler) {
436 struct ata_link *link;
437
365 /* kill fast drain timer */ 438 /* kill fast drain timer */
366 del_timer_sync(&ap->fastdrain_timer); 439 del_timer_sync(&ap->fastdrain_timer);
367 440
@@ -371,12 +444,15 @@ void ata_scsi_error(struct Scsi_Host *host)
371 /* fetch & clear EH info */ 444 /* fetch & clear EH info */
372 spin_lock_irqsave(ap->lock, flags); 445 spin_lock_irqsave(ap->lock, flags);
373 446
374 memset(&ap->eh_context, 0, sizeof(ap->eh_context)); 447 __ata_port_for_each_link(link, ap) {
375 ap->eh_context.i = ap->eh_info; 448 memset(&link->eh_context, 0, sizeof(link->eh_context));
376 memset(&ap->eh_info, 0, sizeof(ap->eh_info)); 449 link->eh_context.i = link->eh_info;
450 memset(&link->eh_info, 0, sizeof(link->eh_info));
451 }
377 452
378 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; 453 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
379 ap->pflags &= ~ATA_PFLAG_EH_PENDING; 454 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
455 ap->excl_link = NULL; /* don't maintain exclusion over EH */
380 456
381 spin_unlock_irqrestore(ap->lock, flags); 457 spin_unlock_irqrestore(ap->lock, flags);
382 458
@@ -396,20 +472,18 @@ void ata_scsi_error(struct Scsi_Host *host)
396 spin_lock_irqsave(ap->lock, flags); 472 spin_lock_irqsave(ap->lock, flags);
397 473
398 if (ap->pflags & ATA_PFLAG_EH_PENDING) { 474 if (ap->pflags & ATA_PFLAG_EH_PENDING) {
399 if (--repeat_cnt) { 475 if (--ap->eh_tries) {
400 ata_port_printk(ap, KERN_INFO,
401 "EH pending after completion, "
402 "repeating EH (cnt=%d)\n", repeat_cnt);
403 spin_unlock_irqrestore(ap->lock, flags); 476 spin_unlock_irqrestore(ap->lock, flags);
404 goto repeat; 477 goto repeat;
405 } 478 }
406 ata_port_printk(ap, KERN_ERR, "EH pending after %d " 479 ata_port_printk(ap, KERN_ERR, "EH pending after %d "
407 "tries, giving up\n", ATA_EH_MAX_REPEAT); 480 "tries, giving up\n", ATA_EH_MAX_TRIES);
408 ap->pflags &= ~ATA_PFLAG_EH_PENDING; 481 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
409 } 482 }
410 483
411 /* this run is complete, make sure EH info is clear */ 484 /* this run is complete, make sure EH info is clear */
412 memset(&ap->eh_info, 0, sizeof(ap->eh_info)); 485 __ata_port_for_each_link(link, ap)
486 memset(&link->eh_info, 0, sizeof(link->eh_info));
413 487
414 /* Clear host_eh_scheduled while holding ap->lock such 488 /* Clear host_eh_scheduled while holding ap->lock such
415 * that if exception occurs after this point but 489 * that if exception occurs after this point but
@@ -420,7 +494,7 @@ void ata_scsi_error(struct Scsi_Host *host)
420 494
421 spin_unlock_irqrestore(ap->lock, flags); 495 spin_unlock_irqrestore(ap->lock, flags);
422 } else { 496 } else {
423 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL); 497 WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
424 ap->ops->eng_timeout(ap); 498 ap->ops->eng_timeout(ap);
425 } 499 }
426 500
@@ -575,7 +649,7 @@ void ata_eng_timeout(struct ata_port *ap)
575{ 649{
576 DPRINTK("ENTER\n"); 650 DPRINTK("ENTER\n");
577 651
578 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag)); 652 ata_qc_timeout(ata_qc_from_tag(ap, ap->link.active_tag));
579 653
580 DPRINTK("EXIT\n"); 654 DPRINTK("EXIT\n");
581} 655}
@@ -718,19 +792,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
718 DPRINTK("port EH scheduled\n"); 792 DPRINTK("port EH scheduled\n");
719} 793}
720 794
721/** 795static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
722 * ata_port_abort - abort all qc's on the port
723 * @ap: ATA port to abort qc's for
724 *
725 * Abort all active qc's of @ap and schedule EH.
726 *
727 * LOCKING:
728 * spin_lock_irqsave(host lock)
729 *
730 * RETURNS:
731 * Number of aborted qc's.
732 */
733int ata_port_abort(struct ata_port *ap)
734{ 796{
735 int tag, nr_aborted = 0; 797 int tag, nr_aborted = 0;
736 798
@@ -742,7 +804,7 @@ int ata_port_abort(struct ata_port *ap)
742 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 804 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
743 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 805 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
744 806
745 if (qc) { 807 if (qc && (!link || qc->dev->link == link)) {
746 qc->flags |= ATA_QCFLAG_FAILED; 808 qc->flags |= ATA_QCFLAG_FAILED;
747 ata_qc_complete(qc); 809 ata_qc_complete(qc);
748 nr_aborted++; 810 nr_aborted++;
@@ -756,6 +818,40 @@ int ata_port_abort(struct ata_port *ap)
756} 818}
757 819
758/** 820/**
821 * ata_link_abort - abort all qc's on the link
822 * @link: ATA link to abort qc's for
823 *
824 * Abort all active qc's active on @link and schedule EH.
825 *
826 * LOCKING:
827 * spin_lock_irqsave(host lock)
828 *
829 * RETURNS:
830 * Number of aborted qc's.
831 */
832int ata_link_abort(struct ata_link *link)
833{
834 return ata_do_link_abort(link->ap, link);
835}
836
837/**
838 * ata_port_abort - abort all qc's on the port
839 * @ap: ATA port to abort qc's for
840 *
841 * Abort all active qc's of @ap and schedule EH.
842 *
843 * LOCKING:
844 * spin_lock_irqsave(host_set lock)
845 *
846 * RETURNS:
847 * Number of aborted qc's.
848 */
849int ata_port_abort(struct ata_port *ap)
850{
851 return ata_do_link_abort(ap, NULL);
852}
853
854/**
759 * __ata_port_freeze - freeze port 855 * __ata_port_freeze - freeze port
760 * @ap: ATA port to freeze 856 * @ap: ATA port to freeze
761 * 857 *
@@ -810,6 +906,79 @@ int ata_port_freeze(struct ata_port *ap)
810} 906}
811 907
812/** 908/**
909 * sata_async_notification - SATA async notification handler
910 * @ap: ATA port where async notification is received
911 *
912 * Handler to be called when async notification via SDB FIS is
913 * received. This function schedules EH if necessary.
914 *
915 * LOCKING:
916 * spin_lock_irqsave(host lock)
917 *
918 * RETURNS:
919 * 1 if EH is scheduled, 0 otherwise.
920 */
921int sata_async_notification(struct ata_port *ap)
922{
923 u32 sntf;
924 int rc;
925
926 if (!(ap->flags & ATA_FLAG_AN))
927 return 0;
928
929 rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
930 if (rc == 0)
931 sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
932
933 if (!ap->nr_pmp_links || rc) {
934 /* PMP is not attached or SNTF is not available */
935 if (!ap->nr_pmp_links) {
936 /* PMP is not attached. Check whether ATAPI
937 * AN is configured. If so, notify media
938 * change.
939 */
940 struct ata_device *dev = ap->link.device;
941
942 if ((dev->class == ATA_DEV_ATAPI) &&
943 (dev->flags & ATA_DFLAG_AN))
944 ata_scsi_media_change_notify(dev);
945 return 0;
946 } else {
947 /* PMP is attached but SNTF is not available.
948 * ATAPI async media change notification is
949 * not used. The PMP must be reporting PHY
950 * status change, schedule EH.
951 */
952 ata_port_schedule_eh(ap);
953 return 1;
954 }
955 } else {
956 /* PMP is attached and SNTF is available */
957 struct ata_link *link;
958
959 /* check and notify ATAPI AN */
960 ata_port_for_each_link(link, ap) {
961 if (!(sntf & (1 << link->pmp)))
962 continue;
963
964 if ((link->device->class == ATA_DEV_ATAPI) &&
965 (link->device->flags & ATA_DFLAG_AN))
966 ata_scsi_media_change_notify(link->device);
967 }
968
969 /* If PMP is reporting that PHY status of some
970 * downstream ports has changed, schedule EH.
971 */
972 if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
973 ata_port_schedule_eh(ap);
974 return 1;
975 }
976
977 return 0;
978 }
979}
980
981/**
813 * ata_eh_freeze_port - EH helper to freeze port 982 * ata_eh_freeze_port - EH helper to freeze port
814 * @ap: ATA port to freeze 983 * @ap: ATA port to freeze
815 * 984 *
@@ -920,9 +1089,10 @@ void ata_eh_qc_retry(struct ata_queued_cmd *qc)
920 * LOCKING: 1089 * LOCKING:
921 * None. 1090 * None.
922 */ 1091 */
923static void ata_eh_detach_dev(struct ata_device *dev) 1092void ata_eh_detach_dev(struct ata_device *dev)
924{ 1093{
925 struct ata_port *ap = dev->ap; 1094 struct ata_link *link = dev->link;
1095 struct ata_port *ap = link->ap;
926 unsigned long flags; 1096 unsigned long flags;
927 1097
928 ata_dev_disable(dev); 1098 ata_dev_disable(dev);
@@ -937,31 +1107,32 @@ static void ata_eh_detach_dev(struct ata_device *dev)
937 } 1107 }
938 1108
939 /* clear per-dev EH actions */ 1109 /* clear per-dev EH actions */
940 ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK); 1110 ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
941 ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK); 1111 ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
942 1112
943 spin_unlock_irqrestore(ap->lock, flags); 1113 spin_unlock_irqrestore(ap->lock, flags);
944} 1114}
945 1115
946/** 1116/**
947 * ata_eh_about_to_do - about to perform eh_action 1117 * ata_eh_about_to_do - about to perform eh_action
948 * @ap: target ATA port 1118 * @link: target ATA link
949 * @dev: target ATA dev for per-dev action (can be NULL) 1119 * @dev: target ATA dev for per-dev action (can be NULL)
950 * @action: action about to be performed 1120 * @action: action about to be performed
951 * 1121 *
952 * Called just before performing EH actions to clear related bits 1122 * Called just before performing EH actions to clear related bits
953 * in @ap->eh_info such that eh actions are not unnecessarily 1123 * in @link->eh_info such that eh actions are not unnecessarily
954 * repeated. 1124 * repeated.
955 * 1125 *
956 * LOCKING: 1126 * LOCKING:
957 * None. 1127 * None.
958 */ 1128 */
959static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev, 1129void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
960 unsigned int action) 1130 unsigned int action)
961{ 1131{
1132 struct ata_port *ap = link->ap;
1133 struct ata_eh_info *ehi = &link->eh_info;
1134 struct ata_eh_context *ehc = &link->eh_context;
962 unsigned long flags; 1135 unsigned long flags;
963 struct ata_eh_info *ehi = &ap->eh_info;
964 struct ata_eh_context *ehc = &ap->eh_context;
965 1136
966 spin_lock_irqsave(ap->lock, flags); 1137 spin_lock_irqsave(ap->lock, flags);
967 1138
@@ -978,7 +1149,7 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
978 ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK; 1149 ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
979 } 1150 }
980 1151
981 ata_eh_clear_action(dev, ehi, action); 1152 ata_eh_clear_action(link, dev, ehi, action);
982 1153
983 if (!(ehc->i.flags & ATA_EHI_QUIET)) 1154 if (!(ehc->i.flags & ATA_EHI_QUIET))
984 ap->pflags |= ATA_PFLAG_RECOVERED; 1155 ap->pflags |= ATA_PFLAG_RECOVERED;
@@ -988,26 +1159,28 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
988 1159
989/** 1160/**
990 * ata_eh_done - EH action complete 1161 * ata_eh_done - EH action complete
991 * @ap: target ATA port 1162* @ap: target ATA port
992 * @dev: target ATA dev for per-dev action (can be NULL) 1163 * @dev: target ATA dev for per-dev action (can be NULL)
993 * @action: action just completed 1164 * @action: action just completed
994 * 1165 *
995 * Called right after performing EH actions to clear related bits 1166 * Called right after performing EH actions to clear related bits
996 * in @ap->eh_context. 1167 * in @link->eh_context.
997 * 1168 *
998 * LOCKING: 1169 * LOCKING:
999 * None. 1170 * None.
1000 */ 1171 */
1001static void ata_eh_done(struct ata_port *ap, struct ata_device *dev, 1172void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1002 unsigned int action) 1173 unsigned int action)
1003{ 1174{
1175 struct ata_eh_context *ehc = &link->eh_context;
1176
1004 /* if reset is complete, clear all reset actions & reset modifier */ 1177 /* if reset is complete, clear all reset actions & reset modifier */
1005 if (action & ATA_EH_RESET_MASK) { 1178 if (action & ATA_EH_RESET_MASK) {
1006 action |= ATA_EH_RESET_MASK; 1179 action |= ATA_EH_RESET_MASK;
1007 ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK; 1180 ehc->i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
1008 } 1181 }
1009 1182
1010 ata_eh_clear_action(dev, &ap->eh_context.i, action); 1183 ata_eh_clear_action(link, dev, &ehc->i, action);
1011} 1184}
1012 1185
1013/** 1186/**
@@ -1077,7 +1250,7 @@ static unsigned int ata_read_log_page(struct ata_device *dev,
1077 tf.protocol = ATA_PROT_PIO; 1250 tf.protocol = ATA_PROT_PIO;
1078 1251
1079 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 1252 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1080 buf, sectors * ATA_SECT_SIZE); 1253 buf, sectors * ATA_SECT_SIZE, 0);
1081 1254
1082 DPRINTK("EXIT, err_mask=%x\n", err_mask); 1255 DPRINTK("EXIT, err_mask=%x\n", err_mask);
1083 return err_mask; 1256 return err_mask;
@@ -1101,7 +1274,7 @@ static unsigned int ata_read_log_page(struct ata_device *dev,
1101static int ata_eh_read_log_10h(struct ata_device *dev, 1274static int ata_eh_read_log_10h(struct ata_device *dev,
1102 int *tag, struct ata_taskfile *tf) 1275 int *tag, struct ata_taskfile *tf)
1103{ 1276{
1104 u8 *buf = dev->ap->sector_buf; 1277 u8 *buf = dev->link->ap->sector_buf;
1105 unsigned int err_mask; 1278 unsigned int err_mask;
1106 u8 csum; 1279 u8 csum;
1107 int i; 1280 int i;
@@ -1155,7 +1328,7 @@ static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
1155{ 1328{
1156 struct ata_device *dev = qc->dev; 1329 struct ata_device *dev = qc->dev;
1157 unsigned char *sense_buf = qc->scsicmd->sense_buffer; 1330 unsigned char *sense_buf = qc->scsicmd->sense_buffer;
1158 struct ata_port *ap = dev->ap; 1331 struct ata_port *ap = dev->link->ap;
1159 struct ata_taskfile tf; 1332 struct ata_taskfile tf;
1160 u8 cdb[ATAPI_CDB_LEN]; 1333 u8 cdb[ATAPI_CDB_LEN];
1161 1334
@@ -1191,12 +1364,12 @@ static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
1191 } 1364 }
1192 1365
1193 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 1366 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
1194 sense_buf, SCSI_SENSE_BUFFERSIZE); 1367 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1195} 1368}
1196 1369
1197/** 1370/**
1198 * ata_eh_analyze_serror - analyze SError for a failed port 1371 * ata_eh_analyze_serror - analyze SError for a failed port
1199 * @ap: ATA port to analyze SError for 1372 * @link: ATA link to analyze SError for
1200 * 1373 *
1201 * Analyze SError if available and further determine cause of 1374 * Analyze SError if available and further determine cause of
1202 * failure. 1375 * failure.
@@ -1204,11 +1377,12 @@ static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
1204 * LOCKING: 1377 * LOCKING:
1205 * None. 1378 * None.
1206 */ 1379 */
1207static void ata_eh_analyze_serror(struct ata_port *ap) 1380static void ata_eh_analyze_serror(struct ata_link *link)
1208{ 1381{
1209 struct ata_eh_context *ehc = &ap->eh_context; 1382 struct ata_eh_context *ehc = &link->eh_context;
1210 u32 serror = ehc->i.serror; 1383 u32 serror = ehc->i.serror;
1211 unsigned int err_mask = 0, action = 0; 1384 unsigned int err_mask = 0, action = 0;
1385 u32 hotplug_mask;
1212 1386
1213 if (serror & SERR_PERSISTENT) { 1387 if (serror & SERR_PERSISTENT) {
1214 err_mask |= AC_ERR_ATA_BUS; 1388 err_mask |= AC_ERR_ATA_BUS;
@@ -1227,7 +1401,20 @@ static void ata_eh_analyze_serror(struct ata_port *ap)
1227 err_mask |= AC_ERR_SYSTEM; 1401 err_mask |= AC_ERR_SYSTEM;
1228 action |= ATA_EH_HARDRESET; 1402 action |= ATA_EH_HARDRESET;
1229 } 1403 }
1230 if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG)) 1404
1405 /* Determine whether a hotplug event has occurred. Both
1406 * SError.N/X are considered hotplug events for enabled or
1407 * host links. For disabled PMP links, only N bit is
1408 * considered as X bit is left at 1 for link plugging.
1409 */
1410 hotplug_mask = 0;
1411
1412 if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1413 hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1414 else
1415 hotplug_mask = SERR_PHYRDY_CHG;
1416
1417 if (serror & hotplug_mask)
1231 ata_ehi_hotplugged(&ehc->i); 1418 ata_ehi_hotplugged(&ehc->i);
1232 1419
1233 ehc->i.err_mask |= err_mask; 1420 ehc->i.err_mask |= err_mask;
@@ -1236,7 +1423,7 @@ static void ata_eh_analyze_serror(struct ata_port *ap)
1236 1423
1237/** 1424/**
1238 * ata_eh_analyze_ncq_error - analyze NCQ error 1425 * ata_eh_analyze_ncq_error - analyze NCQ error
1239 * @ap: ATA port to analyze NCQ error for 1426 * @link: ATA link to analyze NCQ error for
1240 * 1427 *
1241 * Read log page 10h, determine the offending qc and acquire 1428 * Read log page 10h, determine the offending qc and acquire
1242 * error status TF. For NCQ device errors, all LLDDs have to do 1429 * error status TF. For NCQ device errors, all LLDDs have to do
@@ -1246,10 +1433,11 @@ static void ata_eh_analyze_serror(struct ata_port *ap)
1246 * LOCKING: 1433 * LOCKING:
1247 * Kernel thread context (may sleep). 1434 * Kernel thread context (may sleep).
1248 */ 1435 */
1249static void ata_eh_analyze_ncq_error(struct ata_port *ap) 1436static void ata_eh_analyze_ncq_error(struct ata_link *link)
1250{ 1437{
1251 struct ata_eh_context *ehc = &ap->eh_context; 1438 struct ata_port *ap = link->ap;
1252 struct ata_device *dev = ap->device; 1439 struct ata_eh_context *ehc = &link->eh_context;
1440 struct ata_device *dev = link->device;
1253 struct ata_queued_cmd *qc; 1441 struct ata_queued_cmd *qc;
1254 struct ata_taskfile tf; 1442 struct ata_taskfile tf;
1255 int tag, rc; 1443 int tag, rc;
@@ -1259,7 +1447,7 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap)
1259 return; 1447 return;
1260 1448
1261 /* is it NCQ device error? */ 1449 /* is it NCQ device error? */
1262 if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) 1450 if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1263 return; 1451 return;
1264 1452
1265 /* has LLDD analyzed already? */ 1453 /* has LLDD analyzed already? */
@@ -1276,13 +1464,13 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap)
1276 /* okay, this error is ours */ 1464 /* okay, this error is ours */
1277 rc = ata_eh_read_log_10h(dev, &tag, &tf); 1465 rc = ata_eh_read_log_10h(dev, &tag, &tf);
1278 if (rc) { 1466 if (rc) {
1279 ata_port_printk(ap, KERN_ERR, "failed to read log page 10h " 1467 ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
1280 "(errno=%d)\n", rc); 1468 "(errno=%d)\n", rc);
1281 return; 1469 return;
1282 } 1470 }
1283 1471
1284 if (!(ap->sactive & (1 << tag))) { 1472 if (!(link->sactive & (1 << tag))) {
1285 ata_port_printk(ap, KERN_ERR, "log page 10h reported " 1473 ata_link_printk(link, KERN_ERR, "log page 10h reported "
1286 "inactive tag %d\n", tag); 1474 "inactive tag %d\n", tag);
1287 return; 1475 return;
1288 } 1476 }
@@ -1497,7 +1685,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
1497 /* speed down? */ 1685 /* speed down? */
1498 if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 1686 if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
1499 /* speed down SATA link speed if possible */ 1687 /* speed down SATA link speed if possible */
1500 if (sata_down_spd_limit(dev->ap) == 0) { 1688 if (sata_down_spd_limit(dev->link) == 0) {
1501 action |= ATA_EH_HARDRESET; 1689 action |= ATA_EH_HARDRESET;
1502 goto done; 1690 goto done;
1503 } 1691 }
@@ -1528,7 +1716,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
1528 * SATA. Consider it only for PATA. 1716 * SATA. Consider it only for PATA.
1529 */ 1717 */
1530 if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 1718 if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
1531 (dev->ap->cbl != ATA_CBL_SATA) && 1719 (dev->link->ap->cbl != ATA_CBL_SATA) &&
1532 (dev->xfer_shift != ATA_SHIFT_PIO)) { 1720 (dev->xfer_shift != ATA_SHIFT_PIO)) {
1533 if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 1721 if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
1534 dev->spdn_cnt = 0; 1722 dev->spdn_cnt = 0;
@@ -1545,19 +1733,20 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
1545} 1733}
1546 1734
1547/** 1735/**
1548 * ata_eh_autopsy - analyze error and determine recovery action 1736 * ata_eh_link_autopsy - analyze error and determine recovery action
1549 * @ap: ATA port to perform autopsy on 1737 * @link: host link to perform autopsy on
1550 * 1738 *
1551 * Analyze why @ap failed and determine which recovery action is 1739 * Analyze why @link failed and determine which recovery actions
1552 * needed. This function also sets more detailed AC_ERR_* values 1740 * are needed. This function also sets more detailed AC_ERR_*
1553 * and fills sense data for ATAPI CHECK SENSE. 1741 * values and fills sense data for ATAPI CHECK SENSE.
1554 * 1742 *
1555 * LOCKING: 1743 * LOCKING:
1556 * Kernel thread context (may sleep). 1744 * Kernel thread context (may sleep).
1557 */ 1745 */
1558static void ata_eh_autopsy(struct ata_port *ap) 1746static void ata_eh_link_autopsy(struct ata_link *link)
1559{ 1747{
1560 struct ata_eh_context *ehc = &ap->eh_context; 1748 struct ata_port *ap = link->ap;
1749 struct ata_eh_context *ehc = &link->eh_context;
1561 unsigned int all_err_mask = 0; 1750 unsigned int all_err_mask = 0;
1562 int tag, is_io = 0; 1751 int tag, is_io = 0;
1563 u32 serror; 1752 u32 serror;
@@ -1569,10 +1758,10 @@ static void ata_eh_autopsy(struct ata_port *ap)
1569 return; 1758 return;
1570 1759
1571 /* obtain and analyze SError */ 1760 /* obtain and analyze SError */
1572 rc = sata_scr_read(ap, SCR_ERROR, &serror); 1761 rc = sata_scr_read(link, SCR_ERROR, &serror);
1573 if (rc == 0) { 1762 if (rc == 0) {
1574 ehc->i.serror |= serror; 1763 ehc->i.serror |= serror;
1575 ata_eh_analyze_serror(ap); 1764 ata_eh_analyze_serror(link);
1576 } else if (rc != -EOPNOTSUPP) { 1765 } else if (rc != -EOPNOTSUPP) {
1577 /* SError read failed, force hardreset and probing */ 1766 /* SError read failed, force hardreset and probing */
1578 ata_ehi_schedule_probe(&ehc->i); 1767 ata_ehi_schedule_probe(&ehc->i);
@@ -1581,7 +1770,7 @@ static void ata_eh_autopsy(struct ata_port *ap)
1581 } 1770 }
1582 1771
1583 /* analyze NCQ failure */ 1772 /* analyze NCQ failure */
1584 ata_eh_analyze_ncq_error(ap); 1773 ata_eh_analyze_ncq_error(link);
1585 1774
1586 /* any real error trumps AC_ERR_OTHER */ 1775 /* any real error trumps AC_ERR_OTHER */
1587 if (ehc->i.err_mask & ~AC_ERR_OTHER) 1776 if (ehc->i.err_mask & ~AC_ERR_OTHER)
@@ -1592,7 +1781,7 @@ static void ata_eh_autopsy(struct ata_port *ap)
1592 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1781 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1593 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1782 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1594 1783
1595 if (!(qc->flags & ATA_QCFLAG_FAILED)) 1784 if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link)
1596 continue; 1785 continue;
1597 1786
1598 /* inherit upper level err_mask */ 1787 /* inherit upper level err_mask */
@@ -1646,20 +1835,43 @@ static void ata_eh_autopsy(struct ata_port *ap)
1646} 1835}
1647 1836
1648/** 1837/**
1649 * ata_eh_report - report error handling to user 1838 * ata_eh_autopsy - analyze error and determine recovery action
1650 * @ap: ATA port EH is going on 1839 * @ap: host port to perform autopsy on
1840 *
1841 * Analyze all links of @ap and determine why they failed and
1842 * which recovery actions are needed.
1843 *
1844 * LOCKING:
1845 * Kernel thread context (may sleep).
1846 */
1847void ata_eh_autopsy(struct ata_port *ap)
1848{
1849 struct ata_link *link;
1850
1851 __ata_port_for_each_link(link, ap)
1852 ata_eh_link_autopsy(link);
1853}
1854
1855/**
1856 * ata_eh_link_report - report error handling to user
1857 * @link: ATA link EH is going on
1651 * 1858 *
1652 * Report EH to user. 1859 * Report EH to user.
1653 * 1860 *
1654 * LOCKING: 1861 * LOCKING:
1655 * None. 1862 * None.
1656 */ 1863 */
1657static void ata_eh_report(struct ata_port *ap) 1864static void ata_eh_link_report(struct ata_link *link)
1658{ 1865{
1659 struct ata_eh_context *ehc = &ap->eh_context; 1866 struct ata_port *ap = link->ap;
1867 struct ata_eh_context *ehc = &link->eh_context;
1660 const char *frozen, *desc; 1868 const char *frozen, *desc;
1869 char tries_buf[6];
1661 int tag, nr_failed = 0; 1870 int tag, nr_failed = 0;
1662 1871
1872 if (ehc->i.flags & ATA_EHI_QUIET)
1873 return;
1874
1663 desc = NULL; 1875 desc = NULL;
1664 if (ehc->i.desc[0] != '\0') 1876 if (ehc->i.desc[0] != '\0')
1665 desc = ehc->i.desc; 1877 desc = ehc->i.desc;
@@ -1667,7 +1879,7 @@ static void ata_eh_report(struct ata_port *ap)
1667 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1879 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1668 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1880 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1669 1881
1670 if (!(qc->flags & ATA_QCFLAG_FAILED)) 1882 if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link)
1671 continue; 1883 continue;
1672 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 1884 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
1673 continue; 1885 continue;
@@ -1682,22 +1894,48 @@ static void ata_eh_report(struct ata_port *ap)
1682 if (ap->pflags & ATA_PFLAG_FROZEN) 1894 if (ap->pflags & ATA_PFLAG_FROZEN)
1683 frozen = " frozen"; 1895 frozen = " frozen";
1684 1896
1897 memset(tries_buf, 0, sizeof(tries_buf));
1898 if (ap->eh_tries < ATA_EH_MAX_TRIES)
1899 snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
1900 ap->eh_tries);
1901
1685 if (ehc->i.dev) { 1902 if (ehc->i.dev) {
1686 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x " 1903 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
1687 "SAct 0x%x SErr 0x%x action 0x%x%s\n", 1904 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
1688 ehc->i.err_mask, ap->sactive, ehc->i.serror, 1905 ehc->i.err_mask, link->sactive, ehc->i.serror,
1689 ehc->i.action, frozen); 1906 ehc->i.action, frozen, tries_buf);
1690 if (desc) 1907 if (desc)
1691 ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc); 1908 ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
1692 } else { 1909 } else {
1693 ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x " 1910 ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
1694 "SAct 0x%x SErr 0x%x action 0x%x%s\n", 1911 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
1695 ehc->i.err_mask, ap->sactive, ehc->i.serror, 1912 ehc->i.err_mask, link->sactive, ehc->i.serror,
1696 ehc->i.action, frozen); 1913 ehc->i.action, frozen, tries_buf);
1697 if (desc) 1914 if (desc)
1698 ata_port_printk(ap, KERN_ERR, "%s\n", desc); 1915 ata_link_printk(link, KERN_ERR, "%s\n", desc);
1699 } 1916 }
1700 1917
1918 if (ehc->i.serror)
1919 ata_port_printk(ap, KERN_ERR,
1920 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
1921 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
1922 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
1923 ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
1924 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
1925 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
1926 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
1927 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
1928 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
1929 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
1930 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
1931 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
1932 ehc->i.serror & SERR_CRC ? "BadCRC " : "",
1933 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
1934 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
1935 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
1936 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
1937 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "" );
1938
1701 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1939 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1702 static const char *dma_str[] = { 1940 static const char *dma_str[] = {
1703 [DMA_BIDIRECTIONAL] = "bidi", 1941 [DMA_BIDIRECTIONAL] = "bidi",
@@ -1708,7 +1946,8 @@ static void ata_eh_report(struct ata_port *ap)
1708 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1946 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1709 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 1947 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
1710 1948
1711 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask) 1949 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
1950 qc->dev->link != link || !qc->err_mask)
1712 continue; 1951 continue;
1713 1952
1714 ata_dev_printk(qc->dev, KERN_ERR, 1953 ata_dev_printk(qc->dev, KERN_ERR,
@@ -1728,18 +1967,60 @@ static void ata_eh_report(struct ata_port *ap)
1728 res->hob_lbal, res->hob_lbam, res->hob_lbah, 1967 res->hob_lbal, res->hob_lbam, res->hob_lbah,
1729 res->device, qc->err_mask, ata_err_string(qc->err_mask), 1968 res->device, qc->err_mask, ata_err_string(qc->err_mask),
1730 qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 1969 qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
1970
1971 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
1972 ATA_ERR) ) {
1973 if (res->command & ATA_BUSY)
1974 ata_dev_printk(qc->dev, KERN_ERR,
1975 "status: { Busy }\n" );
1976 else
1977 ata_dev_printk(qc->dev, KERN_ERR,
1978 "status: { %s%s%s%s}\n",
1979 res->command & ATA_DRDY ? "DRDY " : "",
1980 res->command & ATA_DF ? "DF " : "",
1981 res->command & ATA_DRQ ? "DRQ " : "",
1982 res->command & ATA_ERR ? "ERR " : "" );
1983 }
1984
1985 if (cmd->command != ATA_CMD_PACKET &&
1986 (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
1987 ATA_ABORTED)))
1988 ata_dev_printk(qc->dev, KERN_ERR,
1989 "error: { %s%s%s%s}\n",
1990 res->feature & ATA_ICRC ? "ICRC " : "",
1991 res->feature & ATA_UNC ? "UNC " : "",
1992 res->feature & ATA_IDNF ? "IDNF " : "",
1993 res->feature & ATA_ABORTED ? "ABRT " : "" );
1731 } 1994 }
1732} 1995}
1733 1996
1734static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset, 1997/**
1998 * ata_eh_report - report error handling to user
1999 * @ap: ATA port to report EH about
2000 *
2001 * Report EH to user.
2002 *
2003 * LOCKING:
2004 * None.
2005 */
2006void ata_eh_report(struct ata_port *ap)
2007{
2008 struct ata_link *link;
2009
2010 __ata_port_for_each_link(link, ap)
2011 ata_eh_link_report(link);
2012}
2013
2014static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
1735 unsigned int *classes, unsigned long deadline) 2015 unsigned int *classes, unsigned long deadline)
1736{ 2016{
1737 int i, rc; 2017 struct ata_device *dev;
2018 int rc;
1738 2019
1739 for (i = 0; i < ATA_MAX_DEVICES; i++) 2020 ata_link_for_each_dev(dev, link)
1740 classes[i] = ATA_DEV_UNKNOWN; 2021 classes[dev->devno] = ATA_DEV_UNKNOWN;
1741 2022
1742 rc = reset(ap, classes, deadline); 2023 rc = reset(link, classes, deadline);
1743 if (rc) 2024 if (rc)
1744 return rc; 2025 return rc;
1745 2026
@@ -1747,71 +2028,87 @@ static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
1747 * is complete and convert all ATA_DEV_UNKNOWN to 2028 * is complete and convert all ATA_DEV_UNKNOWN to
1748 * ATA_DEV_NONE. 2029 * ATA_DEV_NONE.
1749 */ 2030 */
1750 for (i = 0; i < ATA_MAX_DEVICES; i++) 2031 ata_link_for_each_dev(dev, link)
1751 if (classes[i] != ATA_DEV_UNKNOWN) 2032 if (classes[dev->devno] != ATA_DEV_UNKNOWN)
1752 break; 2033 break;
1753 2034
1754 if (i < ATA_MAX_DEVICES) 2035 if (dev) {
1755 for (i = 0; i < ATA_MAX_DEVICES; i++) 2036 ata_link_for_each_dev(dev, link) {
1756 if (classes[i] == ATA_DEV_UNKNOWN) 2037 if (classes[dev->devno] == ATA_DEV_UNKNOWN)
1757 classes[i] = ATA_DEV_NONE; 2038 classes[dev->devno] = ATA_DEV_NONE;
2039 }
2040 }
1758 2041
1759 return 0; 2042 return 0;
1760} 2043}
1761 2044
1762static int ata_eh_followup_srst_needed(int rc, int classify, 2045static int ata_eh_followup_srst_needed(struct ata_link *link,
2046 int rc, int classify,
1763 const unsigned int *classes) 2047 const unsigned int *classes)
1764{ 2048{
2049 if (link->flags & ATA_LFLAG_NO_SRST)
2050 return 0;
1765 if (rc == -EAGAIN) 2051 if (rc == -EAGAIN)
1766 return 1; 2052 return 1;
1767 if (rc != 0) 2053 if (rc != 0)
1768 return 0; 2054 return 0;
1769 if (classify && classes[0] == ATA_DEV_UNKNOWN) 2055 if ((link->ap->flags & ATA_FLAG_PMP) && ata_is_host_link(link))
2056 return 1;
2057 if (classify && !(link->flags & ATA_LFLAG_ASSUME_CLASS) &&
2058 classes[0] == ATA_DEV_UNKNOWN)
1770 return 1; 2059 return 1;
1771 return 0; 2060 return 0;
1772} 2061}
1773 2062
1774static int ata_eh_reset(struct ata_port *ap, int classify, 2063int ata_eh_reset(struct ata_link *link, int classify,
1775 ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2064 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
1776 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2065 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
1777{ 2066{
1778 struct ata_eh_context *ehc = &ap->eh_context; 2067 struct ata_port *ap = link->ap;
2068 struct ata_eh_context *ehc = &link->eh_context;
1779 unsigned int *classes = ehc->classes; 2069 unsigned int *classes = ehc->classes;
1780 int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2070 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
1781 int try = 0; 2071 int try = 0;
2072 struct ata_device *dev;
1782 unsigned long deadline; 2073 unsigned long deadline;
1783 unsigned int action; 2074 unsigned int action;
1784 ata_reset_fn_t reset; 2075 ata_reset_fn_t reset;
1785 int i, rc; 2076 unsigned long flags;
2077 int rc;
1786 2078
1787 /* about to reset */ 2079 /* about to reset */
1788 ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); 2080 spin_lock_irqsave(ap->lock, flags);
2081 ap->pflags |= ATA_PFLAG_RESETTING;
2082 spin_unlock_irqrestore(ap->lock, flags);
2083
2084 ata_eh_about_to_do(link, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1789 2085
1790 /* Determine which reset to use and record in ehc->i.action. 2086 /* Determine which reset to use and record in ehc->i.action.
1791 * prereset() may examine and modify it. 2087 * prereset() may examine and modify it.
1792 */ 2088 */
1793 action = ehc->i.action; 2089 action = ehc->i.action;
1794 ehc->i.action &= ~ATA_EH_RESET_MASK; 2090 ehc->i.action &= ~ATA_EH_RESET_MASK;
1795 if (softreset && (!hardreset || (!sata_set_spd_needed(ap) && 2091 if (softreset && (!hardreset || (!(link->flags & ATA_LFLAG_NO_SRST) &&
2092 !sata_set_spd_needed(link) &&
1796 !(action & ATA_EH_HARDRESET)))) 2093 !(action & ATA_EH_HARDRESET))))
1797 ehc->i.action |= ATA_EH_SOFTRESET; 2094 ehc->i.action |= ATA_EH_SOFTRESET;
1798 else 2095 else
1799 ehc->i.action |= ATA_EH_HARDRESET; 2096 ehc->i.action |= ATA_EH_HARDRESET;
1800 2097
1801 if (prereset) { 2098 if (prereset) {
1802 rc = prereset(ap, jiffies + ATA_EH_PRERESET_TIMEOUT); 2099 rc = prereset(link, jiffies + ATA_EH_PRERESET_TIMEOUT);
1803 if (rc) { 2100 if (rc) {
1804 if (rc == -ENOENT) { 2101 if (rc == -ENOENT) {
1805 ata_port_printk(ap, KERN_DEBUG, 2102 ata_link_printk(link, KERN_DEBUG,
1806 "port disabled. ignoring.\n"); 2103 "port disabled. ignoring.\n");
1807 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; 2104 ehc->i.action &= ~ATA_EH_RESET_MASK;
1808 2105
1809 for (i = 0; i < ATA_MAX_DEVICES; i++) 2106 ata_link_for_each_dev(dev, link)
1810 classes[i] = ATA_DEV_NONE; 2107 classes[dev->devno] = ATA_DEV_NONE;
1811 2108
1812 rc = 0; 2109 rc = 0;
1813 } else 2110 } else
1814 ata_port_printk(ap, KERN_ERR, 2111 ata_link_printk(link, KERN_ERR,
1815 "prereset failed (errno=%d)\n", rc); 2112 "prereset failed (errno=%d)\n", rc);
1816 goto out; 2113 goto out;
1817 } 2114 }
@@ -1824,8 +2121,8 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
1824 reset = softreset; 2121 reset = softreset;
1825 else { 2122 else {
1826 /* prereset told us not to reset, bang classes and return */ 2123 /* prereset told us not to reset, bang classes and return */
1827 for (i = 0; i < ATA_MAX_DEVICES; i++) 2124 ata_link_for_each_dev(dev, link)
1828 classes[i] = ATA_DEV_NONE; 2125 classes[dev->devno] = ATA_DEV_NONE;
1829 rc = 0; 2126 rc = 0;
1830 goto out; 2127 goto out;
1831 } 2128 }
@@ -1843,7 +2140,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
1843 2140
1844 /* shut up during boot probing */ 2141 /* shut up during boot probing */
1845 if (verbose) 2142 if (verbose)
1846 ata_port_printk(ap, KERN_INFO, "%s resetting port\n", 2143 ata_link_printk(link, KERN_INFO, "%s resetting link\n",
1847 reset == softreset ? "soft" : "hard"); 2144 reset == softreset ? "soft" : "hard");
1848 2145
1849 /* mark that this EH session started with reset */ 2146 /* mark that this EH session started with reset */
@@ -1852,49 +2149,54 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
1852 else 2149 else
1853 ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2150 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
1854 2151
1855 rc = ata_do_reset(ap, reset, classes, deadline); 2152 rc = ata_do_reset(link, reset, classes, deadline);
1856 2153
1857 if (reset == hardreset && 2154 if (reset == hardreset &&
1858 ata_eh_followup_srst_needed(rc, classify, classes)) { 2155 ata_eh_followup_srst_needed(link, rc, classify, classes)) {
1859 /* okay, let's do follow-up softreset */ 2156 /* okay, let's do follow-up softreset */
1860 reset = softreset; 2157 reset = softreset;
1861 2158
1862 if (!reset) { 2159 if (!reset) {
1863 ata_port_printk(ap, KERN_ERR, 2160 ata_link_printk(link, KERN_ERR,
1864 "follow-up softreset required " 2161 "follow-up softreset required "
1865 "but no softreset avaliable\n"); 2162 "but no softreset avaliable\n");
1866 rc = -EINVAL; 2163 rc = -EINVAL;
1867 goto out; 2164 goto out;
1868 } 2165 }
1869 2166
1870 ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK); 2167 ata_eh_about_to_do(link, NULL, ATA_EH_RESET_MASK);
1871 rc = ata_do_reset(ap, reset, classes, deadline); 2168 rc = ata_do_reset(link, reset, classes, deadline);
1872 2169
1873 if (rc == 0 && classify && 2170 if (rc == 0 && classify && classes[0] == ATA_DEV_UNKNOWN &&
1874 classes[0] == ATA_DEV_UNKNOWN) { 2171 !(link->flags & ATA_LFLAG_ASSUME_CLASS)) {
1875 ata_port_printk(ap, KERN_ERR, 2172 ata_link_printk(link, KERN_ERR,
1876 "classification failed\n"); 2173 "classification failed\n");
1877 rc = -EINVAL; 2174 rc = -EINVAL;
1878 goto out; 2175 goto out;
1879 } 2176 }
1880 } 2177 }
1881 2178
1882 if (rc && try < ARRAY_SIZE(ata_eh_reset_timeouts)) { 2179 /* if we skipped follow-up srst, clear rc */
2180 if (rc == -EAGAIN)
2181 rc = 0;
2182
2183 if (rc && rc != -ERESTART && try < ARRAY_SIZE(ata_eh_reset_timeouts)) {
1883 unsigned long now = jiffies; 2184 unsigned long now = jiffies;
1884 2185
1885 if (time_before(now, deadline)) { 2186 if (time_before(now, deadline)) {
1886 unsigned long delta = deadline - jiffies; 2187 unsigned long delta = deadline - jiffies;
1887 2188
1888 ata_port_printk(ap, KERN_WARNING, "reset failed " 2189 ata_link_printk(link, KERN_WARNING, "reset failed "
1889 "(errno=%d), retrying in %u secs\n", 2190 "(errno=%d), retrying in %u secs\n",
1890 rc, (jiffies_to_msecs(delta) + 999) / 1000); 2191 rc, (jiffies_to_msecs(delta) + 999) / 1000);
1891 2192
1892 schedule_timeout_uninterruptible(delta); 2193 while (delta)
2194 delta = schedule_timeout_uninterruptible(delta);
1893 } 2195 }
1894 2196
1895 if (rc == -EPIPE || 2197 if (rc == -EPIPE ||
1896 try == ARRAY_SIZE(ata_eh_reset_timeouts) - 1) 2198 try == ARRAY_SIZE(ata_eh_reset_timeouts) - 1)
1897 sata_down_spd_limit(ap); 2199 sata_down_spd_limit(link);
1898 if (hardreset) 2200 if (hardreset)
1899 reset = hardreset; 2201 reset = hardreset;
1900 goto retry; 2202 goto retry;
@@ -1903,37 +2205,56 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
1903 if (rc == 0) { 2205 if (rc == 0) {
1904 u32 sstatus; 2206 u32 sstatus;
1905 2207
1906 /* After the reset, the device state is PIO 0 and the 2208 ata_link_for_each_dev(dev, link) {
1907 * controller state is undefined. Record the mode. 2209 /* After the reset, the device state is PIO 0
1908 */ 2210 * and the controller state is undefined.
1909 for (i = 0; i < ATA_MAX_DEVICES; i++) 2211 * Record the mode.
1910 ap->device[i].pio_mode = XFER_PIO_0; 2212 */
2213 dev->pio_mode = XFER_PIO_0;
2214
2215 if (ata_link_offline(link))
2216 continue;
2217
2218 /* apply class override and convert UNKNOWN to NONE */
2219 if (link->flags & ATA_LFLAG_ASSUME_ATA)
2220 classes[dev->devno] = ATA_DEV_ATA;
2221 else if (link->flags & ATA_LFLAG_ASSUME_SEMB)
2222 classes[dev->devno] = ATA_DEV_SEMB_UNSUP; /* not yet */
2223 else if (classes[dev->devno] == ATA_DEV_UNKNOWN)
2224 classes[dev->devno] = ATA_DEV_NONE;
2225 }
1911 2226
1912 /* record current link speed */ 2227 /* record current link speed */
1913 if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0) 2228 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
1914 ap->sata_spd = (sstatus >> 4) & 0xf; 2229 link->sata_spd = (sstatus >> 4) & 0xf;
1915 2230
1916 if (postreset) 2231 if (postreset)
1917 postreset(ap, classes); 2232 postreset(link, classes);
1918 2233
1919 /* reset successful, schedule revalidation */ 2234 /* reset successful, schedule revalidation */
1920 ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); 2235 ata_eh_done(link, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1921 ehc->i.action |= ATA_EH_REVALIDATE; 2236 ehc->i.action |= ATA_EH_REVALIDATE;
1922 } 2237 }
1923 out: 2238 out:
1924 /* clear hotplug flag */ 2239 /* clear hotplug flag */
1925 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2240 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2241
2242 spin_lock_irqsave(ap->lock, flags);
2243 ap->pflags &= ~ATA_PFLAG_RESETTING;
2244 spin_unlock_irqrestore(ap->lock, flags);
2245
1926 return rc; 2246 return rc;
1927} 2247}
1928 2248
1929static int ata_eh_revalidate_and_attach(struct ata_port *ap, 2249static int ata_eh_revalidate_and_attach(struct ata_link *link,
1930 struct ata_device **r_failed_dev) 2250 struct ata_device **r_failed_dev)
1931{ 2251{
1932 struct ata_eh_context *ehc = &ap->eh_context; 2252 struct ata_port *ap = link->ap;
2253 struct ata_eh_context *ehc = &link->eh_context;
1933 struct ata_device *dev; 2254 struct ata_device *dev;
1934 unsigned int new_mask = 0; 2255 unsigned int new_mask = 0;
1935 unsigned long flags; 2256 unsigned long flags;
1936 int i, rc = 0; 2257 int rc = 0;
1937 2258
1938 DPRINTK("ENTER\n"); 2259 DPRINTK("ENTER\n");
1939 2260
@@ -1941,27 +2262,28 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1941 * be done backwards such that PDIAG- is released by the slave 2262 * be done backwards such that PDIAG- is released by the slave
1942 * device before the master device is identified. 2263 * device before the master device is identified.
1943 */ 2264 */
1944 for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) { 2265 ata_link_for_each_dev_reverse(dev, link) {
1945 unsigned int action, readid_flags = 0; 2266 unsigned int action = ata_eh_dev_action(dev);
1946 2267 unsigned int readid_flags = 0;
1947 dev = &ap->device[i];
1948 action = ata_eh_dev_action(dev);
1949 2268
1950 if (ehc->i.flags & ATA_EHI_DID_RESET) 2269 if (ehc->i.flags & ATA_EHI_DID_RESET)
1951 readid_flags |= ATA_READID_POSTRESET; 2270 readid_flags |= ATA_READID_POSTRESET;
1952 2271
1953 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 2272 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
1954 if (ata_port_offline(ap)) { 2273 WARN_ON(dev->class == ATA_DEV_PMP);
2274
2275 if (ata_link_offline(link)) {
1955 rc = -EIO; 2276 rc = -EIO;
1956 goto err; 2277 goto err;
1957 } 2278 }
1958 2279
1959 ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE); 2280 ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
1960 rc = ata_dev_revalidate(dev, readid_flags); 2281 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
2282 readid_flags);
1961 if (rc) 2283 if (rc)
1962 goto err; 2284 goto err;
1963 2285
1964 ata_eh_done(ap, dev, ATA_EH_REVALIDATE); 2286 ata_eh_done(link, dev, ATA_EH_REVALIDATE);
1965 2287
1966 /* Configuration may have changed, reconfigure 2288 /* Configuration may have changed, reconfigure
1967 * transfer mode. 2289 * transfer mode.
@@ -1975,11 +2297,14 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1975 ata_class_enabled(ehc->classes[dev->devno])) { 2297 ata_class_enabled(ehc->classes[dev->devno])) {
1976 dev->class = ehc->classes[dev->devno]; 2298 dev->class = ehc->classes[dev->devno];
1977 2299
1978 rc = ata_dev_read_id(dev, &dev->class, readid_flags, 2300 if (dev->class == ATA_DEV_PMP)
1979 dev->id); 2301 rc = sata_pmp_attach(dev);
2302 else
2303 rc = ata_dev_read_id(dev, &dev->class,
2304 readid_flags, dev->id);
1980 switch (rc) { 2305 switch (rc) {
1981 case 0: 2306 case 0:
1982 new_mask |= 1 << i; 2307 new_mask |= 1 << dev->devno;
1983 break; 2308 break;
1984 case -ENOENT: 2309 case -ENOENT:
1985 /* IDENTIFY was issued to non-existent 2310 /* IDENTIFY was issued to non-existent
@@ -1997,16 +2322,16 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1997 } 2322 }
1998 2323
1999 /* PDIAG- should have been released, ask cable type if post-reset */ 2324 /* PDIAG- should have been released, ask cable type if post-reset */
2000 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ap->ops->cable_detect) 2325 if (ata_is_host_link(link) && ap->ops->cable_detect &&
2326 (ehc->i.flags & ATA_EHI_DID_RESET))
2001 ap->cbl = ap->ops->cable_detect(ap); 2327 ap->cbl = ap->ops->cable_detect(ap);
2002 2328
2003 /* Configure new devices forward such that user doesn't see 2329 /* Configure new devices forward such that user doesn't see
2004 * device detection messages backwards. 2330 * device detection messages backwards.
2005 */ 2331 */
2006 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2332 ata_link_for_each_dev(dev, link) {
2007 dev = &ap->device[i]; 2333 if (!(new_mask & (1 << dev->devno)) ||
2008 2334 dev->class == ATA_DEV_PMP)
2009 if (!(new_mask & (1 << i)))
2010 continue; 2335 continue;
2011 2336
2012 ehc->i.flags |= ATA_EHI_PRINTINFO; 2337 ehc->i.flags |= ATA_EHI_PRINTINFO;
@@ -2031,40 +2356,44 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
2031 return rc; 2356 return rc;
2032} 2357}
2033 2358
2034static int ata_port_nr_enabled(struct ata_port *ap) 2359static int ata_link_nr_enabled(struct ata_link *link)
2035{ 2360{
2036 int i, cnt = 0; 2361 struct ata_device *dev;
2362 int cnt = 0;
2037 2363
2038 for (i = 0; i < ATA_MAX_DEVICES; i++) 2364 ata_link_for_each_dev(dev, link)
2039 if (ata_dev_enabled(&ap->device[i])) 2365 if (ata_dev_enabled(dev))
2040 cnt++; 2366 cnt++;
2041 return cnt; 2367 return cnt;
2042} 2368}
2043 2369
2044static int ata_port_nr_vacant(struct ata_port *ap) 2370static int ata_link_nr_vacant(struct ata_link *link)
2045{ 2371{
2046 int i, cnt = 0; 2372 struct ata_device *dev;
2373 int cnt = 0;
2047 2374
2048 for (i = 0; i < ATA_MAX_DEVICES; i++) 2375 ata_link_for_each_dev(dev, link)
2049 if (ap->device[i].class == ATA_DEV_UNKNOWN) 2376 if (dev->class == ATA_DEV_UNKNOWN)
2050 cnt++; 2377 cnt++;
2051 return cnt; 2378 return cnt;
2052} 2379}
2053 2380
2054static int ata_eh_skip_recovery(struct ata_port *ap) 2381static int ata_eh_skip_recovery(struct ata_link *link)
2055{ 2382{
2056 struct ata_eh_context *ehc = &ap->eh_context; 2383 struct ata_eh_context *ehc = &link->eh_context;
2057 int i; 2384 struct ata_device *dev;
2385
2386 /* skip disabled links */
2387 if (link->flags & ATA_LFLAG_DISABLED)
2388 return 1;
2058 2389
2059 /* thaw frozen port, resume link and recover failed devices */ 2390 /* thaw frozen port, resume link and recover failed devices */
2060 if ((ap->pflags & ATA_PFLAG_FROZEN) || 2391 if ((link->ap->pflags & ATA_PFLAG_FROZEN) ||
2061 (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap)) 2392 (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_link_nr_enabled(link))
2062 return 0; 2393 return 0;
2063 2394
2064 /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 2395 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
2065 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2396 ata_link_for_each_dev(dev, link) {
2066 struct ata_device *dev = &ap->device[i];
2067
2068 if (dev->class == ATA_DEV_UNKNOWN && 2397 if (dev->class == ATA_DEV_UNKNOWN &&
2069 ehc->classes[dev->devno] != ATA_DEV_NONE) 2398 ehc->classes[dev->devno] != ATA_DEV_NONE)
2070 return 0; 2399 return 0;
@@ -2073,10 +2402,9 @@ static int ata_eh_skip_recovery(struct ata_port *ap)
2073 return 1; 2402 return 1;
2074} 2403}
2075 2404
2076static void ata_eh_handle_dev_fail(struct ata_device *dev, int err) 2405static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
2077{ 2406{
2078 struct ata_port *ap = dev->ap; 2407 struct ata_eh_context *ehc = &dev->link->eh_context;
2079 struct ata_eh_context *ehc = &ap->eh_context;
2080 2408
2081 ehc->tries[dev->devno]--; 2409 ehc->tries[dev->devno]--;
2082 2410
@@ -2092,7 +2420,7 @@ static void ata_eh_handle_dev_fail(struct ata_device *dev, int err)
2092 /* This is the last chance, better to slow 2420 /* This is the last chance, better to slow
2093 * down than lose it. 2421 * down than lose it.
2094 */ 2422 */
2095 sata_down_spd_limit(ap); 2423 sata_down_spd_limit(dev->link);
2096 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2424 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2097 } 2425 }
2098 } 2426 }
@@ -2102,7 +2430,7 @@ static void ata_eh_handle_dev_fail(struct ata_device *dev, int err)
2102 ata_dev_disable(dev); 2430 ata_dev_disable(dev);
2103 2431
2104 /* detach if offline */ 2432 /* detach if offline */
2105 if (ata_port_offline(ap)) 2433 if (ata_link_offline(dev->link))
2106 ata_eh_detach_dev(dev); 2434 ata_eh_detach_dev(dev);
2107 2435
2108 /* probe if requested */ 2436 /* probe if requested */
@@ -2115,12 +2443,16 @@ static void ata_eh_handle_dev_fail(struct ata_device *dev, int err)
2115 ehc->did_probe_mask |= (1 << dev->devno); 2443 ehc->did_probe_mask |= (1 << dev->devno);
2116 ehc->i.action |= ATA_EH_SOFTRESET; 2444 ehc->i.action |= ATA_EH_SOFTRESET;
2117 } 2445 }
2446
2447 return 1;
2118 } else { 2448 } else {
2119 /* soft didn't work? be haaaaard */ 2449 /* soft didn't work? be haaaaard */
2120 if (ehc->i.flags & ATA_EHI_DID_RESET) 2450 if (ehc->i.flags & ATA_EHI_DID_RESET)
2121 ehc->i.action |= ATA_EH_HARDRESET; 2451 ehc->i.action |= ATA_EH_HARDRESET;
2122 else 2452 else
2123 ehc->i.action |= ATA_EH_SOFTRESET; 2453 ehc->i.action |= ATA_EH_SOFTRESET;
2454
2455 return 0;
2124 } 2456 }
2125} 2457}
2126 2458
@@ -2131,12 +2463,13 @@ static void ata_eh_handle_dev_fail(struct ata_device *dev, int err)
2131 * @softreset: softreset method (can be NULL) 2463 * @softreset: softreset method (can be NULL)
2132 * @hardreset: hardreset method (can be NULL) 2464 * @hardreset: hardreset method (can be NULL)
2133 * @postreset: postreset method (can be NULL) 2465 * @postreset: postreset method (can be NULL)
2466 * @r_failed_link: out parameter for failed link
2134 * 2467 *
2135 * This is the alpha and omega, eum and yang, heart and soul of 2468 * This is the alpha and omega, eum and yang, heart and soul of
2136 * libata exception handling. On entry, actions required to 2469 * libata exception handling. On entry, actions required to
2137 * recover the port and hotplug requests are recorded in 2470 * recover each link and hotplug requests are recorded in the
2138 * eh_context. This function executes all the operations with 2471 * link's eh_context. This function executes all the operations
2139 * appropriate retrials and fallbacks to resurrect failed 2472 * with appropriate retrials and fallbacks to resurrect failed
2140 * devices, detach goners and greet newcomers. 2473 * devices, detach goners and greet newcomers.
2141 * 2474 *
2142 * LOCKING: 2475 * LOCKING:
@@ -2145,104 +2478,171 @@ static void ata_eh_handle_dev_fail(struct ata_device *dev, int err)
2145 * RETURNS: 2478 * RETURNS:
2146 * 0 on success, -errno on failure. 2479 * 0 on success, -errno on failure.
2147 */ 2480 */
2148static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 2481int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
2149 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 2482 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2150 ata_postreset_fn_t postreset) 2483 ata_postreset_fn_t postreset,
2484 struct ata_link **r_failed_link)
2151{ 2485{
2152 struct ata_eh_context *ehc = &ap->eh_context; 2486 struct ata_link *link;
2153 struct ata_device *dev; 2487 struct ata_device *dev;
2154 int i, rc; 2488 int nr_failed_devs, nr_disabled_devs;
2489 int reset, rc;
2490 unsigned long flags;
2155 2491
2156 DPRINTK("ENTER\n"); 2492 DPRINTK("ENTER\n");
2157 2493
2158 /* prep for recovery */ 2494 /* prep for recovery */
2159 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2495 ata_port_for_each_link(link, ap) {
2160 dev = &ap->device[i]; 2496 struct ata_eh_context *ehc = &link->eh_context;
2161 2497
2162 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 2498 /* re-enable link? */
2163 2499 if (ehc->i.action & ATA_EH_ENABLE_LINK) {
2164 /* collect port action mask recorded in dev actions */ 2500 ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
2165 ehc->i.action |= ehc->i.dev_action[i] & ~ATA_EH_PERDEV_MASK; 2501 spin_lock_irqsave(ap->lock, flags);
2166 ehc->i.dev_action[i] &= ATA_EH_PERDEV_MASK; 2502 link->flags &= ~ATA_LFLAG_DISABLED;
2167 2503 spin_unlock_irqrestore(ap->lock, flags);
2168 /* process hotplug request */ 2504 ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
2169 if (dev->flags & ATA_DFLAG_DETACH) 2505 }
2170 ata_eh_detach_dev(dev);
2171 2506
2172 if (!ata_dev_enabled(dev) && 2507 ata_link_for_each_dev(dev, link) {
2173 ((ehc->i.probe_mask & (1 << dev->devno)) && 2508 if (link->flags & ATA_LFLAG_NO_RETRY)
2174 !(ehc->did_probe_mask & (1 << dev->devno)))) { 2509 ehc->tries[dev->devno] = 1;
2175 ata_eh_detach_dev(dev); 2510 else
2176 ata_dev_init(dev); 2511 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
2177 ehc->did_probe_mask |= (1 << dev->devno); 2512
2178 ehc->i.action |= ATA_EH_SOFTRESET; 2513 /* collect port action mask recorded in dev actions */
2514 ehc->i.action |= ehc->i.dev_action[dev->devno] &
2515 ~ATA_EH_PERDEV_MASK;
2516 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
2517
2518 /* process hotplug request */
2519 if (dev->flags & ATA_DFLAG_DETACH)
2520 ata_eh_detach_dev(dev);
2521
2522 if (!ata_dev_enabled(dev) &&
2523 ((ehc->i.probe_mask & (1 << dev->devno)) &&
2524 !(ehc->did_probe_mask & (1 << dev->devno)))) {
2525 ata_eh_detach_dev(dev);
2526 ata_dev_init(dev);
2527 ehc->did_probe_mask |= (1 << dev->devno);
2528 ehc->i.action |= ATA_EH_SOFTRESET;
2529 }
2179 } 2530 }
2180 } 2531 }
2181 2532
2182 retry: 2533 retry:
2183 rc = 0; 2534 rc = 0;
2535 nr_failed_devs = 0;
2536 nr_disabled_devs = 0;
2537 reset = 0;
2184 2538
2185 /* if UNLOADING, finish immediately */ 2539 /* if UNLOADING, finish immediately */
2186 if (ap->pflags & ATA_PFLAG_UNLOADING) 2540 if (ap->pflags & ATA_PFLAG_UNLOADING)
2187 goto out; 2541 goto out;
2188 2542
2189 /* skip EH if possible. */ 2543 /* prep for EH */
2190 if (ata_eh_skip_recovery(ap)) 2544 ata_port_for_each_link(link, ap) {
2191 ehc->i.action = 0; 2545 struct ata_eh_context *ehc = &link->eh_context;
2192 2546
2193 for (i = 0; i < ATA_MAX_DEVICES; i++) 2547 /* skip EH if possible. */
2194 ehc->classes[i] = ATA_DEV_UNKNOWN; 2548 if (ata_eh_skip_recovery(link))
2549 ehc->i.action = 0;
2195 2550
2196 /* reset */ 2551 /* do we need to reset? */
2197 if (ehc->i.action & ATA_EH_RESET_MASK) { 2552 if (ehc->i.action & ATA_EH_RESET_MASK)
2198 ata_eh_freeze_port(ap); 2553 reset = 1;
2199 2554
2200 rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset, 2555 ata_link_for_each_dev(dev, link)
2201 softreset, hardreset, postreset); 2556 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
2202 if (rc) { 2557 }
2203 ata_port_printk(ap, KERN_ERR, 2558
2204 "reset failed, giving up\n"); 2559 /* reset */
2205 goto out; 2560 if (reset) {
2561 /* if PMP is attached, this function only deals with
2562 * downstream links, port should stay thawed.
2563 */
2564 if (!ap->nr_pmp_links)
2565 ata_eh_freeze_port(ap);
2566
2567 ata_port_for_each_link(link, ap) {
2568 struct ata_eh_context *ehc = &link->eh_context;
2569
2570 if (!(ehc->i.action & ATA_EH_RESET_MASK))
2571 continue;
2572
2573 rc = ata_eh_reset(link, ata_link_nr_vacant(link),
2574 prereset, softreset, hardreset,
2575 postreset);
2576 if (rc) {
2577 ata_link_printk(link, KERN_ERR,
2578 "reset failed, giving up\n");
2579 goto out;
2580 }
2206 } 2581 }
2207 2582
2208 ata_eh_thaw_port(ap); 2583 if (!ap->nr_pmp_links)
2584 ata_eh_thaw_port(ap);
2209 } 2585 }
2210 2586
2211 /* revalidate existing devices and attach new ones */ 2587 /* the rest */
2212 rc = ata_eh_revalidate_and_attach(ap, &dev); 2588 ata_port_for_each_link(link, ap) {
2213 if (rc) 2589 struct ata_eh_context *ehc = &link->eh_context;
2214 goto dev_fail;
2215 2590
2216 /* configure transfer mode if necessary */ 2591 /* revalidate existing devices and attach new ones */
2217 if (ehc->i.flags & ATA_EHI_SETMODE) { 2592 rc = ata_eh_revalidate_and_attach(link, &dev);
2218 rc = ata_set_mode(ap, &dev);
2219 if (rc) 2593 if (rc)
2220 goto dev_fail; 2594 goto dev_fail;
2221 ehc->i.flags &= ~ATA_EHI_SETMODE;
2222 }
2223 2595
2224 goto out; 2596 /* if PMP got attached, return, pmp EH will take care of it */
2597 if (link->device->class == ATA_DEV_PMP) {
2598 ehc->i.action = 0;
2599 return 0;
2600 }
2225 2601
2226 dev_fail: 2602 /* configure transfer mode if necessary */
2227 ata_eh_handle_dev_fail(dev, rc); 2603 if (ehc->i.flags & ATA_EHI_SETMODE) {
2604 rc = ata_set_mode(link, &dev);
2605 if (rc)
2606 goto dev_fail;
2607 ehc->i.flags &= ~ATA_EHI_SETMODE;
2608 }
2228 2609
2229 if (ata_port_nr_enabled(ap)) { 2610 /* this link is okay now */
2230 ata_port_printk(ap, KERN_WARNING, "failed to recover some " 2611 ehc->i.flags = 0;
2231 "devices, retrying in 5 secs\n"); 2612 continue;
2232 ssleep(5); 2613
2233 } else { 2614 dev_fail:
2234 /* no device left, repeat fast */ 2615 nr_failed_devs++;
2235 msleep(500); 2616 if (ata_eh_handle_dev_fail(dev, rc))
2617 nr_disabled_devs++;
2618
2619 if (ap->pflags & ATA_PFLAG_FROZEN) {
2620 /* PMP reset requires working host port.
2621 * Can't retry if it's frozen.
2622 */
2623 if (ap->nr_pmp_links)
2624 goto out;
2625 break;
2626 }
2236 } 2627 }
2237 2628
2238 goto retry; 2629 if (nr_failed_devs) {
2630 if (nr_failed_devs != nr_disabled_devs) {
2631 ata_port_printk(ap, KERN_WARNING, "failed to recover "
2632 "some devices, retrying in 5 secs\n");
2633 ssleep(5);
2634 } else {
2635 /* no device left to recover, repeat fast */
2636 msleep(500);
2637 }
2239 2638
2240 out: 2639 goto retry;
2241 if (rc) {
2242 for (i = 0; i < ATA_MAX_DEVICES; i++)
2243 ata_dev_disable(&ap->device[i]);
2244 } 2640 }
2245 2641
2642 out:
2643 if (rc && r_failed_link)
2644 *r_failed_link = link;
2645
2246 DPRINTK("EXIT, rc=%d\n", rc); 2646 DPRINTK("EXIT, rc=%d\n", rc);
2247 return rc; 2647 return rc;
2248} 2648}
@@ -2257,7 +2657,7 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
2257 * LOCKING: 2657 * LOCKING:
2258 * None. 2658 * None.
2259 */ 2659 */
2260static void ata_eh_finish(struct ata_port *ap) 2660void ata_eh_finish(struct ata_port *ap)
2261{ 2661{
2262 int tag; 2662 int tag;
2263 2663
@@ -2287,6 +2687,10 @@ static void ata_eh_finish(struct ata_port *ap)
2287 } 2687 }
2288 } 2688 }
2289 } 2689 }
2690
2691 /* make sure nr_active_links is zero after EH */
2692 WARN_ON(ap->nr_active_links);
2693 ap->nr_active_links = 0;
2290} 2694}
2291 2695
2292/** 2696/**
@@ -2306,9 +2710,19 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
2306 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 2710 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2307 ata_postreset_fn_t postreset) 2711 ata_postreset_fn_t postreset)
2308{ 2712{
2713 struct ata_device *dev;
2714 int rc;
2715
2309 ata_eh_autopsy(ap); 2716 ata_eh_autopsy(ap);
2310 ata_eh_report(ap); 2717 ata_eh_report(ap);
2311 ata_eh_recover(ap, prereset, softreset, hardreset, postreset); 2718
2719 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
2720 NULL);
2721 if (rc) {
2722 ata_link_for_each_dev(dev, &ap->link)
2723 ata_dev_disable(dev);
2724 }
2725
2312 ata_eh_finish(ap); 2726 ata_eh_finish(ap);
2313} 2727}
2314 2728
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
new file mode 100644
index 0000000000..c0c4dbcde0
--- /dev/null
+++ b/drivers/ata/libata-pmp.c
@@ -0,0 +1,1191 @@
1/*
2 * libata-pmp.c - libata port multiplier support
3 *
4 * Copyright (c) 2007 SUSE Linux Products GmbH
5 * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
6 *
7 * This file is released under the GPLv2.
8 */
9
10#include <linux/kernel.h>
11#include <linux/libata.h>
12#include "libata.h"
13
14/**
15 * sata_pmp_read - read PMP register
16 * @link: link to read PMP register for
17 * @reg: register to read
18 * @r_val: resulting value
19 *
20 * Read PMP register.
21 *
22 * LOCKING:
23 * Kernel thread context (may sleep).
24 *
25 * RETURNS:
26 * 0 on success, AC_ERR_* mask on failure.
27 */
28static unsigned int sata_pmp_read(struct ata_link *link, int reg, u32 *r_val)
29{
30 struct ata_port *ap = link->ap;
31 struct ata_device *pmp_dev = ap->link.device;
32 struct ata_taskfile tf;
33 unsigned int err_mask;
34
35 ata_tf_init(pmp_dev, &tf);
36 tf.command = ATA_CMD_PMP_READ;
37 tf.protocol = ATA_PROT_NODATA;
38 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
39 tf.feature = reg;
40 tf.device = link->pmp;
41
42 err_mask = ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
43 SATA_PMP_SCR_TIMEOUT);
44 if (err_mask)
45 return err_mask;
46
47 *r_val = tf.nsect | tf.lbal << 8 | tf.lbam << 16 | tf.lbah << 24;
48 return 0;
49}
50
51/**
52 * sata_pmp_write - write PMP register
53 * @link: link to write PMP register for
54 * @reg: register to write
55 * @r_val: value to write
56 *
57 * Write PMP register.
58 *
59 * LOCKING:
60 * Kernel thread context (may sleep).
61 *
62 * RETURNS:
63 * 0 on success, AC_ERR_* mask on failure.
64 */
65static unsigned int sata_pmp_write(struct ata_link *link, int reg, u32 val)
66{
67 struct ata_port *ap = link->ap;
68 struct ata_device *pmp_dev = ap->link.device;
69 struct ata_taskfile tf;
70
71 ata_tf_init(pmp_dev, &tf);
72 tf.command = ATA_CMD_PMP_WRITE;
73 tf.protocol = ATA_PROT_NODATA;
74 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
75 tf.feature = reg;
76 tf.device = link->pmp;
77 tf.nsect = val & 0xff;
78 tf.lbal = (val >> 8) & 0xff;
79 tf.lbam = (val >> 16) & 0xff;
80 tf.lbah = (val >> 24) & 0xff;
81
82 return ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
83 SATA_PMP_SCR_TIMEOUT);
84}
85
86/**
87 * sata_pmp_qc_defer_cmd_switch - qc_defer for command switching PMP
88 * @qc: ATA command in question
89 *
90 * A host which has command switching PMP support cannot issue
91 * commands to multiple links simultaneously.
92 *
93 * LOCKING:
94 * spin_lock_irqsave(host lock)
95 *
96 * RETURNS:
97 * ATA_DEFER_* if deferring is needed, 0 otherwise.
98 */
99int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc)
100{
101 struct ata_link *link = qc->dev->link;
102 struct ata_port *ap = link->ap;
103
104 if (ap->excl_link == NULL || ap->excl_link == link) {
105 if (ap->nr_active_links == 0 || ata_link_active(link)) {
106 qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
107 return ata_std_qc_defer(qc);
108 }
109
110 ap->excl_link = link;
111 }
112
113 return ATA_DEFER_PORT;
114}
115
116/**
117 * sata_pmp_scr_read - read PSCR
118 * @link: ATA link to read PSCR for
119 * @reg: PSCR to read
120 * @r_val: resulting value
121 *
122 * Read PSCR @reg into @r_val for @link, to be called from
123 * ata_scr_read().
124 *
125 * LOCKING:
126 * Kernel thread context (may sleep).
127 *
128 * RETURNS:
129 * 0 on success, -errno on failure.
130 */
131int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *r_val)
132{
133 unsigned int err_mask;
134
135 if (reg > SATA_PMP_PSCR_CONTROL)
136 return -EINVAL;
137
138 err_mask = sata_pmp_read(link, reg, r_val);
139 if (err_mask) {
140 ata_link_printk(link, KERN_WARNING, "failed to read SCR %d "
141 "(Emask=0x%x)\n", reg, err_mask);
142 return -EIO;
143 }
144 return 0;
145}
146
147/**
148 * sata_pmp_scr_write - write PSCR
149 * @link: ATA link to write PSCR for
150 * @reg: PSCR to write
151 * @val: value to be written
152 *
153 * Write @val to PSCR @reg for @link, to be called from
154 * ata_scr_write() and ata_scr_write_flush().
155 *
156 * LOCKING:
157 * Kernel thread context (may sleep).
158 *
159 * RETURNS:
160 * 0 on success, -errno on failure.
161 */
162int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val)
163{
164 unsigned int err_mask;
165
166 if (reg > SATA_PMP_PSCR_CONTROL)
167 return -EINVAL;
168
169 err_mask = sata_pmp_write(link, reg, val);
170 if (err_mask) {
171 ata_link_printk(link, KERN_WARNING, "failed to write SCR %d "
172 "(Emask=0x%x)\n", reg, err_mask);
173 return -EIO;
174 }
175 return 0;
176}
177
178/**
179 * sata_pmp_std_prereset - prepare PMP link for reset
180 * @link: link to be reset
181 * @deadline: deadline jiffies for the operation
182 *
183 * @link is about to be reset. Initialize it.
184 *
185 * LOCKING:
186 * Kernel thread context (may sleep)
187 *
188 * RETURNS:
189 * 0 on success, -errno otherwise.
190 */
191int sata_pmp_std_prereset(struct ata_link *link, unsigned long deadline)
192{
193 struct ata_eh_context *ehc = &link->eh_context;
194 const unsigned long *timing = sata_ehc_deb_timing(ehc);
195 int rc;
196
197 /* force HRST? */
198 if (link->flags & ATA_LFLAG_NO_SRST)
199 ehc->i.action |= ATA_EH_HARDRESET;
200
201 /* handle link resume */
202 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
203 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
204 ehc->i.action |= ATA_EH_HARDRESET;
205
206 /* if we're about to do hardreset, nothing more to do */
207 if (ehc->i.action & ATA_EH_HARDRESET)
208 return 0;
209
210 /* resume link */
211 rc = sata_link_resume(link, timing, deadline);
212 if (rc) {
213 /* phy resume failed */
214 ata_link_printk(link, KERN_WARNING, "failed to resume link "
215 "for reset (errno=%d)\n", rc);
216 return rc;
217 }
218
219 /* clear SError bits including .X which blocks the port when set */
220 rc = sata_scr_write(link, SCR_ERROR, 0xffffffff);
221 if (rc) {
222 ata_link_printk(link, KERN_ERR,
223 "failed to clear SError (errno=%d)\n", rc);
224 return rc;
225 }
226
227 return 0;
228}
229
230/**
231 * sata_pmp_std_hardreset - standard hardreset method for PMP link
232 * @link: link to be reset
233 * @class: resulting class of attached device
234 * @deadline: deadline jiffies for the operation
235 *
236 * Hardreset PMP port @link. Note that this function doesn't
237 * wait for BSY clearance. There simply isn't a generic way to
238 * wait the event. Instead, this function return -EAGAIN thus
239 * telling libata-EH to followup with softreset.
240 *
241 * LOCKING:
242 * Kernel thread context (may sleep)
243 *
244 * RETURNS:
245 * 0 on success, -errno otherwise.
246 */
247int sata_pmp_std_hardreset(struct ata_link *link, unsigned int *class,
248 unsigned long deadline)
249{
250 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
251 u32 tmp;
252 int rc;
253
254 DPRINTK("ENTER\n");
255
256 /* do hardreset */
257 rc = sata_link_hardreset(link, timing, deadline);
258 if (rc) {
259 ata_link_printk(link, KERN_ERR,
260 "COMRESET failed (errno=%d)\n", rc);
261 goto out;
262 }
263
264 /* clear SError bits including .X which blocks the port when set */
265 rc = sata_scr_write(link, SCR_ERROR, 0xffffffff);
266 if (rc) {
267 ata_link_printk(link, KERN_ERR, "failed to clear SError "
268 "during hardreset (errno=%d)\n", rc);
269 goto out;
270 }
271
272 /* if device is present, follow up with srst to wait for !BSY */
273 if (ata_link_online(link))
274 rc = -EAGAIN;
275 out:
276 /* if SCR isn't accessible, we need to reset the PMP */
277 if (rc && rc != -EAGAIN && sata_scr_read(link, SCR_STATUS, &tmp))
278 rc = -ERESTART;
279
280 DPRINTK("EXIT, rc=%d\n", rc);
281 return rc;
282}
283
284/**
285 * ata_std_postreset - standard postreset method for PMP link
286 * @link: the target ata_link
287 * @classes: classes of attached devices
288 *
289 * This function is invoked after a successful reset. Note that
290 * the device might have been reset more than once using
291 * different reset methods before postreset is invoked.
292 *
293 * LOCKING:
294 * Kernel thread context (may sleep)
295 */
296void sata_pmp_std_postreset(struct ata_link *link, unsigned int *class)
297{
298 u32 serror;
299
300 DPRINTK("ENTER\n");
301
302 /* clear SError */
303 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
304 sata_scr_write(link, SCR_ERROR, serror);
305
306 /* print link status */
307 sata_print_link_status(link);
308
309 DPRINTK("EXIT\n");
310}
311
312/**
313 * sata_pmp_read_gscr - read GSCR block of SATA PMP
314 * @dev: PMP device
315 * @gscr: buffer to read GSCR block into
316 *
317 * Read selected PMP GSCRs from the PMP at @dev. This will serve
318 * as configuration and identification info for the PMP.
319 *
320 * LOCKING:
321 * Kernel thread context (may sleep).
322 *
323 * RETURNS:
324 * 0 on success, -errno on failure.
325 */
326static int sata_pmp_read_gscr(struct ata_device *dev, u32 *gscr)
327{
328 static const int gscr_to_read[] = { 0, 1, 2, 32, 33, 64, 96 };
329 int i;
330
331 for (i = 0; i < ARRAY_SIZE(gscr_to_read); i++) {
332 int reg = gscr_to_read[i];
333 unsigned int err_mask;
334
335 err_mask = sata_pmp_read(dev->link, reg, &gscr[reg]);
336 if (err_mask) {
337 ata_dev_printk(dev, KERN_ERR, "failed to read PMP "
338 "GSCR[%d] (Emask=0x%x)\n", reg, err_mask);
339 return -EIO;
340 }
341 }
342
343 return 0;
344}
345
346static const char *sata_pmp_spec_rev_str(const u32 *gscr)
347{
348 u32 rev = gscr[SATA_PMP_GSCR_REV];
349
350 if (rev & (1 << 2))
351 return "1.1";
352 if (rev & (1 << 1))
353 return "1.0";
354 return "<unknown>";
355}
356
357static int sata_pmp_configure(struct ata_device *dev, int print_info)
358{
359 struct ata_port *ap = dev->link->ap;
360 u32 *gscr = dev->gscr;
361 unsigned int err_mask = 0;
362 const char *reason;
363 int nr_ports, rc;
364
365 nr_ports = sata_pmp_gscr_ports(gscr);
366
367 if (nr_ports <= 0 || nr_ports > SATA_PMP_MAX_PORTS) {
368 rc = -EINVAL;
369 reason = "invalid nr_ports";
370 goto fail;
371 }
372
373 if ((ap->flags & ATA_FLAG_AN) &&
374 (gscr[SATA_PMP_GSCR_FEAT] & SATA_PMP_FEAT_NOTIFY))
375 dev->flags |= ATA_DFLAG_AN;
376
377 /* monitor SERR_PHYRDY_CHG on fan-out ports */
378 err_mask = sata_pmp_write(dev->link, SATA_PMP_GSCR_ERROR_EN,
379 SERR_PHYRDY_CHG);
380 if (err_mask) {
381 rc = -EIO;
382 reason = "failed to write GSCR_ERROR_EN";
383 goto fail;
384 }
385
386 /* turn off notification till fan-out ports are reset and configured */
387 if (gscr[SATA_PMP_GSCR_FEAT_EN] & SATA_PMP_FEAT_NOTIFY) {
388 gscr[SATA_PMP_GSCR_FEAT_EN] &= ~SATA_PMP_FEAT_NOTIFY;
389
390 err_mask = sata_pmp_write(dev->link, SATA_PMP_GSCR_FEAT_EN,
391 gscr[SATA_PMP_GSCR_FEAT_EN]);
392 if (err_mask) {
393 rc = -EIO;
394 reason = "failed to write GSCR_FEAT_EN";
395 goto fail;
396 }
397 }
398
399 if (print_info) {
400 ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, "
401 "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
402 sata_pmp_spec_rev_str(gscr),
403 sata_pmp_gscr_vendor(gscr),
404 sata_pmp_gscr_devid(gscr),
405 sata_pmp_gscr_rev(gscr),
406 nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
407 gscr[SATA_PMP_GSCR_FEAT]);
408
409 if (!(dev->flags & ATA_DFLAG_AN))
410 ata_dev_printk(dev, KERN_INFO,
411 "Asynchronous notification not supported, "
412 "hotplug won't\n work on fan-out "
413 "ports. Use warm-plug instead.\n");
414 }
415
416 return 0;
417
418 fail:
419 ata_dev_printk(dev, KERN_ERR,
420 "failed to configure Port Multiplier (%s, Emask=0x%x)\n",
421 reason, err_mask);
422 return rc;
423}
424
425static int sata_pmp_init_links(struct ata_port *ap, int nr_ports)
426{
427 struct ata_link *pmp_link = ap->pmp_link;
428 int i;
429
430 if (!pmp_link) {
431 pmp_link = kzalloc(sizeof(pmp_link[0]) * SATA_PMP_MAX_PORTS,
432 GFP_NOIO);
433 if (!pmp_link)
434 return -ENOMEM;
435
436 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
437 ata_link_init(ap, &pmp_link[i], i);
438
439 ap->pmp_link = pmp_link;
440 }
441
442 for (i = 0; i < nr_ports; i++) {
443 struct ata_link *link = &pmp_link[i];
444 struct ata_eh_context *ehc = &link->eh_context;
445
446 link->flags = 0;
447 ehc->i.probe_mask |= 1;
448 ehc->i.action |= ATA_EH_SOFTRESET;
449 ehc->i.flags |= ATA_EHI_RESUME_LINK;
450 }
451
452 return 0;
453}
454
455static void sata_pmp_quirks(struct ata_port *ap)
456{
457 u32 *gscr = ap->link.device->gscr;
458 u16 vendor = sata_pmp_gscr_vendor(gscr);
459 u16 devid = sata_pmp_gscr_devid(gscr);
460 struct ata_link *link;
461
462 if (vendor == 0x1095 && devid == 0x3726) {
463 /* sil3726 quirks */
464 ata_port_for_each_link(link, ap) {
465 /* SError.N need a kick in the ass to get working */
466 link->flags |= ATA_LFLAG_HRST_TO_RESUME;
467
468 /* class code report is unreliable */
469 if (link->pmp < 5)
470 link->flags |= ATA_LFLAG_ASSUME_ATA;
471
472 /* port 5 is for SEMB device and it doesn't like SRST */
473 if (link->pmp == 5)
474 link->flags |= ATA_LFLAG_NO_SRST |
475 ATA_LFLAG_ASSUME_SEMB;
476 }
477 } else if (vendor == 0x1095 && devid == 0x4723) {
478 /* sil4723 quirks */
479 ata_port_for_each_link(link, ap) {
480 /* SError.N need a kick in the ass to get working */
481 link->flags |= ATA_LFLAG_HRST_TO_RESUME;
482
483 /* class code report is unreliable */
484 if (link->pmp < 2)
485 link->flags |= ATA_LFLAG_ASSUME_ATA;
486
487 /* the config device at port 2 locks up on SRST */
488 if (link->pmp == 2)
489 link->flags |= ATA_LFLAG_NO_SRST |
490 ATA_LFLAG_ASSUME_ATA;
491 }
492 } else if (vendor == 0x1095 && devid == 0x4726) {
493 /* sil4726 quirks */
494 ata_port_for_each_link(link, ap) {
495 /* SError.N need a kick in the ass to get working */
496 link->flags |= ATA_LFLAG_HRST_TO_RESUME;
497
498 /* class code report is unreliable */
499 if (link->pmp < 5)
500 link->flags |= ATA_LFLAG_ASSUME_ATA;
501
502 /* The config device, which can be either at
503 * port 0 or 5, locks up on SRST.
504 */
505 if (link->pmp == 0 || link->pmp == 5)
506 link->flags |= ATA_LFLAG_NO_SRST |
507 ATA_LFLAG_ASSUME_ATA;
508
509 /* Port 6 is for SEMB device which doesn't
510 * like SRST either.
511 */
512 if (link->pmp == 6)
513 link->flags |= ATA_LFLAG_NO_SRST |
514 ATA_LFLAG_ASSUME_SEMB;
515 }
516 } else if (vendor == 0x1095 && (devid == 0x5723 || devid == 0x5733 ||
517 devid == 0x5734 || devid == 0x5744)) {
518 /* sil5723/5744 quirks */
519
520 /* sil5723/5744 has either two or three downstream
521 * ports depending on operation mode. The last port
522 * is empty if any actual IO device is available or
523 * occupied by a pseudo configuration device
524 * otherwise. Don't try hard to recover it.
525 */
526 ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
527 } else if (vendor == 0x11ab && devid == 0x4140) {
528 /* Marvell 88SM4140 quirks. Fan-out ports require PHY
529 * reset to work; other than that, it behaves very
530 * nicely.
531 */
532 ata_port_for_each_link(link, ap)
533 link->flags |= ATA_LFLAG_HRST_TO_RESUME;
534 }
535}
536
537/**
538 * sata_pmp_attach - attach a SATA PMP device
539 * @dev: SATA PMP device to attach
540 *
541 * Configure and attach SATA PMP device @dev. This function is
542 * also responsible for allocating and initializing PMP links.
543 *
544 * LOCKING:
545 * Kernel thread context (may sleep).
546 *
547 * RETURNS:
548 * 0 on success, -errno on failure.
549 */
550int sata_pmp_attach(struct ata_device *dev)
551{
552 struct ata_link *link = dev->link;
553 struct ata_port *ap = link->ap;
554 unsigned long flags;
555 struct ata_link *tlink;
556 int rc;
557
558 /* is it hanging off the right place? */
559 if (!(ap->flags & ATA_FLAG_PMP)) {
560 ata_dev_printk(dev, KERN_ERR,
561 "host does not support Port Multiplier\n");
562 return -EINVAL;
563 }
564
565 if (!ata_is_host_link(link)) {
566 ata_dev_printk(dev, KERN_ERR,
567 "Port Multipliers cannot be nested\n");
568 return -EINVAL;
569 }
570
571 if (dev->devno) {
572 ata_dev_printk(dev, KERN_ERR,
573 "Port Multiplier must be the first device\n");
574 return -EINVAL;
575 }
576
577 WARN_ON(link->pmp != 0);
578 link->pmp = SATA_PMP_CTRL_PORT;
579
580 /* read GSCR block */
581 rc = sata_pmp_read_gscr(dev, dev->gscr);
582 if (rc)
583 goto fail;
584
585 /* config PMP */
586 rc = sata_pmp_configure(dev, 1);
587 if (rc)
588 goto fail;
589
590 rc = sata_pmp_init_links(ap, sata_pmp_gscr_ports(dev->gscr));
591 if (rc) {
592 ata_dev_printk(dev, KERN_INFO,
593 "failed to initialize PMP links\n");
594 goto fail;
595 }
596
597 /* attach it */
598 spin_lock_irqsave(ap->lock, flags);
599 WARN_ON(ap->nr_pmp_links);
600 ap->nr_pmp_links = sata_pmp_gscr_ports(dev->gscr);
601 spin_unlock_irqrestore(ap->lock, flags);
602
603 sata_pmp_quirks(ap);
604
605 if (ap->ops->pmp_attach)
606 ap->ops->pmp_attach(ap);
607
608 ata_port_for_each_link(tlink, ap)
609 sata_link_init_spd(tlink);
610
611 ata_acpi_associate_sata_port(ap);
612
613 return 0;
614
615 fail:
616 link->pmp = 0;
617 return rc;
618}
619
620/**
621 * sata_pmp_detach - detach a SATA PMP device
622 * @dev: SATA PMP device to detach
623 *
624 * Detach SATA PMP device @dev. This function is also
625 * responsible for deconfiguring PMP links.
626 *
627 * LOCKING:
628 * Kernel thread context (may sleep).
629 */
630static void sata_pmp_detach(struct ata_device *dev)
631{
632 struct ata_link *link = dev->link;
633 struct ata_port *ap = link->ap;
634 struct ata_link *tlink;
635 unsigned long flags;
636
637 ata_dev_printk(dev, KERN_INFO, "Port Multiplier detaching\n");
638
639 WARN_ON(!ata_is_host_link(link) || dev->devno ||
640 link->pmp != SATA_PMP_CTRL_PORT);
641
642 if (ap->ops->pmp_detach)
643 ap->ops->pmp_detach(ap);
644
645 ata_port_for_each_link(tlink, ap)
646 ata_eh_detach_dev(tlink->device);
647
648 spin_lock_irqsave(ap->lock, flags);
649 ap->nr_pmp_links = 0;
650 link->pmp = 0;
651 spin_unlock_irqrestore(ap->lock, flags);
652
653 ata_acpi_associate_sata_port(ap);
654}
655
656/**
657 * sata_pmp_same_pmp - does new GSCR matches the configured PMP?
658 * @dev: PMP device to compare against
659 * @new_gscr: GSCR block of the new device
660 *
661 * Compare @new_gscr against @dev and determine whether @dev is
662 * the PMP described by @new_gscr.
663 *
664 * LOCKING:
665 * None.
666 *
667 * RETURNS:
668 * 1 if @dev matches @new_gscr, 0 otherwise.
669 */
670static int sata_pmp_same_pmp(struct ata_device *dev, const u32 *new_gscr)
671{
672 const u32 *old_gscr = dev->gscr;
673 u16 old_vendor, new_vendor, old_devid, new_devid;
674 int old_nr_ports, new_nr_ports;
675
676 old_vendor = sata_pmp_gscr_vendor(old_gscr);
677 new_vendor = sata_pmp_gscr_vendor(new_gscr);
678 old_devid = sata_pmp_gscr_devid(old_gscr);
679 new_devid = sata_pmp_gscr_devid(new_gscr);
680 old_nr_ports = sata_pmp_gscr_ports(old_gscr);
681 new_nr_ports = sata_pmp_gscr_ports(new_gscr);
682
683 if (old_vendor != new_vendor) {
684 ata_dev_printk(dev, KERN_INFO, "Port Multiplier "
685 "vendor mismatch '0x%x' != '0x%x'\n",
686 old_vendor, new_vendor);
687 return 0;
688 }
689
690 if (old_devid != new_devid) {
691 ata_dev_printk(dev, KERN_INFO, "Port Multiplier "
692 "device ID mismatch '0x%x' != '0x%x'\n",
693 old_devid, new_devid);
694 return 0;
695 }
696
697 if (old_nr_ports != new_nr_ports) {
698 ata_dev_printk(dev, KERN_INFO, "Port Multiplier "
699 "nr_ports mismatch '0x%x' != '0x%x'\n",
700 old_nr_ports, new_nr_ports);
701 return 0;
702 }
703
704 return 1;
705}
706
707/**
708 * sata_pmp_revalidate - revalidate SATA PMP
709 * @dev: PMP device to revalidate
710 * @new_class: new class code
711 *
712 * Re-read GSCR block and make sure @dev is still attached to the
713 * port and properly configured.
714 *
715 * LOCKING:
716 * Kernel thread context (may sleep).
717 *
718 * RETURNS:
719 * 0 on success, -errno otherwise.
720 */
721static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class)
722{
723 struct ata_link *link = dev->link;
724 struct ata_port *ap = link->ap;
725 u32 *gscr = (void *)ap->sector_buf;
726 int rc;
727
728 DPRINTK("ENTER\n");
729
730 ata_eh_about_to_do(link, NULL, ATA_EH_REVALIDATE);
731
732 if (!ata_dev_enabled(dev)) {
733 rc = -ENODEV;
734 goto fail;
735 }
736
737 /* wrong class? */
738 if (ata_class_enabled(new_class) && new_class != ATA_DEV_PMP) {
739 rc = -ENODEV;
740 goto fail;
741 }
742
743 /* read GSCR */
744 rc = sata_pmp_read_gscr(dev, gscr);
745 if (rc)
746 goto fail;
747
748 /* is the pmp still there? */
749 if (!sata_pmp_same_pmp(dev, gscr)) {
750 rc = -ENODEV;
751 goto fail;
752 }
753
754 memcpy(dev->gscr, gscr, sizeof(gscr[0]) * SATA_PMP_GSCR_DWORDS);
755
756 rc = sata_pmp_configure(dev, 0);
757 if (rc)
758 goto fail;
759
760 ata_eh_done(link, NULL, ATA_EH_REVALIDATE);
761
762 DPRINTK("EXIT, rc=0\n");
763 return 0;
764
765 fail:
766 ata_dev_printk(dev, KERN_ERR,
767 "PMP revalidation failed (errno=%d)\n", rc);
768 DPRINTK("EXIT, rc=%d\n", rc);
769 return rc;
770}
771
772/**
773 * sata_pmp_revalidate_quick - revalidate SATA PMP quickly
774 * @dev: PMP device to revalidate
775 *
776 * Make sure the attached PMP is accessible.
777 *
778 * LOCKING:
779 * Kernel thread context (may sleep).
780 *
781 * RETURNS:
782 * 0 on success, -errno otherwise.
783 */
784static int sata_pmp_revalidate_quick(struct ata_device *dev)
785{
786 unsigned int err_mask;
787 u32 prod_id;
788
789 err_mask = sata_pmp_read(dev->link, SATA_PMP_GSCR_PROD_ID, &prod_id);
790 if (err_mask) {
791 ata_dev_printk(dev, KERN_ERR, "failed to read PMP product ID "
792 "(Emask=0x%x)\n", err_mask);
793 return -EIO;
794 }
795
796 if (prod_id != dev->gscr[SATA_PMP_GSCR_PROD_ID]) {
797 ata_dev_printk(dev, KERN_ERR, "PMP product ID mismatch\n");
798 /* something weird is going on, request full PMP recovery */
799 return -EIO;
800 }
801
802 return 0;
803}
804
805/**
806 * sata_pmp_eh_recover_pmp - recover PMP
807 * @ap: ATA port PMP is attached to
808 * @prereset: prereset method (can be NULL)
809 * @softreset: softreset method
810 * @hardreset: hardreset method
811 * @postreset: postreset method (can be NULL)
812 *
813 * Recover PMP attached to @ap. Recovery procedure is somewhat
814 * similar to that of ata_eh_recover() except that reset should
815 * always be performed in hard->soft sequence and recovery
816 * failure results in PMP detachment.
817 *
818 * LOCKING:
819 * Kernel thread context (may sleep).
820 *
821 * RETURNS:
822 * 0 on success, -errno on failure.
823 */
824static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
825 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
826 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
827{
828 struct ata_link *link = &ap->link;
829 struct ata_eh_context *ehc = &link->eh_context;
830 struct ata_device *dev = link->device;
831 int tries = ATA_EH_PMP_TRIES;
832 int detach = 0, rc = 0;
833 int reval_failed = 0;
834
835 DPRINTK("ENTER\n");
836
837 if (dev->flags & ATA_DFLAG_DETACH) {
838 detach = 1;
839 goto fail;
840 }
841
842 retry:
843 ehc->classes[0] = ATA_DEV_UNKNOWN;
844
845 if (ehc->i.action & ATA_EH_RESET_MASK) {
846 struct ata_link *tlink;
847
848 ata_eh_freeze_port(ap);
849
850 /* reset */
851 ehc->i.action = ATA_EH_HARDRESET;
852 rc = ata_eh_reset(link, 0, prereset, softreset, hardreset,
853 postreset);
854 if (rc) {
855 ata_link_printk(link, KERN_ERR,
856 "failed to reset PMP, giving up\n");
857 goto fail;
858 }
859
860 ata_eh_thaw_port(ap);
861
862 /* PMP is reset, SErrors cannot be trusted, scan all */
863 ata_port_for_each_link(tlink, ap)
864 ata_ehi_schedule_probe(&tlink->eh_context.i);
865 }
866
867 /* If revalidation is requested, revalidate and reconfigure;
868 * otherwise, do quick revalidation.
869 */
870 if (ehc->i.action & ATA_EH_REVALIDATE)
871 rc = sata_pmp_revalidate(dev, ehc->classes[0]);
872 else
873 rc = sata_pmp_revalidate_quick(dev);
874
875 if (rc) {
876 tries--;
877
878 if (rc == -ENODEV) {
879 ehc->i.probe_mask |= 1;
880 detach = 1;
881 /* give it just two more chances */
882 tries = min(tries, 2);
883 }
884
885 if (tries) {
886 int sleep = ehc->i.flags & ATA_EHI_DID_RESET;
887
888 /* consecutive revalidation failures? speed down */
889 if (reval_failed)
890 sata_down_spd_limit(link);
891 else
892 reval_failed = 1;
893
894 ata_dev_printk(dev, KERN_WARNING,
895 "retrying hardreset%s\n",
896 sleep ? " in 5 secs" : "");
897 if (sleep)
898 ssleep(5);
899 ehc->i.action |= ATA_EH_HARDRESET;
900 goto retry;
901 } else {
902 ata_dev_printk(dev, KERN_ERR, "failed to recover PMP "
903 "after %d tries, giving up\n",
904 ATA_EH_PMP_TRIES);
905 goto fail;
906 }
907 }
908
909 /* okay, PMP resurrected */
910 ehc->i.flags = 0;
911
912 DPRINTK("EXIT, rc=0\n");
913 return 0;
914
915 fail:
916 sata_pmp_detach(dev);
917 if (detach)
918 ata_eh_detach_dev(dev);
919 else
920 ata_dev_disable(dev);
921
922 DPRINTK("EXIT, rc=%d\n", rc);
923 return rc;
924}
925
926static int sata_pmp_eh_handle_disabled_links(struct ata_port *ap)
927{
928 struct ata_link *link;
929 unsigned long flags;
930 int rc;
931
932 spin_lock_irqsave(ap->lock, flags);
933
934 ata_port_for_each_link(link, ap) {
935 if (!(link->flags & ATA_LFLAG_DISABLED))
936 continue;
937
938 spin_unlock_irqrestore(ap->lock, flags);
939
940 /* Some PMPs require hardreset sequence to get
941 * SError.N working.
942 */
943 if ((link->flags & ATA_LFLAG_HRST_TO_RESUME) &&
944 (link->eh_context.i.flags & ATA_EHI_RESUME_LINK))
945 sata_link_hardreset(link, sata_deb_timing_normal,
946 jiffies + ATA_TMOUT_INTERNAL_QUICK);
947
948 /* unconditionally clear SError.N */
949 rc = sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
950 if (rc) {
951 ata_link_printk(link, KERN_ERR, "failed to clear "
952 "SError.N (errno=%d)\n", rc);
953 return rc;
954 }
955
956 spin_lock_irqsave(ap->lock, flags);
957 }
958
959 spin_unlock_irqrestore(ap->lock, flags);
960
961 return 0;
962}
963
964static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
965{
966 struct ata_port *ap = link->ap;
967 unsigned long flags;
968
969 if (link_tries[link->pmp] && --link_tries[link->pmp])
970 return 1;
971
972 /* disable this link */
973 if (!(link->flags & ATA_LFLAG_DISABLED)) {
974 ata_link_printk(link, KERN_WARNING,
975 "failed to recover link after %d tries, disabling\n",
976 ATA_EH_PMP_LINK_TRIES);
977
978 spin_lock_irqsave(ap->lock, flags);
979 link->flags |= ATA_LFLAG_DISABLED;
980 spin_unlock_irqrestore(ap->lock, flags);
981 }
982
983 ata_dev_disable(link->device);
984 link->eh_context.i.action = 0;
985
986 return 0;
987}
988
989/**
990 * sata_pmp_eh_recover - recover PMP-enabled port
991 * @ap: ATA port to recover
992 * @prereset: prereset method (can be NULL)
993 * @softreset: softreset method
994 * @hardreset: hardreset method
995 * @postreset: postreset method (can be NULL)
996 * @pmp_prereset: PMP prereset method (can be NULL)
997 * @pmp_softreset: PMP softreset method (can be NULL)
998 * @pmp_hardreset: PMP hardreset method (can be NULL)
999 * @pmp_postreset: PMP postreset method (can be NULL)
1000 *
1001 * Drive EH recovery operation for PMP enabled port @ap. This
1002 * function recovers host and PMP ports with proper retrials and
1003 * fallbacks. Actual recovery operations are performed using
1004 * ata_eh_recover() and sata_pmp_eh_recover_pmp().
1005 *
1006 * LOCKING:
1007 * Kernel thread context (may sleep).
1008 *
1009 * RETURNS:
1010 * 0 on success, -errno on failure.
1011 */
1012static int sata_pmp_eh_recover(struct ata_port *ap,
1013 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
1014 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset,
1015 ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset,
1016 ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset)
1017{
1018 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
1019 struct ata_link *pmp_link = &ap->link;
1020 struct ata_device *pmp_dev = pmp_link->device;
1021 struct ata_eh_context *pmp_ehc = &pmp_link->eh_context;
1022 struct ata_link *link;
1023 struct ata_device *dev;
1024 unsigned int err_mask;
1025 u32 gscr_error, sntf;
1026 int cnt, rc;
1027
1028 pmp_tries = ATA_EH_PMP_TRIES;
1029 ata_port_for_each_link(link, ap)
1030 link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES;
1031
1032 retry:
1033 /* PMP attached? */
1034 if (!ap->nr_pmp_links) {
1035 rc = ata_eh_recover(ap, prereset, softreset, hardreset,
1036 postreset, NULL);
1037 if (rc) {
1038 ata_link_for_each_dev(dev, &ap->link)
1039 ata_dev_disable(dev);
1040 return rc;
1041 }
1042
1043 if (pmp_dev->class != ATA_DEV_PMP)
1044 return 0;
1045
1046 /* new PMP online */
1047 ata_port_for_each_link(link, ap)
1048 link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES;
1049
1050 /* fall through */
1051 }
1052
1053 /* recover pmp */
1054 rc = sata_pmp_eh_recover_pmp(ap, prereset, softreset, hardreset,
1055 postreset);
1056 if (rc)
1057 goto pmp_fail;
1058
1059 /* handle disabled links */
1060 rc = sata_pmp_eh_handle_disabled_links(ap);
1061 if (rc)
1062 goto pmp_fail;
1063
1064 /* recover links */
1065 rc = ata_eh_recover(ap, pmp_prereset, pmp_softreset, pmp_hardreset,
1066 pmp_postreset, &link);
1067 if (rc)
1068 goto link_fail;
1069
1070 /* Connection status might have changed while resetting other
1071 * links, check SATA_PMP_GSCR_ERROR before returning.
1072 */
1073
1074 /* clear SNotification */
1075 rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
1076 if (rc == 0)
1077 sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
1078
1079 /* enable notification */
1080 if (pmp_dev->flags & ATA_DFLAG_AN) {
1081 pmp_dev->gscr[SATA_PMP_GSCR_FEAT_EN] |= SATA_PMP_FEAT_NOTIFY;
1082
1083 err_mask = sata_pmp_write(pmp_dev->link, SATA_PMP_GSCR_FEAT_EN,
1084 pmp_dev->gscr[SATA_PMP_GSCR_FEAT_EN]);
1085 if (err_mask) {
1086 ata_dev_printk(pmp_dev, KERN_ERR, "failed to write "
1087 "PMP_FEAT_EN (Emask=0x%x)\n", err_mask);
1088 rc = -EIO;
1089 goto pmp_fail;
1090 }
1091 }
1092
1093 /* check GSCR_ERROR */
1094 err_mask = sata_pmp_read(pmp_link, SATA_PMP_GSCR_ERROR, &gscr_error);
1095 if (err_mask) {
1096 ata_dev_printk(pmp_dev, KERN_ERR, "failed to read "
1097 "PMP_GSCR_ERROR (Emask=0x%x)\n", err_mask);
1098 rc = -EIO;
1099 goto pmp_fail;
1100 }
1101
1102 cnt = 0;
1103 ata_port_for_each_link(link, ap) {
1104 if (!(gscr_error & (1 << link->pmp)))
1105 continue;
1106
1107 if (sata_pmp_handle_link_fail(link, link_tries)) {
1108 ata_ehi_hotplugged(&link->eh_context.i);
1109 cnt++;
1110 } else {
1111 ata_link_printk(link, KERN_WARNING,
1112 "PHY status changed but maxed out on retries, "
1113 "giving up\n");
1114 ata_link_printk(link, KERN_WARNING,
1115 "Manully issue scan to resume this link\n");
1116 }
1117 }
1118
1119 if (cnt) {
1120 ata_port_printk(ap, KERN_INFO, "PMP SError.N set for some "
1121 "ports, repeating recovery\n");
1122 goto retry;
1123 }
1124
1125 return 0;
1126
1127 link_fail:
1128 if (sata_pmp_handle_link_fail(link, link_tries)) {
1129 pmp_ehc->i.action |= ATA_EH_HARDRESET;
1130 goto retry;
1131 }
1132
1133 /* fall through */
1134 pmp_fail:
1135 /* Control always ends up here after detaching PMP. Shut up
1136 * and return if we're unloading.
1137 */
1138 if (ap->pflags & ATA_PFLAG_UNLOADING)
1139 return rc;
1140
1141 if (!ap->nr_pmp_links)
1142 goto retry;
1143
1144 if (--pmp_tries) {
1145 ata_port_printk(ap, KERN_WARNING,
1146 "failed to recover PMP, retrying in 5 secs\n");
1147 pmp_ehc->i.action |= ATA_EH_HARDRESET;
1148 ssleep(5);
1149 goto retry;
1150 }
1151
1152 ata_port_printk(ap, KERN_ERR,
1153 "failed to recover PMP after %d tries, giving up\n",
1154 ATA_EH_PMP_TRIES);
1155 sata_pmp_detach(pmp_dev);
1156 ata_dev_disable(pmp_dev);
1157
1158 return rc;
1159}
1160
1161/**
1162 * sata_pmp_do_eh - do standard error handling for PMP-enabled host
1163 * @ap: host port to handle error for
1164 * @prereset: prereset method (can be NULL)
1165 * @softreset: softreset method
1166 * @hardreset: hardreset method
1167 * @postreset: postreset method (can be NULL)
1168 * @pmp_prereset: PMP prereset method (can be NULL)
1169 * @pmp_softreset: PMP softreset method (can be NULL)
1170 * @pmp_hardreset: PMP hardreset method (can be NULL)
1171 * @pmp_postreset: PMP postreset method (can be NULL)
1172 *
1173 * Perform standard error handling sequence for PMP-enabled host
1174 * @ap.
1175 *
1176 * LOCKING:
1177 * Kernel thread context (may sleep).
1178 */
1179void sata_pmp_do_eh(struct ata_port *ap,
1180 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
1181 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset,
1182 ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset,
1183 ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset)
1184{
1185 ata_eh_autopsy(ap);
1186 ata_eh_report(ap);
1187 sata_pmp_eh_recover(ap, prereset, softreset, hardreset, postreset,
1188 pmp_prereset, pmp_softreset, pmp_hardreset,
1189 pmp_postreset);
1190 ata_eh_finish(ap);
1191}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e83647651b..ea53e6a570 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -71,11 +71,10 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
71#define ALL_SUB_MPAGES 0xff 71#define ALL_SUB_MPAGES 0xff
72 72
73 73
74static const u8 def_rw_recovery_mpage[] = { 74static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = {
75 RW_RECOVERY_MPAGE, 75 RW_RECOVERY_MPAGE,
76 RW_RECOVERY_MPAGE_LEN - 2, 76 RW_RECOVERY_MPAGE_LEN - 2,
77 (1 << 7) | /* AWRE, sat-r06 say it shall be 0 */ 77 (1 << 7), /* AWRE */
78 (1 << 6), /* ARRE (auto read reallocation) */
79 0, /* read retry count */ 78 0, /* read retry count */
80 0, 0, 0, 0, 79 0, 0, 0, 0,
81 0, /* write retry count */ 80 0, /* write retry count */
@@ -450,13 +449,8 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
450 qc->scsicmd = cmd; 449 qc->scsicmd = cmd;
451 qc->scsidone = done; 450 qc->scsidone = done;
452 451
453 if (cmd->use_sg) { 452 qc->__sg = scsi_sglist(cmd);
454 qc->__sg = (struct scatterlist *) cmd->request_buffer; 453 qc->n_elem = scsi_sg_count(cmd);
455 qc->n_elem = cmd->use_sg;
456 } else if (cmd->request_bufflen) {
457 qc->__sg = &qc->sgent;
458 qc->n_elem = 1;
459 }
460 } else { 454 } else {
461 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1); 455 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
462 done(cmd); 456 done(cmd);
@@ -755,6 +749,13 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
755{ 749{
756 sdev->use_10_for_rw = 1; 750 sdev->use_10_for_rw = 1;
757 sdev->use_10_for_ms = 1; 751 sdev->use_10_for_ms = 1;
752
753 /* Schedule policy is determined by ->qc_defer() callback and
754 * it needs to see every deferred qc. Set dev_blocked to 1 to
755 * prevent SCSI midlayer from automatically deferring
756 * requests.
757 */
758 sdev->max_device_blocked = 1;
758} 759}
759 760
760static void ata_scsi_dev_config(struct scsi_device *sdev, 761static void ata_scsi_dev_config(struct scsi_device *sdev,
@@ -943,6 +944,13 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
943 goto invalid_fld; /* LOEJ bit set not supported */ 944 goto invalid_fld; /* LOEJ bit set not supported */
944 if (((cdb[4] >> 4) & 0xf) != 0) 945 if (((cdb[4] >> 4) & 0xf) != 0)
945 goto invalid_fld; /* power conditions not supported */ 946 goto invalid_fld; /* power conditions not supported */
947
948 if (qc->dev->horkage & ATA_HORKAGE_SKIP_PM) {
949 /* the device lacks PM support, finish without doing anything */
950 scmd->result = SAM_STAT_GOOD;
951 return 1;
952 }
953
946 if (cdb[4] & 0x1) { 954 if (cdb[4] & 0x1) {
947 tf->nsect = 1; /* 1 sector, lba=0 */ 955 tf->nsect = 1; /* 1 sector, lba=0 */
948 956
@@ -1368,14 +1376,14 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1368 case ATA_CMD_SET_FEATURES: 1376 case ATA_CMD_SET_FEATURES:
1369 if ((qc->tf.feature == SETFEATURES_WC_ON) || 1377 if ((qc->tf.feature == SETFEATURES_WC_ON) ||
1370 (qc->tf.feature == SETFEATURES_WC_OFF)) { 1378 (qc->tf.feature == SETFEATURES_WC_OFF)) {
1371 ap->eh_info.action |= ATA_EH_REVALIDATE; 1379 ap->link.eh_info.action |= ATA_EH_REVALIDATE;
1372 ata_port_schedule_eh(ap); 1380 ata_port_schedule_eh(ap);
1373 } 1381 }
1374 break; 1382 break;
1375 1383
1376 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ 1384 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
1377 case ATA_CMD_SET_MULTI: /* multi_count changed */ 1385 case ATA_CMD_SET_MULTI: /* multi_count changed */
1378 ap->eh_info.action |= ATA_EH_REVALIDATE; 1386 ap->link.eh_info.action |= ATA_EH_REVALIDATE;
1379 ata_port_schedule_eh(ap); 1387 ata_port_schedule_eh(ap);
1380 break; 1388 break;
1381 } 1389 }
@@ -1422,37 +1430,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1422} 1430}
1423 1431
1424/** 1432/**
1425 * ata_scmd_need_defer - Check whether we need to defer scmd
1426 * @dev: ATA device to which the command is addressed
1427 * @is_io: Is the command IO (and thus possibly NCQ)?
1428 *
1429 * NCQ and non-NCQ commands cannot run together. As upper layer
1430 * only knows the queue depth, we are responsible for maintaining
1431 * exclusion. This function checks whether a new command can be
1432 * issued to @dev.
1433 *
1434 * LOCKING:
1435 * spin_lock_irqsave(host lock)
1436 *
1437 * RETURNS:
1438 * 1 if deferring is needed, 0 otherwise.
1439 */
1440static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
1441{
1442 struct ata_port *ap = dev->ap;
1443 int is_ncq = is_io && ata_ncq_enabled(dev);
1444
1445 if (is_ncq) {
1446 if (!ata_tag_valid(ap->active_tag))
1447 return 0;
1448 } else {
1449 if (!ata_tag_valid(ap->active_tag) && !ap->sactive)
1450 return 0;
1451 }
1452 return 1;
1453}
1454
1455/**
1456 * ata_scsi_translate - Translate then issue SCSI command to ATA device 1433 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1457 * @dev: ATA device to which the command is addressed 1434 * @dev: ATA device to which the command is addressed
1458 * @cmd: SCSI command to execute 1435 * @cmd: SCSI command to execute
@@ -1483,14 +1460,12 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1483 void (*done)(struct scsi_cmnd *), 1460 void (*done)(struct scsi_cmnd *),
1484 ata_xlat_func_t xlat_func) 1461 ata_xlat_func_t xlat_func)
1485{ 1462{
1463 struct ata_port *ap = dev->link->ap;
1486 struct ata_queued_cmd *qc; 1464 struct ata_queued_cmd *qc;
1487 int is_io = xlat_func == ata_scsi_rw_xlat; 1465 int rc;
1488 1466
1489 VPRINTK("ENTER\n"); 1467 VPRINTK("ENTER\n");
1490 1468
1491 if (unlikely(ata_scmd_need_defer(dev, is_io)))
1492 goto defer;
1493
1494 qc = ata_scsi_qc_new(dev, cmd, done); 1469 qc = ata_scsi_qc_new(dev, cmd, done);
1495 if (!qc) 1470 if (!qc)
1496 goto err_mem; 1471 goto err_mem;
@@ -1498,17 +1473,13 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1498 /* data is present; dma-map it */ 1473 /* data is present; dma-map it */
1499 if (cmd->sc_data_direction == DMA_FROM_DEVICE || 1474 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1500 cmd->sc_data_direction == DMA_TO_DEVICE) { 1475 cmd->sc_data_direction == DMA_TO_DEVICE) {
1501 if (unlikely(cmd->request_bufflen < 1)) { 1476 if (unlikely(scsi_bufflen(cmd) < 1)) {
1502 ata_dev_printk(dev, KERN_WARNING, 1477 ata_dev_printk(dev, KERN_WARNING,
1503 "WARNING: zero len r/w req\n"); 1478 "WARNING: zero len r/w req\n");
1504 goto err_did; 1479 goto err_did;
1505 } 1480 }
1506 1481
1507 if (cmd->use_sg) 1482 ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));
1508 ata_sg_init(qc, cmd->request_buffer, cmd->use_sg);
1509 else
1510 ata_sg_init_one(qc, cmd->request_buffer,
1511 cmd->request_bufflen);
1512 1483
1513 qc->dma_dir = cmd->sc_data_direction; 1484 qc->dma_dir = cmd->sc_data_direction;
1514 } 1485 }
@@ -1518,6 +1489,11 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1518 if (xlat_func(qc)) 1489 if (xlat_func(qc))
1519 goto early_finish; 1490 goto early_finish;
1520 1491
1492 if (ap->ops->qc_defer) {
1493 if ((rc = ap->ops->qc_defer(qc)))
1494 goto defer;
1495 }
1496
1521 /* select device, send command to hardware */ 1497 /* select device, send command to hardware */
1522 ata_qc_issue(qc); 1498 ata_qc_issue(qc);
1523 1499
@@ -1539,8 +1515,12 @@ err_mem:
1539 return 0; 1515 return 0;
1540 1516
1541defer: 1517defer:
1518 ata_qc_free(qc);
1542 DPRINTK("EXIT - defer\n"); 1519 DPRINTK("EXIT - defer\n");
1543 return SCSI_MLQUEUE_DEVICE_BUSY; 1520 if (rc == ATA_DEFER_LINK)
1521 return SCSI_MLQUEUE_DEVICE_BUSY;
1522 else
1523 return SCSI_MLQUEUE_HOST_BUSY;
1544} 1524}
1545 1525
1546/** 1526/**
@@ -1562,15 +1542,14 @@ static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
1562 u8 *buf; 1542 u8 *buf;
1563 unsigned int buflen; 1543 unsigned int buflen;
1564 1544
1565 if (cmd->use_sg) { 1545 struct scatterlist *sg = scsi_sglist(cmd);
1566 struct scatterlist *sg;
1567 1546
1568 sg = (struct scatterlist *) cmd->request_buffer; 1547 if (sg) {
1569 buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset; 1548 buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1570 buflen = sg->length; 1549 buflen = sg->length;
1571 } else { 1550 } else {
1572 buf = cmd->request_buffer; 1551 buf = NULL;
1573 buflen = cmd->request_bufflen; 1552 buflen = 0;
1574 } 1553 }
1575 1554
1576 *buf_out = buf; 1555 *buf_out = buf;
@@ -1590,12 +1569,9 @@ static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
1590 1569
1591static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf) 1570static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
1592{ 1571{
1593 if (cmd->use_sg) { 1572 struct scatterlist *sg = scsi_sglist(cmd);
1594 struct scatterlist *sg; 1573 if (sg)
1595
1596 sg = (struct scatterlist *) cmd->request_buffer;
1597 kunmap_atomic(buf - sg->offset, KM_IRQ0); 1574 kunmap_atomic(buf - sg->offset, KM_IRQ0);
1598 }
1599} 1575}
1600 1576
1601/** 1577/**
@@ -1817,6 +1793,62 @@ unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
1817} 1793}
1818 1794
1819/** 1795/**
1796 * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
1797 * @args: device IDENTIFY data / SCSI command of interest.
1798 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1799 * @buflen: Response buffer length.
1800 *
1801 * Yields SAT-specified ATA VPD page.
1802 *
1803 * LOCKING:
1804 * spin_lock_irqsave(host lock)
1805 */
1806
1807unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf,
1808 unsigned int buflen)
1809{
1810 u8 pbuf[60];
1811 struct ata_taskfile tf;
1812 unsigned int i;
1813
1814 if (!buflen)
1815 return 0;
1816
1817 memset(&pbuf, 0, sizeof(pbuf));
1818 memset(&tf, 0, sizeof(tf));
1819
1820 pbuf[1] = 0x89; /* our page code */
1821 pbuf[2] = (0x238 >> 8); /* page size fixed at 238h */
1822 pbuf[3] = (0x238 & 0xff);
1823
1824 memcpy(&pbuf[8], "linux ", 8);
1825 memcpy(&pbuf[16], "libata ", 16);
1826 memcpy(&pbuf[32], DRV_VERSION, 4);
1827 ata_id_string(args->id, &pbuf[32], ATA_ID_FW_REV, 4);
1828
1829 /* we don't store the ATA device signature, so we fake it */
1830
1831 tf.command = ATA_DRDY; /* really, this is Status reg */
1832 tf.lbal = 0x1;
1833 tf.nsect = 0x1;
1834
1835 ata_tf_to_fis(&tf, 0, 1, &pbuf[36]); /* TODO: PMP? */
1836 pbuf[36] = 0x34; /* force D2H Reg FIS (34h) */
1837
1838 pbuf[56] = ATA_CMD_ID_ATA;
1839
1840 i = min(buflen, 60U);
1841 memcpy(rbuf, &pbuf[0], i);
1842 buflen -= i;
1843
1844 if (!buflen)
1845 return 0;
1846
1847 memcpy(&rbuf[60], &args->id[0], min(buflen, 512U));
1848 return 0;
1849}
1850
1851/**
1820 * ata_scsiop_noop - Command handler that simply returns success. 1852 * ata_scsiop_noop - Command handler that simply returns success.
1821 * @args: device IDENTIFY data / SCSI command of interest. 1853 * @args: device IDENTIFY data / SCSI command of interest.
1822 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1854 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
@@ -2273,8 +2305,8 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2273 qc->tf.feature |= ATAPI_PKT_DMA; 2305 qc->tf.feature |= ATAPI_PKT_DMA;
2274 } else { 2306 } else {
2275 qc->tf.protocol = ATA_PROT_ATAPI; 2307 qc->tf.protocol = ATA_PROT_ATAPI;
2276 qc->tf.lbam = (8 * 1024) & 0xff; 2308 qc->tf.lbam = SCSI_SENSE_BUFFERSIZE;
2277 qc->tf.lbah = (8 * 1024) >> 8; 2309 qc->tf.lbah = 0;
2278 } 2310 }
2279 qc->nbytes = SCSI_SENSE_BUFFERSIZE; 2311 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2280 2312
@@ -2383,6 +2415,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
2383 struct ata_device *dev = qc->dev; 2415 struct ata_device *dev = qc->dev;
2384 int using_pio = (dev->flags & ATA_DFLAG_PIO); 2416 int using_pio = (dev->flags & ATA_DFLAG_PIO);
2385 int nodata = (scmd->sc_data_direction == DMA_NONE); 2417 int nodata = (scmd->sc_data_direction == DMA_NONE);
2418 unsigned int nbytes;
2386 2419
2387 memset(qc->cdb, 0, dev->cdb_len); 2420 memset(qc->cdb, 0, dev->cdb_len);
2388 memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len); 2421 memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len);
@@ -2396,20 +2429,26 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
2396 } 2429 }
2397 2430
2398 qc->tf.command = ATA_CMD_PACKET; 2431 qc->tf.command = ATA_CMD_PACKET;
2399 qc->nbytes = scmd->request_bufflen; 2432 qc->nbytes = scsi_bufflen(scmd);
2400 2433
2401 /* check whether ATAPI DMA is safe */ 2434 /* check whether ATAPI DMA is safe */
2402 if (!using_pio && ata_check_atapi_dma(qc)) 2435 if (!using_pio && ata_check_atapi_dma(qc))
2403 using_pio = 1; 2436 using_pio = 1;
2404 2437
2438 /* Some controller variants snoop this value for Packet transfers
2439 to do state machine and FIFO management. Thus we want to set it
2440 properly, and for DMA where it is effectively meaningless */
2441 nbytes = min(qc->nbytes, (unsigned int)63 * 1024);
2442
2443 qc->tf.lbam = (nbytes & 0xFF);
2444 qc->tf.lbah = (nbytes >> 8);
2445
2405 if (using_pio || nodata) { 2446 if (using_pio || nodata) {
2406 /* no data, or PIO data xfer */ 2447 /* no data, or PIO data xfer */
2407 if (nodata) 2448 if (nodata)
2408 qc->tf.protocol = ATA_PROT_ATAPI_NODATA; 2449 qc->tf.protocol = ATA_PROT_ATAPI_NODATA;
2409 else 2450 else
2410 qc->tf.protocol = ATA_PROT_ATAPI; 2451 qc->tf.protocol = ATA_PROT_ATAPI;
2411 qc->tf.lbam = (8 * 1024) & 0xff;
2412 qc->tf.lbah = (8 * 1024) >> 8;
2413 } else { 2452 } else {
2414 /* DMA data xfer */ 2453 /* DMA data xfer */
2415 qc->tf.protocol = ATA_PROT_ATAPI_DMA; 2454 qc->tf.protocol = ATA_PROT_ATAPI_DMA;
@@ -2420,24 +2459,42 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
2420 qc->tf.feature |= ATAPI_DMADIR; 2459 qc->tf.feature |= ATAPI_DMADIR;
2421 } 2460 }
2422 2461
2462
2463 /* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE
2464 as ATAPI tape drives don't get this right otherwise */
2423 return 0; 2465 return 0;
2424} 2466}
2425 2467
2426static struct ata_device * ata_find_dev(struct ata_port *ap, int id) 2468static struct ata_device * ata_find_dev(struct ata_port *ap, int devno)
2427{ 2469{
2428 if (likely(id < ATA_MAX_DEVICES)) 2470 if (ap->nr_pmp_links == 0) {
2429 return &ap->device[id]; 2471 if (likely(devno < ata_link_max_devices(&ap->link)))
2472 return &ap->link.device[devno];
2473 } else {
2474 if (likely(devno < ap->nr_pmp_links))
2475 return &ap->pmp_link[devno].device[0];
2476 }
2477
2430 return NULL; 2478 return NULL;
2431} 2479}
2432 2480
2433static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap, 2481static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
2434 const struct scsi_device *scsidev) 2482 const struct scsi_device *scsidev)
2435{ 2483{
2484 int devno;
2485
2436 /* skip commands not addressed to targets we simulate */ 2486 /* skip commands not addressed to targets we simulate */
2437 if (unlikely(scsidev->channel || scsidev->lun)) 2487 if (ap->nr_pmp_links == 0) {
2438 return NULL; 2488 if (unlikely(scsidev->channel || scsidev->lun))
2489 return NULL;
2490 devno = scsidev->id;
2491 } else {
2492 if (unlikely(scsidev->id || scsidev->lun))
2493 return NULL;
2494 devno = scsidev->channel;
2495 }
2439 2496
2440 return ata_find_dev(ap, scsidev->id); 2497 return ata_find_dev(ap, devno);
2441} 2498}
2442 2499
2443/** 2500/**
@@ -2458,7 +2515,7 @@ static int ata_scsi_dev_enabled(struct ata_device *dev)
2458 if (unlikely(!ata_dev_enabled(dev))) 2515 if (unlikely(!ata_dev_enabled(dev)))
2459 return 0; 2516 return 0;
2460 2517
2461 if (!atapi_enabled || (dev->ap->flags & ATA_FLAG_NO_ATAPI)) { 2518 if (!atapi_enabled || (dev->link->ap->flags & ATA_FLAG_NO_ATAPI)) {
2462 if (unlikely(dev->class == ATA_DEV_ATAPI)) { 2519 if (unlikely(dev->class == ATA_DEV_ATAPI)) {
2463 ata_dev_printk(dev, KERN_WARNING, 2520 ata_dev_printk(dev, KERN_WARNING,
2464 "WARNING: ATAPI is %s, device ignored.\n", 2521 "WARNING: ATAPI is %s, device ignored.\n",
@@ -2631,7 +2688,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
2631 case ATA_CMD_WRITE_LONG_ONCE: 2688 case ATA_CMD_WRITE_LONG_ONCE:
2632 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) 2689 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
2633 goto invalid_fld; 2690 goto invalid_fld;
2634 qc->sect_size = scmd->request_bufflen; 2691 qc->sect_size = scsi_bufflen(scmd);
2635 } 2692 }
2636 2693
2637 /* 2694 /*
@@ -2661,7 +2718,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
2661 * TODO: find out if we need to do more here to 2718 * TODO: find out if we need to do more here to
2662 * cover scatter/gather case. 2719 * cover scatter/gather case.
2663 */ 2720 */
2664 qc->nbytes = scmd->request_bufflen; 2721 qc->nbytes = scsi_bufflen(scmd);
2665 2722
2666 /* request result TF */ 2723 /* request result TF */
2667 qc->flags |= ATA_QCFLAG_RESULT_TF; 2724 qc->flags |= ATA_QCFLAG_RESULT_TF;
@@ -2746,28 +2803,48 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
2746 void (*done)(struct scsi_cmnd *), 2803 void (*done)(struct scsi_cmnd *),
2747 struct ata_device *dev) 2804 struct ata_device *dev)
2748{ 2805{
2806 u8 scsi_op = scmd->cmnd[0];
2807 ata_xlat_func_t xlat_func;
2749 int rc = 0; 2808 int rc = 0;
2750 2809
2751 if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len)) {
2752 DPRINTK("bad CDB len=%u, max=%u\n",
2753 scmd->cmd_len, dev->cdb_len);
2754 scmd->result = DID_ERROR << 16;
2755 done(scmd);
2756 return 0;
2757 }
2758
2759 if (dev->class == ATA_DEV_ATA) { 2810 if (dev->class == ATA_DEV_ATA) {
2760 ata_xlat_func_t xlat_func = ata_get_xlat_func(dev, 2811 if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len))
2761 scmd->cmnd[0]); 2812 goto bad_cdb_len;
2762 2813
2763 if (xlat_func) 2814 xlat_func = ata_get_xlat_func(dev, scsi_op);
2764 rc = ata_scsi_translate(dev, scmd, done, xlat_func); 2815 } else {
2765 else 2816 if (unlikely(!scmd->cmd_len))
2766 ata_scsi_simulate(dev, scmd, done); 2817 goto bad_cdb_len;
2767 } else 2818
2768 rc = ata_scsi_translate(dev, scmd, done, atapi_xlat); 2819 xlat_func = NULL;
2820 if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
2821 /* relay SCSI command to ATAPI device */
2822 if (unlikely(scmd->cmd_len > dev->cdb_len))
2823 goto bad_cdb_len;
2824
2825 xlat_func = atapi_xlat;
2826 } else {
2827 /* ATA_16 passthru, treat as an ATA command */
2828 if (unlikely(scmd->cmd_len > 16))
2829 goto bad_cdb_len;
2830
2831 xlat_func = ata_get_xlat_func(dev, scsi_op);
2832 }
2833 }
2834
2835 if (xlat_func)
2836 rc = ata_scsi_translate(dev, scmd, done, xlat_func);
2837 else
2838 ata_scsi_simulate(dev, scmd, done);
2769 2839
2770 return rc; 2840 return rc;
2841
2842 bad_cdb_len:
2843 DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
2844 scmd->cmd_len, scsi_op, dev->cdb_len);
2845 scmd->result = DID_ERROR << 16;
2846 done(scmd);
2847 return 0;
2771} 2848}
2772 2849
2773/** 2850/**
@@ -2835,6 +2912,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
2835{ 2912{
2836 struct ata_scsi_args args; 2913 struct ata_scsi_args args;
2837 const u8 *scsicmd = cmd->cmnd; 2914 const u8 *scsicmd = cmd->cmnd;
2915 u8 tmp8;
2838 2916
2839 args.dev = dev; 2917 args.dev = dev;
2840 args.id = dev->id; 2918 args.id = dev->id;
@@ -2842,15 +2920,9 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
2842 args.done = done; 2920 args.done = done;
2843 2921
2844 switch(scsicmd[0]) { 2922 switch(scsicmd[0]) {
2845 /* no-op's, complete with success */ 2923 /* TODO: worth improving? */
2846 case SYNCHRONIZE_CACHE: 2924 case FORMAT_UNIT:
2847 case REZERO_UNIT: 2925 ata_scsi_invalid_field(cmd, done);
2848 case SEEK_6:
2849 case SEEK_10:
2850 case TEST_UNIT_READY:
2851 case FORMAT_UNIT: /* FIXME: correct? */
2852 case SEND_DIAGNOSTIC: /* FIXME: correct? */
2853 ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
2854 break; 2926 break;
2855 2927
2856 case INQUIRY: 2928 case INQUIRY:
@@ -2858,14 +2930,23 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
2858 ata_scsi_invalid_field(cmd, done); 2930 ata_scsi_invalid_field(cmd, done);
2859 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */ 2931 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
2860 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); 2932 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
2861 else if (scsicmd[2] == 0x00) 2933 else switch (scsicmd[2]) {
2934 case 0x00:
2862 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00); 2935 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
2863 else if (scsicmd[2] == 0x80) 2936 break;
2937 case 0x80:
2864 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80); 2938 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
2865 else if (scsicmd[2] == 0x83) 2939 break;
2940 case 0x83:
2866 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); 2941 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
2867 else 2942 break;
2943 case 0x89:
2944 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
2945 break;
2946 default:
2868 ata_scsi_invalid_field(cmd, done); 2947 ata_scsi_invalid_field(cmd, done);
2948 break;
2949 }
2869 break; 2950 break;
2870 2951
2871 case MODE_SENSE: 2952 case MODE_SENSE:
@@ -2893,8 +2974,33 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
2893 ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); 2974 ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
2894 break; 2975 break;
2895 2976
2896 /* mandatory commands we haven't implemented yet */
2897 case REQUEST_SENSE: 2977 case REQUEST_SENSE:
2978 ata_scsi_set_sense(cmd, 0, 0, 0);
2979 cmd->result = (DRIVER_SENSE << 24);
2980 done(cmd);
2981 break;
2982
2983 /* if we reach this, then writeback caching is disabled,
2984 * turning this into a no-op.
2985 */
2986 case SYNCHRONIZE_CACHE:
2987 /* fall through */
2988
2989 /* no-op's, complete with success */
2990 case REZERO_UNIT:
2991 case SEEK_6:
2992 case SEEK_10:
2993 case TEST_UNIT_READY:
2994 ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
2995 break;
2996
2997 case SEND_DIAGNOSTIC:
2998 tmp8 = scsicmd[1] & ~(1 << 3);
2999 if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
3000 ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
3001 else
3002 ata_scsi_invalid_field(cmd, done);
3003 break;
2898 3004
2899 /* all other commands */ 3005 /* all other commands */
2900 default: 3006 default:
@@ -2928,6 +3034,13 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
2928 shost->max_channel = 1; 3034 shost->max_channel = 1;
2929 shost->max_cmd_len = 16; 3035 shost->max_cmd_len = 16;
2930 3036
3037 /* Schedule policy is determined by ->qc_defer()
3038 * callback and it needs to see every deferred qc.
3039 * Set host_blocked to 1 to prevent SCSI midlayer from
3040 * automatically deferring requests.
3041 */
3042 shost->max_host_blocked = 1;
3043
2931 rc = scsi_add_host(ap->scsi_host, ap->host->dev); 3044 rc = scsi_add_host(ap->scsi_host, ap->host->dev);
2932 if (rc) 3045 if (rc)
2933 goto err_add; 3046 goto err_add;
@@ -2951,25 +3064,32 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
2951{ 3064{
2952 int tries = 5; 3065 int tries = 5;
2953 struct ata_device *last_failed_dev = NULL; 3066 struct ata_device *last_failed_dev = NULL;
3067 struct ata_link *link;
2954 struct ata_device *dev; 3068 struct ata_device *dev;
2955 unsigned int i;
2956 3069
2957 if (ap->flags & ATA_FLAG_DISABLED) 3070 if (ap->flags & ATA_FLAG_DISABLED)
2958 return; 3071 return;
2959 3072
2960 repeat: 3073 repeat:
2961 for (i = 0; i < ATA_MAX_DEVICES; i++) { 3074 ata_port_for_each_link(link, ap) {
2962 struct scsi_device *sdev; 3075 ata_link_for_each_dev(dev, link) {
3076 struct scsi_device *sdev;
3077 int channel = 0, id = 0;
2963 3078
2964 dev = &ap->device[i]; 3079 if (!ata_dev_enabled(dev) || dev->sdev)
3080 continue;
2965 3081
2966 if (!ata_dev_enabled(dev) || dev->sdev) 3082 if (ata_is_host_link(link))
2967 continue; 3083 id = dev->devno;
3084 else
3085 channel = link->pmp;
2968 3086
2969 sdev = __scsi_add_device(ap->scsi_host, 0, i, 0, NULL); 3087 sdev = __scsi_add_device(ap->scsi_host, channel, id, 0,
2970 if (!IS_ERR(sdev)) { 3088 NULL);
2971 dev->sdev = sdev; 3089 if (!IS_ERR(sdev)) {
2972 scsi_device_put(sdev); 3090 dev->sdev = sdev;
3091 scsi_device_put(sdev);
3092 }
2973 } 3093 }
2974 } 3094 }
2975 3095
@@ -2977,12 +3097,14 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
2977 * failure occurred, scan would have failed silently. Check 3097 * failure occurred, scan would have failed silently. Check
2978 * whether all devices are attached. 3098 * whether all devices are attached.
2979 */ 3099 */
2980 for (i = 0; i < ATA_MAX_DEVICES; i++) { 3100 ata_port_for_each_link(link, ap) {
2981 dev = &ap->device[i]; 3101 ata_link_for_each_dev(dev, link) {
2982 if (ata_dev_enabled(dev) && !dev->sdev) 3102 if (ata_dev_enabled(dev) && !dev->sdev)
2983 break; 3103 goto exit_loop;
3104 }
2984 } 3105 }
2985 if (i == ATA_MAX_DEVICES) 3106 exit_loop:
3107 if (!link)
2986 return; 3108 return;
2987 3109
2988 /* we're missing some SCSI devices */ 3110 /* we're missing some SCSI devices */
@@ -3049,7 +3171,7 @@ int ata_scsi_offline_dev(struct ata_device *dev)
3049 */ 3171 */
3050static void ata_scsi_remove_dev(struct ata_device *dev) 3172static void ata_scsi_remove_dev(struct ata_device *dev)
3051{ 3173{
3052 struct ata_port *ap = dev->ap; 3174 struct ata_port *ap = dev->link->ap;
3053 struct scsi_device *sdev; 3175 struct scsi_device *sdev;
3054 unsigned long flags; 3176 unsigned long flags;
3055 3177
@@ -3096,6 +3218,43 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
3096 } 3218 }
3097} 3219}
3098 3220
3221static void ata_scsi_handle_link_detach(struct ata_link *link)
3222{
3223 struct ata_port *ap = link->ap;
3224 struct ata_device *dev;
3225
3226 ata_link_for_each_dev(dev, link) {
3227 unsigned long flags;
3228
3229 if (!(dev->flags & ATA_DFLAG_DETACHED))
3230 continue;
3231
3232 spin_lock_irqsave(ap->lock, flags);
3233 dev->flags &= ~ATA_DFLAG_DETACHED;
3234 spin_unlock_irqrestore(ap->lock, flags);
3235
3236 ata_scsi_remove_dev(dev);
3237 }
3238}
3239
3240/**
3241 * ata_scsi_media_change_notify - send media change event
3242 * @atadev: Pointer to the disk device with media change event
3243 *
3244 * Tell the block layer to send a media change notification
3245 * event.
3246 *
3247 * LOCKING:
3248 * spin_lock_irqsave(host lock)
3249 */
3250void ata_scsi_media_change_notify(struct ata_device *dev)
3251{
3252#ifdef OTHER_AN_PATCHES_HAVE_BEEN_APPLIED
3253 if (dev->sdev)
3254 scsi_device_event_notify(dev->sdev, SDEV_MEDIA_CHANGE);
3255#endif
3256}
3257
3099/** 3258/**
3100 * ata_scsi_hotplug - SCSI part of hotplug 3259 * ata_scsi_hotplug - SCSI part of hotplug
3101 * @work: Pointer to ATA port to perform SCSI hotplug on 3260 * @work: Pointer to ATA port to perform SCSI hotplug on
@@ -3121,20 +3280,14 @@ void ata_scsi_hotplug(struct work_struct *work)
3121 3280
3122 DPRINTK("ENTER\n"); 3281 DPRINTK("ENTER\n");
3123 3282
3124 /* unplug detached devices */ 3283 /* Unplug detached devices. We cannot use link iterator here
3125 for (i = 0; i < ATA_MAX_DEVICES; i++) { 3284 * because PMP links have to be scanned even if PMP is
3126 struct ata_device *dev = &ap->device[i]; 3285 * currently not attached. Iterate manually.
3127 unsigned long flags; 3286 */
3128 3287 ata_scsi_handle_link_detach(&ap->link);
3129 if (!(dev->flags & ATA_DFLAG_DETACHED)) 3288 if (ap->pmp_link)
3130 continue; 3289 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
3131 3290 ata_scsi_handle_link_detach(&ap->pmp_link[i]);
3132 spin_lock_irqsave(ap->lock, flags);
3133 dev->flags &= ~ATA_DFLAG_DETACHED;
3134 spin_unlock_irqrestore(ap->lock, flags);
3135
3136 ata_scsi_remove_dev(dev);
3137 }
3138 3291
3139 /* scan for new ones */ 3292 /* scan for new ones */
3140 ata_scsi_scan_host(ap, 0); 3293 ata_scsi_scan_host(ap, 0);
@@ -3163,27 +3316,42 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
3163{ 3316{
3164 struct ata_port *ap = ata_shost_to_port(shost); 3317 struct ata_port *ap = ata_shost_to_port(shost);
3165 unsigned long flags; 3318 unsigned long flags;
3166 int rc = 0; 3319 int devno, rc = 0;
3167 3320
3168 if (!ap->ops->error_handler) 3321 if (!ap->ops->error_handler)
3169 return -EOPNOTSUPP; 3322 return -EOPNOTSUPP;
3170 3323
3171 if ((channel != SCAN_WILD_CARD && channel != 0) || 3324 if (lun != SCAN_WILD_CARD && lun)
3172 (lun != SCAN_WILD_CARD && lun != 0))
3173 return -EINVAL; 3325 return -EINVAL;
3174 3326
3327 if (ap->nr_pmp_links == 0) {
3328 if (channel != SCAN_WILD_CARD && channel)
3329 return -EINVAL;
3330 devno = id;
3331 } else {
3332 if (id != SCAN_WILD_CARD && id)
3333 return -EINVAL;
3334 devno = channel;
3335 }
3336
3175 spin_lock_irqsave(ap->lock, flags); 3337 spin_lock_irqsave(ap->lock, flags);
3176 3338
3177 if (id == SCAN_WILD_CARD) { 3339 if (devno == SCAN_WILD_CARD) {
3178 ap->eh_info.probe_mask |= (1 << ATA_MAX_DEVICES) - 1; 3340 struct ata_link *link;
3179 ap->eh_info.action |= ATA_EH_SOFTRESET; 3341
3342 ata_port_for_each_link(link, ap) {
3343 struct ata_eh_info *ehi = &link->eh_info;
3344 ehi->probe_mask |= (1 << ata_link_max_devices(link)) - 1;
3345 ehi->action |= ATA_EH_SOFTRESET;
3346 }
3180 } else { 3347 } else {
3181 struct ata_device *dev = ata_find_dev(ap, id); 3348 struct ata_device *dev = ata_find_dev(ap, devno);
3182 3349
3183 if (dev) { 3350 if (dev) {
3184 ap->eh_info.probe_mask |= 1 << dev->devno; 3351 struct ata_eh_info *ehi = &dev->link->eh_info;
3185 ap->eh_info.action |= ATA_EH_SOFTRESET; 3352 ehi->probe_mask |= 1 << dev->devno;
3186 ap->eh_info.flags |= ATA_EHI_RESUME_LINK; 3353 ehi->action |= ATA_EH_SOFTRESET;
3354 ehi->flags |= ATA_EHI_RESUME_LINK;
3187 } else 3355 } else
3188 rc = -EINVAL; 3356 rc = -EINVAL;
3189 } 3357 }
@@ -3214,24 +3382,26 @@ void ata_scsi_dev_rescan(struct work_struct *work)
3214{ 3382{
3215 struct ata_port *ap = 3383 struct ata_port *ap =
3216 container_of(work, struct ata_port, scsi_rescan_task); 3384 container_of(work, struct ata_port, scsi_rescan_task);
3385 struct ata_link *link;
3386 struct ata_device *dev;
3217 unsigned long flags; 3387 unsigned long flags;
3218 unsigned int i;
3219 3388
3220 spin_lock_irqsave(ap->lock, flags); 3389 spin_lock_irqsave(ap->lock, flags);
3221 3390
3222 for (i = 0; i < ATA_MAX_DEVICES; i++) { 3391 ata_port_for_each_link(link, ap) {
3223 struct ata_device *dev = &ap->device[i]; 3392 ata_link_for_each_dev(dev, link) {
3224 struct scsi_device *sdev = dev->sdev; 3393 struct scsi_device *sdev = dev->sdev;
3225 3394
3226 if (!ata_dev_enabled(dev) || !sdev) 3395 if (!ata_dev_enabled(dev) || !sdev)
3227 continue; 3396 continue;
3228 if (scsi_device_get(sdev)) 3397 if (scsi_device_get(sdev))
3229 continue; 3398 continue;
3230 3399
3231 spin_unlock_irqrestore(ap->lock, flags); 3400 spin_unlock_irqrestore(ap->lock, flags);
3232 scsi_rescan_device(&(sdev->sdev_gendev)); 3401 scsi_rescan_device(&(sdev->sdev_gendev));
3233 scsi_device_put(sdev); 3402 scsi_device_put(sdev);
3234 spin_lock_irqsave(ap->lock, flags); 3403 spin_lock_irqsave(ap->lock, flags);
3404 }
3235 } 3405 }
3236 3406
3237 spin_unlock_irqrestore(ap->lock, flags); 3407 spin_unlock_irqrestore(ap->lock, flags);
@@ -3359,7 +3529,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
3359int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap) 3529int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
3360{ 3530{
3361 ata_scsi_sdev_config(sdev); 3531 ata_scsi_sdev_config(sdev);
3362 ata_scsi_dev_config(sdev, ap->device); 3532 ata_scsi_dev_config(sdev, ap->link.device);
3363 return 0; 3533 return 0;
3364} 3534}
3365EXPORT_SYMBOL_GPL(ata_sas_slave_configure); 3535EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
@@ -3382,8 +3552,8 @@ int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
3382 3552
3383 ata_scsi_dump_cdb(ap, cmd); 3553 ata_scsi_dump_cdb(ap, cmd);
3384 3554
3385 if (likely(ata_scsi_dev_enabled(ap->device))) 3555 if (likely(ata_scsi_dev_enabled(ap->link.device)))
3386 rc = __ata_scsi_queuecmd(cmd, done, ap->device); 3556 rc = __ata_scsi_queuecmd(cmd, done, ap->link.device);
3387 else { 3557 else {
3388 cmd->result = (DID_BAD_TARGET << 16); 3558 cmd->result = (DID_BAD_TARGET << 16);
3389 done(cmd); 3559 done(cmd);
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 8023167bbb..026439e05a 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -64,46 +64,6 @@ u8 ata_irq_on(struct ata_port *ap)
64 return tmp; 64 return tmp;
65} 65}
66 66
67u8 ata_dummy_irq_on (struct ata_port *ap) { return 0; }
68
69/**
70 * ata_irq_ack - Acknowledge a device interrupt.
71 * @ap: Port on which interrupts are enabled.
72 *
73 * Wait up to 10 ms for legacy IDE device to become idle (BUSY
74 * or BUSY+DRQ clear). Obtain dma status and port status from
75 * device. Clear the interrupt. Return port status.
76 *
77 * LOCKING:
78 */
79
80u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
81{
82 unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
83 u8 host_stat = 0, post_stat = 0, status;
84
85 status = ata_busy_wait(ap, bits, 1000);
86 if (status & bits)
87 if (ata_msg_err(ap))
88 printk(KERN_ERR "abnormal status 0x%X\n", status);
89
90 if (ap->ioaddr.bmdma_addr) {
91 /* get controller status; clear intr, err bits */
92 host_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
93 iowrite8(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
94 ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
95
96 post_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
97 }
98 if (ata_msg_intr(ap))
99 printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
100 __FUNCTION__,
101 host_stat, post_stat, status);
102 return status;
103}
104
105u8 ata_dummy_irq_ack(struct ata_port *ap, unsigned int chk_drq) { return 0; }
106
107/** 67/**
108 * ata_tf_load - send taskfile registers to host controller 68 * ata_tf_load - send taskfile registers to host controller
109 * @ap: Port to which output is sent 69 * @ap: Port to which output is sent
@@ -445,7 +405,7 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
445 unsigned long flags; 405 unsigned long flags;
446 int thaw = 0; 406 int thaw = 0;
447 407
448 qc = __ata_qc_from_tag(ap, ap->active_tag); 408 qc = __ata_qc_from_tag(ap, ap->link.active_tag);
449 if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) 409 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
450 qc = NULL; 410 qc = NULL;
451 411
@@ -500,7 +460,7 @@ void ata_bmdma_error_handler(struct ata_port *ap)
500 ata_reset_fn_t hardreset; 460 ata_reset_fn_t hardreset;
501 461
502 hardreset = NULL; 462 hardreset = NULL;
503 if (sata_scr_valid(ap)) 463 if (sata_scr_valid(&ap->link))
504 hardreset = sata_std_hardreset; 464 hardreset = sata_std_hardreset;
505 465
506 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset, 466 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
@@ -607,6 +567,9 @@ int ata_pci_init_bmdma(struct ata_host *host)
607 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) && 567 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
608 (ioread8(bmdma + 2) & 0x80)) 568 (ioread8(bmdma + 2) & 0x80))
609 host->flags |= ATA_HOST_SIMPLEX; 569 host->flags |= ATA_HOST_SIMPLEX;
570
571 ata_port_desc(ap, "bmdma 0x%llx",
572 (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
610 } 573 }
611 574
612 return 0; 575 return 0;
@@ -674,6 +637,10 @@ int ata_pci_init_sff_host(struct ata_host *host)
674 ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS); 637 ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
675 ata_std_ports(&ap->ioaddr); 638 ata_std_ports(&ap->ioaddr);
676 639
640 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
641 (unsigned long long)pci_resource_start(pdev, base),
642 (unsigned long long)pci_resource_start(pdev, base + 1));
643
677 mask |= 1 << i; 644 mask |= 1 << i;
678 } 645 }
679 646
@@ -844,24 +811,30 @@ int ata_pci_init_one(struct pci_dev *pdev,
844 IRQF_SHARED, DRV_NAME, host); 811 IRQF_SHARED, DRV_NAME, host);
845 if (rc) 812 if (rc)
846 goto err_out; 813 goto err_out;
847 host->irq = pdev->irq; 814
815 ata_port_desc(host->ports[0], "irq %d", pdev->irq);
816 ata_port_desc(host->ports[1], "irq %d", pdev->irq);
848 } else { 817 } else {
849 if (!ata_port_is_dummy(host->ports[0])) { 818 if (!ata_port_is_dummy(host->ports[0])) {
850 host->irq = ATA_PRIMARY_IRQ(pdev); 819 rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
851 rc = devm_request_irq(dev, host->irq,
852 pi->port_ops->irq_handler, 820 pi->port_ops->irq_handler,
853 IRQF_SHARED, DRV_NAME, host); 821 IRQF_SHARED, DRV_NAME, host);
854 if (rc) 822 if (rc)
855 goto err_out; 823 goto err_out;
824
825 ata_port_desc(host->ports[0], "irq %d",
826 ATA_PRIMARY_IRQ(pdev));
856 } 827 }
857 828
858 if (!ata_port_is_dummy(host->ports[1])) { 829 if (!ata_port_is_dummy(host->ports[1])) {
859 host->irq2 = ATA_SECONDARY_IRQ(pdev); 830 rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
860 rc = devm_request_irq(dev, host->irq2,
861 pi->port_ops->irq_handler, 831 pi->port_ops->irq_handler,
862 IRQF_SHARED, DRV_NAME, host); 832 IRQF_SHARED, DRV_NAME, host);
863 if (rc) 833 if (rc)
864 goto err_out; 834 goto err_out;
835
836 ata_port_desc(host->ports[1], "irq %d",
837 ATA_SECONDARY_IRQ(pdev));
865 } 838 }
866 } 839 }
867 840
@@ -909,7 +882,7 @@ unsigned long ata_pci_default_filter(struct ata_device *adev, unsigned long xfer
909 /* Filter out DMA modes if the device has been configured by 882 /* Filter out DMA modes if the device has been configured by
910 the BIOS as PIO only */ 883 the BIOS as PIO only */
911 884
912 if (adev->ap->ioaddr.bmdma_addr == 0) 885 if (adev->link->ap->ioaddr.bmdma_addr == 0)
913 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 886 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
914 return xfer_mask; 887 return xfer_mask;
915} 888}
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 564cd234c8..90df58a3ed 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -29,6 +29,7 @@
29#define __LIBATA_H__ 29#define __LIBATA_H__
30 30
31#define DRV_NAME "libata" 31#define DRV_NAME "libata"
32#define DRV_VERSION "3.00" /* must be exactly four chars */
32 33
33struct ata_scsi_args { 34struct ata_scsi_args {
34 struct ata_device *dev; 35 struct ata_device *dev;
@@ -56,6 +57,7 @@ extern unsigned int ata_print_id;
56extern struct workqueue_struct *ata_aux_wq; 57extern struct workqueue_struct *ata_aux_wq;
57extern int atapi_enabled; 58extern int atapi_enabled;
58extern int atapi_dmadir; 59extern int atapi_dmadir;
60extern int atapi_passthru16;
59extern int libata_fua; 61extern int libata_fua;
60extern int libata_noacpi; 62extern int libata_noacpi;
61extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev); 63extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
@@ -67,21 +69,23 @@ extern void ata_dev_disable(struct ata_device *dev);
67extern void ata_port_flush_task(struct ata_port *ap); 69extern void ata_port_flush_task(struct ata_port *ap);
68extern unsigned ata_exec_internal(struct ata_device *dev, 70extern unsigned ata_exec_internal(struct ata_device *dev,
69 struct ata_taskfile *tf, const u8 *cdb, 71 struct ata_taskfile *tf, const u8 *cdb,
70 int dma_dir, void *buf, unsigned int buflen); 72 int dma_dir, void *buf, unsigned int buflen,
73 unsigned long timeout);
71extern unsigned ata_exec_internal_sg(struct ata_device *dev, 74extern unsigned ata_exec_internal_sg(struct ata_device *dev,
72 struct ata_taskfile *tf, const u8 *cdb, 75 struct ata_taskfile *tf, const u8 *cdb,
73 int dma_dir, struct scatterlist *sg, 76 int dma_dir, struct scatterlist *sg,
74 unsigned int n_elem); 77 unsigned int n_elem, unsigned long timeout);
75extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd); 78extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
76extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, 79extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
77 unsigned int flags, u16 *id); 80 unsigned int flags, u16 *id);
78extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags); 81extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags);
79extern int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags); 82extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
83 unsigned int readid_flags);
80extern int ata_dev_configure(struct ata_device *dev); 84extern int ata_dev_configure(struct ata_device *dev);
81extern int sata_down_spd_limit(struct ata_port *ap); 85extern int sata_down_spd_limit(struct ata_link *link);
82extern int sata_set_spd_needed(struct ata_port *ap); 86extern int sata_set_spd_needed(struct ata_link *link);
83extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel); 87extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
84extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev); 88extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
85extern void ata_sg_clean(struct ata_queued_cmd *qc); 89extern void ata_sg_clean(struct ata_queued_cmd *qc);
86extern void ata_qc_free(struct ata_queued_cmd *qc); 90extern void ata_qc_free(struct ata_queued_cmd *qc);
87extern void ata_qc_issue(struct ata_queued_cmd *qc); 91extern void ata_qc_issue(struct ata_queued_cmd *qc);
@@ -92,17 +96,21 @@ extern void ata_dev_select(struct ata_port *ap, unsigned int device,
92extern void swap_buf_le16(u16 *buf, unsigned int buf_words); 96extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
93extern int ata_flush_cache(struct ata_device *dev); 97extern int ata_flush_cache(struct ata_device *dev);
94extern void ata_dev_init(struct ata_device *dev); 98extern void ata_dev_init(struct ata_device *dev);
99extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp);
100extern int sata_link_init_spd(struct ata_link *link);
95extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg); 101extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
96extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); 102extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
97extern struct ata_port *ata_port_alloc(struct ata_host *host); 103extern struct ata_port *ata_port_alloc(struct ata_host *host);
98 104
99/* libata-acpi.c */ 105/* libata-acpi.c */
100#ifdef CONFIG_ATA_ACPI 106#ifdef CONFIG_ATA_ACPI
107extern void ata_acpi_associate_sata_port(struct ata_port *ap);
101extern void ata_acpi_associate(struct ata_host *host); 108extern void ata_acpi_associate(struct ata_host *host);
102extern int ata_acpi_on_suspend(struct ata_port *ap); 109extern int ata_acpi_on_suspend(struct ata_port *ap);
103extern void ata_acpi_on_resume(struct ata_port *ap); 110extern void ata_acpi_on_resume(struct ata_port *ap);
104extern int ata_acpi_on_devcfg(struct ata_device *adev); 111extern int ata_acpi_on_devcfg(struct ata_device *adev);
105#else 112#else
113static inline void ata_acpi_associate_sata_port(struct ata_port *ap) { }
106static inline void ata_acpi_associate(struct ata_host *host) { } 114static inline void ata_acpi_associate(struct ata_host *host) { }
107static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; } 115static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
108static inline void ata_acpi_on_resume(struct ata_port *ap) { } 116static inline void ata_acpi_on_resume(struct ata_port *ap) { }
@@ -114,6 +122,7 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
114 struct scsi_host_template *sht); 122 struct scsi_host_template *sht);
115extern void ata_scsi_scan_host(struct ata_port *ap, int sync); 123extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
116extern int ata_scsi_offline_dev(struct ata_device *dev); 124extern int ata_scsi_offline_dev(struct ata_device *dev);
125extern void ata_scsi_media_change_notify(struct ata_device *dev);
117extern void ata_scsi_hotplug(struct work_struct *work); 126extern void ata_scsi_hotplug(struct work_struct *work);
118extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, 127extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
119 unsigned int buflen); 128 unsigned int buflen);
@@ -147,12 +156,32 @@ extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
147extern void ata_scsi_dev_rescan(struct work_struct *work); 156extern void ata_scsi_dev_rescan(struct work_struct *work);
148extern int ata_bus_probe(struct ata_port *ap); 157extern int ata_bus_probe(struct ata_port *ap);
149 158
159/* libata-pmp.c */
160extern int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val);
161extern int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val);
162extern int sata_pmp_attach(struct ata_device *dev);
163
150/* libata-eh.c */ 164/* libata-eh.c */
151extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); 165extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
152extern void ata_scsi_error(struct Scsi_Host *host); 166extern void ata_scsi_error(struct Scsi_Host *host);
153extern void ata_port_wait_eh(struct ata_port *ap); 167extern void ata_port_wait_eh(struct ata_port *ap);
154extern void ata_eh_fastdrain_timerfn(unsigned long arg); 168extern void ata_eh_fastdrain_timerfn(unsigned long arg);
155extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc); 169extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
170extern void ata_eh_detach_dev(struct ata_device *dev);
171extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
172 unsigned int action);
173extern void ata_eh_done(struct ata_link *link, struct ata_device *dev,
174 unsigned int action);
175extern void ata_eh_autopsy(struct ata_port *ap);
176extern void ata_eh_report(struct ata_port *ap);
177extern int ata_eh_reset(struct ata_link *link, int classify,
178 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
179 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
180extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
181 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
182 ata_postreset_fn_t postreset,
183 struct ata_link **r_failed_disk);
184extern void ata_eh_finish(struct ata_port *ap);
156 185
157/* libata-sff.c */ 186/* libata-sff.c */
158extern u8 ata_irq_on(struct ata_port *ap); 187extern u8 ata_irq_on(struct ata_port *ap);
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
new file mode 100644
index 0000000000..5d3920f6fd
--- /dev/null
+++ b/drivers/ata/pata_acpi.c
@@ -0,0 +1,395 @@
1/*
2 * ACPI PATA driver
3 *
4 * (c) 2007 Red Hat <alan@redhat.com>
5 */
6
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <linux/pci.h>
10#include <linux/init.h>
11#include <linux/blkdev.h>
12#include <linux/delay.h>
13#include <linux/device.h>
14#include <scsi/scsi_host.h>
15#include <acpi/acpi_bus.h>
16#include <acpi/acnames.h>
17#include <acpi/acnamesp.h>
18#include <acpi/acparser.h>
19#include <acpi/acexcep.h>
20#include <acpi/acmacros.h>
21#include <acpi/actypes.h>
22
23#include <linux/libata.h>
24#include <linux/ata.h>
25
26#define DRV_NAME "pata_acpi"
27#define DRV_VERSION "0.2.3"
28
29struct pata_acpi {
30 struct ata_acpi_gtm gtm;
31 void *last;
32 unsigned long mask[2];
33};
34
35/**
36 * pacpi_pre_reset - check for 40/80 pin
37 * @ap: Port
38 * @deadline: deadline jiffies for the operation
39 *
40 * Perform the PATA port setup we need.
41 */
42
43static int pacpi_pre_reset(struct ata_link *link, unsigned long deadline)
44{
45 struct ata_port *ap = link->ap;
46 struct pata_acpi *acpi = ap->private_data;
47 if (ap->acpi_handle == NULL || ata_acpi_gtm(ap, &acpi->gtm) < 0)
48 return -ENODEV;
49
50 return ata_std_prereset(link, deadline);
51}
52
53/**
54 * pacpi_cable_detect - cable type detection
55 * @ap: port to detect
56 *
57 * Perform device specific cable detection
58 */
59
60static int pacpi_cable_detect(struct ata_port *ap)
61{
62 struct pata_acpi *acpi = ap->private_data;
63
64 if ((acpi->mask[0] | acpi->mask[1]) & (0xF8 << ATA_SHIFT_UDMA))
65 return ATA_CBL_PATA80;
66 else
67 return ATA_CBL_PATA40;
68}
69
70/**
71 * pacpi_error_handler - Setup and error handler
72 * @ap: Port to handle
73 *
74 * LOCKING:
75 * None (inherited from caller).
76 */
77
78static void pacpi_error_handler(struct ata_port *ap)
79{
80 return ata_bmdma_drive_eh(ap, pacpi_pre_reset, ata_std_softreset,
81 NULL, ata_std_postreset);
82}
83
84/* Welcome to ACPI, bring a bucket */
85static const unsigned int pio_cycle[7] = {
86 600, 383, 240, 180, 120, 100, 80
87};
88static const unsigned int mwdma_cycle[5] = {
89 480, 150, 120, 100, 80
90};
91static const unsigned int udma_cycle[7] = {
92 120, 80, 60, 45, 30, 20, 15
93};
94
95/**
96 * pacpi_discover_modes - filter non ACPI modes
97 * @adev: ATA device
98 * @mask: proposed modes
99 *
100 * Try the modes available and see which ones the ACPI method will
101 * set up sensibly. From this we get a mask of ACPI modes we can use
102 */
103
104static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device *adev)
105{
106 int unit = adev->devno;
107 struct pata_acpi *acpi = ap->private_data;
108 int i;
109 u32 t;
110 unsigned long mask = (0x7f << ATA_SHIFT_UDMA) | (0x7 << ATA_SHIFT_MWDMA) | (0x1F << ATA_SHIFT_PIO);
111
112 struct ata_acpi_gtm probe;
113
114 probe = acpi->gtm;
115
116 /* We always use the 0 slot for crap hardware */
117 if (!(probe.flags & 0x10))
118 unit = 0;
119
120 ata_acpi_gtm(ap, &probe);
121
122 /* Start by scanning for PIO modes */
123 for (i = 0; i < 7; i++) {
124 t = probe.drive[unit].pio;
125 if (t <= pio_cycle[i]) {
126 mask |= (2 << (ATA_SHIFT_PIO + i)) - 1;
127 break;
128 }
129 }
130
131 /* See if we have MWDMA or UDMA data. We don't bother with MWDMA
132 if UDMA is availabe as this means the BIOS set UDMA and our
133 error changedown if it works is UDMA to PIO anyway */
134 if (probe.flags & (1 << (2 * unit))) {
135 /* MWDMA */
136 for (i = 0; i < 5; i++) {
137 t = probe.drive[unit].dma;
138 if (t <= mwdma_cycle[i]) {
139 mask |= (2 << (ATA_SHIFT_MWDMA + i)) - 1;
140 break;
141 }
142 }
143 } else {
144 /* UDMA */
145 for (i = 0; i < 7; i++) {
146 t = probe.drive[unit].dma;
147 if (t <= udma_cycle[i]) {
148 mask |= (2 << (ATA_SHIFT_UDMA + i)) - 1;
149 break;
150 }
151 }
152 }
153 if (mask & (0xF8 << ATA_SHIFT_UDMA))
154 ap->cbl = ATA_CBL_PATA80;
155 return mask;
156}
157
158/**
159 * pacpi_mode_filter - mode filter for ACPI
160 * @adev: device
161 * @mask: mask of valid modes
162 *
163 * Filter the valid mode list according to our own specific rules, in
164 * this case the list of discovered valid modes obtained by ACPI probing
165 */
166
167static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask)
168{
169 struct pata_acpi *acpi = adev->link->ap->private_data;
170 return ata_pci_default_filter(adev, mask & acpi->mask[adev->devno]);
171}
172
173/**
174 * pacpi_set_piomode - set initial PIO mode data
175 * @ap: ATA interface
176 * @adev: ATA device
177 */
178
179static void pacpi_set_piomode(struct ata_port *ap, struct ata_device *adev)
180{
181 int unit = adev->devno;
182 struct pata_acpi *acpi = ap->private_data;
183
184 if(!(acpi->gtm.flags & 0x10))
185 unit = 0;
186
187 /* Now stuff the nS values into the structure */
188 acpi->gtm.drive[unit].pio = pio_cycle[adev->pio_mode - XFER_PIO_0];
189 ata_acpi_stm(ap, &acpi->gtm);
190 /* See what mode we actually got */
191 ata_acpi_gtm(ap, &acpi->gtm);
192}
193
194/**
195 * pacpi_set_dmamode - set initial DMA mode data
196 * @ap: ATA interface
197 * @adev: ATA device
198 */
199
200static void pacpi_set_dmamode(struct ata_port *ap, struct ata_device *adev)
201{
202 int unit = adev->devno;
203 struct pata_acpi *acpi = ap->private_data;
204
205 if(!(acpi->gtm.flags & 0x10))
206 unit = 0;
207
208 /* Now stuff the nS values into the structure */
209 if (adev->dma_mode >= XFER_UDMA_0) {
210 acpi->gtm.drive[unit].dma = udma_cycle[adev->dma_mode - XFER_UDMA_0];
211 acpi->gtm.flags |= (1 << (2 * unit));
212 } else {
213 acpi->gtm.drive[unit].dma = mwdma_cycle[adev->dma_mode - XFER_MW_DMA_0];
214 acpi->gtm.flags &= ~(1 << (2 * unit));
215 }
216 ata_acpi_stm(ap, &acpi->gtm);
217 /* See what mode we actually got */
218 ata_acpi_gtm(ap, &acpi->gtm);
219}
220
221/**
222 * pacpi_qc_issue_prot - command issue
223 * @qc: command pending
224 *
225 * Called when the libata layer is about to issue a command. We wrap
226 * this interface so that we can load the correct ATA timings if
227 * neccessary.
228 */
229
230static unsigned int pacpi_qc_issue_prot(struct ata_queued_cmd *qc)
231{
232 struct ata_port *ap = qc->ap;
233 struct ata_device *adev = qc->dev;
234 struct pata_acpi *acpi = ap->private_data;
235
236 if (acpi->gtm.flags & 0x10)
237 return ata_qc_issue_prot(qc);
238
239 if (adev != acpi->last) {
240 pacpi_set_piomode(ap, adev);
241 if (adev->dma_mode)
242 pacpi_set_dmamode(ap, adev);
243 acpi->last = adev;
244 }
245 return ata_qc_issue_prot(qc);
246}
247
248/**
249 * pacpi_port_start - port setup
250 * @ap: ATA port being set up
251 *
252 * Use the port_start hook to maintain private control structures
253 */
254
255static int pacpi_port_start(struct ata_port *ap)
256{
257 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
258 struct pata_acpi *acpi;
259
260 int ret;
261
262 if (ap->acpi_handle == NULL)
263 return -ENODEV;
264
265 acpi = ap->private_data = devm_kzalloc(&pdev->dev, sizeof(struct pata_acpi), GFP_KERNEL);
266 if (ap->private_data == NULL)
267 return -ENOMEM;
268 acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]);
269 acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]);
270 ret = ata_sff_port_start(ap);
271 if (ret < 0)
272 return ret;
273
274 return ret;
275}
276
/* SCSI host template: stock libata SCSI glue, no driver-specific quirks */
static struct scsi_host_template pacpi_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	/* Use standard CHS mapping rules */
	.bios_param		= ata_std_bios_param,
};
295
/*
 * Port operations vector: generic SFF/BMDMA helpers everywhere except
 * the ACPI-aware hooks supplied by this driver (mode setting and
 * filtering, error handling, cable detect, command issue, port start).
 */
static const struct ata_port_operations pacpi_ops = {
	.set_piomode		= pacpi_set_piomode,
	.set_dmamode		= pacpi_set_dmamode,
	.mode_filter		= pacpi_mode_filter,

	/* Task file is PCI ATA format, use helpers */
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= pacpi_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.cable_detect		= pacpi_cable_detect,

	/* BMDMA handling is PCI ATA format, use helpers */
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= pacpi_qc_issue_prot,
	.data_xfer		= ata_data_xfer,

	/* Timeout handling */
	.irq_handler		= ata_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,

	/* Generic PATA PCI ATA helpers */
	.port_start		= pacpi_port_start,
};
331
332
333/**
334 * pacpi_init_one - Register ACPI ATA PCI device with kernel services
335 * @pdev: PCI device to register
336 * @ent: Entry in pacpi_pci_tbl matching with @pdev
337 *
338 * Called from kernel PCI layer.
339 *
340 * LOCKING:
341 * Inherited from PCI layer (may sleep).
342 *
343 * RETURNS:
344 * Zero on success, or -ERRNO value.
345 */
346
347static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
348{
349 static const struct ata_port_info info = {
350 .sht = &pacpi_sht,
351 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
352
353 .pio_mask = 0x1f,
354 .mwdma_mask = 0x07,
355 .udma_mask = 0x7f,
356
357 .port_ops = &pacpi_ops,
358 };
359 const struct ata_port_info *ppi[] = { &info, NULL };
360 return ata_pci_init_one(pdev, ppi);
361}
362
/*
 * Match any vendor/device whose class is PCI IDE storage; the
 * 0xFFFFFF00 mask ignores the low byte (programming interface).
 */
static const struct pci_device_id pacpi_pci_tbl[] = {
	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
	{ }	/* terminate list */
};
367
/* PCI driver glue; removal and suspend/resume use stock libata helpers */
static struct pci_driver pacpi_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= pacpi_pci_tbl,
	.probe		= pacpi_init_one,
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};
376
/* Module load: register the PCI driver with the PCI core */
static int __init pacpi_init(void)
{
	return pci_register_driver(&pacpi_pci_driver);
}
381
/* Module unload: unregister the PCI driver */
static void __exit pacpi_exit(void)
{
	pci_unregister_driver(&pacpi_pci_driver);
}
386
387module_init(pacpi_init);
388module_exit(pacpi_exit);
389
390MODULE_AUTHOR("Alan Cox");
391MODULE_DESCRIPTION("SCSI low-level driver for ATA in ACPI mode");
392MODULE_LICENSE("GPL");
393MODULE_DEVICE_TABLE(pci, pacpi_pci_tbl);
394MODULE_VERSION(DRV_VERSION);
395
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 32a10c99c0..364534e7af 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -305,7 +305,6 @@ static struct scsi_host_template ali_sht = {
305 */ 305 */
306 306
307static struct ata_port_operations ali_early_port_ops = { 307static struct ata_port_operations ali_early_port_ops = {
308 .port_disable = ata_port_disable,
309 .set_piomode = ali_set_piomode, 308 .set_piomode = ali_set_piomode,
310 .tf_load = ata_tf_load, 309 .tf_load = ata_tf_load,
311 .tf_read = ata_tf_read, 310 .tf_read = ata_tf_read,
@@ -327,9 +326,8 @@ static struct ata_port_operations ali_early_port_ops = {
327 .irq_handler = ata_interrupt, 326 .irq_handler = ata_interrupt,
328 .irq_clear = ata_bmdma_irq_clear, 327 .irq_clear = ata_bmdma_irq_clear,
329 .irq_on = ata_irq_on, 328 .irq_on = ata_irq_on,
330 .irq_ack = ata_irq_ack,
331 329
332 .port_start = ata_port_start, 330 .port_start = ata_sff_port_start,
333}; 331};
334 332
335/* 333/*
@@ -337,8 +335,6 @@ static struct ata_port_operations ali_early_port_ops = {
337 * detect 335 * detect
338 */ 336 */
339static struct ata_port_operations ali_20_port_ops = { 337static struct ata_port_operations ali_20_port_ops = {
340 .port_disable = ata_port_disable,
341
342 .set_piomode = ali_set_piomode, 338 .set_piomode = ali_set_piomode,
343 .set_dmamode = ali_set_dmamode, 339 .set_dmamode = ali_set_dmamode,
344 .mode_filter = ali_20_filter, 340 .mode_filter = ali_20_filter,
@@ -369,16 +365,14 @@ static struct ata_port_operations ali_20_port_ops = {
369 .irq_handler = ata_interrupt, 365 .irq_handler = ata_interrupt,
370 .irq_clear = ata_bmdma_irq_clear, 366 .irq_clear = ata_bmdma_irq_clear,
371 .irq_on = ata_irq_on, 367 .irq_on = ata_irq_on,
372 .irq_ack = ata_irq_ack,
373 368
374 .port_start = ata_port_start, 369 .port_start = ata_sff_port_start,
375}; 370};
376 371
377/* 372/*
378 * Port operations for DMA capable ALi with cable detect 373 * Port operations for DMA capable ALi with cable detect
379 */ 374 */
380static struct ata_port_operations ali_c2_port_ops = { 375static struct ata_port_operations ali_c2_port_ops = {
381 .port_disable = ata_port_disable,
382 .set_piomode = ali_set_piomode, 376 .set_piomode = ali_set_piomode,
383 .set_dmamode = ali_set_dmamode, 377 .set_dmamode = ali_set_dmamode,
384 .mode_filter = ata_pci_default_filter, 378 .mode_filter = ata_pci_default_filter,
@@ -408,16 +402,14 @@ static struct ata_port_operations ali_c2_port_ops = {
408 .irq_handler = ata_interrupt, 402 .irq_handler = ata_interrupt,
409 .irq_clear = ata_bmdma_irq_clear, 403 .irq_clear = ata_bmdma_irq_clear,
410 .irq_on = ata_irq_on, 404 .irq_on = ata_irq_on,
411 .irq_ack = ata_irq_ack,
412 405
413 .port_start = ata_port_start, 406 .port_start = ata_sff_port_start,
414}; 407};
415 408
416/* 409/*
417 * Port operations for DMA capable ALi with cable detect and LBA48 410 * Port operations for DMA capable ALi with cable detect and LBA48
418 */ 411 */
419static struct ata_port_operations ali_c5_port_ops = { 412static struct ata_port_operations ali_c5_port_ops = {
420 .port_disable = ata_port_disable,
421 .set_piomode = ali_set_piomode, 413 .set_piomode = ali_set_piomode,
422 .set_dmamode = ali_set_dmamode, 414 .set_dmamode = ali_set_dmamode,
423 .mode_filter = ata_pci_default_filter, 415 .mode_filter = ata_pci_default_filter,
@@ -446,9 +438,8 @@ static struct ata_port_operations ali_c5_port_ops = {
446 .irq_handler = ata_interrupt, 438 .irq_handler = ata_interrupt,
447 .irq_clear = ata_bmdma_irq_clear, 439 .irq_clear = ata_bmdma_irq_clear,
448 .irq_on = ata_irq_on, 440 .irq_on = ata_irq_on,
449 .irq_ack = ata_irq_ack,
450 441
451 .port_start = ata_port_start, 442 .port_start = ata_sff_port_start,
452}; 443};
453 444
454 445
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 04048fcf63..c5779ad4ab 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -119,27 +119,28 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse
119} 119}
120 120
121/** 121/**
122 * amd_probe_init - perform reset handling 122 * amd_pre_reset - perform reset handling
123 * @ap: ATA port 123 * @link: ATA link
124 * @deadline: deadline jiffies for the operation 124 * @deadline: deadline jiffies for the operation
125 * 125 *
126 * Reset sequence checking enable bits to see which ports are 126 * Reset sequence checking enable bits to see which ports are
127 * active. 127 * active.
128 */ 128 */
129 129
130static int amd_pre_reset(struct ata_port *ap, unsigned long deadline) 130static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
131{ 131{
132 static const struct pci_bits amd_enable_bits[] = { 132 static const struct pci_bits amd_enable_bits[] = {
133 { 0x40, 1, 0x02, 0x02 }, 133 { 0x40, 1, 0x02, 0x02 },
134 { 0x40, 1, 0x01, 0x01 } 134 { 0x40, 1, 0x01, 0x01 }
135 }; 135 };
136 136
137 struct ata_port *ap = link->ap;
137 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 138 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
138 139
139 if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no])) 140 if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
140 return -ENOENT; 141 return -ENOENT;
141 142
142 return ata_std_prereset(ap, deadline); 143 return ata_std_prereset(link, deadline);
143} 144}
144 145
145static void amd_error_handler(struct ata_port *ap) 146static void amd_error_handler(struct ata_port *ap)
@@ -221,25 +222,26 @@ static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
221 222
222/** 223/**
223 * nv_probe_init - cable detection 224 * nv_probe_init - cable detection
224 * @ap: ATA port 225 * @lin: ATA link
225 * 226 *
226 * Perform cable detection. The BIOS stores this in PCI config 227 * Perform cable detection. The BIOS stores this in PCI config
227 * space for us. 228 * space for us.
228 */ 229 */
229 230
230static int nv_pre_reset(struct ata_port *ap, unsigned long deadline) 231static int nv_pre_reset(struct ata_link *link, unsigned long deadline)
231{ 232{
232 static const struct pci_bits nv_enable_bits[] = { 233 static const struct pci_bits nv_enable_bits[] = {
233 { 0x50, 1, 0x02, 0x02 }, 234 { 0x50, 1, 0x02, 0x02 },
234 { 0x50, 1, 0x01, 0x01 } 235 { 0x50, 1, 0x01, 0x01 }
235 }; 236 };
236 237
238 struct ata_port *ap = link->ap;
237 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 239 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
238 240
239 if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no])) 241 if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
240 return -ENOENT; 242 return -ENOENT;
241 243
242 return ata_std_prereset(ap, deadline); 244 return ata_std_prereset(link, deadline);
243} 245}
244 246
245static void nv_error_handler(struct ata_port *ap) 247static void nv_error_handler(struct ata_port *ap)
@@ -268,6 +270,9 @@ static int nv_cable_detect(struct ata_port *ap)
268 pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma); 270 pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
269 if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400) 271 if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
270 cbl = ATA_CBL_PATA80; 272 cbl = ATA_CBL_PATA80;
273 /* And a triple check across suspend/resume with ACPI around */
274 if (ata_acpi_cbl_80wire(ap))
275 cbl = ATA_CBL_PATA80;
271 return cbl; 276 return cbl;
272} 277}
273 278
@@ -327,7 +332,6 @@ static struct scsi_host_template amd_sht = {
327}; 332};
328 333
329static struct ata_port_operations amd33_port_ops = { 334static struct ata_port_operations amd33_port_ops = {
330 .port_disable = ata_port_disable,
331 .set_piomode = amd33_set_piomode, 335 .set_piomode = amd33_set_piomode,
332 .set_dmamode = amd33_set_dmamode, 336 .set_dmamode = amd33_set_dmamode,
333 .mode_filter = ata_pci_default_filter, 337 .mode_filter = ata_pci_default_filter,
@@ -356,13 +360,11 @@ static struct ata_port_operations amd33_port_ops = {
356 .irq_handler = ata_interrupt, 360 .irq_handler = ata_interrupt,
357 .irq_clear = ata_bmdma_irq_clear, 361 .irq_clear = ata_bmdma_irq_clear,
358 .irq_on = ata_irq_on, 362 .irq_on = ata_irq_on,
359 .irq_ack = ata_irq_ack,
360 363
361 .port_start = ata_port_start, 364 .port_start = ata_sff_port_start,
362}; 365};
363 366
364static struct ata_port_operations amd66_port_ops = { 367static struct ata_port_operations amd66_port_ops = {
365 .port_disable = ata_port_disable,
366 .set_piomode = amd66_set_piomode, 368 .set_piomode = amd66_set_piomode,
367 .set_dmamode = amd66_set_dmamode, 369 .set_dmamode = amd66_set_dmamode,
368 .mode_filter = ata_pci_default_filter, 370 .mode_filter = ata_pci_default_filter,
@@ -391,13 +393,11 @@ static struct ata_port_operations amd66_port_ops = {
391 .irq_handler = ata_interrupt, 393 .irq_handler = ata_interrupt,
392 .irq_clear = ata_bmdma_irq_clear, 394 .irq_clear = ata_bmdma_irq_clear,
393 .irq_on = ata_irq_on, 395 .irq_on = ata_irq_on,
394 .irq_ack = ata_irq_ack,
395 396
396 .port_start = ata_port_start, 397 .port_start = ata_sff_port_start,
397}; 398};
398 399
399static struct ata_port_operations amd100_port_ops = { 400static struct ata_port_operations amd100_port_ops = {
400 .port_disable = ata_port_disable,
401 .set_piomode = amd100_set_piomode, 401 .set_piomode = amd100_set_piomode,
402 .set_dmamode = amd100_set_dmamode, 402 .set_dmamode = amd100_set_dmamode,
403 .mode_filter = ata_pci_default_filter, 403 .mode_filter = ata_pci_default_filter,
@@ -426,13 +426,11 @@ static struct ata_port_operations amd100_port_ops = {
426 .irq_handler = ata_interrupt, 426 .irq_handler = ata_interrupt,
427 .irq_clear = ata_bmdma_irq_clear, 427 .irq_clear = ata_bmdma_irq_clear,
428 .irq_on = ata_irq_on, 428 .irq_on = ata_irq_on,
429 .irq_ack = ata_irq_ack,
430 429
431 .port_start = ata_port_start, 430 .port_start = ata_sff_port_start,
432}; 431};
433 432
434static struct ata_port_operations amd133_port_ops = { 433static struct ata_port_operations amd133_port_ops = {
435 .port_disable = ata_port_disable,
436 .set_piomode = amd133_set_piomode, 434 .set_piomode = amd133_set_piomode,
437 .set_dmamode = amd133_set_dmamode, 435 .set_dmamode = amd133_set_dmamode,
438 .mode_filter = ata_pci_default_filter, 436 .mode_filter = ata_pci_default_filter,
@@ -461,13 +459,11 @@ static struct ata_port_operations amd133_port_ops = {
461 .irq_handler = ata_interrupt, 459 .irq_handler = ata_interrupt,
462 .irq_clear = ata_bmdma_irq_clear, 460 .irq_clear = ata_bmdma_irq_clear,
463 .irq_on = ata_irq_on, 461 .irq_on = ata_irq_on,
464 .irq_ack = ata_irq_ack,
465 462
466 .port_start = ata_port_start, 463 .port_start = ata_sff_port_start,
467}; 464};
468 465
469static struct ata_port_operations nv100_port_ops = { 466static struct ata_port_operations nv100_port_ops = {
470 .port_disable = ata_port_disable,
471 .set_piomode = nv100_set_piomode, 467 .set_piomode = nv100_set_piomode,
472 .set_dmamode = nv100_set_dmamode, 468 .set_dmamode = nv100_set_dmamode,
473 .mode_filter = ata_pci_default_filter, 469 .mode_filter = ata_pci_default_filter,
@@ -496,13 +492,11 @@ static struct ata_port_operations nv100_port_ops = {
496 .irq_handler = ata_interrupt, 492 .irq_handler = ata_interrupt,
497 .irq_clear = ata_bmdma_irq_clear, 493 .irq_clear = ata_bmdma_irq_clear,
498 .irq_on = ata_irq_on, 494 .irq_on = ata_irq_on,
499 .irq_ack = ata_irq_ack,
500 495
501 .port_start = ata_port_start, 496 .port_start = ata_sff_port_start,
502}; 497};
503 498
504static struct ata_port_operations nv133_port_ops = { 499static struct ata_port_operations nv133_port_ops = {
505 .port_disable = ata_port_disable,
506 .set_piomode = nv133_set_piomode, 500 .set_piomode = nv133_set_piomode,
507 .set_dmamode = nv133_set_dmamode, 501 .set_dmamode = nv133_set_dmamode,
508 .mode_filter = ata_pci_default_filter, 502 .mode_filter = ata_pci_default_filter,
@@ -531,9 +525,8 @@ static struct ata_port_operations nv133_port_ops = {
531 .irq_handler = ata_interrupt, 525 .irq_handler = ata_interrupt,
532 .irq_clear = ata_bmdma_irq_clear, 526 .irq_clear = ata_bmdma_irq_clear,
533 .irq_on = ata_irq_on, 527 .irq_on = ata_irq_on,
534 .irq_ack = ata_irq_ack,
535 528
536 .port_start = ata_port_start, 529 .port_start = ata_sff_port_start,
537}; 530};
538 531
539static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 532static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index b5352ebece..d421831032 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -40,8 +40,9 @@
40 40
41static int clock = 0; 41static int clock = 0;
42 42
43static int artop6210_pre_reset(struct ata_port *ap, unsigned long deadline) 43static int artop6210_pre_reset(struct ata_link *link, unsigned long deadline)
44{ 44{
45 struct ata_port *ap = link->ap;
45 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 46 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
46 const struct pci_bits artop_enable_bits[] = { 47 const struct pci_bits artop_enable_bits[] = {
47 { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */ 48 { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
@@ -51,7 +52,7 @@ static int artop6210_pre_reset(struct ata_port *ap, unsigned long deadline)
51 if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) 52 if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
52 return -ENOENT; 53 return -ENOENT;
53 54
54 return ata_std_prereset(ap, deadline); 55 return ata_std_prereset(link, deadline);
55} 56}
56 57
57/** 58/**
@@ -71,27 +72,28 @@ static void artop6210_error_handler(struct ata_port *ap)
71 72
72/** 73/**
73 * artop6260_pre_reset - check for 40/80 pin 74 * artop6260_pre_reset - check for 40/80 pin
74 * @ap: Port 75 * @link: link
75 * @deadline: deadline jiffies for the operation 76 * @deadline: deadline jiffies for the operation
76 * 77 *
77 * The ARTOP hardware reports the cable detect bits in register 0x49. 78 * The ARTOP hardware reports the cable detect bits in register 0x49.
78 * Nothing complicated needed here. 79 * Nothing complicated needed here.
79 */ 80 */
80 81
81static int artop6260_pre_reset(struct ata_port *ap, unsigned long deadline) 82static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
82{ 83{
83 static const struct pci_bits artop_enable_bits[] = { 84 static const struct pci_bits artop_enable_bits[] = {
84 { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */ 85 { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
85 { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */ 86 { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */
86 }; 87 };
87 88
89 struct ata_port *ap = link->ap;
88 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 90 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
89 91
90 /* Odd numbered device ids are the units with enable bits (the -R cards) */ 92 /* Odd numbered device ids are the units with enable bits (the -R cards) */
91 if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) 93 if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
92 return -ENOENT; 94 return -ENOENT;
93 95
94 return ata_std_prereset(ap, deadline); 96 return ata_std_prereset(link, deadline);
95} 97}
96 98
97/** 99/**
@@ -330,7 +332,6 @@ static struct scsi_host_template artop_sht = {
330}; 332};
331 333
332static const struct ata_port_operations artop6210_ops = { 334static const struct ata_port_operations artop6210_ops = {
333 .port_disable = ata_port_disable,
334 .set_piomode = artop6210_set_piomode, 335 .set_piomode = artop6210_set_piomode,
335 .set_dmamode = artop6210_set_dmamode, 336 .set_dmamode = artop6210_set_dmamode,
336 .mode_filter = ata_pci_default_filter, 337 .mode_filter = ata_pci_default_filter,
@@ -359,13 +360,11 @@ static const struct ata_port_operations artop6210_ops = {
359 .irq_handler = ata_interrupt, 360 .irq_handler = ata_interrupt,
360 .irq_clear = ata_bmdma_irq_clear, 361 .irq_clear = ata_bmdma_irq_clear,
361 .irq_on = ata_irq_on, 362 .irq_on = ata_irq_on,
362 .irq_ack = ata_irq_ack,
363 363
364 .port_start = ata_port_start, 364 .port_start = ata_sff_port_start,
365}; 365};
366 366
367static const struct ata_port_operations artop6260_ops = { 367static const struct ata_port_operations artop6260_ops = {
368 .port_disable = ata_port_disable,
369 .set_piomode = artop6260_set_piomode, 368 .set_piomode = artop6260_set_piomode,
370 .set_dmamode = artop6260_set_dmamode, 369 .set_dmamode = artop6260_set_dmamode,
371 370
@@ -392,9 +391,8 @@ static const struct ata_port_operations artop6260_ops = {
392 .irq_handler = ata_interrupt, 391 .irq_handler = ata_interrupt,
393 .irq_clear = ata_bmdma_irq_clear, 392 .irq_clear = ata_bmdma_irq_clear,
394 .irq_on = ata_irq_on, 393 .irq_on = ata_irq_on,
395 .irq_ack = ata_irq_ack,
396 394
397 .port_start = ata_port_start, 395 .port_start = ata_sff_port_start,
398}; 396};
399 397
400 398
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
new file mode 100644
index 0000000000..bb250a48e2
--- /dev/null
+++ b/drivers/ata/pata_at32.c
@@ -0,0 +1,441 @@
1/*
2 * AVR32 SMC/CFC PATA Driver
3 *
4 * Copyright (C) 2007 Atmel Norway
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 */
10
11#define DEBUG
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/device.h>
17#include <linux/platform_device.h>
18#include <linux/delay.h>
19#include <linux/interrupt.h>
20#include <linux/irq.h>
21#include <scsi/scsi_host.h>
22#include <linux/ata.h>
23#include <linux/libata.h>
24#include <linux/err.h>
25#include <linux/io.h>
26
27#include <asm/arch/board.h>
28#include <asm/arch/smc.h>
29
30#define DRV_NAME "pata_at32"
31#define DRV_VERSION "0.0.2"
32
33/*
34 * CompactFlash controller memory layout relative to the base address:
35 *
36 * Attribute memory: 0000 0000 -> 003f ffff
37 * Common memory: 0040 0000 -> 007f ffff
38 * I/O memory: 0080 0000 -> 00bf ffff
39 * True IDE Mode: 00c0 0000 -> 00df ffff
40 * Alt IDE Mode: 00e0 0000 -> 00ff ffff
41 *
42 * Only True IDE and Alt True IDE mode are needed for this driver.
43 *
44 * True IDE mode => CS0 = 0, CS1 = 1 (cmd, error, stat, etc)
45 * Alt True IDE mode => CS0 = 1, CS1 = 0 (ctl, alt_stat)
46 */
47#define CF_IDE_OFFSET 0x00c00000
48#define CF_ALT_IDE_OFFSET 0x00e00000
49#define CF_RES_SIZE 2048
50
51/*
52 * Define DEBUG_BUS if you are doing debugging of your own EBI -> PATA
53 * adaptor with a logic analyzer or similar.
54 */
55#undef DEBUG_BUS
56
57/*
58 * ATA PIO modes
59 *
60 * Name | Mb/s | Min cycle time | Mask
61 * --------+-------+----------------+--------
62 * Mode 0 | 3.3 | 600 ns | 0x01
63 * Mode 1 | 5.2 | 383 ns | 0x03
64 * Mode 2 | 8.3 | 240 ns | 0x07
65 * Mode 3 | 11.1 | 180 ns | 0x0f
66 * Mode 4 | 16.7 | 120 ns | 0x1f
67 */
68#define PIO_MASK (0x1f)
69
70/*
71 * Struct containing private information about device.
72 */
73struct at32_ide_info {
74 unsigned int irq;
75 struct resource res_ide;
76 struct resource res_alt;
77 void __iomem *ide_addr;
78 void __iomem *alt_addr;
79 unsigned int cs;
80 struct smc_config smc;
81};
82
/*
 * Setup SMC (static memory controller) for the given ATA timing.
 *
 * Maps the ATA 8-bit taskfile timings onto the SMC read cycle, setup
 * and pulse registers, stretching the total cycle when the recovery
 * time would fall below an empirically determined minimum.  Write
 * timings are programmed identical to the read timings.  Returns the
 * smc_set_configuration() result (0 on success).
 */
static int pata_at32_setup_timing(struct device *dev,
				  struct at32_ide_info *info,
				  const struct ata_timing *timing)
{
	/* These two values are found through testing */
	const int min_recover = 25;
	const int ncs_hold = 15;

	struct smc_config *smc = &info->smc;

	int active;
	int recover;

	/* Total cycle time */
	smc->read_cycle = timing->cyc8b;

	/* DIOR <= CFIOR timings */
	smc->nrd_setup = timing->setup;
	smc->nrd_pulse = timing->act8b;

	/* Compute recover, extend total cycle if needed */
	active = smc->nrd_setup + smc->nrd_pulse;
	recover = smc->read_cycle - active;

	if (recover < min_recover) {
		smc->read_cycle = active + min_recover;
		recover = min_recover;
	}

	/* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */
	smc->ncs_read_setup = 0;
	smc->ncs_read_pulse = active + ncs_hold;

	/* Write timings same as read timings */
	smc->write_cycle = smc->read_cycle;
	smc->nwe_setup = smc->nrd_setup;
	smc->nwe_pulse = smc->nrd_pulse;
	smc->ncs_write_setup = smc->ncs_read_setup;
	smc->ncs_write_pulse = smc->ncs_read_pulse;

	/* Do some debugging output */
	dev_dbg(dev, "SMC: C=%d S=%d P=%d R=%d NCSS=%d NCSP=%d NCSR=%d\n",
		smc->read_cycle, smc->nrd_setup, smc->nrd_pulse,
		recover, smc->ncs_read_setup, smc->ncs_read_pulse,
		smc->read_cycle - smc->ncs_read_pulse);

	/* Finally, configure the SMC */
	return smc_set_configuration(info->cs, smc);
}
135
136/*
137 * Procedures for libATA.
138 */
139static void pata_at32_set_piomode(struct ata_port *ap, struct ata_device *adev)
140{
141 struct ata_timing timing;
142 struct at32_ide_info *info = ap->host->private_data;
143
144 int ret;
145
146 /* Compute ATA timing */
147 ret = ata_timing_compute(adev, adev->pio_mode, &timing, 1000, 0);
148 if (ret) {
149 dev_warn(ap->dev, "Failed to compute ATA timing %d\n", ret);
150 return;
151 }
152
153 /* Setup SMC to ATA timing */
154 ret = pata_at32_setup_timing(ap->dev, info, &timing);
155 if (ret) {
156 dev_warn(ap->dev, "Failed to setup ATA timing %d\n", ret);
157 return;
158 }
159}
160
/* BMDMA interrupt-clear hook: intentionally empty */
static void pata_at32_irq_clear(struct ata_port *ap)
{
	/* No DMA controller yet */
}
165
/* SCSI host template: stock libata SCSI glue, standard CHS mapping */
static struct scsi_host_template at32_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
183
/*
 * Port operations: PIO-only operation via the generic taskfile helpers;
 * only set_piomode and irq_clear are driver-specific.
 */
static struct ata_port_operations at32_port_ops = {
	.port_disable		= ata_port_disable,
	.set_piomode		= pata_at32_set_piomode,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,

	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	/* CompactFlash in True IDE mode: always treated as 40-wire */
	.cable_detect		= ata_cable_40wire,

	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,

	.data_xfer		= ata_data_xfer,

	.irq_clear		= pata_at32_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.port_start		= ata_sff_port_start,
};
210
/*
 * Allocate a one-port ATA host, wire up the taskfile register addresses
 * (each doubled -- see the comment below on the hardwired address lines)
 * and activate the host on the board interrupt.  Returns 0 on success
 * or a negative errno.
 */
static int __init pata_at32_init_one(struct device *dev,
				     struct at32_ide_info *info)
{
	struct ata_host *host;
	struct ata_port *ap;

	host = ata_host_alloc(dev, 1);
	if (!host)
		return -ENOMEM;

	ap = host->ports[0];

	/* Setup ATA bindings */
	ap->ops	  = &at32_port_ops;
	ap->pio_mask = PIO_MASK;
	ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS
		| ATA_FLAG_PIO_POLLING;

	/*
	 * Since all 8-bit taskfile transfers has to go on the lower
	 * byte of the data bus and there is a bug in the SMC that
	 * makes it impossible to alter the bus width during runtime,
	 * we need to hardwire the address signals as follows:
	 *
	 *	A_IDE(2:0) <= A_EBI(3:1)
	 *
	 * This makes all addresses on the EBI even, thus all data
	 * will be on the lower byte of the data bus.  All addresses
	 * used by libATA need to be altered according to this.
	 */
	ap->ioaddr.altstatus_addr = info->alt_addr + (0x06 << 1);
	ap->ioaddr.ctl_addr	  = info->alt_addr + (0x06 << 1);

	ap->ioaddr.data_addr	  = info->ide_addr + (ATA_REG_DATA << 1);
	ap->ioaddr.error_addr	  = info->ide_addr + (ATA_REG_ERR << 1);
	ap->ioaddr.feature_addr	  = info->ide_addr + (ATA_REG_FEATURE << 1);
	ap->ioaddr.nsect_addr	  = info->ide_addr + (ATA_REG_NSECT << 1);
	ap->ioaddr.lbal_addr	  = info->ide_addr + (ATA_REG_LBAL << 1);
	ap->ioaddr.lbam_addr	  = info->ide_addr + (ATA_REG_LBAM << 1);
	ap->ioaddr.lbah_addr	  = info->ide_addr + (ATA_REG_LBAH << 1);
	ap->ioaddr.device_addr	  = info->ide_addr + (ATA_REG_DEVICE << 1);
	ap->ioaddr.status_addr	  = info->ide_addr + (ATA_REG_STATUS << 1);
	ap->ioaddr.command_addr	  = info->ide_addr + (ATA_REG_CMD << 1);

	/* Set info as private data of ATA host */
	host->private_data = info;

	/* Register ATA device and return */
	return ata_host_activate(host, info->irq, ata_interrupt,
				 IRQF_SHARED | IRQF_TRIGGER_RISING,
				 &at32_sht);
}
263
/*
 * This function may come in handy for people analyzing their own
 * EBI -> PATA adaptors: it toggles known patterns (0xff/0x00) on the
 * control, taskfile and data addresses so the bus activity can be
 * observed with a logic analyzer.  Compiled only when DEBUG_BUS is
 * defined (see top of file).
 */
#ifdef DEBUG_BUS

static void __init pata_at32_debug_bus(struct device *dev,
				       struct at32_ide_info *info)
{
	const int d1 = 0xff;
	const int d2 = 0x00;

	int i;

	/* Write 8-bit values (registers) */
	iowrite8(d1, info->alt_addr + (0x06 << 1));
	iowrite8(d2, info->alt_addr + (0x06 << 1));

	for (i = 0; i < 8; i++) {
		iowrite8(d1, info->ide_addr + (i << 1));
		iowrite8(d2, info->ide_addr + (i << 1));
	}

	/* Write 16 bit values (data) */
	iowrite16(d1, info->ide_addr);
	iowrite16(d1 << 8, info->ide_addr);

	iowrite16(d1, info->ide_addr);
	iowrite16(d1 << 8, info->ide_addr);
}

#endif
296
297static int __init pata_at32_probe(struct platform_device *pdev)
298{
299 const struct ata_timing initial_timing =
300 {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
301
302 struct device *dev = &pdev->dev;
303 struct at32_ide_info *info;
304 struct ide_platform_data *board = pdev->dev.platform_data;
305 struct resource *res;
306
307 int irq;
308 int ret;
309
310 if (!board)
311 return -ENXIO;
312
313 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
314 if (!res)
315 return -ENXIO;
316
317 /* Retrive IRQ */
318 irq = platform_get_irq(pdev, 0);
319 if (irq < 0)
320 return irq;
321
322 /* Setup struct containing private infomation */
323 info = kzalloc(sizeof(struct at32_ide_info), GFP_KERNEL);
324 if (!info)
325 return -ENOMEM;
326
327 memset(info, 0, sizeof(struct at32_ide_info));
328
329 info->irq = irq;
330 info->cs = board->cs;
331
332 /* Request memory resources */
333 info->res_ide.start = res->start + CF_IDE_OFFSET;
334 info->res_ide.end = info->res_ide.start + CF_RES_SIZE - 1;
335 info->res_ide.name = "ide";
336 info->res_ide.flags = IORESOURCE_MEM;
337
338 ret = request_resource(res, &info->res_ide);
339 if (ret)
340 goto err_req_res_ide;
341
342 info->res_alt.start = res->start + CF_ALT_IDE_OFFSET;
343 info->res_alt.end = info->res_alt.start + CF_RES_SIZE - 1;
344 info->res_alt.name = "alt";
345 info->res_alt.flags = IORESOURCE_MEM;
346
347 ret = request_resource(res, &info->res_alt);
348 if (ret)
349 goto err_req_res_alt;
350
351 /* Setup non-timing elements of SMC */
352 info->smc.bus_width = 2; /* 16 bit data bus */
353 info->smc.nrd_controlled = 1; /* Sample data on rising edge of NRD */
354 info->smc.nwe_controlled = 0; /* Drive data on falling edge of NCS */
355 info->smc.nwait_mode = 3; /* NWAIT is in READY mode */
356 info->smc.byte_write = 0; /* Byte select access type */
357 info->smc.tdf_mode = 0; /* TDF optimization disabled */
358 info->smc.tdf_cycles = 0; /* No TDF wait cycles */
359
360 /* Setup ATA timing */
361 ret = pata_at32_setup_timing(dev, info, &initial_timing);
362 if (ret)
363 goto err_setup_timing;
364
365 /* Setup ATA addresses */
366 ret = -ENOMEM;
367 info->ide_addr = devm_ioremap(dev, info->res_ide.start, 16);
368 info->alt_addr = devm_ioremap(dev, info->res_alt.start, 16);
369 if (!info->ide_addr || !info->alt_addr)
370 goto err_ioremap;
371
372#ifdef DEBUG_BUS
373 pata_at32_debug_bus(dev, info);
374#endif
375
376 /* Register ATA device */
377 ret = pata_at32_init_one(dev, info);
378 if (ret)
379 goto err_ata_device;
380
381 return 0;
382
383 err_ata_device:
384 err_ioremap:
385 err_setup_timing:
386 release_resource(&info->res_alt);
387 err_req_res_alt:
388 release_resource(&info->res_ide);
389 err_req_res_ide:
390 kfree(info);
391
392 return ret;
393}
394
/*
 * Platform removal: detach the ATA host, then release the IDE/Alt
 * memory windows and free the private info structure.  Tolerates a
 * missing host or missing private data (returns 0 in either case).
 */
static int __exit pata_at32_remove(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	struct at32_ide_info *info;

	if (!host)
		return 0;

	/* Grab the info pointer before detach tears the host down */
	info = host->private_data;
	ata_host_detach(host);

	if (!info)
		return 0;

	release_resource(&info->res_ide);
	release_resource(&info->res_alt);

	kfree(info);

	return 0;
}
416
/* Probe is supplied separately via platform_driver_probe() so that it
   can live in __init memory; only remove is wired up here */
static struct platform_driver pata_at32_driver = {
	.remove	       = __exit_p(pata_at32_remove),
	.driver	       = {
		.name  = "at32_ide",
		.owner = THIS_MODULE,
	},
};
424
/* Module load: register driver and probe any matching devices */
static int __init pata_at32_init(void)
{
	return platform_driver_probe(&pata_at32_driver, pata_at32_probe);
}
429
/* Module unload: unregister the platform driver */
static void __exit pata_at32_exit(void)
{
	platform_driver_unregister(&pata_at32_driver);
}
434
435module_init(pata_at32_init);
436module_exit(pata_at32_exit);
437
438MODULE_LICENSE("GPL");
439MODULE_DESCRIPTION("AVR32 SMC/CFC PATA Driver");
440MODULE_AUTHOR("Kristoffer Nyborg Gregertsen <kngregertsen@norway.atmel.com>");
441MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 86f85a2cab..9623f52955 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -33,8 +33,9 @@ enum {
33 ATIIXP_IDE_UDMA_MODE = 0x56 33 ATIIXP_IDE_UDMA_MODE = 0x56
34}; 34};
35 35
36static int atiixp_pre_reset(struct ata_port *ap, unsigned long deadline) 36static int atiixp_pre_reset(struct ata_link *link, unsigned long deadline)
37{ 37{
38 struct ata_port *ap = link->ap;
38 static const struct pci_bits atiixp_enable_bits[] = { 39 static const struct pci_bits atiixp_enable_bits[] = {
39 { 0x48, 1, 0x01, 0x00 }, 40 { 0x48, 1, 0x01, 0x00 },
40 { 0x48, 1, 0x08, 0x00 } 41 { 0x48, 1, 0x08, 0x00 }
@@ -44,7 +45,7 @@ static int atiixp_pre_reset(struct ata_port *ap, unsigned long deadline)
44 if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no])) 45 if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
45 return -ENOENT; 46 return -ENOENT;
46 47
47 return ata_std_prereset(ap, deadline); 48 return ata_std_prereset(link, deadline);
48} 49}
49 50
50static void atiixp_error_handler(struct ata_port *ap) 51static void atiixp_error_handler(struct ata_port *ap)
@@ -172,6 +173,9 @@ static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
172 * 173 *
173 * When DMA begins we need to ensure that the UDMA control 174 * When DMA begins we need to ensure that the UDMA control
174 * register for the channel is correctly set. 175 * register for the channel is correctly set.
176 *
177 * Note: The host lock held by the libata layer protects
178 * us from two channels both trying to set DMA bits at once
175 */ 179 */
176 180
177static void atiixp_bmdma_start(struct ata_queued_cmd *qc) 181static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
@@ -198,6 +202,9 @@ static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
198 * 202 *
199 * DMA has completed. Clear the UDMA flag as the next operations will 203 * DMA has completed. Clear the UDMA flag as the next operations will
200 * be PIO ones not UDMA data transfer. 204 * be PIO ones not UDMA data transfer.
205 *
206 * Note: The host lock held by the libata layer protects
207 * us from two channels both trying to set DMA bits at once
201 */ 208 */
202 209
203static void atiixp_bmdma_stop(struct ata_queued_cmd *qc) 210static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
@@ -232,7 +239,6 @@ static struct scsi_host_template atiixp_sht = {
232}; 239};
233 240
234static struct ata_port_operations atiixp_port_ops = { 241static struct ata_port_operations atiixp_port_ops = {
235 .port_disable = ata_port_disable,
236 .set_piomode = atiixp_set_piomode, 242 .set_piomode = atiixp_set_piomode,
237 .set_dmamode = atiixp_set_dmamode, 243 .set_dmamode = atiixp_set_dmamode,
238 .mode_filter = ata_pci_default_filter, 244 .mode_filter = ata_pci_default_filter,
@@ -261,9 +267,8 @@ static struct ata_port_operations atiixp_port_ops = {
261 .irq_handler = ata_interrupt, 267 .irq_handler = ata_interrupt,
262 .irq_clear = ata_bmdma_irq_clear, 268 .irq_clear = ata_bmdma_irq_clear,
263 .irq_on = ata_irq_on, 269 .irq_on = ata_irq_on,
264 .irq_ack = ata_irq_ack,
265 270
266 .port_start = ata_port_start, 271 .port_start = ata_sff_port_start,
267}; 272};
268 273
269static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id) 274static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
new file mode 100644
index 0000000000..747549e456
--- /dev/null
+++ b/drivers/ata/pata_bf54x.c
@@ -0,0 +1,1627 @@
1/*
2 * File: drivers/ata/pata_bf54x.c
3 * Author: Sonic Zhang <sonic.zhang@analog.com>
4 *
5 * Created:
6 * Description: PATA Driver for blackfin 54x
7 *
8 * Modified:
9 * Copyright 2007 Analog Devices Inc.
10 *
11 * Bugs: Enter bugs at http://blackfin.uclinux.org/
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, see the file COPYING, or write
25 * to the Free Software Foundation, Inc.,
26 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27 */
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/pci.h>
32#include <linux/init.h>
33#include <linux/blkdev.h>
34#include <linux/delay.h>
35#include <linux/device.h>
36#include <scsi/scsi_host.h>
37#include <linux/libata.h>
38#include <linux/platform_device.h>
39#include <asm/dma.h>
40#include <asm/gpio.h>
41#include <asm/portmux.h>
42
43#define DRV_NAME "pata-bf54x"
44#define DRV_VERSION "0.9"
45
46#define ATA_REG_CTRL 0x0E
47#define ATA_REG_ALTSTATUS ATA_REG_CTRL
48
49/* These are the offset of the controller's registers */
50#define ATAPI_OFFSET_CONTROL 0x00
51#define ATAPI_OFFSET_STATUS 0x04
52#define ATAPI_OFFSET_DEV_ADDR 0x08
53#define ATAPI_OFFSET_DEV_TXBUF 0x0c
54#define ATAPI_OFFSET_DEV_RXBUF 0x10
55#define ATAPI_OFFSET_INT_MASK 0x14
56#define ATAPI_OFFSET_INT_STATUS 0x18
57#define ATAPI_OFFSET_XFER_LEN 0x1c
58#define ATAPI_OFFSET_LINE_STATUS 0x20
59#define ATAPI_OFFSET_SM_STATE 0x24
60#define ATAPI_OFFSET_TERMINATE 0x28
61#define ATAPI_OFFSET_PIO_TFRCNT 0x2c
62#define ATAPI_OFFSET_DMA_TFRCNT 0x30
63#define ATAPI_OFFSET_UMAIN_TFRCNT 0x34
64#define ATAPI_OFFSET_UDMAOUT_TFRCNT 0x38
65#define ATAPI_OFFSET_REG_TIM_0 0x40
66#define ATAPI_OFFSET_PIO_TIM_0 0x44
67#define ATAPI_OFFSET_PIO_TIM_1 0x48
68#define ATAPI_OFFSET_MULTI_TIM_0 0x50
69#define ATAPI_OFFSET_MULTI_TIM_1 0x54
70#define ATAPI_OFFSET_MULTI_TIM_2 0x58
71#define ATAPI_OFFSET_ULTRA_TIM_0 0x60
72#define ATAPI_OFFSET_ULTRA_TIM_1 0x64
73#define ATAPI_OFFSET_ULTRA_TIM_2 0x68
74#define ATAPI_OFFSET_ULTRA_TIM_3 0x6c
75
76
77#define ATAPI_GET_CONTROL(base)\
78 bfin_read16(base + ATAPI_OFFSET_CONTROL)
79#define ATAPI_SET_CONTROL(base, val)\
80 bfin_write16(base + ATAPI_OFFSET_CONTROL, val)
81#define ATAPI_GET_STATUS(base)\
82 bfin_read16(base + ATAPI_OFFSET_STATUS)
83#define ATAPI_GET_DEV_ADDR(base)\
84 bfin_read16(base + ATAPI_OFFSET_DEV_ADDR)
85#define ATAPI_SET_DEV_ADDR(base, val)\
86 bfin_write16(base + ATAPI_OFFSET_DEV_ADDR, val)
87#define ATAPI_GET_DEV_TXBUF(base)\
88 bfin_read16(base + ATAPI_OFFSET_DEV_TXBUF)
89#define ATAPI_SET_DEV_TXBUF(base, val)\
90 bfin_write16(base + ATAPI_OFFSET_DEV_TXBUF, val)
91#define ATAPI_GET_DEV_RXBUF(base)\
92 bfin_read16(base + ATAPI_OFFSET_DEV_RXBUF)
93#define ATAPI_SET_DEV_RXBUF(base, val)\
94 bfin_write16(base + ATAPI_OFFSET_DEV_RXBUF, val)
95#define ATAPI_GET_INT_MASK(base)\
96 bfin_read16(base + ATAPI_OFFSET_INT_MASK)
97#define ATAPI_SET_INT_MASK(base, val)\
98 bfin_write16(base + ATAPI_OFFSET_INT_MASK, val)
99#define ATAPI_GET_INT_STATUS(base)\
100 bfin_read16(base + ATAPI_OFFSET_INT_STATUS)
101#define ATAPI_SET_INT_STATUS(base, val)\
102 bfin_write16(base + ATAPI_OFFSET_INT_STATUS, val)
103#define ATAPI_GET_XFER_LEN(base)\
104 bfin_read16(base + ATAPI_OFFSET_XFER_LEN)
105#define ATAPI_SET_XFER_LEN(base, val)\
106 bfin_write16(base + ATAPI_OFFSET_XFER_LEN, val)
107#define ATAPI_GET_LINE_STATUS(base)\
108 bfin_read16(base + ATAPI_OFFSET_LINE_STATUS)
109#define ATAPI_GET_SM_STATE(base)\
110 bfin_read16(base + ATAPI_OFFSET_SM_STATE)
111#define ATAPI_GET_TERMINATE(base)\
112 bfin_read16(base + ATAPI_OFFSET_TERMINATE)
113#define ATAPI_SET_TERMINATE(base, val)\
114 bfin_write16(base + ATAPI_OFFSET_TERMINATE, val)
115#define ATAPI_GET_PIO_TFRCNT(base)\
116 bfin_read16(base + ATAPI_OFFSET_PIO_TFRCNT)
117#define ATAPI_GET_DMA_TFRCNT(base)\
118 bfin_read16(base + ATAPI_OFFSET_DMA_TFRCNT)
119#define ATAPI_GET_UMAIN_TFRCNT(base)\
120 bfin_read16(base + ATAPI_OFFSET_UMAIN_TFRCNT)
121#define ATAPI_GET_UDMAOUT_TFRCNT(base)\
122 bfin_read16(base + ATAPI_OFFSET_UDMAOUT_TFRCNT)
123#define ATAPI_GET_REG_TIM_0(base)\
124 bfin_read16(base + ATAPI_OFFSET_REG_TIM_0)
125#define ATAPI_SET_REG_TIM_0(base, val)\
126 bfin_write16(base + ATAPI_OFFSET_REG_TIM_0, val)
127#define ATAPI_GET_PIO_TIM_0(base)\
128 bfin_read16(base + ATAPI_OFFSET_PIO_TIM_0)
129#define ATAPI_SET_PIO_TIM_0(base, val)\
130 bfin_write16(base + ATAPI_OFFSET_PIO_TIM_0, val)
131#define ATAPI_GET_PIO_TIM_1(base)\
132 bfin_read16(base + ATAPI_OFFSET_PIO_TIM_1)
133#define ATAPI_SET_PIO_TIM_1(base, val)\
134 bfin_write16(base + ATAPI_OFFSET_PIO_TIM_1, val)
135#define ATAPI_GET_MULTI_TIM_0(base)\
136 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_0)
137#define ATAPI_SET_MULTI_TIM_0(base, val)\
138 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_0, val)
139#define ATAPI_GET_MULTI_TIM_1(base)\
140 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_1)
141#define ATAPI_SET_MULTI_TIM_1(base, val)\
142 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_1, val)
143#define ATAPI_GET_MULTI_TIM_2(base)\
144 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_2)
145#define ATAPI_SET_MULTI_TIM_2(base, val)\
146 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_2, val)
147#define ATAPI_GET_ULTRA_TIM_0(base)\
148 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_0)
149#define ATAPI_SET_ULTRA_TIM_0(base, val)\
150 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_0, val)
151#define ATAPI_GET_ULTRA_TIM_1(base)\
152 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_1)
153#define ATAPI_SET_ULTRA_TIM_1(base, val)\
154 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_1, val)
155#define ATAPI_GET_ULTRA_TIM_2(base)\
156 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_2)
157#define ATAPI_SET_ULTRA_TIM_2(base, val)\
158 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_2, val)
159#define ATAPI_GET_ULTRA_TIM_3(base)\
160 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_3)
161#define ATAPI_SET_ULTRA_TIM_3(base, val)\
162 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_3, val)
163
164/**
165 * PIO Mode - Frequency compatibility
166 */
167/* mode: 0 1 2 3 4 */
168static const u32 pio_fsclk[] =
169{ 33333333, 33333333, 33333333, 33333333, 33333333 };
170
171/**
172 * MDMA Mode - Frequency compatibility
173 */
174/* mode: 0 1 2 */
175static const u32 mdma_fsclk[] = { 33333333, 33333333, 33333333 };
176
177/**
178 * UDMA Mode - Frequency compatibility
179 *
180 * UDMA5 - 100 MB/s - SCLK = 133 MHz
181 * UDMA4 - 66 MB/s - SCLK >= 80 MHz
182 * UDMA3 - 44.4 MB/s - SCLK >= 50 MHz
183 * UDMA2 - 33 MB/s - SCLK >= 40 MHz
184 */
185/* mode: 0 1 2 3 4 5 */
186static const u32 udma_fsclk[] =
187{ 33333333, 33333333, 40000000, 50000000, 80000000, 133333333 };
188
189/**
190 * Register transfer timing table
191 */
192/* mode: 0 1 2 3 4 */
193/* Cycle Time */
194static const u32 reg_t0min[] = { 600, 383, 330, 180, 120 };
195/* DIOR/DIOW to end cycle */
196static const u32 reg_t2min[] = { 290, 290, 290, 70, 25 };
197/* DIOR/DIOW asserted pulse width */
198static const u32 reg_teocmin[] = { 290, 290, 290, 80, 70 };
199
200/**
201 * PIO timing table
202 */
203/* mode: 0 1 2 3 4 */
204/* Cycle Time */
205static const u32 pio_t0min[] = { 600, 383, 240, 180, 120 };
206/* Address valid to DIOR/DIORW */
207static const u32 pio_t1min[] = { 70, 50, 30, 30, 25 };
208/* DIOR/DIOW to end cycle */
209static const u32 pio_t2min[] = { 165, 125, 100, 80, 70 };
210/* DIOR/DIOW asserted pulse width */
211static const u32 pio_teocmin[] = { 165, 125, 100, 70, 25 };
212/* DIOW data hold */
213static const u32 pio_t4min[] = { 30, 20, 15, 10, 10 };
214
215/* ******************************************************************
216 * Multiword DMA timing table
217 * ******************************************************************
218 */
219/* mode: 0 1 2 */
220/* Cycle Time */
221static const u32 mdma_t0min[] = { 480, 150, 120 };
222/* DIOR/DIOW asserted pulse width */
223static const u32 mdma_tdmin[] = { 215, 80, 70 };
224/* DMACK to read data released */
225static const u32 mdma_thmin[] = { 20, 15, 10 };
226/* DIOR/DIOW to DMACK hold */
227static const u32 mdma_tjmin[] = { 20, 5, 5 };
228/* DIOR negated pulse width */
229static const u32 mdma_tkrmin[] = { 50, 50, 25 };
230/* DIOR negated pulse width */
231static const u32 mdma_tkwmin[] = { 215, 50, 25 };
232/* CS[1:0] valid to DIOR/DIOW */
233static const u32 mdma_tmmin[] = { 50, 30, 25 };
234/* DMACK to read data released */
235static const u32 mdma_tzmax[] = { 20, 25, 25 };
236
237/**
238 * Ultra DMA timing table
239 */
240/* mode: 0 1 2 3 4 5 */
241static const u32 udma_tcycmin[] = { 112, 73, 54, 39, 25, 17 };
242static const u32 udma_tdvsmin[] = { 70, 48, 31, 20, 7, 5 };
243static const u32 udma_tenvmax[] = { 70, 70, 70, 55, 55, 50 };
244static const u32 udma_trpmin[] = { 160, 125, 100, 100, 100, 85 };
245static const u32 udma_tmin[] = { 5, 5, 5, 5, 3, 3 };
246
247
248static const u32 udma_tmlimin = 20;
249static const u32 udma_tzahmin = 20;
250static const u32 udma_tenvmin = 20;
251static const u32 udma_tackmin = 20;
252static const u32 udma_tssmin = 50;
253
/**
 * num_clocks_min - SCLK cycles needed to cover a minimum timing value
 * @tmin: minimum time, in nanoseconds
 * @fsclk: system clock frequency, in Hz
 *
 * Converts a nanosecond timing requirement into a whole number of SCLK
 * cycles, rounding up so the programmed timing never undershoots the
 * required minimum.
 */
static unsigned short num_clocks_min(unsigned long tmin,
			unsigned long fsclk)
{
	unsigned long clocks = tmin * (fsclk / 1000 / 1000) / 1000;

	/* Round up when the truncated estimate (computed at MHz
	 * granularity) falls short of the requirement re-checked at
	 * kHz granularity. */
	if (clocks * 1000 * 1000 < tmin * (fsclk / 1000))
		clocks++;

	return (unsigned short)clocks;
}
275
276/**
277 * bfin_set_piomode - Initialize host controller PATA PIO timings
278 * @ap: Port whose timings we are configuring
279 * @adev: um
280 *
281 * Set PIO mode for device.
282 *
283 * LOCKING:
284 * None (inherited from caller).
285 */
286
287static void bfin_set_piomode(struct ata_port *ap, struct ata_device *adev)
288{
289 int mode = adev->pio_mode - XFER_PIO_0;
290 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
291 unsigned int fsclk = get_sclk();
292 unsigned short teoc_reg, t2_reg, teoc_pio;
293 unsigned short t4_reg, t2_pio, t1_reg;
294 unsigned short n0, n6, t6min = 5;
295
296 /* the most restrictive timing value is t6 and tc, the DIOW - data hold
297 * If one SCLK pulse is longer than this minimum value then register
298 * transfers cannot be supported at this frequency.
299 */
300 n6 = num_clocks_min(t6min, fsclk);
301 if (mode >= 0 && mode <= 4 && n6 >= 1) {
302 pr_debug("set piomode: mode=%d, fsclk=%ud\n", mode, fsclk);
303 /* calculate the timing values for register transfers. */
304 while (mode > 0 && pio_fsclk[mode] > fsclk)
305 mode--;
306
307 /* DIOR/DIOW to end cycle time */
308 t2_reg = num_clocks_min(reg_t2min[mode], fsclk);
309 /* DIOR/DIOW asserted pulse width */
310 teoc_reg = num_clocks_min(reg_teocmin[mode], fsclk);
311 /* Cycle Time */
312 n0 = num_clocks_min(reg_t0min[mode], fsclk);
313
314 /* increase t2 until we meed the minimum cycle length */
315 if (t2_reg + teoc_reg < n0)
316 t2_reg = n0 - teoc_reg;
317
318 /* calculate the timing values for pio transfers. */
319
320 /* DIOR/DIOW to end cycle time */
321 t2_pio = num_clocks_min(pio_t2min[mode], fsclk);
322 /* DIOR/DIOW asserted pulse width */
323 teoc_pio = num_clocks_min(pio_teocmin[mode], fsclk);
324 /* Cycle Time */
325 n0 = num_clocks_min(pio_t0min[mode], fsclk);
326
327 /* increase t2 until we meed the minimum cycle length */
328 if (t2_pio + teoc_pio < n0)
329 t2_pio = n0 - teoc_pio;
330
331 /* Address valid to DIOR/DIORW */
332 t1_reg = num_clocks_min(pio_t1min[mode], fsclk);
333
334 /* DIOW data hold */
335 t4_reg = num_clocks_min(pio_t4min[mode], fsclk);
336
337 ATAPI_SET_REG_TIM_0(base, (teoc_reg<<8 | t2_reg));
338 ATAPI_SET_PIO_TIM_0(base, (t4_reg<<12 | t2_pio<<4 | t1_reg));
339 ATAPI_SET_PIO_TIM_1(base, teoc_pio);
340 if (mode > 2) {
341 ATAPI_SET_CONTROL(base,
342 ATAPI_GET_CONTROL(base) | IORDY_EN);
343 } else {
344 ATAPI_SET_CONTROL(base,
345 ATAPI_GET_CONTROL(base) & ~IORDY_EN);
346 }
347
348 /* Disable host ATAPI PIO interrupts */
349 ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
350 & ~(PIO_DONE_MASK | HOST_TERM_XFER_MASK));
351 SSYNC();
352 }
353}
354
/**
 * bfin_set_dmamode - Initialize host controller PATA DMA timings
 * @ap: Port whose timings we are configuring
 * @adev: Device whose DMA mode is being programmed
 *
 * Programs either the Ultra DMA (modes 0-5) or Multi-word DMA (modes 0-2)
 * timing registers, depending on which range adev->dma_mode falls into,
 * and unmasks the corresponding DMA done/terminate host interrupts.
 *
 * LOCKING:
 * None (inherited from caller).
 */

static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	int mode;
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
	unsigned long fsclk = get_sclk();
	unsigned short tenv, tack, tcyc_tdvs, tdvs, tmli, tss, trp, tzah;
	unsigned short tm, td, tkr, tkw, teoc, th;
	unsigned short n0, nf, tfmin = 5;
	unsigned short nmin, tcyc;

	/* Interpret the mode as UDMA first; negative means it is not
	 * in the UDMA range. */
	mode = adev->dma_mode - XFER_UDMA_0;
	if (mode >= 0 && mode <= 5) {
		pr_debug("set udmamode: mode=%d\n", mode);
		/* the most restrictive timing value is t6 and tc,
		 * the DIOW - data hold. If one SCLK pulse is longer
		 * than this minimum value then register
		 * transfers cannot be supported at this frequency.
		 */
		/* step the mode down until SCLK can support its rate */
		while (mode > 0 && udma_fsclk[mode] > fsclk)
			mode--;

		nmin = num_clocks_min(udma_tmin[mode], fsclk);
		if (nmin >= 1) {
			/* calculate the timing values for Ultra DMA. */
			tdvs = num_clocks_min(udma_tdvsmin[mode], fsclk);
			tcyc = num_clocks_min(udma_tcycmin[mode], fsclk);
			tcyc_tdvs = 2;

			/* increase tcyc - tdvs (tcyc_tdvs) until we meet
			 * the minimum cycle length
			 */
			if (tdvs + tcyc_tdvs < tcyc)
				tcyc_tdvs = tcyc - tdvs;

			/* Now assign the values required for the timing
			 * registers (both have a hardware minimum of 2)
			 */
			if (tcyc_tdvs < 2)
				tcyc_tdvs = 2;

			if (tdvs < 2)
				tdvs = 2;

			tack = num_clocks_min(udma_tackmin, fsclk);
			tss = num_clocks_min(udma_tssmin, fsclk);
			tmli = num_clocks_min(udma_tmlimin, fsclk);
			tzah = num_clocks_min(udma_tzahmin, fsclk);
			trp = num_clocks_min(udma_trpmin[mode], fsclk);
			tenv = num_clocks_min(udma_tenvmin, fsclk);
			/* only program the registers if tenv fits within
			 * the mode's envelope maximum */
			if (tenv <= udma_tenvmax[mode]) {
				ATAPI_SET_ULTRA_TIM_0(base, (tenv<<8 | tack));
				ATAPI_SET_ULTRA_TIM_1(base,
					(tcyc_tdvs<<8 | tdvs));
				ATAPI_SET_ULTRA_TIM_2(base, (tmli<<8 | tss));
				ATAPI_SET_ULTRA_TIM_3(base, (trp<<8 | tzah));

				/* Enable host ATAPI Ultra DMA interrupts */
				ATAPI_SET_INT_MASK(base,
					ATAPI_GET_INT_MASK(base)
					| UDMAIN_DONE_MASK
					| UDMAOUT_DONE_MASK
					| UDMAIN_TERM_MASK
					| UDMAOUT_TERM_MASK);
			}
		}
	}

	/* Otherwise check for a Multi-word DMA mode. */
	mode = adev->dma_mode - XFER_MW_DMA_0;
	if (mode >= 0 && mode <= 2) {
		pr_debug("set mdmamode: mode=%d\n", mode);
		/* the most restrictive timing value is tf, the DMACK to
		 * read data released. If one SCLK pulse is longer than
		 * this maximum value then the MDMA mode
		 * cannot be supported at this frequency.
		 */
		while (mode > 0 && mdma_fsclk[mode] > fsclk)
			mode--;

		nf = num_clocks_min(tfmin, fsclk);
		if (nf >= 1) {
			/* calculate the timing values for Multi-word DMA. */

			/* DIOR/DIOW asserted pulse width */
			td = num_clocks_min(mdma_tdmin[mode], fsclk);

			/* DIOR negated pulse width */
			tkw = num_clocks_min(mdma_tkwmin[mode], fsclk);

			/* Cycle Time */
			n0  = num_clocks_min(mdma_t0min[mode], fsclk);

			/* increase tk until we meet the minimum cycle length */
			if (tkw + td < n0)
				tkw = n0 - td;

			/* DIOR negated pulse width - read */
			tkr = num_clocks_min(mdma_tkrmin[mode], fsclk);
			/* CS[1:0] valid to DIOR/DIOW */
			tm = num_clocks_min(mdma_tmmin[mode], fsclk);
			/* DIOR/DIOW to DMACK hold */
			teoc = num_clocks_min(mdma_tjmin[mode], fsclk);
			/* DIOW Data hold */
			th = num_clocks_min(mdma_thmin[mode], fsclk);

			ATAPI_SET_MULTI_TIM_0(base, (tm<<8 | td));
			ATAPI_SET_MULTI_TIM_1(base, (tkr<<8 | tkw));
			ATAPI_SET_MULTI_TIM_2(base, (teoc<<8 | th));

			/* Enable host ATAPI Multi DMA interrupts */
			ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
				| MULTI_DONE_MASK | MULTI_TERM_MASK);
			SSYNC();
		}
	}
	return;
}
483
/**
 *
 *	Function:       wait_complete
 *
 *	Description:    Busy-waits (bounded, no sleeping) until one of the
 *	                interrupt status bits in @mask is raised, then
 *	                writes @mask back to the status register to clear
 *	                those bits.  Gives up silently if the hardware
 *	                never raises the expected bit.
 *
 */
static inline void wait_complete(void __iomem *base, unsigned short mask)
{
	unsigned short status;
	unsigned int i = 0;

/* upper bound on polling iterations, not a unit of time */
#define PATA_BF54X_WAIT_TIMEOUT		10000

	for (i = 0; i < PATA_BF54X_WAIT_TIMEOUT; i++) {
		status = ATAPI_GET_INT_STATUS(base) & mask;
		if (status)
			break;
	}

	/* clear the status bits we waited on (register appears to be
	 * write-one-to-clear -- the mask is written back unconditionally) */
	ATAPI_SET_INT_STATUS(base, mask);
}
506
/**
 *
 *	Function:       write_atapi_register
 *
 *	Description:    Writes one 16-bit value to an ATA device (taskfile)
 *	                register through the controller's register PIO
 *	                state machine.
 *
 */

static void write_atapi_register(void __iomem *base,
		unsigned long ata_reg, unsigned short value)
{
	/* Program the ATA_DEV_TXBUF register with write data (to be
	 * written into the device).
	 */
	ATAPI_SET_DEV_TXBUF(base, value);

	/* Program the ATA_DEV_ADDR register with address of the
	 * device register (0x01 to 0x0F).
	 */
	ATAPI_SET_DEV_ADDR(base, ata_reg);

	/* Program the ATA_CTRL register with dir set to write (1)
	 */
	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR));

	/* ensure PIO DMA is not set */
	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));

	/* and start the transfer */
	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));

	/* Wait for the interrupt to indicate the end of the transfer.
	 * (We need to wait on and clear the ATA_DEV_INT interrupt status)
	 */
	wait_complete(base, PIO_DONE_INT);
}
543
/**
 *
 *	Function:       read_atapi_register
 *
 *	Description:    Reads one 16-bit value from an ATA device (taskfile)
 *	                register through the controller's register PIO
 *	                state machine.
 *
 */

static unsigned short read_atapi_register(void __iomem *base,
		unsigned long ata_reg)
{
	/* Program the ATA_DEV_ADDR register with address of the
	 * device register (0x01 to 0x0F).
	 */
	ATAPI_SET_DEV_ADDR(base, ata_reg);

	/* Program the ATA_CTRL register with dir set to read (0) and
	 */
	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR));

	/* ensure PIO DMA is not set */
	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));

	/* and start the transfer */
	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));

	/* Wait for the interrupt to indicate the end of the transfer.
	 * (PIO_DONE interrupt is set and it doesn't seem to matter
	 * that we don't clear it)
	 */
	wait_complete(base, PIO_DONE_INT);

	/* Fetch the received value from the RX buffer register */
	return ATAPI_GET_DEV_RXBUF(base);
}
581
/**
 *
 *	Function:       write_atapi_data
 *
 *	Description:    Writes @len 16-bit words from @buf to the ATA data
 *	                register, one word per PIO transfer.
 *
 */

static void write_atapi_data(void __iomem *base,
		int len, unsigned short *buf)
{
	int i;

	/* Set transfer length to 1 (one word per PIO_START below) */
	ATAPI_SET_XFER_LEN(base, 1);

	/* Program the ATA_DEV_ADDR register with address of the
	 * ATA_REG_DATA
	 */
	ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA);

	/* Program the ATA_CTRL register with dir set to write (1)
	 */
	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR));

	/* ensure PIO DMA is not set */
	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));

	for (i = 0; i < len; i++) {
		/* Program the ATA_DEV_TXBUF register with write data (to be
		 * written into the device).
		 */
		ATAPI_SET_DEV_TXBUF(base, buf[i]);

		/* and start the transfer */
		ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));

		/* Wait for the interrupt to indicate the end of the transfer.
		 * (We need to wait on and clear the ATA_DEV_INT
		 * interrupt status)
		 */
		wait_complete(base, PIO_DONE_INT);
	}
}
626
/**
 *
 *	Function:       read_atapi_data
 *
 *	Description:    Reads @len 16-bit words from the ATA data register
 *	                into @buf, one word per PIO transfer.
 *
 */

static void read_atapi_data(void __iomem *base,
		int len, unsigned short *buf)
{
	int i;

	/* Set transfer length to 1 (one word per PIO_START below) */
	ATAPI_SET_XFER_LEN(base, 1);

	/* Program the ATA_DEV_ADDR register with address of the
	 * ATA_REG_DATA
	 */
	ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA);

	/* Program the ATA_CTRL register with dir set to read (0) and
	 */
	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR));

	/* ensure PIO DMA is not set */
	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));

	for (i = 0; i < len; i++) {
		/* and start the transfer */
		ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));

		/* Wait for the interrupt to indicate the end of the transfer.
		 * (PIO_DONE interrupt is set and it doesn't seem to matter
		 * that we don't clear it)
		 */
		wait_complete(base, PIO_DONE_INT);

		/* Fetch the received word from the RX buffer register */
		buf[i] = ATAPI_GET_DEV_RXBUF(base);
	}
}
671
/**
 * bfin_tf_load - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Writes the control, feature/count/LBA registers (HOB copies first for
 * LBA48 commands) and the device register through the controller's
 * register PIO path, waiting for the port to go idle where the generic
 * code does.
 *
 * Note: Original code is ata_tf_load().
 */

static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	/* Only rewrite the control register when it actually changes */
	if (tf->ctl != ap->last_ctl) {
		write_atapi_register(base, ATA_REG_CTRL, tf->ctl);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr) {
		/* For LBA48, the high-order (HOB) bytes are written before
		 * the low-order copies below */
		if (tf->flags & ATA_TFLAG_LBA48) {
			write_atapi_register(base, ATA_REG_FEATURE,
				tf->hob_feature);
			write_atapi_register(base, ATA_REG_NSECT,
				tf->hob_nsect);
			write_atapi_register(base, ATA_REG_LBAL, tf->hob_lbal);
			write_atapi_register(base, ATA_REG_LBAM, tf->hob_lbam);
			write_atapi_register(base, ATA_REG_LBAH, tf->hob_lbah);
			pr_debug("hob: feat 0x%X nsect 0x%X, lba 0x%X "
				 "0x%X 0x%X\n",
				tf->hob_feature,
				tf->hob_nsect,
				tf->hob_lbal,
				tf->hob_lbam,
				tf->hob_lbah);
		}

		write_atapi_register(base, ATA_REG_FEATURE, tf->feature);
		write_atapi_register(base, ATA_REG_NSECT, tf->nsect);
		write_atapi_register(base, ATA_REG_LBAL, tf->lbal);
		write_atapi_register(base, ATA_REG_LBAM, tf->lbam);
		write_atapi_register(base, ATA_REG_LBAH, tf->lbah);
		pr_debug("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		write_atapi_register(base, ATA_REG_DEVICE, tf->device);
		pr_debug("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}
729
/**
 * bfin_check_status - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads the taskfile status register through the controller's register
 * PIO path (read_atapi_register also waits on and clears the PIO-done
 * interrupt status).
 *
 * Note: Original code is ata_check_status().
 */

static u8 bfin_check_status(struct ata_port *ap)
{
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
	return read_atapi_register(base, ATA_REG_STATUS);
}
742
/**
 * bfin_tf_read - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads status, error, count and LBA registers back from the device.
 * For LBA48 taskfiles the HOB bit is set in the control register first
 * so the high-order byte copies can be read.
 *
 * Note: Original code is ata_tf_read().
 */

static void bfin_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;

	tf->command = bfin_check_status(ap);
	tf->feature = read_atapi_register(base, ATA_REG_ERR);
	tf->nsect = read_atapi_register(base, ATA_REG_NSECT);
	tf->lbal = read_atapi_register(base, ATA_REG_LBAL);
	tf->lbam = read_atapi_register(base, ATA_REG_LBAM);
	tf->lbah = read_atapi_register(base, ATA_REG_LBAH);
	tf->device = read_atapi_register(base, ATA_REG_DEVICE);

	if (tf->flags & ATA_TFLAG_LBA48) {
		/* ATA_HOB selects the high-order byte copies for reading */
		write_atapi_register(base, ATA_REG_CTRL, tf->ctl | ATA_HOB);
		tf->hob_feature = read_atapi_register(base, ATA_REG_ERR);
		tf->hob_nsect = read_atapi_register(base, ATA_REG_NSECT);
		tf->hob_lbal = read_atapi_register(base, ATA_REG_LBAL);
		tf->hob_lbam = read_atapi_register(base, ATA_REG_LBAM);
		tf->hob_lbah = read_atapi_register(base, ATA_REG_LBAH);
	}
}
772
/**
 * bfin_exec_command - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Writes the command register through the register PIO path, then
 * performs the standard post-command pause.
 *
 * Note: Original code is ata_exec_command().
 */

static void bfin_exec_command(struct ata_port *ap,
			const struct ata_taskfile *tf)
{
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
	pr_debug("ata%u: cmd 0x%X\n", ap->print_id, tf->command);

	write_atapi_register(base, ATA_REG_CMD, tf->command);
	ata_pause(ap);
}
790
/**
 * bfin_check_altstatus - Read device alternate status reg
 * @ap: port where the device is
 *
 * ATA_REG_ALTSTATUS aliases the device-control register offset (see the
 * #define near the top of this file), so this reads the device's
 * alternate-status copy via the register PIO path.
 */

static u8 bfin_check_altstatus(struct ata_port *ap)
{
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
	return read_atapi_register(base, ATA_REG_ALTSTATUS);
}
801
802/**
803 * bfin_std_dev_select - Select device 0/1 on ATA bus
804 * @ap: ATA channel to manipulate
805 * @device: ATA device (numbered from zero) to select
806 *
807 * Note: Original code is ata_std_dev_select().
808 */
809
810static void bfin_std_dev_select(struct ata_port *ap, unsigned int device)
811{
812 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
813 u8 tmp;
814
815 if (device == 0)
816 tmp = ATA_DEVICE_OBS;
817 else
818 tmp = ATA_DEVICE_OBS | ATA_DEV1;
819
820 write_atapi_register(base, ATA_REG_DEVICE, tmp);
821 ata_pause(ap);
822}
823
/**
 * bfin_bmdma_setup - Set up IDE DMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * Programs the Blackfin peripheral DMA channel (TX for writes, RX for
 * reads) with the scatter/gather addresses and 16-bit word counts.
 *
 * NOTE(review): each loop iteration overwrites the channel's start
 * address and count, so only the final sg entry remains programmed --
 * confirm this is intended for multi-entry sg lists.
 *
 * Note: Original code is ata_bmdma_setup().
 */

static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
{
	unsigned short config = WDSIZE_16;
	struct scatterlist *sg;

	pr_debug("in atapi dma setup\n");
	/* Program the ATA_CTRL register with dir */
	if (qc->tf.flags & ATA_TFLAG_WRITE) {
		/* fill the ATAPI DMA controller */
		set_dma_config(CH_ATAPI_TX, config);
		set_dma_x_modify(CH_ATAPI_TX, 2);
		ata_for_each_sg(sg, qc) {
			set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
			set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
		}
	} else {
		/* WNR: receive direction (DMA writes to memory) */
		config |= WNR;
		/* fill the ATAPI DMA controller */
		set_dma_config(CH_ATAPI_RX, config);
		set_dma_x_modify(CH_ATAPI_RX, 2);
		ata_for_each_sg(sg, qc) {
			set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
			set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
		}
	}
}
857
/**
 * bfin_bmdma_start - Start an IDE DMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * Enables the peripheral DMA channel configured in bfin_bmdma_setup(),
 * issues the command, sets the controller transfer direction and length,
 * and kicks off Ultra DMA or Multi-word DMA depending on the port masks.
 *
 * NOTE(review): the XFER_LEN loop overwrites the length each iteration,
 * so only the final sg entry's length remains programmed -- confirm for
 * multi-entry sg lists.
 *
 * Note: Original code is ata_bmdma_start().
 */

static void bfin_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
	struct scatterlist *sg;

	pr_debug("in atapi dma start\n");
	/* nothing to do if the port supports neither UDMA nor MWDMA */
	if (!(ap->udma_mask || ap->mwdma_mask))
		return;

	/* start ATAPI DMA controller*/
	if (qc->tf.flags & ATA_TFLAG_WRITE) {
		/*
		 * On blackfin arch, uncacheable memory is not
		 * allocated with flag GFP_DMA. DMA buffer from
		 * common kernel code should be flushed if WB
		 * data cache is enabled. Otherwise, this loop
		 * is an empty loop and optimized out.
		 */
		ata_for_each_sg(sg, qc) {
			flush_dcache_range(sg_dma_address(sg),
				sg_dma_address(sg) + sg_dma_len(sg));
		}
		enable_dma(CH_ATAPI_TX);
		pr_debug("enable udma write\n");

		/* Send ATA DMA write command */
		bfin_exec_command(ap, &qc->tf);

		/* set ATA DMA write direction */
		ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
			| XFER_DIR));
	} else {
		enable_dma(CH_ATAPI_RX);
		pr_debug("enable udma read\n");

		/* Send ATA DMA read command */
		bfin_exec_command(ap, &qc->tf);

		/* set ATA DMA read direction */
		ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
			& ~XFER_DIR));
	}

	/* Reset all transfer count */
	ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST);

	/* Set transfer length to buffer len */
	ata_for_each_sg(sg, qc) {
		ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
	}

	/* Enable ATA DMA operation -- prefer UDMA when the port has it */
	if (ap->udma_mask)
		ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
			| ULTRA_START);
	else
		ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
			| MULTI_START);
}
925
/**
 * bfin_bmdma_stop - Stop IDE DMA transfer
 * @qc: Command we are ending DMA for
 *
 * Disables the peripheral DMA channel used by @qc.  For reads, the
 * destination buffers are additionally invalidated from the data
 * cache so the CPU observes the DMA-written data.
 */

static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;

	pr_debug("in atapi dma stop\n");
	/* Port is PIO-only: no DMA channel to stop */
	if (!(ap->udma_mask || ap->mwdma_mask))
		return;

	/* stop ATAPI DMA controller*/
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		disable_dma(CH_ATAPI_TX);
	else {
		disable_dma(CH_ATAPI_RX);
		/*
		 * NOTE(review): HSM_ST_LAST is an enum state value, not a
		 * bit flag; the bitwise AND below also matches any state
		 * whose value shares bits with it — confirm "==" was not
		 * intended here.
		 */
		if (ap->hsm_task_state & HSM_ST_LAST) {
			/*
			 * On blackfin arch, uncacheable memory is not
			 * allocated with flag GFP_DMA. DMA buffer from
			 * common kernel code should be invalidated if
			 * data cache is enabled. Otherwise, this loop
			 * is an empty loop and optimized out.
			 */
			ata_for_each_sg(sg, qc) {
				invalidate_dcache_range(
					sg_dma_address(sg),
					sg_dma_address(sg)
					+ sg_dma_len(sg));
			}
		}
	}
}
962
963/**
964 * bfin_devchk - PATA device presence detection
965 * @ap: ATA channel to examine
966 * @device: Device to examine (starting at zero)
967 *
968 * Note: Original code is ata_devchk().
969 */
970
971static unsigned int bfin_devchk(struct ata_port *ap,
972 unsigned int device)
973{
974 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
975 u8 nsect, lbal;
976
977 bfin_std_dev_select(ap, device);
978
979 write_atapi_register(base, ATA_REG_NSECT, 0x55);
980 write_atapi_register(base, ATA_REG_LBAL, 0xaa);
981
982 write_atapi_register(base, ATA_REG_NSECT, 0xaa);
983 write_atapi_register(base, ATA_REG_LBAL, 0x55);
984
985 write_atapi_register(base, ATA_REG_NSECT, 0x55);
986 write_atapi_register(base, ATA_REG_LBAL, 0xaa);
987
988 nsect = read_atapi_register(base, ATA_REG_NSECT);
989 lbal = read_atapi_register(base, ATA_REG_LBAL);
990
991 if ((nsect == 0x55) && (lbal == 0xaa))
992 return 1; /* we found a device */
993
994 return 0; /* nothing found */
995}
996
/**
 * bfin_bus_post_reset - PATA device post reset
 * @ap: port that was just reset
 * @devmask: mask of devices found by bfin_devchk (bit 0 = master,
 *	bit 1 = slave)
 *
 * Waits for each detected device to come out of BSY after a bus
 * reset, polling device 1's taskfile until it responds or the boot
 * timeout expires.
 *
 * Note: Original code is ata_bus_post_reset().
 */

static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		bfin_std_dev_select(ap, 1);
		nsect = read_atapi_register(base, ATA_REG_NSECT);
		lbal = read_atapi_register(base, ATA_REG_LBAL);
		/* registers read back sanely -> device 1 is responding */
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			/* give up on device 1 */
			dev1 = 0;
			break;
		}
		msleep(50); /* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	bfin_std_dev_select(ap, 0);
	if (dev1)
		bfin_std_dev_select(ap, 1);
	if (dev0)
		bfin_std_dev_select(ap, 0);
}
1044
/**
 * bfin_bus_softreset - PATA device software reset
 * @ap: port to reset
 * @devmask: mask of devices detected on the bus
 *
 * Pulses SRST in the device control register, waits the canonical
 * post-reset delay, then runs post-reset device wakeup.  Always
 * returns 0; a floating (0xFF) status merely skips the post-reset
 * processing.
 *
 * Note: Original code is ata_bus_softreset().
 */

static unsigned int bfin_bus_softreset(struct ata_port *ap,
				       unsigned int devmask)
{
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;

	/* software reset. causes dev0 to be selected */
	write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
	udelay(20);
	write_atapi_register(base, ATA_REG_CTRL, ap->ctl | ATA_SRST);
	udelay(20);
	write_atapi_register(base, ATA_REG_CTRL, ap->ctl);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked. Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (bfin_check_status(ap) == 0xFF)
		return 0;

	bfin_bus_post_reset(ap, devmask);

	return 0;
}
1086
/**
 * bfin_std_softreset - reset host port via ATA SRST
 * @ap: port to reset
 * @classes: resulting classes of attached devices
 * @deadline: deadline jiffies for the operation (currently unused)
 *
 * Detects devices, performs an SRST bus reset and classifies the
 * attached devices by signature.  Returns 0 on success, -EIO if the
 * bus reset reports failure.
 *
 * NOTE(review): bfin_bus_softreset() above always returns 0, so the
 * -EIO path here is currently unreachable — confirm that is intended.
 *
 * Note: Original code is ata_std_softreset().
 */

static int bfin_std_softreset(struct ata_port *ap, unsigned int *classes,
		unsigned long deadline)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	/* link is down: report no devices and succeed */
	if (ata_port_offline(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (bfin_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && bfin_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	bfin_std_dev_select(ap, 0);

	/* issue bus reset */
	err_mask = bfin_bus_softreset(ap, devmask);
	if (err_mask) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices;
	 * 0x81 in the error register means device 0 failed diagnostics,
	 * in which case device 1 classification is skipped */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	return 0;
}
1132
1133/**
1134 * bfin_bmdma_status - Read IDE DMA status
1135 * @ap: Port associated with this ATA transaction.
1136 */
1137
1138static unsigned char bfin_bmdma_status(struct ata_port *ap)
1139{
1140 unsigned char host_stat = 0;
1141 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1142 unsigned short int_status = ATAPI_GET_INT_STATUS(base);
1143
1144 if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON)) {
1145 host_stat = ATA_DMA_ACTIVE;
1146 }
1147 if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT)) {
1148 host_stat = ATA_DMA_INTR;
1149 }
1150 if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT)) {
1151 host_stat = ATA_DMA_ERR;
1152 }
1153
1154 return host_stat;
1155}
1156
1157/**
1158 * bfin_data_xfer - Transfer data by PIO
1159 * @adev: device for this I/O
1160 * @buf: data buffer
1161 * @buflen: buffer length
1162 * @write_data: read/write
1163 *
1164 * Note: Original code is ata_data_xfer().
1165 */
1166
1167static void bfin_data_xfer(struct ata_device *adev, unsigned char *buf,
1168 unsigned int buflen, int write_data)
1169{
1170 struct ata_port *ap = adev->ap;
1171 unsigned int words = buflen >> 1;
1172 unsigned short *buf16 = (u16 *) buf;
1173 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1174
1175 /* Transfer multiple of 2 bytes */
1176 if (write_data) {
1177 write_atapi_data(base, words, buf16);
1178 } else {
1179 read_atapi_data(base, words, buf16);
1180 }
1181
1182 /* Transfer trailing 1 byte, if any. */
1183 if (unlikely(buflen & 0x01)) {
1184 unsigned short align_buf[1] = { 0 };
1185 unsigned char *trailing_buf = buf + buflen - 1;
1186
1187 if (write_data) {
1188 memcpy(align_buf, trailing_buf, 1);
1189 write_atapi_data(base, 1, align_buf);
1190 } else {
1191 read_atapi_data(base, 1, align_buf);
1192 memcpy(trailing_buf, align_buf, 1);
1193 }
1194 }
1195}
1196
1197/**
1198 * bfin_irq_clear - Clear ATAPI interrupt.
1199 * @ap: Port associated with this ATA transaction.
1200 *
1201 * Note: Original code is ata_bmdma_irq_clear().
1202 */
1203
1204static void bfin_irq_clear(struct ata_port *ap)
1205{
1206 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1207
1208 pr_debug("in atapi irq clear\n");
1209 ATAPI_SET_INT_STATUS(base, 0x1FF);
1210}
1211
1212/**
1213 * bfin_irq_on - Enable interrupts on a port.
1214 * @ap: Port on which interrupts are enabled.
1215 *
1216 * Note: Original code is ata_irq_on().
1217 */
1218
1219static unsigned char bfin_irq_on(struct ata_port *ap)
1220{
1221 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1222 u8 tmp;
1223
1224 pr_debug("in atapi irq on\n");
1225 ap->ctl &= ~ATA_NIEN;
1226 ap->last_ctl = ap->ctl;
1227
1228 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1229 tmp = ata_wait_idle(ap);
1230
1231 bfin_irq_clear(ap);
1232
1233 return tmp;
1234}
1235
/**
 * bfin_irq_ack - Acknowledge a device interrupt.
 * @ap: Port on which interrupts are enabled.
 * @chk_drq: also wait for DRQ to clear, not just BSY
 *
 * Waits for the device to drop BSY (and DRQ if requested), clears
 * the controller's done/terminate interrupt bits, and returns the
 * DMA status as seen by bfin_bmdma_status().
 *
 * Note: Original code is ata_irq_ack().
 */

static unsigned char bfin_irq_ack(struct ata_port *ap, unsigned int chk_drq)
{
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
	unsigned char status;

	pr_debug("in atapi irq ack\n");
	status = ata_busy_wait(ap, bits, 1000);
	if (status & bits)
		if (ata_msg_err(ap))
			dev_err(ap->dev, "abnormal status 0x%X\n", status);

	/* get controller status; clear intr, err bits */
	ATAPI_SET_INT_STATUS(base, ATAPI_GET_INT_STATUS(base)|ATAPI_DEV_INT
		| MULTI_DONE_INT | UDMAIN_DONE_INT | UDMAOUT_DONE_INT
		| MULTI_TERM_INT | UDMAIN_TERM_INT | UDMAOUT_TERM_INT);

	return bfin_bmdma_status(ap);
}
1262
/**
 * bfin_bmdma_freeze - Freeze DMA controller port
 * @ap: port to freeze
 *
 * Masks device interrupts by setting nIEN, then reads status and
 * clears controller interrupt state to flush anything already
 * pending.
 *
 * Note: Original code is ata_bmdma_freeze().
 */

static void bfin_bmdma_freeze(struct ata_port *ap)
{
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;

	pr_debug("in atapi dma freeze\n");
	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	write_atapi_register(base, ATA_REG_CTRL, ap->ctl);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation. Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion. Clear it.
	 */
	ata_chk_status(ap);

	bfin_irq_clear(ap);
}
1288
/**
 * bfin_bmdma_thaw - Thaw DMA controller port
 * @ap: port to thaw
 *
 * Reverses bfin_bmdma_freeze(): reads status to clear any latched
 * condition, clears pending controller interrupts, then re-enables
 * device interrupts.
 *
 * Note: Original code is ata_bmdma_thaw().
 */

void bfin_bmdma_thaw(struct ata_port *ap)
{
	bfin_check_status(ap);
	bfin_irq_clear(ap);
	bfin_irq_on(ap);
}
1302
/**
 * bfin_std_postreset - standard postreset callback
 * @ap: the target ata_port
 * @classes: classes of attached devices
 *
 * Re-enables interrupts, re-selects the devices and restores the
 * device control register after a reset.
 *
 * Note: Original code is ata_std_postreset().
 */

static void bfin_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;

	/* re-enable interrupts */
	bfin_irq_on(ap);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		bfin_std_dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		bfin_std_dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		return;
	}

	/* set up device control */
	write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
}
1332
/**
 * bfin_error_handler - Stock error handler for DMA controller
 * @ap: port to handle error for
 *
 * Delegates to the generic BMDMA EH core with this driver's
 * softreset/postreset callbacks; no hardreset method is provided.
 */

static void bfin_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, bfin_std_softreset, NULL,
			   bfin_std_postreset);
}
1343
1344static void bfin_port_stop(struct ata_port *ap)
1345{
1346 pr_debug("in atapi port stop\n");
1347 if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
1348 free_dma(CH_ATAPI_RX);
1349 free_dma(CH_ATAPI_TX);
1350 }
1351}
1352
1353static int bfin_port_start(struct ata_port *ap)
1354{
1355 pr_debug("in atapi port start\n");
1356 if (!(ap->udma_mask || ap->mwdma_mask))
1357 return 0;
1358
1359 if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
1360 if (request_dma(CH_ATAPI_TX,
1361 "BFIN ATAPI TX DMA") >= 0)
1362 return 0;
1363
1364 free_dma(CH_ATAPI_RX);
1365 }
1366
1367 ap->udma_mask = 0;
1368 ap->mwdma_mask = 0;
1369 dev_err(ap->dev, "Unable to request ATAPI DMA!"
1370 " Continue in PIO mode.\n");
1371
1372 return 0;
1373}
1374
/* SCSI host template: mostly stock libata glue.  SG_NONE because the
 * controller's transfer length register is programmed per-buffer, so
 * scatter/gather lists are not used. */
static struct scsi_host_template bfin_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= SG_NONE,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
#ifdef CONFIG_PM
	.resume			= ata_scsi_device_resume,
	.suspend		= ata_scsi_device_suspend,
#endif
};
1396
/* libata port operations: taskfile access, DMA and IRQ handling all go
 * through the bfin_* MMIO helpers; qc prep is a no-op since the DMA
 * engine is programmed directly in bmdma_setup/start. */
static const struct ata_port_operations bfin_pata_ops = {
	.port_disable		= ata_port_disable,
	.set_piomode		= bfin_set_piomode,
	.set_dmamode		= bfin_set_dmamode,

	/* taskfile access */
	.tf_load		= bfin_tf_load,
	.tf_read		= bfin_tf_read,
	.exec_command		= bfin_exec_command,
	.check_status		= bfin_check_status,
	.check_altstatus	= bfin_check_altstatus,
	.dev_select		= bfin_std_dev_select,

	/* DMA engine */
	.bmdma_setup		= bfin_bmdma_setup,
	.bmdma_start		= bfin_bmdma_start,
	.bmdma_stop		= bfin_bmdma_stop,
	.bmdma_status		= bfin_bmdma_status,
	.data_xfer		= bfin_data_xfer,

	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_qc_issue_prot,

	/* error handling */
	.freeze			= bfin_bmdma_freeze,
	.thaw			= bfin_bmdma_thaw,
	.error_handler		= bfin_error_handler,
	.post_internal_cmd	= bfin_bmdma_stop,

	/* interrupt handling */
	.irq_handler		= ata_interrupt,
	.irq_clear		= bfin_irq_clear,
	.irq_on			= bfin_irq_on,
	.irq_ack		= bfin_irq_ack,

	.port_start		= bfin_port_start,
	.port_stop		= bfin_port_stop,
};
1431
/* Single-port capability table: PIO 0-4 always; UDMA up to mode 5 only
 * when the DMA build option is enabled, otherwise PIO-only. */
static struct ata_port_info bfin_port_info[] = {
	{
		.sht		= &bfin_sht,
		.flags		= ATA_FLAG_SLAVE_POSS
				| ATA_FLAG_MMIO
				| ATA_FLAG_NO_LEGACY,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.mwdma_mask	= 0,
#ifdef CONFIG_PATA_BF54X_DMA
		.udma_mask	= ATA_UDMA5,
#else
		.udma_mask	= 0,
#endif
		.port_ops	= &bfin_pata_ops,
	},
};
1448
1449/**
1450 * bfin_reset_controller - initialize BF54x ATAPI controller.
1451 */
1452
1453static int bfin_reset_controller(struct ata_host *host)
1454{
1455 void __iomem *base = (void __iomem *)host->ports[0]->ioaddr.ctl_addr;
1456 int count;
1457 unsigned short status;
1458
1459 /* Disable all ATAPI interrupts */
1460 ATAPI_SET_INT_MASK(base, 0);
1461 SSYNC();
1462
1463 /* Assert the RESET signal 25us*/
1464 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | DEV_RST);
1465 udelay(30);
1466
1467 /* Negate the RESET signal for 2ms*/
1468 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) & ~DEV_RST);
1469 msleep(2);
1470
1471 /* Wait on Busy flag to clear */
1472 count = 10000000;
1473 do {
1474 status = read_atapi_register(base, ATA_REG_STATUS);
1475 } while (count-- && (status & ATA_BUSY));
1476
1477 /* Enable only ATAPI Device interrupt */
1478 ATAPI_SET_INT_MASK(base, 1);
1479 SSYNC();
1480
1481 return (!count);
1482}
1483
/**
 * atapi_io_port - define atapi peripheral port pins.
 *
 * Zero-terminated pin list passed to peripheral_request_list() /
 * peripheral_free_list() to claim the ATAPI signals on the BF54x
 * port multiplexer.
 */
static unsigned short atapi_io_port[] = {
	P_ATAPI_RESET,
	P_ATAPI_DIOR,
	P_ATAPI_DIOW,
	P_ATAPI_CS0,
	P_ATAPI_CS1,
	P_ATAPI_DMACK,
	P_ATAPI_DMARQ,
	P_ATAPI_INTRQ,
	P_ATAPI_IORDY,
	0	/* terminator */
};
1499
1500/**
1501 * bfin_atapi_probe - attach a bfin atapi interface
1502 * @pdev: platform device
1503 *
1504 * Register a bfin atapi interface.
1505 *
1506 *
1507 * Platform devices are expected to contain 2 resources per port:
1508 *
1509 * - I/O Base (IORESOURCE_IO)
1510 * - IRQ (IORESOURCE_IRQ)
1511 *
1512 */
1513static int __devinit bfin_atapi_probe(struct platform_device *pdev)
1514{
1515 int board_idx = 0;
1516 struct resource *res;
1517 struct ata_host *host;
1518 const struct ata_port_info *ppi[] =
1519 { &bfin_port_info[board_idx], NULL };
1520
1521 /*
1522 * Simple resource validation ..
1523 */
1524 if (unlikely(pdev->num_resources != 2)) {
1525 dev_err(&pdev->dev, "invalid number of resources\n");
1526 return -EINVAL;
1527 }
1528
1529 /*
1530 * Get the register base first
1531 */
1532 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1533 if (res == NULL)
1534 return -EINVAL;
1535
1536 /*
1537 * Now that that's out of the way, wire up the port..
1538 */
1539 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
1540 if (!host)
1541 return -ENOMEM;
1542
1543 host->ports[0]->ioaddr.ctl_addr = (void *)res->start;
1544
1545 if (peripheral_request_list(atapi_io_port, "atapi-io-port")) {
1546 dev_err(&pdev->dev, "Requesting Peripherals faild\n");
1547 return -EFAULT;
1548 }
1549
1550 if (bfin_reset_controller(host)) {
1551 peripheral_free_list(atapi_io_port);
1552 dev_err(&pdev->dev, "Fail to reset ATAPI device\n");
1553 return -EFAULT;
1554 }
1555
1556 if (ata_host_activate(host, platform_get_irq(pdev, 0),
1557 ata_interrupt, IRQF_SHARED, &bfin_sht) != 0) {
1558 peripheral_free_list(atapi_io_port);
1559 dev_err(&pdev->dev, "Fail to attach ATAPI device\n");
1560 return -ENODEV;
1561 }
1562
1563 return 0;
1564}
1565
1566/**
1567 * bfin_atapi_remove - unplug a bfin atapi interface
1568 * @pdev: platform device
1569 *
1570 * A bfin atapi device has been unplugged. Perform the needed
1571 * cleanup. Also called on module unload for any active devices.
1572 */
1573static int __devexit bfin_atapi_remove(struct platform_device *pdev)
1574{
1575 struct device *dev = &pdev->dev;
1576 struct ata_host *host = dev_get_drvdata(dev);
1577
1578 ata_host_detach(host);
1579
1580 peripheral_free_list(atapi_io_port);
1581
1582 return 0;
1583}
1584
#ifdef CONFIG_PM
/* PM stubs: no controller state is saved or restored here yet.
 * Made static — they are referenced only via bfin_atapi_driver in
 * this file, so they should not leak into the global namespace. */
static int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state)
{
	return 0;
}

static int bfin_atapi_resume(struct platform_device *pdev)
{
	return 0;
}
#endif
1596
/* Platform driver glue; PM callbacks are compiled in only when
 * CONFIG_PM is set. */
static struct platform_driver bfin_atapi_driver = {
	.probe			= bfin_atapi_probe,
	.remove			= __devexit_p(bfin_atapi_remove),
	.driver = {
		.name		= DRV_NAME,
		.owner		= THIS_MODULE,
#ifdef CONFIG_PM
		.suspend	= bfin_atapi_suspend,
		.resume		= bfin_atapi_resume,
#endif
	},
};
1609
/* Module entry point: register the platform driver. */
static int __init bfin_atapi_init(void)
{
	pr_info("register bfin atapi driver\n");
	return platform_driver_register(&bfin_atapi_driver);
}
1615
/* Module exit point: unregister the platform driver. */
static void __exit bfin_atapi_exit(void)
{
	platform_driver_unregister(&bfin_atapi_driver);
}
1620
1621module_init(bfin_atapi_init);
1622module_exit(bfin_atapi_exit);
1623
1624MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
1625MODULE_DESCRIPTION("PATA driver for blackfin 54x ATAPI controller");
1626MODULE_LICENSE("GPL");
1627MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index 0feb5ae8c4..43d198f909 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -153,7 +153,7 @@ static int cmd640_port_start(struct ata_port *ap)
153 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 153 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
154 struct cmd640_reg *timing; 154 struct cmd640_reg *timing;
155 155
156 int ret = ata_port_start(ap); 156 int ret = ata_sff_port_start(ap);
157 if (ret < 0) 157 if (ret < 0)
158 return ret; 158 return ret;
159 159
@@ -184,7 +184,6 @@ static struct scsi_host_template cmd640_sht = {
184}; 184};
185 185
186static struct ata_port_operations cmd640_port_ops = { 186static struct ata_port_operations cmd640_port_ops = {
187 .port_disable = ata_port_disable,
188 .set_piomode = cmd640_set_piomode, 187 .set_piomode = cmd640_set_piomode,
189 .mode_filter = ata_pci_default_filter, 188 .mode_filter = ata_pci_default_filter,
190 .tf_load = ata_tf_load, 189 .tf_load = ata_tf_load,
@@ -213,7 +212,6 @@ static struct ata_port_operations cmd640_port_ops = {
213 .irq_handler = ata_interrupt, 212 .irq_handler = ata_interrupt,
214 .irq_clear = ata_bmdma_irq_clear, 213 .irq_clear = ata_bmdma_irq_clear,
215 .irq_on = ata_irq_on, 214 .irq_on = ata_irq_on,
216 .irq_ack = ata_irq_ack,
217 215
218 .port_start = cmd640_port_start, 216 .port_start = cmd640_port_start,
219}; 217};
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index e34b632487..9e412c26b2 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -31,7 +31,7 @@
31#include <linux/libata.h> 31#include <linux/libata.h>
32 32
33#define DRV_NAME "pata_cmd64x" 33#define DRV_NAME "pata_cmd64x"
34#define DRV_VERSION "0.2.4" 34#define DRV_VERSION "0.2.5"
35 35
36/* 36/*
37 * CMD64x specific registers definition. 37 * CMD64x specific registers definition.
@@ -88,14 +88,15 @@ static int cmd648_cable_detect(struct ata_port *ap)
88} 88}
89 89
90/** 90/**
91 * cmd64x_set_piomode - set initial PIO mode data 91 * cmd64x_set_piomode - set PIO and MWDMA timing
92 * @ap: ATA interface 92 * @ap: ATA interface
93 * @adev: ATA device 93 * @adev: ATA device
94 * @mode: mode
94 * 95 *
95 * Called to do the PIO mode setup. 96 * Called to do the PIO and MWDMA mode setup.
96 */ 97 */
97 98
98static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev) 99static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 mode)
99{ 100{
100 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 101 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
101 struct ata_timing t; 102 struct ata_timing t;
@@ -117,8 +118,9 @@ static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev)
117 int arttim = arttim_port[ap->port_no][adev->devno]; 118 int arttim = arttim_port[ap->port_no][adev->devno];
118 int drwtim = drwtim_port[ap->port_no][adev->devno]; 119 int drwtim = drwtim_port[ap->port_no][adev->devno];
119 120
120 121 /* ata_timing_compute is smart and will produce timings for MWDMA
121 if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) { 122 that don't violate the drives PIO capabilities. */
123 if (ata_timing_compute(adev, mode, &t, T, 0) < 0) {
122 printk(KERN_ERR DRV_NAME ": mode computation failed.\n"); 124 printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
123 return; 125 return;
124 } 126 }
@@ -168,6 +170,20 @@ static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev)
168} 170}
169 171
170/** 172/**
173 * cmd64x_set_piomode - set initial PIO mode data
174 * @ap: ATA interface
175 * @adev: ATA device
176 *
177 * Used when configuring the devices ot set the PIO timings. All the
178 * actual work is done by the PIO/MWDMA setting helper
179 */
180
181static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev)
182{
183 cmd64x_set_timing(ap, adev, adev->pio_mode);
184}
185
186/**
171 * cmd64x_set_dmamode - set initial DMA mode data 187 * cmd64x_set_dmamode - set initial DMA mode data
172 * @ap: ATA interface 188 * @ap: ATA interface
173 * @adev: ATA device 189 * @adev: ATA device
@@ -180,9 +196,6 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
180 static const u8 udma_data[] = { 196 static const u8 udma_data[] = {
181 0x30, 0x20, 0x10, 0x20, 0x10, 0x00 197 0x30, 0x20, 0x10, 0x20, 0x10, 0x00
182 }; 198 };
183 static const u8 mwdma_data[] = {
184 0x30, 0x20, 0x10
185 };
186 199
187 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 200 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
188 u8 regU, regD; 201 u8 regU, regD;
@@ -208,8 +221,10 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
208 regU |= 1 << adev->devno; /* UDMA on */ 221 regU |= 1 << adev->devno; /* UDMA on */
209 if (adev->dma_mode > 2) /* 15nS timing */ 222 if (adev->dma_mode > 2) /* 15nS timing */
210 regU |= 4 << adev->devno; 223 regU |= 4 << adev->devno;
211 } else 224 } else {
212 regD |= mwdma_data[adev->dma_mode - XFER_MW_DMA_0] << shift; 225 regU &= ~ (1 << adev->devno); /* UDMA off */
226 cmd64x_set_timing(ap, adev, adev->dma_mode);
227 }
213 228
214 regD |= 0x20 << adev->devno; 229 regD |= 0x20 << adev->devno;
215 230
@@ -269,7 +284,6 @@ static struct scsi_host_template cmd64x_sht = {
269}; 284};
270 285
271static struct ata_port_operations cmd64x_port_ops = { 286static struct ata_port_operations cmd64x_port_ops = {
272 .port_disable = ata_port_disable,
273 .set_piomode = cmd64x_set_piomode, 287 .set_piomode = cmd64x_set_piomode,
274 .set_dmamode = cmd64x_set_dmamode, 288 .set_dmamode = cmd64x_set_dmamode,
275 .mode_filter = ata_pci_default_filter, 289 .mode_filter = ata_pci_default_filter,
@@ -298,13 +312,11 @@ static struct ata_port_operations cmd64x_port_ops = {
298 .irq_handler = ata_interrupt, 312 .irq_handler = ata_interrupt,
299 .irq_clear = ata_bmdma_irq_clear, 313 .irq_clear = ata_bmdma_irq_clear,
300 .irq_on = ata_irq_on, 314 .irq_on = ata_irq_on,
301 .irq_ack = ata_irq_ack,
302 315
303 .port_start = ata_port_start, 316 .port_start = ata_port_start,
304}; 317};
305 318
306static struct ata_port_operations cmd646r1_port_ops = { 319static struct ata_port_operations cmd646r1_port_ops = {
307 .port_disable = ata_port_disable,
308 .set_piomode = cmd64x_set_piomode, 320 .set_piomode = cmd64x_set_piomode,
309 .set_dmamode = cmd64x_set_dmamode, 321 .set_dmamode = cmd64x_set_dmamode,
310 .mode_filter = ata_pci_default_filter, 322 .mode_filter = ata_pci_default_filter,
@@ -333,13 +345,11 @@ static struct ata_port_operations cmd646r1_port_ops = {
333 .irq_handler = ata_interrupt, 345 .irq_handler = ata_interrupt,
334 .irq_clear = ata_bmdma_irq_clear, 346 .irq_clear = ata_bmdma_irq_clear,
335 .irq_on = ata_irq_on, 347 .irq_on = ata_irq_on,
336 .irq_ack = ata_irq_ack,
337 348
338 .port_start = ata_port_start, 349 .port_start = ata_port_start,
339}; 350};
340 351
341static struct ata_port_operations cmd648_port_ops = { 352static struct ata_port_operations cmd648_port_ops = {
342 .port_disable = ata_port_disable,
343 .set_piomode = cmd64x_set_piomode, 353 .set_piomode = cmd64x_set_piomode,
344 .set_dmamode = cmd64x_set_dmamode, 354 .set_dmamode = cmd64x_set_dmamode,
345 .mode_filter = ata_pci_default_filter, 355 .mode_filter = ata_pci_default_filter,
@@ -368,7 +378,6 @@ static struct ata_port_operations cmd648_port_ops = {
368 .irq_handler = ata_interrupt, 378 .irq_handler = ata_interrupt,
369 .irq_clear = ata_bmdma_irq_clear, 379 .irq_clear = ata_bmdma_irq_clear,
370 .irq_on = ata_irq_on, 380 .irq_on = ata_irq_on,
371 .irq_ack = ata_irq_ack,
372 381
373 .port_start = ata_port_start, 382 .port_start = ata_port_start,
374}; 383};
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index e2459088cd..33f7f0843f 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -158,7 +158,6 @@ static struct scsi_host_template cs5520_sht = {
158}; 158};
159 159
160static struct ata_port_operations cs5520_port_ops = { 160static struct ata_port_operations cs5520_port_ops = {
161 .port_disable = ata_port_disable,
162 .set_piomode = cs5520_set_piomode, 161 .set_piomode = cs5520_set_piomode,
163 .set_dmamode = cs5520_set_dmamode, 162 .set_dmamode = cs5520_set_dmamode,
164 163
@@ -184,13 +183,14 @@ static struct ata_port_operations cs5520_port_ops = {
184 183
185 .irq_clear = ata_bmdma_irq_clear, 184 .irq_clear = ata_bmdma_irq_clear,
186 .irq_on = ata_irq_on, 185 .irq_on = ata_irq_on,
187 .irq_ack = ata_irq_ack,
188 186
189 .port_start = ata_port_start, 187 .port_start = ata_sff_port_start,
190}; 188};
191 189
192static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 190static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
193{ 191{
192 static const unsigned int cmd_port[] = { 0x1F0, 0x170 };
193 static const unsigned int ctl_port[] = { 0x3F6, 0x376 };
194 struct ata_port_info pi = { 194 struct ata_port_info pi = {
195 .flags = ATA_FLAG_SLAVE_POSS, 195 .flags = ATA_FLAG_SLAVE_POSS,
196 .pio_mask = 0x1f, 196 .pio_mask = 0x1f,
@@ -244,10 +244,10 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
244 } 244 }
245 245
246 /* Map IO ports and initialize host accordingly */ 246 /* Map IO ports and initialize host accordingly */
247 iomap[0] = devm_ioport_map(&pdev->dev, 0x1F0, 8); 247 iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8);
248 iomap[1] = devm_ioport_map(&pdev->dev, 0x3F6, 1); 248 iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1);
249 iomap[2] = devm_ioport_map(&pdev->dev, 0x170, 8); 249 iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8);
250 iomap[3] = devm_ioport_map(&pdev->dev, 0x376, 1); 250 iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1);
251 iomap[4] = pcim_iomap(pdev, 2, 0); 251 iomap[4] = pcim_iomap(pdev, 2, 0);
252 252
253 if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4]) 253 if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
@@ -260,6 +260,10 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
260 ioaddr->bmdma_addr = iomap[4]; 260 ioaddr->bmdma_addr = iomap[4];
261 ata_std_ports(ioaddr); 261 ata_std_ports(ioaddr);
262 262
263 ata_port_desc(host->ports[0],
264 "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]);
265 ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma");
266
263 ioaddr = &host->ports[1]->ioaddr; 267 ioaddr = &host->ports[1]->ioaddr;
264 ioaddr->cmd_addr = iomap[2]; 268 ioaddr->cmd_addr = iomap[2];
265 ioaddr->ctl_addr = iomap[3]; 269 ioaddr->ctl_addr = iomap[3];
@@ -267,6 +271,10 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
267 ioaddr->bmdma_addr = iomap[4] + 8; 271 ioaddr->bmdma_addr = iomap[4] + 8;
268 ata_std_ports(ioaddr); 272 ata_std_ports(ioaddr);
269 273
274 ata_port_desc(host->ports[1],
275 "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]);
276 ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma");
277
270 /* activate the host */ 278 /* activate the host */
271 pci_set_master(pdev); 279 pci_set_master(pdev);
272 rc = ata_host_start(host); 280 rc = ata_host_start(host);
@@ -285,33 +293,12 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
285 if (rc) 293 if (rc)
286 return rc; 294 return rc;
287 295
288 if (i == 0) 296 ata_port_desc(ap, "irq %d", irq[i]);
289 host->irq = irq[0];
290 else
291 host->irq2 = irq[1];
292 } 297 }
293 298
294 return ata_host_register(host, &cs5520_sht); 299 return ata_host_register(host, &cs5520_sht);
295} 300}
296 301
297/**
298 * cs5520_remove_one - device unload
299 * @pdev: PCI device being removed
300 *
301 * Handle an unplug/unload event for a PCI device. Unload the
302 * PCI driver but do not use the default handler as we manage
303 * resources ourself and *MUST NOT* disable the device as it has
304 * other functions.
305 */
306
307static void __devexit cs5520_remove_one(struct pci_dev *pdev)
308{
309 struct device *dev = pci_dev_to_dev(pdev);
310 struct ata_host *host = dev_get_drvdata(dev);
311
312 ata_host_detach(host);
313}
314
315#ifdef CONFIG_PM 302#ifdef CONFIG_PM
316/** 303/**
317 * cs5520_reinit_one - device resume 304 * cs5520_reinit_one - device resume
@@ -368,7 +355,7 @@ static struct pci_driver cs5520_pci_driver = {
368 .name = DRV_NAME, 355 .name = DRV_NAME,
369 .id_table = pata_cs5520, 356 .id_table = pata_cs5520,
370 .probe = cs5520_init_one, 357 .probe = cs5520_init_one,
371 .remove = cs5520_remove_one, 358 .remove = ata_pci_remove_one,
372#ifdef CONFIG_PM 359#ifdef CONFIG_PM
373 .suspend = cs5520_pci_device_suspend, 360 .suspend = cs5520_pci_device_suspend,
374 .resume = cs5520_reinit_one, 361 .resume = cs5520_reinit_one,
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index eaaea848b6..57e827e410 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -179,7 +179,6 @@ static struct scsi_host_template cs5530_sht = {
179}; 179};
180 180
181static struct ata_port_operations cs5530_port_ops = { 181static struct ata_port_operations cs5530_port_ops = {
182 .port_disable = ata_port_disable,
183 .set_piomode = cs5530_set_piomode, 182 .set_piomode = cs5530_set_piomode,
184 .set_dmamode = cs5530_set_dmamode, 183 .set_dmamode = cs5530_set_dmamode,
185 .mode_filter = ata_pci_default_filter, 184 .mode_filter = ata_pci_default_filter,
@@ -209,9 +208,8 @@ static struct ata_port_operations cs5530_port_ops = {
209 .irq_handler = ata_interrupt, 208 .irq_handler = ata_interrupt,
210 .irq_clear = ata_bmdma_irq_clear, 209 .irq_clear = ata_bmdma_irq_clear,
211 .irq_on = ata_irq_on, 210 .irq_on = ata_irq_on,
212 .irq_ack = ata_irq_ack,
213 211
214 .port_start = ata_port_start, 212 .port_start = ata_sff_port_start,
215}; 213};
216 214
217static const struct dmi_system_id palmax_dmi_table[] = { 215static const struct dmi_system_id palmax_dmi_table[] = {
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index 360b6f32e1..3578593a88 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -176,7 +176,6 @@ static struct scsi_host_template cs5535_sht = {
176}; 176};
177 177
178static struct ata_port_operations cs5535_port_ops = { 178static struct ata_port_operations cs5535_port_ops = {
179 .port_disable = ata_port_disable,
180 .set_piomode = cs5535_set_piomode, 179 .set_piomode = cs5535_set_piomode,
181 .set_dmamode = cs5535_set_dmamode, 180 .set_dmamode = cs5535_set_dmamode,
182 .mode_filter = ata_pci_default_filter, 181 .mode_filter = ata_pci_default_filter,
@@ -206,9 +205,8 @@ static struct ata_port_operations cs5535_port_ops = {
206 .irq_handler = ata_interrupt, 205 .irq_handler = ata_interrupt,
207 .irq_clear = ata_bmdma_irq_clear, 206 .irq_clear = ata_bmdma_irq_clear,
208 .irq_on = ata_irq_on, 207 .irq_on = ata_irq_on,
209 .irq_ack = ata_irq_ack,
210 208
211 .port_start = ata_port_start, 209 .port_start = ata_sff_port_start,
212}; 210};
213 211
214/** 212/**
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 6cbc8778bf..fc5f9c4e5d 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -128,7 +128,6 @@ static struct scsi_host_template cy82c693_sht = {
128}; 128};
129 129
130static struct ata_port_operations cy82c693_port_ops = { 130static struct ata_port_operations cy82c693_port_ops = {
131 .port_disable = ata_port_disable,
132 .set_piomode = cy82c693_set_piomode, 131 .set_piomode = cy82c693_set_piomode,
133 .set_dmamode = cy82c693_set_dmamode, 132 .set_dmamode = cy82c693_set_dmamode,
134 .mode_filter = ata_pci_default_filter, 133 .mode_filter = ata_pci_default_filter,
@@ -158,9 +157,8 @@ static struct ata_port_operations cy82c693_port_ops = {
158 .irq_handler = ata_interrupt, 157 .irq_handler = ata_interrupt,
159 .irq_clear = ata_bmdma_irq_clear, 158 .irq_clear = ata_bmdma_irq_clear,
160 .irq_on = ata_irq_on, 159 .irq_on = ata_irq_on,
161 .irq_ack = ata_irq_ack,
162 160
163 .port_start = ata_port_start, 161 .port_start = ata_sff_port_start,
164}; 162};
165 163
166static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 164static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index c8ba59c561..043dcd3510 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -26,25 +26,26 @@
26 26
27/** 27/**
28 * efar_pre_reset - Enable bits 28 * efar_pre_reset - Enable bits
29 * @ap: Port 29 * @link: ATA link
30 * @deadline: deadline jiffies for the operation 30 * @deadline: deadline jiffies for the operation
31 * 31 *
32 * Perform cable detection for the EFAR ATA interface. This is 32 * Perform cable detection for the EFAR ATA interface. This is
33 * different to the PIIX arrangement 33 * different to the PIIX arrangement
34 */ 34 */
35 35
36static int efar_pre_reset(struct ata_port *ap, unsigned long deadline) 36static int efar_pre_reset(struct ata_link *link, unsigned long deadline)
37{ 37{
38 static const struct pci_bits efar_enable_bits[] = { 38 static const struct pci_bits efar_enable_bits[] = {
39 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ 39 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
40 { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */ 40 { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
41 }; 41 };
42 struct ata_port *ap = link->ap;
42 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 43 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
43 44
44 if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no])) 45 if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no]))
45 return -ENOENT; 46 return -ENOENT;
46 47
47 return ata_std_prereset(ap, deadline); 48 return ata_std_prereset(link, deadline);
48} 49}
49 50
50/** 51/**
@@ -250,7 +251,6 @@ static struct scsi_host_template efar_sht = {
250}; 251};
251 252
252static const struct ata_port_operations efar_ops = { 253static const struct ata_port_operations efar_ops = {
253 .port_disable = ata_port_disable,
254 .set_piomode = efar_set_piomode, 254 .set_piomode = efar_set_piomode,
255 .set_dmamode = efar_set_dmamode, 255 .set_dmamode = efar_set_dmamode,
256 .mode_filter = ata_pci_default_filter, 256 .mode_filter = ata_pci_default_filter,
@@ -278,9 +278,8 @@ static const struct ata_port_operations efar_ops = {
278 .irq_handler = ata_interrupt, 278 .irq_handler = ata_interrupt,
279 .irq_clear = ata_bmdma_irq_clear, 279 .irq_clear = ata_bmdma_irq_clear,
280 .irq_on = ata_irq_on, 280 .irq_on = ata_irq_on,
281 .irq_ack = ata_irq_ack,
282 281
283 .port_start = ata_port_start, 282 .port_start = ata_sff_port_start,
284}; 283};
285 284
286 285
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 6f7d34ad19..0713872cf6 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -312,7 +312,6 @@ static struct scsi_host_template hpt36x_sht = {
312 */ 312 */
313 313
314static struct ata_port_operations hpt366_port_ops = { 314static struct ata_port_operations hpt366_port_ops = {
315 .port_disable = ata_port_disable,
316 .set_piomode = hpt366_set_piomode, 315 .set_piomode = hpt366_set_piomode,
317 .set_dmamode = hpt366_set_dmamode, 316 .set_dmamode = hpt366_set_dmamode,
318 .mode_filter = hpt366_filter, 317 .mode_filter = hpt366_filter,
@@ -342,9 +341,8 @@ static struct ata_port_operations hpt366_port_ops = {
342 .irq_handler = ata_interrupt, 341 .irq_handler = ata_interrupt,
343 .irq_clear = ata_bmdma_irq_clear, 342 .irq_clear = ata_bmdma_irq_clear,
344 .irq_on = ata_irq_on, 343 .irq_on = ata_irq_on,
345 .irq_ack = ata_irq_ack,
346 344
347 .port_start = ata_port_start, 345 .port_start = ata_sff_port_start,
348}; 346};
349 347
350/** 348/**
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index c5ddd937db..e61cb1fd57 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -304,15 +304,16 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
304 304
305/** 305/**
306 * hpt37x_pre_reset - reset the hpt37x bus 306 * hpt37x_pre_reset - reset the hpt37x bus
307 * @ap: ATA port to reset 307 * @link: ATA link to reset
308 * @deadline: deadline jiffies for the operation 308 * @deadline: deadline jiffies for the operation
309 * 309 *
310 * Perform the initial reset handling for the 370/372 and 374 func 0 310 * Perform the initial reset handling for the 370/372 and 374 func 0
311 */ 311 */
312 312
313static int hpt37x_pre_reset(struct ata_port *ap, unsigned long deadline) 313static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline)
314{ 314{
315 u8 scr2, ata66; 315 u8 scr2, ata66;
316 struct ata_port *ap = link->ap;
316 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 317 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
317 static const struct pci_bits hpt37x_enable_bits[] = { 318 static const struct pci_bits hpt37x_enable_bits[] = {
318 { 0x50, 1, 0x04, 0x04 }, 319 { 0x50, 1, 0x04, 0x04 },
@@ -337,7 +338,7 @@ static int hpt37x_pre_reset(struct ata_port *ap, unsigned long deadline)
337 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); 338 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
338 udelay(100); 339 udelay(100);
339 340
340 return ata_std_prereset(ap, deadline); 341 return ata_std_prereset(link, deadline);
341} 342}
342 343
343/** 344/**
@@ -352,7 +353,7 @@ static void hpt37x_error_handler(struct ata_port *ap)
352 ata_bmdma_drive_eh(ap, hpt37x_pre_reset, ata_std_softreset, NULL, ata_std_postreset); 353 ata_bmdma_drive_eh(ap, hpt37x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
353} 354}
354 355
355static int hpt374_pre_reset(struct ata_port *ap, unsigned long deadline) 356static int hpt374_pre_reset(struct ata_link *link, unsigned long deadline)
356{ 357{
357 static const struct pci_bits hpt37x_enable_bits[] = { 358 static const struct pci_bits hpt37x_enable_bits[] = {
358 { 0x50, 1, 0x04, 0x04 }, 359 { 0x50, 1, 0x04, 0x04 },
@@ -360,6 +361,7 @@ static int hpt374_pre_reset(struct ata_port *ap, unsigned long deadline)
360 }; 361 };
361 u16 mcr3, mcr6; 362 u16 mcr3, mcr6;
362 u8 ata66; 363 u8 ata66;
364 struct ata_port *ap = link->ap;
363 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 365 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
364 366
365 if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no])) 367 if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no]))
@@ -387,7 +389,7 @@ static int hpt374_pre_reset(struct ata_port *ap, unsigned long deadline)
387 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); 389 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
388 udelay(100); 390 udelay(100);
389 391
390 return ata_std_prereset(ap, deadline); 392 return ata_std_prereset(link, deadline);
391} 393}
392 394
393/** 395/**
@@ -642,7 +644,6 @@ static struct scsi_host_template hpt37x_sht = {
642 */ 644 */
643 645
644static struct ata_port_operations hpt370_port_ops = { 646static struct ata_port_operations hpt370_port_ops = {
645 .port_disable = ata_port_disable,
646 .set_piomode = hpt370_set_piomode, 647 .set_piomode = hpt370_set_piomode,
647 .set_dmamode = hpt370_set_dmamode, 648 .set_dmamode = hpt370_set_dmamode,
648 .mode_filter = hpt370_filter, 649 .mode_filter = hpt370_filter,
@@ -671,9 +672,8 @@ static struct ata_port_operations hpt370_port_ops = {
671 .irq_handler = ata_interrupt, 672 .irq_handler = ata_interrupt,
672 .irq_clear = ata_bmdma_irq_clear, 673 .irq_clear = ata_bmdma_irq_clear,
673 .irq_on = ata_irq_on, 674 .irq_on = ata_irq_on,
674 .irq_ack = ata_irq_ack,
675 675
676 .port_start = ata_port_start, 676 .port_start = ata_sff_port_start,
677}; 677};
678 678
679/* 679/*
@@ -681,7 +681,6 @@ static struct ata_port_operations hpt370_port_ops = {
681 */ 681 */
682 682
683static struct ata_port_operations hpt370a_port_ops = { 683static struct ata_port_operations hpt370a_port_ops = {
684 .port_disable = ata_port_disable,
685 .set_piomode = hpt370_set_piomode, 684 .set_piomode = hpt370_set_piomode,
686 .set_dmamode = hpt370_set_dmamode, 685 .set_dmamode = hpt370_set_dmamode,
687 .mode_filter = hpt370a_filter, 686 .mode_filter = hpt370a_filter,
@@ -710,9 +709,8 @@ static struct ata_port_operations hpt370a_port_ops = {
710 .irq_handler = ata_interrupt, 709 .irq_handler = ata_interrupt,
711 .irq_clear = ata_bmdma_irq_clear, 710 .irq_clear = ata_bmdma_irq_clear,
712 .irq_on = ata_irq_on, 711 .irq_on = ata_irq_on,
713 .irq_ack = ata_irq_ack,
714 712
715 .port_start = ata_port_start, 713 .port_start = ata_sff_port_start,
716}; 714};
717 715
718/* 716/*
@@ -721,7 +719,6 @@ static struct ata_port_operations hpt370a_port_ops = {
721 */ 719 */
722 720
723static struct ata_port_operations hpt372_port_ops = { 721static struct ata_port_operations hpt372_port_ops = {
724 .port_disable = ata_port_disable,
725 .set_piomode = hpt372_set_piomode, 722 .set_piomode = hpt372_set_piomode,
726 .set_dmamode = hpt372_set_dmamode, 723 .set_dmamode = hpt372_set_dmamode,
727 .mode_filter = ata_pci_default_filter, 724 .mode_filter = ata_pci_default_filter,
@@ -750,9 +747,8 @@ static struct ata_port_operations hpt372_port_ops = {
750 .irq_handler = ata_interrupt, 747 .irq_handler = ata_interrupt,
751 .irq_clear = ata_bmdma_irq_clear, 748 .irq_clear = ata_bmdma_irq_clear,
752 .irq_on = ata_irq_on, 749 .irq_on = ata_irq_on,
753 .irq_ack = ata_irq_ack,
754 750
755 .port_start = ata_port_start, 751 .port_start = ata_sff_port_start,
756}; 752};
757 753
758/* 754/*
@@ -761,7 +757,6 @@ static struct ata_port_operations hpt372_port_ops = {
761 */ 757 */
762 758
763static struct ata_port_operations hpt374_port_ops = { 759static struct ata_port_operations hpt374_port_ops = {
764 .port_disable = ata_port_disable,
765 .set_piomode = hpt372_set_piomode, 760 .set_piomode = hpt372_set_piomode,
766 .set_dmamode = hpt372_set_dmamode, 761 .set_dmamode = hpt372_set_dmamode,
767 .mode_filter = ata_pci_default_filter, 762 .mode_filter = ata_pci_default_filter,
@@ -790,9 +785,8 @@ static struct ata_port_operations hpt374_port_ops = {
790 .irq_handler = ata_interrupt, 785 .irq_handler = ata_interrupt,
791 .irq_clear = ata_bmdma_irq_clear, 786 .irq_clear = ata_bmdma_irq_clear,
792 .irq_on = ata_irq_on, 787 .irq_on = ata_irq_on,
793 .irq_ack = ata_irq_ack,
794 788
795 .port_start = ata_port_start, 789 .port_start = ata_sff_port_start,
796}; 790};
797 791
798/** 792/**
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index f8f234bfc8..9f1c084f84 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -141,21 +141,22 @@ static int hpt3x2n_cable_detect(struct ata_port *ap)
141 141
142/** 142/**
143 * hpt3x2n_pre_reset - reset the hpt3x2n bus 143 * hpt3x2n_pre_reset - reset the hpt3x2n bus
144 * @ap: ATA port to reset 144 * @link: ATA link to reset
145 * @deadline: deadline jiffies for the operation 145 * @deadline: deadline jiffies for the operation
146 * 146 *
147 * Perform the initial reset handling for the 3x2n series controllers. 147 * Perform the initial reset handling for the 3x2n series controllers.
148 * Reset the hardware and state machine, 148 * Reset the hardware and state machine,
149 */ 149 */
150 150
151static int hpt3xn_pre_reset(struct ata_port *ap, unsigned long deadline) 151static int hpt3xn_pre_reset(struct ata_link *link, unsigned long deadline)
152{ 152{
153 struct ata_port *ap = link->ap;
153 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 154 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
154 /* Reset the state machine */ 155 /* Reset the state machine */
155 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); 156 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
156 udelay(100); 157 udelay(100);
157 158
158 return ata_std_prereset(ap, deadline); 159 return ata_std_prereset(link, deadline);
159} 160}
160 161
161/** 162/**
@@ -360,7 +361,6 @@ static struct scsi_host_template hpt3x2n_sht = {
360 */ 361 */
361 362
362static struct ata_port_operations hpt3x2n_port_ops = { 363static struct ata_port_operations hpt3x2n_port_ops = {
363 .port_disable = ata_port_disable,
364 .set_piomode = hpt3x2n_set_piomode, 364 .set_piomode = hpt3x2n_set_piomode,
365 .set_dmamode = hpt3x2n_set_dmamode, 365 .set_dmamode = hpt3x2n_set_dmamode,
366 .mode_filter = ata_pci_default_filter, 366 .mode_filter = ata_pci_default_filter,
@@ -390,9 +390,8 @@ static struct ata_port_operations hpt3x2n_port_ops = {
390 .irq_handler = ata_interrupt, 390 .irq_handler = ata_interrupt,
391 .irq_clear = ata_bmdma_irq_clear, 391 .irq_clear = ata_bmdma_irq_clear,
392 .irq_on = ata_irq_on, 392 .irq_on = ata_irq_on,
393 .irq_ack = ata_irq_ack,
394 393
395 .port_start = ata_port_start, 394 .port_start = ata_sff_port_start,
396}; 395};
397 396
398/** 397/**
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index be0f05efac..cb8bdb6887 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -120,7 +120,6 @@ static struct scsi_host_template hpt3x3_sht = {
120}; 120};
121 121
122static struct ata_port_operations hpt3x3_port_ops = { 122static struct ata_port_operations hpt3x3_port_ops = {
123 .port_disable = ata_port_disable,
124 .set_piomode = hpt3x3_set_piomode, 123 .set_piomode = hpt3x3_set_piomode,
125#if defined(CONFIG_PATA_HPT3X3_DMA) 124#if defined(CONFIG_PATA_HPT3X3_DMA)
126 .set_dmamode = hpt3x3_set_dmamode, 125 .set_dmamode = hpt3x3_set_dmamode,
@@ -153,9 +152,8 @@ static struct ata_port_operations hpt3x3_port_ops = {
153 .irq_handler = ata_interrupt, 152 .irq_handler = ata_interrupt,
154 .irq_clear = ata_bmdma_irq_clear, 153 .irq_clear = ata_bmdma_irq_clear,
155 .irq_on = ata_irq_on, 154 .irq_on = ata_irq_on,
156 .irq_ack = ata_irq_ack,
157 155
158 .port_start = ata_port_start, 156 .port_start = ata_sff_port_start,
159}; 157};
160 158
161/** 159/**
@@ -239,7 +237,8 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
239 base = host->iomap[4]; /* Bus mastering base */ 237 base = host->iomap[4]; /* Bus mastering base */
240 238
241 for (i = 0; i < host->n_ports; i++) { 239 for (i = 0; i < host->n_ports; i++) {
242 struct ata_ioports *ioaddr = &host->ports[i]->ioaddr; 240 struct ata_port *ap = host->ports[i];
241 struct ata_ioports *ioaddr = &ap->ioaddr;
243 242
244 ioaddr->cmd_addr = base + offset_cmd[i]; 243 ioaddr->cmd_addr = base + offset_cmd[i];
245 ioaddr->altstatus_addr = 244 ioaddr->altstatus_addr =
@@ -247,6 +246,9 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
247 ioaddr->scr_addr = NULL; 246 ioaddr->scr_addr = NULL;
248 ata_std_ports(ioaddr); 247 ata_std_ports(ioaddr);
249 ioaddr->bmdma_addr = base + 8 * i; 248 ioaddr->bmdma_addr = base + 8 * i;
249
250 ata_port_pbar_desc(ap, 4, -1, "ioport");
251 ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
250 } 252 }
251 pci_set_master(pdev); 253 pci_set_master(pdev);
252 return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED, 254 return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 64a711776c..be30923566 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -70,6 +70,8 @@ struct pata_icside_info {
70 unsigned int mwdma_mask; 70 unsigned int mwdma_mask;
71 unsigned int nr_ports; 71 unsigned int nr_ports;
72 const struct portinfo *port[2]; 72 const struct portinfo *port[2];
73 unsigned long raw_base;
74 unsigned long raw_ioc_base;
73}; 75};
74 76
75#define ICS_TYPE_A3IN 0 77#define ICS_TYPE_A3IN 0
@@ -357,26 +359,7 @@ static void pata_icside_error_handler(struct ata_port *ap)
357 pata_icside_postreset); 359 pata_icside_postreset);
358} 360}
359 361
360static u8 pata_icside_irq_ack(struct ata_port *ap, unsigned int chk_drq)
361{
362 unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
363 u8 status;
364
365 status = ata_busy_wait(ap, bits, 1000);
366 if (status & bits)
367 if (ata_msg_err(ap))
368 printk(KERN_ERR "abnormal status 0x%X\n", status);
369
370 if (ata_msg_intr(ap))
371 printk(KERN_INFO "%s: irq ack: drv_stat 0x%X\n",
372 __FUNCTION__, status);
373
374 return status;
375}
376
377static struct ata_port_operations pata_icside_port_ops = { 362static struct ata_port_operations pata_icside_port_ops = {
378 .port_disable = ata_port_disable,
379
380 .set_dmamode = pata_icside_set_dmamode, 363 .set_dmamode = pata_icside_set_dmamode,
381 364
382 .tf_load = ata_tf_load, 365 .tf_load = ata_tf_load,
@@ -403,7 +386,6 @@ static struct ata_port_operations pata_icside_port_ops = {
403 386
404 .irq_clear = ata_dummy_noret, 387 .irq_clear = ata_dummy_noret,
405 .irq_on = ata_irq_on, 388 .irq_on = ata_irq_on,
406 .irq_ack = pata_icside_irq_ack,
407 389
408 .port_start = pata_icside_port_start, 390 .port_start = pata_icside_port_start,
409 391
@@ -412,9 +394,10 @@ static struct ata_port_operations pata_icside_port_ops = {
412}; 394};
413 395
414static void __devinit 396static void __devinit
415pata_icside_setup_ioaddr(struct ata_ioports *ioaddr, void __iomem *base, 397pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base,
416 const struct portinfo *info) 398 const struct portinfo *info)
417{ 399{
400 struct ata_ioports *ioaddr = &ap->ioaddr;
418 void __iomem *cmd = base + info->dataoffset; 401 void __iomem *cmd = base + info->dataoffset;
419 402
420 ioaddr->cmd_addr = cmd; 403 ioaddr->cmd_addr = cmd;
@@ -431,6 +414,13 @@ pata_icside_setup_ioaddr(struct ata_ioports *ioaddr, void __iomem *base,
431 414
432 ioaddr->ctl_addr = base + info->ctrloffset; 415 ioaddr->ctl_addr = base + info->ctrloffset;
433 ioaddr->altstatus_addr = ioaddr->ctl_addr; 416 ioaddr->altstatus_addr = ioaddr->ctl_addr;
417
418 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
419 info->raw_base + info->dataoffset,
420 info->raw_base + info->ctrloffset);
421
422 if (info->raw_ioc_base)
423 ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base);
434} 424}
435 425
436static int __devinit pata_icside_register_v5(struct pata_icside_info *info) 426static int __devinit pata_icside_register_v5(struct pata_icside_info *info)
@@ -451,6 +441,8 @@ static int __devinit pata_icside_register_v5(struct pata_icside_info *info)
451 info->nr_ports = 1; 441 info->nr_ports = 1;
452 info->port[0] = &pata_icside_portinfo_v5; 442 info->port[0] = &pata_icside_portinfo_v5;
453 443
444 info->raw_base = ecard_resource_start(ec, ECARD_RES_MEMC);
445
454 return 0; 446 return 0;
455} 447}
456 448
@@ -491,6 +483,9 @@ static int __devinit pata_icside_register_v6(struct pata_icside_info *info)
491 info->port[0] = &pata_icside_portinfo_v6_1; 483 info->port[0] = &pata_icside_portinfo_v6_1;
492 info->port[1] = &pata_icside_portinfo_v6_2; 484 info->port[1] = &pata_icside_portinfo_v6_2;
493 485
486 info->raw_base = ecard_resource_start(ec, ECARD_RES_EASI);
487 info->raw_ioc_base = ecard_resource_start(ec, ECARD_RES_IOCFAST);
488
494 return icside_dma_init(info); 489 return icside_dma_init(info);
495} 490}
496 491
@@ -527,7 +522,7 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info)
527 ap->flags |= ATA_FLAG_SLAVE_POSS; 522 ap->flags |= ATA_FLAG_SLAVE_POSS;
528 ap->ops = &pata_icside_port_ops; 523 ap->ops = &pata_icside_port_ops;
529 524
530 pata_icside_setup_ioaddr(&ap->ioaddr, info->base, info->port[i]); 525 pata_icside_setup_ioaddr(ap, info->base, info->port[i]);
531 } 526 }
532 527
533 return ata_host_activate(host, ec->irq, ata_interrupt, 0, 528 return ata_host_activate(host, ec->irq, ata_interrupt, 0,
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index 9e553c5420..88ab0e1d35 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -38,7 +38,6 @@ static struct scsi_host_template isapnp_sht = {
38}; 38};
39 39
40static struct ata_port_operations isapnp_port_ops = { 40static struct ata_port_operations isapnp_port_ops = {
41 .port_disable = ata_port_disable,
42 .tf_load = ata_tf_load, 41 .tf_load = ata_tf_load,
43 .tf_read = ata_tf_read, 42 .tf_read = ata_tf_read,
44 .check_status = ata_check_status, 43 .check_status = ata_check_status,
@@ -58,9 +57,8 @@ static struct ata_port_operations isapnp_port_ops = {
58 57
59 .irq_clear = ata_bmdma_irq_clear, 58 .irq_clear = ata_bmdma_irq_clear,
60 .irq_on = ata_irq_on, 59 .irq_on = ata_irq_on,
61 .irq_ack = ata_irq_ack,
62 60
63 .port_start = ata_port_start, 61 .port_start = ata_sff_port_start,
64}; 62};
65 63
66/** 64/**
@@ -112,6 +110,10 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
112 110
113 ata_std_ports(&ap->ioaddr); 111 ata_std_ports(&ap->ioaddr);
114 112
113 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
114 (unsigned long long)pnp_port_start(idev, 0),
115 (unsigned long long)pnp_port_start(idev, 1));
116
115 /* activate */ 117 /* activate */
116 return ata_host_activate(host, pnp_irq(idev, 0), ata_interrupt, 0, 118 return ata_host_activate(host, pnp_irq(idev, 0), ata_interrupt, 0,
117 &isapnp_sht); 119 &isapnp_sht);
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index b8af55e891..1eda821e5e 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -23,23 +23,24 @@
23 23
24/** 24/**
25 * it8213_pre_reset - check for 40/80 pin 25 * it8213_pre_reset - check for 40/80 pin
26 * @ap: Port 26 * @link: link
27 * @deadline: deadline jiffies for the operation 27 * @deadline: deadline jiffies for the operation
28 * 28 *
29 * Filter out ports by the enable bits before doing the normal reset 29 * Filter out ports by the enable bits before doing the normal reset
30 * and probe. 30 * and probe.
31 */ 31 */
32 32
33static int it8213_pre_reset(struct ata_port *ap, unsigned long deadline) 33static int it8213_pre_reset(struct ata_link *link, unsigned long deadline)
34{ 34{
35 static const struct pci_bits it8213_enable_bits[] = { 35 static const struct pci_bits it8213_enable_bits[] = {
36 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ 36 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
37 }; 37 };
38 struct ata_port *ap = link->ap;
38 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 39 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
39 if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no])) 40 if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no]))
40 return -ENOENT; 41 return -ENOENT;
41 42
42 return ata_std_prereset(ap, deadline); 43 return ata_std_prereset(link, deadline);
43} 44}
44 45
45/** 46/**
@@ -260,7 +261,6 @@ static struct scsi_host_template it8213_sht = {
260}; 261};
261 262
262static const struct ata_port_operations it8213_ops = { 263static const struct ata_port_operations it8213_ops = {
263 .port_disable = ata_port_disable,
264 .set_piomode = it8213_set_piomode, 264 .set_piomode = it8213_set_piomode,
265 .set_dmamode = it8213_set_dmamode, 265 .set_dmamode = it8213_set_dmamode,
266 .mode_filter = ata_pci_default_filter, 266 .mode_filter = ata_pci_default_filter,
@@ -288,9 +288,8 @@ static const struct ata_port_operations it8213_ops = {
288 .irq_handler = ata_interrupt, 288 .irq_handler = ata_interrupt,
289 .irq_clear = ata_bmdma_irq_clear, 289 .irq_clear = ata_bmdma_irq_clear,
290 .irq_on = ata_irq_on, 290 .irq_on = ata_irq_on,
291 .irq_ack = ata_irq_ack,
292 291
293 .port_start = ata_port_start, 292 .port_start = ata_sff_port_start,
294}; 293};
295 294
296 295
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 5d8b91e70e..988ef736b9 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -391,7 +391,7 @@ static void it821x_passthru_dev_select(struct ata_port *ap,
391{ 391{
392 struct it821x_dev *itdev = ap->private_data; 392 struct it821x_dev *itdev = ap->private_data;
393 if (itdev && device != itdev->last_device) { 393 if (itdev && device != itdev->last_device) {
394 struct ata_device *adev = &ap->device[device]; 394 struct ata_device *adev = &ap->link.device[device];
395 it821x_program(ap, adev, itdev->pio[adev->devno]); 395 it821x_program(ap, adev, itdev->pio[adev->devno]);
396 itdev->last_device = device; 396 itdev->last_device = device;
397 } 397 }
@@ -450,7 +450,7 @@ static unsigned int it821x_passthru_qc_issue_prot(struct ata_queued_cmd *qc)
450 450
451/** 451/**
452 * it821x_smart_set_mode - mode setting 452 * it821x_smart_set_mode - mode setting
453 * @ap: interface to set up 453 * @link: interface to set up
454 * @unused: device that failed (error only) 454 * @unused: device that failed (error only)
455 * 455 *
456 * Use a non standard set_mode function. We don't want to be tuned. 456 * Use a non standard set_mode function. We don't want to be tuned.
@@ -459,12 +459,11 @@ static unsigned int it821x_passthru_qc_issue_prot(struct ata_queued_cmd *qc)
459 * and respect them. 459 * and respect them.
460 */ 460 */
461 461
462static int it821x_smart_set_mode(struct ata_port *ap, struct ata_device **unused) 462static int it821x_smart_set_mode(struct ata_link *link, struct ata_device **unused)
463{ 463{
464 int i; 464 struct ata_device *dev;
465 465
466 for (i = 0; i < ATA_MAX_DEVICES; i++) { 466 ata_link_for_each_dev(dev, link) {
467 struct ata_device *dev = &ap->device[i];
468 if (ata_dev_enabled(dev)) { 467 if (ata_dev_enabled(dev)) {
469 /* We don't really care */ 468 /* We don't really care */
470 dev->pio_mode = XFER_PIO_0; 469 dev->pio_mode = XFER_PIO_0;
@@ -564,7 +563,7 @@ static int it821x_port_start(struct ata_port *ap)
564 struct it821x_dev *itdev; 563 struct it821x_dev *itdev;
565 u8 conf; 564 u8 conf;
566 565
567 int ret = ata_port_start(ap); 566 int ret = ata_sff_port_start(ap);
568 if (ret < 0) 567 if (ret < 0)
569 return ret; 568 return ret;
570 569
@@ -621,7 +620,6 @@ static struct scsi_host_template it821x_sht = {
621 620
622static struct ata_port_operations it821x_smart_port_ops = { 621static struct ata_port_operations it821x_smart_port_ops = {
623 .set_mode = it821x_smart_set_mode, 622 .set_mode = it821x_smart_set_mode,
624 .port_disable = ata_port_disable,
625 .tf_load = ata_tf_load, 623 .tf_load = ata_tf_load,
626 .tf_read = ata_tf_read, 624 .tf_read = ata_tf_read,
627 .mode_filter = ata_pci_default_filter, 625 .mode_filter = ata_pci_default_filter,
@@ -651,13 +649,11 @@ static struct ata_port_operations it821x_smart_port_ops = {
651 .irq_handler = ata_interrupt, 649 .irq_handler = ata_interrupt,
652 .irq_clear = ata_bmdma_irq_clear, 650 .irq_clear = ata_bmdma_irq_clear,
653 .irq_on = ata_irq_on, 651 .irq_on = ata_irq_on,
654 .irq_ack = ata_irq_ack,
655 652
656 .port_start = it821x_port_start, 653 .port_start = it821x_port_start,
657}; 654};
658 655
659static struct ata_port_operations it821x_passthru_port_ops = { 656static struct ata_port_operations it821x_passthru_port_ops = {
660 .port_disable = ata_port_disable,
661 .set_piomode = it821x_passthru_set_piomode, 657 .set_piomode = it821x_passthru_set_piomode,
662 .set_dmamode = it821x_passthru_set_dmamode, 658 .set_dmamode = it821x_passthru_set_dmamode,
663 .mode_filter = ata_pci_default_filter, 659 .mode_filter = ata_pci_default_filter,
@@ -688,7 +684,6 @@ static struct ata_port_operations it821x_passthru_port_ops = {
688 .irq_clear = ata_bmdma_irq_clear, 684 .irq_clear = ata_bmdma_irq_clear,
689 .irq_handler = ata_interrupt, 685 .irq_handler = ata_interrupt,
690 .irq_on = ata_irq_on, 686 .irq_on = ata_irq_on,
691 .irq_ack = ata_irq_ack,
692 687
693 .port_start = it821x_port_start, 688 .port_start = it821x_port_start,
694}; 689};
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 5dea3584c6..fcd532afbf 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -26,12 +26,11 @@
26#define DRV_NAME "pata_ixp4xx_cf" 26#define DRV_NAME "pata_ixp4xx_cf"
27#define DRV_VERSION "0.2" 27#define DRV_VERSION "0.2"
28 28
29static int ixp4xx_set_mode(struct ata_port *ap, struct ata_device **error) 29static int ixp4xx_set_mode(struct ata_link *link, struct ata_device **error)
30{ 30{
31 int i; 31 struct ata_device *dev;
32 32
33 for (i = 0; i < ATA_MAX_DEVICES; i++) { 33 ata_link_for_each_dev(dev, link) {
34 struct ata_device *dev = &ap->device[i];
35 if (ata_dev_enabled(dev)) { 34 if (ata_dev_enabled(dev)) {
36 ata_dev_printk(dev, KERN_INFO, "configured for PIO0\n"); 35 ata_dev_printk(dev, KERN_INFO, "configured for PIO0\n");
37 dev->pio_mode = XFER_PIO_0; 36 dev->pio_mode = XFER_PIO_0;
@@ -49,7 +48,7 @@ static void ixp4xx_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
49 unsigned int i; 48 unsigned int i;
50 unsigned int words = buflen >> 1; 49 unsigned int words = buflen >> 1;
51 u16 *buf16 = (u16 *) buf; 50 u16 *buf16 = (u16 *) buf;
52 struct ata_port *ap = adev->ap; 51 struct ata_port *ap = adev->link->ap;
53 void __iomem *mmio = ap->ioaddr.data_addr; 52 void __iomem *mmio = ap->ioaddr.data_addr;
54 struct ixp4xx_pata_data *data = ap->host->dev->platform_data; 53 struct ixp4xx_pata_data *data = ap->host->dev->platform_data;
55 54
@@ -108,7 +107,6 @@ static struct ata_port_operations ixp4xx_port_ops = {
108 .set_mode = ixp4xx_set_mode, 107 .set_mode = ixp4xx_set_mode,
109 .mode_filter = ata_pci_default_filter, 108 .mode_filter = ata_pci_default_filter,
110 109
111 .port_disable = ata_port_disable,
112 .tf_load = ata_tf_load, 110 .tf_load = ata_tf_load,
113 .tf_read = ata_tf_read, 111 .tf_read = ata_tf_read,
114 .exec_command = ata_exec_command, 112 .exec_command = ata_exec_command,
@@ -128,14 +126,17 @@ static struct ata_port_operations ixp4xx_port_ops = {
128 .irq_handler = ata_interrupt, 126 .irq_handler = ata_interrupt,
129 .irq_clear = ata_bmdma_irq_clear, 127 .irq_clear = ata_bmdma_irq_clear,
130 .irq_on = ata_irq_on, 128 .irq_on = ata_irq_on,
131 .irq_ack = ata_dummy_irq_ack,
132 129
133 .port_start = ata_port_start, 130 .port_start = ata_port_start,
134}; 131};
135 132
136static void ixp4xx_setup_port(struct ata_ioports *ioaddr, 133static void ixp4xx_setup_port(struct ata_ioports *ioaddr,
137 struct ixp4xx_pata_data *data) 134 struct ixp4xx_pata_data *data,
135 unsigned long raw_cs0, unsigned long raw_cs1)
138{ 136{
137 unsigned long raw_cmd = raw_cs0;
138 unsigned long raw_ctl = raw_cs1 + 0x06;
139
139 ioaddr->cmd_addr = data->cs0; 140 ioaddr->cmd_addr = data->cs0;
140 ioaddr->altstatus_addr = data->cs1 + 0x06; 141 ioaddr->altstatus_addr = data->cs1 + 0x06;
141 ioaddr->ctl_addr = data->cs1 + 0x06; 142 ioaddr->ctl_addr = data->cs1 + 0x06;
@@ -161,7 +162,12 @@ static void ixp4xx_setup_port(struct ata_ioports *ioaddr,
161 *(unsigned long *)&ioaddr->device_addr ^= 0x03; 162 *(unsigned long *)&ioaddr->device_addr ^= 0x03;
162 *(unsigned long *)&ioaddr->status_addr ^= 0x03; 163 *(unsigned long *)&ioaddr->status_addr ^= 0x03;
163 *(unsigned long *)&ioaddr->command_addr ^= 0x03; 164 *(unsigned long *)&ioaddr->command_addr ^= 0x03;
165
166 raw_cmd ^= 0x03;
167 raw_ctl ^= 0x03;
164#endif 168#endif
169
170 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", raw_cmd, raw_ctl);
165} 171}
166 172
167static __devinit int ixp4xx_pata_probe(struct platform_device *pdev) 173static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
@@ -206,7 +212,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
206 ap->pio_mask = 0x1f; /* PIO4 */ 212 ap->pio_mask = 0x1f; /* PIO4 */
207 ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_NO_ATAPI; 213 ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_NO_ATAPI;
208 214
209 ixp4xx_setup_port(&ap->ioaddr, data); 215 ixp4xx_setup_port(ap, data, cs0->start, cs1->start);
210 216
211 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 217 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
212 218
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 4d67f238ee..225a7223a7 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -29,7 +29,7 @@ typedef enum {
29 29
30/** 30/**
31 * jmicron_pre_reset - check for 40/80 pin 31 * jmicron_pre_reset - check for 40/80 pin
32 * @ap: Port 32 * @link: ATA link
33 * @deadline: deadline jiffies for the operation 33 * @deadline: deadline jiffies for the operation
34 * 34 *
35 * Perform the PATA port setup we need. 35 * Perform the PATA port setup we need.
@@ -39,9 +39,9 @@ typedef enum {
39 * and setup here. We assume that has been done by init_one and the 39 * and setup here. We assume that has been done by init_one and the
40 * BIOS. 40 * BIOS.
41 */ 41 */
42 42static int jmicron_pre_reset(struct ata_link *link, unsigned long deadline)
43static int jmicron_pre_reset(struct ata_port *ap, unsigned long deadline)
44{ 43{
44 struct ata_port *ap = link->ap;
45 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 45 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
46 u32 control; 46 u32 control;
47 u32 control5; 47 u32 control5;
@@ -103,7 +103,7 @@ static int jmicron_pre_reset(struct ata_port *ap, unsigned long deadline)
103 ap->cbl = ATA_CBL_SATA; 103 ap->cbl = ATA_CBL_SATA;
104 break; 104 break;
105 } 105 }
106 return ata_std_prereset(ap, deadline); 106 return ata_std_prereset(link, deadline);
107} 107}
108 108
109/** 109/**
@@ -141,8 +141,6 @@ static struct scsi_host_template jmicron_sht = {
141}; 141};
142 142
143static const struct ata_port_operations jmicron_ops = { 143static const struct ata_port_operations jmicron_ops = {
144 .port_disable = ata_port_disable,
145
146 /* Task file is PCI ATA format, use helpers */ 144 /* Task file is PCI ATA format, use helpers */
147 .tf_load = ata_tf_load, 145 .tf_load = ata_tf_load,
148 .tf_read = ata_tf_read, 146 .tf_read = ata_tf_read,
@@ -168,7 +166,6 @@ static const struct ata_port_operations jmicron_ops = {
168 .irq_handler = ata_interrupt, 166 .irq_handler = ata_interrupt,
169 .irq_clear = ata_bmdma_irq_clear, 167 .irq_clear = ata_bmdma_irq_clear,
170 .irq_on = ata_irq_on, 168 .irq_on = ata_irq_on,
171 .irq_ack = ata_irq_ack,
172 169
173 /* Generic PATA PCI ATA helpers */ 170 /* Generic PATA PCI ATA helpers */
174 .port_start = ata_port_start, 171 .port_start = ata_port_start,
@@ -207,17 +204,8 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
207} 204}
208 205
209static const struct pci_device_id jmicron_pci_tbl[] = { 206static const struct pci_device_id jmicron_pci_tbl[] = {
210 { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, 207 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
211 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 361 }, 208 PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
212 { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363,
213 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 363 },
214 { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365,
215 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 365 },
216 { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366,
217 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 366 },
218 { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368,
219 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 368 },
220
221 { } /* terminate list */ 209 { } /* terminate list */
222}; 210};
223 211
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index edffc25d2d..7bed8d8063 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -96,7 +96,7 @@ static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
96 96
97/** 97/**
98 * legacy_set_mode - mode setting 98 * legacy_set_mode - mode setting
99 * @ap: IDE interface 99 * @link: IDE link
100 * @unused: Device that failed when error is returned 100 * @unused: Device that failed when error is returned
101 * 101 *
102 * Use a non standard set_mode function. We don't want to be tuned. 102 * Use a non standard set_mode function. We don't want to be tuned.
@@ -107,12 +107,11 @@ static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
107 * expand on this as per hdparm in the base kernel. 107 * expand on this as per hdparm in the base kernel.
108 */ 108 */
109 109
110static int legacy_set_mode(struct ata_port *ap, struct ata_device **unused) 110static int legacy_set_mode(struct ata_link *link, struct ata_device **unused)
111{ 111{
112 int i; 112 struct ata_device *dev;
113 113
114 for (i = 0; i < ATA_MAX_DEVICES; i++) { 114 ata_link_for_each_dev(dev, link) {
115 struct ata_device *dev = &ap->device[i];
116 if (ata_dev_enabled(dev)) { 115 if (ata_dev_enabled(dev)) {
117 ata_dev_printk(dev, KERN_INFO, "configured for PIO\n"); 116 ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
118 dev->pio_mode = XFER_PIO_0; 117 dev->pio_mode = XFER_PIO_0;
@@ -151,7 +150,6 @@ static struct scsi_host_template legacy_sht = {
151 */ 150 */
152 151
153static struct ata_port_operations simple_port_ops = { 152static struct ata_port_operations simple_port_ops = {
154 .port_disable = ata_port_disable,
155 .tf_load = ata_tf_load, 153 .tf_load = ata_tf_load,
156 .tf_read = ata_tf_read, 154 .tf_read = ata_tf_read,
157 .check_status = ata_check_status, 155 .check_status = ata_check_status,
@@ -172,7 +170,6 @@ static struct ata_port_operations simple_port_ops = {
172 .irq_handler = ata_interrupt, 170 .irq_handler = ata_interrupt,
173 .irq_clear = ata_bmdma_irq_clear, 171 .irq_clear = ata_bmdma_irq_clear,
174 .irq_on = ata_irq_on, 172 .irq_on = ata_irq_on,
175 .irq_ack = ata_irq_ack,
176 173
177 .port_start = ata_port_start, 174 .port_start = ata_port_start,
178}; 175};
@@ -180,7 +177,6 @@ static struct ata_port_operations simple_port_ops = {
180static struct ata_port_operations legacy_port_ops = { 177static struct ata_port_operations legacy_port_ops = {
181 .set_mode = legacy_set_mode, 178 .set_mode = legacy_set_mode,
182 179
183 .port_disable = ata_port_disable,
184 .tf_load = ata_tf_load, 180 .tf_load = ata_tf_load,
185 .tf_read = ata_tf_read, 181 .tf_read = ata_tf_read,
186 .check_status = ata_check_status, 182 .check_status = ata_check_status,
@@ -201,7 +197,6 @@ static struct ata_port_operations legacy_port_ops = {
201 .irq_handler = ata_interrupt, 197 .irq_handler = ata_interrupt,
202 .irq_clear = ata_bmdma_irq_clear, 198 .irq_clear = ata_bmdma_irq_clear,
203 .irq_on = ata_irq_on, 199 .irq_on = ata_irq_on,
204 .irq_ack = ata_irq_ack,
205 200
206 .port_start = ata_port_start, 201 .port_start = ata_port_start,
207}; 202};
@@ -256,7 +251,7 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
256 251
257static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data) 252static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
258{ 253{
259 struct ata_port *ap = adev->ap; 254 struct ata_port *ap = adev->link->ap;
260 int slop = buflen & 3; 255 int slop = buflen & 3;
261 unsigned long flags; 256 unsigned long flags;
262 257
@@ -296,7 +291,6 @@ static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsig
296static struct ata_port_operations pdc20230_port_ops = { 291static struct ata_port_operations pdc20230_port_ops = {
297 .set_piomode = pdc20230_set_piomode, 292 .set_piomode = pdc20230_set_piomode,
298 293
299 .port_disable = ata_port_disable,
300 .tf_load = ata_tf_load, 294 .tf_load = ata_tf_load,
301 .tf_read = ata_tf_read, 295 .tf_read = ata_tf_read,
302 .check_status = ata_check_status, 296 .check_status = ata_check_status,
@@ -317,7 +311,6 @@ static struct ata_port_operations pdc20230_port_ops = {
317 .irq_handler = ata_interrupt, 311 .irq_handler = ata_interrupt,
318 .irq_clear = ata_bmdma_irq_clear, 312 .irq_clear = ata_bmdma_irq_clear,
319 .irq_on = ata_irq_on, 313 .irq_on = ata_irq_on,
320 .irq_ack = ata_irq_ack,
321 314
322 .port_start = ata_port_start, 315 .port_start = ata_port_start,
323}; 316};
@@ -352,7 +345,6 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
352static struct ata_port_operations ht6560a_port_ops = { 345static struct ata_port_operations ht6560a_port_ops = {
353 .set_piomode = ht6560a_set_piomode, 346 .set_piomode = ht6560a_set_piomode,
354 347
355 .port_disable = ata_port_disable,
356 .tf_load = ata_tf_load, 348 .tf_load = ata_tf_load,
357 .tf_read = ata_tf_read, 349 .tf_read = ata_tf_read,
358 .check_status = ata_check_status, 350 .check_status = ata_check_status,
@@ -373,7 +365,6 @@ static struct ata_port_operations ht6560a_port_ops = {
373 .irq_handler = ata_interrupt, 365 .irq_handler = ata_interrupt,
374 .irq_clear = ata_bmdma_irq_clear, 366 .irq_clear = ata_bmdma_irq_clear,
375 .irq_on = ata_irq_on, 367 .irq_on = ata_irq_on,
376 .irq_ack = ata_irq_ack,
377 368
378 .port_start = ata_port_start, 369 .port_start = ata_port_start,
379}; 370};
@@ -419,7 +410,6 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
419static struct ata_port_operations ht6560b_port_ops = { 410static struct ata_port_operations ht6560b_port_ops = {
420 .set_piomode = ht6560b_set_piomode, 411 .set_piomode = ht6560b_set_piomode,
421 412
422 .port_disable = ata_port_disable,
423 .tf_load = ata_tf_load, 413 .tf_load = ata_tf_load,
424 .tf_read = ata_tf_read, 414 .tf_read = ata_tf_read,
425 .check_status = ata_check_status, 415 .check_status = ata_check_status,
@@ -440,7 +430,6 @@ static struct ata_port_operations ht6560b_port_ops = {
440 .irq_handler = ata_interrupt, 430 .irq_handler = ata_interrupt,
441 .irq_clear = ata_bmdma_irq_clear, 431 .irq_clear = ata_bmdma_irq_clear,
442 .irq_on = ata_irq_on, 432 .irq_on = ata_irq_on,
443 .irq_ack = ata_irq_ack,
444 433
445 .port_start = ata_port_start, 434 .port_start = ata_port_start,
446}; 435};
@@ -541,7 +530,6 @@ static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev
541static struct ata_port_operations opti82c611a_port_ops = { 530static struct ata_port_operations opti82c611a_port_ops = {
542 .set_piomode = opti82c611a_set_piomode, 531 .set_piomode = opti82c611a_set_piomode,
543 532
544 .port_disable = ata_port_disable,
545 .tf_load = ata_tf_load, 533 .tf_load = ata_tf_load,
546 .tf_read = ata_tf_read, 534 .tf_read = ata_tf_read,
547 .check_status = ata_check_status, 535 .check_status = ata_check_status,
@@ -562,7 +550,6 @@ static struct ata_port_operations opti82c611a_port_ops = {
562 .irq_handler = ata_interrupt, 550 .irq_handler = ata_interrupt,
563 .irq_clear = ata_bmdma_irq_clear, 551 .irq_clear = ata_bmdma_irq_clear,
564 .irq_on = ata_irq_on, 552 .irq_on = ata_irq_on,
565 .irq_ack = ata_irq_ack,
566 553
567 .port_start = ata_port_start, 554 .port_start = ata_port_start,
568}; 555};
@@ -675,7 +662,6 @@ static unsigned int opti82c46x_qc_issue_prot(struct ata_queued_cmd *qc)
675static struct ata_port_operations opti82c46x_port_ops = { 662static struct ata_port_operations opti82c46x_port_ops = {
676 .set_piomode = opti82c46x_set_piomode, 663 .set_piomode = opti82c46x_set_piomode,
677 664
678 .port_disable = ata_port_disable,
679 .tf_load = ata_tf_load, 665 .tf_load = ata_tf_load,
680 .tf_read = ata_tf_read, 666 .tf_read = ata_tf_read,
681 .check_status = ata_check_status, 667 .check_status = ata_check_status,
@@ -696,7 +682,6 @@ static struct ata_port_operations opti82c46x_port_ops = {
696 .irq_handler = ata_interrupt, 682 .irq_handler = ata_interrupt,
697 .irq_clear = ata_bmdma_irq_clear, 683 .irq_clear = ata_bmdma_irq_clear,
698 .irq_on = ata_irq_on, 684 .irq_on = ata_irq_on,
699 .irq_ack = ata_irq_ack,
700 685
701 .port_start = ata_port_start, 686 .port_start = ata_port_start,
702}; 687};
@@ -814,6 +799,8 @@ static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl
814 ata_std_ports(&ap->ioaddr); 799 ata_std_ports(&ap->ioaddr);
815 ap->private_data = ld; 800 ap->private_data = ld;
816 801
802 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, ctrl);
803
817 ret = ata_host_activate(host, irq, ata_interrupt, 0, &legacy_sht); 804 ret = ata_host_activate(host, irq, ata_interrupt, 0, &legacy_sht);
818 if (ret) 805 if (ret)
819 goto fail; 806 goto fail;
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index b45506f1ef..9afc8a32b2 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -24,14 +24,15 @@
24 24
25/** 25/**
26 * marvell_pre_reset - check for 40/80 pin 26 * marvell_pre_reset - check for 40/80 pin
27 * @ap: Port 27 * @link: link
28 * @deadline: deadline jiffies for the operation 28 * @deadline: deadline jiffies for the operation
29 * 29 *
30 * Perform the PATA port setup we need. 30 * Perform the PATA port setup we need.
31 */ 31 */
32 32
33static int marvell_pre_reset(struct ata_port *ap, unsigned long deadline) 33static int marvell_pre_reset(struct ata_link *link, unsigned long deadline)
34{ 34{
35 struct ata_port *ap = link->ap;
35 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 36 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
36 u32 devices; 37 u32 devices;
37 void __iomem *barp; 38 void __iomem *barp;
@@ -54,7 +55,7 @@ static int marvell_pre_reset(struct ata_port *ap, unsigned long deadline)
54 (!(devices & 0x10))) /* PATA enable ? */ 55 (!(devices & 0x10))) /* PATA enable ? */
55 return -ENOENT; 56 return -ENOENT;
56 57
57 return ata_std_prereset(ap, deadline); 58 return ata_std_prereset(link, deadline);
58} 59}
59 60
60static int marvell_cable_detect(struct ata_port *ap) 61static int marvell_cable_detect(struct ata_port *ap)
@@ -110,8 +111,6 @@ static struct scsi_host_template marvell_sht = {
110}; 111};
111 112
112static const struct ata_port_operations marvell_ops = { 113static const struct ata_port_operations marvell_ops = {
113 .port_disable = ata_port_disable,
114
115 /* Task file is PCI ATA format, use helpers */ 114 /* Task file is PCI ATA format, use helpers */
116 .tf_load = ata_tf_load, 115 .tf_load = ata_tf_load,
117 .tf_read = ata_tf_read, 116 .tf_read = ata_tf_read,
@@ -138,10 +137,9 @@ static const struct ata_port_operations marvell_ops = {
138 .irq_handler = ata_interrupt, 137 .irq_handler = ata_interrupt,
139 .irq_clear = ata_bmdma_irq_clear, 138 .irq_clear = ata_bmdma_irq_clear,
140 .irq_on = ata_irq_on, 139 .irq_on = ata_irq_on,
141 .irq_ack = ata_irq_ack,
142 140
143 /* Generic PATA PCI ATA helpers */ 141 /* Generic PATA PCI ATA helpers */
144 .port_start = ata_port_start, 142 .port_start = ata_sff_port_start,
145}; 143};
146 144
147 145
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 099f4cdc4c..412140f028 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -283,7 +283,6 @@ static struct scsi_host_template mpc52xx_ata_sht = {
283}; 283};
284 284
285static struct ata_port_operations mpc52xx_ata_port_ops = { 285static struct ata_port_operations mpc52xx_ata_port_ops = {
286 .port_disable = ata_port_disable,
287 .set_piomode = mpc52xx_ata_set_piomode, 286 .set_piomode = mpc52xx_ata_set_piomode,
288 .dev_select = mpc52xx_ata_dev_select, 287 .dev_select = mpc52xx_ata_dev_select,
289 .tf_load = ata_tf_load, 288 .tf_load = ata_tf_load,
@@ -299,12 +298,12 @@ static struct ata_port_operations mpc52xx_ata_port_ops = {
299 .data_xfer = ata_data_xfer, 298 .data_xfer = ata_data_xfer,
300 .irq_clear = ata_bmdma_irq_clear, 299 .irq_clear = ata_bmdma_irq_clear,
301 .irq_on = ata_irq_on, 300 .irq_on = ata_irq_on,
302 .irq_ack = ata_irq_ack,
303 .port_start = ata_port_start, 301 .port_start = ata_port_start,
304}; 302};
305 303
306static int __devinit 304static int __devinit
307mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv) 305mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv,
306 unsigned long raw_ata_regs)
308{ 307{
309 struct ata_host *host; 308 struct ata_host *host;
310 struct ata_port *ap; 309 struct ata_port *ap;
@@ -338,6 +337,8 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv)
338 aio->status_addr = &priv->ata_regs->tf_command; 337 aio->status_addr = &priv->ata_regs->tf_command;
339 aio->command_addr = &priv->ata_regs->tf_command; 338 aio->command_addr = &priv->ata_regs->tf_command;
340 339
340 ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs);
341
341 /* activate host */ 342 /* activate host */
342 return ata_host_activate(host, priv->ata_irq, ata_interrupt, 0, 343 return ata_host_activate(host, priv->ata_irq, ata_interrupt, 0,
343 &mpc52xx_ata_sht); 344 &mpc52xx_ata_sht);
@@ -434,7 +435,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
434 } 435 }
435 436
436 /* Register ourselves to libata */ 437 /* Register ourselves to libata */
437 rv = mpc52xx_ata_init_one(&op->dev, priv); 438 rv = mpc52xx_ata_init_one(&op->dev, priv, res_mem.start);
438 if (rv) { 439 if (rv) {
439 printk(KERN_ERR DRV_NAME ": " 440 printk(KERN_ERR DRV_NAME ": "
440 "Error while registering to ATA layer\n"); 441 "Error while registering to ATA layer\n");
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 4ea4283829..d5483087a3 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -46,15 +46,16 @@ enum {
46 SECONDARY = (1 << 14) 46 SECONDARY = (1 << 14)
47}; 47};
48 48
49static int mpiix_pre_reset(struct ata_port *ap, unsigned long deadline) 49static int mpiix_pre_reset(struct ata_link *link, unsigned long deadline)
50{ 50{
51 struct ata_port *ap = link->ap;
51 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 52 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
52 static const struct pci_bits mpiix_enable_bits = { 0x6D, 1, 0x80, 0x80 }; 53 static const struct pci_bits mpiix_enable_bits = { 0x6D, 1, 0x80, 0x80 };
53 54
54 if (!pci_test_config_bits(pdev, &mpiix_enable_bits)) 55 if (!pci_test_config_bits(pdev, &mpiix_enable_bits))
55 return -ENOENT; 56 return -ENOENT;
56 57
57 return ata_std_prereset(ap, deadline); 58 return ata_std_prereset(link, deadline);
58} 59}
59 60
60/** 61/**
@@ -168,7 +169,6 @@ static struct scsi_host_template mpiix_sht = {
168}; 169};
169 170
170static struct ata_port_operations mpiix_port_ops = { 171static struct ata_port_operations mpiix_port_ops = {
171 .port_disable = ata_port_disable,
172 .set_piomode = mpiix_set_piomode, 172 .set_piomode = mpiix_set_piomode,
173 173
174 .tf_load = ata_tf_load, 174 .tf_load = ata_tf_load,
@@ -189,9 +189,8 @@ static struct ata_port_operations mpiix_port_ops = {
189 189
190 .irq_clear = ata_bmdma_irq_clear, 190 .irq_clear = ata_bmdma_irq_clear,
191 .irq_on = ata_irq_on, 191 .irq_on = ata_irq_on,
192 .irq_ack = ata_irq_ack,
193 192
194 .port_start = ata_port_start, 193 .port_start = ata_sff_port_start,
195}; 194};
196 195
197static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) 196static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
@@ -202,7 +201,7 @@ static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
202 struct ata_port *ap; 201 struct ata_port *ap;
203 void __iomem *cmd_addr, *ctl_addr; 202 void __iomem *cmd_addr, *ctl_addr;
204 u16 idetim; 203 u16 idetim;
205 int irq; 204 int cmd, ctl, irq;
206 205
207 if (!printed_version++) 206 if (!printed_version++)
208 dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); 207 dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
@@ -210,6 +209,7 @@ static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
210 host = ata_host_alloc(&dev->dev, 1); 209 host = ata_host_alloc(&dev->dev, 1);
211 if (!host) 210 if (!host)
212 return -ENOMEM; 211 return -ENOMEM;
212 ap = host->ports[0];
213 213
214 /* MPIIX has many functions which can be turned on or off according 214 /* MPIIX has many functions which can be turned on or off according
215 to other devices present. Make sure IDE is enabled before we try 215 to other devices present. Make sure IDE is enabled before we try
@@ -221,25 +221,28 @@ static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
221 221
222 /* See if it's primary or secondary channel... */ 222 /* See if it's primary or secondary channel... */
223 if (!(idetim & SECONDARY)) { 223 if (!(idetim & SECONDARY)) {
224 cmd = 0x1F0;
225 ctl = 0x3F6;
224 irq = 14; 226 irq = 14;
225 cmd_addr = devm_ioport_map(&dev->dev, 0x1F0, 8);
226 ctl_addr = devm_ioport_map(&dev->dev, 0x3F6, 1);
227 } else { 227 } else {
228 cmd = 0x170;
229 ctl = 0x376;
228 irq = 15; 230 irq = 15;
229 cmd_addr = devm_ioport_map(&dev->dev, 0x170, 8);
230 ctl_addr = devm_ioport_map(&dev->dev, 0x376, 1);
231 } 231 }
232 232
233 cmd_addr = devm_ioport_map(&dev->dev, cmd, 8);
234 ctl_addr = devm_ioport_map(&dev->dev, ctl, 1);
233 if (!cmd_addr || !ctl_addr) 235 if (!cmd_addr || !ctl_addr)
234 return -ENOMEM; 236 return -ENOMEM;
235 237
238 ata_port_desc(ap, "cmd 0x%x ctl 0x%x", cmd, ctl);
239
236 /* We do our own plumbing to avoid leaking special cases for whacko 240 /* We do our own plumbing to avoid leaking special cases for whacko
237 ancient hardware into the core code. There are two issues to 241 ancient hardware into the core code. There are two issues to
238 worry about. #1 The chip is a bridge so if in legacy mode and 242 worry about. #1 The chip is a bridge so if in legacy mode and
239 without BARs set fools the setup. #2 If you pci_disable_device 243 without BARs set fools the setup. #2 If you pci_disable_device
240 the MPIIX your box goes castors up */ 244 the MPIIX your box goes castors up */
241 245
242 ap = host->ports[0];
243 ap->ops = &mpiix_port_ops; 246 ap->ops = &mpiix_port_ops;
244 ap->pio_mask = 0x1F; 247 ap->pio_mask = 0x1F;
245 ap->flags |= ATA_FLAG_SLAVE_POSS; 248 ap->flags |= ATA_FLAG_SLAVE_POSS;
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index 40eb574828..25c922abd5 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -40,8 +40,6 @@ static struct scsi_host_template netcell_sht = {
40}; 40};
41 41
42static const struct ata_port_operations netcell_ops = { 42static const struct ata_port_operations netcell_ops = {
43 .port_disable = ata_port_disable,
44
45 /* Task file is PCI ATA format, use helpers */ 43 /* Task file is PCI ATA format, use helpers */
46 .tf_load = ata_tf_load, 44 .tf_load = ata_tf_load,
47 .tf_read = ata_tf_read, 45 .tf_read = ata_tf_read,
@@ -68,10 +66,9 @@ static const struct ata_port_operations netcell_ops = {
68 .irq_handler = ata_interrupt, 66 .irq_handler = ata_interrupt,
69 .irq_clear = ata_bmdma_irq_clear, 67 .irq_clear = ata_bmdma_irq_clear,
70 .irq_on = ata_irq_on, 68 .irq_on = ata_irq_on,
71 .irq_ack = ata_irq_ack,
72 69
73 /* Generic PATA PCI ATA helpers */ 70 /* Generic PATA PCI ATA helpers */
74 .port_start = ata_port_start, 71 .port_start = ata_sff_port_start,
75}; 72};
76 73
77 74
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 2f5d714ebf..6e8e55745b 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -32,14 +32,15 @@
32 32
33/** 33/**
34 * ns87410_pre_reset - probe begin 34 * ns87410_pre_reset - probe begin
35 * @ap: ATA port 35 * @link: ATA link
36 * @deadline: deadline jiffies for the operation 36 * @deadline: deadline jiffies for the operation
37 * 37 *
38 * Check enabled ports 38 * Check enabled ports
39 */ 39 */
40 40
41static int ns87410_pre_reset(struct ata_port *ap, unsigned long deadline) 41static int ns87410_pre_reset(struct ata_link *link, unsigned long deadline)
42{ 42{
43 struct ata_port *ap = link->ap;
43 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 44 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
44 static const struct pci_bits ns87410_enable_bits[] = { 45 static const struct pci_bits ns87410_enable_bits[] = {
45 { 0x43, 1, 0x08, 0x08 }, 46 { 0x43, 1, 0x08, 0x08 },
@@ -49,7 +50,7 @@ static int ns87410_pre_reset(struct ata_port *ap, unsigned long deadline)
49 if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no])) 50 if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no]))
50 return -ENOENT; 51 return -ENOENT;
51 52
52 return ata_std_prereset(ap, deadline); 53 return ata_std_prereset(link, deadline);
53} 54}
54 55
55/** 56/**
@@ -161,7 +162,6 @@ static struct scsi_host_template ns87410_sht = {
161}; 162};
162 163
163static struct ata_port_operations ns87410_port_ops = { 164static struct ata_port_operations ns87410_port_ops = {
164 .port_disable = ata_port_disable,
165 .set_piomode = ns87410_set_piomode, 165 .set_piomode = ns87410_set_piomode,
166 166
167 .tf_load = ata_tf_load, 167 .tf_load = ata_tf_load,
@@ -184,9 +184,8 @@ static struct ata_port_operations ns87410_port_ops = {
184 .irq_handler = ata_interrupt, 184 .irq_handler = ata_interrupt,
185 .irq_clear = ata_bmdma_irq_clear, 185 .irq_clear = ata_bmdma_irq_clear,
186 .irq_on = ata_irq_on, 186 .irq_on = ata_irq_on,
187 .irq_ack = ata_irq_ack,
188 187
189 .port_start = ata_port_start, 188 .port_start = ata_sff_port_start,
190}; 189};
191 190
192static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id) 191static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
new file mode 100644
index 0000000000..bb97ef583f
--- /dev/null
+++ b/drivers/ata/pata_ns87415.c
@@ -0,0 +1,467 @@
1/*
2 * pata_ns87415.c - NS87415 (non PARISC) PATA
3 *
4 * (C) 2005 Red Hat <alan@redhat.com>
5 *
6 * This is a fairly generic MWDMA controller. It has some limitations
7 * as it requires timing reloads on PIO/DMA transitions but it is otherwise
8 * fairly well designed.
9 *
10 * This driver assumes the firmware has left the chip in a valid ST506
11 * compliant state, either legacy IRQ 14/15 or native INTA shared. You
12 * may need to add platform code if your system fails to do this.
13 *
14 * The same cell appears in the 87560 controller used by some PARISC
15 * systems. This has its own special mountain of errata.
16 *
17 * TODO:
18 * Test PARISC SuperIO
19 * Get someone to test on SPARC
20 * Implement lazy pio/dma switching for better performance
21 * 8bit shared timing.
22 * See if we need to kill the FIFO for ATAPI
23 */
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/blkdev.h>
30#include <linux/delay.h>
31#include <linux/device.h>
32#include <scsi/scsi_host.h>
33#include <linux/libata.h>
34#include <linux/ata.h>
35
36#define DRV_NAME "pata_ns87415"
37#define DRV_VERSION "0.0.1"
38
39/**
40 * ns87415_set_mode - Initialize host controller mode timings
41 * @ap: Port whose timings we are configuring
42 * @adev: Device whose timings we are configuring
43 * @mode: Mode to set
44 *
45 * Program the mode registers for this controller, channel and
46 * device. Because the chip is quite an old design we have to do this
47 * for PIO/DMA switches.
48 *
49 * LOCKING:
50 * None (inherited from caller).
51 */
52
53static void ns87415_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode)
54{
55 struct pci_dev *dev = to_pci_dev(ap->host->dev);
56 int unit = 2 * ap->port_no + adev->devno;
57 int timing = 0x44 + 2 * unit;
58 unsigned long T = 1000000000 / 33333; /* PCI clocks */
59 struct ata_timing t;
60 u16 clocking;
61 u8 iordy;
62 u8 status;
63
64 /* Timing register format is 17 - low nybble read timing with
65 the high nybble being 16 - x for recovery time in PCI clocks */
66
67 ata_timing_compute(adev, adev->pio_mode, &t, T, 0);
68
69 clocking = 17 - FIT(t.active, 2, 17);
70 clocking |= (16 - FIT(t.recover, 1, 16)) << 4;
71 /* Use the same timing for read and write bytes */
72 clocking |= (clocking << 8);
73 pci_write_config_word(dev, timing, clocking);
74
75 /* Set the IORDY enable versus DMA enable on or off properly */
76 pci_read_config_byte(dev, 0x42, &iordy);
77 iordy &= ~(1 << (4 + unit));
78 if (mode >= XFER_MW_DMA_0 || !ata_pio_need_iordy(adev))
79 iordy |= (1 << (4 + unit));
80
81 /* Paranoia: We shouldn't ever get here with busy write buffers
82 but if so wait */
83
84 pci_read_config_byte(dev, 0x43, &status);
85 while (status & 0x03) {
86 udelay(1);
87 pci_read_config_byte(dev, 0x43, &status);
88 }
89 /* Flip the IORDY/DMA bits now we are sure the write buffers are
90 clear */
91 pci_write_config_byte(dev, 0x42, iordy);
92
93 /* TODO: Set byte 54 command timing to the best 8bit
94 mode shared by all four devices */
95}
96
97/**
98 * ns87415_set_piomode - Initialize host controller PATA PIO timings
99 * @ap: Port whose timings we are configuring
100 * @adev: Device to program
101 *
102 * Set PIO mode for device, in host controller PCI config space.
103 *
104 * LOCKING:
105 * None (inherited from caller).
106 */
107
108static void ns87415_set_piomode(struct ata_port *ap, struct ata_device *adev)
109{
110 ns87415_set_mode(ap, adev, adev->pio_mode);
111}
112
113/**
114 * ns87415_bmdma_setup - Set up DMA
115 * @qc: Command block
116 *
117 * Set up for bus masterng DMA. We have to do this ourselves
118 * rather than use the helper due to a chip erratum
119 */
120
121static void ns87415_bmdma_setup(struct ata_queued_cmd *qc)
122{
123 struct ata_port *ap = qc->ap;
124 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
125 u8 dmactl;
126
127 /* load PRD table addr. */
128 mb(); /* make sure PRD table writes are visible to controller */
129 iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
130
131 /* specify data direction, triple-check start bit is clear */
132 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
133 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
134 /* Due to an erratum we need to write these bits to the wrong
135 place - which does save us an I/O bizarrely */
136 dmactl |= ATA_DMA_INTR | ATA_DMA_ERR;
137 if (!rw)
138 dmactl |= ATA_DMA_WR;
139 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
140 /* issue r/w command */
141 ap->ops->exec_command(ap, &qc->tf);
142}
143
144/**
145 * ns87415_bmdma_start - Begin DMA transfer
146 * @qc: Command block
147 *
148 * Switch the timings for the chip and set up for a DMA transfer
149 * before the DMA burst begins.
150 *
151 * FIXME: We should do lazy switching on bmdma_start versus
152 * ata_pio_data_xfer for better performance.
153 */
154
155static void ns87415_bmdma_start(struct ata_queued_cmd *qc)
156{
157 ns87415_set_mode(qc->ap, qc->dev, qc->dev->dma_mode);
158 ata_bmdma_start(qc);
159}
160
161/**
162 * ns87415_bmdma_stop - End DMA transfer
163 * @qc: Command block
164 *
165 * End DMA mode and switch the controller back into PIO mode
166 */
167
168static void ns87415_bmdma_stop(struct ata_queued_cmd *qc)
169{
170 ata_bmdma_stop(qc);
171 ns87415_set_mode(qc->ap, qc->dev, qc->dev->pio_mode);
172}
173
174/**
175 * ns87415_bmdma_irq_clear - Clear interrupt
176 * @ap: Channel to clear
177 *
178 * Erratum: Due to a chip bug regisers 02 and 0A bit 1 and 2 (the
179 * error bits) are reset by writing to register 00 or 08.
180 */
181
182static void ns87415_bmdma_irq_clear(struct ata_port *ap)
183{
184 void __iomem *mmio = ap->ioaddr.bmdma_addr;
185
186 if (!mmio)
187 return;
188 iowrite8((ioread8(mmio + ATA_DMA_CMD) | ATA_DMA_INTR | ATA_DMA_ERR),
189 mmio + ATA_DMA_CMD);
190}
191
192/**
193 * ns87415_check_atapi_dma - ATAPI DMA filter
194 * @qc: Command block
195 *
196 * Disable ATAPI DMA (for now). We may be able to do DMA if we
197 * kill the prefetching. This isn't clear.
198 */
199
200static int ns87415_check_atapi_dma(struct ata_queued_cmd *qc)
201{
202 return -EOPNOTSUPP;
203}
204
205#if defined(CONFIG_SUPERIO)
206
207/* SUPERIO 87560 is a PoS chip that NatSem denies exists.
208 * Unfortunately, it's built-in on all Astro-based PA-RISC workstations
209 * which use the integrated NS87514 cell for CD-ROM support.
210 * i.e we have to support for CD-ROM installs.
211 * See drivers/parisc/superio.c for more gory details.
212 *
213 * Workarounds taken from drivers/ide/pci/ns87415.c
214 */
215
216#include <asm/superio.h>
217
218/**
219 * ns87560_read_buggy - workaround buggy Super I/O chip
220 * @port: Port to read
221 *
222 * Work around chipset problems in the 87560 SuperIO chip
223 */
224
225static u8 ns87560_read_buggy(void __iomem *port)
226{
227 u8 tmp;
228 int retries = SUPERIO_IDE_MAX_RETRIES;
229 do {
230 tmp = ioread8(port);
231 if (tmp != 0)
232 return tmp;
233 udelay(50);
234 } while(retries-- > 0);
235 return tmp;
236}
237
238/**
239 * ns87560_check_status
240 * @ap: channel to check
241 *
242 * Return the status of the channel working around the
243 * 87560 flaws.
244 */
245
246static u8 ns87560_check_status(struct ata_port *ap)
247{
248 return ns87560_read_buggy(ap->ioaddr.status_addr);
249}
250
251/**
252 * ns87560_tf_read - input device's ATA taskfile shadow registers
253 * @ap: Port from which input is read
254 * @tf: ATA taskfile register set for storing input
255 *
256 * Reads ATA taskfile registers for currently-selected device
257 * into @tf. Work around the 87560 bugs.
258 *
259 * LOCKING:
260 * Inherited from caller.
261 */
262void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
263{
264 struct ata_ioports *ioaddr = &ap->ioaddr;
265
266 tf->command = ns87560_check_status(ap);
267 tf->feature = ioread8(ioaddr->error_addr);
268 tf->nsect = ioread8(ioaddr->nsect_addr);
269 tf->lbal = ioread8(ioaddr->lbal_addr);
270 tf->lbam = ioread8(ioaddr->lbam_addr);
271 tf->lbah = ioread8(ioaddr->lbah_addr);
272 tf->device = ns87560_read_buggy(ioaddr->device_addr);
273
274 if (tf->flags & ATA_TFLAG_LBA48) {
275 iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
276 tf->hob_feature = ioread8(ioaddr->error_addr);
277 tf->hob_nsect = ioread8(ioaddr->nsect_addr);
278 tf->hob_lbal = ioread8(ioaddr->lbal_addr);
279 tf->hob_lbam = ioread8(ioaddr->lbam_addr);
280 tf->hob_lbah = ioread8(ioaddr->lbah_addr);
281 iowrite8(tf->ctl, ioaddr->ctl_addr);
282 ap->last_ctl = tf->ctl;
283 }
284}
285
286/**
287 * ns87560_bmdma_status
288 * @ap: channel to check
289 *
290 * Return the DMA status of the channel working around the
291 * 87560 flaws.
292 */
293
294static u8 ns87560_bmdma_status(struct ata_port *ap)
295{
296 return ns87560_read_buggy(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
297}
298
299static const struct ata_port_operations ns87560_pata_ops = {
300 .set_piomode = ns87415_set_piomode,
301 .mode_filter = ata_pci_default_filter,
302
303 .tf_load = ata_tf_load,
304 .tf_read = ns87560_tf_read,
305 .check_status = ns87560_check_status,
306 .check_atapi_dma = ns87415_check_atapi_dma,
307 .exec_command = ata_exec_command,
308 .dev_select = ata_std_dev_select,
309
310 .freeze = ata_bmdma_freeze,
311 .thaw = ata_bmdma_thaw,
312 .error_handler = ata_bmdma_error_handler,
313 .post_internal_cmd = ata_bmdma_post_internal_cmd,
314 .cable_detect = ata_cable_40wire,
315
316 .bmdma_setup = ns87415_bmdma_setup,
317 .bmdma_start = ns87415_bmdma_start,
318 .bmdma_stop = ns87415_bmdma_stop,
319 .bmdma_status = ns87560_bmdma_status,
320 .qc_prep = ata_qc_prep,
321 .qc_issue = ata_qc_issue_prot,
322 .data_xfer = ata_data_xfer,
323
324 .irq_handler = ata_interrupt,
325 .irq_clear = ns87415_bmdma_irq_clear,
326 .irq_on = ata_irq_on,
327
328 .port_start = ata_sff_port_start,
329};
330
331#endif /* 87560 SuperIO Support */
332
333
334static const struct ata_port_operations ns87415_pata_ops = {
335 .set_piomode = ns87415_set_piomode,
336 .mode_filter = ata_pci_default_filter,
337
338 .tf_load = ata_tf_load,
339 .tf_read = ata_tf_read,
340 .check_status = ata_check_status,
341 .check_atapi_dma = ns87415_check_atapi_dma,
342 .exec_command = ata_exec_command,
343 .dev_select = ata_std_dev_select,
344
345 .freeze = ata_bmdma_freeze,
346 .thaw = ata_bmdma_thaw,
347 .error_handler = ata_bmdma_error_handler,
348 .post_internal_cmd = ata_bmdma_post_internal_cmd,
349 .cable_detect = ata_cable_40wire,
350
351 .bmdma_setup = ns87415_bmdma_setup,
352 .bmdma_start = ns87415_bmdma_start,
353 .bmdma_stop = ns87415_bmdma_stop,
354 .bmdma_status = ata_bmdma_status,
355 .qc_prep = ata_qc_prep,
356 .qc_issue = ata_qc_issue_prot,
357 .data_xfer = ata_data_xfer,
358
359 .irq_handler = ata_interrupt,
360 .irq_clear = ns87415_bmdma_irq_clear,
361 .irq_on = ata_irq_on,
362
363 .port_start = ata_sff_port_start,
364};
365
366static struct scsi_host_template ns87415_sht = {
367 .module = THIS_MODULE,
368 .name = DRV_NAME,
369 .ioctl = ata_scsi_ioctl,
370 .queuecommand = ata_scsi_queuecmd,
371 .can_queue = ATA_DEF_QUEUE,
372 .this_id = ATA_SHT_THIS_ID,
373 .sg_tablesize = LIBATA_MAX_PRD,
374 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
375 .emulated = ATA_SHT_EMULATED,
376 .use_clustering = ATA_SHT_USE_CLUSTERING,
377 .proc_name = DRV_NAME,
378 .dma_boundary = ATA_DMA_BOUNDARY,
379 .slave_configure = ata_scsi_slave_config,
380 .slave_destroy = ata_scsi_slave_destroy,
381 .bios_param = ata_std_bios_param,
382};
383
384
385/**
386 * ns87415_init_one - Register 87415 ATA PCI device with kernel services
387 * @pdev: PCI device to register
388 * @ent: Entry in ns87415_pci_tbl matching with @pdev
389 *
390 * Called from kernel PCI layer. We probe for combined mode (sigh),
391 * and then hand over control to libata, for it to do the rest.
392 *
393 * LOCKING:
394 * Inherited from PCI layer (may sleep).
395 *
396 * RETURNS:
397 * Zero on success, or -ERRNO value.
398 */
399
400static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
401{
402 static int printed_version;
403 static const struct ata_port_info info = {
404 .sht = &ns87415_sht,
405 .flags = ATA_FLAG_SLAVE_POSS,
406 .pio_mask = 0x1f, /* pio0-4 */
407 .mwdma_mask = 0x07, /* mwdma0-2 */
408 .port_ops = &ns87415_pata_ops,
409 };
410 const struct ata_port_info *ppi[] = { &info, NULL };
411#if defined(CONFIG_SUPERIO)
412 static const struct ata_port_info info87560 = {
413 .sht = &ns87415_sht,
414 .flags = ATA_FLAG_SLAVE_POSS,
415 .pio_mask = 0x1f, /* pio0-4 */
416 .mwdma_mask = 0x07, /* mwdma0-2 */
417 .port_ops = &ns87560_pata_ops,
418 };
419
420 if (PCI_SLOT(pdev->devfn) == 0x0E)
421 ppi[0] = &info87560;
422#endif
423 if (!printed_version++)
424 dev_printk(KERN_DEBUG, &pdev->dev,
425 "version " DRV_VERSION "\n");
426 /* Select 512 byte sectors */
427 pci_write_config_byte(pdev, 0x55, 0xEE);
428 /* Select PIO0 8bit clocking */
429 pci_write_config_byte(pdev, 0x54, 0xB7);
430 return ata_pci_init_one(pdev, ppi);
431}
432
433static const struct pci_device_id ns87415_pci_tbl[] = {
434 { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87415), },
435
436 { } /* terminate list */
437};
438
439static struct pci_driver ns87415_pci_driver = {
440 .name = DRV_NAME,
441 .id_table = ns87415_pci_tbl,
442 .probe = ns87415_init_one,
443 .remove = ata_pci_remove_one,
444#ifdef CONFIG_PM
445 .suspend = ata_pci_device_suspend,
446 .resume = ata_pci_device_resume,
447#endif
448};
449
450static int __init ns87415_init(void)
451{
452 return pci_register_driver(&ns87415_pci_driver);
453}
454
455static void __exit ns87415_exit(void)
456{
457 pci_unregister_driver(&ns87415_pci_driver);
458}
459
460module_init(ns87415_init);
461module_exit(ns87415_exit);
462
463MODULE_AUTHOR("Alan Cox");
464MODULE_DESCRIPTION("ATA low-level driver for NS87415 controllers");
465MODULE_LICENSE("GPL");
466MODULE_DEVICE_TABLE(pci, ns87415_pci_tbl);
467MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 091a70a0ef..3cd5eb2b6c 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -29,14 +29,15 @@
29 29
30/** 30/**
31 * oldpiix_pre_reset - probe begin 31 * oldpiix_pre_reset - probe begin
32 * @ap: ATA port 32 * @link: ATA link
33 * @deadline: deadline jiffies for the operation 33 * @deadline: deadline jiffies for the operation
34 * 34 *
35 * Set up cable type and use generic probe init 35 * Set up cable type and use generic probe init
36 */ 36 */
37 37
38static int oldpiix_pre_reset(struct ata_port *ap, unsigned long deadline) 38static int oldpiix_pre_reset(struct ata_link *link, unsigned long deadline)
39{ 39{
40 struct ata_port *ap = link->ap;
40 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 41 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
41 static const struct pci_bits oldpiix_enable_bits[] = { 42 static const struct pci_bits oldpiix_enable_bits[] = {
42 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ 43 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
@@ -46,7 +47,7 @@ static int oldpiix_pre_reset(struct ata_port *ap, unsigned long deadline)
46 if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no])) 47 if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no]))
47 return -ENOENT; 48 return -ENOENT;
48 49
49 return ata_std_prereset(ap, deadline); 50 return ata_std_prereset(link, deadline);
50} 51}
51 52
52/** 53/**
@@ -237,7 +238,6 @@ static struct scsi_host_template oldpiix_sht = {
237}; 238};
238 239
239static const struct ata_port_operations oldpiix_pata_ops = { 240static const struct ata_port_operations oldpiix_pata_ops = {
240 .port_disable = ata_port_disable,
241 .set_piomode = oldpiix_set_piomode, 241 .set_piomode = oldpiix_set_piomode,
242 .set_dmamode = oldpiix_set_dmamode, 242 .set_dmamode = oldpiix_set_dmamode,
243 .mode_filter = ata_pci_default_filter, 243 .mode_filter = ata_pci_default_filter,
@@ -265,9 +265,8 @@ static const struct ata_port_operations oldpiix_pata_ops = {
265 .irq_handler = ata_interrupt, 265 .irq_handler = ata_interrupt,
266 .irq_clear = ata_bmdma_irq_clear, 266 .irq_clear = ata_bmdma_irq_clear,
267 .irq_on = ata_irq_on, 267 .irq_on = ata_irq_on,
268 .irq_ack = ata_irq_ack,
269 268
270 .port_start = ata_port_start, 269 .port_start = ata_sff_port_start,
271}; 270};
272 271
273 272
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index 458bf67f76..8f79447b61 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -46,14 +46,15 @@ enum {
46 46
47/** 47/**
48 * opti_pre_reset - probe begin 48 * opti_pre_reset - probe begin
49 * @ap: ATA port 49 * @link: ATA link
50 * @deadline: deadline jiffies for the operation 50 * @deadline: deadline jiffies for the operation
51 * 51 *
52 * Set up cable type and use generic probe init 52 * Set up cable type and use generic probe init
53 */ 53 */
54 54
55static int opti_pre_reset(struct ata_port *ap, unsigned long deadline) 55static int opti_pre_reset(struct ata_link *link, unsigned long deadline)
56{ 56{
57 struct ata_port *ap = link->ap;
57 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 58 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
58 static const struct pci_bits opti_enable_bits[] = { 59 static const struct pci_bits opti_enable_bits[] = {
59 { 0x45, 1, 0x80, 0x00 }, 60 { 0x45, 1, 0x80, 0x00 },
@@ -63,7 +64,7 @@ static int opti_pre_reset(struct ata_port *ap, unsigned long deadline)
63 if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no])) 64 if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no]))
64 return -ENOENT; 65 return -ENOENT;
65 66
66 return ata_std_prereset(ap, deadline); 67 return ata_std_prereset(link, deadline);
67} 68}
68 69
69/** 70/**
@@ -182,7 +183,6 @@ static struct scsi_host_template opti_sht = {
182}; 183};
183 184
184static struct ata_port_operations opti_port_ops = { 185static struct ata_port_operations opti_port_ops = {
185 .port_disable = ata_port_disable,
186 .set_piomode = opti_set_piomode, 186 .set_piomode = opti_set_piomode,
187 .tf_load = ata_tf_load, 187 .tf_load = ata_tf_load,
188 .tf_read = ata_tf_read, 188 .tf_read = ata_tf_read,
@@ -209,9 +209,8 @@ static struct ata_port_operations opti_port_ops = {
209 .irq_handler = ata_interrupt, 209 .irq_handler = ata_interrupt,
210 .irq_clear = ata_bmdma_irq_clear, 210 .irq_clear = ata_bmdma_irq_clear,
211 .irq_on = ata_irq_on, 211 .irq_on = ata_irq_on,
212 .irq_ack = ata_irq_ack,
213 212
214 .port_start = ata_port_start, 213 .port_start = ata_sff_port_start,
215}; 214};
216 215
217static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id) 216static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index f89bdfde16..6b07b5b485 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -47,14 +47,15 @@ static int pci_clock; /* 0 = 33 1 = 25 */
47 47
48/** 48/**
49 * optidma_pre_reset - probe begin 49 * optidma_pre_reset - probe begin
50 * @ap: ATA port 50 * @link: ATA link
51 * @deadline: deadline jiffies for the operation 51 * @deadline: deadline jiffies for the operation
52 * 52 *
53 * Set up cable type and use generic probe init 53 * Set up cable type and use generic probe init
54 */ 54 */
55 55
56static int optidma_pre_reset(struct ata_port *ap, unsigned long deadline) 56static int optidma_pre_reset(struct ata_link *link, unsigned long deadline)
57{ 57{
58 struct ata_port *ap = link->ap;
58 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 59 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
59 static const struct pci_bits optidma_enable_bits = { 60 static const struct pci_bits optidma_enable_bits = {
60 0x40, 1, 0x08, 0x00 61 0x40, 1, 0x08, 0x00
@@ -63,7 +64,7 @@ static int optidma_pre_reset(struct ata_port *ap, unsigned long deadline)
63 if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits)) 64 if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits))
64 return -ENOENT; 65 return -ENOENT;
65 66
66 return ata_std_prereset(ap, deadline); 67 return ata_std_prereset(link, deadline);
67} 68}
68 69
69/** 70/**
@@ -323,25 +324,26 @@ static u8 optidma_make_bits43(struct ata_device *adev)
323 324
324/** 325/**
325 * optidma_set_mode - mode setup 326 * optidma_set_mode - mode setup
326 * @ap: port to set up 327 * @link: link to set up
327 * 328 *
328 * Use the standard setup to tune the chipset and then finalise the 329 * Use the standard setup to tune the chipset and then finalise the
329 * configuration by writing the nibble of extra bits of data into 330 * configuration by writing the nibble of extra bits of data into
330 * the chip. 331 * the chip.
331 */ 332 */
332 333
333static int optidma_set_mode(struct ata_port *ap, struct ata_device **r_failed) 334static int optidma_set_mode(struct ata_link *link, struct ata_device **r_failed)
334{ 335{
336 struct ata_port *ap = link->ap;
335 u8 r; 337 u8 r;
336 int nybble = 4 * ap->port_no; 338 int nybble = 4 * ap->port_no;
337 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 339 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
338 int rc = ata_do_set_mode(ap, r_failed); 340 int rc = ata_do_set_mode(link, r_failed);
339 if (rc == 0) { 341 if (rc == 0) {
340 pci_read_config_byte(pdev, 0x43, &r); 342 pci_read_config_byte(pdev, 0x43, &r);
341 343
342 r &= (0x0F << nybble); 344 r &= (0x0F << nybble);
343 r |= (optidma_make_bits43(&ap->device[0]) + 345 r |= (optidma_make_bits43(&link->device[0]) +
344 (optidma_make_bits43(&ap->device[0]) << 2)) << nybble; 346 (optidma_make_bits43(&link->device[0]) << 2)) << nybble;
345 pci_write_config_byte(pdev, 0x43, r); 347 pci_write_config_byte(pdev, 0x43, r);
346 } 348 }
347 return rc; 349 return rc;
@@ -366,7 +368,6 @@ static struct scsi_host_template optidma_sht = {
366}; 368};
367 369
368static struct ata_port_operations optidma_port_ops = { 370static struct ata_port_operations optidma_port_ops = {
369 .port_disable = ata_port_disable,
370 .set_piomode = optidma_set_pio_mode, 371 .set_piomode = optidma_set_pio_mode,
371 .set_dmamode = optidma_set_dma_mode, 372 .set_dmamode = optidma_set_dma_mode,
372 373
@@ -396,13 +397,11 @@ static struct ata_port_operations optidma_port_ops = {
396 .irq_handler = ata_interrupt, 397 .irq_handler = ata_interrupt,
397 .irq_clear = ata_bmdma_irq_clear, 398 .irq_clear = ata_bmdma_irq_clear,
398 .irq_on = ata_irq_on, 399 .irq_on = ata_irq_on,
399 .irq_ack = ata_irq_ack,
400 400
401 .port_start = ata_port_start, 401 .port_start = ata_sff_port_start,
402}; 402};
403 403
404static struct ata_port_operations optiplus_port_ops = { 404static struct ata_port_operations optiplus_port_ops = {
405 .port_disable = ata_port_disable,
406 .set_piomode = optiplus_set_pio_mode, 405 .set_piomode = optiplus_set_pio_mode,
407 .set_dmamode = optiplus_set_dma_mode, 406 .set_dmamode = optiplus_set_dma_mode,
408 407
@@ -432,9 +431,8 @@ static struct ata_port_operations optiplus_port_ops = {
432 .irq_handler = ata_interrupt, 431 .irq_handler = ata_interrupt,
433 .irq_clear = ata_bmdma_irq_clear, 432 .irq_clear = ata_bmdma_irq_clear,
434 .irq_on = ata_irq_on, 433 .irq_on = ata_irq_on,
435 .irq_ack = ata_irq_ack,
436 434
437 .port_start = ata_port_start, 435 .port_start = ata_sff_port_start,
438}; 436};
439 437
440/** 438/**
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 0f2b027624..782ff4ada9 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -56,7 +56,7 @@ struct ata_pcmcia_info {
56 56
57/** 57/**
58 * pcmcia_set_mode - PCMCIA specific mode setup 58 * pcmcia_set_mode - PCMCIA specific mode setup
59 * @ap: Port 59 * @link: link
60 * @r_failed_dev: Return pointer for failed device 60 * @r_failed_dev: Return pointer for failed device
61 * 61 *
62 * Perform the tuning and setup of the devices and timings, which 62 * Perform the tuning and setup of the devices and timings, which
@@ -65,13 +65,13 @@ struct ata_pcmcia_info {
65 * decode, which alas is embarrassingly common in the PC world 65 * decode, which alas is embarrassingly common in the PC world
66 */ 66 */
67 67
68static int pcmcia_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) 68static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
69{ 69{
70 struct ata_device *master = &ap->device[0]; 70 struct ata_device *master = &link->device[0];
71 struct ata_device *slave = &ap->device[1]; 71 struct ata_device *slave = &link->device[1];
72 72
73 if (!ata_dev_enabled(master) || !ata_dev_enabled(slave)) 73 if (!ata_dev_enabled(master) || !ata_dev_enabled(slave))
74 return ata_do_set_mode(ap, r_failed_dev); 74 return ata_do_set_mode(link, r_failed_dev);
75 75
76 if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV, 76 if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV,
77 ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) 77 ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0)
@@ -84,7 +84,7 @@ static int pcmcia_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev
84 ata_dev_disable(slave); 84 ata_dev_disable(slave);
85 } 85 }
86 } 86 }
87 return ata_do_set_mode(ap, r_failed_dev); 87 return ata_do_set_mode(link, r_failed_dev);
88} 88}
89 89
90static struct scsi_host_template pcmcia_sht = { 90static struct scsi_host_template pcmcia_sht = {
@@ -107,7 +107,6 @@ static struct scsi_host_template pcmcia_sht = {
107 107
108static struct ata_port_operations pcmcia_port_ops = { 108static struct ata_port_operations pcmcia_port_ops = {
109 .set_mode = pcmcia_set_mode, 109 .set_mode = pcmcia_set_mode,
110 .port_disable = ata_port_disable,
111 .tf_load = ata_tf_load, 110 .tf_load = ata_tf_load,
112 .tf_read = ata_tf_read, 111 .tf_read = ata_tf_read,
113 .check_status = ata_check_status, 112 .check_status = ata_check_status,
@@ -127,7 +126,6 @@ static struct ata_port_operations pcmcia_port_ops = {
127 126
128 .irq_clear = ata_bmdma_irq_clear, 127 .irq_clear = ata_bmdma_irq_clear,
129 .irq_on = ata_irq_on, 128 .irq_on = ata_irq_on,
130 .irq_ack = ata_irq_ack,
131 129
132 .port_start = ata_sff_port_start, 130 .port_start = ata_sff_port_start,
133}; 131};
@@ -304,6 +302,8 @@ next_entry:
304 ap->ioaddr.ctl_addr = ctl_addr; 302 ap->ioaddr.ctl_addr = ctl_addr;
305 ata_std_ports(&ap->ioaddr); 303 ata_std_ports(&ap->ioaddr);
306 304
305 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base);
306
307 /* activate */ 307 /* activate */
308 ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_interrupt, 308 ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_interrupt,
309 IRQF_SHARED, &pcmcia_sht); 309 IRQF_SHARED, &pcmcia_sht);
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index bb64a986e8..3d3f1558cd 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -69,7 +69,7 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev);
69static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc); 69static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc);
70static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask); 70static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask);
71static int pdc2027x_cable_detect(struct ata_port *ap); 71static int pdc2027x_cable_detect(struct ata_port *ap);
72static int pdc2027x_set_mode(struct ata_port *ap, struct ata_device **r_failed); 72static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed);
73 73
74/* 74/*
75 * ATA Timing Tables based on 133MHz controller clock. 75 * ATA Timing Tables based on 133MHz controller clock.
@@ -147,7 +147,6 @@ static struct scsi_host_template pdc2027x_sht = {
147}; 147};
148 148
149static struct ata_port_operations pdc2027x_pata100_ops = { 149static struct ata_port_operations pdc2027x_pata100_ops = {
150 .port_disable = ata_port_disable,
151 .mode_filter = ata_pci_default_filter, 150 .mode_filter = ata_pci_default_filter,
152 151
153 .tf_load = ata_tf_load, 152 .tf_load = ata_tf_load,
@@ -173,13 +172,11 @@ static struct ata_port_operations pdc2027x_pata100_ops = {
173 172
174 .irq_clear = ata_bmdma_irq_clear, 173 .irq_clear = ata_bmdma_irq_clear,
175 .irq_on = ata_irq_on, 174 .irq_on = ata_irq_on,
176 .irq_ack = ata_irq_ack,
177 175
178 .port_start = ata_port_start, 176 .port_start = ata_sff_port_start,
179}; 177};
180 178
181static struct ata_port_operations pdc2027x_pata133_ops = { 179static struct ata_port_operations pdc2027x_pata133_ops = {
182 .port_disable = ata_port_disable,
183 .set_piomode = pdc2027x_set_piomode, 180 .set_piomode = pdc2027x_set_piomode,
184 .set_dmamode = pdc2027x_set_dmamode, 181 .set_dmamode = pdc2027x_set_dmamode,
185 .set_mode = pdc2027x_set_mode, 182 .set_mode = pdc2027x_set_mode,
@@ -208,9 +205,8 @@ static struct ata_port_operations pdc2027x_pata133_ops = {
208 205
209 .irq_clear = ata_bmdma_irq_clear, 206 .irq_clear = ata_bmdma_irq_clear,
210 .irq_on = ata_irq_on, 207 .irq_on = ata_irq_on,
211 .irq_ack = ata_irq_ack,
212 208
213 .port_start = ata_port_start, 209 .port_start = ata_sff_port_start,
214}; 210};
215 211
216static struct ata_port_info pdc2027x_port_info[] = { 212static struct ata_port_info pdc2027x_port_info[] = {
@@ -277,7 +273,7 @@ static int pdc2027x_cable_detect(struct ata_port *ap)
277 u32 cgcr; 273 u32 cgcr;
278 274
279 /* check cable detect results */ 275 /* check cable detect results */
280 cgcr = readl(port_mmio(ap, PDC_GLOBAL_CTL)); 276 cgcr = ioread32(port_mmio(ap, PDC_GLOBAL_CTL));
281 if (cgcr & (1 << 26)) 277 if (cgcr & (1 << 26))
282 goto cbl40; 278 goto cbl40;
283 279
@@ -295,12 +291,12 @@ cbl40:
295 */ 291 */
296static inline int pdc2027x_port_enabled(struct ata_port *ap) 292static inline int pdc2027x_port_enabled(struct ata_port *ap)
297{ 293{
298 return readb(port_mmio(ap, PDC_ATA_CTL)) & 0x02; 294 return ioread8(port_mmio(ap, PDC_ATA_CTL)) & 0x02;
299} 295}
300 296
301/** 297/**
302 * pdc2027x_prereset - prereset for PATA host controller 298 * pdc2027x_prereset - prereset for PATA host controller
303 * @ap: Target port 299 * @link: Target link
304 * @deadline: deadline jiffies for the operation 300 * @deadline: deadline jiffies for the operation
305 * 301 *
306 * Probeinit including cable detection. 302 * Probeinit including cable detection.
@@ -309,12 +305,12 @@ static inline int pdc2027x_port_enabled(struct ata_port *ap)
309 * None (inherited from caller). 305 * None (inherited from caller).
310 */ 306 */
311 307
312static int pdc2027x_prereset(struct ata_port *ap, unsigned long deadline) 308static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline)
313{ 309{
314 /* Check whether port enabled */ 310 /* Check whether port enabled */
315 if (!pdc2027x_port_enabled(ap)) 311 if (!pdc2027x_port_enabled(link->ap))
316 return -ENOENT; 312 return -ENOENT;
317 return ata_std_prereset(ap, deadline); 313 return ata_std_prereset(link, deadline);
318} 314}
319 315
320/** 316/**
@@ -387,16 +383,16 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
387 /* Set the PIO timing registers using value table for 133MHz */ 383 /* Set the PIO timing registers using value table for 133MHz */
388 PDPRINTK("Set pio regs... \n"); 384 PDPRINTK("Set pio regs... \n");
389 385
390 ctcr0 = readl(dev_mmio(ap, adev, PDC_CTCR0)); 386 ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
391 ctcr0 &= 0xffff0000; 387 ctcr0 &= 0xffff0000;
392 ctcr0 |= pdc2027x_pio_timing_tbl[pio].value0 | 388 ctcr0 |= pdc2027x_pio_timing_tbl[pio].value0 |
393 (pdc2027x_pio_timing_tbl[pio].value1 << 8); 389 (pdc2027x_pio_timing_tbl[pio].value1 << 8);
394 writel(ctcr0, dev_mmio(ap, adev, PDC_CTCR0)); 390 iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
395 391
396 ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1)); 392 ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
397 ctcr1 &= 0x00ffffff; 393 ctcr1 &= 0x00ffffff;
398 ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24); 394 ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
399 writel(ctcr1, dev_mmio(ap, adev, PDC_CTCR1)); 395 iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
400 396
401 PDPRINTK("Set pio regs done\n"); 397 PDPRINTK("Set pio regs done\n");
402 398
@@ -430,18 +426,18 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
430 * If tHOLD is '1', the hardware will add half clock for data hold time. 426 * If tHOLD is '1', the hardware will add half clock for data hold time.
431 * This code segment seems to be no effect. tHOLD will be overwritten below. 427 * This code segment seems to be no effect. tHOLD will be overwritten below.
432 */ 428 */
433 ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1)); 429 ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
434 writel(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1)); 430 iowrite32(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
435 } 431 }
436 432
437 PDPRINTK("Set udma regs... \n"); 433 PDPRINTK("Set udma regs... \n");
438 434
439 ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1)); 435 ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
440 ctcr1 &= 0xff000000; 436 ctcr1 &= 0xff000000;
441 ctcr1 |= pdc2027x_udma_timing_tbl[udma_mode].value0 | 437 ctcr1 |= pdc2027x_udma_timing_tbl[udma_mode].value0 |
442 (pdc2027x_udma_timing_tbl[udma_mode].value1 << 8) | 438 (pdc2027x_udma_timing_tbl[udma_mode].value1 << 8) |
443 (pdc2027x_udma_timing_tbl[udma_mode].value2 << 16); 439 (pdc2027x_udma_timing_tbl[udma_mode].value2 << 16);
444 writel(ctcr1, dev_mmio(ap, adev, PDC_CTCR1)); 440 iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
445 441
446 PDPRINTK("Set udma regs done\n"); 442 PDPRINTK("Set udma regs done\n");
447 443
@@ -453,13 +449,13 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
453 unsigned int mdma_mode = dma_mode & 0x07; 449 unsigned int mdma_mode = dma_mode & 0x07;
454 450
455 PDPRINTK("Set mdma regs... \n"); 451 PDPRINTK("Set mdma regs... \n");
456 ctcr0 = readl(dev_mmio(ap, adev, PDC_CTCR0)); 452 ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
457 453
458 ctcr0 &= 0x0000ffff; 454 ctcr0 &= 0x0000ffff;
459 ctcr0 |= (pdc2027x_mdma_timing_tbl[mdma_mode].value0 << 16) | 455 ctcr0 |= (pdc2027x_mdma_timing_tbl[mdma_mode].value0 << 16) |
460 (pdc2027x_mdma_timing_tbl[mdma_mode].value1 << 24); 456 (pdc2027x_mdma_timing_tbl[mdma_mode].value1 << 24);
461 457
462 writel(ctcr0, dev_mmio(ap, adev, PDC_CTCR0)); 458 iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
463 PDPRINTK("Set mdma regs done\n"); 459 PDPRINTK("Set mdma regs done\n");
464 460
465 PDPRINTK("Set to mdma mode[%u] \n", mdma_mode); 461 PDPRINTK("Set to mdma mode[%u] \n", mdma_mode);
@@ -470,24 +466,24 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
470 466
471/** 467/**
472 * pdc2027x_set_mode - Set the timing registers back to correct values. 468 * pdc2027x_set_mode - Set the timing registers back to correct values.
473 * @ap: Port to configure 469 * @link: link to configure
474 * @r_failed: Returned device for failure 470 * @r_failed: Returned device for failure
475 * 471 *
476 * The pdc2027x hardware will look at "SET FEATURES" and change the timing registers 472 * The pdc2027x hardware will look at "SET FEATURES" and change the timing registers
477 * automatically. The values set by the hardware might be incorrect, under 133Mhz PLL. 473 * automatically. The values set by the hardware might be incorrect, under 133Mhz PLL.
478 * This function overwrites the possibly incorrect values set by the hardware to be correct. 474 * This function overwrites the possibly incorrect values set by the hardware to be correct.
479 */ 475 */
480static int pdc2027x_set_mode(struct ata_port *ap, struct ata_device **r_failed) 476static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed)
481{ 477{
482 int i; 478 struct ata_port *ap = link->ap;
483 479 struct ata_device *dev;
484 i = ata_do_set_mode(ap, r_failed); 480 int rc;
485 if (i < 0)
486 return i;
487 481
488 for (i = 0; i < ATA_MAX_DEVICES; i++) { 482 rc = ata_do_set_mode(link, r_failed);
489 struct ata_device *dev = &ap->device[i]; 483 if (rc < 0)
484 return rc;
490 485
486 ata_link_for_each_dev(dev, link) {
491 if (ata_dev_enabled(dev)) { 487 if (ata_dev_enabled(dev)) {
492 488
493 pdc2027x_set_piomode(ap, dev); 489 pdc2027x_set_piomode(ap, dev);
@@ -496,9 +492,9 @@ static int pdc2027x_set_mode(struct ata_port *ap, struct ata_device **r_failed)
496 * Enable prefetch if the device support PIO only. 492 * Enable prefetch if the device support PIO only.
497 */ 493 */
498 if (dev->xfer_shift == ATA_SHIFT_PIO) { 494 if (dev->xfer_shift == ATA_SHIFT_PIO) {
499 u32 ctcr1 = readl(dev_mmio(ap, dev, PDC_CTCR1)); 495 u32 ctcr1 = ioread32(dev_mmio(ap, dev, PDC_CTCR1));
500 ctcr1 |= (1 << 25); 496 ctcr1 |= (1 << 25);
501 writel(ctcr1, dev_mmio(ap, dev, PDC_CTCR1)); 497 iowrite32(ctcr1, dev_mmio(ap, dev, PDC_CTCR1));
502 498
503 PDPRINTK("Turn on prefetch\n"); 499 PDPRINTK("Turn on prefetch\n");
504 } else { 500 } else {
@@ -563,14 +559,12 @@ static long pdc_read_counter(struct ata_host *host)
563 u32 bccrl, bccrh, bccrlv, bccrhv; 559 u32 bccrl, bccrh, bccrlv, bccrhv;
564 560
565retry: 561retry:
566 bccrl = readl(mmio_base + PDC_BYTE_COUNT) & 0x7fff; 562 bccrl = ioread32(mmio_base + PDC_BYTE_COUNT) & 0x7fff;
567 bccrh = readl(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff; 563 bccrh = ioread32(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff;
568 rmb();
569 564
570 /* Read the counter values again for verification */ 565 /* Read the counter values again for verification */
571 bccrlv = readl(mmio_base + PDC_BYTE_COUNT) & 0x7fff; 566 bccrlv = ioread32(mmio_base + PDC_BYTE_COUNT) & 0x7fff;
572 bccrhv = readl(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff; 567 bccrhv = ioread32(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff;
573 rmb();
574 568
575 counter = (bccrh << 15) | bccrl; 569 counter = (bccrh << 15) | bccrl;
576 570
@@ -619,7 +613,7 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
619 /* Show the current clock value of PLL control register 613 /* Show the current clock value of PLL control register
620 * (maybe already configured by the firmware) 614 * (maybe already configured by the firmware)
621 */ 615 */
622 pll_ctl = readw(mmio_base + PDC_PLL_CTL); 616 pll_ctl = ioread16(mmio_base + PDC_PLL_CTL);
623 617
624 PDPRINTK("pll_ctl[%X]\n", pll_ctl); 618 PDPRINTK("pll_ctl[%X]\n", pll_ctl);
625#endif 619#endif
@@ -659,8 +653,8 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
659 653
660 PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl); 654 PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl);
661 655
662 writew(pll_ctl, mmio_base + PDC_PLL_CTL); 656 iowrite16(pll_ctl, mmio_base + PDC_PLL_CTL);
663 readw(mmio_base + PDC_PLL_CTL); /* flush */ 657 ioread16(mmio_base + PDC_PLL_CTL); /* flush */
664 658
665 /* Wait the PLL circuit to be stable */ 659 /* Wait the PLL circuit to be stable */
666 mdelay(30); 660 mdelay(30);
@@ -670,7 +664,7 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
670 * Show the current clock value of PLL control register 664 * Show the current clock value of PLL control register
671 * (maybe configured by the firmware) 665 * (maybe configured by the firmware)
672 */ 666 */
673 pll_ctl = readw(mmio_base + PDC_PLL_CTL); 667 pll_ctl = ioread16(mmio_base + PDC_PLL_CTL);
674 668
675 PDPRINTK("pll_ctl[%X]\n", pll_ctl); 669 PDPRINTK("pll_ctl[%X]\n", pll_ctl);
676#endif 670#endif
@@ -693,10 +687,10 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
693 long pll_clock, usec_elapsed; 687 long pll_clock, usec_elapsed;
694 688
695 /* Start the test mode */ 689 /* Start the test mode */
696 scr = readl(mmio_base + PDC_SYS_CTL); 690 scr = ioread32(mmio_base + PDC_SYS_CTL);
697 PDPRINTK("scr[%X]\n", scr); 691 PDPRINTK("scr[%X]\n", scr);
698 writel(scr | (0x01 << 14), mmio_base + PDC_SYS_CTL); 692 iowrite32(scr | (0x01 << 14), mmio_base + PDC_SYS_CTL);
699 readl(mmio_base + PDC_SYS_CTL); /* flush */ 693 ioread32(mmio_base + PDC_SYS_CTL); /* flush */
700 694
701 /* Read current counter value */ 695 /* Read current counter value */
702 start_count = pdc_read_counter(host); 696 start_count = pdc_read_counter(host);
@@ -710,10 +704,10 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
710 do_gettimeofday(&end_time); 704 do_gettimeofday(&end_time);
711 705
712 /* Stop the test mode */ 706 /* Stop the test mode */
713 scr = readl(mmio_base + PDC_SYS_CTL); 707 scr = ioread32(mmio_base + PDC_SYS_CTL);
714 PDPRINTK("scr[%X]\n", scr); 708 PDPRINTK("scr[%X]\n", scr);
715 writel(scr & ~(0x01 << 14), mmio_base + PDC_SYS_CTL); 709 iowrite32(scr & ~(0x01 << 14), mmio_base + PDC_SYS_CTL);
716 readl(mmio_base + PDC_SYS_CTL); /* flush */ 710 ioread32(mmio_base + PDC_SYS_CTL); /* flush */
717 711
718 /* calculate the input clock in Hz */ 712 /* calculate the input clock in Hz */
719 usec_elapsed = (end_time.tv_sec - start_time.tv_sec) * 1000000 + 713 usec_elapsed = (end_time.tv_sec - start_time.tv_sec) * 1000000 +
@@ -745,9 +739,6 @@ static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
745 */ 739 */
746 pll_clock = pdc_detect_pll_input_clock(host); 740 pll_clock = pdc_detect_pll_input_clock(host);
747 741
748 if (pll_clock < 0) /* counter overflow? Try again. */
749 pll_clock = pdc_detect_pll_input_clock(host);
750
751 dev_printk(KERN_INFO, host->dev, "PLL input clock %ld kHz\n", pll_clock/1000); 742 dev_printk(KERN_INFO, host->dev, "PLL input clock %ld kHz\n", pll_clock/1000);
752 743
753 /* Adjust PLL control register */ 744 /* Adjust PLL control register */
@@ -791,12 +782,14 @@ static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base)
791static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 782static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
792{ 783{
793 static int printed_version; 784 static int printed_version;
785 static const unsigned long cmd_offset[] = { 0x17c0, 0x15c0 };
786 static const unsigned long bmdma_offset[] = { 0x1000, 0x1008 };
794 unsigned int board_idx = (unsigned int) ent->driver_data; 787 unsigned int board_idx = (unsigned int) ent->driver_data;
795 const struct ata_port_info *ppi[] = 788 const struct ata_port_info *ppi[] =
796 { &pdc2027x_port_info[board_idx], NULL }; 789 { &pdc2027x_port_info[board_idx], NULL };
797 struct ata_host *host; 790 struct ata_host *host;
798 void __iomem *mmio_base; 791 void __iomem *mmio_base;
799 int rc; 792 int i, rc;
800 793
801 if (!printed_version++) 794 if (!printed_version++)
802 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 795 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -826,10 +819,15 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
826 819
827 mmio_base = host->iomap[PDC_MMIO_BAR]; 820 mmio_base = host->iomap[PDC_MMIO_BAR];
828 821
829 pdc_ata_setup_port(&host->ports[0]->ioaddr, mmio_base + 0x17c0); 822 for (i = 0; i < 2; i++) {
830 host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x1000; 823 struct ata_port *ap = host->ports[i];
831 pdc_ata_setup_port(&host->ports[1]->ioaddr, mmio_base + 0x15c0); 824
832 host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x1008; 825 pdc_ata_setup_port(&ap->ioaddr, mmio_base + cmd_offset[i]);
826 ap->ioaddr.bmdma_addr = mmio_base + bmdma_offset[i];
827
828 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
829 ata_port_pbar_desc(ap, PDC_MMIO_BAR, cmd_offset[i], "cmd");
830 }
833 831
834 //pci_enable_intx(pdev); 832 //pci_enable_intx(pdev);
835 833
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index 92447bed5e..65d951618c 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -9,7 +9,7 @@
9 * First cut with LBA48/ATAPI 9 * First cut with LBA48/ATAPI
10 * 10 *
11 * TODO: 11 * TODO:
12 * Channel interlock/reset on both required 12 * Channel interlock/reset on both required ?
13 */ 13 */
14 14
15#include <linux/kernel.h> 15#include <linux/kernel.h>
@@ -22,7 +22,7 @@
22#include <linux/libata.h> 22#include <linux/libata.h>
23 23
24#define DRV_NAME "pata_pdc202xx_old" 24#define DRV_NAME "pata_pdc202xx_old"
25#define DRV_VERSION "0.4.2" 25#define DRV_VERSION "0.4.3"
26 26
27static int pdc2026x_cable_detect(struct ata_port *ap) 27static int pdc2026x_cable_detect(struct ata_port *ap)
28{ 28{
@@ -106,9 +106,9 @@ static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
106 { 0x20, 0x01 } 106 { 0x20, 0x01 }
107 }; 107 };
108 static u8 mdma_timing[3][2] = { 108 static u8 mdma_timing[3][2] = {
109 { 0x60, 0x03 },
110 { 0x60, 0x04 },
111 { 0xe0, 0x0f }, 109 { 0xe0, 0x0f },
110 { 0x60, 0x04 },
111 { 0x60, 0x03 },
112 }; 112 };
113 u8 r_bp, r_cp; 113 u8 r_bp, r_cp;
114 114
@@ -139,6 +139,9 @@ static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
139 * 139 *
140 * In UDMA3 or higher we have to clock switch for the duration of the 140 * In UDMA3 or higher we have to clock switch for the duration of the
141 * DMA transfer sequence. 141 * DMA transfer sequence.
142 *
143 * Note: The host lock held by the libata layer protects
144 * us from two channels both trying to set DMA bits at once
142 */ 145 */
143 146
144static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc) 147static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
@@ -187,6 +190,9 @@ static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
187 * 190 *
188 * After a DMA completes we need to put the clock back to 33MHz for 191 * After a DMA completes we need to put the clock back to 33MHz for
189 * PIO timings. 192 * PIO timings.
193 *
194 * Note: The host lock held by the libata layer protects
195 * us from two channels both trying to set DMA bits at once
190 */ 196 */
191 197
192static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc) 198static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
@@ -206,7 +212,6 @@ static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
206 iowrite32(0, atapi_reg); 212 iowrite32(0, atapi_reg);
207 iowrite8(ioread8(clock) & ~sel66, clock); 213 iowrite8(ioread8(clock) & ~sel66, clock);
208 } 214 }
209 /* Check we keep host level locking here */
210 /* Flip back to 33Mhz for PIO */ 215 /* Flip back to 33Mhz for PIO */
211 if (adev->dma_mode >= XFER_UDMA_2) 216 if (adev->dma_mode >= XFER_UDMA_2)
212 iowrite8(ioread8(clock) & ~sel66, clock); 217 iowrite8(ioread8(clock) & ~sel66, clock);
@@ -247,7 +252,6 @@ static struct scsi_host_template pdc202xx_sht = {
247}; 252};
248 253
249static struct ata_port_operations pdc2024x_port_ops = { 254static struct ata_port_operations pdc2024x_port_ops = {
250 .port_disable = ata_port_disable,
251 .set_piomode = pdc202xx_set_piomode, 255 .set_piomode = pdc202xx_set_piomode,
252 .set_dmamode = pdc202xx_set_dmamode, 256 .set_dmamode = pdc202xx_set_dmamode,
253 .mode_filter = ata_pci_default_filter, 257 .mode_filter = ata_pci_default_filter,
@@ -275,13 +279,11 @@ static struct ata_port_operations pdc2024x_port_ops = {
275 .irq_handler = ata_interrupt, 279 .irq_handler = ata_interrupt,
276 .irq_clear = ata_bmdma_irq_clear, 280 .irq_clear = ata_bmdma_irq_clear,
277 .irq_on = ata_irq_on, 281 .irq_on = ata_irq_on,
278 .irq_ack = ata_irq_ack,
279 282
280 .port_start = ata_port_start, 283 .port_start = ata_sff_port_start,
281}; 284};
282 285
283static struct ata_port_operations pdc2026x_port_ops = { 286static struct ata_port_operations pdc2026x_port_ops = {
284 .port_disable = ata_port_disable,
285 .set_piomode = pdc202xx_set_piomode, 287 .set_piomode = pdc202xx_set_piomode,
286 .set_dmamode = pdc202xx_set_dmamode, 288 .set_dmamode = pdc202xx_set_dmamode,
287 .mode_filter = ata_pci_default_filter, 289 .mode_filter = ata_pci_default_filter,
@@ -310,9 +312,8 @@ static struct ata_port_operations pdc2026x_port_ops = {
310 .irq_handler = ata_interrupt, 312 .irq_handler = ata_interrupt,
311 .irq_clear = ata_bmdma_irq_clear, 313 .irq_clear = ata_bmdma_irq_clear,
312 .irq_on = ata_irq_on, 314 .irq_on = ata_irq_on,
313 .irq_ack = ata_irq_ack,
314 315
315 .port_start = ata_port_start, 316 .port_start = ata_sff_port_start,
316}; 317};
317 318
318static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id) 319static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 5086d03f2d..fc72a96564 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -30,13 +30,11 @@ static int pio_mask = 1;
30 * Provide our own set_mode() as we don't want to change anything that has 30 * Provide our own set_mode() as we don't want to change anything that has
31 * already been configured.. 31 * already been configured..
32 */ 32 */
33static int pata_platform_set_mode(struct ata_port *ap, struct ata_device **unused) 33static int pata_platform_set_mode(struct ata_link *link, struct ata_device **unused)
34{ 34{
35 int i; 35 struct ata_device *dev;
36
37 for (i = 0; i < ATA_MAX_DEVICES; i++) {
38 struct ata_device *dev = &ap->device[i];
39 36
37 ata_link_for_each_dev(dev, link) {
40 if (ata_dev_enabled(dev)) { 38 if (ata_dev_enabled(dev)) {
41 /* We don't really care */ 39 /* We don't really care */
42 dev->pio_mode = dev->xfer_mode = XFER_PIO_0; 40 dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
@@ -71,7 +69,6 @@ static struct scsi_host_template pata_platform_sht = {
71static struct ata_port_operations pata_platform_port_ops = { 69static struct ata_port_operations pata_platform_port_ops = {
72 .set_mode = pata_platform_set_mode, 70 .set_mode = pata_platform_set_mode,
73 71
74 .port_disable = ata_port_disable,
75 .tf_load = ata_tf_load, 72 .tf_load = ata_tf_load,
76 .tf_read = ata_tf_read, 73 .tf_read = ata_tf_read,
77 .check_status = ata_check_status, 74 .check_status = ata_check_status,
@@ -91,7 +88,6 @@ static struct ata_port_operations pata_platform_port_ops = {
91 88
92 .irq_clear = ata_bmdma_irq_clear, 89 .irq_clear = ata_bmdma_irq_clear,
93 .irq_on = ata_irq_on, 90 .irq_on = ata_irq_on,
94 .irq_ack = ata_irq_ack,
95 91
96 .port_start = ata_dummy_ret0, 92 .port_start = ata_dummy_ret0,
97}; 93};
@@ -209,9 +205,13 @@ static int __devinit pata_platform_probe(struct platform_device *pdev)
209 205
210 ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; 206 ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
211 207
212 pp_info = (struct pata_platform_info *)(pdev->dev.platform_data); 208 pp_info = pdev->dev.platform_data;
213 pata_platform_setup_port(&ap->ioaddr, pp_info); 209 pata_platform_setup_port(&ap->ioaddr, pp_info);
214 210
211 ata_port_desc(ap, "%s cmd 0x%llx ctl 0x%llx", mmio ? "mmio" : "ioport",
212 (unsigned long long)io_res->start,
213 (unsigned long long)ctl_res->start);
214
215 /* activate */ 215 /* activate */
216 return ata_host_activate(host, platform_get_irq(pdev, 0), 216 return ata_host_activate(host, platform_get_irq(pdev, 0),
217 ata_interrupt, pp_info ? pp_info->irq_flags 217 ata_interrupt, pp_info ? pp_info->irq_flags
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index 1998c19e87..7d4c696c4c 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -126,7 +126,7 @@ static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc)
126 126
127static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data) 127static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
128{ 128{
129 struct ata_port *ap = adev->ap; 129 struct ata_port *ap = adev->link->ap;
130 int slop = buflen & 3; 130 int slop = buflen & 3;
131 131
132 if (ata_id_has_dword_io(adev->id)) { 132 if (ata_id_has_dword_io(adev->id)) {
@@ -170,7 +170,6 @@ static struct scsi_host_template qdi_sht = {
170}; 170};
171 171
172static struct ata_port_operations qdi6500_port_ops = { 172static struct ata_port_operations qdi6500_port_ops = {
173 .port_disable = ata_port_disable,
174 .set_piomode = qdi6500_set_piomode, 173 .set_piomode = qdi6500_set_piomode,
175 174
176 .tf_load = ata_tf_load, 175 .tf_load = ata_tf_load,
@@ -192,13 +191,11 @@ static struct ata_port_operations qdi6500_port_ops = {
192 191
193 .irq_clear = ata_bmdma_irq_clear, 192 .irq_clear = ata_bmdma_irq_clear,
194 .irq_on = ata_irq_on, 193 .irq_on = ata_irq_on,
195 .irq_ack = ata_irq_ack,
196 194
197 .port_start = ata_port_start, 195 .port_start = ata_sff_port_start,
198}; 196};
199 197
200static struct ata_port_operations qdi6580_port_ops = { 198static struct ata_port_operations qdi6580_port_ops = {
201 .port_disable = ata_port_disable,
202 .set_piomode = qdi6580_set_piomode, 199 .set_piomode = qdi6580_set_piomode,
203 200
204 .tf_load = ata_tf_load, 201 .tf_load = ata_tf_load,
@@ -220,9 +217,8 @@ static struct ata_port_operations qdi6580_port_ops = {
220 217
221 .irq_clear = ata_bmdma_irq_clear, 218 .irq_clear = ata_bmdma_irq_clear,
222 .irq_on = ata_irq_on, 219 .irq_on = ata_irq_on,
223 .irq_ack = ata_irq_ack,
224 220
225 .port_start = ata_port_start, 221 .port_start = ata_sff_port_start,
226}; 222};
227 223
228/** 224/**
@@ -238,6 +234,7 @@ static struct ata_port_operations qdi6580_port_ops = {
238 234
239static __init int qdi_init_one(unsigned long port, int type, unsigned long io, int irq, int fast) 235static __init int qdi_init_one(unsigned long port, int type, unsigned long io, int irq, int fast)
240{ 236{
237 unsigned long ctl = io + 0x206;
241 struct platform_device *pdev; 238 struct platform_device *pdev;
242 struct ata_host *host; 239 struct ata_host *host;
243 struct ata_port *ap; 240 struct ata_port *ap;
@@ -254,7 +251,7 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i
254 251
255 ret = -ENOMEM; 252 ret = -ENOMEM;
256 io_addr = devm_ioport_map(&pdev->dev, io, 8); 253 io_addr = devm_ioport_map(&pdev->dev, io, 8);
257 ctl_addr = devm_ioport_map(&pdev->dev, io + 0x206, 1); 254 ctl_addr = devm_ioport_map(&pdev->dev, ctl, 1);
258 if (!io_addr || !ctl_addr) 255 if (!io_addr || !ctl_addr)
259 goto fail; 256 goto fail;
260 257
@@ -279,6 +276,8 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i
279 ap->ioaddr.ctl_addr = ctl_addr; 276 ap->ioaddr.ctl_addr = ctl_addr;
280 ata_std_ports(&ap->ioaddr); 277 ata_std_ports(&ap->ioaddr);
281 278
279 ata_port_desc(ap, "cmd %lx ctl %lx", io, ctl);
280
282 /* 281 /*
283 * Hook in a private data structure per channel 282 * Hook in a private data structure per channel
284 */ 283 */
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 7d1aabed42..d5b76497f4 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -203,7 +203,6 @@ static struct scsi_host_template radisys_sht = {
203}; 203};
204 204
205static const struct ata_port_operations radisys_pata_ops = { 205static const struct ata_port_operations radisys_pata_ops = {
206 .port_disable = ata_port_disable,
207 .set_piomode = radisys_set_piomode, 206 .set_piomode = radisys_set_piomode,
208 .set_dmamode = radisys_set_dmamode, 207 .set_dmamode = radisys_set_dmamode,
209 .mode_filter = ata_pci_default_filter, 208 .mode_filter = ata_pci_default_filter,
@@ -231,9 +230,8 @@ static const struct ata_port_operations radisys_pata_ops = {
231 .irq_handler = ata_interrupt, 230 .irq_handler = ata_interrupt,
232 .irq_clear = ata_bmdma_irq_clear, 231 .irq_clear = ata_bmdma_irq_clear,
233 .irq_on = ata_irq_on, 232 .irq_on = ata_irq_on,
234 .irq_ack = ata_irq_ack,
235 233
236 .port_start = ata_port_start, 234 .port_start = ata_sff_port_start,
237}; 235};
238 236
239 237
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index 7632fcb070..ba8a31c55e 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -26,7 +26,7 @@
26 26
27/** 27/**
28 * rz1000_set_mode - mode setting function 28 * rz1000_set_mode - mode setting function
29 * @ap: ATA interface 29 * @link: ATA link
30 * @unused: returned device on set_mode failure 30 * @unused: returned device on set_mode failure
31 * 31 *
32 * Use a non standard set_mode function. We don't want to be tuned. We 32 * Use a non standard set_mode function. We don't want to be tuned. We
@@ -34,12 +34,11 @@
34 * whacked out. 34 * whacked out.
35 */ 35 */
36 36
37static int rz1000_set_mode(struct ata_port *ap, struct ata_device **unused) 37static int rz1000_set_mode(struct ata_link *link, struct ata_device **unused)
38{ 38{
39 int i; 39 struct ata_device *dev;
40 40
41 for (i = 0; i < ATA_MAX_DEVICES; i++) { 41 ata_link_for_each_dev(dev, link) {
42 struct ata_device *dev = &ap->device[i];
43 if (ata_dev_enabled(dev)) { 42 if (ata_dev_enabled(dev)) {
44 /* We don't really care */ 43 /* We don't really care */
45 dev->pio_mode = XFER_PIO_0; 44 dev->pio_mode = XFER_PIO_0;
@@ -74,7 +73,6 @@ static struct scsi_host_template rz1000_sht = {
74static struct ata_port_operations rz1000_port_ops = { 73static struct ata_port_operations rz1000_port_ops = {
75 .set_mode = rz1000_set_mode, 74 .set_mode = rz1000_set_mode,
76 75
77 .port_disable = ata_port_disable,
78 .tf_load = ata_tf_load, 76 .tf_load = ata_tf_load,
79 .tf_read = ata_tf_read, 77 .tf_read = ata_tf_read,
80 .check_status = ata_check_status, 78 .check_status = ata_check_status,
@@ -100,9 +98,8 @@ static struct ata_port_operations rz1000_port_ops = {
100 .irq_handler = ata_interrupt, 98 .irq_handler = ata_interrupt,
101 .irq_clear = ata_bmdma_irq_clear, 99 .irq_clear = ata_bmdma_irq_clear,
102 .irq_on = ata_irq_on, 100 .irq_on = ata_irq_on,
103 .irq_ack = ata_irq_ack,
104 101
105 .port_start = ata_port_start, 102 .port_start = ata_sff_port_start,
106}; 103};
107 104
108static int rz1000_fifo_disable(struct pci_dev *pdev) 105static int rz1000_fifo_disable(struct pci_dev *pdev)
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index 5edf67b1f3..21ebc485ca 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -197,7 +197,6 @@ static struct scsi_host_template sc1200_sht = {
197}; 197};
198 198
199static struct ata_port_operations sc1200_port_ops = { 199static struct ata_port_operations sc1200_port_ops = {
200 .port_disable = ata_port_disable,
201 .set_piomode = sc1200_set_piomode, 200 .set_piomode = sc1200_set_piomode,
202 .set_dmamode = sc1200_set_dmamode, 201 .set_dmamode = sc1200_set_dmamode,
203 .mode_filter = ata_pci_default_filter, 202 .mode_filter = ata_pci_default_filter,
@@ -227,9 +226,8 @@ static struct ata_port_operations sc1200_port_ops = {
227 .irq_handler = ata_interrupt, 226 .irq_handler = ata_interrupt,
228 .irq_clear = ata_bmdma_irq_clear, 227 .irq_clear = ata_bmdma_irq_clear,
229 .irq_on = ata_irq_on, 228 .irq_on = ata_irq_on,
230 .irq_ack = ata_irq_ack,
231 229
232 .port_start = ata_port_start, 230 .port_start = ata_sff_port_start,
233}; 231};
234 232
235/** 233/**
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 2d048ef25a..55576138fa 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -603,16 +603,17 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
603 * Note: Original code is ata_std_softreset(). 603 * Note: Original code is ata_std_softreset().
604 */ 604 */
605 605
606static int scc_std_softreset (struct ata_port *ap, unsigned int *classes, 606static int scc_std_softreset(struct ata_link *link, unsigned int *classes,
607 unsigned long deadline) 607 unsigned long deadline)
608{ 608{
609 struct ata_port *ap = link->ap;
609 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; 610 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
610 unsigned int devmask = 0, err_mask; 611 unsigned int devmask = 0, err_mask;
611 u8 err; 612 u8 err;
612 613
613 DPRINTK("ENTER\n"); 614 DPRINTK("ENTER\n");
614 615
615 if (ata_port_offline(ap)) { 616 if (ata_link_offline(link)) {
616 classes[0] = ATA_DEV_NONE; 617 classes[0] = ATA_DEV_NONE;
617 goto out; 618 goto out;
618 } 619 }
@@ -636,9 +637,11 @@ static int scc_std_softreset (struct ata_port *ap, unsigned int *classes,
636 } 637 }
637 638
638 /* determine by signature whether we have ATA or ATAPI devices */ 639 /* determine by signature whether we have ATA or ATAPI devices */
639 classes[0] = ata_dev_try_classify(ap, 0, &err); 640 classes[0] = ata_dev_try_classify(&ap->link.device[0],
641 devmask & (1 << 0), &err);
640 if (slave_possible && err != 0x81) 642 if (slave_possible && err != 0x81)
641 classes[1] = ata_dev_try_classify(ap, 1, &err); 643 classes[1] = ata_dev_try_classify(&ap->link.device[1],
644 devmask & (1 << 1), &err);
642 645
643 out: 646 out:
644 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); 647 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
@@ -701,7 +704,7 @@ static void scc_bmdma_stop (struct ata_queued_cmd *qc)
701 printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME); 704 printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME);
702 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT); 705 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT);
703 /* TBD: SW reset */ 706 /* TBD: SW reset */
704 scc_std_softreset(ap, &classes, deadline); 707 scc_std_softreset(&ap->link, &classes, deadline);
705 continue; 708 continue;
706 } 709 }
707 710
@@ -740,7 +743,7 @@ static u8 scc_bmdma_status (struct ata_port *ap)
740 void __iomem *mmio = ap->ioaddr.bmdma_addr; 743 void __iomem *mmio = ap->ioaddr.bmdma_addr;
741 u8 host_stat = in_be32(mmio + SCC_DMA_STATUS); 744 u8 host_stat = in_be32(mmio + SCC_DMA_STATUS);
742 u32 int_status = in_be32(mmio + SCC_DMA_INTST); 745 u32 int_status = in_be32(mmio + SCC_DMA_INTST);
743 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag); 746 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
744 static int retry = 0; 747 static int retry = 0;
745 748
746 /* return if IOS_SS is cleared */ 749 /* return if IOS_SS is cleared */
@@ -785,7 +788,7 @@ static u8 scc_bmdma_status (struct ata_port *ap)
785static void scc_data_xfer (struct ata_device *adev, unsigned char *buf, 788static void scc_data_xfer (struct ata_device *adev, unsigned char *buf,
786 unsigned int buflen, int write_data) 789 unsigned int buflen, int write_data)
787{ 790{
788 struct ata_port *ap = adev->ap; 791 struct ata_port *ap = adev->link->ap;
789 unsigned int words = buflen >> 1; 792 unsigned int words = buflen >> 1;
790 unsigned int i; 793 unsigned int i;
791 u16 *buf16 = (u16 *) buf; 794 u16 *buf16 = (u16 *) buf;
@@ -839,38 +842,6 @@ static u8 scc_irq_on (struct ata_port *ap)
839} 842}
840 843
841/** 844/**
842 * scc_irq_ack - Acknowledge a device interrupt.
843 * @ap: Port on which interrupts are enabled.
844 *
845 * Note: Original code is ata_irq_ack().
846 */
847
848static u8 scc_irq_ack (struct ata_port *ap, unsigned int chk_drq)
849{
850 unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
851 u8 host_stat, post_stat, status;
852
853 status = ata_busy_wait(ap, bits, 1000);
854 if (status & bits)
855 if (ata_msg_err(ap))
856 printk(KERN_ERR "abnormal status 0x%X\n", status);
857
858 /* get controller status; clear intr, err bits */
859 host_stat = in_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS);
860 out_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS,
861 host_stat | ATA_DMA_INTR | ATA_DMA_ERR);
862
863 post_stat = in_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS);
864
865 if (ata_msg_intr(ap))
866 printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
867 __FUNCTION__,
868 host_stat, post_stat, status);
869
870 return status;
871}
872
873/**
874 * scc_bmdma_freeze - Freeze BMDMA controller port 845 * scc_bmdma_freeze - Freeze BMDMA controller port
875 * @ap: port to freeze 846 * @ap: port to freeze
876 * 847 *
@@ -901,10 +872,10 @@ static void scc_bmdma_freeze (struct ata_port *ap)
901 * @deadline: deadline jiffies for the operation 872 * @deadline: deadline jiffies for the operation
902 */ 873 */
903 874
904static int scc_pata_prereset(struct ata_port *ap, unsigned long deadline) 875static int scc_pata_prereset(struct ata_link *link, unsigned long deadline)
905{ 876{
906 ap->cbl = ATA_CBL_PATA80; 877 link->ap->cbl = ATA_CBL_PATA80;
907 return ata_std_prereset(ap, deadline); 878 return ata_std_prereset(link, deadline);
908} 879}
909 880
910/** 881/**
@@ -915,8 +886,10 @@ static int scc_pata_prereset(struct ata_port *ap, unsigned long deadline)
915 * Note: Original code is ata_std_postreset(). 886 * Note: Original code is ata_std_postreset().
916 */ 887 */
917 888
918static void scc_std_postreset (struct ata_port *ap, unsigned int *classes) 889static void scc_std_postreset(struct ata_link *link, unsigned int *classes)
919{ 890{
891 struct ata_port *ap = link->ap;
892
920 DPRINTK("ENTER\n"); 893 DPRINTK("ENTER\n");
921 894
922 /* is double-select really necessary? */ 895 /* is double-select really necessary? */
@@ -1020,7 +993,6 @@ static struct scsi_host_template scc_sht = {
1020}; 993};
1021 994
1022static const struct ata_port_operations scc_pata_ops = { 995static const struct ata_port_operations scc_pata_ops = {
1023 .port_disable = ata_port_disable,
1024 .set_piomode = scc_set_piomode, 996 .set_piomode = scc_set_piomode,
1025 .set_dmamode = scc_set_dmamode, 997 .set_dmamode = scc_set_dmamode,
1026 .mode_filter = scc_mode_filter, 998 .mode_filter = scc_mode_filter,
@@ -1047,7 +1019,6 @@ static const struct ata_port_operations scc_pata_ops = {
1047 1019
1048 .irq_clear = scc_bmdma_irq_clear, 1020 .irq_clear = scc_bmdma_irq_clear,
1049 .irq_on = scc_irq_on, 1021 .irq_on = scc_irq_on,
1050 .irq_ack = scc_irq_ack,
1051 1022
1052 .port_start = scc_port_start, 1023 .port_start = scc_port_start,
1053 .port_stop = scc_port_stop, 1024 .port_stop = scc_port_stop,
@@ -1193,6 +1164,9 @@ static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1193 return rc; 1164 return rc;
1194 host->iomap = pcim_iomap_table(pdev); 1165 host->iomap = pcim_iomap_table(pdev);
1195 1166
1167 ata_port_pbar_desc(host->ports[0], SCC_CTRL_BAR, -1, "ctrl");
1168 ata_port_pbar_desc(host->ports[0], SCC_BMID_BAR, -1, "bmid");
1169
1196 rc = scc_host_init(host); 1170 rc = scc_host_init(host);
1197 if (rc) 1171 if (rc)
1198 return rc; 1172 return rc;
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 0faf99c8f1..df68806df4 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -318,7 +318,6 @@ static struct scsi_host_template serverworks_sht = {
318}; 318};
319 319
320static struct ata_port_operations serverworks_osb4_port_ops = { 320static struct ata_port_operations serverworks_osb4_port_ops = {
321 .port_disable = ata_port_disable,
322 .set_piomode = serverworks_set_piomode, 321 .set_piomode = serverworks_set_piomode,
323 .set_dmamode = serverworks_set_dmamode, 322 .set_dmamode = serverworks_set_dmamode,
324 .mode_filter = serverworks_osb4_filter, 323 .mode_filter = serverworks_osb4_filter,
@@ -348,13 +347,11 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
348 .irq_handler = ata_interrupt, 347 .irq_handler = ata_interrupt,
349 .irq_clear = ata_bmdma_irq_clear, 348 .irq_clear = ata_bmdma_irq_clear,
350 .irq_on = ata_irq_on, 349 .irq_on = ata_irq_on,
351 .irq_ack = ata_irq_ack,
352 350
353 .port_start = ata_port_start, 351 .port_start = ata_sff_port_start,
354}; 352};
355 353
356static struct ata_port_operations serverworks_csb_port_ops = { 354static struct ata_port_operations serverworks_csb_port_ops = {
357 .port_disable = ata_port_disable,
358 .set_piomode = serverworks_set_piomode, 355 .set_piomode = serverworks_set_piomode,
359 .set_dmamode = serverworks_set_dmamode, 356 .set_dmamode = serverworks_set_dmamode,
360 .mode_filter = serverworks_csb_filter, 357 .mode_filter = serverworks_csb_filter,
@@ -384,9 +381,8 @@ static struct ata_port_operations serverworks_csb_port_ops = {
384 .irq_handler = ata_interrupt, 381 .irq_handler = ata_interrupt,
385 .irq_clear = ata_bmdma_irq_clear, 382 .irq_clear = ata_bmdma_irq_clear,
386 .irq_on = ata_irq_on, 383 .irq_on = ata_irq_on,
387 .irq_ack = ata_irq_ack,
388 384
389 .port_start = ata_port_start, 385 .port_start = ata_sff_port_start,
390}; 386};
391 387
392static int serverworks_fixup_osb4(struct pci_dev *pdev) 388static int serverworks_fixup_osb4(struct pci_dev *pdev)
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 40395804a6..2eb75cd74a 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -95,15 +95,16 @@ static int sil680_cable_detect(struct ata_port *ap) {
95 95
96/** 96/**
97 * sil680_bus_reset - reset the SIL680 bus 97 * sil680_bus_reset - reset the SIL680 bus
98 * @ap: ATA port to reset 98 * @link: ATA link to reset
99 * @deadline: deadline jiffies for the operation 99 * @deadline: deadline jiffies for the operation
100 * 100 *
101 * Perform the SIL680 housekeeping when doing an ATA bus reset 101 * Perform the SIL680 housekeeping when doing an ATA bus reset
102 */ 102 */
103 103
104static int sil680_bus_reset(struct ata_port *ap,unsigned int *classes, 104static int sil680_bus_reset(struct ata_link *link, unsigned int *classes,
105 unsigned long deadline) 105 unsigned long deadline)
106{ 106{
107 struct ata_port *ap = link->ap;
107 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 108 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
108 unsigned long addr = sil680_selreg(ap, 0); 109 unsigned long addr = sil680_selreg(ap, 0);
109 u8 reset; 110 u8 reset;
@@ -112,7 +113,7 @@ static int sil680_bus_reset(struct ata_port *ap,unsigned int *classes,
112 pci_write_config_byte(pdev, addr, reset | 0x03); 113 pci_write_config_byte(pdev, addr, reset | 0x03);
113 udelay(25); 114 udelay(25);
114 pci_write_config_byte(pdev, addr, reset); 115 pci_write_config_byte(pdev, addr, reset);
115 return ata_std_softreset(ap, classes, deadline); 116 return ata_std_softreset(link, classes, deadline);
116} 117}
117 118
118static void sil680_error_handler(struct ata_port *ap) 119static void sil680_error_handler(struct ata_port *ap)
@@ -237,7 +238,6 @@ static struct scsi_host_template sil680_sht = {
237}; 238};
238 239
239static struct ata_port_operations sil680_port_ops = { 240static struct ata_port_operations sil680_port_ops = {
240 .port_disable = ata_port_disable,
241 .set_piomode = sil680_set_piomode, 241 .set_piomode = sil680_set_piomode,
242 .set_dmamode = sil680_set_dmamode, 242 .set_dmamode = sil680_set_dmamode,
243 .mode_filter = ata_pci_default_filter, 243 .mode_filter = ata_pci_default_filter,
@@ -266,9 +266,8 @@ static struct ata_port_operations sil680_port_ops = {
266 .irq_handler = ata_interrupt, 266 .irq_handler = ata_interrupt,
267 .irq_clear = ata_bmdma_irq_clear, 267 .irq_clear = ata_bmdma_irq_clear,
268 .irq_on = ata_irq_on, 268 .irq_on = ata_irq_on,
269 .irq_ack = ata_irq_ack,
270 269
271 .port_start = ata_port_start, 270 .port_start = ata_sff_port_start,
272}; 271};
273 272
274/** 273/**
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index cce2834b2b..3b5be77e86 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -84,7 +84,7 @@ static int sis_short_ata40(struct pci_dev *dev)
84 84
85static int sis_old_port_base(struct ata_device *adev) 85static int sis_old_port_base(struct ata_device *adev)
86{ 86{
87 return 0x40 + (4 * adev->ap->port_no) + (2 * adev->devno); 87 return 0x40 + (4 * adev->link->ap->port_no) + (2 * adev->devno);
88} 88}
89 89
90/** 90/**
@@ -133,19 +133,20 @@ static int sis_66_cable_detect(struct ata_port *ap)
133 133
134/** 134/**
135 * sis_pre_reset - probe begin 135 * sis_pre_reset - probe begin
136 * @ap: ATA port 136 * @link: ATA link
137 * @deadline: deadline jiffies for the operation 137 * @deadline: deadline jiffies for the operation
138 * 138 *
139 * Set up cable type and use generic probe init 139 * Set up cable type and use generic probe init
140 */ 140 */
141 141
142static int sis_pre_reset(struct ata_port *ap, unsigned long deadline) 142static int sis_pre_reset(struct ata_link *link, unsigned long deadline)
143{ 143{
144 static const struct pci_bits sis_enable_bits[] = { 144 static const struct pci_bits sis_enable_bits[] = {
145 { 0x4aU, 1U, 0x02UL, 0x02UL }, /* port 0 */ 145 { 0x4aU, 1U, 0x02UL, 0x02UL }, /* port 0 */
146 { 0x4aU, 1U, 0x04UL, 0x04UL }, /* port 1 */ 146 { 0x4aU, 1U, 0x04UL, 0x04UL }, /* port 1 */
147 }; 147 };
148 148
149 struct ata_port *ap = link->ap;
149 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 150 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
150 151
151 if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no])) 152 if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no]))
@@ -154,7 +155,7 @@ static int sis_pre_reset(struct ata_port *ap, unsigned long deadline)
154 /* Clear the FIFO settings. We can't enable the FIFO until 155 /* Clear the FIFO settings. We can't enable the FIFO until
155 we know we are poking at a disk */ 156 we know we are poking at a disk */
156 pci_write_config_byte(pdev, 0x4B, 0); 157 pci_write_config_byte(pdev, 0x4B, 0);
157 return ata_std_prereset(ap, deadline); 158 return ata_std_prereset(link, deadline);
158} 159}
159 160
160 161
@@ -530,7 +531,6 @@ static struct scsi_host_template sis_sht = {
530}; 531};
531 532
532static const struct ata_port_operations sis_133_ops = { 533static const struct ata_port_operations sis_133_ops = {
533 .port_disable = ata_port_disable,
534 .set_piomode = sis_133_set_piomode, 534 .set_piomode = sis_133_set_piomode,
535 .set_dmamode = sis_133_set_dmamode, 535 .set_dmamode = sis_133_set_dmamode,
536 .mode_filter = ata_pci_default_filter, 536 .mode_filter = ata_pci_default_filter,
@@ -558,13 +558,11 @@ static const struct ata_port_operations sis_133_ops = {
558 .irq_handler = ata_interrupt, 558 .irq_handler = ata_interrupt,
559 .irq_clear = ata_bmdma_irq_clear, 559 .irq_clear = ata_bmdma_irq_clear,
560 .irq_on = ata_irq_on, 560 .irq_on = ata_irq_on,
561 .irq_ack = ata_irq_ack,
562 561
563 .port_start = ata_port_start, 562 .port_start = ata_sff_port_start,
564}; 563};
565 564
566static const struct ata_port_operations sis_133_for_sata_ops = { 565static const struct ata_port_operations sis_133_for_sata_ops = {
567 .port_disable = ata_port_disable,
568 .set_piomode = sis_133_set_piomode, 566 .set_piomode = sis_133_set_piomode,
569 .set_dmamode = sis_133_set_dmamode, 567 .set_dmamode = sis_133_set_dmamode,
570 .mode_filter = ata_pci_default_filter, 568 .mode_filter = ata_pci_default_filter,
@@ -592,13 +590,11 @@ static const struct ata_port_operations sis_133_for_sata_ops = {
592 .irq_handler = ata_interrupt, 590 .irq_handler = ata_interrupt,
593 .irq_clear = ata_bmdma_irq_clear, 591 .irq_clear = ata_bmdma_irq_clear,
594 .irq_on = ata_irq_on, 592 .irq_on = ata_irq_on,
595 .irq_ack = ata_irq_ack,
596 593
597 .port_start = ata_port_start, 594 .port_start = ata_sff_port_start,
598}; 595};
599 596
600static const struct ata_port_operations sis_133_early_ops = { 597static const struct ata_port_operations sis_133_early_ops = {
601 .port_disable = ata_port_disable,
602 .set_piomode = sis_100_set_piomode, 598 .set_piomode = sis_100_set_piomode,
603 .set_dmamode = sis_133_early_set_dmamode, 599 .set_dmamode = sis_133_early_set_dmamode,
604 .mode_filter = ata_pci_default_filter, 600 .mode_filter = ata_pci_default_filter,
@@ -626,13 +622,11 @@ static const struct ata_port_operations sis_133_early_ops = {
626 .irq_handler = ata_interrupt, 622 .irq_handler = ata_interrupt,
627 .irq_clear = ata_bmdma_irq_clear, 623 .irq_clear = ata_bmdma_irq_clear,
628 .irq_on = ata_irq_on, 624 .irq_on = ata_irq_on,
629 .irq_ack = ata_irq_ack,
630 625
631 .port_start = ata_port_start, 626 .port_start = ata_sff_port_start,
632}; 627};
633 628
634static const struct ata_port_operations sis_100_ops = { 629static const struct ata_port_operations sis_100_ops = {
635 .port_disable = ata_port_disable,
636 .set_piomode = sis_100_set_piomode, 630 .set_piomode = sis_100_set_piomode,
637 .set_dmamode = sis_100_set_dmamode, 631 .set_dmamode = sis_100_set_dmamode,
638 .mode_filter = ata_pci_default_filter, 632 .mode_filter = ata_pci_default_filter,
@@ -660,13 +654,11 @@ static const struct ata_port_operations sis_100_ops = {
660 .irq_handler = ata_interrupt, 654 .irq_handler = ata_interrupt,
661 .irq_clear = ata_bmdma_irq_clear, 655 .irq_clear = ata_bmdma_irq_clear,
662 .irq_on = ata_irq_on, 656 .irq_on = ata_irq_on,
663 .irq_ack = ata_irq_ack,
664 657
665 .port_start = ata_port_start, 658 .port_start = ata_sff_port_start,
666}; 659};
667 660
668static const struct ata_port_operations sis_66_ops = { 661static const struct ata_port_operations sis_66_ops = {
669 .port_disable = ata_port_disable,
670 .set_piomode = sis_old_set_piomode, 662 .set_piomode = sis_old_set_piomode,
671 .set_dmamode = sis_66_set_dmamode, 663 .set_dmamode = sis_66_set_dmamode,
672 .mode_filter = ata_pci_default_filter, 664 .mode_filter = ata_pci_default_filter,
@@ -694,13 +686,11 @@ static const struct ata_port_operations sis_66_ops = {
694 .irq_handler = ata_interrupt, 686 .irq_handler = ata_interrupt,
695 .irq_clear = ata_bmdma_irq_clear, 687 .irq_clear = ata_bmdma_irq_clear,
696 .irq_on = ata_irq_on, 688 .irq_on = ata_irq_on,
697 .irq_ack = ata_irq_ack,
698 689
699 .port_start = ata_port_start, 690 .port_start = ata_sff_port_start,
700}; 691};
701 692
702static const struct ata_port_operations sis_old_ops = { 693static const struct ata_port_operations sis_old_ops = {
703 .port_disable = ata_port_disable,
704 .set_piomode = sis_old_set_piomode, 694 .set_piomode = sis_old_set_piomode,
705 .set_dmamode = sis_old_set_dmamode, 695 .set_dmamode = sis_old_set_dmamode,
706 .mode_filter = ata_pci_default_filter, 696 .mode_filter = ata_pci_default_filter,
@@ -728,9 +718,8 @@ static const struct ata_port_operations sis_old_ops = {
728 .irq_handler = ata_interrupt, 718 .irq_handler = ata_interrupt,
729 .irq_clear = ata_bmdma_irq_clear, 719 .irq_clear = ata_bmdma_irq_clear,
730 .irq_on = ata_irq_on, 720 .irq_on = ata_irq_on,
731 .irq_ack = ata_irq_ack,
732 721
733 .port_start = ata_port_start, 722 .port_start = ata_sff_port_start,
734}; 723};
735 724
736static const struct ata_port_info sis_info = { 725static const struct ata_port_info sis_info = {
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index c0f43bb259..1388cef52c 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -43,23 +43,24 @@ enum {
43 43
44/** 44/**
45 * sl82c105_pre_reset - probe begin 45 * sl82c105_pre_reset - probe begin
46 * @ap: ATA port 46 * @link: ATA link
47 * @deadline: deadline jiffies for the operation 47 * @deadline: deadline jiffies for the operation
48 * 48 *
49 * Set up cable type and use generic probe init 49 * Set up cable type and use generic probe init
50 */ 50 */
51 51
52static int sl82c105_pre_reset(struct ata_port *ap, unsigned long deadline) 52static int sl82c105_pre_reset(struct ata_link *link, unsigned long deadline)
53{ 53{
54 static const struct pci_bits sl82c105_enable_bits[] = { 54 static const struct pci_bits sl82c105_enable_bits[] = {
55 { 0x40, 1, 0x01, 0x01 }, 55 { 0x40, 1, 0x01, 0x01 },
56 { 0x40, 1, 0x10, 0x10 } 56 { 0x40, 1, 0x10, 0x10 }
57 }; 57 };
58 struct ata_port *ap = link->ap;
58 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 59 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
59 60
60 if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no])) 61 if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no]))
61 return -ENOENT; 62 return -ENOENT;
62 return ata_std_prereset(ap, deadline); 63 return ata_std_prereset(link, deadline);
63} 64}
64 65
65 66
@@ -224,7 +225,6 @@ static struct scsi_host_template sl82c105_sht = {
224}; 225};
225 226
226static struct ata_port_operations sl82c105_port_ops = { 227static struct ata_port_operations sl82c105_port_ops = {
227 .port_disable = ata_port_disable,
228 .set_piomode = sl82c105_set_piomode, 228 .set_piomode = sl82c105_set_piomode,
229 .mode_filter = ata_pci_default_filter, 229 .mode_filter = ata_pci_default_filter,
230 230
@@ -253,9 +253,8 @@ static struct ata_port_operations sl82c105_port_ops = {
253 .irq_handler = ata_interrupt, 253 .irq_handler = ata_interrupt,
254 .irq_clear = ata_bmdma_irq_clear, 254 .irq_clear = ata_bmdma_irq_clear,
255 .irq_on = ata_irq_on, 255 .irq_on = ata_irq_on,
256 .irq_ack = ata_irq_ack,
257 256
258 .port_start = ata_port_start, 257 .port_start = ata_sff_port_start,
259}; 258};
260 259
261/** 260/**
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index af21f443db..403eafcffe 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -47,25 +47,26 @@
47 47
48/** 48/**
49 * triflex_prereset - probe begin 49 * triflex_prereset - probe begin
50 * @ap: ATA port 50 * @link: ATA link
51 * @deadline: deadline jiffies for the operation 51 * @deadline: deadline jiffies for the operation
52 * 52 *
53 * Set up cable type and use generic probe init 53 * Set up cable type and use generic probe init
54 */ 54 */
55 55
56static int triflex_prereset(struct ata_port *ap, unsigned long deadline) 56static int triflex_prereset(struct ata_link *link, unsigned long deadline)
57{ 57{
58 static const struct pci_bits triflex_enable_bits[] = { 58 static const struct pci_bits triflex_enable_bits[] = {
59 { 0x80, 1, 0x01, 0x01 }, 59 { 0x80, 1, 0x01, 0x01 },
60 { 0x80, 1, 0x02, 0x02 } 60 { 0x80, 1, 0x02, 0x02 }
61 }; 61 };
62 62
63 struct ata_port *ap = link->ap;
63 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 64 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
64 65
65 if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no])) 66 if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no]))
66 return -ENOENT; 67 return -ENOENT;
67 68
68 return ata_std_prereset(ap, deadline); 69 return ata_std_prereset(link, deadline);
69} 70}
70 71
71 72
@@ -197,7 +198,6 @@ static struct scsi_host_template triflex_sht = {
197}; 198};
198 199
199static struct ata_port_operations triflex_port_ops = { 200static struct ata_port_operations triflex_port_ops = {
200 .port_disable = ata_port_disable,
201 .set_piomode = triflex_set_piomode, 201 .set_piomode = triflex_set_piomode,
202 .mode_filter = ata_pci_default_filter, 202 .mode_filter = ata_pci_default_filter,
203 203
@@ -226,9 +226,8 @@ static struct ata_port_operations triflex_port_ops = {
226 .irq_handler = ata_interrupt, 226 .irq_handler = ata_interrupt,
227 .irq_clear = ata_bmdma_irq_clear, 227 .irq_clear = ata_bmdma_irq_clear,
228 .irq_on = ata_irq_on, 228 .irq_on = ata_irq_on,
229 .irq_ack = ata_irq_ack,
230 229
231 .port_start = ata_port_start, 230 .port_start = ata_sff_port_start,
232}; 231};
233 232
234static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id) 233static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index f143db4559..5d41b6612d 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -184,11 +184,15 @@ static int via_cable_detect(struct ata_port *ap) {
184 two drives */ 184 two drives */
185 if (ata66 & (0x10100000 >> (16 * ap->port_no))) 185 if (ata66 & (0x10100000 >> (16 * ap->port_no)))
186 return ATA_CBL_PATA80; 186 return ATA_CBL_PATA80;
187 /* Check with ACPI so we can spot BIOS reported SATA bridges */
188 if (ata_acpi_cbl_80wire(ap))
189 return ATA_CBL_PATA80;
187 return ATA_CBL_PATA40; 190 return ATA_CBL_PATA40;
188} 191}
189 192
190static int via_pre_reset(struct ata_port *ap, unsigned long deadline) 193static int via_pre_reset(struct ata_link *link, unsigned long deadline)
191{ 194{
195 struct ata_port *ap = link->ap;
192 const struct via_isa_bridge *config = ap->host->private_data; 196 const struct via_isa_bridge *config = ap->host->private_data;
193 197
194 if (!(config->flags & VIA_NO_ENABLES)) { 198 if (!(config->flags & VIA_NO_ENABLES)) {
@@ -201,7 +205,7 @@ static int via_pre_reset(struct ata_port *ap, unsigned long deadline)
201 return -ENOENT; 205 return -ENOENT;
202 } 206 }
203 207
204 return ata_std_prereset(ap, deadline); 208 return ata_std_prereset(link, deadline);
205} 209}
206 210
207 211
@@ -344,7 +348,6 @@ static struct scsi_host_template via_sht = {
344}; 348};
345 349
346static struct ata_port_operations via_port_ops = { 350static struct ata_port_operations via_port_ops = {
347 .port_disable = ata_port_disable,
348 .set_piomode = via_set_piomode, 351 .set_piomode = via_set_piomode,
349 .set_dmamode = via_set_dmamode, 352 .set_dmamode = via_set_dmamode,
350 .mode_filter = ata_pci_default_filter, 353 .mode_filter = ata_pci_default_filter,
@@ -374,13 +377,11 @@ static struct ata_port_operations via_port_ops = {
374 .irq_handler = ata_interrupt, 377 .irq_handler = ata_interrupt,
375 .irq_clear = ata_bmdma_irq_clear, 378 .irq_clear = ata_bmdma_irq_clear,
376 .irq_on = ata_irq_on, 379 .irq_on = ata_irq_on,
377 .irq_ack = ata_irq_ack,
378 380
379 .port_start = ata_port_start, 381 .port_start = ata_sff_port_start,
380}; 382};
381 383
382static struct ata_port_operations via_port_ops_noirq = { 384static struct ata_port_operations via_port_ops_noirq = {
383 .port_disable = ata_port_disable,
384 .set_piomode = via_set_piomode, 385 .set_piomode = via_set_piomode,
385 .set_dmamode = via_set_dmamode, 386 .set_dmamode = via_set_dmamode,
386 .mode_filter = ata_pci_default_filter, 387 .mode_filter = ata_pci_default_filter,
@@ -410,9 +411,8 @@ static struct ata_port_operations via_port_ops_noirq = {
410 .irq_handler = ata_interrupt, 411 .irq_handler = ata_interrupt,
411 .irq_clear = ata_bmdma_irq_clear, 412 .irq_clear = ata_bmdma_irq_clear,
412 .irq_on = ata_irq_on, 413 .irq_on = ata_irq_on,
413 .irq_ack = ata_irq_ack,
414 414
415 .port_start = ata_port_start, 415 .port_start = ata_sff_port_start,
416}; 416};
417 417
418/** 418/**
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
index 83abfeca40..549cbbe9fd 100644
--- a/drivers/ata/pata_winbond.c
+++ b/drivers/ata/pata_winbond.c
@@ -94,7 +94,7 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
94 94
95static void winbond_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data) 95static void winbond_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
96{ 96{
97 struct ata_port *ap = adev->ap; 97 struct ata_port *ap = adev->link->ap;
98 int slop = buflen & 3; 98 int slop = buflen & 3;
99 99
100 if (ata_id_has_dword_io(adev->id)) { 100 if (ata_id_has_dword_io(adev->id)) {
@@ -138,7 +138,6 @@ static struct scsi_host_template winbond_sht = {
138}; 138};
139 139
140static struct ata_port_operations winbond_port_ops = { 140static struct ata_port_operations winbond_port_ops = {
141 .port_disable = ata_port_disable,
142 .set_piomode = winbond_set_piomode, 141 .set_piomode = winbond_set_piomode,
143 142
144 .tf_load = ata_tf_load, 143 .tf_load = ata_tf_load,
@@ -160,9 +159,8 @@ static struct ata_port_operations winbond_port_ops = {
160 159
161 .irq_clear = ata_bmdma_irq_clear, 160 .irq_clear = ata_bmdma_irq_clear,
162 .irq_on = ata_irq_on, 161 .irq_on = ata_irq_on,
163 .irq_ack = ata_irq_ack,
164 162
165 .port_start = ata_port_start, 163 .port_start = ata_sff_port_start,
166}; 164};
167 165
168/** 166/**
@@ -199,6 +197,7 @@ static __init int winbond_init_one(unsigned long port)
199 197
200 for (i = 0; i < 2 ; i ++) { 198 for (i = 0; i < 2 ; i ++) {
201 unsigned long cmd_port = 0x1F0 - (0x80 * i); 199 unsigned long cmd_port = 0x1F0 - (0x80 * i);
200 unsigned long ctl_port = cmd_port + 0x206;
202 struct ata_host *host; 201 struct ata_host *host;
203 struct ata_port *ap; 202 struct ata_port *ap;
204 void __iomem *cmd_addr, *ctl_addr; 203 void __iomem *cmd_addr, *ctl_addr;
@@ -214,14 +213,16 @@ static __init int winbond_init_one(unsigned long port)
214 host = ata_host_alloc(&pdev->dev, 1); 213 host = ata_host_alloc(&pdev->dev, 1);
215 if (!host) 214 if (!host)
216 goto err_unregister; 215 goto err_unregister;
216 ap = host->ports[0];
217 217
218 rc = -ENOMEM; 218 rc = -ENOMEM;
219 cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8); 219 cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8);
220 ctl_addr = devm_ioport_map(&pdev->dev, cmd_port + 0x0206, 1); 220 ctl_addr = devm_ioport_map(&pdev->dev, ctl_port, 1);
221 if (!cmd_addr || !ctl_addr) 221 if (!cmd_addr || !ctl_addr)
222 goto err_unregister; 222 goto err_unregister;
223 223
224 ap = host->ports[0]; 224 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_port, ctl_port);
225
225 ap->ops = &winbond_port_ops; 226 ap->ops = &winbond_port_ops;
226 ap->pio_mask = 0x1F; 227 ap->pio_mask = 0x1F;
227 ap->flags |= ATA_FLAG_SLAVE_POSS; 228 ap->flags |= ATA_FLAG_SLAVE_POSS;
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 5c79271401..8d1b03d5bc 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -92,6 +92,8 @@ enum {
92 92
93 /* CPB bits */ 93 /* CPB bits */
94 cDONE = (1 << 0), 94 cDONE = (1 << 0),
95 cATERR = (1 << 3),
96
95 cVLD = (1 << 0), 97 cVLD = (1 << 0),
96 cDAT = (1 << 2), 98 cDAT = (1 << 2),
97 cIEN = (1 << 3), 99 cIEN = (1 << 3),
@@ -131,14 +133,15 @@ static int adma_ata_init_one (struct pci_dev *pdev,
131static int adma_port_start(struct ata_port *ap); 133static int adma_port_start(struct ata_port *ap);
132static void adma_host_stop(struct ata_host *host); 134static void adma_host_stop(struct ata_host *host);
133static void adma_port_stop(struct ata_port *ap); 135static void adma_port_stop(struct ata_port *ap);
134static void adma_phy_reset(struct ata_port *ap);
135static void adma_qc_prep(struct ata_queued_cmd *qc); 136static void adma_qc_prep(struct ata_queued_cmd *qc);
136static unsigned int adma_qc_issue(struct ata_queued_cmd *qc); 137static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
137static int adma_check_atapi_dma(struct ata_queued_cmd *qc); 138static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
138static void adma_bmdma_stop(struct ata_queued_cmd *qc); 139static void adma_bmdma_stop(struct ata_queued_cmd *qc);
139static u8 adma_bmdma_status(struct ata_port *ap); 140static u8 adma_bmdma_status(struct ata_port *ap);
140static void adma_irq_clear(struct ata_port *ap); 141static void adma_irq_clear(struct ata_port *ap);
141static void adma_eng_timeout(struct ata_port *ap); 142static void adma_freeze(struct ata_port *ap);
143static void adma_thaw(struct ata_port *ap);
144static void adma_error_handler(struct ata_port *ap);
142 145
143static struct scsi_host_template adma_ata_sht = { 146static struct scsi_host_template adma_ata_sht = {
144 .module = THIS_MODULE, 147 .module = THIS_MODULE,
@@ -159,21 +162,20 @@ static struct scsi_host_template adma_ata_sht = {
159}; 162};
160 163
161static const struct ata_port_operations adma_ata_ops = { 164static const struct ata_port_operations adma_ata_ops = {
162 .port_disable = ata_port_disable,
163 .tf_load = ata_tf_load, 165 .tf_load = ata_tf_load,
164 .tf_read = ata_tf_read, 166 .tf_read = ata_tf_read,
165 .exec_command = ata_exec_command, 167 .exec_command = ata_exec_command,
166 .check_status = ata_check_status, 168 .check_status = ata_check_status,
167 .dev_select = ata_std_dev_select, 169 .dev_select = ata_std_dev_select,
168 .phy_reset = adma_phy_reset,
169 .check_atapi_dma = adma_check_atapi_dma, 170 .check_atapi_dma = adma_check_atapi_dma,
170 .data_xfer = ata_data_xfer, 171 .data_xfer = ata_data_xfer,
171 .qc_prep = adma_qc_prep, 172 .qc_prep = adma_qc_prep,
172 .qc_issue = adma_qc_issue, 173 .qc_issue = adma_qc_issue,
173 .eng_timeout = adma_eng_timeout, 174 .freeze = adma_freeze,
175 .thaw = adma_thaw,
176 .error_handler = adma_error_handler,
174 .irq_clear = adma_irq_clear, 177 .irq_clear = adma_irq_clear,
175 .irq_on = ata_irq_on, 178 .irq_on = ata_irq_on,
176 .irq_ack = ata_irq_ack,
177 .port_start = adma_port_start, 179 .port_start = adma_port_start,
178 .port_stop = adma_port_stop, 180 .port_stop = adma_port_stop,
179 .host_stop = adma_host_stop, 181 .host_stop = adma_host_stop,
@@ -184,7 +186,7 @@ static const struct ata_port_operations adma_ata_ops = {
184static struct ata_port_info adma_port_info[] = { 186static struct ata_port_info adma_port_info[] = {
185 /* board_1841_idx */ 187 /* board_1841_idx */
186 { 188 {
187 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | 189 .flags = ATA_FLAG_SLAVE_POSS |
188 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | 190 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
189 ATA_FLAG_PIO_POLLING, 191 ATA_FLAG_PIO_POLLING,
190 .pio_mask = 0x10, /* pio4 */ 192 .pio_mask = 0x10, /* pio4 */
@@ -273,24 +275,42 @@ static inline void adma_enter_reg_mode(struct ata_port *ap)
273 readb(chan + ADMA_STATUS); /* flush */ 275 readb(chan + ADMA_STATUS); /* flush */
274} 276}
275 277
276static void adma_phy_reset(struct ata_port *ap) 278static void adma_freeze(struct ata_port *ap)
277{ 279{
278 struct adma_port_priv *pp = ap->private_data; 280 void __iomem *chan = ADMA_PORT_REGS(ap);
281
282 /* mask/clear ATA interrupts */
283 writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
284 ata_check_status(ap);
279 285
280 pp->state = adma_state_idle; 286 /* reset ADMA to idle state */
287 writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
288 udelay(2);
289 writew(aPIOMD4 | aNIEN, chan + ADMA_CONTROL);
290 udelay(2);
291}
292
293static void adma_thaw(struct ata_port *ap)
294{
281 adma_reinit_engine(ap); 295 adma_reinit_engine(ap);
282 ata_port_probe(ap);
283 ata_bus_reset(ap);
284} 296}
285 297
286static void adma_eng_timeout(struct ata_port *ap) 298static int adma_prereset(struct ata_link *link, unsigned long deadline)
287{ 299{
300 struct ata_port *ap = link->ap;
288 struct adma_port_priv *pp = ap->private_data; 301 struct adma_port_priv *pp = ap->private_data;
289 302
290 if (pp->state != adma_state_idle) /* healthy paranoia */ 303 if (pp->state != adma_state_idle) /* healthy paranoia */
291 pp->state = adma_state_mmio; 304 pp->state = adma_state_mmio;
292 adma_reinit_engine(ap); 305 adma_reinit_engine(ap);
293 ata_eng_timeout(ap); 306
307 return ata_std_prereset(link, deadline);
308}
309
310static void adma_error_handler(struct ata_port *ap)
311{
312 ata_do_eh(ap, adma_prereset, ata_std_softreset, NULL,
313 ata_std_postreset);
294} 314}
295 315
296static int adma_fill_sg(struct ata_queued_cmd *qc) 316static int adma_fill_sg(struct ata_queued_cmd *qc)
@@ -464,14 +484,33 @@ static inline unsigned int adma_intr_pkt(struct ata_host *host)
464 pp = ap->private_data; 484 pp = ap->private_data;
465 if (!pp || pp->state != adma_state_pkt) 485 if (!pp || pp->state != adma_state_pkt)
466 continue; 486 continue;
467 qc = ata_qc_from_tag(ap, ap->active_tag); 487 qc = ata_qc_from_tag(ap, ap->link.active_tag);
468 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { 488 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
469 if ((status & (aPERR | aPSD | aUIRQ))) 489 if (status & aPERR)
490 qc->err_mask |= AC_ERR_HOST_BUS;
491 else if ((status & (aPSD | aUIRQ)))
470 qc->err_mask |= AC_ERR_OTHER; 492 qc->err_mask |= AC_ERR_OTHER;
493
494 if (pp->pkt[0] & cATERR)
495 qc->err_mask |= AC_ERR_DEV;
471 else if (pp->pkt[0] != cDONE) 496 else if (pp->pkt[0] != cDONE)
472 qc->err_mask |= AC_ERR_OTHER; 497 qc->err_mask |= AC_ERR_OTHER;
473 498
474 ata_qc_complete(qc); 499 if (!qc->err_mask)
500 ata_qc_complete(qc);
501 else {
502 struct ata_eh_info *ehi = &ap->link.eh_info;
503 ata_ehi_clear_desc(ehi);
504 ata_ehi_push_desc(ehi,
505 "ADMA-status 0x%02X", status);
506 ata_ehi_push_desc(ehi,
507 "pkt[0] 0x%02X", pp->pkt[0]);
508
509 if (qc->err_mask == AC_ERR_DEV)
510 ata_port_abort(ap);
511 else
512 ata_port_freeze(ap);
513 }
475 } 514 }
476 } 515 }
477 return handled; 516 return handled;
@@ -489,7 +528,7 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host)
489 struct adma_port_priv *pp = ap->private_data; 528 struct adma_port_priv *pp = ap->private_data;
490 if (!pp || pp->state != adma_state_mmio) 529 if (!pp || pp->state != adma_state_mmio)
491 continue; 530 continue;
492 qc = ata_qc_from_tag(ap, ap->active_tag); 531 qc = ata_qc_from_tag(ap, ap->link.active_tag);
493 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { 532 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
494 533
495 /* check main status, clearing INTRQ */ 534 /* check main status, clearing INTRQ */
@@ -502,7 +541,20 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host)
502 /* complete taskfile transaction */ 541 /* complete taskfile transaction */
503 pp->state = adma_state_idle; 542 pp->state = adma_state_idle;
504 qc->err_mask |= ac_err_mask(status); 543 qc->err_mask |= ac_err_mask(status);
505 ata_qc_complete(qc); 544 if (!qc->err_mask)
545 ata_qc_complete(qc);
546 else {
547 struct ata_eh_info *ehi =
548 &ap->link.eh_info;
549 ata_ehi_clear_desc(ehi);
550 ata_ehi_push_desc(ehi,
551 "status 0x%02X", status);
552
553 if (qc->err_mask == AC_ERR_DEV)
554 ata_port_abort(ap);
555 else
556 ata_port_freeze(ap);
557 }
506 handled = 1; 558 handled = 1;
507 } 559 }
508 } 560 }
@@ -652,9 +704,16 @@ static int adma_ata_init_one(struct pci_dev *pdev,
652 if (rc) 704 if (rc)
653 return rc; 705 return rc;
654 706
655 for (port_no = 0; port_no < ADMA_PORTS; ++port_no) 707 for (port_no = 0; port_no < ADMA_PORTS; ++port_no) {
656 adma_ata_setup_port(&host->ports[port_no]->ioaddr, 708 struct ata_port *ap = host->ports[port_no];
657 ADMA_ATA_REGS(mmio_base, port_no)); 709 void __iomem *port_base = ADMA_ATA_REGS(mmio_base, port_no);
710 unsigned int offset = port_base - mmio_base;
711
712 adma_ata_setup_port(&ap->ioaddr, port_base);
713
714 ata_port_pbar_desc(ap, ADMA_MMIO_BAR, -1, "mmio");
715 ata_port_pbar_desc(ap, ADMA_MMIO_BAR, offset, "port");
716 }
658 717
659 /* initialize adapter */ 718 /* initialize adapter */
660 adma_host_init(host, board_idx); 719 adma_host_init(host, board_idx);
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index fdbed8ecdf..08595f34b3 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -285,7 +285,7 @@ static void inic_irq_clear(struct ata_port *ap)
285static void inic_host_intr(struct ata_port *ap) 285static void inic_host_intr(struct ata_port *ap)
286{ 286{
287 void __iomem *port_base = inic_port_base(ap); 287 void __iomem *port_base = inic_port_base(ap);
288 struct ata_eh_info *ehi = &ap->eh_info; 288 struct ata_eh_info *ehi = &ap->link.eh_info;
289 u8 irq_stat; 289 u8 irq_stat;
290 290
291 /* fetch and clear irq */ 291 /* fetch and clear irq */
@@ -293,7 +293,8 @@ static void inic_host_intr(struct ata_port *ap)
293 writeb(irq_stat, port_base + PORT_IRQ_STAT); 293 writeb(irq_stat, port_base + PORT_IRQ_STAT);
294 294
295 if (likely(!(irq_stat & PIRQ_ERR))) { 295 if (likely(!(irq_stat & PIRQ_ERR))) {
296 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag); 296 struct ata_queued_cmd *qc =
297 ata_qc_from_tag(ap, ap->link.active_tag);
297 298
298 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { 299 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
299 ata_chk_status(ap); /* clear ATA interrupt */ 300 ata_chk_status(ap); /* clear ATA interrupt */
@@ -416,12 +417,13 @@ static void inic_thaw(struct ata_port *ap)
416 * SRST and SControl hardreset don't give valid signature on this 417 * SRST and SControl hardreset don't give valid signature on this
417 * controller. Only controller specific hardreset mechanism works. 418 * controller. Only controller specific hardreset mechanism works.
418 */ 419 */
419static int inic_hardreset(struct ata_port *ap, unsigned int *class, 420static int inic_hardreset(struct ata_link *link, unsigned int *class,
420 unsigned long deadline) 421 unsigned long deadline)
421{ 422{
423 struct ata_port *ap = link->ap;
422 void __iomem *port_base = inic_port_base(ap); 424 void __iomem *port_base = inic_port_base(ap);
423 void __iomem *idma_ctl = port_base + PORT_IDMA_CTL; 425 void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
424 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context); 426 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
425 u16 val; 427 u16 val;
426 int rc; 428 int rc;
427 429
@@ -434,15 +436,15 @@ static int inic_hardreset(struct ata_port *ap, unsigned int *class,
434 msleep(1); 436 msleep(1);
435 writew(val & ~IDMA_CTL_RST_ATA, idma_ctl); 437 writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);
436 438
437 rc = sata_phy_resume(ap, timing, deadline); 439 rc = sata_link_resume(link, timing, deadline);
438 if (rc) { 440 if (rc) {
439 ata_port_printk(ap, KERN_WARNING, "failed to resume " 441 ata_link_printk(link, KERN_WARNING, "failed to resume "
440 "link after reset (errno=%d)\n", rc); 442 "link after reset (errno=%d)\n", rc);
441 return rc; 443 return rc;
442 } 444 }
443 445
444 *class = ATA_DEV_NONE; 446 *class = ATA_DEV_NONE;
445 if (ata_port_online(ap)) { 447 if (ata_link_online(link)) {
446 struct ata_taskfile tf; 448 struct ata_taskfile tf;
447 449
448 /* wait a while before checking status */ 450 /* wait a while before checking status */
@@ -451,7 +453,7 @@ static int inic_hardreset(struct ata_port *ap, unsigned int *class,
451 rc = ata_wait_ready(ap, deadline); 453 rc = ata_wait_ready(ap, deadline);
452 /* link occupied, -ENODEV too is an error */ 454 /* link occupied, -ENODEV too is an error */
453 if (rc) { 455 if (rc) {
454 ata_port_printk(ap, KERN_WARNING, "device not ready " 456 ata_link_printk(link, KERN_WARNING, "device not ready "
455 "after hardreset (errno=%d)\n", rc); 457 "after hardreset (errno=%d)\n", rc);
456 return rc; 458 return rc;
457 } 459 }
@@ -550,7 +552,6 @@ static int inic_port_start(struct ata_port *ap)
550} 552}
551 553
552static struct ata_port_operations inic_port_ops = { 554static struct ata_port_operations inic_port_ops = {
553 .port_disable = ata_port_disable,
554 .tf_load = ata_tf_load, 555 .tf_load = ata_tf_load,
555 .tf_read = ata_tf_read, 556 .tf_read = ata_tf_read,
556 .check_status = ata_check_status, 557 .check_status = ata_check_status,
@@ -567,7 +568,6 @@ static struct ata_port_operations inic_port_ops = {
567 568
568 .irq_clear = inic_irq_clear, 569 .irq_clear = inic_irq_clear,
569 .irq_on = ata_irq_on, 570 .irq_on = ata_irq_on,
570 .irq_ack = ata_irq_ack,
571 571
572 .qc_prep = ata_qc_prep, 572 .qc_prep = ata_qc_prep,
573 .qc_issue = inic_qc_issue, 573 .qc_issue = inic_qc_issue,
@@ -693,16 +693,24 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
693 host->iomap = iomap = pcim_iomap_table(pdev); 693 host->iomap = iomap = pcim_iomap_table(pdev);
694 694
695 for (i = 0; i < NR_PORTS; i++) { 695 for (i = 0; i < NR_PORTS; i++) {
696 struct ata_ioports *port = &host->ports[i]->ioaddr; 696 struct ata_port *ap = host->ports[i];
697 void __iomem *port_base = iomap[MMIO_BAR] + i * PORT_SIZE; 697 struct ata_ioports *port = &ap->ioaddr;
698 unsigned int offset = i * PORT_SIZE;
698 699
699 port->cmd_addr = iomap[2 * i]; 700 port->cmd_addr = iomap[2 * i];
700 port->altstatus_addr = 701 port->altstatus_addr =
701 port->ctl_addr = (void __iomem *) 702 port->ctl_addr = (void __iomem *)
702 ((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS); 703 ((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
703 port->scr_addr = port_base + PORT_SCR; 704 port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR;
704 705
705 ata_std_ports(port); 706 ata_std_ports(port);
707
708 ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio");
709 ata_port_pbar_desc(ap, MMIO_BAR, offset, "port");
710 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
711 (unsigned long long)pci_resource_start(pdev, 2 * i),
712 (unsigned long long)pci_resource_start(pdev, (2 * i + 1)) |
713 ATA_PCI_CTL_OFS);
706 } 714 }
707 715
708 hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL); 716 hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index d9832e234e..4df8311968 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -483,8 +483,6 @@ static struct scsi_host_template mv6_sht = {
483}; 483};
484 484
485static const struct ata_port_operations mv5_ops = { 485static const struct ata_port_operations mv5_ops = {
486 .port_disable = ata_port_disable,
487
488 .tf_load = ata_tf_load, 486 .tf_load = ata_tf_load,
489 .tf_read = ata_tf_read, 487 .tf_read = ata_tf_read,
490 .check_status = ata_check_status, 488 .check_status = ata_check_status,
@@ -499,7 +497,6 @@ static const struct ata_port_operations mv5_ops = {
499 497
500 .irq_clear = mv_irq_clear, 498 .irq_clear = mv_irq_clear,
501 .irq_on = ata_irq_on, 499 .irq_on = ata_irq_on,
502 .irq_ack = ata_irq_ack,
503 500
504 .error_handler = mv_error_handler, 501 .error_handler = mv_error_handler,
505 .post_internal_cmd = mv_post_int_cmd, 502 .post_internal_cmd = mv_post_int_cmd,
@@ -514,8 +511,6 @@ static const struct ata_port_operations mv5_ops = {
514}; 511};
515 512
516static const struct ata_port_operations mv6_ops = { 513static const struct ata_port_operations mv6_ops = {
517 .port_disable = ata_port_disable,
518
519 .tf_load = ata_tf_load, 514 .tf_load = ata_tf_load,
520 .tf_read = ata_tf_read, 515 .tf_read = ata_tf_read,
521 .check_status = ata_check_status, 516 .check_status = ata_check_status,
@@ -530,7 +525,6 @@ static const struct ata_port_operations mv6_ops = {
530 525
531 .irq_clear = mv_irq_clear, 526 .irq_clear = mv_irq_clear,
532 .irq_on = ata_irq_on, 527 .irq_on = ata_irq_on,
533 .irq_ack = ata_irq_ack,
534 528
535 .error_handler = mv_error_handler, 529 .error_handler = mv_error_handler,
536 .post_internal_cmd = mv_post_int_cmd, 530 .post_internal_cmd = mv_post_int_cmd,
@@ -545,8 +539,6 @@ static const struct ata_port_operations mv6_ops = {
545}; 539};
546 540
547static const struct ata_port_operations mv_iie_ops = { 541static const struct ata_port_operations mv_iie_ops = {
548 .port_disable = ata_port_disable,
549
550 .tf_load = ata_tf_load, 542 .tf_load = ata_tf_load,
551 .tf_read = ata_tf_read, 543 .tf_read = ata_tf_read,
552 .check_status = ata_check_status, 544 .check_status = ata_check_status,
@@ -561,7 +553,6 @@ static const struct ata_port_operations mv_iie_ops = {
561 553
562 .irq_clear = mv_irq_clear, 554 .irq_clear = mv_irq_clear,
563 .irq_on = ata_irq_on, 555 .irq_on = ata_irq_on,
564 .irq_ack = ata_irq_ack,
565 556
566 .error_handler = mv_error_handler, 557 .error_handler = mv_error_handler,
567 .post_internal_cmd = mv_post_int_cmd, 558 .post_internal_cmd = mv_post_int_cmd,
@@ -1415,7 +1406,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1415 struct mv_host_priv *hpriv = ap->host->private_data; 1406 struct mv_host_priv *hpriv = ap->host->private_data;
1416 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); 1407 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1417 unsigned int action = 0, err_mask = 0; 1408 unsigned int action = 0, err_mask = 0;
1418 struct ata_eh_info *ehi = &ap->eh_info; 1409 struct ata_eh_info *ehi = &ap->link.eh_info;
1419 1410
1420 ata_ehi_clear_desc(ehi); 1411 ata_ehi_clear_desc(ehi);
1421 1412
@@ -1423,8 +1414,8 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1423 /* just a guess: do we need to do this? should we 1414 /* just a guess: do we need to do this? should we
1424 * expand this, and do it in all cases? 1415 * expand this, and do it in all cases?
1425 */ 1416 */
1426 sata_scr_read(ap, SCR_ERROR, &serr); 1417 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1427 sata_scr_write_flush(ap, SCR_ERROR, serr); 1418 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1428 } 1419 }
1429 1420
1430 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1421 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
@@ -1468,8 +1459,8 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1468 } 1459 }
1469 1460
1470 if (edma_err_cause & EDMA_ERR_SERR) { 1461 if (edma_err_cause & EDMA_ERR_SERR) {
1471 sata_scr_read(ap, SCR_ERROR, &serr); 1462 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1472 sata_scr_write_flush(ap, SCR_ERROR, serr); 1463 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1473 err_mask = AC_ERR_ATA_BUS; 1464 err_mask = AC_ERR_ATA_BUS;
1474 action |= ATA_EH_HARDRESET; 1465 action |= ATA_EH_HARDRESET;
1475 } 1466 }
@@ -1508,7 +1499,7 @@ static void mv_intr_pio(struct ata_port *ap)
1508 return; 1499 return;
1509 1500
1510 /* get active ATA command */ 1501 /* get active ATA command */
1511 qc = ata_qc_from_tag(ap, ap->active_tag); 1502 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1512 if (unlikely(!qc)) /* no active tag */ 1503 if (unlikely(!qc)) /* no active tag */
1513 return; 1504 return;
1514 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */ 1505 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
@@ -1543,7 +1534,7 @@ static void mv_intr_edma(struct ata_port *ap)
1543 1534
1544 /* 50xx: get active ATA command */ 1535 /* 50xx: get active ATA command */
1545 if (IS_GEN_I(hpriv)) 1536 if (IS_GEN_I(hpriv))
1546 tag = ap->active_tag; 1537 tag = ap->link.active_tag;
1547 1538
1548 /* Gen II/IIE: get active ATA command via tag, to enable 1539 /* Gen II/IIE: get active ATA command via tag, to enable
1549 * support for queueing. this works transparently for 1540 * support for queueing. this works transparently for
@@ -1646,7 +1637,7 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1646 if (unlikely(have_err_bits)) { 1637 if (unlikely(have_err_bits)) {
1647 struct ata_queued_cmd *qc; 1638 struct ata_queued_cmd *qc;
1648 1639
1649 qc = ata_qc_from_tag(ap, ap->active_tag); 1640 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1650 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) 1641 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1651 continue; 1642 continue;
1652 1643
@@ -1687,15 +1678,15 @@ static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1687 1678
1688 for (i = 0; i < host->n_ports; i++) { 1679 for (i = 0; i < host->n_ports; i++) {
1689 ap = host->ports[i]; 1680 ap = host->ports[i];
1690 if (!ata_port_offline(ap)) { 1681 if (!ata_link_offline(&ap->link)) {
1691 ehi = &ap->eh_info; 1682 ehi = &ap->link.eh_info;
1692 ata_ehi_clear_desc(ehi); 1683 ata_ehi_clear_desc(ehi);
1693 if (!printed++) 1684 if (!printed++)
1694 ata_ehi_push_desc(ehi, 1685 ata_ehi_push_desc(ehi,
1695 "PCI err cause 0x%08x", err_cause); 1686 "PCI err cause 0x%08x", err_cause);
1696 err_mask = AC_ERR_HOST_BUS; 1687 err_mask = AC_ERR_HOST_BUS;
1697 ehi->action = ATA_EH_HARDRESET; 1688 ehi->action = ATA_EH_HARDRESET;
1698 qc = ata_qc_from_tag(ap, ap->active_tag); 1689 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1699 if (qc) 1690 if (qc)
1700 qc->err_mask |= err_mask; 1691 qc->err_mask |= err_mask;
1701 else 1692 else
@@ -2198,14 +2189,14 @@ static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2198 2189
2199 /* Issue COMRESET via SControl */ 2190 /* Issue COMRESET via SControl */
2200comreset_retry: 2191comreset_retry:
2201 sata_scr_write_flush(ap, SCR_CONTROL, 0x301); 2192 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2202 msleep(1); 2193 msleep(1);
2203 2194
2204 sata_scr_write_flush(ap, SCR_CONTROL, 0x300); 2195 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2205 msleep(20); 2196 msleep(20);
2206 2197
2207 do { 2198 do {
2208 sata_scr_read(ap, SCR_STATUS, &sstatus); 2199 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2209 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0)) 2200 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2210 break; 2201 break;
2211 2202
@@ -2230,7 +2221,7 @@ comreset_retry:
2230 } 2221 }
2231#endif 2222#endif
2232 2223
2233 if (ata_port_offline(ap)) { 2224 if (ata_link_offline(&ap->link)) {
2234 *class = ATA_DEV_NONE; 2225 *class = ATA_DEV_NONE;
2235 return; 2226 return;
2236 } 2227 }
@@ -2257,7 +2248,7 @@ comreset_retry:
2257 */ 2248 */
2258 2249
2259 /* finally, read device signature from TF registers */ 2250 /* finally, read device signature from TF registers */
2260 *class = ata_dev_try_classify(ap, 0, NULL); 2251 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2261 2252
2262 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 2253 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2263 2254
@@ -2266,10 +2257,11 @@ comreset_retry:
2266 VPRINTK("EXIT\n"); 2257 VPRINTK("EXIT\n");
2267} 2258}
2268 2259
2269static int mv_prereset(struct ata_port *ap, unsigned long deadline) 2260static int mv_prereset(struct ata_link *link, unsigned long deadline)
2270{ 2261{
2262 struct ata_port *ap = link->ap;
2271 struct mv_port_priv *pp = ap->private_data; 2263 struct mv_port_priv *pp = ap->private_data;
2272 struct ata_eh_context *ehc = &ap->eh_context; 2264 struct ata_eh_context *ehc = &link->eh_context;
2273 int rc; 2265 int rc;
2274 2266
2275 rc = mv_stop_dma(ap); 2267 rc = mv_stop_dma(ap);
@@ -2285,7 +2277,7 @@ static int mv_prereset(struct ata_port *ap, unsigned long deadline)
2285 if (ehc->i.action & ATA_EH_HARDRESET) 2277 if (ehc->i.action & ATA_EH_HARDRESET)
2286 return 0; 2278 return 0;
2287 2279
2288 if (ata_port_online(ap)) 2280 if (ata_link_online(link))
2289 rc = ata_wait_ready(ap, deadline); 2281 rc = ata_wait_ready(ap, deadline);
2290 else 2282 else
2291 rc = -ENODEV; 2283 rc = -ENODEV;
@@ -2293,9 +2285,10 @@ static int mv_prereset(struct ata_port *ap, unsigned long deadline)
2293 return rc; 2285 return rc;
2294} 2286}
2295 2287
2296static int mv_hardreset(struct ata_port *ap, unsigned int *class, 2288static int mv_hardreset(struct ata_link *link, unsigned int *class,
2297 unsigned long deadline) 2289 unsigned long deadline)
2298{ 2290{
2291 struct ata_port *ap = link->ap;
2299 struct mv_host_priv *hpriv = ap->host->private_data; 2292 struct mv_host_priv *hpriv = ap->host->private_data;
2300 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; 2293 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2301 2294
@@ -2308,16 +2301,17 @@ static int mv_hardreset(struct ata_port *ap, unsigned int *class,
2308 return 0; 2301 return 0;
2309} 2302}
2310 2303
2311static void mv_postreset(struct ata_port *ap, unsigned int *classes) 2304static void mv_postreset(struct ata_link *link, unsigned int *classes)
2312{ 2305{
2306 struct ata_port *ap = link->ap;
2313 u32 serr; 2307 u32 serr;
2314 2308
2315 /* print link status */ 2309 /* print link status */
2316 sata_print_link_status(ap); 2310 sata_print_link_status(link);
2317 2311
2318 /* clear SError */ 2312 /* clear SError */
2319 sata_scr_read(ap, SCR_ERROR, &serr); 2313 sata_scr_read(link, SCR_ERROR, &serr);
2320 sata_scr_write_flush(ap, SCR_ERROR, serr); 2314 sata_scr_write_flush(link, SCR_ERROR, serr);
2321 2315
2322 /* bail out if no device is present */ 2316 /* bail out if no device is present */
2323 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { 2317 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
@@ -2590,8 +2584,14 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2590 } 2584 }
2591 2585
2592 for (port = 0; port < host->n_ports; port++) { 2586 for (port = 0; port < host->n_ports; port++) {
2587 struct ata_port *ap = host->ports[port];
2593 void __iomem *port_mmio = mv_port_base(mmio, port); 2588 void __iomem *port_mmio = mv_port_base(mmio, port);
2594 mv_port_init(&host->ports[port]->ioaddr, port_mmio); 2589 unsigned int offset = port_mmio - mmio;
2590
2591 mv_port_init(&ap->ioaddr, port_mmio);
2592
2593 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2594 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2595 } 2595 }
2596 2596
2597 for (hc = 0; hc < n_hc; hc++) { 2597 for (hc = 0; hc < n_hc; hc++) {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 40dc731398..40557fe2ff 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -340,7 +340,6 @@ static struct scsi_host_template nv_adma_sht = {
340}; 340};
341 341
342static const struct ata_port_operations nv_generic_ops = { 342static const struct ata_port_operations nv_generic_ops = {
343 .port_disable = ata_port_disable,
344 .tf_load = ata_tf_load, 343 .tf_load = ata_tf_load,
345 .tf_read = ata_tf_read, 344 .tf_read = ata_tf_read,
346 .exec_command = ata_exec_command, 345 .exec_command = ata_exec_command,
@@ -359,14 +358,12 @@ static const struct ata_port_operations nv_generic_ops = {
359 .data_xfer = ata_data_xfer, 358 .data_xfer = ata_data_xfer,
360 .irq_clear = ata_bmdma_irq_clear, 359 .irq_clear = ata_bmdma_irq_clear,
361 .irq_on = ata_irq_on, 360 .irq_on = ata_irq_on,
362 .irq_ack = ata_irq_ack,
363 .scr_read = nv_scr_read, 361 .scr_read = nv_scr_read,
364 .scr_write = nv_scr_write, 362 .scr_write = nv_scr_write,
365 .port_start = ata_port_start, 363 .port_start = ata_port_start,
366}; 364};
367 365
368static const struct ata_port_operations nv_nf2_ops = { 366static const struct ata_port_operations nv_nf2_ops = {
369 .port_disable = ata_port_disable,
370 .tf_load = ata_tf_load, 367 .tf_load = ata_tf_load,
371 .tf_read = ata_tf_read, 368 .tf_read = ata_tf_read,
372 .exec_command = ata_exec_command, 369 .exec_command = ata_exec_command,
@@ -385,14 +382,12 @@ static const struct ata_port_operations nv_nf2_ops = {
385 .data_xfer = ata_data_xfer, 382 .data_xfer = ata_data_xfer,
386 .irq_clear = ata_bmdma_irq_clear, 383 .irq_clear = ata_bmdma_irq_clear,
387 .irq_on = ata_irq_on, 384 .irq_on = ata_irq_on,
388 .irq_ack = ata_irq_ack,
389 .scr_read = nv_scr_read, 385 .scr_read = nv_scr_read,
390 .scr_write = nv_scr_write, 386 .scr_write = nv_scr_write,
391 .port_start = ata_port_start, 387 .port_start = ata_port_start,
392}; 388};
393 389
394static const struct ata_port_operations nv_ck804_ops = { 390static const struct ata_port_operations nv_ck804_ops = {
395 .port_disable = ata_port_disable,
396 .tf_load = ata_tf_load, 391 .tf_load = ata_tf_load,
397 .tf_read = ata_tf_read, 392 .tf_read = ata_tf_read,
398 .exec_command = ata_exec_command, 393 .exec_command = ata_exec_command,
@@ -411,7 +406,6 @@ static const struct ata_port_operations nv_ck804_ops = {
411 .data_xfer = ata_data_xfer, 406 .data_xfer = ata_data_xfer,
412 .irq_clear = ata_bmdma_irq_clear, 407 .irq_clear = ata_bmdma_irq_clear,
413 .irq_on = ata_irq_on, 408 .irq_on = ata_irq_on,
414 .irq_ack = ata_irq_ack,
415 .scr_read = nv_scr_read, 409 .scr_read = nv_scr_read,
416 .scr_write = nv_scr_write, 410 .scr_write = nv_scr_write,
417 .port_start = ata_port_start, 411 .port_start = ata_port_start,
@@ -419,7 +413,6 @@ static const struct ata_port_operations nv_ck804_ops = {
419}; 413};
420 414
421static const struct ata_port_operations nv_adma_ops = { 415static const struct ata_port_operations nv_adma_ops = {
422 .port_disable = ata_port_disable,
423 .tf_load = ata_tf_load, 416 .tf_load = ata_tf_load,
424 .tf_read = nv_adma_tf_read, 417 .tf_read = nv_adma_tf_read,
425 .check_atapi_dma = nv_adma_check_atapi_dma, 418 .check_atapi_dma = nv_adma_check_atapi_dma,
@@ -430,6 +423,7 @@ static const struct ata_port_operations nv_adma_ops = {
430 .bmdma_start = ata_bmdma_start, 423 .bmdma_start = ata_bmdma_start,
431 .bmdma_stop = ata_bmdma_stop, 424 .bmdma_stop = ata_bmdma_stop,
432 .bmdma_status = ata_bmdma_status, 425 .bmdma_status = ata_bmdma_status,
426 .qc_defer = ata_std_qc_defer,
433 .qc_prep = nv_adma_qc_prep, 427 .qc_prep = nv_adma_qc_prep,
434 .qc_issue = nv_adma_qc_issue, 428 .qc_issue = nv_adma_qc_issue,
435 .freeze = nv_adma_freeze, 429 .freeze = nv_adma_freeze,
@@ -439,7 +433,6 @@ static const struct ata_port_operations nv_adma_ops = {
439 .data_xfer = ata_data_xfer, 433 .data_xfer = ata_data_xfer,
440 .irq_clear = nv_adma_irq_clear, 434 .irq_clear = nv_adma_irq_clear,
441 .irq_on = ata_irq_on, 435 .irq_on = ata_irq_on,
442 .irq_ack = ata_irq_ack,
443 .scr_read = nv_scr_read, 436 .scr_read = nv_scr_read,
444 .scr_write = nv_scr_write, 437 .scr_write = nv_scr_write,
445 .port_start = nv_adma_port_start, 438 .port_start = nv_adma_port_start,
@@ -455,8 +448,8 @@ static const struct ata_port_info nv_port_info[] = {
455 /* generic */ 448 /* generic */
456 { 449 {
457 .sht = &nv_sht, 450 .sht = &nv_sht,
458 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 451 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
459 ATA_FLAG_HRST_TO_RESUME, 452 .link_flags = ATA_LFLAG_HRST_TO_RESUME,
460 .pio_mask = NV_PIO_MASK, 453 .pio_mask = NV_PIO_MASK,
461 .mwdma_mask = NV_MWDMA_MASK, 454 .mwdma_mask = NV_MWDMA_MASK,
462 .udma_mask = NV_UDMA_MASK, 455 .udma_mask = NV_UDMA_MASK,
@@ -466,8 +459,8 @@ static const struct ata_port_info nv_port_info[] = {
466 /* nforce2/3 */ 459 /* nforce2/3 */
467 { 460 {
468 .sht = &nv_sht, 461 .sht = &nv_sht,
469 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 462 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
470 ATA_FLAG_HRST_TO_RESUME, 463 .link_flags = ATA_LFLAG_HRST_TO_RESUME,
471 .pio_mask = NV_PIO_MASK, 464 .pio_mask = NV_PIO_MASK,
472 .mwdma_mask = NV_MWDMA_MASK, 465 .mwdma_mask = NV_MWDMA_MASK,
473 .udma_mask = NV_UDMA_MASK, 466 .udma_mask = NV_UDMA_MASK,
@@ -477,8 +470,8 @@ static const struct ata_port_info nv_port_info[] = {
477 /* ck804 */ 470 /* ck804 */
478 { 471 {
479 .sht = &nv_sht, 472 .sht = &nv_sht,
480 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 473 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
481 ATA_FLAG_HRST_TO_RESUME, 474 .link_flags = ATA_LFLAG_HRST_TO_RESUME,
482 .pio_mask = NV_PIO_MASK, 475 .pio_mask = NV_PIO_MASK,
483 .mwdma_mask = NV_MWDMA_MASK, 476 .mwdma_mask = NV_MWDMA_MASK,
484 .udma_mask = NV_UDMA_MASK, 477 .udma_mask = NV_UDMA_MASK,
@@ -489,8 +482,8 @@ static const struct ata_port_info nv_port_info[] = {
489 { 482 {
490 .sht = &nv_adma_sht, 483 .sht = &nv_adma_sht,
491 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 484 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
492 ATA_FLAG_HRST_TO_RESUME |
493 ATA_FLAG_MMIO | ATA_FLAG_NCQ, 485 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
486 .link_flags = ATA_LFLAG_HRST_TO_RESUME,
494 .pio_mask = NV_PIO_MASK, 487 .pio_mask = NV_PIO_MASK,
495 .mwdma_mask = NV_MWDMA_MASK, 488 .mwdma_mask = NV_MWDMA_MASK,
496 .udma_mask = NV_UDMA_MASK, 489 .udma_mask = NV_UDMA_MASK,
@@ -594,7 +587,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
594 /* Not a proper libata device, ignore */ 587 /* Not a proper libata device, ignore */
595 return rc; 588 return rc;
596 589
597 if (ap->device[sdev->id].class == ATA_DEV_ATAPI) { 590 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
598 /* 591 /*
599 * NVIDIA reports that ADMA mode does not support ATAPI commands. 592 * NVIDIA reports that ADMA mode does not support ATAPI commands.
600 * Therefore ATAPI commands are sent through the legacy interface. 593 * Therefore ATAPI commands are sent through the legacy interface.
@@ -711,7 +704,7 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
711 flags & (NV_CPB_RESP_ATA_ERR | 704 flags & (NV_CPB_RESP_ATA_ERR |
712 NV_CPB_RESP_CMD_ERR | 705 NV_CPB_RESP_CMD_ERR |
713 NV_CPB_RESP_CPB_ERR)))) { 706 NV_CPB_RESP_CPB_ERR)))) {
714 struct ata_eh_info *ehi = &ap->eh_info; 707 struct ata_eh_info *ehi = &ap->link.eh_info;
715 int freeze = 0; 708 int freeze = 0;
716 709
717 ata_ehi_clear_desc(ehi); 710 ata_ehi_clear_desc(ehi);
@@ -747,7 +740,7 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
747 DPRINTK("Completing qc from tag %d\n",cpb_num); 740 DPRINTK("Completing qc from tag %d\n",cpb_num);
748 ata_qc_complete(qc); 741 ata_qc_complete(qc);
749 } else { 742 } else {
750 struct ata_eh_info *ehi = &ap->eh_info; 743 struct ata_eh_info *ehi = &ap->link.eh_info;
751 /* Notifier bits set without a command may indicate the drive 744 /* Notifier bits set without a command may indicate the drive
752 is misbehaving. Raise host state machine violation on this 745 is misbehaving. Raise host state machine violation on this
753 condition. */ 746 condition. */
@@ -764,7 +757,7 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
764 757
765static int nv_host_intr(struct ata_port *ap, u8 irq_stat) 758static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
766{ 759{
767 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag); 760 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
768 761
769 /* freeze if hotplugged */ 762 /* freeze if hotplugged */
770 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) { 763 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
@@ -817,7 +810,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
817 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { 810 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
818 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) 811 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
819 >> (NV_INT_PORT_SHIFT * i); 812 >> (NV_INT_PORT_SHIFT * i);
820 if(ata_tag_valid(ap->active_tag)) 813 if(ata_tag_valid(ap->link.active_tag))
821 /** NV_INT_DEV indication seems unreliable at times 814 /** NV_INT_DEV indication seems unreliable at times
822 at least in ADMA mode. Force it on always when a 815 at least in ADMA mode. Force it on always when a
823 command is active, to prevent losing interrupts. */ 816 command is active, to prevent losing interrupts. */
@@ -852,7 +845,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
852 NV_ADMA_STAT_HOTUNPLUG | 845 NV_ADMA_STAT_HOTUNPLUG |
853 NV_ADMA_STAT_TIMEOUT | 846 NV_ADMA_STAT_TIMEOUT |
854 NV_ADMA_STAT_SERROR))) { 847 NV_ADMA_STAT_SERROR))) {
855 struct ata_eh_info *ehi = &ap->eh_info; 848 struct ata_eh_info *ehi = &ap->link.eh_info;
856 849
857 ata_ehi_clear_desc(ehi); 850 ata_ehi_clear_desc(ehi);
858 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status ); 851 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status );
@@ -879,10 +872,10 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
879 u32 check_commands; 872 u32 check_commands;
880 int pos, error = 0; 873 int pos, error = 0;
881 874
882 if(ata_tag_valid(ap->active_tag)) 875 if(ata_tag_valid(ap->link.active_tag))
883 check_commands = 1 << ap->active_tag; 876 check_commands = 1 << ap->link.active_tag;
884 else 877 else
885 check_commands = ap->sactive; 878 check_commands = ap->link.sactive;
886 879
887 /** Check CPBs for completed commands */ 880 /** Check CPBs for completed commands */
888 while ((pos = ffs(check_commands)) && !error) { 881 while ((pos = ffs(check_commands)) && !error) {
@@ -1333,7 +1326,7 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1333 !(ap->flags & ATA_FLAG_DISABLED)) { 1326 !(ap->flags & ATA_FLAG_DISABLED)) {
1334 struct ata_queued_cmd *qc; 1327 struct ata_queued_cmd *qc;
1335 1328
1336 qc = ata_qc_from_tag(ap, ap->active_tag); 1329 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1337 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) 1330 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1338 handled += ata_host_intr(ap, qc); 1331 handled += ata_host_intr(ap, qc);
1339 else 1332 else
@@ -1459,7 +1452,7 @@ static void nv_ck804_thaw(struct ata_port *ap)
1459 writeb(mask, mmio_base + NV_INT_ENABLE_CK804); 1452 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1460} 1453}
1461 1454
1462static int nv_hardreset(struct ata_port *ap, unsigned int *class, 1455static int nv_hardreset(struct ata_link *link, unsigned int *class,
1463 unsigned long deadline) 1456 unsigned long deadline)
1464{ 1457{
1465 unsigned int dummy; 1458 unsigned int dummy;
@@ -1468,7 +1461,7 @@ static int nv_hardreset(struct ata_port *ap, unsigned int *class,
1468 * some controllers. Don't classify on hardreset. For more 1461 * some controllers. Don't classify on hardreset. For more
1469 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352 1462 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
1470 */ 1463 */
1471 return sata_std_hardreset(ap, &dummy, deadline); 1464 return sata_std_hardreset(link, &dummy, deadline);
1472} 1465}
1473 1466
1474static void nv_error_handler(struct ata_port *ap) 1467static void nv_error_handler(struct ata_port *ap)
@@ -1485,7 +1478,7 @@ static void nv_adma_error_handler(struct ata_port *ap)
1485 int i; 1478 int i;
1486 u16 tmp; 1479 u16 tmp;
1487 1480
1488 if(ata_tag_valid(ap->active_tag) || ap->sactive) { 1481 if(ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1489 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER); 1482 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1490 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); 1483 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1491 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); 1484 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
@@ -1501,8 +1494,8 @@ static void nv_adma_error_handler(struct ata_port *ap)
1501 1494
1502 for( i=0;i<NV_ADMA_MAX_CPBS;i++) { 1495 for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
1503 struct nv_adma_cpb *cpb = &pp->cpb[i]; 1496 struct nv_adma_cpb *cpb = &pp->cpb[i];
1504 if( (ata_tag_valid(ap->active_tag) && i == ap->active_tag) || 1497 if( (ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1505 ap->sactive & (1 << i) ) 1498 ap->link.sactive & (1 << i) )
1506 ata_port_printk(ap, KERN_ERR, 1499 ata_port_printk(ap, KERN_ERR,
1507 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n", 1500 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1508 i, cpb->ctl_flags, cpb->resp_flags); 1501 i, cpb->ctl_flags, cpb->resp_flags);
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 25698cf0dc..903213153b 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -167,7 +167,6 @@ static struct scsi_host_template pdc_ata_sht = {
167}; 167};
168 168
169static const struct ata_port_operations pdc_sata_ops = { 169static const struct ata_port_operations pdc_sata_ops = {
170 .port_disable = ata_port_disable,
171 .tf_load = pdc_tf_load_mmio, 170 .tf_load = pdc_tf_load_mmio,
172 .tf_read = ata_tf_read, 171 .tf_read = ata_tf_read,
173 .check_status = ata_check_status, 172 .check_status = ata_check_status,
@@ -185,7 +184,6 @@ static const struct ata_port_operations pdc_sata_ops = {
185 .data_xfer = ata_data_xfer, 184 .data_xfer = ata_data_xfer,
186 .irq_clear = pdc_irq_clear, 185 .irq_clear = pdc_irq_clear,
187 .irq_on = ata_irq_on, 186 .irq_on = ata_irq_on,
188 .irq_ack = ata_irq_ack,
189 187
190 .scr_read = pdc_sata_scr_read, 188 .scr_read = pdc_sata_scr_read,
191 .scr_write = pdc_sata_scr_write, 189 .scr_write = pdc_sata_scr_write,
@@ -194,7 +192,6 @@ static const struct ata_port_operations pdc_sata_ops = {
194 192
195/* First-generation chips need a more restrictive ->check_atapi_dma op */ 193/* First-generation chips need a more restrictive ->check_atapi_dma op */
196static const struct ata_port_operations pdc_old_sata_ops = { 194static const struct ata_port_operations pdc_old_sata_ops = {
197 .port_disable = ata_port_disable,
198 .tf_load = pdc_tf_load_mmio, 195 .tf_load = pdc_tf_load_mmio,
199 .tf_read = ata_tf_read, 196 .tf_read = ata_tf_read,
200 .check_status = ata_check_status, 197 .check_status = ata_check_status,
@@ -212,7 +209,6 @@ static const struct ata_port_operations pdc_old_sata_ops = {
212 .data_xfer = ata_data_xfer, 209 .data_xfer = ata_data_xfer,
213 .irq_clear = pdc_irq_clear, 210 .irq_clear = pdc_irq_clear,
214 .irq_on = ata_irq_on, 211 .irq_on = ata_irq_on,
215 .irq_ack = ata_irq_ack,
216 212
217 .scr_read = pdc_sata_scr_read, 213 .scr_read = pdc_sata_scr_read,
218 .scr_write = pdc_sata_scr_write, 214 .scr_write = pdc_sata_scr_write,
@@ -220,7 +216,6 @@ static const struct ata_port_operations pdc_old_sata_ops = {
220}; 216};
221 217
222static const struct ata_port_operations pdc_pata_ops = { 218static const struct ata_port_operations pdc_pata_ops = {
223 .port_disable = ata_port_disable,
224 .tf_load = pdc_tf_load_mmio, 219 .tf_load = pdc_tf_load_mmio,
225 .tf_read = ata_tf_read, 220 .tf_read = ata_tf_read,
226 .check_status = ata_check_status, 221 .check_status = ata_check_status,
@@ -238,7 +233,6 @@ static const struct ata_port_operations pdc_pata_ops = {
238 .data_xfer = ata_data_xfer, 233 .data_xfer = ata_data_xfer,
239 .irq_clear = pdc_irq_clear, 234 .irq_clear = pdc_irq_clear,
240 .irq_on = ata_irq_on, 235 .irq_on = ata_irq_on,
241 .irq_ack = ata_irq_ack,
242 236
243 .port_start = pdc_common_port_start, 237 .port_start = pdc_common_port_start,
244}; 238};
@@ -475,7 +469,7 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
475 buf32[2] = 0; /* no next-packet */ 469 buf32[2] = 0; /* no next-packet */
476 470
477 /* select drive */ 471 /* select drive */
478 if (sata_scr_valid(ap)) { 472 if (sata_scr_valid(&ap->link)) {
479 dev_sel = PDC_DEVICE_SATA; 473 dev_sel = PDC_DEVICE_SATA;
480 } else { 474 } else {
481 dev_sel = ATA_DEVICE_OBS; 475 dev_sel = ATA_DEVICE_OBS;
@@ -626,7 +620,7 @@ static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
626static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc, 620static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
627 u32 port_status, u32 err_mask) 621 u32 port_status, u32 err_mask)
628{ 622{
629 struct ata_eh_info *ehi = &ap->eh_info; 623 struct ata_eh_info *ehi = &ap->link.eh_info;
630 unsigned int ac_err_mask = 0; 624 unsigned int ac_err_mask = 0;
631 625
632 ata_ehi_clear_desc(ehi); 626 ata_ehi_clear_desc(ehi);
@@ -643,7 +637,7 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
643 | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR)) 637 | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR))
644 ac_err_mask |= AC_ERR_HOST_BUS; 638 ac_err_mask |= AC_ERR_HOST_BUS;
645 639
646 if (sata_scr_valid(ap)) { 640 if (sata_scr_valid(&ap->link)) {
647 u32 serror; 641 u32 serror;
648 642
649 pdc_sata_scr_read(ap, SCR_ERROR, &serror); 643 pdc_sata_scr_read(ap, SCR_ERROR, &serror);
@@ -773,7 +767,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance)
773 tmp = hotplug_status & (0x11 << ata_no); 767 tmp = hotplug_status & (0x11 << ata_no);
774 if (tmp && ap && 768 if (tmp && ap &&
775 !(ap->flags & ATA_FLAG_DISABLED)) { 769 !(ap->flags & ATA_FLAG_DISABLED)) {
776 struct ata_eh_info *ehi = &ap->eh_info; 770 struct ata_eh_info *ehi = &ap->link.eh_info;
777 ata_ehi_clear_desc(ehi); 771 ata_ehi_clear_desc(ehi);
778 ata_ehi_hotplugged(ehi); 772 ata_ehi_hotplugged(ehi);
779 ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp); 773 ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp);
@@ -788,7 +782,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance)
788 !(ap->flags & ATA_FLAG_DISABLED)) { 782 !(ap->flags & ATA_FLAG_DISABLED)) {
789 struct ata_queued_cmd *qc; 783 struct ata_queued_cmd *qc;
790 784
791 qc = ata_qc_from_tag(ap, ap->active_tag); 785 qc = ata_qc_from_tag(ap, ap->link.active_tag);
792 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) 786 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
793 handled += pdc_host_intr(ap, qc); 787 handled += pdc_host_intr(ap, qc);
794 } 788 }
@@ -1009,10 +1003,15 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
1009 1003
1010 is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags); 1004 is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
1011 for (i = 0; i < host->n_ports; i++) { 1005 for (i = 0; i < host->n_ports; i++) {
1006 struct ata_port *ap = host->ports[i];
1012 unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4); 1007 unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
1013 pdc_ata_setup_port(host->ports[i], 1008 unsigned int port_offset = 0x200 + ata_no * 0x80;
1014 base + 0x200 + ata_no * 0x80, 1009 unsigned int scr_offset = 0x400 + ata_no * 0x100;
1015 base + 0x400 + ata_no * 0x100); 1010
1011 pdc_ata_setup_port(ap, base + port_offset, base + scr_offset);
1012
1013 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1014 ata_port_pbar_desc(ap, PDC_MMIO_BAR, port_offset, "port");
1016 } 1015 }
1017 1016
1018 /* initialize adapter */ 1017 /* initialize adapter */
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 5e1dfdda69..c4c4cd29ee 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -145,7 +145,6 @@ static struct scsi_host_template qs_ata_sht = {
145}; 145};
146 146
147static const struct ata_port_operations qs_ata_ops = { 147static const struct ata_port_operations qs_ata_ops = {
148 .port_disable = ata_port_disable,
149 .tf_load = ata_tf_load, 148 .tf_load = ata_tf_load,
150 .tf_read = ata_tf_read, 149 .tf_read = ata_tf_read,
151 .check_status = ata_check_status, 150 .check_status = ata_check_status,
@@ -159,7 +158,6 @@ static const struct ata_port_operations qs_ata_ops = {
159 .eng_timeout = qs_eng_timeout, 158 .eng_timeout = qs_eng_timeout,
160 .irq_clear = qs_irq_clear, 159 .irq_clear = qs_irq_clear,
161 .irq_on = ata_irq_on, 160 .irq_on = ata_irq_on,
162 .irq_ack = ata_irq_ack,
163 .scr_read = qs_scr_read, 161 .scr_read = qs_scr_read,
164 .scr_write = qs_scr_write, 162 .scr_write = qs_scr_write,
165 .port_start = qs_port_start, 163 .port_start = qs_port_start,
@@ -404,7 +402,7 @@ static inline unsigned int qs_intr_pkt(struct ata_host *host)
404 struct qs_port_priv *pp = ap->private_data; 402 struct qs_port_priv *pp = ap->private_data;
405 if (!pp || pp->state != qs_state_pkt) 403 if (!pp || pp->state != qs_state_pkt)
406 continue; 404 continue;
407 qc = ata_qc_from_tag(ap, ap->active_tag); 405 qc = ata_qc_from_tag(ap, ap->link.active_tag);
408 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { 406 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
409 switch (sHST) { 407 switch (sHST) {
410 case 0: /* successful CPB */ 408 case 0: /* successful CPB */
@@ -437,7 +435,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host)
437 struct qs_port_priv *pp = ap->private_data; 435 struct qs_port_priv *pp = ap->private_data;
438 if (!pp || pp->state != qs_state_mmio) 436 if (!pp || pp->state != qs_state_mmio)
439 continue; 437 continue;
440 qc = ata_qc_from_tag(ap, ap->active_tag); 438 qc = ata_qc_from_tag(ap, ap->link.active_tag);
441 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { 439 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
442 440
443 /* check main status, clearing INTRQ */ 441 /* check main status, clearing INTRQ */
@@ -637,9 +635,14 @@ static int qs_ata_init_one(struct pci_dev *pdev,
637 return rc; 635 return rc;
638 636
639 for (port_no = 0; port_no < host->n_ports; ++port_no) { 637 for (port_no = 0; port_no < host->n_ports; ++port_no) {
640 void __iomem *chan = 638 struct ata_port *ap = host->ports[port_no];
641 host->iomap[QS_MMIO_BAR] + (port_no * 0x4000); 639 unsigned int offset = port_no * 0x4000;
642 qs_ata_setup_port(&host->ports[port_no]->ioaddr, chan); 640 void __iomem *chan = host->iomap[QS_MMIO_BAR] + offset;
641
642 qs_ata_setup_port(&ap->ioaddr, chan);
643
644 ata_port_pbar_desc(ap, QS_MMIO_BAR, -1, "mmio");
645 ata_port_pbar_desc(ap, QS_MMIO_BAR, offset, "port");
643 } 646 }
644 647
645 /* initialize adapter */ 648 /* initialize adapter */
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 8c72e714b4..ea3a0ab7e0 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -59,7 +59,8 @@ enum {
59 SIL_FLAG_MOD15WRITE = (1 << 30), 59 SIL_FLAG_MOD15WRITE = (1 << 30),
60 60
61 SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 61 SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
62 ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME, 62 ATA_FLAG_MMIO,
63 SIL_DFL_LINK_FLAGS = ATA_LFLAG_HRST_TO_RESUME,
63 64
64 /* 65 /*
65 * Controller IDs 66 * Controller IDs
@@ -117,7 +118,7 @@ static int sil_pci_device_resume(struct pci_dev *pdev);
117static void sil_dev_config(struct ata_device *dev); 118static void sil_dev_config(struct ata_device *dev);
118static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 119static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
119static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 120static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
120static int sil_set_mode (struct ata_port *ap, struct ata_device **r_failed); 121static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
121static void sil_freeze(struct ata_port *ap); 122static void sil_freeze(struct ata_port *ap);
122static void sil_thaw(struct ata_port *ap); 123static void sil_thaw(struct ata_port *ap);
123 124
@@ -185,7 +186,6 @@ static struct scsi_host_template sil_sht = {
185}; 186};
186 187
187static const struct ata_port_operations sil_ops = { 188static const struct ata_port_operations sil_ops = {
188 .port_disable = ata_port_disable,
189 .dev_config = sil_dev_config, 189 .dev_config = sil_dev_config,
190 .tf_load = ata_tf_load, 190 .tf_load = ata_tf_load,
191 .tf_read = ata_tf_read, 191 .tf_read = ata_tf_read,
@@ -206,7 +206,6 @@ static const struct ata_port_operations sil_ops = {
206 .post_internal_cmd = ata_bmdma_post_internal_cmd, 206 .post_internal_cmd = ata_bmdma_post_internal_cmd,
207 .irq_clear = ata_bmdma_irq_clear, 207 .irq_clear = ata_bmdma_irq_clear,
208 .irq_on = ata_irq_on, 208 .irq_on = ata_irq_on,
209 .irq_ack = ata_irq_ack,
210 .scr_read = sil_scr_read, 209 .scr_read = sil_scr_read,
211 .scr_write = sil_scr_write, 210 .scr_write = sil_scr_write,
212 .port_start = ata_port_start, 211 .port_start = ata_port_start,
@@ -216,6 +215,7 @@ static const struct ata_port_info sil_port_info[] = {
216 /* sil_3112 */ 215 /* sil_3112 */
217 { 216 {
218 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE, 217 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
218 .link_flags = SIL_DFL_LINK_FLAGS,
219 .pio_mask = 0x1f, /* pio0-4 */ 219 .pio_mask = 0x1f, /* pio0-4 */
220 .mwdma_mask = 0x07, /* mwdma0-2 */ 220 .mwdma_mask = 0x07, /* mwdma0-2 */
221 .udma_mask = ATA_UDMA5, 221 .udma_mask = ATA_UDMA5,
@@ -225,6 +225,7 @@ static const struct ata_port_info sil_port_info[] = {
225 { 225 {
226 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE | 226 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
227 SIL_FLAG_NO_SATA_IRQ, 227 SIL_FLAG_NO_SATA_IRQ,
228 .link_flags = SIL_DFL_LINK_FLAGS,
228 .pio_mask = 0x1f, /* pio0-4 */ 229 .pio_mask = 0x1f, /* pio0-4 */
229 .mwdma_mask = 0x07, /* mwdma0-2 */ 230 .mwdma_mask = 0x07, /* mwdma0-2 */
230 .udma_mask = ATA_UDMA5, 231 .udma_mask = ATA_UDMA5,
@@ -233,6 +234,7 @@ static const struct ata_port_info sil_port_info[] = {
233 /* sil_3512 */ 234 /* sil_3512 */
234 { 235 {
235 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, 236 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
237 .link_flags = SIL_DFL_LINK_FLAGS,
236 .pio_mask = 0x1f, /* pio0-4 */ 238 .pio_mask = 0x1f, /* pio0-4 */
237 .mwdma_mask = 0x07, /* mwdma0-2 */ 239 .mwdma_mask = 0x07, /* mwdma0-2 */
238 .udma_mask = ATA_UDMA5, 240 .udma_mask = ATA_UDMA5,
@@ -241,6 +243,7 @@ static const struct ata_port_info sil_port_info[] = {
241 /* sil_3114 */ 243 /* sil_3114 */
242 { 244 {
243 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, 245 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
246 .link_flags = SIL_DFL_LINK_FLAGS,
244 .pio_mask = 0x1f, /* pio0-4 */ 247 .pio_mask = 0x1f, /* pio0-4 */
245 .mwdma_mask = 0x07, /* mwdma0-2 */ 248 .mwdma_mask = 0x07, /* mwdma0-2 */
246 .udma_mask = ATA_UDMA5, 249 .udma_mask = ATA_UDMA5,
@@ -290,35 +293,33 @@ static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
290 293
291/** 294/**
292 * sil_set_mode - wrap set_mode functions 295 * sil_set_mode - wrap set_mode functions
293 * @ap: port to set up 296 * @link: link to set up
294 * @r_failed: returned device when we fail 297 * @r_failed: returned device when we fail
295 * 298 *
296 * Wrap the libata method for device setup as after the setup we need 299 * Wrap the libata method for device setup as after the setup we need
297 * to inspect the results and do some configuration work 300 * to inspect the results and do some configuration work
298 */ 301 */
299 302
300static int sil_set_mode (struct ata_port *ap, struct ata_device **r_failed) 303static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
301{ 304{
302 struct ata_host *host = ap->host; 305 struct ata_port *ap = link->ap;
303 struct ata_device *dev; 306 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
304 void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
305 void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode; 307 void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
306 u32 tmp, dev_mode[2]; 308 struct ata_device *dev;
307 unsigned int i; 309 u32 tmp, dev_mode[2] = { };
308 int rc; 310 int rc;
309 311
310 rc = ata_do_set_mode(ap, r_failed); 312 rc = ata_do_set_mode(link, r_failed);
311 if (rc) 313 if (rc)
312 return rc; 314 return rc;
313 315
314 for (i = 0; i < 2; i++) { 316 ata_link_for_each_dev(dev, link) {
315 dev = &ap->device[i];
316 if (!ata_dev_enabled(dev)) 317 if (!ata_dev_enabled(dev))
317 dev_mode[i] = 0; /* PIO0/1/2 */ 318 dev_mode[dev->devno] = 0; /* PIO0/1/2 */
318 else if (dev->flags & ATA_DFLAG_PIO) 319 else if (dev->flags & ATA_DFLAG_PIO)
319 dev_mode[i] = 1; /* PIO3/4 */ 320 dev_mode[dev->devno] = 1; /* PIO3/4 */
320 else 321 else
321 dev_mode[i] = 3; /* UDMA */ 322 dev_mode[dev->devno] = 3; /* UDMA */
322 /* value 2 indicates MDMA */ 323 /* value 2 indicates MDMA */
323 } 324 }
324 325
@@ -374,8 +375,8 @@ static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
374 375
375static void sil_host_intr(struct ata_port *ap, u32 bmdma2) 376static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
376{ 377{
377 struct ata_eh_info *ehi = &ap->eh_info; 378 struct ata_eh_info *ehi = &ap->link.eh_info;
378 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag); 379 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
379 u8 status; 380 u8 status;
380 381
381 if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) { 382 if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
@@ -394,8 +395,8 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
394 * repeat probing needlessly. 395 * repeat probing needlessly.
395 */ 396 */
396 if (!(ap->pflags & ATA_PFLAG_FROZEN)) { 397 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
397 ata_ehi_hotplugged(&ap->eh_info); 398 ata_ehi_hotplugged(&ap->link.eh_info);
398 ap->eh_info.serror |= serror; 399 ap->link.eh_info.serror |= serror;
399 } 400 }
400 401
401 goto freeze; 402 goto freeze;
@@ -562,8 +563,8 @@ static void sil_thaw(struct ata_port *ap)
562 */ 563 */
563static void sil_dev_config(struct ata_device *dev) 564static void sil_dev_config(struct ata_device *dev)
564{ 565{
565 struct ata_port *ap = dev->ap; 566 struct ata_port *ap = dev->link->ap;
566 int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO; 567 int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
567 unsigned int n, quirks = 0; 568 unsigned int n, quirks = 0;
568 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 569 unsigned char model_num[ATA_ID_PROD_LEN + 1];
569 570
@@ -686,7 +687,8 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
686 mmio_base = host->iomap[SIL_MMIO_BAR]; 687 mmio_base = host->iomap[SIL_MMIO_BAR];
687 688
688 for (i = 0; i < host->n_ports; i++) { 689 for (i = 0; i < host->n_ports; i++) {
689 struct ata_ioports *ioaddr = &host->ports[i]->ioaddr; 690 struct ata_port *ap = host->ports[i];
691 struct ata_ioports *ioaddr = &ap->ioaddr;
690 692
691 ioaddr->cmd_addr = mmio_base + sil_port[i].tf; 693 ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
692 ioaddr->altstatus_addr = 694 ioaddr->altstatus_addr =
@@ -694,6 +696,9 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
694 ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma; 696 ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
695 ioaddr->scr_addr = mmio_base + sil_port[i].scr; 697 ioaddr->scr_addr = mmio_base + sil_port[i].scr;
696 ata_std_ports(ioaddr); 698 ata_std_ports(ioaddr);
699
700 ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio");
701 ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf");
697 } 702 }
698 703
699 /* initialize and activate */ 704 /* initialize and activate */
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 233e886933..b061927845 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -30,7 +30,7 @@
30#include <linux/libata.h> 30#include <linux/libata.h>
31 31
32#define DRV_NAME "sata_sil24" 32#define DRV_NAME "sata_sil24"
33#define DRV_VERSION "1.0" 33#define DRV_VERSION "1.1"
34 34
35/* 35/*
36 * Port request block (PRB) 32 bytes 36 * Port request block (PRB) 32 bytes
@@ -168,7 +168,7 @@ enum {
168 168
169 DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR | 169 DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
170 PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG | 170 PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
171 PORT_IRQ_UNK_FIS, 171 PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_NOTIFY,
172 172
173 /* bits[27:16] are unmasked (raw) */ 173 /* bits[27:16] are unmasked (raw) */
174 PORT_IRQ_RAW_SHIFT = 16, 174 PORT_IRQ_RAW_SHIFT = 16,
@@ -237,8 +237,9 @@ enum {
237 /* host flags */ 237 /* host flags */
238 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 238 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
239 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | 239 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
240 ATA_FLAG_NCQ | ATA_FLAG_SKIP_D2H_BSY | 240 ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
241 ATA_FLAG_ACPI_SATA, 241 ATA_FLAG_AN | ATA_FLAG_PMP,
242 SIL24_COMMON_LFLAGS = ATA_LFLAG_SKIP_D2H_BSY,
242 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ 243 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
243 244
244 IRQ_STAT_4PORTS = 0xf, 245 IRQ_STAT_4PORTS = 0xf,
@@ -322,6 +323,7 @@ struct sil24_port_priv {
322 union sil24_cmd_block *cmd_block; /* 32 cmd blocks */ 323 union sil24_cmd_block *cmd_block; /* 32 cmd blocks */
323 dma_addr_t cmd_block_dma; /* DMA base addr for them */ 324 dma_addr_t cmd_block_dma; /* DMA base addr for them */
324 struct ata_taskfile tf; /* Cached taskfile registers */ 325 struct ata_taskfile tf; /* Cached taskfile registers */
326 int do_port_rst;
325}; 327};
326 328
327static void sil24_dev_config(struct ata_device *dev); 329static void sil24_dev_config(struct ata_device *dev);
@@ -329,9 +331,12 @@ static u8 sil24_check_status(struct ata_port *ap);
329static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val); 331static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val);
330static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); 332static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
331static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 333static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
334static int sil24_qc_defer(struct ata_queued_cmd *qc);
332static void sil24_qc_prep(struct ata_queued_cmd *qc); 335static void sil24_qc_prep(struct ata_queued_cmd *qc);
333static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); 336static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
334static void sil24_irq_clear(struct ata_port *ap); 337static void sil24_irq_clear(struct ata_port *ap);
338static void sil24_pmp_attach(struct ata_port *ap);
339static void sil24_pmp_detach(struct ata_port *ap);
335static void sil24_freeze(struct ata_port *ap); 340static void sil24_freeze(struct ata_port *ap);
336static void sil24_thaw(struct ata_port *ap); 341static void sil24_thaw(struct ata_port *ap);
337static void sil24_error_handler(struct ata_port *ap); 342static void sil24_error_handler(struct ata_port *ap);
@@ -340,6 +345,7 @@ static int sil24_port_start(struct ata_port *ap);
340static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 345static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
341#ifdef CONFIG_PM 346#ifdef CONFIG_PM
342static int sil24_pci_device_resume(struct pci_dev *pdev); 347static int sil24_pci_device_resume(struct pci_dev *pdev);
348static int sil24_port_resume(struct ata_port *ap);
343#endif 349#endif
344 350
345static const struct pci_device_id sil24_pci_tbl[] = { 351static const struct pci_device_id sil24_pci_tbl[] = {
@@ -384,8 +390,6 @@ static struct scsi_host_template sil24_sht = {
384}; 390};
385 391
386static const struct ata_port_operations sil24_ops = { 392static const struct ata_port_operations sil24_ops = {
387 .port_disable = ata_port_disable,
388
389 .dev_config = sil24_dev_config, 393 .dev_config = sil24_dev_config,
390 394
391 .check_status = sil24_check_status, 395 .check_status = sil24_check_status,
@@ -394,22 +398,28 @@ static const struct ata_port_operations sil24_ops = {
394 398
395 .tf_read = sil24_tf_read, 399 .tf_read = sil24_tf_read,
396 400
401 .qc_defer = sil24_qc_defer,
397 .qc_prep = sil24_qc_prep, 402 .qc_prep = sil24_qc_prep,
398 .qc_issue = sil24_qc_issue, 403 .qc_issue = sil24_qc_issue,
399 404
400 .irq_clear = sil24_irq_clear, 405 .irq_clear = sil24_irq_clear,
401 .irq_on = ata_dummy_irq_on,
402 .irq_ack = ata_dummy_irq_ack,
403 406
404 .scr_read = sil24_scr_read, 407 .scr_read = sil24_scr_read,
405 .scr_write = sil24_scr_write, 408 .scr_write = sil24_scr_write,
406 409
410 .pmp_attach = sil24_pmp_attach,
411 .pmp_detach = sil24_pmp_detach,
412
407 .freeze = sil24_freeze, 413 .freeze = sil24_freeze,
408 .thaw = sil24_thaw, 414 .thaw = sil24_thaw,
409 .error_handler = sil24_error_handler, 415 .error_handler = sil24_error_handler,
410 .post_internal_cmd = sil24_post_internal_cmd, 416 .post_internal_cmd = sil24_post_internal_cmd,
411 417
412 .port_start = sil24_port_start, 418 .port_start = sil24_port_start,
419
420#ifdef CONFIG_PM
421 .port_resume = sil24_port_resume,
422#endif
413}; 423};
414 424
415/* 425/*
@@ -424,6 +434,7 @@ static const struct ata_port_info sil24_port_info[] = {
424 { 434 {
425 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) | 435 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
426 SIL24_FLAG_PCIX_IRQ_WOC, 436 SIL24_FLAG_PCIX_IRQ_WOC,
437 .link_flags = SIL24_COMMON_LFLAGS,
427 .pio_mask = 0x1f, /* pio0-4 */ 438 .pio_mask = 0x1f, /* pio0-4 */
428 .mwdma_mask = 0x07, /* mwdma0-2 */ 439 .mwdma_mask = 0x07, /* mwdma0-2 */
429 .udma_mask = ATA_UDMA5, /* udma0-5 */ 440 .udma_mask = ATA_UDMA5, /* udma0-5 */
@@ -432,6 +443,7 @@ static const struct ata_port_info sil24_port_info[] = {
432 /* sil_3132 */ 443 /* sil_3132 */
433 { 444 {
434 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2), 445 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
446 .link_flags = SIL24_COMMON_LFLAGS,
435 .pio_mask = 0x1f, /* pio0-4 */ 447 .pio_mask = 0x1f, /* pio0-4 */
436 .mwdma_mask = 0x07, /* mwdma0-2 */ 448 .mwdma_mask = 0x07, /* mwdma0-2 */
437 .udma_mask = ATA_UDMA5, /* udma0-5 */ 449 .udma_mask = ATA_UDMA5, /* udma0-5 */
@@ -440,6 +452,7 @@ static const struct ata_port_info sil24_port_info[] = {
440 /* sil_3131/sil_3531 */ 452 /* sil_3131/sil_3531 */
441 { 453 {
442 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1), 454 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
455 .link_flags = SIL24_COMMON_LFLAGS,
443 .pio_mask = 0x1f, /* pio0-4 */ 456 .pio_mask = 0x1f, /* pio0-4 */
444 .mwdma_mask = 0x07, /* mwdma0-2 */ 457 .mwdma_mask = 0x07, /* mwdma0-2 */
445 .udma_mask = ATA_UDMA5, /* udma0-5 */ 458 .udma_mask = ATA_UDMA5, /* udma0-5 */
@@ -456,7 +469,7 @@ static int sil24_tag(int tag)
456 469
457static void sil24_dev_config(struct ata_device *dev) 470static void sil24_dev_config(struct ata_device *dev)
458{ 471{
459 void __iomem *port = dev->ap->ioaddr.cmd_addr; 472 void __iomem *port = dev->link->ap->ioaddr.cmd_addr;
460 473
461 if (dev->cdb_len == 16) 474 if (dev->cdb_len == 16)
462 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT); 475 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
@@ -520,19 +533,78 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
520 *tf = pp->tf; 533 *tf = pp->tf;
521} 534}
522 535
536static void sil24_config_port(struct ata_port *ap)
537{
538 void __iomem *port = ap->ioaddr.cmd_addr;
539
540 /* configure IRQ WoC */
541 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
542 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
543 else
544 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
545
546 /* zero error counters. */
547 writel(0x8000, port + PORT_DECODE_ERR_THRESH);
548 writel(0x8000, port + PORT_CRC_ERR_THRESH);
549 writel(0x8000, port + PORT_HSHK_ERR_THRESH);
550 writel(0x0000, port + PORT_DECODE_ERR_CNT);
551 writel(0x0000, port + PORT_CRC_ERR_CNT);
552 writel(0x0000, port + PORT_HSHK_ERR_CNT);
553
554 /* always use 64bit activation */
555 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
556
557 /* clear port multiplier enable and resume bits */
558 writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
559}
560
561static void sil24_config_pmp(struct ata_port *ap, int attached)
562{
563 void __iomem *port = ap->ioaddr.cmd_addr;
564
565 if (attached)
566 writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT);
567 else
568 writel(PORT_CS_PMP_EN, port + PORT_CTRL_CLR);
569}
570
571static void sil24_clear_pmp(struct ata_port *ap)
572{
573 void __iomem *port = ap->ioaddr.cmd_addr;
574 int i;
575
576 writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
577
578 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) {
579 void __iomem *pmp_base = port + PORT_PMP + i * PORT_PMP_SIZE;
580
581 writel(0, pmp_base + PORT_PMP_STATUS);
582 writel(0, pmp_base + PORT_PMP_QACTIVE);
583 }
584}
585
523static int sil24_init_port(struct ata_port *ap) 586static int sil24_init_port(struct ata_port *ap)
524{ 587{
525 void __iomem *port = ap->ioaddr.cmd_addr; 588 void __iomem *port = ap->ioaddr.cmd_addr;
589 struct sil24_port_priv *pp = ap->private_data;
526 u32 tmp; 590 u32 tmp;
527 591
592 /* clear PMP error status */
593 if (ap->nr_pmp_links)
594 sil24_clear_pmp(ap);
595
528 writel(PORT_CS_INIT, port + PORT_CTRL_STAT); 596 writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
529 ata_wait_register(port + PORT_CTRL_STAT, 597 ata_wait_register(port + PORT_CTRL_STAT,
530 PORT_CS_INIT, PORT_CS_INIT, 10, 100); 598 PORT_CS_INIT, PORT_CS_INIT, 10, 100);
531 tmp = ata_wait_register(port + PORT_CTRL_STAT, 599 tmp = ata_wait_register(port + PORT_CTRL_STAT,
532 PORT_CS_RDY, 0, 10, 100); 600 PORT_CS_RDY, 0, 10, 100);
533 601
534 if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) 602 if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) {
603 pp->do_port_rst = 1;
604 ap->link.eh_context.i.action |= ATA_EH_HARDRESET;
535 return -EIO; 605 return -EIO;
606 }
607
536 return 0; 608 return 0;
537} 609}
538 610
@@ -583,9 +655,10 @@ static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
583 return rc; 655 return rc;
584} 656}
585 657
586static int sil24_do_softreset(struct ata_port *ap, unsigned int *class, 658static int sil24_do_softreset(struct ata_link *link, unsigned int *class,
587 int pmp, unsigned long deadline) 659 int pmp, unsigned long deadline)
588{ 660{
661 struct ata_port *ap = link->ap;
589 unsigned long timeout_msec = 0; 662 unsigned long timeout_msec = 0;
590 struct ata_taskfile tf; 663 struct ata_taskfile tf;
591 const char *reason; 664 const char *reason;
@@ -593,7 +666,7 @@ static int sil24_do_softreset(struct ata_port *ap, unsigned int *class,
593 666
594 DPRINTK("ENTER\n"); 667 DPRINTK("ENTER\n");
595 668
596 if (ata_port_offline(ap)) { 669 if (ata_link_offline(link)) {
597 DPRINTK("PHY reports no device\n"); 670 DPRINTK("PHY reports no device\n");
598 *class = ATA_DEV_NONE; 671 *class = ATA_DEV_NONE;
599 goto out; 672 goto out;
@@ -609,7 +682,7 @@ static int sil24_do_softreset(struct ata_port *ap, unsigned int *class,
609 if (time_after(deadline, jiffies)) 682 if (time_after(deadline, jiffies))
610 timeout_msec = jiffies_to_msecs(deadline - jiffies); 683 timeout_msec = jiffies_to_msecs(deadline - jiffies);
611 684
612 ata_tf_init(ap->device, &tf); /* doesn't really matter */ 685 ata_tf_init(link->device, &tf); /* doesn't really matter */
613 rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST, 686 rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST,
614 timeout_msec); 687 timeout_msec);
615 if (rc == -EBUSY) { 688 if (rc == -EBUSY) {
@@ -631,29 +704,54 @@ static int sil24_do_softreset(struct ata_port *ap, unsigned int *class,
631 return 0; 704 return 0;
632 705
633 err: 706 err:
634 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason); 707 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
635 return -EIO; 708 return -EIO;
636} 709}
637 710
638static int sil24_softreset(struct ata_port *ap, unsigned int *class, 711static int sil24_softreset(struct ata_link *link, unsigned int *class,
639 unsigned long deadline) 712 unsigned long deadline)
640{ 713{
641 return sil24_do_softreset(ap, class, 0, deadline); 714 return sil24_do_softreset(link, class, SATA_PMP_CTRL_PORT, deadline);
642} 715}
643 716
644static int sil24_hardreset(struct ata_port *ap, unsigned int *class, 717static int sil24_hardreset(struct ata_link *link, unsigned int *class,
645 unsigned long deadline) 718 unsigned long deadline)
646{ 719{
720 struct ata_port *ap = link->ap;
647 void __iomem *port = ap->ioaddr.cmd_addr; 721 void __iomem *port = ap->ioaddr.cmd_addr;
722 struct sil24_port_priv *pp = ap->private_data;
723 int did_port_rst = 0;
648 const char *reason; 724 const char *reason;
649 int tout_msec, rc; 725 int tout_msec, rc;
650 u32 tmp; 726 u32 tmp;
651 727
728 retry:
729 /* Sometimes, DEV_RST is not enough to recover the controller.
730 * This happens often after PM DMA CS errata.
731 */
732 if (pp->do_port_rst) {
733 ata_port_printk(ap, KERN_WARNING, "controller in dubious "
734 "state, performing PORT_RST\n");
735
736 writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT);
737 msleep(10);
738 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
739 ata_wait_register(port + PORT_CTRL_STAT, PORT_CS_RDY, 0,
740 10, 5000);
741
742 /* restore port configuration */
743 sil24_config_port(ap);
744 sil24_config_pmp(ap, ap->nr_pmp_links);
745
746 pp->do_port_rst = 0;
747 did_port_rst = 1;
748 }
749
652 /* sil24 does the right thing(tm) without any protection */ 750 /* sil24 does the right thing(tm) without any protection */
653 sata_set_spd(ap); 751 sata_set_spd(link);
654 752
655 tout_msec = 100; 753 tout_msec = 100;
656 if (ata_port_online(ap)) 754 if (ata_link_online(link))
657 tout_msec = 5000; 755 tout_msec = 5000;
658 756
659 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); 757 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
@@ -663,14 +761,14 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class,
663 /* SStatus oscillates between zero and valid status after 761 /* SStatus oscillates between zero and valid status after
664 * DEV_RST, debounce it. 762 * DEV_RST, debounce it.
665 */ 763 */
666 rc = sata_phy_debounce(ap, sata_deb_timing_long, deadline); 764 rc = sata_link_debounce(link, sata_deb_timing_long, deadline);
667 if (rc) { 765 if (rc) {
668 reason = "PHY debouncing failed"; 766 reason = "PHY debouncing failed";
669 goto err; 767 goto err;
670 } 768 }
671 769
672 if (tmp & PORT_CS_DEV_RST) { 770 if (tmp & PORT_CS_DEV_RST) {
673 if (ata_port_offline(ap)) 771 if (ata_link_offline(link))
674 return 0; 772 return 0;
675 reason = "link not ready"; 773 reason = "link not ready";
676 goto err; 774 goto err;
@@ -685,7 +783,12 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class,
685 return -EAGAIN; 783 return -EAGAIN;
686 784
687 err: 785 err:
688 ata_port_printk(ap, KERN_ERR, "hardreset failed (%s)\n", reason); 786 if (!did_port_rst) {
787 pp->do_port_rst = 1;
788 goto retry;
789 }
790
791 ata_link_printk(link, KERN_ERR, "hardreset failed (%s)\n", reason);
689 return -EIO; 792 return -EIO;
690} 793}
691 794
@@ -705,6 +808,38 @@ static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
705 } 808 }
706} 809}
707 810
811static int sil24_qc_defer(struct ata_queued_cmd *qc)
812{
813 struct ata_link *link = qc->dev->link;
814 struct ata_port *ap = link->ap;
815 u8 prot = qc->tf.protocol;
816 int is_atapi = (prot == ATA_PROT_ATAPI ||
817 prot == ATA_PROT_ATAPI_NODATA ||
818 prot == ATA_PROT_ATAPI_DMA);
819
820 /* ATAPI commands completing with CHECK_SENSE cause various
821 * weird problems if other commands are active. PMP DMA CS
822 * errata doesn't cover all and HSM violation occurs even with
823 * only one other device active. Always run an ATAPI command
824 * by itself.
825 */
826 if (unlikely(ap->excl_link)) {
827 if (link == ap->excl_link) {
828 if (ap->nr_active_links)
829 return ATA_DEFER_PORT;
830 qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
831 } else
832 return ATA_DEFER_PORT;
833 } else if (unlikely(is_atapi)) {
834 ap->excl_link = link;
835 if (ap->nr_active_links)
836 return ATA_DEFER_PORT;
837 qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
838 }
839
840 return ata_std_qc_defer(qc);
841}
842
708static void sil24_qc_prep(struct ata_queued_cmd *qc) 843static void sil24_qc_prep(struct ata_queued_cmd *qc)
709{ 844{
710 struct ata_port *ap = qc->ap; 845 struct ata_port *ap = qc->ap;
@@ -748,7 +883,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
748 } 883 }
749 884
750 prb->ctrl = cpu_to_le16(ctrl); 885 prb->ctrl = cpu_to_le16(ctrl);
751 ata_tf_to_fis(&qc->tf, 0, 1, prb->fis); 886 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, prb->fis);
752 887
753 if (qc->flags & ATA_QCFLAG_DMAMAP) 888 if (qc->flags & ATA_QCFLAG_DMAMAP)
754 sil24_fill_sg(qc, sge); 889 sil24_fill_sg(qc, sge);
@@ -777,6 +912,39 @@ static void sil24_irq_clear(struct ata_port *ap)
777 /* unused */ 912 /* unused */
778} 913}
779 914
915static void sil24_pmp_attach(struct ata_port *ap)
916{
917 sil24_config_pmp(ap, 1);
918 sil24_init_port(ap);
919}
920
921static void sil24_pmp_detach(struct ata_port *ap)
922{
923 sil24_init_port(ap);
924 sil24_config_pmp(ap, 0);
925}
926
927static int sil24_pmp_softreset(struct ata_link *link, unsigned int *class,
928 unsigned long deadline)
929{
930 return sil24_do_softreset(link, class, link->pmp, deadline);
931}
932
933static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
934 unsigned long deadline)
935{
936 int rc;
937
938 rc = sil24_init_port(link->ap);
939 if (rc) {
940 ata_link_printk(link, KERN_ERR,
941 "hardreset failed (port not ready)\n");
942 return rc;
943 }
944
945 return sata_pmp_std_hardreset(link, class, deadline);
946}
947
780static void sil24_freeze(struct ata_port *ap) 948static void sil24_freeze(struct ata_port *ap)
781{ 949{
782 void __iomem *port = ap->ioaddr.cmd_addr; 950 void __iomem *port = ap->ioaddr.cmd_addr;
@@ -804,8 +972,10 @@ static void sil24_error_intr(struct ata_port *ap)
804{ 972{
805 void __iomem *port = ap->ioaddr.cmd_addr; 973 void __iomem *port = ap->ioaddr.cmd_addr;
806 struct sil24_port_priv *pp = ap->private_data; 974 struct sil24_port_priv *pp = ap->private_data;
807 struct ata_eh_info *ehi = &ap->eh_info; 975 struct ata_queued_cmd *qc = NULL;
808 int freeze = 0; 976 struct ata_link *link;
977 struct ata_eh_info *ehi;
978 int abort = 0, freeze = 0;
809 u32 irq_stat; 979 u32 irq_stat;
810 980
811 /* on error, we need to clear IRQ explicitly */ 981 /* on error, we need to clear IRQ explicitly */
@@ -813,10 +983,17 @@ static void sil24_error_intr(struct ata_port *ap)
813 writel(irq_stat, port + PORT_IRQ_STAT); 983 writel(irq_stat, port + PORT_IRQ_STAT);
814 984
815 /* first, analyze and record host port events */ 985 /* first, analyze and record host port events */
986 link = &ap->link;
987 ehi = &link->eh_info;
816 ata_ehi_clear_desc(ehi); 988 ata_ehi_clear_desc(ehi);
817 989
818 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat); 990 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
819 991
992 if (irq_stat & PORT_IRQ_SDB_NOTIFY) {
993 ata_ehi_push_desc(ehi, "SDB notify");
994 sata_async_notification(ap);
995 }
996
820 if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) { 997 if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
821 ata_ehi_hotplugged(ehi); 998 ata_ehi_hotplugged(ehi);
822 ata_ehi_push_desc(ehi, "%s", 999 ata_ehi_push_desc(ehi, "%s",
@@ -836,8 +1013,44 @@ static void sil24_error_intr(struct ata_port *ap)
836 if (irq_stat & PORT_IRQ_ERROR) { 1013 if (irq_stat & PORT_IRQ_ERROR) {
837 struct sil24_cerr_info *ci = NULL; 1014 struct sil24_cerr_info *ci = NULL;
838 unsigned int err_mask = 0, action = 0; 1015 unsigned int err_mask = 0, action = 0;
839 struct ata_queued_cmd *qc; 1016 u32 context, cerr;
840 u32 cerr; 1017 int pmp;
1018
1019 abort = 1;
1020
1021 /* DMA Context Switch Failure in Port Multiplier Mode
1022 * errata. If we have active commands to 3 or more
1023 * devices, any error condition on active devices can
1024 * corrupt DMA context switching.
1025 */
1026 if (ap->nr_active_links >= 3) {
1027 ehi->err_mask |= AC_ERR_OTHER;
1028 ehi->action |= ATA_EH_HARDRESET;
1029 ata_ehi_push_desc(ehi, "PMP DMA CS errata");
1030 pp->do_port_rst = 1;
1031 freeze = 1;
1032 }
1033
1034 /* find out the offending link and qc */
1035 if (ap->nr_pmp_links) {
1036 context = readl(port + PORT_CONTEXT);
1037 pmp = (context >> 5) & 0xf;
1038
1039 if (pmp < ap->nr_pmp_links) {
1040 link = &ap->pmp_link[pmp];
1041 ehi = &link->eh_info;
1042 qc = ata_qc_from_tag(ap, link->active_tag);
1043
1044 ata_ehi_clear_desc(ehi);
1045 ata_ehi_push_desc(ehi, "irq_stat 0x%08x",
1046 irq_stat);
1047 } else {
1048 err_mask |= AC_ERR_HSM;
1049 action |= ATA_EH_HARDRESET;
1050 freeze = 1;
1051 }
1052 } else
1053 qc = ata_qc_from_tag(ap, link->active_tag);
841 1054
842 /* analyze CMD_ERR */ 1055 /* analyze CMD_ERR */
843 cerr = readl(port + PORT_CMD_ERR); 1056 cerr = readl(port + PORT_CMD_ERR);
@@ -856,7 +1069,6 @@ static void sil24_error_intr(struct ata_port *ap)
856 } 1069 }
857 1070
858 /* record error info */ 1071 /* record error info */
859 qc = ata_qc_from_tag(ap, ap->active_tag);
860 if (qc) { 1072 if (qc) {
861 sil24_read_tf(ap, qc->tag, &pp->tf); 1073 sil24_read_tf(ap, qc->tag, &pp->tf);
862 qc->err_mask |= err_mask; 1074 qc->err_mask |= err_mask;
@@ -864,13 +1076,21 @@ static void sil24_error_intr(struct ata_port *ap)
864 ehi->err_mask |= err_mask; 1076 ehi->err_mask |= err_mask;
865 1077
866 ehi->action |= action; 1078 ehi->action |= action;
1079
1080 /* if PMP, resume */
1081 if (ap->nr_pmp_links)
1082 writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT);
867 } 1083 }
868 1084
869 /* freeze or abort */ 1085 /* freeze or abort */
870 if (freeze) 1086 if (freeze)
871 ata_port_freeze(ap); 1087 ata_port_freeze(ap);
872 else 1088 else if (abort) {
873 ata_port_abort(ap); 1089 if (qc)
1090 ata_link_abort(qc->dev->link);
1091 else
1092 ata_port_abort(ap);
1093 }
874} 1094}
875 1095
876static void sil24_finish_qc(struct ata_queued_cmd *qc) 1096static void sil24_finish_qc(struct ata_queued_cmd *qc)
@@ -910,7 +1130,7 @@ static inline void sil24_host_intr(struct ata_port *ap)
910 if (rc > 0) 1130 if (rc > 0)
911 return; 1131 return;
912 if (rc < 0) { 1132 if (rc < 0) {
913 struct ata_eh_info *ehi = &ap->eh_info; 1133 struct ata_eh_info *ehi = &ap->link.eh_info;
914 ehi->err_mask |= AC_ERR_HSM; 1134 ehi->err_mask |= AC_ERR_HSM;
915 ehi->action |= ATA_EH_SOFTRESET; 1135 ehi->action |= ATA_EH_SOFTRESET;
916 ata_port_freeze(ap); 1136 ata_port_freeze(ap);
@@ -921,7 +1141,7 @@ static inline void sil24_host_intr(struct ata_port *ap)
921 if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit()) 1141 if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
922 ata_port_printk(ap, KERN_INFO, "spurious interrupt " 1142 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
923 "(slot_stat 0x%x active_tag %d sactive 0x%x)\n", 1143 "(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
924 slot_stat, ap->active_tag, ap->sactive); 1144 slot_stat, ap->link.active_tag, ap->link.sactive);
925} 1145}
926 1146
927static irqreturn_t sil24_interrupt(int irq, void *dev_instance) 1147static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
@@ -963,16 +1183,18 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
963 1183
964static void sil24_error_handler(struct ata_port *ap) 1184static void sil24_error_handler(struct ata_port *ap)
965{ 1185{
966 struct ata_eh_context *ehc = &ap->eh_context; 1186 struct sil24_port_priv *pp = ap->private_data;
967 1187
968 if (sil24_init_port(ap)) { 1188 if (sil24_init_port(ap))
969 ata_eh_freeze_port(ap); 1189 ata_eh_freeze_port(ap);
970 ehc->i.action |= ATA_EH_HARDRESET;
971 }
972 1190
973 /* perform recovery */ 1191 /* perform recovery */
974 ata_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset, 1192 sata_pmp_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset,
975 ata_std_postreset); 1193 ata_std_postreset, sata_pmp_std_prereset,
1194 sil24_pmp_softreset, sil24_pmp_hardreset,
1195 sata_pmp_std_postreset);
1196
1197 pp->do_port_rst = 0;
976} 1198}
977 1199
978static void sil24_post_internal_cmd(struct ata_queued_cmd *qc) 1200static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
@@ -980,8 +1202,8 @@ static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
980 struct ata_port *ap = qc->ap; 1202 struct ata_port *ap = qc->ap;
981 1203
982 /* make DMA engine forget about the failed command */ 1204 /* make DMA engine forget about the failed command */
983 if (qc->flags & ATA_QCFLAG_FAILED) 1205 if ((qc->flags & ATA_QCFLAG_FAILED) && sil24_init_port(ap))
984 sil24_init_port(ap); 1206 ata_eh_freeze_port(ap);
985} 1207}
986 1208
987static int sil24_port_start(struct ata_port *ap) 1209static int sil24_port_start(struct ata_port *ap)
@@ -1019,7 +1241,6 @@ static int sil24_port_start(struct ata_port *ap)
1019static void sil24_init_controller(struct ata_host *host) 1241static void sil24_init_controller(struct ata_host *host)
1020{ 1242{
1021 void __iomem *host_base = host->iomap[SIL24_HOST_BAR]; 1243 void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
1022 void __iomem *port_base = host->iomap[SIL24_PORT_BAR];
1023 u32 tmp; 1244 u32 tmp;
1024 int i; 1245 int i;
1025 1246
@@ -1031,7 +1252,8 @@ static void sil24_init_controller(struct ata_host *host)
1031 1252
1032 /* init ports */ 1253 /* init ports */
1033 for (i = 0; i < host->n_ports; i++) { 1254 for (i = 0; i < host->n_ports; i++) {
1034 void __iomem *port = port_base + i * PORT_REGS_SIZE; 1255 struct ata_port *ap = host->ports[i];
1256 void __iomem *port = ap->ioaddr.cmd_addr;
1035 1257
1036 /* Initial PHY setting */ 1258 /* Initial PHY setting */
1037 writel(0x20c, port + PORT_PHY_CFG); 1259 writel(0x20c, port + PORT_PHY_CFG);
@@ -1048,26 +1270,8 @@ static void sil24_init_controller(struct ata_host *host)
1048 "failed to clear port RST\n"); 1270 "failed to clear port RST\n");
1049 } 1271 }
1050 1272
1051 /* Configure IRQ WoC */ 1273 /* configure port */
1052 if (host->ports[0]->flags & SIL24_FLAG_PCIX_IRQ_WOC) 1274 sil24_config_port(ap);
1053 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
1054 else
1055 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
1056
1057 /* Zero error counters. */
1058 writel(0x8000, port + PORT_DECODE_ERR_THRESH);
1059 writel(0x8000, port + PORT_CRC_ERR_THRESH);
1060 writel(0x8000, port + PORT_HSHK_ERR_THRESH);
1061 writel(0x0000, port + PORT_DECODE_ERR_CNT);
1062 writel(0x0000, port + PORT_CRC_ERR_CNT);
1063 writel(0x0000, port + PORT_HSHK_ERR_CNT);
1064
1065 /* Always use 64bit activation */
1066 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
1067
1068 /* Clear port multiplier enable and resume bits */
1069 writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME,
1070 port + PORT_CTRL_CLR);
1071 } 1275 }
1072 1276
1073 /* Turn on interrupts */ 1277 /* Turn on interrupts */
@@ -1118,12 +1322,15 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1118 host->iomap = iomap; 1322 host->iomap = iomap;
1119 1323
1120 for (i = 0; i < host->n_ports; i++) { 1324 for (i = 0; i < host->n_ports; i++) {
1121 void __iomem *port = iomap[SIL24_PORT_BAR] + i * PORT_REGS_SIZE; 1325 struct ata_port *ap = host->ports[i];
1326 size_t offset = ap->port_no * PORT_REGS_SIZE;
1327 void __iomem *port = iomap[SIL24_PORT_BAR] + offset;
1122 1328
1123 host->ports[i]->ioaddr.cmd_addr = port; 1329 host->ports[i]->ioaddr.cmd_addr = port;
1124 host->ports[i]->ioaddr.scr_addr = port + PORT_SCONTROL; 1330 host->ports[i]->ioaddr.scr_addr = port + PORT_SCONTROL;
1125 1331
1126 ata_std_ports(&host->ports[i]->ioaddr); 1332 ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host");
1333 ata_port_pbar_desc(ap, SIL24_PORT_BAR, offset, "port");
1127 } 1334 }
1128 1335
1129 /* configure and activate the device */ 1336 /* configure and activate the device */
@@ -1179,6 +1386,12 @@ static int sil24_pci_device_resume(struct pci_dev *pdev)
1179 1386
1180 return 0; 1387 return 0;
1181} 1388}
1389
1390static int sil24_port_resume(struct ata_port *ap)
1391{
1392 sil24_config_pmp(ap, ap->nr_pmp_links);
1393 return 0;
1394}
1182#endif 1395#endif
1183 1396
1184static int __init sil24_init(void) 1397static int __init sil24_init(void)
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 41c1d6e8f1..8d98a9fb0a 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -104,7 +104,6 @@ static struct scsi_host_template sis_sht = {
104}; 104};
105 105
106static const struct ata_port_operations sis_ops = { 106static const struct ata_port_operations sis_ops = {
107 .port_disable = ata_port_disable,
108 .tf_load = ata_tf_load, 107 .tf_load = ata_tf_load,
109 .tf_read = ata_tf_read, 108 .tf_read = ata_tf_read,
110 .check_status = ata_check_status, 109 .check_status = ata_check_status,
@@ -123,7 +122,6 @@ static const struct ata_port_operations sis_ops = {
123 .post_internal_cmd = ata_bmdma_post_internal_cmd, 122 .post_internal_cmd = ata_bmdma_post_internal_cmd,
124 .irq_clear = ata_bmdma_irq_clear, 123 .irq_clear = ata_bmdma_irq_clear,
125 .irq_on = ata_irq_on, 124 .irq_on = ata_irq_on,
126 .irq_ack = ata_irq_ack,
127 .scr_read = sis_scr_read, 125 .scr_read = sis_scr_read,
128 .scr_write = sis_scr_write, 126 .scr_write = sis_scr_write,
129 .port_start = ata_port_start, 127 .port_start = ata_port_start,
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index d9678e7bc3..12d613c48c 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -329,7 +329,6 @@ static struct scsi_host_template k2_sata_sht = {
329 329
330 330
331static const struct ata_port_operations k2_sata_ops = { 331static const struct ata_port_operations k2_sata_ops = {
332 .port_disable = ata_port_disable,
333 .tf_load = k2_sata_tf_load, 332 .tf_load = k2_sata_tf_load,
334 .tf_read = k2_sata_tf_read, 333 .tf_read = k2_sata_tf_read,
335 .check_status = k2_stat_check_status, 334 .check_status = k2_stat_check_status,
@@ -349,7 +348,6 @@ static const struct ata_port_operations k2_sata_ops = {
349 .post_internal_cmd = ata_bmdma_post_internal_cmd, 348 .post_internal_cmd = ata_bmdma_post_internal_cmd,
350 .irq_clear = ata_bmdma_irq_clear, 349 .irq_clear = ata_bmdma_irq_clear,
351 .irq_on = ata_irq_on, 350 .irq_on = ata_irq_on,
352 .irq_ack = ata_irq_ack,
353 .scr_read = k2_sata_scr_read, 351 .scr_read = k2_sata_scr_read,
354 .scr_write = k2_sata_scr_write, 352 .scr_write = k2_sata_scr_write,
355 .port_start = ata_port_start, 353 .port_start = ata_port_start,
@@ -445,9 +443,15 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
445 /* different controllers have different number of ports - currently 4 or 8 */ 443 /* different controllers have different number of ports - currently 4 or 8 */
446 /* All ports are on the same function. Multi-function device is no 444 /* All ports are on the same function. Multi-function device is no
447 * longer available. This should not be seen in any system. */ 445 * longer available. This should not be seen in any system. */
448 for (i = 0; i < host->n_ports; i++) 446 for (i = 0; i < host->n_ports; i++) {
449 k2_sata_setup_port(&host->ports[i]->ioaddr, 447 struct ata_port *ap = host->ports[i];
450 mmio_base + i * K2_SATA_PORT_OFFSET); 448 unsigned int offset = i * K2_SATA_PORT_OFFSET;
449
450 k2_sata_setup_port(&ap->ioaddr, mmio_base + offset);
451
452 ata_port_pbar_desc(ap, 5, -1, "mmio");
453 ata_port_pbar_desc(ap, 5, offset, "port");
454 }
451 455
452 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 456 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
453 if (rc) 457 if (rc)
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 97aefdd87b..9f9f7b3065 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -254,7 +254,6 @@ static struct scsi_host_template pdc_sata_sht = {
254}; 254};
255 255
256static const struct ata_port_operations pdc_20621_ops = { 256static const struct ata_port_operations pdc_20621_ops = {
257 .port_disable = ata_port_disable,
258 .tf_load = pdc_tf_load_mmio, 257 .tf_load = pdc_tf_load_mmio,
259 .tf_read = ata_tf_read, 258 .tf_read = ata_tf_read,
260 .check_status = ata_check_status, 259 .check_status = ata_check_status,
@@ -267,7 +266,6 @@ static const struct ata_port_operations pdc_20621_ops = {
267 .eng_timeout = pdc_eng_timeout, 266 .eng_timeout = pdc_eng_timeout,
268 .irq_clear = pdc20621_irq_clear, 267 .irq_clear = pdc20621_irq_clear,
269 .irq_on = ata_irq_on, 268 .irq_on = ata_irq_on,
270 .irq_ack = ata_irq_ack,
271 .port_start = pdc_port_start, 269 .port_start = pdc_port_start,
272}; 270};
273 271
@@ -854,7 +852,7 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance)
854 !(ap->flags & ATA_FLAG_DISABLED)) { 852 !(ap->flags & ATA_FLAG_DISABLED)) {
855 struct ata_queued_cmd *qc; 853 struct ata_queued_cmd *qc;
856 854
857 qc = ata_qc_from_tag(ap, ap->active_tag); 855 qc = ata_qc_from_tag(ap, ap->link.active_tag);
858 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) 856 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
859 handled += pdc20621_host_intr(ap, qc, (i > 4), 857 handled += pdc20621_host_intr(ap, qc, (i > 4),
860 mmio_base); 858 mmio_base);
@@ -881,7 +879,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
881 879
882 spin_lock_irqsave(&host->lock, flags); 880 spin_lock_irqsave(&host->lock, flags);
883 881
884 qc = ata_qc_from_tag(ap, ap->active_tag); 882 qc = ata_qc_from_tag(ap, ap->link.active_tag);
885 883
886 switch (qc->tf.protocol) { 884 switch (qc->tf.protocol) {
887 case ATA_PROT_DMA: 885 case ATA_PROT_DMA:
@@ -1383,9 +1381,8 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1383 const struct ata_port_info *ppi[] = 1381 const struct ata_port_info *ppi[] =
1384 { &pdc_port_info[ent->driver_data], NULL }; 1382 { &pdc_port_info[ent->driver_data], NULL };
1385 struct ata_host *host; 1383 struct ata_host *host;
1386 void __iomem *base;
1387 struct pdc_host_priv *hpriv; 1384 struct pdc_host_priv *hpriv;
1388 int rc; 1385 int i, rc;
1389 1386
1390 if (!printed_version++) 1387 if (!printed_version++)
1391 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1388 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -1411,11 +1408,17 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1411 return rc; 1408 return rc;
1412 host->iomap = pcim_iomap_table(pdev); 1409 host->iomap = pcim_iomap_table(pdev);
1413 1410
1414 base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS; 1411 for (i = 0; i < 4; i++) {
1415 pdc_sata_setup_port(&host->ports[0]->ioaddr, base + 0x200); 1412 struct ata_port *ap = host->ports[i];
1416 pdc_sata_setup_port(&host->ports[1]->ioaddr, base + 0x280); 1413 void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1417 pdc_sata_setup_port(&host->ports[2]->ioaddr, base + 0x300); 1414 unsigned int offset = 0x200 + i * 0x80;
1418 pdc_sata_setup_port(&host->ports[3]->ioaddr, base + 0x380); 1415
1416 pdc_sata_setup_port(&ap->ioaddr, base + offset);
1417
1418 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1419 ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
1420 ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
1421 }
1419 1422
1420 /* configure and activate */ 1423 /* configure and activate */
1421 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 1424 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index e6b8b45279..d394da085a 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -94,8 +94,6 @@ static struct scsi_host_template uli_sht = {
94}; 94};
95 95
96static const struct ata_port_operations uli_ops = { 96static const struct ata_port_operations uli_ops = {
97 .port_disable = ata_port_disable,
98
99 .tf_load = ata_tf_load, 97 .tf_load = ata_tf_load,
100 .tf_read = ata_tf_read, 98 .tf_read = ata_tf_read,
101 .check_status = ata_check_status, 99 .check_status = ata_check_status,
@@ -117,7 +115,6 @@ static const struct ata_port_operations uli_ops = {
117 115
118 .irq_clear = ata_bmdma_irq_clear, 116 .irq_clear = ata_bmdma_irq_clear,
119 .irq_on = ata_irq_on, 117 .irq_on = ata_irq_on,
120 .irq_ack = ata_irq_ack,
121 118
122 .scr_read = uli_scr_read, 119 .scr_read = uli_scr_read,
123 .scr_write = uli_scr_write, 120 .scr_write = uli_scr_write,
@@ -242,6 +239,12 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
242 hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4; 239 hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4;
243 ata_std_ports(ioaddr); 240 ata_std_ports(ioaddr);
244 241
242 ata_port_desc(host->ports[2],
243 "cmd 0x%llx ctl 0x%llx bmdma 0x%llx",
244 (unsigned long long)pci_resource_start(pdev, 0) + 8,
245 ((unsigned long long)pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4,
246 (unsigned long long)pci_resource_start(pdev, 4) + 16);
247
245 ioaddr = &host->ports[3]->ioaddr; 248 ioaddr = &host->ports[3]->ioaddr;
246 ioaddr->cmd_addr = iomap[2] + 8; 249 ioaddr->cmd_addr = iomap[2] + 8;
247 ioaddr->altstatus_addr = 250 ioaddr->altstatus_addr =
@@ -250,6 +253,13 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
250 ioaddr->bmdma_addr = iomap[4] + 24; 253 ioaddr->bmdma_addr = iomap[4] + 24;
251 hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5; 254 hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5;
252 ata_std_ports(ioaddr); 255 ata_std_ports(ioaddr);
256
257 ata_port_desc(host->ports[2],
258 "cmd 0x%llx ctl 0x%llx bmdma 0x%llx",
259 (unsigned long long)pci_resource_start(pdev, 2) + 9,
260 ((unsigned long long)pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4,
261 (unsigned long long)pci_resource_start(pdev, 4) + 24);
262
253 break; 263 break;
254 264
255 case uli_5289: 265 case uli_5289:
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 57fd30de8f..1dc9b4f2b2 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -57,7 +57,6 @@ enum {
57 SATA_CHAN_ENAB = 0x40, /* SATA channel enable */ 57 SATA_CHAN_ENAB = 0x40, /* SATA channel enable */
58 SATA_INT_GATE = 0x41, /* SATA interrupt gating */ 58 SATA_INT_GATE = 0x41, /* SATA interrupt gating */
59 SATA_NATIVE_MODE = 0x42, /* Native mode enable */ 59 SATA_NATIVE_MODE = 0x42, /* Native mode enable */
60 SATA_PATA_SHARING = 0x49, /* PATA/SATA sharing func ctrl */
61 PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */ 60 PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */
62 PATA_PIO_TIMING = 0xAB, /* PATA timing register */ 61 PATA_PIO_TIMING = 0xAB, /* PATA timing register */
63 62
@@ -68,7 +67,6 @@ enum {
68 NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4), 67 NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),
69 68
70 SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */ 69 SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */
71 SATA_2DEV = (1 << 5), /* SATA is master/slave */
72}; 70};
73 71
74static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 72static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -122,8 +120,6 @@ static struct scsi_host_template svia_sht = {
122}; 120};
123 121
124static const struct ata_port_operations vt6420_sata_ops = { 122static const struct ata_port_operations vt6420_sata_ops = {
125 .port_disable = ata_port_disable,
126
127 .tf_load = ata_tf_load, 123 .tf_load = ata_tf_load,
128 .tf_read = ata_tf_read, 124 .tf_read = ata_tf_read,
129 .check_status = ata_check_status, 125 .check_status = ata_check_status,
@@ -146,14 +142,11 @@ static const struct ata_port_operations vt6420_sata_ops = {
146 142
147 .irq_clear = ata_bmdma_irq_clear, 143 .irq_clear = ata_bmdma_irq_clear,
148 .irq_on = ata_irq_on, 144 .irq_on = ata_irq_on,
149 .irq_ack = ata_irq_ack,
150 145
151 .port_start = ata_port_start, 146 .port_start = ata_port_start,
152}; 147};
153 148
154static const struct ata_port_operations vt6421_pata_ops = { 149static const struct ata_port_operations vt6421_pata_ops = {
155 .port_disable = ata_port_disable,
156
157 .set_piomode = vt6421_set_pio_mode, 150 .set_piomode = vt6421_set_pio_mode,
158 .set_dmamode = vt6421_set_dma_mode, 151 .set_dmamode = vt6421_set_dma_mode,
159 152
@@ -180,14 +173,11 @@ static const struct ata_port_operations vt6421_pata_ops = {
180 173
181 .irq_clear = ata_bmdma_irq_clear, 174 .irq_clear = ata_bmdma_irq_clear,
182 .irq_on = ata_irq_on, 175 .irq_on = ata_irq_on,
183 .irq_ack = ata_irq_ack,
184 176
185 .port_start = ata_port_start, 177 .port_start = ata_port_start,
186}; 178};
187 179
188static const struct ata_port_operations vt6421_sata_ops = { 180static const struct ata_port_operations vt6421_sata_ops = {
189 .port_disable = ata_port_disable,
190
191 .tf_load = ata_tf_load, 181 .tf_load = ata_tf_load,
192 .tf_read = ata_tf_read, 182 .tf_read = ata_tf_read,
193 .check_status = ata_check_status, 183 .check_status = ata_check_status,
@@ -211,7 +201,6 @@ static const struct ata_port_operations vt6421_sata_ops = {
211 201
212 .irq_clear = ata_bmdma_irq_clear, 202 .irq_clear = ata_bmdma_irq_clear,
213 .irq_on = ata_irq_on, 203 .irq_on = ata_irq_on,
214 .irq_ack = ata_irq_ack,
215 204
216 .scr_read = svia_scr_read, 205 .scr_read = svia_scr_read,
217 .scr_write = svia_scr_write, 206 .scr_write = svia_scr_write,
@@ -276,7 +265,7 @@ static void svia_noop_freeze(struct ata_port *ap)
276 265
277/** 266/**
278 * vt6420_prereset - prereset for vt6420 267 * vt6420_prereset - prereset for vt6420
279 * @ap: target ATA port 268 * @link: target ATA link
280 * @deadline: deadline jiffies for the operation 269 * @deadline: deadline jiffies for the operation
281 * 270 *
282 * SCR registers on vt6420 are pieces of shit and may hang the 271 * SCR registers on vt6420 are pieces of shit and may hang the
@@ -294,9 +283,10 @@ static void svia_noop_freeze(struct ata_port *ap)
294 * RETURNS: 283 * RETURNS:
295 * 0 on success, -errno otherwise. 284 * 0 on success, -errno otherwise.
296 */ 285 */
297static int vt6420_prereset(struct ata_port *ap, unsigned long deadline) 286static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
298{ 287{
299 struct ata_eh_context *ehc = &ap->eh_context; 288 struct ata_port *ap = link->ap;
289 struct ata_eh_context *ehc = &ap->link.eh_context;
300 unsigned long timeout = jiffies + (HZ * 5); 290 unsigned long timeout = jiffies + (HZ * 5);
301 u32 sstatus, scontrol; 291 u32 sstatus, scontrol;
302 int online; 292 int online;
@@ -407,6 +397,9 @@ static void vt6421_init_addrs(struct ata_port *ap)
407 ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no); 397 ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);
408 398
409 ata_std_ports(ioaddr); 399 ata_std_ports(ioaddr);
400
401 ata_port_pbar_desc(ap, ap->port_no, -1, "port");
402 ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
410} 403}
411 404
412static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) 405static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
@@ -513,7 +506,6 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
513 struct ata_host *host; 506 struct ata_host *host;
514 int board_id = (int) ent->driver_data; 507 int board_id = (int) ent->driver_data;
515 const int *bar_sizes; 508 const int *bar_sizes;
516 u8 tmp8;
517 509
518 if (!printed_version++) 510 if (!printed_version++)
519 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 511 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -522,19 +514,10 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
522 if (rc) 514 if (rc)
523 return rc; 515 return rc;
524 516
525 if (board_id == vt6420) { 517 if (board_id == vt6420)
526 pci_read_config_byte(pdev, SATA_PATA_SHARING, &tmp8);
527 if (tmp8 & SATA_2DEV) {
528 dev_printk(KERN_ERR, &pdev->dev,
529 "SATA master/slave not supported (0x%x)\n",
530 (int) tmp8);
531 return -EIO;
532 }
533
534 bar_sizes = &svia_bar_sizes[0]; 518 bar_sizes = &svia_bar_sizes[0];
535 } else { 519 else
536 bar_sizes = &vt6421_bar_sizes[0]; 520 bar_sizes = &vt6421_bar_sizes[0];
537 }
538 521
539 for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++) 522 for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
540 if ((pci_resource_start(pdev, i) == 0) || 523 if ((pci_resource_start(pdev, i) == 0) ||
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 1920915dfa..0d9be16848 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -240,7 +240,7 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap)
240 return; 240 return;
241 } 241 }
242 242
243 qc = ata_qc_from_tag(ap, ap->active_tag); 243 qc = ata_qc_from_tag(ap, ap->link.active_tag);
244 if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING))) 244 if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
245 handled = ata_host_intr(ap, qc); 245 handled = ata_host_intr(ap, qc);
246 246
@@ -317,7 +317,6 @@ static struct scsi_host_template vsc_sata_sht = {
317 317
318 318
319static const struct ata_port_operations vsc_sata_ops = { 319static const struct ata_port_operations vsc_sata_ops = {
320 .port_disable = ata_port_disable,
321 .tf_load = vsc_sata_tf_load, 320 .tf_load = vsc_sata_tf_load,
322 .tf_read = vsc_sata_tf_read, 321 .tf_read = vsc_sata_tf_read,
323 .exec_command = ata_exec_command, 322 .exec_command = ata_exec_command,
@@ -336,7 +335,6 @@ static const struct ata_port_operations vsc_sata_ops = {
336 .post_internal_cmd = ata_bmdma_post_internal_cmd, 335 .post_internal_cmd = ata_bmdma_post_internal_cmd,
337 .irq_clear = ata_bmdma_irq_clear, 336 .irq_clear = ata_bmdma_irq_clear,
338 .irq_on = ata_irq_on, 337 .irq_on = ata_irq_on,
339 .irq_ack = ata_irq_ack,
340 .scr_read = vsc_sata_scr_read, 338 .scr_read = vsc_sata_scr_read,
341 .scr_write = vsc_sata_scr_write, 339 .scr_write = vsc_sata_scr_write,
342 .port_start = ata_port_start, 340 .port_start = ata_port_start,
@@ -408,9 +406,15 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
408 406
409 mmio_base = host->iomap[VSC_MMIO_BAR]; 407 mmio_base = host->iomap[VSC_MMIO_BAR];
410 408
411 for (i = 0; i < host->n_ports; i++) 409 for (i = 0; i < host->n_ports; i++) {
412 vsc_sata_setup_port(&host->ports[i]->ioaddr, 410 struct ata_port *ap = host->ports[i];
413 mmio_base + (i + 1) * VSC_SATA_PORT_OFFSET); 411 unsigned int offset = (i + 1) * VSC_SATA_PORT_OFFSET;
412
413 vsc_sata_setup_port(&ap->ioaddr, mmio_base + offset);
414
415 ata_port_pbar_desc(ap, VSC_MMIO_BAR, -1, "mmio");
416 ata_port_pbar_desc(ap, VSC_MMIO_BAR, offset, "port");
417 }
414 418
415 /* 419 /*
416 * Use 32 bit DMA mask, because 64 bit address support is poor. 420 * Use 32 bit DMA mask, because 64 bit address support is poor.
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 5d6312e334..d7da109c24 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -1,5 +1,13 @@
1menu "Generic Driver Options" 1menu "Generic Driver Options"
2 2
3config UEVENT_HELPER_PATH
4 string "path to uevent helper"
5 depends on HOTPLUG
6 default "/sbin/hotplug"
7 help
8 Path to uevent helper program forked by the kernel for
9 every uevent.
10
3config STANDALONE 11config STANDALONE
4 bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL 12 bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL
5 default y 13 default y
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 47eb02d9f1..10b2fb6c9c 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -18,8 +18,6 @@ extern int attribute_container_init(void);
18extern int bus_add_device(struct device * dev); 18extern int bus_add_device(struct device * dev);
19extern void bus_attach_device(struct device * dev); 19extern void bus_attach_device(struct device * dev);
20extern void bus_remove_device(struct device * dev); 20extern void bus_remove_device(struct device * dev);
21extern struct bus_type *get_bus(struct bus_type * bus);
22extern void put_bus(struct bus_type * bus);
23 21
24extern int bus_add_driver(struct device_driver *); 22extern int bus_add_driver(struct device_driver *);
25extern void bus_remove_driver(struct device_driver *); 23extern void bus_remove_driver(struct device_driver *);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 61c67526a6..9a19b071c5 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -30,6 +30,17 @@
30static int __must_check bus_rescan_devices_helper(struct device *dev, 30static int __must_check bus_rescan_devices_helper(struct device *dev,
31 void *data); 31 void *data);
32 32
33static struct bus_type *bus_get(struct bus_type *bus)
34{
35 return bus ? container_of(kset_get(&bus->subsys),
36 struct bus_type, subsys) : NULL;
37}
38
39static void bus_put(struct bus_type *bus)
40{
41 kset_put(&bus->subsys);
42}
43
33static ssize_t 44static ssize_t
34drv_attr_show(struct kobject * kobj, struct attribute * attr, char * buf) 45drv_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
35{ 46{
@@ -78,7 +89,7 @@ static void driver_release(struct kobject * kobj)
78 */ 89 */
79} 90}
80 91
81static struct kobj_type ktype_driver = { 92static struct kobj_type driver_ktype = {
82 .sysfs_ops = &driver_sysfs_ops, 93 .sysfs_ops = &driver_sysfs_ops,
83 .release = driver_release, 94 .release = driver_release,
84}; 95};
@@ -122,9 +133,9 @@ static struct sysfs_ops bus_sysfs_ops = {
122int bus_create_file(struct bus_type * bus, struct bus_attribute * attr) 133int bus_create_file(struct bus_type * bus, struct bus_attribute * attr)
123{ 134{
124 int error; 135 int error;
125 if (get_bus(bus)) { 136 if (bus_get(bus)) {
126 error = sysfs_create_file(&bus->subsys.kobj, &attr->attr); 137 error = sysfs_create_file(&bus->subsys.kobj, &attr->attr);
127 put_bus(bus); 138 bus_put(bus);
128 } else 139 } else
129 error = -EINVAL; 140 error = -EINVAL;
130 return error; 141 return error;
@@ -132,9 +143,9 @@ int bus_create_file(struct bus_type * bus, struct bus_attribute * attr)
132 143
133void bus_remove_file(struct bus_type * bus, struct bus_attribute * attr) 144void bus_remove_file(struct bus_type * bus, struct bus_attribute * attr)
134{ 145{
135 if (get_bus(bus)) { 146 if (bus_get(bus)) {
136 sysfs_remove_file(&bus->subsys.kobj, &attr->attr); 147 sysfs_remove_file(&bus->subsys.kobj, &attr->attr);
137 put_bus(bus); 148 bus_put(bus);
138 } 149 }
139} 150}
140 151
@@ -172,7 +183,7 @@ static int driver_helper(struct device *dev, void *data)
172static ssize_t driver_unbind(struct device_driver *drv, 183static ssize_t driver_unbind(struct device_driver *drv,
173 const char *buf, size_t count) 184 const char *buf, size_t count)
174{ 185{
175 struct bus_type *bus = get_bus(drv->bus); 186 struct bus_type *bus = bus_get(drv->bus);
176 struct device *dev; 187 struct device *dev;
177 int err = -ENODEV; 188 int err = -ENODEV;
178 189
@@ -186,7 +197,7 @@ static ssize_t driver_unbind(struct device_driver *drv,
186 err = count; 197 err = count;
187 } 198 }
188 put_device(dev); 199 put_device(dev);
189 put_bus(bus); 200 bus_put(bus);
190 return err; 201 return err;
191} 202}
192static DRIVER_ATTR(unbind, S_IWUSR, NULL, driver_unbind); 203static DRIVER_ATTR(unbind, S_IWUSR, NULL, driver_unbind);
@@ -199,7 +210,7 @@ static DRIVER_ATTR(unbind, S_IWUSR, NULL, driver_unbind);
199static ssize_t driver_bind(struct device_driver *drv, 210static ssize_t driver_bind(struct device_driver *drv,
200 const char *buf, size_t count) 211 const char *buf, size_t count)
201{ 212{
202 struct bus_type *bus = get_bus(drv->bus); 213 struct bus_type *bus = bus_get(drv->bus);
203 struct device *dev; 214 struct device *dev;
204 int err = -ENODEV; 215 int err = -ENODEV;
205 216
@@ -219,7 +230,7 @@ static ssize_t driver_bind(struct device_driver *drv,
219 err = -ENODEV; 230 err = -ENODEV;
220 } 231 }
221 put_device(dev); 232 put_device(dev);
222 put_bus(bus); 233 bus_put(bus);
223 return err; 234 return err;
224} 235}
225static DRIVER_ATTR(bind, S_IWUSR, NULL, driver_bind); 236static DRIVER_ATTR(bind, S_IWUSR, NULL, driver_bind);
@@ -430,7 +441,7 @@ static inline void remove_deprecated_bus_links(struct device *dev) { }
430 */ 441 */
431int bus_add_device(struct device * dev) 442int bus_add_device(struct device * dev)
432{ 443{
433 struct bus_type * bus = get_bus(dev->bus); 444 struct bus_type * bus = bus_get(dev->bus);
434 int error = 0; 445 int error = 0;
435 446
436 if (bus) { 447 if (bus) {
@@ -459,7 +470,7 @@ out_subsys:
459out_id: 470out_id:
460 device_remove_attrs(bus, dev); 471 device_remove_attrs(bus, dev);
461out_put: 472out_put:
462 put_bus(dev->bus); 473 bus_put(dev->bus);
463 return error; 474 return error;
464} 475}
465 476
@@ -509,7 +520,7 @@ void bus_remove_device(struct device * dev)
509 } 520 }
510 pr_debug("bus %s: remove device %s\n", dev->bus->name, dev->bus_id); 521 pr_debug("bus %s: remove device %s\n", dev->bus->name, dev->bus_id);
511 device_release_driver(dev); 522 device_release_driver(dev);
512 put_bus(dev->bus); 523 bus_put(dev->bus);
513 } 524 }
514} 525}
515 526
@@ -568,32 +579,29 @@ static void remove_bind_files(struct device_driver *drv)
568 driver_remove_file(drv, &driver_attr_unbind); 579 driver_remove_file(drv, &driver_attr_unbind);
569} 580}
570 581
582static BUS_ATTR(drivers_probe, S_IWUSR, NULL, store_drivers_probe);
583static BUS_ATTR(drivers_autoprobe, S_IWUSR | S_IRUGO,
584 show_drivers_autoprobe, store_drivers_autoprobe);
585
571static int add_probe_files(struct bus_type *bus) 586static int add_probe_files(struct bus_type *bus)
572{ 587{
573 int retval; 588 int retval;
574 589
575 bus->drivers_probe_attr.attr.name = "drivers_probe"; 590 retval = bus_create_file(bus, &bus_attr_drivers_probe);
576 bus->drivers_probe_attr.attr.mode = S_IWUSR;
577 bus->drivers_probe_attr.store = store_drivers_probe;
578 retval = bus_create_file(bus, &bus->drivers_probe_attr);
579 if (retval) 591 if (retval)
580 goto out; 592 goto out;
581 593
582 bus->drivers_autoprobe_attr.attr.name = "drivers_autoprobe"; 594 retval = bus_create_file(bus, &bus_attr_drivers_autoprobe);
583 bus->drivers_autoprobe_attr.attr.mode = S_IWUSR | S_IRUGO;
584 bus->drivers_autoprobe_attr.show = show_drivers_autoprobe;
585 bus->drivers_autoprobe_attr.store = store_drivers_autoprobe;
586 retval = bus_create_file(bus, &bus->drivers_autoprobe_attr);
587 if (retval) 595 if (retval)
588 bus_remove_file(bus, &bus->drivers_probe_attr); 596 bus_remove_file(bus, &bus_attr_drivers_probe);
589out: 597out:
590 return retval; 598 return retval;
591} 599}
592 600
593static void remove_probe_files(struct bus_type *bus) 601static void remove_probe_files(struct bus_type *bus)
594{ 602{
595 bus_remove_file(bus, &bus->drivers_autoprobe_attr); 603 bus_remove_file(bus, &bus_attr_drivers_autoprobe);
596 bus_remove_file(bus, &bus->drivers_probe_attr); 604 bus_remove_file(bus, &bus_attr_drivers_probe);
597} 605}
598#else 606#else
599static inline int add_bind_files(struct device_driver *drv) { return 0; } 607static inline int add_bind_files(struct device_driver *drv) { return 0; }
@@ -602,6 +610,17 @@ static inline int add_probe_files(struct bus_type *bus) { return 0; }
602static inline void remove_probe_files(struct bus_type *bus) {} 610static inline void remove_probe_files(struct bus_type *bus) {}
603#endif 611#endif
604 612
613static ssize_t driver_uevent_store(struct device_driver *drv,
614 const char *buf, size_t count)
615{
616 enum kobject_action action;
617
618 if (kobject_action_type(buf, count, &action) == 0)
619 kobject_uevent(&drv->kobj, action);
620 return count;
621}
622static DRIVER_ATTR(uevent, S_IWUSR, NULL, driver_uevent_store);
623
605/** 624/**
606 * bus_add_driver - Add a driver to the bus. 625 * bus_add_driver - Add a driver to the bus.
607 * @drv: driver. 626 * @drv: driver.
@@ -609,7 +628,7 @@ static inline void remove_probe_files(struct bus_type *bus) {}
609 */ 628 */
610int bus_add_driver(struct device_driver *drv) 629int bus_add_driver(struct device_driver *drv)
611{ 630{
612 struct bus_type * bus = get_bus(drv->bus); 631 struct bus_type * bus = bus_get(drv->bus);
613 int error = 0; 632 int error = 0;
614 633
615 if (!bus) 634 if (!bus)
@@ -632,6 +651,11 @@ int bus_add_driver(struct device_driver *drv)
632 klist_add_tail(&drv->knode_bus, &bus->klist_drivers); 651 klist_add_tail(&drv->knode_bus, &bus->klist_drivers);
633 module_add_driver(drv->owner, drv); 652 module_add_driver(drv->owner, drv);
634 653
654 error = driver_create_file(drv, &driver_attr_uevent);
655 if (error) {
656 printk(KERN_ERR "%s: uevent attr (%s) failed\n",
657 __FUNCTION__, drv->name);
658 }
635 error = driver_add_attrs(bus, drv); 659 error = driver_add_attrs(bus, drv);
636 if (error) { 660 if (error) {
637 /* How the hell do we get out of this pickle? Give up */ 661 /* How the hell do we get out of this pickle? Give up */
@@ -649,7 +673,7 @@ int bus_add_driver(struct device_driver *drv)
649out_unregister: 673out_unregister:
650 kobject_unregister(&drv->kobj); 674 kobject_unregister(&drv->kobj);
651out_put_bus: 675out_put_bus:
652 put_bus(bus); 676 bus_put(bus);
653 return error; 677 return error;
654} 678}
655 679
@@ -669,12 +693,13 @@ void bus_remove_driver(struct device_driver * drv)
669 693
670 remove_bind_files(drv); 694 remove_bind_files(drv);
671 driver_remove_attrs(drv->bus, drv); 695 driver_remove_attrs(drv->bus, drv);
696 driver_remove_file(drv, &driver_attr_uevent);
672 klist_remove(&drv->knode_bus); 697 klist_remove(&drv->knode_bus);
673 pr_debug("bus %s: remove driver %s\n", drv->bus->name, drv->name); 698 pr_debug("bus %s: remove driver %s\n", drv->bus->name, drv->name);
674 driver_detach(drv); 699 driver_detach(drv);
675 module_remove_driver(drv); 700 module_remove_driver(drv);
676 kobject_unregister(&drv->kobj); 701 kobject_unregister(&drv->kobj);
677 put_bus(drv->bus); 702 bus_put(drv->bus);
678} 703}
679 704
680 705
@@ -729,18 +754,6 @@ int device_reprobe(struct device *dev)
729} 754}
730EXPORT_SYMBOL_GPL(device_reprobe); 755EXPORT_SYMBOL_GPL(device_reprobe);
731 756
732struct bus_type *get_bus(struct bus_type *bus)
733{
734 return bus ? container_of(subsys_get(&bus->subsys),
735 struct bus_type, subsys) : NULL;
736}
737
738void put_bus(struct bus_type * bus)
739{
740 subsys_put(&bus->subsys);
741}
742
743
744/** 757/**
745 * find_bus - locate bus by name. 758 * find_bus - locate bus by name.
746 * @name: name of bus. 759 * @name: name of bus.
@@ -808,6 +821,17 @@ static void klist_devices_put(struct klist_node *n)
808 put_device(dev); 821 put_device(dev);
809} 822}
810 823
824static ssize_t bus_uevent_store(struct bus_type *bus,
825 const char *buf, size_t count)
826{
827 enum kobject_action action;
828
829 if (kobject_action_type(buf, count, &action) == 0)
830 kobject_uevent(&bus->subsys.kobj, action);
831 return count;
832}
833static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
834
811/** 835/**
812 * bus_register - register a bus with the system. 836 * bus_register - register a bus with the system.
813 * @bus: bus. 837 * @bus: bus.
@@ -826,11 +850,16 @@ int bus_register(struct bus_type * bus)
826 if (retval) 850 if (retval)
827 goto out; 851 goto out;
828 852
829 subsys_set_kset(bus, bus_subsys); 853 bus->subsys.kobj.kset = &bus_subsys;
854
830 retval = subsystem_register(&bus->subsys); 855 retval = subsystem_register(&bus->subsys);
831 if (retval) 856 if (retval)
832 goto out; 857 goto out;
833 858
859 retval = bus_create_file(bus, &bus_attr_uevent);
860 if (retval)
861 goto bus_uevent_fail;
862
834 kobject_set_name(&bus->devices.kobj, "devices"); 863 kobject_set_name(&bus->devices.kobj, "devices");
835 bus->devices.kobj.parent = &bus->subsys.kobj; 864 bus->devices.kobj.parent = &bus->subsys.kobj;
836 retval = kset_register(&bus->devices); 865 retval = kset_register(&bus->devices);
@@ -839,7 +868,7 @@ int bus_register(struct bus_type * bus)
839 868
840 kobject_set_name(&bus->drivers.kobj, "drivers"); 869 kobject_set_name(&bus->drivers.kobj, "drivers");
841 bus->drivers.kobj.parent = &bus->subsys.kobj; 870 bus->drivers.kobj.parent = &bus->subsys.kobj;
842 bus->drivers.ktype = &ktype_driver; 871 bus->drivers.ktype = &driver_ktype;
843 retval = kset_register(&bus->drivers); 872 retval = kset_register(&bus->drivers);
844 if (retval) 873 if (retval)
845 goto bus_drivers_fail; 874 goto bus_drivers_fail;
@@ -866,6 +895,8 @@ bus_probe_files_fail:
866bus_drivers_fail: 895bus_drivers_fail:
867 kset_unregister(&bus->devices); 896 kset_unregister(&bus->devices);
868bus_devices_fail: 897bus_devices_fail:
898 bus_remove_file(bus, &bus_attr_uevent);
899bus_uevent_fail:
869 subsystem_unregister(&bus->subsys); 900 subsystem_unregister(&bus->subsys);
870out: 901out:
871 return retval; 902 return retval;
@@ -876,7 +907,7 @@ out:
876 * @bus: bus. 907 * @bus: bus.
877 * 908 *
878 * Unregister the child subsystems and the bus itself. 909 * Unregister the child subsystems and the bus itself.
879 * Finally, we call put_bus() to release the refcount 910 * Finally, we call bus_put() to release the refcount
880 */ 911 */
881void bus_unregister(struct bus_type * bus) 912void bus_unregister(struct bus_type * bus)
882{ 913{
@@ -885,6 +916,7 @@ void bus_unregister(struct bus_type * bus)
885 remove_probe_files(bus); 916 remove_probe_files(bus);
886 kset_unregister(&bus->drivers); 917 kset_unregister(&bus->drivers);
887 kset_unregister(&bus->devices); 918 kset_unregister(&bus->devices);
919 bus_remove_file(bus, &bus_attr_uevent);
888 subsystem_unregister(&bus->subsys); 920 subsystem_unregister(&bus->subsys);
889} 921}
890 922
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 4d2222618b..a863bb091e 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -65,13 +65,13 @@ static struct sysfs_ops class_sysfs_ops = {
65 .store = class_attr_store, 65 .store = class_attr_store,
66}; 66};
67 67
68static struct kobj_type ktype_class = { 68static struct kobj_type class_ktype = {
69 .sysfs_ops = &class_sysfs_ops, 69 .sysfs_ops = &class_sysfs_ops,
70 .release = class_release, 70 .release = class_release,
71}; 71};
72 72
73/* Hotplug events for classes go to the class_obj subsys */ 73/* Hotplug events for classes go to the class_obj subsys */
74static decl_subsys(class, &ktype_class, NULL); 74static decl_subsys(class, &class_ktype, NULL);
75 75
76 76
77int class_create_file(struct class * cls, const struct class_attribute * attr) 77int class_create_file(struct class * cls, const struct class_attribute * attr)
@@ -93,14 +93,14 @@ void class_remove_file(struct class * cls, const struct class_attribute * attr)
93static struct class *class_get(struct class *cls) 93static struct class *class_get(struct class *cls)
94{ 94{
95 if (cls) 95 if (cls)
96 return container_of(subsys_get(&cls->subsys), struct class, subsys); 96 return container_of(kset_get(&cls->subsys), struct class, subsys);
97 return NULL; 97 return NULL;
98} 98}
99 99
100static void class_put(struct class * cls) 100static void class_put(struct class * cls)
101{ 101{
102 if (cls) 102 if (cls)
103 subsys_put(&cls->subsys); 103 kset_put(&cls->subsys);
104} 104}
105 105
106 106
@@ -149,7 +149,7 @@ int class_register(struct class * cls)
149 if (error) 149 if (error)
150 return error; 150 return error;
151 151
152 subsys_set_kset(cls, class_subsys); 152 cls->subsys.kobj.kset = &class_subsys;
153 153
154 error = subsystem_register(&cls->subsys); 154 error = subsystem_register(&cls->subsys);
155 if (!error) { 155 if (!error) {
@@ -180,8 +180,7 @@ static void class_device_create_release(struct class_device *class_dev)
180 180
181/* needed to allow these devices to have parent class devices */ 181/* needed to allow these devices to have parent class devices */
182static int class_device_create_uevent(struct class_device *class_dev, 182static int class_device_create_uevent(struct class_device *class_dev,
183 char **envp, int num_envp, 183 struct kobj_uevent_env *env)
184 char *buffer, int buffer_size)
185{ 184{
186 pr_debug("%s called for %s\n", __FUNCTION__, class_dev->class_id); 185 pr_debug("%s called for %s\n", __FUNCTION__, class_dev->class_id);
187 return 0; 186 return 0;
@@ -324,7 +323,7 @@ static void class_dev_release(struct kobject * kobj)
324 } 323 }
325} 324}
326 325
327static struct kobj_type ktype_class_device = { 326static struct kobj_type class_device_ktype = {
328 .sysfs_ops = &class_dev_sysfs_ops, 327 .sysfs_ops = &class_dev_sysfs_ops,
329 .release = class_dev_release, 328 .release = class_dev_release,
330}; 329};
@@ -333,7 +332,7 @@ static int class_uevent_filter(struct kset *kset, struct kobject *kobj)
333{ 332{
334 struct kobj_type *ktype = get_ktype(kobj); 333 struct kobj_type *ktype = get_ktype(kobj);
335 334
336 if (ktype == &ktype_class_device) { 335 if (ktype == &class_device_ktype) {
337 struct class_device *class_dev = to_class_dev(kobj); 336 struct class_device *class_dev = to_class_dev(kobj);
338 if (class_dev->class) 337 if (class_dev->class)
339 return 1; 338 return 1;
@@ -403,64 +402,43 @@ static void remove_deprecated_class_device_links(struct class_device *cd)
403{ } 402{ }
404#endif 403#endif
405 404
406static int class_uevent(struct kset *kset, struct kobject *kobj, char **envp, 405static int class_uevent(struct kset *kset, struct kobject *kobj,
407 int num_envp, char *buffer, int buffer_size) 406 struct kobj_uevent_env *env)
408{ 407{
409 struct class_device *class_dev = to_class_dev(kobj); 408 struct class_device *class_dev = to_class_dev(kobj);
410 struct device *dev = class_dev->dev; 409 struct device *dev = class_dev->dev;
411 int i = 0;
412 int length = 0;
413 int retval = 0; 410 int retval = 0;
414 411
415 pr_debug("%s - name = %s\n", __FUNCTION__, class_dev->class_id); 412 pr_debug("%s - name = %s\n", __FUNCTION__, class_dev->class_id);
416 413
417 if (MAJOR(class_dev->devt)) { 414 if (MAJOR(class_dev->devt)) {
418 add_uevent_var(envp, num_envp, &i, 415 add_uevent_var(env, "MAJOR=%u", MAJOR(class_dev->devt));
419 buffer, buffer_size, &length,
420 "MAJOR=%u", MAJOR(class_dev->devt));
421 416
422 add_uevent_var(envp, num_envp, &i, 417 add_uevent_var(env, "MINOR=%u", MINOR(class_dev->devt));
423 buffer, buffer_size, &length,
424 "MINOR=%u", MINOR(class_dev->devt));
425 } 418 }
426 419
427 if (dev) { 420 if (dev) {
428 const char *path = kobject_get_path(&dev->kobj, GFP_KERNEL); 421 const char *path = kobject_get_path(&dev->kobj, GFP_KERNEL);
429 if (path) { 422 if (path) {
430 add_uevent_var(envp, num_envp, &i, 423 add_uevent_var(env, "PHYSDEVPATH=%s", path);
431 buffer, buffer_size, &length,
432 "PHYSDEVPATH=%s", path);
433 kfree(path); 424 kfree(path);
434 } 425 }
435 426
436 if (dev->bus) 427 if (dev->bus)
437 add_uevent_var(envp, num_envp, &i, 428 add_uevent_var(env, "PHYSDEVBUS=%s", dev->bus->name);
438 buffer, buffer_size, &length,
439 "PHYSDEVBUS=%s", dev->bus->name);
440 429
441 if (dev->driver) 430 if (dev->driver)
442 add_uevent_var(envp, num_envp, &i, 431 add_uevent_var(env, "PHYSDEVDRIVER=%s", dev->driver->name);
443 buffer, buffer_size, &length,
444 "PHYSDEVDRIVER=%s", dev->driver->name);
445 } 432 }
446 433
447 /* terminate, set to next free slot, shrink available space */
448 envp[i] = NULL;
449 envp = &envp[i];
450 num_envp -= i;
451 buffer = &buffer[length];
452 buffer_size -= length;
453
454 if (class_dev->uevent) { 434 if (class_dev->uevent) {
455 /* have the class device specific function add its stuff */ 435 /* have the class device specific function add its stuff */
456 retval = class_dev->uevent(class_dev, envp, num_envp, 436 retval = class_dev->uevent(class_dev, env);
457 buffer, buffer_size);
458 if (retval) 437 if (retval)
459 pr_debug("class_dev->uevent() returned %d\n", retval); 438 pr_debug("class_dev->uevent() returned %d\n", retval);
460 } else if (class_dev->class->uevent) { 439 } else if (class_dev->class->uevent) {
461 /* have the class specific function add its stuff */ 440 /* have the class specific function add its stuff */
462 retval = class_dev->class->uevent(class_dev, envp, num_envp, 441 retval = class_dev->class->uevent(class_dev, env);
463 buffer, buffer_size);
464 if (retval) 442 if (retval)
465 pr_debug("class->uevent() returned %d\n", retval); 443 pr_debug("class->uevent() returned %d\n", retval);
466 } 444 }
@@ -474,7 +452,7 @@ static struct kset_uevent_ops class_uevent_ops = {
474 .uevent = class_uevent, 452 .uevent = class_uevent,
475}; 453};
476 454
477static decl_subsys(class_obj, &ktype_class_device, &class_uevent_ops); 455static decl_subsys(class_obj, &class_device_ktype, &class_uevent_ops);
478 456
479 457
480static int class_device_add_attrs(struct class_device * cd) 458static int class_device_add_attrs(struct class_device * cd)
@@ -883,7 +861,7 @@ int __init classes_init(void)
883 861
884 /* ick, this is ugly, the things we go through to keep from showing up 862 /* ick, this is ugly, the things we go through to keep from showing up
885 * in sysfs... */ 863 * in sysfs... */
886 subsystem_init(&class_obj_subsys); 864 kset_init(&class_obj_subsys);
887 if (!class_obj_subsys.kobj.parent) 865 if (!class_obj_subsys.kobj.parent)
888 class_obj_subsys.kobj.parent = &class_obj_subsys.kobj; 866 class_obj_subsys.kobj.parent = &class_obj_subsys.kobj;
889 return 0; 867 return 0;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index ec86d6fc23..c1343414d2 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -108,7 +108,7 @@ static void device_release(struct kobject * kobj)
108 } 108 }
109} 109}
110 110
111static struct kobj_type ktype_device = { 111static struct kobj_type device_ktype = {
112 .release = device_release, 112 .release = device_release,
113 .sysfs_ops = &dev_sysfs_ops, 113 .sysfs_ops = &dev_sysfs_ops,
114}; 114};
@@ -118,7 +118,7 @@ static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
118{ 118{
119 struct kobj_type *ktype = get_ktype(kobj); 119 struct kobj_type *ktype = get_ktype(kobj);
120 120
121 if (ktype == &ktype_device) { 121 if (ktype == &device_ktype) {
122 struct device *dev = to_dev(kobj); 122 struct device *dev = to_dev(kobj);
123 if (dev->uevent_suppress) 123 if (dev->uevent_suppress)
124 return 0; 124 return 0;
@@ -141,33 +141,23 @@ static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
141 return NULL; 141 return NULL;
142} 142}
143 143
144static int dev_uevent(struct kset *kset, struct kobject *kobj, char **envp, 144static int dev_uevent(struct kset *kset, struct kobject *kobj,
145 int num_envp, char *buffer, int buffer_size) 145 struct kobj_uevent_env *env)
146{ 146{
147 struct device *dev = to_dev(kobj); 147 struct device *dev = to_dev(kobj);
148 int i = 0;
149 int length = 0;
150 int retval = 0; 148 int retval = 0;
151 149
152 /* add the major/minor if present */ 150 /* add the major/minor if present */
153 if (MAJOR(dev->devt)) { 151 if (MAJOR(dev->devt)) {
154 add_uevent_var(envp, num_envp, &i, 152 add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
155 buffer, buffer_size, &length, 153 add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
156 "MAJOR=%u", MAJOR(dev->devt));
157 add_uevent_var(envp, num_envp, &i,
158 buffer, buffer_size, &length,
159 "MINOR=%u", MINOR(dev->devt));
160 } 154 }
161 155
162 if (dev->type && dev->type->name) 156 if (dev->type && dev->type->name)
163 add_uevent_var(envp, num_envp, &i, 157 add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
164 buffer, buffer_size, &length,
165 "DEVTYPE=%s", dev->type->name);
166 158
167 if (dev->driver) 159 if (dev->driver)
168 add_uevent_var(envp, num_envp, &i, 160 add_uevent_var(env, "DRIVER=%s", dev->driver->name);
169 buffer, buffer_size, &length,
170 "DRIVER=%s", dev->driver->name);
171 161
172#ifdef CONFIG_SYSFS_DEPRECATED 162#ifdef CONFIG_SYSFS_DEPRECATED
173 if (dev->class) { 163 if (dev->class) {
@@ -181,59 +171,43 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj, char **envp,
181 171
182 path = kobject_get_path(&parent->kobj, GFP_KERNEL); 172 path = kobject_get_path(&parent->kobj, GFP_KERNEL);
183 if (path) { 173 if (path) {
184 add_uevent_var(envp, num_envp, &i, 174 add_uevent_var(env, "PHYSDEVPATH=%s", path);
185 buffer, buffer_size, &length,
186 "PHYSDEVPATH=%s", path);
187 kfree(path); 175 kfree(path);
188 } 176 }
189 177
190 add_uevent_var(envp, num_envp, &i, 178 add_uevent_var(env, "PHYSDEVBUS=%s", parent->bus->name);
191 buffer, buffer_size, &length,
192 "PHYSDEVBUS=%s", parent->bus->name);
193 179
194 if (parent->driver) 180 if (parent->driver)
195 add_uevent_var(envp, num_envp, &i, 181 add_uevent_var(env, "PHYSDEVDRIVER=%s",
196 buffer, buffer_size, &length, 182 parent->driver->name);
197 "PHYSDEVDRIVER=%s", parent->driver->name);
198 } 183 }
199 } else if (dev->bus) { 184 } else if (dev->bus) {
200 add_uevent_var(envp, num_envp, &i, 185 add_uevent_var(env, "PHYSDEVBUS=%s", dev->bus->name);
201 buffer, buffer_size, &length,
202 "PHYSDEVBUS=%s", dev->bus->name);
203 186
204 if (dev->driver) 187 if (dev->driver)
205 add_uevent_var(envp, num_envp, &i, 188 add_uevent_var(env, "PHYSDEVDRIVER=%s", dev->driver->name);
206 buffer, buffer_size, &length,
207 "PHYSDEVDRIVER=%s", dev->driver->name);
208 } 189 }
209#endif 190#endif
210 191
211 /* terminate, set to next free slot, shrink available space */ 192 /* have the bus specific function add its stuff */
212 envp[i] = NULL;
213 envp = &envp[i];
214 num_envp -= i;
215 buffer = &buffer[length];
216 buffer_size -= length;
217
218 if (dev->bus && dev->bus->uevent) { 193 if (dev->bus && dev->bus->uevent) {
219 /* have the bus specific function add its stuff */ 194 retval = dev->bus->uevent(dev, env);
220 retval = dev->bus->uevent(dev, envp, num_envp, buffer, buffer_size);
221 if (retval) 195 if (retval)
222 pr_debug ("%s: bus uevent() returned %d\n", 196 pr_debug ("%s: bus uevent() returned %d\n",
223 __FUNCTION__, retval); 197 __FUNCTION__, retval);
224 } 198 }
225 199
200 /* have the class specific function add its stuff */
226 if (dev->class && dev->class->dev_uevent) { 201 if (dev->class && dev->class->dev_uevent) {
227 /* have the class specific function add its stuff */ 202 retval = dev->class->dev_uevent(dev, env);
228 retval = dev->class->dev_uevent(dev, envp, num_envp, buffer, buffer_size);
229 if (retval) 203 if (retval)
230 pr_debug("%s: class uevent() returned %d\n", 204 pr_debug("%s: class uevent() returned %d\n",
231 __FUNCTION__, retval); 205 __FUNCTION__, retval);
232 } 206 }
233 207
208 /* have the device type specific fuction add its stuff */
234 if (dev->type && dev->type->uevent) { 209 if (dev->type && dev->type->uevent) {
235 /* have the device type specific fuction add its stuff */ 210 retval = dev->type->uevent(dev, env);
236 retval = dev->type->uevent(dev, envp, num_envp, buffer, buffer_size);
237 if (retval) 211 if (retval)
238 pr_debug("%s: dev_type uevent() returned %d\n", 212 pr_debug("%s: dev_type uevent() returned %d\n",
239 __FUNCTION__, retval); 213 __FUNCTION__, retval);
@@ -253,22 +227,18 @@ static ssize_t show_uevent(struct device *dev, struct device_attribute *attr,
253{ 227{
254 struct kobject *top_kobj; 228 struct kobject *top_kobj;
255 struct kset *kset; 229 struct kset *kset;
256 char *envp[32]; 230 struct kobj_uevent_env *env = NULL;
257 char *data = NULL;
258 char *pos;
259 int i; 231 int i;
260 size_t count = 0; 232 size_t count = 0;
261 int retval; 233 int retval;
262 234
263 /* search the kset, the device belongs to */ 235 /* search the kset, the device belongs to */
264 top_kobj = &dev->kobj; 236 top_kobj = &dev->kobj;
265 if (!top_kobj->kset && top_kobj->parent) { 237 while (!top_kobj->kset && top_kobj->parent)
266 do { 238 top_kobj = top_kobj->parent;
267 top_kobj = top_kobj->parent;
268 } while (!top_kobj->kset && top_kobj->parent);
269 }
270 if (!top_kobj->kset) 239 if (!top_kobj->kset)
271 goto out; 240 goto out;
241
272 kset = top_kobj->kset; 242 kset = top_kobj->kset;
273 if (!kset->uevent_ops || !kset->uevent_ops->uevent) 243 if (!kset->uevent_ops || !kset->uevent_ops->uevent)
274 goto out; 244 goto out;
@@ -278,43 +248,29 @@ static ssize_t show_uevent(struct device *dev, struct device_attribute *attr,
278 if (!kset->uevent_ops->filter(kset, &dev->kobj)) 248 if (!kset->uevent_ops->filter(kset, &dev->kobj))
279 goto out; 249 goto out;
280 250
281 data = (char *)get_zeroed_page(GFP_KERNEL); 251 env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
282 if (!data) 252 if (!env)
283 return -ENOMEM; 253 return -ENOMEM;
284 254
285 /* let the kset specific function add its keys */ 255 /* let the kset specific function add its keys */
286 pos = data; 256 retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
287 memset(envp, 0, sizeof(envp));
288 retval = kset->uevent_ops->uevent(kset, &dev->kobj,
289 envp, ARRAY_SIZE(envp),
290 pos, PAGE_SIZE);
291 if (retval) 257 if (retval)
292 goto out; 258 goto out;
293 259
294 /* copy keys to file */ 260 /* copy keys to file */
295 for (i = 0; envp[i]; i++) { 261 for (i = 0; i < env->envp_idx; i++)
296 pos = &buf[count]; 262 count += sprintf(&buf[count], "%s\n", env->envp[i]);
297 count += sprintf(pos, "%s\n", envp[i]);
298 }
299out: 263out:
300 free_page((unsigned long)data); 264 kfree(env);
301 return count; 265 return count;
302} 266}
303 267
304static ssize_t store_uevent(struct device *dev, struct device_attribute *attr, 268static ssize_t store_uevent(struct device *dev, struct device_attribute *attr,
305 const char *buf, size_t count) 269 const char *buf, size_t count)
306{ 270{
307 size_t len = count;
308 enum kobject_action action; 271 enum kobject_action action;
309 272
310 if (len && buf[len-1] == '\n') 273 if (kobject_action_type(buf, count, &action) == 0) {
311 len--;
312
313 for (action = 0; action < KOBJ_MAX; action++) {
314 if (strncmp(kobject_actions[action], buf, len) != 0)
315 continue;
316 if (kobject_actions[action][len] != '\0')
317 continue;
318 kobject_uevent(&dev->kobj, action); 274 kobject_uevent(&dev->kobj, action);
319 goto out; 275 goto out;
320 } 276 }
@@ -449,7 +405,7 @@ static struct device_attribute devt_attr =
449 * devices_subsys - structure to be registered with kobject core. 405 * devices_subsys - structure to be registered with kobject core.
450 */ 406 */
451 407
452decl_subsys(devices, &ktype_device, &device_uevent_ops); 408decl_subsys(devices, &device_ktype, &device_uevent_ops);
453 409
454 410
455/** 411/**
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index b24efd4e3e..0295855a3e 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -88,19 +88,14 @@ static CLASS_ATTR(timeout, 0644, firmware_timeout_show, firmware_timeout_store);
88 88
89static void fw_dev_release(struct device *dev); 89static void fw_dev_release(struct device *dev);
90 90
91static int firmware_uevent(struct device *dev, char **envp, int num_envp, 91static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
92 char *buffer, int buffer_size)
93{ 92{
94 struct firmware_priv *fw_priv = dev_get_drvdata(dev); 93 struct firmware_priv *fw_priv = dev_get_drvdata(dev);
95 int i = 0, len = 0;
96 94
97 if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, 95 if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->fw_id))
98 "FIRMWARE=%s", fw_priv->fw_id))
99 return -ENOMEM; 96 return -ENOMEM;
100 if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, 97 if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
101 "TIMEOUT=%i", loading_timeout))
102 return -ENOMEM; 98 return -ENOMEM;
103 envp[i] = NULL;
104 99
105 return 0; 100 return 0;
106} 101}
@@ -297,8 +292,7 @@ firmware_class_timeout(u_long data)
297 292
298static inline void fw_setup_device_id(struct device *f_dev, struct device *dev) 293static inline void fw_setup_device_id(struct device *f_dev, struct device *dev)
299{ 294{
300 /* XXX warning we should watch out for name collisions */ 295 snprintf(f_dev->bus_id, BUS_ID_SIZE, "firmware-%s", dev->bus_id);
301 strlcpy(f_dev->bus_id, dev->bus_id, BUS_ID_SIZE);
302} 296}
303 297
304static int fw_register_device(struct device **dev_p, const char *fw_name, 298static int fw_register_device(struct device **dev_p, const char *fw_name,
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 74b96795d2..cb99daeae9 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -34,8 +34,7 @@ static const char *memory_uevent_name(struct kset *kset, struct kobject *kobj)
34 return MEMORY_CLASS_NAME; 34 return MEMORY_CLASS_NAME;
35} 35}
36 36
37static int memory_uevent(struct kset *kset, struct kobject *kobj, char **envp, 37static int memory_uevent(struct kset *kset, struct kobj_uevent_env *env)
38 int num_envp, char *buffer, int buffer_size)
39{ 38{
40 int retval = 0; 39 int retval = 0;
41 40
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 869ff8c001..fb56092414 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -160,13 +160,8 @@ static void platform_device_release(struct device *dev)
160 * 160 *
161 * Create a platform device object which can have other objects attached 161 * Create a platform device object which can have other objects attached
162 * to it, and which will have attached objects freed when it is released. 162 * to it, and which will have attached objects freed when it is released.
163 *
164 * This device will be marked as not supporting hotpluggable drivers; no
165 * device add/remove uevents will be generated. In the unusual case that
166 * the device isn't being dynamically allocated as a legacy "probe the
167 * hardware" driver, infrastructure code should reverse this marking.
168 */ 163 */
169struct platform_device *platform_device_alloc(const char *name, unsigned int id) 164struct platform_device *platform_device_alloc(const char *name, int id)
170{ 165{
171 struct platform_object *pa; 166 struct platform_object *pa;
172 167
@@ -177,12 +172,6 @@ struct platform_device *platform_device_alloc(const char *name, unsigned int id)
177 pa->pdev.id = id; 172 pa->pdev.id = id;
178 device_initialize(&pa->pdev.dev); 173 device_initialize(&pa->pdev.dev);
179 pa->pdev.dev.release = platform_device_release; 174 pa->pdev.dev.release = platform_device_release;
180
181 /* prevent hotplug "modprobe $(MODALIAS)" from causing trouble in
182 * legacy probe-the-hardware drivers, which don't properly split
183 * out device enumeration logic from drivers.
184 */
185 pa->pdev.dev.uevent_suppress = 1;
186 } 175 }
187 176
188 return pa ? &pa->pdev : NULL; 177 return pa ? &pa->pdev : NULL;
@@ -256,7 +245,8 @@ int platform_device_add(struct platform_device *pdev)
256 pdev->dev.bus = &platform_bus_type; 245 pdev->dev.bus = &platform_bus_type;
257 246
258 if (pdev->id != -1) 247 if (pdev->id != -1)
259 snprintf(pdev->dev.bus_id, BUS_ID_SIZE, "%s.%u", pdev->name, pdev->id); 248 snprintf(pdev->dev.bus_id, BUS_ID_SIZE, "%s.%d", pdev->name,
249 pdev->id);
260 else 250 else
261 strlcpy(pdev->dev.bus_id, pdev->name, BUS_ID_SIZE); 251 strlcpy(pdev->dev.bus_id, pdev->name, BUS_ID_SIZE);
262 252
@@ -370,7 +360,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
370 * the Linux driver model. In particular, when such drivers are built 360 * the Linux driver model. In particular, when such drivers are built
371 * as modules, they can't be "hotplugged". 361 * as modules, they can't be "hotplugged".
372 */ 362 */
373struct platform_device *platform_device_register_simple(char *name, unsigned int id, 363struct platform_device *platform_device_register_simple(char *name, int id,
374 struct resource *res, unsigned int num) 364 struct resource *res, unsigned int num)
375{ 365{
376 struct platform_device *pdev; 366 struct platform_device *pdev;
@@ -530,7 +520,7 @@ static ssize_t
530modalias_show(struct device *dev, struct device_attribute *a, char *buf) 520modalias_show(struct device *dev, struct device_attribute *a, char *buf)
531{ 521{
532 struct platform_device *pdev = to_platform_device(dev); 522 struct platform_device *pdev = to_platform_device(dev);
533 int len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->name); 523 int len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
534 524
535 return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; 525 return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
536} 526}
@@ -540,13 +530,11 @@ static struct device_attribute platform_dev_attrs[] = {
540 __ATTR_NULL, 530 __ATTR_NULL,
541}; 531};
542 532
543static int platform_uevent(struct device *dev, char **envp, int num_envp, 533static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
544 char *buffer, int buffer_size)
545{ 534{
546 struct platform_device *pdev = to_platform_device(dev); 535 struct platform_device *pdev = to_platform_device(dev);
547 536
548 envp[0] = buffer; 537 add_uevent_var(env, "MODALIAS=platform:%s", pdev->name);
549 snprintf(buffer, buffer_size, "MODALIAS=%s", pdev->name);
550 return 0; 538 return 0;
551} 539}
552 540
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 9caeaea753..a803733c83 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,5 +1,5 @@
1obj-y := shutdown.o 1obj-y := shutdown.o
2obj-$(CONFIG_PM_SLEEP) += main.o suspend.o resume.o sysfs.o 2obj-$(CONFIG_PM_SLEEP) += main.o sysfs.o
3obj-$(CONFIG_PM_TRACE) += trace.o 3obj-$(CONFIG_PM_TRACE) += trace.o
4 4
5ifeq ($(CONFIG_DEBUG_DRIVER),y) 5ifeq ($(CONFIG_DEBUG_DRIVER),y)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index eb9f38d0aa..0ab4ab21f5 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -20,19 +20,24 @@
20 */ 20 */
21 21
22#include <linux/device.h> 22#include <linux/device.h>
23#include <linux/kallsyms.h>
23#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/pm.h>
26#include <linux/resume-trace.h>
24 27
28#include "../base.h"
25#include "power.h" 29#include "power.h"
26 30
27LIST_HEAD(dpm_active); 31LIST_HEAD(dpm_active);
28LIST_HEAD(dpm_off); 32static LIST_HEAD(dpm_off);
29LIST_HEAD(dpm_off_irq); 33static LIST_HEAD(dpm_off_irq);
30 34
31DEFINE_MUTEX(dpm_mtx); 35static DEFINE_MUTEX(dpm_mtx);
32DEFINE_MUTEX(dpm_list_mtx); 36static DEFINE_MUTEX(dpm_list_mtx);
33 37
34int (*platform_enable_wakeup)(struct device *dev, int is_on); 38int (*platform_enable_wakeup)(struct device *dev, int is_on);
35 39
40
36int device_pm_add(struct device *dev) 41int device_pm_add(struct device *dev)
37{ 42{
38 int error; 43 int error;
@@ -61,3 +66,334 @@ void device_pm_remove(struct device *dev)
61} 66}
62 67
63 68
69/*------------------------- Resume routines -------------------------*/
70
71/**
72 * resume_device - Restore state for one device.
73 * @dev: Device.
74 *
75 */
76
77static int resume_device(struct device * dev)
78{
79 int error = 0;
80
81 TRACE_DEVICE(dev);
82 TRACE_RESUME(0);
83
84 down(&dev->sem);
85
86 if (dev->bus && dev->bus->resume) {
87 dev_dbg(dev,"resuming\n");
88 error = dev->bus->resume(dev);
89 }
90
91 if (!error && dev->type && dev->type->resume) {
92 dev_dbg(dev,"resuming\n");
93 error = dev->type->resume(dev);
94 }
95
96 if (!error && dev->class && dev->class->resume) {
97 dev_dbg(dev,"class resume\n");
98 error = dev->class->resume(dev);
99 }
100
101 up(&dev->sem);
102
103 TRACE_RESUME(error);
104 return error;
105}
106
107
108static int resume_device_early(struct device * dev)
109{
110 int error = 0;
111
112 TRACE_DEVICE(dev);
113 TRACE_RESUME(0);
114 if (dev->bus && dev->bus->resume_early) {
115 dev_dbg(dev,"EARLY resume\n");
116 error = dev->bus->resume_early(dev);
117 }
118 TRACE_RESUME(error);
119 return error;
120}
121
122/*
123 * Resume the devices that have either not gone through
124 * the late suspend, or that did go through it but also
125 * went through the early resume
126 */
127static void dpm_resume(void)
128{
129 mutex_lock(&dpm_list_mtx);
130 while(!list_empty(&dpm_off)) {
131 struct list_head * entry = dpm_off.next;
132 struct device * dev = to_device(entry);
133
134 get_device(dev);
135 list_move_tail(entry, &dpm_active);
136
137 mutex_unlock(&dpm_list_mtx);
138 resume_device(dev);
139 mutex_lock(&dpm_list_mtx);
140 put_device(dev);
141 }
142 mutex_unlock(&dpm_list_mtx);
143}
144
145
146/**
147 * device_resume - Restore state of each device in system.
148 *
149 * Walk the dpm_off list, remove each entry, resume the device,
150 * then add it to the dpm_active list.
151 */
152
153void device_resume(void)
154{
155 might_sleep();
156 mutex_lock(&dpm_mtx);
157 dpm_resume();
158 mutex_unlock(&dpm_mtx);
159}
160
161EXPORT_SYMBOL_GPL(device_resume);
162
163
164/**
165 * dpm_power_up - Power on some devices.
166 *
167 * Walk the dpm_off_irq list and power each device up. This
168 * is used for devices that required they be powered down with
169 * interrupts disabled. As devices are powered on, they are moved
170 * to the dpm_active list.
171 *
172 * Interrupts must be disabled when calling this.
173 */
174
175static void dpm_power_up(void)
176{
177 while(!list_empty(&dpm_off_irq)) {
178 struct list_head * entry = dpm_off_irq.next;
179 struct device * dev = to_device(entry);
180
181 list_move_tail(entry, &dpm_off);
182 resume_device_early(dev);
183 }
184}
185
186
187/**
188 * device_power_up - Turn on all devices that need special attention.
189 *
190 * Power on system devices then devices that required we shut them down
191 * with interrupts disabled.
192 * Called with interrupts disabled.
193 */
194
195void device_power_up(void)
196{
197 sysdev_resume();
198 dpm_power_up();
199}
200
201EXPORT_SYMBOL_GPL(device_power_up);
202
203
204/*------------------------- Suspend routines -------------------------*/
205
206/*
207 * The entries in the dpm_active list are in a depth first order, simply
208 * because children are guaranteed to be discovered after parents, and
209 * are inserted at the back of the list on discovery.
210 *
211 * All list on the suspend path are done in reverse order, so we operate
212 * on the leaves of the device tree (or forests, depending on how you want
213 * to look at it ;) first. As nodes are removed from the back of the list,
214 * they are inserted into the front of their destintation lists.
215 *
216 * Things are the reverse on the resume path - iterations are done in
217 * forward order, and nodes are inserted at the back of their destination
218 * lists. This way, the ancestors will be accessed before their descendents.
219 */
220
221static inline char *suspend_verb(u32 event)
222{
223 switch (event) {
224 case PM_EVENT_SUSPEND: return "suspend";
225 case PM_EVENT_FREEZE: return "freeze";
226 case PM_EVENT_PRETHAW: return "prethaw";
227 default: return "(unknown suspend event)";
228 }
229}
230
231
232static void
233suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
234{
235 dev_dbg(dev, "%s%s%s\n", info, suspend_verb(state.event),
236 ((state.event == PM_EVENT_SUSPEND) && device_may_wakeup(dev)) ?
237 ", may wakeup" : "");
238}
239
240/**
241 * suspend_device - Save state of one device.
242 * @dev: Device.
243 * @state: Power state device is entering.
244 */
245
246static int suspend_device(struct device * dev, pm_message_t state)
247{
248 int error = 0;
249
250 down(&dev->sem);
251 if (dev->power.power_state.event) {
252 dev_dbg(dev, "PM: suspend %d-->%d\n",
253 dev->power.power_state.event, state.event);
254 }
255
256 if (dev->class && dev->class->suspend) {
257 suspend_device_dbg(dev, state, "class ");
258 error = dev->class->suspend(dev, state);
259 suspend_report_result(dev->class->suspend, error);
260 }
261
262 if (!error && dev->type && dev->type->suspend) {
263 suspend_device_dbg(dev, state, "type ");
264 error = dev->type->suspend(dev, state);
265 suspend_report_result(dev->type->suspend, error);
266 }
267
268 if (!error && dev->bus && dev->bus->suspend) {
269 suspend_device_dbg(dev, state, "");
270 error = dev->bus->suspend(dev, state);
271 suspend_report_result(dev->bus->suspend, error);
272 }
273 up(&dev->sem);
274 return error;
275}
276
277
278/*
279 * This is called with interrupts off, only a single CPU
280 * running. We can't acquire a mutex or semaphore (and we don't
281 * need the protection)
282 */
283static int suspend_device_late(struct device *dev, pm_message_t state)
284{
285 int error = 0;
286
287 if (dev->bus && dev->bus->suspend_late) {
288 suspend_device_dbg(dev, state, "LATE ");
289 error = dev->bus->suspend_late(dev, state);
290 suspend_report_result(dev->bus->suspend_late, error);
291 }
292 return error;
293}
294
295/**
296 * device_suspend - Save state and stop all devices in system.
297 * @state: Power state to put each device in.
298 *
299 * Walk the dpm_active list, call ->suspend() for each device, and move
300 * it to the dpm_off list.
301 *
302 * (For historical reasons, if it returns -EAGAIN, that used to mean
303 * that the device would be called again with interrupts disabled.
304 * These days, we use the "suspend_late()" callback for that, so we
305 * print a warning and consider it an error).
306 *
307 * If we get a different error, try and back out.
308 *
309 * If we hit a failure with any of the devices, call device_resume()
310 * above to bring the suspended devices back to life.
311 *
312 */
313
314int device_suspend(pm_message_t state)
315{
316 int error = 0;
317
318 might_sleep();
319 mutex_lock(&dpm_mtx);
320 mutex_lock(&dpm_list_mtx);
321 while (!list_empty(&dpm_active) && error == 0) {
322 struct list_head * entry = dpm_active.prev;
323 struct device * dev = to_device(entry);
324
325 get_device(dev);
326 mutex_unlock(&dpm_list_mtx);
327
328 error = suspend_device(dev, state);
329
330 mutex_lock(&dpm_list_mtx);
331
332 /* Check if the device got removed */
333 if (!list_empty(&dev->power.entry)) {
334 /* Move it to the dpm_off list */
335 if (!error)
336 list_move(&dev->power.entry, &dpm_off);
337 }
338 if (error)
339 printk(KERN_ERR "Could not suspend device %s: "
340 "error %d%s\n",
341 kobject_name(&dev->kobj), error,
342 error == -EAGAIN ? " (please convert to suspend_late)" : "");
343 put_device(dev);
344 }
345 mutex_unlock(&dpm_list_mtx);
346 if (error)
347 dpm_resume();
348
349 mutex_unlock(&dpm_mtx);
350 return error;
351}
352
353EXPORT_SYMBOL_GPL(device_suspend);
354
355/**
356 * device_power_down - Shut down special devices.
357 * @state: Power state to enter.
358 *
359 * Walk the dpm_off_irq list, calling ->power_down() for each device that
360 * couldn't power down the device with interrupts enabled. When we're
361 * done, power down system devices.
362 */
363
364int device_power_down(pm_message_t state)
365{
366 int error = 0;
367 struct device * dev;
368
369 while (!list_empty(&dpm_off)) {
370 struct list_head * entry = dpm_off.prev;
371
372 dev = to_device(entry);
373 error = suspend_device_late(dev, state);
374 if (error)
375 goto Error;
376 list_move(&dev->power.entry, &dpm_off_irq);
377 }
378
379 error = sysdev_suspend(state);
380 Done:
381 return error;
382 Error:
383 printk(KERN_ERR "Could not power down device %s: "
384 "error %d\n", kobject_name(&dev->kobj), error);
385 dpm_power_up();
386 goto Done;
387}
388
389EXPORT_SYMBOL_GPL(device_power_down);
390
391void __suspend_report_result(const char *function, void *fn, int ret)
392{
393 if (ret) {
394 printk(KERN_ERR "%s(): ", function);
395 print_fn_descriptor_symbol("%s() returns ", (unsigned long)fn);
396 printk("%d\n", ret);
397 }
398}
399EXPORT_SYMBOL_GPL(__suspend_report_result);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 8ba0830cbc..5c4efd493f 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -11,32 +11,11 @@ extern void device_shutdown(void);
11 * main.c 11 * main.c
12 */ 12 */
13 13
14/* 14extern struct list_head dpm_active; /* The active device list */
15 * Used to synchronize global power management operations.
16 */
17extern struct mutex dpm_mtx;
18
19/*
20 * Used to serialize changes to the dpm_* lists.
21 */
22extern struct mutex dpm_list_mtx;
23
24/*
25 * The PM lists.
26 */
27extern struct list_head dpm_active;
28extern struct list_head dpm_off;
29extern struct list_head dpm_off_irq;
30
31
32static inline struct dev_pm_info * to_pm_info(struct list_head * entry)
33{
34 return container_of(entry, struct dev_pm_info, entry);
35}
36 15
37static inline struct device * to_device(struct list_head * entry) 16static inline struct device * to_device(struct list_head * entry)
38{ 17{
39 return container_of(to_pm_info(entry), struct device, power); 18 return container_of(entry, struct device, power.entry);
40} 19}
41 20
42extern int device_pm_add(struct device *); 21extern int device_pm_add(struct device *);
@@ -49,19 +28,6 @@ extern void device_pm_remove(struct device *);
49extern int dpm_sysfs_add(struct device *); 28extern int dpm_sysfs_add(struct device *);
50extern void dpm_sysfs_remove(struct device *); 29extern void dpm_sysfs_remove(struct device *);
51 30
52/*
53 * resume.c
54 */
55
56extern void dpm_resume(void);
57extern void dpm_power_up(void);
58extern int resume_device(struct device *);
59
60/*
61 * suspend.c
62 */
63extern int suspend_device(struct device *, pm_message_t);
64
65#else /* CONFIG_PM_SLEEP */ 31#else /* CONFIG_PM_SLEEP */
66 32
67 33
diff --git a/drivers/base/power/resume.c b/drivers/base/power/resume.c
deleted file mode 100644
index 00fd84ae6e..0000000000
--- a/drivers/base/power/resume.c
+++ /dev/null
@@ -1,149 +0,0 @@
1/*
2 * resume.c - Functions for waking devices up.
3 *
4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Labs
6 *
7 * This file is released under the GPLv2
8 *
9 */
10
11#include <linux/device.h>
12#include <linux/resume-trace.h>
13#include "../base.h"
14#include "power.h"
15
16
17/**
18 * resume_device - Restore state for one device.
19 * @dev: Device.
20 *
21 */
22
23int resume_device(struct device * dev)
24{
25 int error = 0;
26
27 TRACE_DEVICE(dev);
28 TRACE_RESUME(0);
29
30 down(&dev->sem);
31
32 if (dev->bus && dev->bus->resume) {
33 dev_dbg(dev,"resuming\n");
34 error = dev->bus->resume(dev);
35 }
36
37 if (!error && dev->type && dev->type->resume) {
38 dev_dbg(dev,"resuming\n");
39 error = dev->type->resume(dev);
40 }
41
42 if (!error && dev->class && dev->class->resume) {
43 dev_dbg(dev,"class resume\n");
44 error = dev->class->resume(dev);
45 }
46
47 up(&dev->sem);
48
49 TRACE_RESUME(error);
50 return error;
51}
52
53
54static int resume_device_early(struct device * dev)
55{
56 int error = 0;
57
58 TRACE_DEVICE(dev);
59 TRACE_RESUME(0);
60 if (dev->bus && dev->bus->resume_early) {
61 dev_dbg(dev,"EARLY resume\n");
62 error = dev->bus->resume_early(dev);
63 }
64 TRACE_RESUME(error);
65 return error;
66}
67
68/*
69 * Resume the devices that have either not gone through
70 * the late suspend, or that did go through it but also
71 * went through the early resume
72 */
73void dpm_resume(void)
74{
75 mutex_lock(&dpm_list_mtx);
76 while(!list_empty(&dpm_off)) {
77 struct list_head * entry = dpm_off.next;
78 struct device * dev = to_device(entry);
79
80 get_device(dev);
81 list_move_tail(entry, &dpm_active);
82
83 mutex_unlock(&dpm_list_mtx);
84 resume_device(dev);
85 mutex_lock(&dpm_list_mtx);
86 put_device(dev);
87 }
88 mutex_unlock(&dpm_list_mtx);
89}
90
91
92/**
93 * device_resume - Restore state of each device in system.
94 *
95 * Walk the dpm_off list, remove each entry, resume the device,
96 * then add it to the dpm_active list.
97 */
98
99void device_resume(void)
100{
101 might_sleep();
102 mutex_lock(&dpm_mtx);
103 dpm_resume();
104 mutex_unlock(&dpm_mtx);
105}
106
107EXPORT_SYMBOL_GPL(device_resume);
108
109
110/**
111 * dpm_power_up - Power on some devices.
112 *
113 * Walk the dpm_off_irq list and power each device up. This
114 * is used for devices that required they be powered down with
115 * interrupts disabled. As devices are powered on, they are moved
116 * to the dpm_active list.
117 *
118 * Interrupts must be disabled when calling this.
119 */
120
121void dpm_power_up(void)
122{
123 while(!list_empty(&dpm_off_irq)) {
124 struct list_head * entry = dpm_off_irq.next;
125 struct device * dev = to_device(entry);
126
127 list_move_tail(entry, &dpm_off);
128 resume_device_early(dev);
129 }
130}
131
132
133/**
134 * device_power_up - Turn on all devices that need special attention.
135 *
136 * Power on system devices then devices that required we shut them down
137 * with interrupts disabled.
138 * Called with interrupts disabled.
139 */
140
141void device_power_up(void)
142{
143 sysdev_resume();
144 dpm_power_up();
145}
146
147EXPORT_SYMBOL_GPL(device_power_up);
148
149
diff --git a/drivers/base/power/suspend.c b/drivers/base/power/suspend.c
deleted file mode 100644
index 26df9b2317..0000000000
--- a/drivers/base/power/suspend.c
+++ /dev/null
@@ -1,210 +0,0 @@
1/*
2 * suspend.c - Functions for putting devices to sleep.
3 *
4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Labs
6 *
7 * This file is released under the GPLv2
8 *
9 */
10
11#include <linux/device.h>
12#include <linux/kallsyms.h>
13#include <linux/pm.h>
14#include "../base.h"
15#include "power.h"
16
17/*
18 * The entries in the dpm_active list are in a depth first order, simply
19 * because children are guaranteed to be discovered after parents, and
20 * are inserted at the back of the list on discovery.
21 *
22 * All list on the suspend path are done in reverse order, so we operate
23 * on the leaves of the device tree (or forests, depending on how you want
24 * to look at it ;) first. As nodes are removed from the back of the list,
25 * they are inserted into the front of their destintation lists.
26 *
27 * Things are the reverse on the resume path - iterations are done in
28 * forward order, and nodes are inserted at the back of their destination
29 * lists. This way, the ancestors will be accessed before their descendents.
30 */
31
32static inline char *suspend_verb(u32 event)
33{
34 switch (event) {
35 case PM_EVENT_SUSPEND: return "suspend";
36 case PM_EVENT_FREEZE: return "freeze";
37 case PM_EVENT_PRETHAW: return "prethaw";
38 default: return "(unknown suspend event)";
39 }
40}
41
42
43static void
44suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
45{
46 dev_dbg(dev, "%s%s%s\n", info, suspend_verb(state.event),
47 ((state.event == PM_EVENT_SUSPEND) && device_may_wakeup(dev)) ?
48 ", may wakeup" : "");
49}
50
51/**
52 * suspend_device - Save state of one device.
53 * @dev: Device.
54 * @state: Power state device is entering.
55 */
56
57int suspend_device(struct device * dev, pm_message_t state)
58{
59 int error = 0;
60
61 down(&dev->sem);
62 if (dev->power.power_state.event) {
63 dev_dbg(dev, "PM: suspend %d-->%d\n",
64 dev->power.power_state.event, state.event);
65 }
66
67 if (dev->class && dev->class->suspend) {
68 suspend_device_dbg(dev, state, "class ");
69 error = dev->class->suspend(dev, state);
70 suspend_report_result(dev->class->suspend, error);
71 }
72
73 if (!error && dev->type && dev->type->suspend) {
74 suspend_device_dbg(dev, state, "type ");
75 error = dev->type->suspend(dev, state);
76 suspend_report_result(dev->type->suspend, error);
77 }
78
79 if (!error && dev->bus && dev->bus->suspend) {
80 suspend_device_dbg(dev, state, "");
81 error = dev->bus->suspend(dev, state);
82 suspend_report_result(dev->bus->suspend, error);
83 }
84 up(&dev->sem);
85 return error;
86}
87
88
89/*
90 * This is called with interrupts off, only a single CPU
91 * running. We can't acquire a mutex or semaphore (and we don't
92 * need the protection)
93 */
94static int suspend_device_late(struct device *dev, pm_message_t state)
95{
96 int error = 0;
97
98 if (dev->bus && dev->bus->suspend_late) {
99 suspend_device_dbg(dev, state, "LATE ");
100 error = dev->bus->suspend_late(dev, state);
101 suspend_report_result(dev->bus->suspend_late, error);
102 }
103 return error;
104}
105
106/**
107 * device_suspend - Save state and stop all devices in system.
108 * @state: Power state to put each device in.
109 *
110 * Walk the dpm_active list, call ->suspend() for each device, and move
111 * it to the dpm_off list.
112 *
113 * (For historical reasons, if it returns -EAGAIN, that used to mean
114 * that the device would be called again with interrupts disabled.
115 * These days, we use the "suspend_late()" callback for that, so we
116 * print a warning and consider it an error).
117 *
118 * If we get a different error, try and back out.
119 *
120 * If we hit a failure with any of the devices, call device_resume()
121 * above to bring the suspended devices back to life.
122 *
123 */
124
125int device_suspend(pm_message_t state)
126{
127 int error = 0;
128
129 might_sleep();
130 mutex_lock(&dpm_mtx);
131 mutex_lock(&dpm_list_mtx);
132 while (!list_empty(&dpm_active) && error == 0) {
133 struct list_head * entry = dpm_active.prev;
134 struct device * dev = to_device(entry);
135
136 get_device(dev);
137 mutex_unlock(&dpm_list_mtx);
138
139 error = suspend_device(dev, state);
140
141 mutex_lock(&dpm_list_mtx);
142
143 /* Check if the device got removed */
144 if (!list_empty(&dev->power.entry)) {
145 /* Move it to the dpm_off list */
146 if (!error)
147 list_move(&dev->power.entry, &dpm_off);
148 }
149 if (error)
150 printk(KERN_ERR "Could not suspend device %s: "
151 "error %d%s\n",
152 kobject_name(&dev->kobj), error,
153 error == -EAGAIN ? " (please convert to suspend_late)" : "");
154 put_device(dev);
155 }
156 mutex_unlock(&dpm_list_mtx);
157 if (error)
158 dpm_resume();
159
160 mutex_unlock(&dpm_mtx);
161 return error;
162}
163
164EXPORT_SYMBOL_GPL(device_suspend);
165
166/**
167 * device_power_down - Shut down special devices.
168 * @state: Power state to enter.
169 *
170 * Walk the dpm_off_irq list, calling ->power_down() for each device that
171 * couldn't power down the device with interrupts enabled. When we're
172 * done, power down system devices.
173 */
174
175int device_power_down(pm_message_t state)
176{
177 int error = 0;
178 struct device * dev;
179
180 while (!list_empty(&dpm_off)) {
181 struct list_head * entry = dpm_off.prev;
182
183 dev = to_device(entry);
184 error = suspend_device_late(dev, state);
185 if (error)
186 goto Error;
187 list_move(&dev->power.entry, &dpm_off_irq);
188 }
189
190 error = sysdev_suspend(state);
191 Done:
192 return error;
193 Error:
194 printk(KERN_ERR "Could not power down device %s: "
195 "error %d\n", kobject_name(&dev->kobj), error);
196 dpm_power_up();
197 goto Done;
198}
199
200EXPORT_SYMBOL_GPL(device_power_down);
201
202void __suspend_report_result(const char *function, void *fn, int ret)
203{
204 if (ret) {
205 printk(KERN_ERR "%s(): ", function);
206 print_fn_descriptor_symbol("%s() returns ", (unsigned long)fn);
207 printk("%d\n", ret);
208 }
209}
210EXPORT_SYMBOL_GPL(__suspend_report_result);
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 18febe26ca..ac7ff6d0c6 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -139,7 +139,7 @@ int sysdev_class_register(struct sysdev_class * cls)
139 kobject_name(&cls->kset.kobj)); 139 kobject_name(&cls->kset.kobj));
140 INIT_LIST_HEAD(&cls->drivers); 140 INIT_LIST_HEAD(&cls->drivers);
141 cls->kset.kobj.parent = &system_subsys.kobj; 141 cls->kset.kobj.parent = &system_subsys.kobj;
142 kset_set_kset_s(cls, system_subsys); 142 cls->kset.kobj.kset = &system_subsys;
143 return kset_register(&cls->kset); 143 return kset_register(&cls->kset);
144} 144}
145 145
@@ -153,25 +153,22 @@ void sysdev_class_unregister(struct sysdev_class * cls)
153EXPORT_SYMBOL_GPL(sysdev_class_register); 153EXPORT_SYMBOL_GPL(sysdev_class_register);
154EXPORT_SYMBOL_GPL(sysdev_class_unregister); 154EXPORT_SYMBOL_GPL(sysdev_class_unregister);
155 155
156
157static LIST_HEAD(sysdev_drivers);
158static DEFINE_MUTEX(sysdev_drivers_lock); 156static DEFINE_MUTEX(sysdev_drivers_lock);
159 157
160/** 158/**
161 * sysdev_driver_register - Register auxillary driver 159 * sysdev_driver_register - Register auxillary driver
162 * @cls: Device class driver belongs to. 160 * @cls: Device class driver belongs to.
163 * @drv: Driver. 161 * @drv: Driver.
164 * 162 *
165 * If @cls is valid, then @drv is inserted into @cls->drivers to be 163 * @drv is inserted into @cls->drivers to be
166 * called on each operation on devices of that class. The refcount 164 * called on each operation on devices of that class. The refcount
167 * of @cls is incremented. 165 * of @cls is incremented.
168 * Otherwise, @drv is inserted into sysdev_drivers, and called for
169 * each device.
170 */ 166 */
171 167
172int sysdev_driver_register(struct sysdev_class * cls, 168int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv)
173 struct sysdev_driver * drv)
174{ 169{
170 int err = 0;
171
175 mutex_lock(&sysdev_drivers_lock); 172 mutex_lock(&sysdev_drivers_lock);
176 if (cls && kset_get(&cls->kset)) { 173 if (cls && kset_get(&cls->kset)) {
177 list_add_tail(&drv->entry, &cls->drivers); 174 list_add_tail(&drv->entry, &cls->drivers);
@@ -182,10 +179,13 @@ int sysdev_driver_register(struct sysdev_class * cls,
182 list_for_each_entry(dev, &cls->kset.list, kobj.entry) 179 list_for_each_entry(dev, &cls->kset.list, kobj.entry)
183 drv->add(dev); 180 drv->add(dev);
184 } 181 }
185 } else 182 } else {
186 list_add_tail(&drv->entry, &sysdev_drivers); 183 err = -EINVAL;
184 printk(KERN_ERR "%s: invalid device class\n", __FUNCTION__);
185 WARN_ON(1);
186 }
187 mutex_unlock(&sysdev_drivers_lock); 187 mutex_unlock(&sysdev_drivers_lock);
188 return 0; 188 return err;
189} 189}
190 190
191 191
@@ -251,12 +251,6 @@ int sysdev_register(struct sys_device * sysdev)
251 * code that should have called us. 251 * code that should have called us.
252 */ 252 */
253 253
254 /* Notify global drivers */
255 list_for_each_entry(drv, &sysdev_drivers, entry) {
256 if (drv->add)
257 drv->add(sysdev);
258 }
259
260 /* Notify class auxillary drivers */ 254 /* Notify class auxillary drivers */
261 list_for_each_entry(drv, &cls->drivers, entry) { 255 list_for_each_entry(drv, &cls->drivers, entry) {
262 if (drv->add) 256 if (drv->add)
@@ -272,11 +266,6 @@ void sysdev_unregister(struct sys_device * sysdev)
272 struct sysdev_driver * drv; 266 struct sysdev_driver * drv;
273 267
274 mutex_lock(&sysdev_drivers_lock); 268 mutex_lock(&sysdev_drivers_lock);
275 list_for_each_entry(drv, &sysdev_drivers, entry) {
276 if (drv->remove)
277 drv->remove(sysdev);
278 }
279
280 list_for_each_entry(drv, &sysdev->cls->drivers, entry) { 269 list_for_each_entry(drv, &sysdev->cls->drivers, entry) {
281 if (drv->remove) 270 if (drv->remove)
282 drv->remove(sysdev); 271 drv->remove(sysdev);
@@ -293,7 +282,7 @@ void sysdev_unregister(struct sys_device * sysdev)
293 * 282 *
294 * Loop over each class of system devices, and the devices in each 283 * Loop over each class of system devices, and the devices in each
295 * of those classes. For each device, we call the shutdown method for 284 * of those classes. For each device, we call the shutdown method for
296 * each driver registered for the device - the globals, the auxillaries, 285 * each driver registered for the device - the auxillaries,
297 * and the class driver. 286 * and the class driver.
298 * 287 *
299 * Note: The list is iterated in reverse order, so that we shut down 288 * Note: The list is iterated in reverse order, so that we shut down
@@ -320,13 +309,7 @@ void sysdev_shutdown(void)
320 struct sysdev_driver * drv; 309 struct sysdev_driver * drv;
321 pr_debug(" %s\n", kobject_name(&sysdev->kobj)); 310 pr_debug(" %s\n", kobject_name(&sysdev->kobj));
322 311
323 /* Call global drivers first. */ 312 /* Call auxillary drivers first */
324 list_for_each_entry(drv, &sysdev_drivers, entry) {
325 if (drv->shutdown)
326 drv->shutdown(sysdev);
327 }
328
329 /* Call auxillary drivers next. */
330 list_for_each_entry(drv, &cls->drivers, entry) { 313 list_for_each_entry(drv, &cls->drivers, entry) {
331 if (drv->shutdown) 314 if (drv->shutdown)
332 drv->shutdown(sysdev); 315 drv->shutdown(sysdev);
@@ -354,12 +337,6 @@ static void __sysdev_resume(struct sys_device *dev)
354 if (drv->resume) 337 if (drv->resume)
355 drv->resume(dev); 338 drv->resume(dev);
356 } 339 }
357
358 /* Call global drivers. */
359 list_for_each_entry(drv, &sysdev_drivers, entry) {
360 if (drv->resume)
361 drv->resume(dev);
362 }
363} 340}
364 341
365/** 342/**
@@ -393,16 +370,7 @@ int sysdev_suspend(pm_message_t state)
393 list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { 370 list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) {
394 pr_debug(" %s\n", kobject_name(&sysdev->kobj)); 371 pr_debug(" %s\n", kobject_name(&sysdev->kobj));
395 372
396 /* Call global drivers first. */ 373 /* Call auxillary drivers first */
397 list_for_each_entry(drv, &sysdev_drivers, entry) {
398 if (drv->suspend) {
399 ret = drv->suspend(sysdev, state);
400 if (ret)
401 goto gbl_driver;
402 }
403 }
404
405 /* Call auxillary drivers next. */
406 list_for_each_entry(drv, &cls->drivers, entry) { 374 list_for_each_entry(drv, &cls->drivers, entry) {
407 if (drv->suspend) { 375 if (drv->suspend) {
408 ret = drv->suspend(sysdev, state); 376 ret = drv->suspend(sysdev, state);
@@ -436,18 +404,7 @@ aux_driver:
436 if (err_drv->resume) 404 if (err_drv->resume)
437 err_drv->resume(sysdev); 405 err_drv->resume(sysdev);
438 } 406 }
439 drv = NULL;
440 407
441gbl_driver:
442 if (drv)
443 printk(KERN_ERR "sysdev driver suspend failed for %s\n",
444 kobject_name(&sysdev->kobj));
445 list_for_each_entry(err_drv, &sysdev_drivers, entry) {
446 if (err_drv == drv)
447 break;
448 if (err_drv->resume)
449 err_drv->resume(sysdev);
450 }
451 /* resume other sysdevs in current class */ 408 /* resume other sysdevs in current class */
452 list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) { 409 list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) {
453 if (err_dev == sysdev) 410 if (err_dev == sysdev)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 4245b7f80a..ca4d7f0d09 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -361,8 +361,7 @@ config BLK_DEV_RAM_SIZE
361 default "4096" 361 default "4096"
362 help 362 help
363 The default value is 4096 kilobytes. Only change this if you know 363 The default value is 4096 kilobytes. Only change this if you know
364 what are you doing. If you are using IBM S/390, then set this to 364 what are you doing.
365 8192.
366 365
367config BLK_DEV_RAM_BLOCKSIZE 366config BLK_DEV_RAM_BLOCKSIZE
368 int "Default RAM disk block size (bytes)" 367 int "Default RAM disk block size (bytes)"
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 9b8278e1f4..acbfe1c49b 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -513,7 +513,7 @@ static int __init dsp56k_init_driver(void)
513 err = PTR_ERR(dsp56k_class); 513 err = PTR_ERR(dsp56k_class);
514 goto out_chrdev; 514 goto out_chrdev;
515 } 515 }
516 class_device_create(dsp56k_class, NULL, MKDEV(DSP56K_MAJOR, 0), NULL, "dsp56k"); 516 device_create(dsp56k_class, NULL, MKDEV(DSP56K_MAJOR, 0), "dsp56k");
517 517
518 printk(banner); 518 printk(banner);
519 goto out; 519 goto out;
@@ -527,7 +527,7 @@ module_init(dsp56k_init_driver);
527 527
528static void __exit dsp56k_cleanup_driver(void) 528static void __exit dsp56k_cleanup_driver(void)
529{ 529{
530 class_device_destroy(dsp56k_class, MKDEV(DSP56K_MAJOR, 0)); 530 device_destroy(dsp56k_class, MKDEV(DSP56K_MAJOR, 0));
531 class_destroy(dsp56k_class); 531 class_destroy(dsp56k_class);
532 unregister_chrdev(DSP56K_MAJOR, "dsp56k"); 532 unregister_chrdev(DSP56K_MAJOR, "dsp56k");
533} 533}
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 8d74b8745e..bd94d5f9e6 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -411,8 +411,8 @@ cleanup_module(void)
411 iiResetDelay( i2BoardPtrTable[i] ); 411 iiResetDelay( i2BoardPtrTable[i] );
412 /* free io addresses and Tibet */ 412 /* free io addresses and Tibet */
413 release_region( ip2config.addr[i], 8 ); 413 release_region( ip2config.addr[i], 8 );
414 class_device_destroy(ip2_class, MKDEV(IP2_IPL_MAJOR, 4 * i)); 414 device_destroy(ip2_class, MKDEV(IP2_IPL_MAJOR, 4 * i));
415 class_device_destroy(ip2_class, MKDEV(IP2_IPL_MAJOR, 4 * i + 1)); 415 device_destroy(ip2_class, MKDEV(IP2_IPL_MAJOR, 4 * i + 1));
416 } 416 }
417 /* Disable and remove interrupt handler. */ 417 /* Disable and remove interrupt handler. */
418 if ( (ip2config.irq[i] > 0) && have_requested_irq(ip2config.irq[i]) ) { 418 if ( (ip2config.irq[i] > 0) && have_requested_irq(ip2config.irq[i]) ) {
@@ -718,12 +718,12 @@ ip2_loadmain(int *iop, int *irqp, unsigned char *firmware, int firmsize)
718 } 718 }
719 719
720 if ( NULL != ( pB = i2BoardPtrTable[i] ) ) { 720 if ( NULL != ( pB = i2BoardPtrTable[i] ) ) {
721 class_device_create(ip2_class, NULL, 721 device_create(ip2_class, NULL,
722 MKDEV(IP2_IPL_MAJOR, 4 * i), 722 MKDEV(IP2_IPL_MAJOR, 4 * i),
723 NULL, "ipl%d", i); 723 "ipl%d", i);
724 class_device_create(ip2_class, NULL, 724 device_create(ip2_class, NULL,
725 MKDEV(IP2_IPL_MAJOR, 4 * i + 1), 725 MKDEV(IP2_IPL_MAJOR, 4 * i + 1),
726 NULL, "stat%d", i); 726 "stat%d", i);
727 727
728 for ( box = 0; box < ABS_MAX_BOXES; ++box ) 728 for ( box = 0; box < ABS_MAX_BOXES; ++box )
729 { 729 {
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index c2aa44ee6e..0246a2b8ce 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -865,7 +865,7 @@ static void ipmi_new_smi(int if_num, struct device *device)
865 entry->dev = dev; 865 entry->dev = dev;
866 866
867 mutex_lock(&reg_list_mutex); 867 mutex_lock(&reg_list_mutex);
868 class_device_create(ipmi_class, NULL, dev, device, "ipmi%d", if_num); 868 device_create(ipmi_class, device, dev, "ipmi%d", if_num);
869 list_add(&entry->link, &reg_list); 869 list_add(&entry->link, &reg_list);
870 mutex_unlock(&reg_list_mutex); 870 mutex_unlock(&reg_list_mutex);
871} 871}
@@ -883,7 +883,7 @@ static void ipmi_smi_gone(int if_num)
883 break; 883 break;
884 } 884 }
885 } 885 }
886 class_device_destroy(ipmi_class, dev); 886 device_destroy(ipmi_class, dev);
887 mutex_unlock(&reg_list_mutex); 887 mutex_unlock(&reg_list_mutex);
888} 888}
889 889
@@ -938,7 +938,7 @@ static __exit void cleanup_ipmi(void)
938 mutex_lock(&reg_list_mutex); 938 mutex_lock(&reg_list_mutex);
939 list_for_each_entry_safe(entry, entry2, &reg_list, link) { 939 list_for_each_entry_safe(entry, entry2, &reg_list, link) {
940 list_del(&entry->link); 940 list_del(&entry->link);
941 class_device_destroy(ipmi_class, entry->dev); 941 device_destroy(ipmi_class, entry->dev);
942 kfree(entry); 942 kfree(entry);
943 } 943 }
944 mutex_unlock(&reg_list_mutex); 944 mutex_unlock(&reg_list_mutex);
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 3c66f402f9..1f27be1ec3 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -4624,9 +4624,8 @@ static int __init istallion_module_init(void)
4624 4624
4625 istallion_class = class_create(THIS_MODULE, "staliomem"); 4625 istallion_class = class_create(THIS_MODULE, "staliomem");
4626 for (i = 0; i < 4; i++) 4626 for (i = 0; i < 4; i++)
4627 class_device_create(istallion_class, NULL, 4627 device_create(istallion_class, NULL, MKDEV(STL_SIOMEMMAJOR, i),
4628 MKDEV(STL_SIOMEMMAJOR, i), 4628 "staliomem%d", i);
4629 NULL, "staliomem%d", i);
4630 4629
4631 return 0; 4630 return 0;
4632err_deinit: 4631err_deinit:
@@ -4659,8 +4658,7 @@ static void __exit istallion_module_exit(void)
4659 unregister_chrdev(STL_SIOMEMMAJOR, "staliomem"); 4658 unregister_chrdev(STL_SIOMEMMAJOR, "staliomem");
4660 4659
4661 for (j = 0; j < 4; j++) 4660 for (j = 0; j < 4; j++)
4662 class_device_destroy(istallion_class, MKDEV(STL_SIOMEMMAJOR, 4661 device_destroy(istallion_class, MKDEV(STL_SIOMEMMAJOR, j));
4663 j));
4664 class_destroy(istallion_class); 4662 class_destroy(istallion_class);
4665 4663
4666 pci_unregister_driver(&stli_pcidriver); 4664 pci_unregister_driver(&stli_pcidriver);
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 62051f8b09..c59e2a0996 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -799,8 +799,7 @@ static int lp_register(int nr, struct parport *port)
799 if (reset) 799 if (reset)
800 lp_reset(nr); 800 lp_reset(nr);
801 801
802 class_device_create(lp_class, NULL, MKDEV(LP_MAJOR, nr), port->dev, 802 device_create(lp_class, port->dev, MKDEV(LP_MAJOR, nr), "lp%d", nr);
803 "lp%d", nr);
804 803
805 printk(KERN_INFO "lp%d: using %s (%s).\n", nr, port->name, 804 printk(KERN_INFO "lp%d: using %s (%s).\n", nr, port->name,
806 (port->irq == PARPORT_IRQ_NONE)?"polling":"interrupt-driven"); 805 (port->irq == PARPORT_IRQ_NONE)?"polling":"interrupt-driven");
@@ -971,7 +970,7 @@ static void lp_cleanup_module (void)
971 if (lp_table[offset].dev == NULL) 970 if (lp_table[offset].dev == NULL)
972 continue; 971 continue;
973 parport_unregister_device(lp_table[offset].dev); 972 parport_unregister_device(lp_table[offset].dev);
974 class_device_destroy(lp_class, MKDEV(LP_MAJOR, offset)); 973 device_destroy(lp_class, MKDEV(LP_MAJOR, offset));
975 } 974 }
976 class_destroy(lp_class); 975 class_destroy(lp_class);
977} 976}
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 4177f6db83..cc5d77797d 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1863,8 +1863,7 @@ static int cm4000_probe(struct pcmcia_device *link)
1863 return ret; 1863 return ret;
1864 } 1864 }
1865 1865
1866 class_device_create(cmm_class, NULL, MKDEV(major, i), NULL, 1866 device_create(cmm_class, NULL, MKDEV(major, i), "cmm%d", i);
1867 "cmm%d", i);
1868 1867
1869 return 0; 1868 return 0;
1870} 1869}
@@ -1888,7 +1887,7 @@ static void cm4000_detach(struct pcmcia_device *link)
1888 dev_table[devno] = NULL; 1887 dev_table[devno] = NULL;
1889 kfree(dev); 1888 kfree(dev);
1890 1889
1891 class_device_destroy(cmm_class, MKDEV(major, devno)); 1890 device_destroy(cmm_class, MKDEV(major, devno));
1892 1891
1893 return; 1892 return;
1894} 1893}
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index b24a3e7bbb..a0b9c8728d 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -642,8 +642,7 @@ static int reader_probe(struct pcmcia_device *link)
642 return ret; 642 return ret;
643 } 643 }
644 644
645 class_device_create(cmx_class, NULL, MKDEV(major, i), NULL, 645 device_create(cmx_class, NULL, MKDEV(major, i), "cmx%d", i);
646 "cmx%d", i);
647 646
648 return 0; 647 return 0;
649} 648}
@@ -666,7 +665,7 @@ static void reader_detach(struct pcmcia_device *link)
666 dev_table[devno] = NULL; 665 dev_table[devno] = NULL;
667 kfree(dev); 666 kfree(dev);
668 667
669 class_device_destroy(cmx_class, MKDEV(major, devno)); 668 device_destroy(cmx_class, MKDEV(major, devno));
670 669
671 return; 670 return;
672} 671}
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index de14aea34e..73de77105f 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -248,14 +248,19 @@ static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file,
248 return -ENOIOCTLCMD; 248 return -ENOIOCTLCMD;
249} 249}
250 250
251static int legacy_count = CONFIG_LEGACY_PTY_COUNT;
252module_param(legacy_count, int, 0);
253
251static void __init legacy_pty_init(void) 254static void __init legacy_pty_init(void)
252{ 255{
256 if (legacy_count <= 0)
257 return;
253 258
254 pty_driver = alloc_tty_driver(NR_PTYS); 259 pty_driver = alloc_tty_driver(legacy_count);
255 if (!pty_driver) 260 if (!pty_driver)
256 panic("Couldn't allocate pty driver"); 261 panic("Couldn't allocate pty driver");
257 262
258 pty_slave_driver = alloc_tty_driver(NR_PTYS); 263 pty_slave_driver = alloc_tty_driver(legacy_count);
259 if (!pty_slave_driver) 264 if (!pty_slave_driver)
260 panic("Couldn't allocate pty slave driver"); 265 panic("Couldn't allocate pty slave driver");
261 266
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 1f0d7c60c9..bbfa0e241c 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -255,10 +255,7 @@ static const struct file_operations raw_ctl_fops = {
255 .owner = THIS_MODULE, 255 .owner = THIS_MODULE,
256}; 256};
257 257
258static struct cdev raw_cdev = { 258static struct cdev raw_cdev;
259 .kobj = {.name = "raw", },
260 .owner = THIS_MODULE,
261};
262 259
263static int __init raw_init(void) 260static int __init raw_init(void)
264{ 261{
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 52753e723e..b9c1dba6bd 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -441,8 +441,7 @@ scdrv_init(void)
441 continue; 441 continue;
442 } 442 }
443 443
444 class_device_create(snsc_class, NULL, dev, NULL, 444 device_create(snsc_class, NULL, dev, "%s", devname);
445 "%s", devname);
446 445
447 ia64_sn_irtr_intr_enable(scd->scd_nasid, 446 ia64_sn_irtr_intr_enable(scd->scd_nasid,
448 0 /*ignored */ , 447 0 /*ignored */ ,
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 4a80b2f864..45758d5b56 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -4778,9 +4778,8 @@ static int __init stallion_module_init(void)
4778 if (IS_ERR(stallion_class)) 4778 if (IS_ERR(stallion_class))
4779 printk("STALLION: failed to create class\n"); 4779 printk("STALLION: failed to create class\n");
4780 for (i = 0; i < 4; i++) 4780 for (i = 0; i < 4; i++)
4781 class_device_create(stallion_class, NULL, 4781 device_create(stallion_class, NULL, MKDEV(STL_SIOMEMMAJOR, i),
4782 MKDEV(STL_SIOMEMMAJOR, i), NULL, 4782 "staliomem%d", i);
4783 "staliomem%d", i);
4784 4783
4785 return 0; 4784 return 0;
4786err_unrtty: 4785err_unrtty:
@@ -4816,7 +4815,7 @@ static void __exit stallion_module_exit(void)
4816 } 4815 }
4817 4816
4818 for (i = 0; i < 4; i++) 4817 for (i = 0; i < 4; i++)
4819 class_device_destroy(stallion_class, MKDEV(STL_SIOMEMMAJOR, i)); 4818 device_destroy(stallion_class, MKDEV(STL_SIOMEMMAJOR, i));
4820 unregister_chrdev(STL_SIOMEMMAJOR, "staliomem"); 4819 unregister_chrdev(STL_SIOMEMMAJOR, "staliomem");
4821 class_destroy(stallion_class); 4820 class_destroy(stallion_class);
4822 4821
diff --git a/drivers/char/tipar.c b/drivers/char/tipar.c
index 35b40b9965..cef55c4065 100644
--- a/drivers/char/tipar.c
+++ b/drivers/char/tipar.c
@@ -441,8 +441,8 @@ tipar_register(int nr, struct parport *port)
441 goto out; 441 goto out;
442 } 442 }
443 443
444 class_device_create(tipar_class, NULL, MKDEV(TIPAR_MAJOR, 444 device_create(tipar_class, port->dev, MKDEV(TIPAR_MAJOR,
445 TIPAR_MINOR + nr), port->dev, "par%d", nr); 445 TIPAR_MINOR + nr), "par%d", nr);
446 446
447 /* Display informations */ 447 /* Display informations */
448 pr_info("tipar%d: using %s (%s)\n", nr, port->name, (port->irq == 448 pr_info("tipar%d: using %s (%s)\n", nr, port->name, (port->irq ==
@@ -534,7 +534,7 @@ tipar_cleanup_module(void)
534 if (table[i].dev == NULL) 534 if (table[i].dev == NULL)
535 continue; 535 continue;
536 parport_unregister_device(table[i].dev); 536 parport_unregister_device(table[i].dev);
537 class_device_destroy(tipar_class, MKDEV(TIPAR_MAJOR, i)); 537 device_destroy(tipar_class, MKDEV(TIPAR_MAJOR, i));
538 } 538 }
539 class_destroy(tipar_class); 539 class_destroy(tipar_class);
540 540
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index f1d60f0cef..db7a731e23 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -871,10 +871,10 @@ static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
871 state[i].cur_part = 0; 871 state[i].cur_part = 0;
872 for (j = 0; j < MAX_PARTITIONS; ++j) 872 for (j = 0; j < MAX_PARTITIONS; ++j)
873 state[i].part_stat_rwi[j] = VIOT_IDLE; 873 state[i].part_stat_rwi[j] = VIOT_IDLE;
874 class_device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i), NULL, 874 device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i),
875 "iseries!vt%d", i); 875 "iseries!vt%d", i);
876 class_device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i | 0x80), 876 device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i | 0x80),
877 NULL, "iseries!nvt%d", i); 877 "iseries!nvt%d", i);
878 printk(VIOTAPE_KERN_INFO "tape iseries/vt%d is iSeries " 878 printk(VIOTAPE_KERN_INFO "tape iseries/vt%d is iSeries "
879 "resource %10.10s type %4.4s, model %3.3s\n", 879 "resource %10.10s type %4.4s, model %3.3s\n",
880 i, viotape_unitinfo[i].rsrcname, 880 i, viotape_unitinfo[i].rsrcname,
@@ -886,8 +886,8 @@ static int viotape_remove(struct vio_dev *vdev)
886{ 886{
887 int i = vdev->unit_address; 887 int i = vdev->unit_address;
888 888
889 class_device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i | 0x80)); 889 device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i | 0x80));
890 class_device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i)); 890 device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i));
891 return 0; 891 return 0;
892} 892}
893 893
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 993fa7b892..721f86f4f0 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -56,10 +56,6 @@ config CPU_FREQ_STAT_DETAILS
56 56
57 If in doubt, say N. 57 If in doubt, say N.
58 58
59# Note that it is not currently possible to set the other governors (such as ondemand)
60# as the default, since if they fail to initialise, cpufreq will be
61# left in an undefined state.
62
63choice 59choice
64 prompt "Default CPUFreq governor" 60 prompt "Default CPUFreq governor"
65 default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110 61 default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110
@@ -85,6 +81,29 @@ config CPU_FREQ_DEFAULT_GOV_USERSPACE
85 program shall be able to set the CPU dynamically without having 81 program shall be able to set the CPU dynamically without having
86 to enable the userspace governor manually. 82 to enable the userspace governor manually.
87 83
84config CPU_FREQ_DEFAULT_GOV_ONDEMAND
85 bool "ondemand"
86 select CPU_FREQ_GOV_ONDEMAND
87 select CPU_FREQ_GOV_PERFORMANCE
88 help
89 Use the CPUFreq governor 'ondemand' as default. This allows
90 you to get a full dynamic frequency capable system by simply
91 loading your cpufreq low-level hardware driver.
92 Be aware that not all cpufreq drivers support the ondemand
93 governor. If unsure have a look at the help section of the
94 driver. Fallback governor will be the performance governor.
95
96config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
97 bool "conservative"
98 select CPU_FREQ_GOV_CONSERVATIVE
99 select CPU_FREQ_GOV_PERFORMANCE
100 help
101 Use the CPUFreq governor 'conservative' as default. This allows
102 you to get a full dynamic frequency capable system by simply
103 loading your cpufreq low-level hardware driver.
104 Be aware that not all cpufreq drivers support the conservative
105 governor. If unsure have a look at the help section of the
106 driver. Fallback governor will be the performance governor.
88endchoice 107endchoice
89 108
90config CPU_FREQ_GOV_PERFORMANCE 109config CPU_FREQ_GOV_PERFORMANCE
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2f6a73c01b..5e626b12b9 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -763,6 +763,8 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
763 init_completion(&policy->kobj_unregister); 763 init_completion(&policy->kobj_unregister);
764 INIT_WORK(&policy->update, handle_update); 764 INIT_WORK(&policy->update, handle_update);
765 765
766 /* Set governor before ->init, so that driver could check it */
767 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
766 /* call driver. From then on the cpufreq must be able 768 /* call driver. From then on the cpufreq must be able
767 * to accept all calls to ->verify and ->setpolicy for this CPU 769 * to accept all calls to ->verify and ->setpolicy for this CPU
768 */ 770 */
@@ -828,7 +830,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
828 /* prepare interface data */ 830 /* prepare interface data */
829 policy->kobj.parent = &sys_dev->kobj; 831 policy->kobj.parent = &sys_dev->kobj;
830 policy->kobj.ktype = &ktype_cpufreq; 832 policy->kobj.ktype = &ktype_cpufreq;
831 strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN); 833 kobject_set_name(&policy->kobj, "cpufreq");
832 834
833 ret = kobject_register(&policy->kobj); 835 ret = kobject_register(&policy->kobj);
834 if (ret) { 836 if (ret) {
@@ -1109,12 +1111,7 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
1109 unsigned int ret_freq = 0; 1111 unsigned int ret_freq = 0;
1110 1112
1111 if (policy) { 1113 if (policy) {
1112 if (unlikely(lock_policy_rwsem_read(cpu)))
1113 return ret_freq;
1114
1115 ret_freq = policy->cur; 1114 ret_freq = policy->cur;
1116
1117 unlock_policy_rwsem_read(cpu);
1118 cpufreq_cpu_put(policy); 1115 cpufreq_cpu_put(policy);
1119 } 1116 }
1120 1117
@@ -1483,6 +1480,31 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
1483{ 1480{
1484 int ret; 1481 int ret;
1485 1482
1483 /* Only must be defined when default governor is known to have latency
1484 restrictions, like e.g. conservative or ondemand.
1485 That this is the case is already ensured in Kconfig
1486 */
1487#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1488 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1489#else
1490 struct cpufreq_governor *gov = NULL;
1491#endif
1492
1493 if (policy->governor->max_transition_latency &&
1494 policy->cpuinfo.transition_latency >
1495 policy->governor->max_transition_latency) {
1496 if (!gov)
1497 return -EINVAL;
1498 else {
1499 printk(KERN_WARNING "%s governor failed, too long"
1500 " transition latency of HW, fallback"
1501 " to %s governor\n",
1502 policy->governor->name,
1503 gov->name);
1504 policy->governor = gov;
1505 }
1506 }
1507
1486 if (!try_module_get(policy->governor->owner)) 1508 if (!try_module_get(policy->governor->owner))
1487 return -EINVAL; 1509 return -EINVAL;
1488 1510
@@ -1703,7 +1725,7 @@ int cpufreq_update_policy(unsigned int cpu)
1703} 1725}
1704EXPORT_SYMBOL(cpufreq_update_policy); 1726EXPORT_SYMBOL(cpufreq_update_policy);
1705 1727
1706static int cpufreq_cpu_callback(struct notifier_block *nfb, 1728static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1707 unsigned long action, void *hcpu) 1729 unsigned long action, void *hcpu)
1708{ 1730{
1709 unsigned int cpu = (unsigned long)hcpu; 1731 unsigned int cpu = (unsigned long)hcpu;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 26f440ccc3..4bd33ce8a6 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -58,7 +58,7 @@ static unsigned int def_sampling_rate;
58#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) 58#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
59#define DEF_SAMPLING_DOWN_FACTOR (1) 59#define DEF_SAMPLING_DOWN_FACTOR (1)
60#define MAX_SAMPLING_DOWN_FACTOR (10) 60#define MAX_SAMPLING_DOWN_FACTOR (10)
61#define TRANSITION_LATENCY_LIMIT (10 * 1000) 61#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
62 62
63static void do_dbs_timer(struct work_struct *work); 63static void do_dbs_timer(struct work_struct *work);
64 64
@@ -466,9 +466,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
466 (!policy->cur)) 466 (!policy->cur))
467 return -EINVAL; 467 return -EINVAL;
468 468
469 if (policy->cpuinfo.transition_latency >
470 (TRANSITION_LATENCY_LIMIT * 1000))
471 return -EINVAL;
472 if (this_dbs_info->enable) /* Already enabled */ 469 if (this_dbs_info->enable) /* Already enabled */
473 break; 470 break;
474 471
@@ -551,15 +548,17 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
551 return 0; 548 return 0;
552} 549}
553 550
554static struct cpufreq_governor cpufreq_gov_dbs = { 551struct cpufreq_governor cpufreq_gov_conservative = {
555 .name = "conservative", 552 .name = "conservative",
556 .governor = cpufreq_governor_dbs, 553 .governor = cpufreq_governor_dbs,
557 .owner = THIS_MODULE, 554 .max_transition_latency = TRANSITION_LATENCY_LIMIT,
555 .owner = THIS_MODULE,
558}; 556};
557EXPORT_SYMBOL(cpufreq_gov_conservative);
559 558
560static int __init cpufreq_gov_dbs_init(void) 559static int __init cpufreq_gov_dbs_init(void)
561{ 560{
562 return cpufreq_register_governor(&cpufreq_gov_dbs); 561 return cpufreq_register_governor(&cpufreq_gov_conservative);
563} 562}
564 563
565static void __exit cpufreq_gov_dbs_exit(void) 564static void __exit cpufreq_gov_dbs_exit(void)
@@ -567,7 +566,7 @@ static void __exit cpufreq_gov_dbs_exit(void)
567 /* Make sure that the scheduled work is indeed not running */ 566 /* Make sure that the scheduled work is indeed not running */
568 flush_scheduled_work(); 567 flush_scheduled_work();
569 568
570 cpufreq_unregister_governor(&cpufreq_gov_dbs); 569 cpufreq_unregister_governor(&cpufreq_gov_conservative);
571} 570}
572 571
573 572
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index e794527e49..369f445951 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -47,7 +47,7 @@ static unsigned int def_sampling_rate;
47 (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) 47 (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
48#define MAX_SAMPLING_RATE (500 * def_sampling_rate) 48#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
49#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) 49#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
50#define TRANSITION_LATENCY_LIMIT (10 * 1000) 50#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
51 51
52static void do_dbs_timer(struct work_struct *work); 52static void do_dbs_timer(struct work_struct *work);
53 53
@@ -508,12 +508,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
508 if ((!cpu_online(cpu)) || (!policy->cur)) 508 if ((!cpu_online(cpu)) || (!policy->cur))
509 return -EINVAL; 509 return -EINVAL;
510 510
511 if (policy->cpuinfo.transition_latency >
512 (TRANSITION_LATENCY_LIMIT * 1000)) {
513 printk(KERN_WARNING "ondemand governor failed to load "
514 "due to too long transition latency\n");
515 return -EINVAL;
516 }
517 if (this_dbs_info->enable) /* Already enabled */ 511 if (this_dbs_info->enable) /* Already enabled */
518 break; 512 break;
519 513
@@ -585,11 +579,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
585 return 0; 579 return 0;
586} 580}
587 581
588static struct cpufreq_governor cpufreq_gov_dbs = { 582struct cpufreq_governor cpufreq_gov_ondemand = {
589 .name = "ondemand", 583 .name = "ondemand",
590 .governor = cpufreq_governor_dbs, 584 .governor = cpufreq_governor_dbs,
591 .owner = THIS_MODULE, 585 .max_transition_latency = TRANSITION_LATENCY_LIMIT,
586 .owner = THIS_MODULE,
592}; 587};
588EXPORT_SYMBOL(cpufreq_gov_ondemand);
593 589
594static int __init cpufreq_gov_dbs_init(void) 590static int __init cpufreq_gov_dbs_init(void)
595{ 591{
@@ -598,12 +594,12 @@ static int __init cpufreq_gov_dbs_init(void)
598 printk(KERN_ERR "Creation of kondemand failed\n"); 594 printk(KERN_ERR "Creation of kondemand failed\n");
599 return -EFAULT; 595 return -EFAULT;
600 } 596 }
601 return cpufreq_register_governor(&cpufreq_gov_dbs); 597 return cpufreq_register_governor(&cpufreq_gov_ondemand);
602} 598}
603 599
604static void __exit cpufreq_gov_dbs_exit(void) 600static void __exit cpufreq_gov_dbs_exit(void)
605{ 601{
606 cpufreq_unregister_governor(&cpufreq_gov_dbs); 602 cpufreq_unregister_governor(&cpufreq_gov_ondemand);
607 destroy_workqueue(kondemand_wq); 603 destroy_workqueue(kondemand_wq);
608} 604}
609 605
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 917b9bab9c..8a45d0f93e 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -164,8 +164,7 @@ freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
164 return -1; 164 return -1;
165} 165}
166 166
167static void 167static void __cpuexit cpufreq_stats_free_table(unsigned int cpu)
168cpufreq_stats_free_table (unsigned int cpu)
169{ 168{
170 struct cpufreq_stats *stat = cpufreq_stats_table[cpu]; 169 struct cpufreq_stats *stat = cpufreq_stats_table[cpu];
171 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 170 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
@@ -305,8 +304,9 @@ cpufreq_stat_notifier_trans (struct notifier_block *nb, unsigned long val,
305 return 0; 304 return 0;
306} 305}
307 306
308static int cpufreq_stat_cpu_callback(struct notifier_block *nfb, 307static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
309 unsigned long action, void *hcpu) 308 unsigned long action,
309 void *hcpu)
310{ 310{
311 unsigned int cpu = (unsigned long)hcpu; 311 unsigned int cpu = (unsigned long)hcpu;
312 312
@@ -323,7 +323,7 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
323 return NOTIFY_OK; 323 return NOTIFY_OK;
324} 324}
325 325
326static struct notifier_block cpufreq_stat_cpu_notifier = 326static struct notifier_block cpufreq_stat_cpu_notifier __cpuinitdata =
327{ 327{
328 .notifier_call = cpufreq_stat_cpu_callback, 328 .notifier_call = cpufreq_stat_cpu_callback,
329}; 329};
@@ -356,8 +356,7 @@ __init cpufreq_stats_init(void)
356 356
357 register_hotcpu_notifier(&cpufreq_stat_cpu_notifier); 357 register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
358 for_each_online_cpu(cpu) { 358 for_each_online_cpu(cpu) {
359 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, 359 cpufreq_update_policy(cpu);
360 CPU_ONLINE, (void *)(long)cpu);
361 } 360 }
362 return 0; 361 return 0;
363} 362}
@@ -372,13 +371,12 @@ __exit cpufreq_stats_exit(void)
372 CPUFREQ_TRANSITION_NOTIFIER); 371 CPUFREQ_TRANSITION_NOTIFIER);
373 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); 372 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
374 for_each_online_cpu(cpu) { 373 for_each_online_cpu(cpu) {
375 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, 374 cpufreq_stats_free_table(cpu);
376 CPU_DEAD, (void *)(long)cpu);
377 } 375 }
378} 376}
379 377
380MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>"); 378MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>");
381MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats" 379MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats "
382 "through sysfs filesystem"); 380 "through sysfs filesystem");
383MODULE_LICENSE ("GPL"); 381MODULE_LICENSE ("GPL");
384 382
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 4a0576bd06..3706b2bc09 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -743,7 +743,7 @@ static struct kobj_type ktype_mc_set_attribs = {
743 * /sys/devices/system/edac/mc 743 * /sys/devices/system/edac/mc
744 */ 744 */
745static struct kset mc_kset = { 745static struct kset mc_kset = {
746 .kobj = {.name = "mc", .ktype = &ktype_mc_set_attribs }, 746 .kobj = {.ktype = &ktype_mc_set_attribs },
747 .ktype = &ktype_mci, 747 .ktype = &ktype_mci,
748}; 748};
749 749
@@ -1010,6 +1010,7 @@ int edac_sysfs_setup_mc_kset(void)
1010 } 1010 }
1011 1011
1012 /* Init the MC's kobject */ 1012 /* Init the MC's kobject */
1013 kobject_set_name(&mc_kset.kobj, "mc");
1013 mc_kset.kobj.parent = &edac_class->kset.kobj; 1014 mc_kset.kobj.parent = &edac_class->kset.kobj;
1014 1015
1015 /* register the mc_kset */ 1016 /* register the mc_kset */
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index d944647c82..4d4a473939 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -128,16 +128,11 @@ static int eisa_bus_match (struct device *dev, struct device_driver *drv)
128 return 0; 128 return 0;
129} 129}
130 130
131static int eisa_bus_uevent(struct device *dev, char **envp, int num_envp, 131static int eisa_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
132 char *buffer, int buffer_size)
133{ 132{
134 struct eisa_device *edev = to_eisa_device(dev); 133 struct eisa_device *edev = to_eisa_device(dev);
135 int i = 0;
136 int length = 0;
137 134
138 add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, 135 add_uevent_var(env, "MODALIAS=" EISA_DEVICE_MODALIAS_FMT, edev->id.sig);
139 "MODALIAS=" EISA_DEVICE_MODALIAS_FMT, edev->id.sig);
140 envp[i] = NULL;
141 return 0; 136 return 0;
142} 137}
143 138
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index 2b65863416..56681b3b29 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -130,23 +130,16 @@ static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
130} 130}
131 131
132static int 132static int
133fw_unit_uevent(struct device *dev, char **envp, int num_envp, 133fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
134 char *buffer, int buffer_size)
135{ 134{
136 struct fw_unit *unit = fw_unit(dev); 135 struct fw_unit *unit = fw_unit(dev);
137 char modalias[64]; 136 char modalias[64];
138 int length = 0;
139 int i = 0;
140 137
141 get_modalias(unit, modalias, sizeof(modalias)); 138 get_modalias(unit, modalias, sizeof(modalias));
142 139
143 if (add_uevent_var(envp, num_envp, &i, 140 if (add_uevent_var(env, "MODALIAS=%s", modalias))
144 buffer, buffer_size, &length,
145 "MODALIAS=%s", modalias))
146 return -ENOMEM; 141 return -ENOMEM;
147 142
148 envp[i] = NULL;
149
150 return 0; 143 return 0;
151} 144}
152 145
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index 59c3b5aa89..b6e1eb77d1 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -13,21 +13,31 @@
13#include <linux/device.h> 13#include <linux/device.h>
14#include <linux/autoconf.h> 14#include <linux/autoconf.h>
15 15
16#define DEFINE_DMI_ATTR(_name, _mode, _show) \ 16struct dmi_device_attribute{
17static struct device_attribute sys_dmi_##_name##_attr = \ 17 struct device_attribute dev_attr;
18 __ATTR(_name, _mode, _show, NULL); 18 int field;
19 19};
20#define DEFINE_DMI_ATTR_WITH_SHOW(_name, _mode, _field) \ 20#define to_dmi_dev_attr(_dev_attr) \
21static ssize_t sys_dmi_##_name##_show(struct device *dev, \ 21 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
22 struct device_attribute *attr, \ 22
23 char *page) \ 23static ssize_t sys_dmi_field_show(struct device *dev,
24{ \ 24 struct device_attribute *attr,
25 ssize_t len; \ 25 char *page)
26 len = scnprintf(page, PAGE_SIZE, "%s\n", dmi_get_system_info(_field)); \ 26{
27 page[len-1] = '\n'; \ 27 int field = to_dmi_dev_attr(attr)->field;
28 return len; \ 28 ssize_t len;
29} \ 29 len = scnprintf(page, PAGE_SIZE, "%s\n", dmi_get_system_info(field));
30DEFINE_DMI_ATTR(_name, _mode, sys_dmi_##_name##_show); 30 page[len-1] = '\n';
31 return len;
32}
33
34#define DMI_ATTR(_name, _mode, _show, _field) \
35 { .dev_attr = __ATTR(_name, _mode, _show, NULL), \
36 .field = _field }
37
38#define DEFINE_DMI_ATTR_WITH_SHOW(_name, _mode, _field) \
39static struct dmi_device_attribute sys_dmi_##_name##_attr = \
40 DMI_ATTR(_name, _mode, sys_dmi_field_show, _field);
31 41
32DEFINE_DMI_ATTR_WITH_SHOW(bios_vendor, 0444, DMI_BIOS_VENDOR); 42DEFINE_DMI_ATTR_WITH_SHOW(bios_vendor, 0444, DMI_BIOS_VENDOR);
33DEFINE_DMI_ATTR_WITH_SHOW(bios_version, 0444, DMI_BIOS_VERSION); 43DEFINE_DMI_ATTR_WITH_SHOW(bios_version, 0444, DMI_BIOS_VERSION);
@@ -121,7 +131,8 @@ static ssize_t sys_dmi_modalias_show(struct device *dev,
121 return r+1; 131 return r+1;
122} 132}
123 133
124DEFINE_DMI_ATTR(modalias, 0444, sys_dmi_modalias_show); 134static struct device_attribute sys_dmi_modalias_attr =
135 __ATTR(modalias, 0444, sys_dmi_modalias_show, NULL);
125 136
126static struct attribute *sys_dmi_attributes[DMI_STRING_MAX+2]; 137static struct attribute *sys_dmi_attributes[DMI_STRING_MAX+2];
127 138
@@ -134,14 +145,17 @@ static struct attribute_group* sys_dmi_attribute_groups[] = {
134 NULL 145 NULL
135}; 146};
136 147
137static int dmi_dev_uevent(struct device *dev, char **envp, 148static int dmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
138 int num_envp, char *buffer, int buffer_size)
139{ 149{
140 strcpy(buffer, "MODALIAS="); 150 ssize_t len;
141 get_modalias(buffer+9, buffer_size-9); 151
142 envp[0] = buffer; 152 if (add_uevent_var(env, "MODALIAS="))
143 envp[1] = NULL; 153 return -ENOMEM;
144 154 len = get_modalias(&env->buf[env->buflen - 1],
155 sizeof(env->buf) - env->buflen);
156 if (len >= (sizeof(env->buf) - env->buflen))
157 return -ENOMEM;
158 env->buflen += len;
145 return 0; 159 return 0;
146} 160}
147 161
@@ -157,7 +171,7 @@ static struct device *dmi_dev;
157 171
158#define ADD_DMI_ATTR(_name, _field) \ 172#define ADD_DMI_ATTR(_name, _field) \
159 if (dmi_get_system_info(_field)) \ 173 if (dmi_get_system_info(_field)) \
160 sys_dmi_attributes[i++] = & sys_dmi_##_name##_attr.attr; 174 sys_dmi_attributes[i++] = &sys_dmi_##_name##_attr.dev_attr.attr;
161 175
162extern int dmi_available; 176extern int dmi_available;
163 177
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index 0fb730ee1d..6942e065e6 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -625,13 +625,13 @@ static void edd_release(struct kobject * kobj)
625 kfree(dev); 625 kfree(dev);
626} 626}
627 627
628static struct kobj_type ktype_edd = { 628static struct kobj_type edd_ktype = {
629 .release = edd_release, 629 .release = edd_release,
630 .sysfs_ops = &edd_attr_ops, 630 .sysfs_ops = &edd_attr_ops,
631 .default_attrs = def_attrs, 631 .default_attrs = def_attrs,
632}; 632};
633 633
634static decl_subsys(edd,&ktype_edd,NULL); 634static decl_subsys(edd, &edd_ktype, NULL);
635 635
636 636
637/** 637/**
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index bfd2d67df6..858a7b9593 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -402,7 +402,7 @@ static struct attribute *def_attrs[] = {
402 NULL, 402 NULL,
403}; 403};
404 404
405static struct kobj_type ktype_efivar = { 405static struct kobj_type efivar_ktype = {
406 .release = efivar_release, 406 .release = efivar_release,
407 .sysfs_ops = &efivar_attr_ops, 407 .sysfs_ops = &efivar_attr_ops,
408 .default_attrs = def_attrs, 408 .default_attrs = def_attrs,
@@ -583,7 +583,7 @@ static struct subsys_attribute *efi_subsys_attrs[] = {
583 NULL, /* maybe more in the future? */ 583 NULL, /* maybe more in the future? */
584}; 584};
585 585
586static decl_subsys(vars, &ktype_efivar, NULL); 586static decl_subsys(vars, &efivar_ktype, NULL);
587static decl_subsys(efi, NULL, NULL); 587static decl_subsys(efi, NULL, NULL);
588 588
589/* 589/*
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 9f3a4cd0b0..de95c75efb 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -75,11 +75,19 @@ config I2C_AMD8111
75 75
76config I2C_AT91 76config I2C_AT91
77 tristate "Atmel AT91 I2C Two-Wire interface (TWI)" 77 tristate "Atmel AT91 I2C Two-Wire interface (TWI)"
78 depends on ARCH_AT91 && EXPERIMENTAL 78 depends on ARCH_AT91 && EXPERIMENTAL && BROKEN
79 help 79 help
80 This supports the use of the I2C interface on Atmel AT91 80 This supports the use of the I2C interface on Atmel AT91
81 processors. 81 processors.
82 82
83 This driver is BROKEN because the controller which it uses
84 will easily trigger RX overrun and TX underrun errors. Using
85 low I2C clock rates may partially work around those issues
86 on some systems. Another serious problem is that there is no
87 documented way to issue repeated START conditions, as needed
88 to support combined I2C messages. Use the i2c-gpio driver
89 unless your system can cope with those limitations.
90
83config I2C_AU1550 91config I2C_AU1550
84 tristate "Au1550/Au1200 SMBus interface" 92 tristate "Au1550/Au1200 SMBus interface"
85 depends on SOC_AU1550 || SOC_AU1200 93 depends on SOC_AU1550 || SOC_AU1200
@@ -106,6 +114,19 @@ config I2C_BLACKFIN_TWI_CLK_KHZ
106 help 114 help
107 The unit of the TWI clock is kHz. 115 The unit of the TWI clock is kHz.
108 116
117config I2C_DAVINCI
118 tristate "DaVinci I2C driver"
119 depends on ARCH_DAVINCI
120 help
121 Support for TI DaVinci I2C controller driver.
122
123 This driver can also be built as a module. If so, the module
124 will be called i2c-davinci.
125
126 Please note that this driver might be needed to bring up other
127 devices such as DaVinci NIC.
128 For details please see http://www.ti.com/davinci
129
109config I2C_ELEKTOR 130config I2C_ELEKTOR
110 tristate "Elektor ISA card" 131 tristate "Elektor ISA card"
111 depends on ISA && BROKEN_ON_SMP 132 depends on ISA && BROKEN_ON_SMP
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 5b752e4e19..81d43c27cf 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_I2C_AMD8111) += i2c-amd8111.o
11obj-$(CONFIG_I2C_AT91) += i2c-at91.o 11obj-$(CONFIG_I2C_AT91) += i2c-at91.o
12obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o 12obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
13obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o 13obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
14obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o
14obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o 15obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
15obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o 16obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o
16obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o 17obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index c9fca7b492..5d1a27ef25 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -326,7 +326,7 @@ static u32 amd8111_func(struct i2c_adapter *adapter)
326 I2C_FUNC_SMBUS_BYTE_DATA | 326 I2C_FUNC_SMBUS_BYTE_DATA |
327 I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | 327 I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA |
328 I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_PROC_CALL | 328 I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
329 I2C_FUNC_SMBUS_I2C_BLOCK | I2C_FUNC_SMBUS_HWPEC_CALC; 329 I2C_FUNC_SMBUS_I2C_BLOCK | I2C_FUNC_SMBUS_PEC;
330} 330}
331 331
332static const struct i2c_algorithm smbus_algorithm = { 332static const struct i2c_algorithm smbus_algorithm = {
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index d7e7c359fc..2f684166c4 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -48,17 +48,14 @@ wait_xfer_done(struct i2c_au1550_data *adap)
48 48
49 sp = (volatile psc_smb_t *)(adap->psc_base); 49 sp = (volatile psc_smb_t *)(adap->psc_base);
50 50
51 /* Wait for Tx FIFO Underflow. 51 /* Wait for Tx Buffer Empty
52 */ 52 */
53 for (i = 0; i < adap->xfer_timeout; i++) { 53 for (i = 0; i < adap->xfer_timeout; i++) {
54 stat = sp->psc_smbevnt; 54 stat = sp->psc_smbstat;
55 au_sync(); 55 au_sync();
56 if ((stat & PSC_SMBEVNT_TU) != 0) { 56 if ((stat & PSC_SMBSTAT_TE) != 0)
57 /* Clear it. */
58 sp->psc_smbevnt = PSC_SMBEVNT_TU;
59 au_sync();
60 return 0; 57 return 0;
61 } 58
62 udelay(1); 59 udelay(1);
63 } 60 }
64 61
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 6311039dfe..67224a424a 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -44,7 +44,6 @@
44#define TWI_I2C_MODE_COMBINED 0x04 44#define TWI_I2C_MODE_COMBINED 0x04
45 45
46struct bfin_twi_iface { 46struct bfin_twi_iface {
47 struct mutex twi_lock;
48 int irq; 47 int irq;
49 spinlock_t lock; 48 spinlock_t lock;
50 char read_write; 49 char read_write;
@@ -228,12 +227,8 @@ static int bfin_twi_master_xfer(struct i2c_adapter *adap,
228 if (!(bfin_read_TWI_CONTROL() & TWI_ENA)) 227 if (!(bfin_read_TWI_CONTROL() & TWI_ENA))
229 return -ENXIO; 228 return -ENXIO;
230 229
231 mutex_lock(&iface->twi_lock);
232
233 while (bfin_read_TWI_MASTER_STAT() & BUSBUSY) { 230 while (bfin_read_TWI_MASTER_STAT() & BUSBUSY) {
234 mutex_unlock(&iface->twi_lock);
235 yield(); 231 yield();
236 mutex_lock(&iface->twi_lock);
237 } 232 }
238 233
239 ret = 0; 234 ret = 0;
@@ -310,9 +305,6 @@ static int bfin_twi_master_xfer(struct i2c_adapter *adap,
310 break; 305 break;
311 } 306 }
312 307
313 /* Release mutex */
314 mutex_unlock(&iface->twi_lock);
315
316 return ret; 308 return ret;
317} 309}
318 310
@@ -330,12 +322,8 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
330 if (!(bfin_read_TWI_CONTROL() & TWI_ENA)) 322 if (!(bfin_read_TWI_CONTROL() & TWI_ENA))
331 return -ENXIO; 323 return -ENXIO;
332 324
333 mutex_lock(&iface->twi_lock);
334
335 while (bfin_read_TWI_MASTER_STAT() & BUSBUSY) { 325 while (bfin_read_TWI_MASTER_STAT() & BUSBUSY) {
336 mutex_unlock(&iface->twi_lock);
337 yield(); 326 yield();
338 mutex_lock(&iface->twi_lock);
339 } 327 }
340 328
341 iface->writeNum = 0; 329 iface->writeNum = 0;
@@ -502,9 +490,6 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
502 490
503 rc = (iface->result >= 0) ? 0 : -1; 491 rc = (iface->result >= 0) ? 0 : -1;
504 492
505 /* Release mutex */
506 mutex_unlock(&iface->twi_lock);
507
508 return rc; 493 return rc;
509} 494}
510 495
@@ -555,7 +540,6 @@ static int i2c_bfin_twi_probe(struct platform_device *dev)
555 struct i2c_adapter *p_adap; 540 struct i2c_adapter *p_adap;
556 int rc; 541 int rc;
557 542
558 mutex_init(&(iface->twi_lock));
559 spin_lock_init(&(iface->lock)); 543 spin_lock_init(&(iface->lock));
560 init_completion(&(iface->complete)); 544 init_completion(&(iface->complete));
561 iface->irq = IRQ_TWI; 545 iface->irq = IRQ_TWI;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
new file mode 100644
index 0000000000..bd7aaff352
--- /dev/null
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -0,0 +1,586 @@
1/*
2 * TI DAVINCI I2C adapter driver.
3 *
4 * Copyright (C) 2006 Texas Instruments.
5 * Copyright (C) 2007 MontaVista Software Inc.
6 *
7 * Updated by Vinod & Sudhakar Feb 2005
8 *
9 * ----------------------------------------------------------------------------
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 * ----------------------------------------------------------------------------
25 *
26 */
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/delay.h>
30#include <linux/i2c.h>
31#include <linux/clk.h>
32#include <linux/errno.h>
33#include <linux/sched.h>
34#include <linux/err.h>
35#include <linux/interrupt.h>
36#include <linux/platform_device.h>
37#include <linux/io.h>
38
39#include <asm/hardware.h>
40#include <asm/mach-types.h>
41
42#include <asm/arch/i2c.h>
43
44/* ----- global defines ----------------------------------------------- */
45
46#define DAVINCI_I2C_TIMEOUT (1*HZ)
47#define I2C_DAVINCI_INTR_ALL (DAVINCI_I2C_IMR_AAS | \
48 DAVINCI_I2C_IMR_SCD | \
49 DAVINCI_I2C_IMR_ARDY | \
50 DAVINCI_I2C_IMR_NACK | \
51 DAVINCI_I2C_IMR_AL)
52
53#define DAVINCI_I2C_OAR_REG 0x00
54#define DAVINCI_I2C_IMR_REG 0x04
55#define DAVINCI_I2C_STR_REG 0x08
56#define DAVINCI_I2C_CLKL_REG 0x0c
57#define DAVINCI_I2C_CLKH_REG 0x10
58#define DAVINCI_I2C_CNT_REG 0x14
59#define DAVINCI_I2C_DRR_REG 0x18
60#define DAVINCI_I2C_SAR_REG 0x1c
61#define DAVINCI_I2C_DXR_REG 0x20
62#define DAVINCI_I2C_MDR_REG 0x24
63#define DAVINCI_I2C_IVR_REG 0x28
64#define DAVINCI_I2C_EMDR_REG 0x2c
65#define DAVINCI_I2C_PSC_REG 0x30
66
67#define DAVINCI_I2C_IVR_AAS 0x07
68#define DAVINCI_I2C_IVR_SCD 0x06
69#define DAVINCI_I2C_IVR_XRDY 0x05
70#define DAVINCI_I2C_IVR_RDR 0x04
71#define DAVINCI_I2C_IVR_ARDY 0x03
72#define DAVINCI_I2C_IVR_NACK 0x02
73#define DAVINCI_I2C_IVR_AL 0x01
74
75#define DAVINCI_I2C_STR_BB (1 << 12)
76#define DAVINCI_I2C_STR_RSFULL (1 << 11)
77#define DAVINCI_I2C_STR_SCD (1 << 5)
78#define DAVINCI_I2C_STR_ARDY (1 << 2)
79#define DAVINCI_I2C_STR_NACK (1 << 1)
80#define DAVINCI_I2C_STR_AL (1 << 0)
81
82#define DAVINCI_I2C_MDR_NACK (1 << 15)
83#define DAVINCI_I2C_MDR_STT (1 << 13)
84#define DAVINCI_I2C_MDR_STP (1 << 11)
85#define DAVINCI_I2C_MDR_MST (1 << 10)
86#define DAVINCI_I2C_MDR_TRX (1 << 9)
87#define DAVINCI_I2C_MDR_XA (1 << 8)
88#define DAVINCI_I2C_MDR_IRS (1 << 5)
89
90#define DAVINCI_I2C_IMR_AAS (1 << 6)
91#define DAVINCI_I2C_IMR_SCD (1 << 5)
92#define DAVINCI_I2C_IMR_XRDY (1 << 4)
93#define DAVINCI_I2C_IMR_RRDY (1 << 3)
94#define DAVINCI_I2C_IMR_ARDY (1 << 2)
95#define DAVINCI_I2C_IMR_NACK (1 << 1)
96#define DAVINCI_I2C_IMR_AL (1 << 0)
97
98#define MOD_REG_BIT(val, mask, set) do { \
99 if (set) { \
100 val |= mask; \
101 } else { \
102 val &= ~mask; \
103 } \
104} while (0)
105
106struct davinci_i2c_dev {
107 struct device *dev;
108 void __iomem *base;
109 struct completion cmd_complete;
110 struct clk *clk;
111 int cmd_err;
112 u8 *buf;
113 size_t buf_len;
114 int irq;
115 struct i2c_adapter adapter;
116};
117
118/* default platform data to use if not supplied in the platform_device */
119static struct davinci_i2c_platform_data davinci_i2c_platform_data_default = {
120 .bus_freq = 100,
121 .bus_delay = 0,
122};
123
124static inline void davinci_i2c_write_reg(struct davinci_i2c_dev *i2c_dev,
125 int reg, u16 val)
126{
127 __raw_writew(val, i2c_dev->base + reg);
128}
129
130static inline u16 davinci_i2c_read_reg(struct davinci_i2c_dev *i2c_dev, int reg)
131{
132 return __raw_readw(i2c_dev->base + reg);
133}
134
135/*
136 * This functions configures I2C and brings I2C out of reset.
137 * This function is called during I2C init function. This function
138 * also gets called if I2C encounters any errors.
139 */
140static int i2c_davinci_init(struct davinci_i2c_dev *dev)
141{
142 struct davinci_i2c_platform_data *pdata = dev->dev->platform_data;
143 u16 psc;
144 u32 clk;
145 u32 clkh;
146 u32 clkl;
147 u32 input_clock = clk_get_rate(dev->clk);
148 u16 w;
149
150 if (!pdata)
151 pdata = &davinci_i2c_platform_data_default;
152
153 /* put I2C into reset */
154 w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
155 MOD_REG_BIT(w, DAVINCI_I2C_MDR_IRS, 0);
156 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
157
158 /* NOTE: I2C Clock divider programming info
159 * As per I2C specs the following formulas provide prescaler
160 * and low/high divider values
161 * input clk --> PSC Div -----------> ICCL/H Div --> output clock
162 * module clk
163 *
164 * output clk = module clk / (PSC + 1) [ (ICCL + d) + (ICCH + d) ]
165 *
166 * Thus,
167 * (ICCL + ICCH) = clk = (input clk / ((psc +1) * output clk)) - 2d;
168 *
169 * where if PSC == 0, d = 7,
170 * if PSC == 1, d = 6
171 * if PSC > 1 , d = 5
172 */
173
174 psc = 26; /* To get 1MHz clock */
175
176 clk = ((input_clock / (psc + 1)) / (pdata->bus_freq * 1000)) - 10;
177 clkh = (50 * clk) / 100;
178 clkl = clk - clkh;
179
180 davinci_i2c_write_reg(dev, DAVINCI_I2C_PSC_REG, psc);
181 davinci_i2c_write_reg(dev, DAVINCI_I2C_CLKH_REG, clkh);
182 davinci_i2c_write_reg(dev, DAVINCI_I2C_CLKL_REG, clkl);
183
184 dev_dbg(dev->dev, "CLK = %d\n", clk);
185 dev_dbg(dev->dev, "PSC = %d\n",
186 davinci_i2c_read_reg(dev, DAVINCI_I2C_PSC_REG));
187 dev_dbg(dev->dev, "CLKL = %d\n",
188 davinci_i2c_read_reg(dev, DAVINCI_I2C_CLKL_REG));
189 dev_dbg(dev->dev, "CLKH = %d\n",
190 davinci_i2c_read_reg(dev, DAVINCI_I2C_CLKH_REG));
191
192 /* Take the I2C module out of reset: */
193 w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
194 MOD_REG_BIT(w, DAVINCI_I2C_MDR_IRS, 1);
195 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
196
197 /* Enable interrupts */
198 davinci_i2c_write_reg(dev, DAVINCI_I2C_IMR_REG, I2C_DAVINCI_INTR_ALL);
199
200 return 0;
201}
202
203/*
204 * Waiting for bus not busy
205 */
206static int i2c_davinci_wait_bus_not_busy(struct davinci_i2c_dev *dev,
207 char allow_sleep)
208{
209 unsigned long timeout;
210
211 timeout = jiffies + DAVINCI_I2C_TIMEOUT;
212 while (davinci_i2c_read_reg(dev, DAVINCI_I2C_STR_REG)
213 & DAVINCI_I2C_STR_BB) {
214 if (time_after(jiffies, timeout)) {
215 dev_warn(dev->dev,
216 "timeout waiting for bus ready\n");
217 return -ETIMEDOUT;
218 }
219 if (allow_sleep)
220 schedule_timeout(1);
221 }
222
223 return 0;
224}
225
226/*
227 * Low level master read/write transaction. This function is called
228 * from i2c_davinci_xfer.
229 */
230static int
231i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
232{
233 struct davinci_i2c_dev *dev = i2c_get_adapdata(adap);
234 struct davinci_i2c_platform_data *pdata = dev->dev->platform_data;
235 u32 flag;
236 u32 stat;
237 u16 w;
238 int r;
239
240 if (msg->len == 0)
241 return -EINVAL;
242
243 if (!pdata)
244 pdata = &davinci_i2c_platform_data_default;
245 /* Introduce a delay, required for some boards (e.g Davinci EVM) */
246 if (pdata->bus_delay)
247 udelay(pdata->bus_delay);
248
249 /* set the slave address */
250 davinci_i2c_write_reg(dev, DAVINCI_I2C_SAR_REG, msg->addr);
251
252 dev->buf = msg->buf;
253 dev->buf_len = msg->len;
254
255 davinci_i2c_write_reg(dev, DAVINCI_I2C_CNT_REG, dev->buf_len);
256
257 init_completion(&dev->cmd_complete);
258 dev->cmd_err = 0;
259
260 /* Clear any pending interrupts by reading the IVR */
261 stat = davinci_i2c_read_reg(dev, DAVINCI_I2C_IVR_REG);
262
263 /* Take I2C out of reset, configure it as master and set the
264 * start bit */
265 flag = DAVINCI_I2C_MDR_IRS | DAVINCI_I2C_MDR_MST | DAVINCI_I2C_MDR_STT;
266
267 /* if the slave address is ten bit address, enable XA bit */
268 if (msg->flags & I2C_M_TEN)
269 flag |= DAVINCI_I2C_MDR_XA;
270 if (!(msg->flags & I2C_M_RD))
271 flag |= DAVINCI_I2C_MDR_TRX;
272 if (stop)
273 flag |= DAVINCI_I2C_MDR_STP;
274
275 /* Enable receive or transmit interrupts */
276 w = davinci_i2c_read_reg(dev, DAVINCI_I2C_IMR_REG);
277 if (msg->flags & I2C_M_RD)
278 MOD_REG_BIT(w, DAVINCI_I2C_IMR_RRDY, 1);
279 else
280 MOD_REG_BIT(w, DAVINCI_I2C_IMR_XRDY, 1);
281 davinci_i2c_write_reg(dev, DAVINCI_I2C_IMR_REG, w);
282
283 /* write the data into mode register */
284 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
285
286 r = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
287 DAVINCI_I2C_TIMEOUT);
288 dev->buf_len = 0;
289 if (r < 0)
290 return r;
291
292 if (r == 0) {
293 dev_err(dev->dev, "controller timed out\n");
294 i2c_davinci_init(dev);
295 return -ETIMEDOUT;
296 }
297
298 /* no error */
299 if (likely(!dev->cmd_err))
300 return msg->len;
301
302 /* We have an error */
303 if (dev->cmd_err & DAVINCI_I2C_STR_AL) {
304 i2c_davinci_init(dev);
305 return -EIO;
306 }
307
308 if (dev->cmd_err & DAVINCI_I2C_STR_NACK) {
309 if (msg->flags & I2C_M_IGNORE_NAK)
310 return msg->len;
311 if (stop) {
312 w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
313 MOD_REG_BIT(w, DAVINCI_I2C_MDR_STP, 1);
314 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
315 }
316 return -EREMOTEIO;
317 }
318 return -EIO;
319}
320
321/*
322 * Prepare controller for a transaction and call i2c_davinci_xfer_msg
323 */
324static int
325i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
326{
327 struct davinci_i2c_dev *dev = i2c_get_adapdata(adap);
328 int i;
329 int ret;
330
331 dev_dbg(dev->dev, "%s: msgs: %d\n", __FUNCTION__, num);
332
333 ret = i2c_davinci_wait_bus_not_busy(dev, 1);
334 if (ret < 0) {
335 dev_warn(dev->dev, "timeout waiting for bus ready\n");
336 return ret;
337 }
338
339 for (i = 0; i < num; i++) {
340 ret = i2c_davinci_xfer_msg(adap, &msgs[i], (i == (num - 1)));
341 if (ret < 0)
342 return ret;
343 }
344
345 dev_dbg(dev->dev, "%s:%d ret: %d\n", __FUNCTION__, __LINE__, ret);
346
347 return num;
348}
349
350static u32 i2c_davinci_func(struct i2c_adapter *adap)
351{
352 return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
353}
354
355/*
356 * Interrupt service routine. This gets called whenever an I2C interrupt
357 * occurs.
358 */
359static irqreturn_t i2c_davinci_isr(int this_irq, void *dev_id)
360{
361 struct davinci_i2c_dev *dev = dev_id;
362 u32 stat;
363 int count = 0;
364 u16 w;
365
366 while ((stat = davinci_i2c_read_reg(dev, DAVINCI_I2C_IVR_REG))) {
367 dev_dbg(dev->dev, "%s: stat=0x%x\n", __FUNCTION__, stat);
368 if (count++ == 100) {
369 dev_warn(dev->dev, "Too much work in one IRQ\n");
370 break;
371 }
372
373 switch (stat) {
374 case DAVINCI_I2C_IVR_AL:
375 dev->cmd_err |= DAVINCI_I2C_STR_AL;
376 complete(&dev->cmd_complete);
377 break;
378
379 case DAVINCI_I2C_IVR_NACK:
380 dev->cmd_err |= DAVINCI_I2C_STR_NACK;
381 complete(&dev->cmd_complete);
382 break;
383
384 case DAVINCI_I2C_IVR_ARDY:
385 w = davinci_i2c_read_reg(dev, DAVINCI_I2C_STR_REG);
386 MOD_REG_BIT(w, DAVINCI_I2C_STR_ARDY, 1);
387 davinci_i2c_write_reg(dev, DAVINCI_I2C_STR_REG, w);
388 complete(&dev->cmd_complete);
389 break;
390
391 case DAVINCI_I2C_IVR_RDR:
392 if (dev->buf_len) {
393 *dev->buf++ =
394 davinci_i2c_read_reg(dev,
395 DAVINCI_I2C_DRR_REG);
396 dev->buf_len--;
397 if (dev->buf_len)
398 continue;
399
400 w = davinci_i2c_read_reg(dev,
401 DAVINCI_I2C_STR_REG);
402 MOD_REG_BIT(w, DAVINCI_I2C_IMR_RRDY, 0);
403 davinci_i2c_write_reg(dev,
404 DAVINCI_I2C_STR_REG,
405 w);
406 } else
407 dev_err(dev->dev, "RDR IRQ while no"
408 "data requested\n");
409 break;
410
411 case DAVINCI_I2C_IVR_XRDY:
412 if (dev->buf_len) {
413 davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG,
414 *dev->buf++);
415 dev->buf_len--;
416 if (dev->buf_len)
417 continue;
418
419 w = davinci_i2c_read_reg(dev,
420 DAVINCI_I2C_IMR_REG);
421 MOD_REG_BIT(w, DAVINCI_I2C_IMR_XRDY, 0);
422 davinci_i2c_write_reg(dev,
423 DAVINCI_I2C_IMR_REG,
424 w);
425 } else
426 dev_err(dev->dev, "TDR IRQ while no data to"
427 "send\n");
428 break;
429
430 case DAVINCI_I2C_IVR_SCD:
431 w = davinci_i2c_read_reg(dev, DAVINCI_I2C_STR_REG);
432 MOD_REG_BIT(w, DAVINCI_I2C_STR_SCD, 1);
433 davinci_i2c_write_reg(dev, DAVINCI_I2C_STR_REG, w);
434 complete(&dev->cmd_complete);
435 break;
436
437 case DAVINCI_I2C_IVR_AAS:
438 dev_warn(dev->dev, "Address as slave interrupt\n");
439 }/* switch */
440 }/* while */
441
442 return count ? IRQ_HANDLED : IRQ_NONE;
443}
444
445static struct i2c_algorithm i2c_davinci_algo = {
446 .master_xfer = i2c_davinci_xfer,
447 .functionality = i2c_davinci_func,
448};
449
450static int davinci_i2c_probe(struct platform_device *pdev)
451{
452 struct davinci_i2c_dev *dev;
453 struct i2c_adapter *adap;
454 struct resource *mem, *irq, *ioarea;
455 int r;
456
457 /* NOTE: driver uses the static register mapping */
458 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
459 if (!mem) {
460 dev_err(&pdev->dev, "no mem resource?\n");
461 return -ENODEV;
462 }
463
464 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
465 if (!irq) {
466 dev_err(&pdev->dev, "no irq resource?\n");
467 return -ENODEV;
468 }
469
470 ioarea = request_mem_region(mem->start, (mem->end - mem->start) + 1,
471 pdev->name);
472 if (!ioarea) {
473 dev_err(&pdev->dev, "I2C region already claimed\n");
474 return -EBUSY;
475 }
476
477 dev = kzalloc(sizeof(struct davinci_i2c_dev), GFP_KERNEL);
478 if (!dev) {
479 r = -ENOMEM;
480 goto err_release_region;
481 }
482
483 dev->dev = get_device(&pdev->dev);
484 dev->irq = irq->start;
485 platform_set_drvdata(pdev, dev);
486
487 dev->clk = clk_get(&pdev->dev, "I2CCLK");
488 if (IS_ERR(dev->clk)) {
489 r = -ENODEV;
490 goto err_free_mem;
491 }
492 clk_enable(dev->clk);
493
494 dev->base = (void __iomem *)IO_ADDRESS(mem->start);
495 i2c_davinci_init(dev);
496
497 r = request_irq(dev->irq, i2c_davinci_isr, 0, pdev->name, dev);
498 if (r) {
499 dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
500 goto err_unuse_clocks;
501 }
502
503 adap = &dev->adapter;
504 i2c_set_adapdata(adap, dev);
505 adap->owner = THIS_MODULE;
506 adap->class = I2C_CLASS_HWMON;
507 strlcpy(adap->name, "DaVinci I2C adapter", sizeof(adap->name));
508 adap->algo = &i2c_davinci_algo;
509 adap->dev.parent = &pdev->dev;
510
511 /* FIXME */
512 adap->timeout = 1;
513 adap->retries = 1;
514
515 adap->nr = pdev->id;
516 r = i2c_add_numbered_adapter(adap);
517 if (r) {
518 dev_err(&pdev->dev, "failure adding adapter\n");
519 goto err_free_irq;
520 }
521
522 return 0;
523
524err_free_irq:
525 free_irq(dev->irq, dev);
526err_unuse_clocks:
527 clk_disable(dev->clk);
528 clk_put(dev->clk);
529 dev->clk = NULL;
530err_free_mem:
531 platform_set_drvdata(pdev, NULL);
532 put_device(&pdev->dev);
533 kfree(dev);
534err_release_region:
535 release_mem_region(mem->start, (mem->end - mem->start) + 1);
536
537 return r;
538}
539
540static int davinci_i2c_remove(struct platform_device *pdev)
541{
542 struct davinci_i2c_dev *dev = platform_get_drvdata(pdev);
543 struct resource *mem;
544
545 platform_set_drvdata(pdev, NULL);
546 i2c_del_adapter(&dev->adapter);
547 put_device(&pdev->dev);
548
549 clk_disable(dev->clk);
550 clk_put(dev->clk);
551 dev->clk = NULL;
552
553 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
554 free_irq(IRQ_I2C, dev);
555 kfree(dev);
556
557 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
558 release_mem_region(mem->start, (mem->end - mem->start) + 1);
559 return 0;
560}
561
562static struct platform_driver davinci_i2c_driver = {
563 .probe = davinci_i2c_probe,
564 .remove = davinci_i2c_remove,
565 .driver = {
566 .name = "i2c_davinci",
567 .owner = THIS_MODULE,
568 },
569};
570
571/* I2C may be needed to bring up other drivers */
572static int __init davinci_i2c_init_driver(void)
573{
574 return platform_driver_register(&davinci_i2c_driver);
575}
576subsys_initcall(davinci_i2c_init_driver);
577
578static void __exit davinci_i2c_exit_driver(void)
579{
580 platform_driver_unregister(&davinci_i2c_driver);
581}
582module_exit(davinci_i2c_exit_driver);
583
584MODULE_AUTHOR("Texas Instruments India");
585MODULE_DESCRIPTION("TI DaVinci I2C bus adapter");
586MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 289816db52..ac27e5f84e 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -34,6 +34,7 @@
34 ESB2 269B 34 ESB2 269B
35 ICH8 283E 35 ICH8 283E
36 ICH9 2930 36 ICH9 2930
37 Tolapai 5032
37 This driver supports several versions of Intel's I/O Controller Hubs (ICH). 38 This driver supports several versions of Intel's I/O Controller Hubs (ICH).
38 For SMBus support, they are similar to the PIIX4 and are part 39 For SMBus support, they are similar to the PIIX4 and are part
39 of Intel's '810' and other chipsets. 40 of Intel's '810' and other chipsets.
@@ -515,7 +516,7 @@ static u32 i801_func(struct i2c_adapter *adapter)
515 return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | 516 return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
516 I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | 517 I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
517 I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 518 I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK
518 | (isich4 ? I2C_FUNC_SMBUS_HWPEC_CALC : 0); 519 | (isich4 ? I2C_FUNC_SMBUS_PEC : 0);
519} 520}
520 521
521static const struct i2c_algorithm smbus_algorithm = { 522static const struct i2c_algorithm smbus_algorithm = {
@@ -543,6 +544,7 @@ static struct pci_device_id i801_ids[] = {
543 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_17) }, 544 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_17) },
544 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) }, 545 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) },
545 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) }, 546 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) },
547 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TOLAPAI_1) },
546 { 0, } 548 { 0, }
547}; 549};
548 550
@@ -563,6 +565,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
563 case PCI_DEVICE_ID_INTEL_ESB2_17: 565 case PCI_DEVICE_ID_INTEL_ESB2_17:
564 case PCI_DEVICE_ID_INTEL_ICH8_5: 566 case PCI_DEVICE_ID_INTEL_ICH8_5:
565 case PCI_DEVICE_ID_INTEL_ICH9_6: 567 case PCI_DEVICE_ID_INTEL_ICH9_6:
568 case PCI_DEVICE_ID_INTEL_TOLAPAI_1:
566 isich4 = 1; 569 isich4 = 1;
567 break; 570 break;
568 default: 571 default:
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 8b14d14e60..e08bacadd6 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -738,7 +738,14 @@ static int __devinit iic_probe(struct ocp_device *ocp){
738 adap->timeout = 1; 738 adap->timeout = 1;
739 adap->retries = 1; 739 adap->retries = 1;
740 740
741 if ((ret = i2c_add_adapter(adap)) != 0){ 741 /*
742 * If "dev->idx" is negative we consider it as zero.
743 * The reason to do so is to avoid sysfs names that only make
744 * sense when there are multiple adapters.
745 */
746 adap->nr = dev->idx >= 0 ? dev->idx : 0;
747
748 if ((ret = i2c_add_numbered_adapter(adap)) < 0) {
742 printk(KERN_CRIT "ibm-iic%d: failed to register i2c adapter\n", 749 printk(KERN_CRIT "ibm-iic%d: failed to register i2c adapter\n",
743 dev->idx); 750 dev->idx);
744 goto fail; 751 goto fail;
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index ace644e21b..c70146e4c2 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -389,13 +389,6 @@ iop3xx_i2c_master_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
389 return im; 389 return im;
390} 390}
391 391
392static int
393iop3xx_i2c_algo_control(struct i2c_adapter *adapter, unsigned int cmd,
394 unsigned long arg)
395{
396 return 0;
397}
398
399static u32 392static u32
400iop3xx_i2c_func(struct i2c_adapter *adap) 393iop3xx_i2c_func(struct i2c_adapter *adap)
401{ 394{
@@ -404,7 +397,6 @@ iop3xx_i2c_func(struct i2c_adapter *adap)
404 397
405static const struct i2c_algorithm iop3xx_i2c_algo = { 398static const struct i2c_algorithm iop3xx_i2c_algo = {
406 .master_xfer = iop3xx_i2c_master_xfer, 399 .master_xfer = iop3xx_i2c_master_xfer,
407 .algo_control = iop3xx_i2c_algo_control,
408 .functionality = iop3xx_i2c_func, 400 .functionality = iop3xx_i2c_func,
409}; 401};
410 402
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index c48140f782..1bf590c741 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -62,6 +62,7 @@ struct nforce2_smbus {
62 int base; 62 int base;
63 int size; 63 int size;
64 int blockops; 64 int blockops;
65 int can_abort;
65}; 66};
66 67
67 68
@@ -83,7 +84,14 @@ struct nforce2_smbus {
83#define NVIDIA_SMB_DATA (smbus->base + 0x04) /* 32 data registers */ 84#define NVIDIA_SMB_DATA (smbus->base + 0x04) /* 32 data registers */
84#define NVIDIA_SMB_BCNT (smbus->base + 0x24) /* number of data 85#define NVIDIA_SMB_BCNT (smbus->base + 0x24) /* number of data
85 bytes */ 86 bytes */
86 87#define NVIDIA_SMB_STATUS_ABRT (smbus->base + 0x3c) /* register used to
88 check the status of
89 the abort command */
90#define NVIDIA_SMB_CTRL (smbus->base + 0x3e) /* control register */
91
92#define NVIDIA_SMB_STATUS_ABRT_STS 0x01 /* Bit to notify that
93 abort succeeded */
94#define NVIDIA_SMB_CTRL_ABORT 0x20
87#define NVIDIA_SMB_STS_DONE 0x80 95#define NVIDIA_SMB_STS_DONE 0x80
88#define NVIDIA_SMB_STS_ALRM 0x40 96#define NVIDIA_SMB_STS_ALRM 0x40
89#define NVIDIA_SMB_STS_RES 0x20 97#define NVIDIA_SMB_STS_RES 0x20
@@ -98,15 +106,61 @@ struct nforce2_smbus {
98#define NVIDIA_SMB_PRTCL_BLOCK_DATA 0x0a 106#define NVIDIA_SMB_PRTCL_BLOCK_DATA 0x0a
99#define NVIDIA_SMB_PRTCL_PEC 0x80 107#define NVIDIA_SMB_PRTCL_PEC 0x80
100 108
109/* Misc definitions */
110#define MAX_TIMEOUT 100
111
101static struct pci_driver nforce2_driver; 112static struct pci_driver nforce2_driver;
102 113
114static void nforce2_abort(struct i2c_adapter *adap)
115{
116 struct nforce2_smbus *smbus = adap->algo_data;
117 int timeout = 0;
118 unsigned char temp;
119
120 dev_dbg(&adap->dev, "Aborting current transaction\n");
121
122 outb_p(NVIDIA_SMB_CTRL_ABORT, NVIDIA_SMB_CTRL);
123 do {
124 msleep(1);
125 temp = inb_p(NVIDIA_SMB_STATUS_ABRT);
126 } while (!(temp & NVIDIA_SMB_STATUS_ABRT_STS) &&
127 (timeout++ < MAX_TIMEOUT));
128 if (!(temp & NVIDIA_SMB_STATUS_ABRT_STS))
129 dev_err(&adap->dev, "Can't reset the smbus\n");
130 outb_p(NVIDIA_SMB_STATUS_ABRT_STS, NVIDIA_SMB_STATUS_ABRT);
131}
132
133static int nforce2_check_status(struct i2c_adapter *adap)
134{
135 struct nforce2_smbus *smbus = adap->algo_data;
136 int timeout = 0;
137 unsigned char temp;
138
139 do {
140 msleep(1);
141 temp = inb_p(NVIDIA_SMB_STS);
142 } while ((!temp) && (timeout++ < MAX_TIMEOUT));
143
144 if (timeout >= MAX_TIMEOUT) {
145 dev_dbg(&adap->dev, "SMBus Timeout!\n");
146 if (smbus->can_abort)
147 nforce2_abort(adap);
148 return -1;
149 }
150 if (!(temp & NVIDIA_SMB_STS_DONE) || (temp & NVIDIA_SMB_STS_STATUS)) {
151 dev_dbg(&adap->dev, "Transaction failed (0x%02x)!\n", temp);
152 return -1;
153 }
154 return 0;
155}
156
103/* Return -1 on error */ 157/* Return -1 on error */
104static s32 nforce2_access(struct i2c_adapter * adap, u16 addr, 158static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
105 unsigned short flags, char read_write, 159 unsigned short flags, char read_write,
106 u8 command, int size, union i2c_smbus_data * data) 160 u8 command, int size, union i2c_smbus_data * data)
107{ 161{
108 struct nforce2_smbus *smbus = adap->algo_data; 162 struct nforce2_smbus *smbus = adap->algo_data;
109 unsigned char protocol, pec, temp; 163 unsigned char protocol, pec;
110 u8 len; 164 u8 len;
111 int i; 165 int i;
112 166
@@ -170,21 +224,8 @@ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
170 outb_p((addr & 0x7f) << 1, NVIDIA_SMB_ADDR); 224 outb_p((addr & 0x7f) << 1, NVIDIA_SMB_ADDR);
171 outb_p(protocol, NVIDIA_SMB_PRTCL); 225 outb_p(protocol, NVIDIA_SMB_PRTCL);
172 226
173 temp = inb_p(NVIDIA_SMB_STS); 227 if (nforce2_check_status(adap))
174
175 if (~temp & NVIDIA_SMB_STS_DONE) {
176 udelay(500);
177 temp = inb_p(NVIDIA_SMB_STS);
178 }
179 if (~temp & NVIDIA_SMB_STS_DONE) {
180 msleep(10);
181 temp = inb_p(NVIDIA_SMB_STS);
182 }
183
184 if ((~temp & NVIDIA_SMB_STS_DONE) || (temp & NVIDIA_SMB_STS_STATUS)) {
185 dev_dbg(&adap->dev, "SMBus Timeout! (0x%02x)\n", temp);
186 return -1; 228 return -1;
187 }
188 229
189 if (read_write == I2C_SMBUS_WRITE) 230 if (read_write == I2C_SMBUS_WRITE)
190 return 0; 231 return 0;
@@ -202,7 +243,12 @@ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
202 243
203 case I2C_SMBUS_BLOCK_DATA: 244 case I2C_SMBUS_BLOCK_DATA:
204 len = inb_p(NVIDIA_SMB_BCNT); 245 len = inb_p(NVIDIA_SMB_BCNT);
205 len = min_t(u8, len, I2C_SMBUS_BLOCK_MAX); 246 if ((len <= 0) || (len > I2C_SMBUS_BLOCK_MAX)) {
247 dev_err(&adap->dev, "Transaction failed "
248 "(received block size: 0x%02x)\n",
249 len);
250 return -1;
251 }
206 for (i = 0; i < len; i++) 252 for (i = 0; i < len; i++)
207 data->block[i+1] = inb_p(NVIDIA_SMB_DATA + i); 253 data->block[i+1] = inb_p(NVIDIA_SMB_DATA + i);
208 data->block[0] = len; 254 data->block[0] = len;
@@ -218,6 +264,7 @@ static u32 nforce2_func(struct i2c_adapter *adapter)
218 /* other functionality might be possible, but is not tested */ 264 /* other functionality might be possible, but is not tested */
219 return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | 265 return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
220 I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | 266 I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
267 I2C_FUNC_SMBUS_PEC |
221 (((struct nforce2_smbus*)adapter->algo_data)->blockops ? 268 (((struct nforce2_smbus*)adapter->algo_data)->blockops ?
222 I2C_FUNC_SMBUS_BLOCK_DATA : 0); 269 I2C_FUNC_SMBUS_BLOCK_DATA : 0);
223} 270}
@@ -308,6 +355,8 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_
308 case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS: 355 case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS:
309 smbuses[0].blockops = 1; 356 smbuses[0].blockops = 1;
310 smbuses[1].blockops = 1; 357 smbuses[1].blockops = 1;
358 smbuses[0].can_abort = 1;
359 smbuses[1].can_abort = 1;
311 } 360 }
312 361
313 /* SMBus adapter 1 */ 362 /* SMBus adapter 1 */
diff --git a/drivers/i2c/busses/i2c-stub.c b/drivers/i2c/busses/i2c-stub.c
index a54adc50d1..84df29da1d 100644
--- a/drivers/i2c/busses/i2c-stub.c
+++ b/drivers/i2c/busses/i2c-stub.c
@@ -24,24 +24,41 @@
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/slab.h>
27#include <linux/errno.h> 28#include <linux/errno.h>
28#include <linux/i2c.h> 29#include <linux/i2c.h>
29 30
30static unsigned short chip_addr; 31#define MAX_CHIPS 10
31module_param(chip_addr, ushort, S_IRUGO);
32MODULE_PARM_DESC(chip_addr, "Chip address (between 0x03 and 0x77)\n");
33 32
34static u8 stub_pointer; 33static unsigned short chip_addr[MAX_CHIPS];
35static u8 stub_bytes[256]; 34module_param_array(chip_addr, ushort, NULL, S_IRUGO);
36static u16 stub_words[256]; 35MODULE_PARM_DESC(chip_addr,
36 "Chip addresses (up to 10, between 0x03 and 0x77)\n");
37
38struct stub_chip {
39 u8 pointer;
40 u8 bytes[256];
41 u16 words[256];
42};
43
44static struct stub_chip *stub_chips;
37 45
38/* Return -1 on error. */ 46/* Return -1 on error. */
39static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags, 47static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
40 char read_write, u8 command, int size, union i2c_smbus_data * data) 48 char read_write, u8 command, int size, union i2c_smbus_data * data)
41{ 49{
42 s32 ret; 50 s32 ret;
43 51 int i;
44 if (addr != chip_addr) 52 struct stub_chip *chip = NULL;
53
54 /* Search for the right chip */
55 for (i = 0; i < MAX_CHIPS && chip_addr[i]; i++) {
56 if (addr == chip_addr[i]) {
57 chip = stub_chips + i;
58 break;
59 }
60 }
61 if (!chip)
45 return -ENODEV; 62 return -ENODEV;
46 63
47 switch (size) { 64 switch (size) {
@@ -53,12 +70,12 @@ static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
53 70
54 case I2C_SMBUS_BYTE: 71 case I2C_SMBUS_BYTE:
55 if (read_write == I2C_SMBUS_WRITE) { 72 if (read_write == I2C_SMBUS_WRITE) {
56 stub_pointer = command; 73 chip->pointer = command;
57 dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, " 74 dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, "
58 "wrote 0x%02x.\n", 75 "wrote 0x%02x.\n",
59 addr, command); 76 addr, command);
60 } else { 77 } else {
61 data->byte = stub_bytes[stub_pointer++]; 78 data->byte = chip->bytes[chip->pointer++];
62 dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, " 79 dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, "
63 "read 0x%02x.\n", 80 "read 0x%02x.\n",
64 addr, data->byte); 81 addr, data->byte);
@@ -69,29 +86,29 @@ static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
69 86
70 case I2C_SMBUS_BYTE_DATA: 87 case I2C_SMBUS_BYTE_DATA:
71 if (read_write == I2C_SMBUS_WRITE) { 88 if (read_write == I2C_SMBUS_WRITE) {
72 stub_bytes[command] = data->byte; 89 chip->bytes[command] = data->byte;
73 dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, " 90 dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, "
74 "wrote 0x%02x at 0x%02x.\n", 91 "wrote 0x%02x at 0x%02x.\n",
75 addr, data->byte, command); 92 addr, data->byte, command);
76 } else { 93 } else {
77 data->byte = stub_bytes[command]; 94 data->byte = chip->bytes[command];
78 dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, " 95 dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, "
79 "read 0x%02x at 0x%02x.\n", 96 "read 0x%02x at 0x%02x.\n",
80 addr, data->byte, command); 97 addr, data->byte, command);
81 } 98 }
82 stub_pointer = command + 1; 99 chip->pointer = command + 1;
83 100
84 ret = 0; 101 ret = 0;
85 break; 102 break;
86 103
87 case I2C_SMBUS_WORD_DATA: 104 case I2C_SMBUS_WORD_DATA:
88 if (read_write == I2C_SMBUS_WRITE) { 105 if (read_write == I2C_SMBUS_WRITE) {
89 stub_words[command] = data->word; 106 chip->words[command] = data->word;
90 dev_dbg(&adap->dev, "smbus word data - addr 0x%02x, " 107 dev_dbg(&adap->dev, "smbus word data - addr 0x%02x, "
91 "wrote 0x%04x at 0x%02x.\n", 108 "wrote 0x%04x at 0x%02x.\n",
92 addr, data->word, command); 109 addr, data->word, command);
93 } else { 110 } else {
94 data->word = stub_words[command]; 111 data->word = chip->words[command];
95 dev_dbg(&adap->dev, "smbus word data - addr 0x%02x, " 112 dev_dbg(&adap->dev, "smbus word data - addr 0x%02x, "
96 "read 0x%04x at 0x%02x.\n", 113 "read 0x%04x at 0x%02x.\n",
97 addr, data->word, command); 114 addr, data->word, command);
@@ -129,23 +146,41 @@ static struct i2c_adapter stub_adapter = {
129 146
130static int __init i2c_stub_init(void) 147static int __init i2c_stub_init(void)
131{ 148{
132 if (!chip_addr) { 149 int i, ret;
150
151 if (!chip_addr[0]) {
133 printk(KERN_ERR "i2c-stub: Please specify a chip address\n"); 152 printk(KERN_ERR "i2c-stub: Please specify a chip address\n");
134 return -ENODEV; 153 return -ENODEV;
135 } 154 }
136 if (chip_addr < 0x03 || chip_addr > 0x77) { 155
137 printk(KERN_ERR "i2c-stub: Invalid chip address 0x%02x\n", 156 for (i = 0; i < MAX_CHIPS && chip_addr[i]; i++) {
138 chip_addr); 157 if (chip_addr[i] < 0x03 || chip_addr[i] > 0x77) {
139 return -EINVAL; 158 printk(KERN_ERR "i2c-stub: Invalid chip address "
159 "0x%02x\n", chip_addr[i]);
160 return -EINVAL;
161 }
162
163 printk(KERN_INFO "i2c-stub: Virtual chip at 0x%02x\n",
164 chip_addr[i]);
140 } 165 }
141 166
142 printk(KERN_INFO "i2c-stub: Virtual chip at 0x%02x\n", chip_addr); 167 /* Allocate memory for all chips at once */
143 return i2c_add_adapter(&stub_adapter); 168 stub_chips = kzalloc(i * sizeof(struct stub_chip), GFP_KERNEL);
169 if (!stub_chips) {
170 printk(KERN_ERR "i2c-stub: Out of memory\n");
171 return -ENOMEM;
172 }
173
174 ret = i2c_add_adapter(&stub_adapter);
175 if (ret)
176 kfree(stub_chips);
177 return ret;
144} 178}
145 179
146static void __exit i2c_stub_exit(void) 180static void __exit i2c_stub_exit(void)
147{ 181{
148 i2c_del_adapter(&stub_adapter); 182 i2c_del_adapter(&stub_adapter);
183 kfree(stub_chips);
149} 184}
150 185
151MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>"); 186MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>");
diff --git a/drivers/i2c/chips/pcf8574.c b/drivers/i2c/chips/pcf8574.c
index 32b25427ea..21c6dd6919 100644
--- a/drivers/i2c/chips/pcf8574.c
+++ b/drivers/i2c/chips/pcf8574.c
@@ -48,14 +48,11 @@ static unsigned short normal_i2c[] = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26,
48/* Insmod parameters */ 48/* Insmod parameters */
49I2C_CLIENT_INSMOD_2(pcf8574, pcf8574a); 49I2C_CLIENT_INSMOD_2(pcf8574, pcf8574a);
50 50
51/* Initial values */
52#define PCF8574_INIT 255 /* All outputs on (input mode) */
53
54/* Each client has this additional data */ 51/* Each client has this additional data */
55struct pcf8574_data { 52struct pcf8574_data {
56 struct i2c_client client; 53 struct i2c_client client;
57 54
58 u8 write; /* Remember last written value */ 55 int write; /* Remember last written value */
59}; 56};
60 57
61static int pcf8574_attach_adapter(struct i2c_adapter *adapter); 58static int pcf8574_attach_adapter(struct i2c_adapter *adapter);
@@ -85,7 +82,11 @@ static DEVICE_ATTR(read, S_IRUGO, show_read, NULL);
85static ssize_t show_write(struct device *dev, struct device_attribute *attr, char *buf) 82static ssize_t show_write(struct device *dev, struct device_attribute *attr, char *buf)
86{ 83{
87 struct pcf8574_data *data = i2c_get_clientdata(to_i2c_client(dev)); 84 struct pcf8574_data *data = i2c_get_clientdata(to_i2c_client(dev));
88 return sprintf(buf, "%u\n", data->write); 85
86 if (data->write < 0)
87 return data->write;
88
89 return sprintf(buf, "%d\n", data->write);
89} 90}
90 91
91static ssize_t set_write(struct device *dev, struct device_attribute *attr, const char *buf, 92static ssize_t set_write(struct device *dev, struct device_attribute *attr, const char *buf,
@@ -206,8 +207,7 @@ static int pcf8574_detach_client(struct i2c_client *client)
206static void pcf8574_init_client(struct i2c_client *client) 207static void pcf8574_init_client(struct i2c_client *client)
207{ 208{
208 struct pcf8574_data *data = i2c_get_clientdata(client); 209 struct pcf8574_data *data = i2c_get_clientdata(client);
209 data->write = PCF8574_INIT; 210 data->write = -EAGAIN;
210 i2c_smbus_write_byte(client, data->write);
211} 211}
212 212
213static int __init pcf8574_init(void) 213static int __init pcf8574_init(void)
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
index 503ffec2ce..e320994b98 100644
--- a/drivers/i2c/chips/tps65010.c
+++ b/drivers/i2c/chips/tps65010.c
@@ -24,20 +24,13 @@
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/device.h>
28#include <linux/i2c.h> 27#include <linux/i2c.h>
29#include <linux/delay.h> 28#include <linux/delay.h>
30#include <linux/workqueue.h> 29#include <linux/workqueue.h>
31#include <linux/suspend.h>
32#include <linux/debugfs.h> 30#include <linux/debugfs.h>
33#include <linux/seq_file.h> 31#include <linux/seq_file.h>
34#include <linux/mutex.h> 32#include <linux/mutex.h>
35 33
36#include <asm/irq.h>
37#include <asm/mach-types.h>
38
39#include <asm/arch/gpio.h>
40#include <asm/arch/mux.h>
41#include <asm/arch/tps65010.h> 34#include <asm/arch/tps65010.h>
42 35
43/*-------------------------------------------------------------------------*/ 36/*-------------------------------------------------------------------------*/
@@ -48,10 +41,6 @@
48MODULE_DESCRIPTION("TPS6501x Power Management Driver"); 41MODULE_DESCRIPTION("TPS6501x Power Management Driver");
49MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
50 43
51static unsigned short normal_i2c[] = { 0x48, /* 0x49, */ I2C_CLIENT_END };
52
53I2C_CLIENT_INSMOD;
54
55static struct i2c_driver tps65010_driver; 44static struct i2c_driver tps65010_driver;
56 45
57/*-------------------------------------------------------------------------*/ 46/*-------------------------------------------------------------------------*/
@@ -79,9 +68,8 @@ enum tps_model {
79}; 68};
80 69
81struct tps65010 { 70struct tps65010 {
82 struct i2c_client client; 71 struct i2c_client *client;
83 struct mutex lock; 72 struct mutex lock;
84 int irq;
85 struct delayed_work work; 73 struct delayed_work work;
86 struct dentry *file; 74 struct dentry *file;
87 unsigned charging:1; 75 unsigned charging:1;
@@ -229,22 +217,22 @@ static int dbg_show(struct seq_file *s, void *_)
229 /* registers for monitoring battery charging and status; note 217 /* registers for monitoring battery charging and status; note
230 * that reading chgstat and regstat may ack IRQs... 218 * that reading chgstat and regstat may ack IRQs...
231 */ 219 */
232 value = i2c_smbus_read_byte_data(&tps->client, TPS_CHGCONFIG); 220 value = i2c_smbus_read_byte_data(tps->client, TPS_CHGCONFIG);
233 dbg_chgconf(tps->por, buf, sizeof buf, value); 221 dbg_chgconf(tps->por, buf, sizeof buf, value);
234 seq_printf(s, "chgconfig %s", buf); 222 seq_printf(s, "chgconfig %s", buf);
235 223
236 value = i2c_smbus_read_byte_data(&tps->client, TPS_CHGSTATUS); 224 value = i2c_smbus_read_byte_data(tps->client, TPS_CHGSTATUS);
237 dbg_chgstat(buf, sizeof buf, value); 225 dbg_chgstat(buf, sizeof buf, value);
238 seq_printf(s, "chgstat %s", buf); 226 seq_printf(s, "chgstat %s", buf);
239 value = i2c_smbus_read_byte_data(&tps->client, TPS_MASK1); 227 value = i2c_smbus_read_byte_data(tps->client, TPS_MASK1);
240 dbg_chgstat(buf, sizeof buf, value); 228 dbg_chgstat(buf, sizeof buf, value);
241 seq_printf(s, "mask1 %s", buf); 229 seq_printf(s, "mask1 %s", buf);
242 /* ignore ackint1 */ 230 /* ignore ackint1 */
243 231
244 value = i2c_smbus_read_byte_data(&tps->client, TPS_REGSTATUS); 232 value = i2c_smbus_read_byte_data(tps->client, TPS_REGSTATUS);
245 dbg_regstat(buf, sizeof buf, value); 233 dbg_regstat(buf, sizeof buf, value);
246 seq_printf(s, "regstat %s", buf); 234 seq_printf(s, "regstat %s", buf);
247 value = i2c_smbus_read_byte_data(&tps->client, TPS_MASK2); 235 value = i2c_smbus_read_byte_data(tps->client, TPS_MASK2);
248 dbg_regstat(buf, sizeof buf, value); 236 dbg_regstat(buf, sizeof buf, value);
249 seq_printf(s, "mask2 %s\n", buf); 237 seq_printf(s, "mask2 %s\n", buf);
250 /* ignore ackint2 */ 238 /* ignore ackint2 */
@@ -253,21 +241,21 @@ static int dbg_show(struct seq_file *s, void *_)
253 241
254 242
255 /* VMAIN voltage, enable lowpower, etc */ 243 /* VMAIN voltage, enable lowpower, etc */
256 value = i2c_smbus_read_byte_data(&tps->client, TPS_VDCDC1); 244 value = i2c_smbus_read_byte_data(tps->client, TPS_VDCDC1);
257 seq_printf(s, "vdcdc1 %02x\n", value); 245 seq_printf(s, "vdcdc1 %02x\n", value);
258 246
259 /* VCORE voltage, vibrator on/off */ 247 /* VCORE voltage, vibrator on/off */
260 value = i2c_smbus_read_byte_data(&tps->client, TPS_VDCDC2); 248 value = i2c_smbus_read_byte_data(tps->client, TPS_VDCDC2);
261 seq_printf(s, "vdcdc2 %02x\n", value); 249 seq_printf(s, "vdcdc2 %02x\n", value);
262 250
263 /* both LD0s, and their lowpower behavior */ 251 /* both LD0s, and their lowpower behavior */
264 value = i2c_smbus_read_byte_data(&tps->client, TPS_VREGS1); 252 value = i2c_smbus_read_byte_data(tps->client, TPS_VREGS1);
265 seq_printf(s, "vregs1 %02x\n\n", value); 253 seq_printf(s, "vregs1 %02x\n\n", value);
266 254
267 255
268 /* LEDs and GPIOs */ 256 /* LEDs and GPIOs */
269 value = i2c_smbus_read_byte_data(&tps->client, TPS_LED1_ON); 257 value = i2c_smbus_read_byte_data(tps->client, TPS_LED1_ON);
270 v2 = i2c_smbus_read_byte_data(&tps->client, TPS_LED1_PER); 258 v2 = i2c_smbus_read_byte_data(tps->client, TPS_LED1_PER);
271 seq_printf(s, "led1 %s, on=%02x, per=%02x, %d/%d msec\n", 259 seq_printf(s, "led1 %s, on=%02x, per=%02x, %d/%d msec\n",
272 (value & 0x80) 260 (value & 0x80)
273 ? ((v2 & 0x80) ? "on" : "off") 261 ? ((v2 & 0x80) ? "on" : "off")
@@ -275,8 +263,8 @@ static int dbg_show(struct seq_file *s, void *_)
275 value, v2, 263 value, v2,
276 (value & 0x7f) * 10, (v2 & 0x7f) * 100); 264 (value & 0x7f) * 10, (v2 & 0x7f) * 100);
277 265
278 value = i2c_smbus_read_byte_data(&tps->client, TPS_LED2_ON); 266 value = i2c_smbus_read_byte_data(tps->client, TPS_LED2_ON);
279 v2 = i2c_smbus_read_byte_data(&tps->client, TPS_LED2_PER); 267 v2 = i2c_smbus_read_byte_data(tps->client, TPS_LED2_PER);
280 seq_printf(s, "led2 %s, on=%02x, per=%02x, %d/%d msec\n", 268 seq_printf(s, "led2 %s, on=%02x, per=%02x, %d/%d msec\n",
281 (value & 0x80) 269 (value & 0x80)
282 ? ((v2 & 0x80) ? "on" : "off") 270 ? ((v2 & 0x80) ? "on" : "off")
@@ -284,8 +272,8 @@ static int dbg_show(struct seq_file *s, void *_)
284 value, v2, 272 value, v2,
285 (value & 0x7f) * 10, (v2 & 0x7f) * 100); 273 (value & 0x7f) * 10, (v2 & 0x7f) * 100);
286 274
287 value = i2c_smbus_read_byte_data(&tps->client, TPS_DEFGPIO); 275 value = i2c_smbus_read_byte_data(tps->client, TPS_DEFGPIO);
288 v2 = i2c_smbus_read_byte_data(&tps->client, TPS_MASK3); 276 v2 = i2c_smbus_read_byte_data(tps->client, TPS_MASK3);
289 seq_printf(s, "defgpio %02x mask3 %02x\n", value, v2); 277 seq_printf(s, "defgpio %02x mask3 %02x\n", value, v2);
290 278
291 for (i = 0; i < 4; i++) { 279 for (i = 0; i < 4; i++) {
@@ -335,7 +323,7 @@ static void tps65010_interrupt(struct tps65010 *tps)
335 323
336 /* regstatus irqs */ 324 /* regstatus irqs */
337 if (tps->nmask2) { 325 if (tps->nmask2) {
338 tmp = i2c_smbus_read_byte_data(&tps->client, TPS_REGSTATUS); 326 tmp = i2c_smbus_read_byte_data(tps->client, TPS_REGSTATUS);
339 mask = tmp ^ tps->regstatus; 327 mask = tmp ^ tps->regstatus;
340 tps->regstatus = tmp; 328 tps->regstatus = tmp;
341 mask &= tps->nmask2; 329 mask &= tps->nmask2;
@@ -362,7 +350,7 @@ static void tps65010_interrupt(struct tps65010 *tps)
362 350
363 /* chgstatus irqs */ 351 /* chgstatus irqs */
364 if (tps->nmask1) { 352 if (tps->nmask1) {
365 tmp = i2c_smbus_read_byte_data(&tps->client, TPS_CHGSTATUS); 353 tmp = i2c_smbus_read_byte_data(tps->client, TPS_CHGSTATUS);
366 mask = tmp ^ tps->chgstatus; 354 mask = tmp ^ tps->chgstatus;
367 tps->chgstatus = tmp; 355 tps->chgstatus = tmp;
368 mask &= tps->nmask1; 356 mask &= tps->nmask1;
@@ -426,7 +414,7 @@ static void tps65010_work(struct work_struct *work)
426 int status; 414 int status;
427 u8 chgconfig, tmp; 415 u8 chgconfig, tmp;
428 416
429 chgconfig = i2c_smbus_read_byte_data(&tps->client, 417 chgconfig = i2c_smbus_read_byte_data(tps->client,
430 TPS_CHGCONFIG); 418 TPS_CHGCONFIG);
431 chgconfig &= ~(TPS_VBUS_500MA | TPS_VBUS_CHARGING); 419 chgconfig &= ~(TPS_VBUS_500MA | TPS_VBUS_CHARGING);
432 if (tps->vbus == 500) 420 if (tps->vbus == 500)
@@ -434,17 +422,17 @@ static void tps65010_work(struct work_struct *work)
434 else if (tps->vbus >= 100) 422 else if (tps->vbus >= 100)
435 chgconfig |= TPS_VBUS_CHARGING; 423 chgconfig |= TPS_VBUS_CHARGING;
436 424
437 status = i2c_smbus_write_byte_data(&tps->client, 425 status = i2c_smbus_write_byte_data(tps->client,
438 TPS_CHGCONFIG, chgconfig); 426 TPS_CHGCONFIG, chgconfig);
439 427
440 /* vbus update fails unless VBUS is connected! */ 428 /* vbus update fails unless VBUS is connected! */
441 tmp = i2c_smbus_read_byte_data(&tps->client, TPS_CHGCONFIG); 429 tmp = i2c_smbus_read_byte_data(tps->client, TPS_CHGCONFIG);
442 tps->chgconf = tmp; 430 tps->chgconf = tmp;
443 show_chgconfig(tps->por, "update vbus", tmp); 431 show_chgconfig(tps->por, "update vbus", tmp);
444 } 432 }
445 433
446 if (test_and_clear_bit(FLAG_IRQ_ENABLE, &tps->flags)) 434 if (test_and_clear_bit(FLAG_IRQ_ENABLE, &tps->flags))
447 enable_irq(tps->irq); 435 enable_irq(tps->client->irq);
448 436
449 mutex_unlock(&tps->lock); 437 mutex_unlock(&tps->lock);
450} 438}
@@ -463,114 +451,75 @@ static irqreturn_t tps65010_irq(int irq, void *_tps)
463 451
464static struct tps65010 *the_tps; 452static struct tps65010 *the_tps;
465 453
466static int __exit tps65010_detach_client(struct i2c_client *client) 454static int __exit tps65010_remove(struct i2c_client *client)
467{ 455{
468 struct tps65010 *tps; 456 struct tps65010 *tps = i2c_get_clientdata(client);
469 457
470 tps = container_of(client, struct tps65010, client); 458 if (client->irq > 0)
471 free_irq(tps->irq, tps); 459 free_irq(client->irq, tps);
472#ifdef CONFIG_ARM
473 if (machine_is_omap_h2())
474 omap_free_gpio(58);
475 if (machine_is_omap_osk())
476 omap_free_gpio(OMAP_MPUIO(1));
477#endif
478 cancel_delayed_work(&tps->work); 460 cancel_delayed_work(&tps->work);
479 flush_scheduled_work(); 461 flush_scheduled_work();
480 debugfs_remove(tps->file); 462 debugfs_remove(tps->file);
481 if (i2c_detach_client(client) == 0) 463 kfree(tps);
482 kfree(tps);
483 the_tps = NULL; 464 the_tps = NULL;
484 return 0; 465 return 0;
485} 466}
486 467
487static int tps65010_noscan(struct i2c_adapter *bus) 468static int tps65010_probe(struct i2c_client *client)
488{
489 /* pure paranoia, in case someone adds another i2c bus
490 * after our init section's gone...
491 */
492 return -ENODEV;
493}
494
495/* no error returns, they'd just make bus scanning stop */
496static int __init
497tps65010_probe(struct i2c_adapter *bus, int address, int kind)
498{ 469{
499 struct tps65010 *tps; 470 struct tps65010 *tps;
500 int status; 471 int status;
501 unsigned long irqflags;
502 472
503 if (the_tps) { 473 if (the_tps) {
504 dev_dbg(&bus->dev, "only one %s for now\n", DRIVER_NAME); 474 dev_dbg(&client->dev, "only one tps6501x chip allowed\n");
505 return 0; 475 return -ENODEV;
506 } 476 }
507 477
478 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
479 return -EINVAL;
480
508 tps = kzalloc(sizeof *tps, GFP_KERNEL); 481 tps = kzalloc(sizeof *tps, GFP_KERNEL);
509 if (!tps) 482 if (!tps)
510 return 0; 483 return -ENOMEM;
511 484
512 mutex_init(&tps->lock); 485 mutex_init(&tps->lock);
513 INIT_DELAYED_WORK(&tps->work, tps65010_work); 486 INIT_DELAYED_WORK(&tps->work, tps65010_work);
514 tps->irq = -1; 487 tps->client = client;
515 tps->client.addr = address;
516 tps->client.adapter = bus;
517 tps->client.driver = &tps65010_driver;
518 strlcpy(tps->client.name, DRIVER_NAME, I2C_NAME_SIZE);
519
520 status = i2c_attach_client(&tps->client);
521 if (status < 0) {
522 dev_dbg(&bus->dev, "can't attach %s to device %d, err %d\n",
523 DRIVER_NAME, address, status);
524 goto fail1;
525 }
526 488
527 /* the IRQ is active low, but many gpio lines can't support that 489 if (strcmp(client->name, "tps65010") == 0)
528 * so this driver can use falling-edge triggers instead.
529 */
530 irqflags = IRQF_SAMPLE_RANDOM;
531#ifdef CONFIG_ARM
532 if (machine_is_omap_h2()) {
533 tps->model = TPS65010;
534 omap_cfg_reg(W4_GPIO58);
535 tps->irq = OMAP_GPIO_IRQ(58);
536 omap_request_gpio(58);
537 omap_set_gpio_direction(58, 1);
538 irqflags |= IRQF_TRIGGER_FALLING;
539 }
540 if (machine_is_omap_osk()) {
541 tps->model = TPS65010; 490 tps->model = TPS65010;
542 // omap_cfg_reg(U19_1610_MPUIO1); 491 else if (strcmp(client->name, "tps65011") == 0)
543 tps->irq = OMAP_GPIO_IRQ(OMAP_MPUIO(1)); 492 tps->model = TPS65011;
544 omap_request_gpio(OMAP_MPUIO(1)); 493 else if (strcmp(client->name, "tps65012") == 0)
545 omap_set_gpio_direction(OMAP_MPUIO(1), 1); 494 tps->model = TPS65012;
546 irqflags |= IRQF_TRIGGER_FALLING; 495 else if (strcmp(client->name, "tps65013") == 0)
547 }
548 if (machine_is_omap_h3()) {
549 tps->model = TPS65013; 496 tps->model = TPS65013;
550 497 else {
551 // FIXME set up this board's IRQ ... 498 dev_warn(&client->dev, "unknown chip '%s'\n", client->name);
499 status = -ENODEV;
500 goto fail1;
552 } 501 }
553#endif
554 502
555 if (tps->irq > 0) { 503 /* the IRQ is active low, but many gpio lines can't support that
556 status = request_irq(tps->irq, tps65010_irq, 504 * so this driver uses falling-edge triggers instead.
557 irqflags, DRIVER_NAME, tps); 505 */
506 if (client->irq > 0) {
507 status = request_irq(client->irq, tps65010_irq,
508 IRQF_SAMPLE_RANDOM | IRQF_TRIGGER_FALLING,
509 DRIVER_NAME, tps);
558 if (status < 0) { 510 if (status < 0) {
559 dev_dbg(&tps->client.dev, "can't get IRQ %d, err %d\n", 511 dev_dbg(&client->dev, "can't get IRQ %d, err %d\n",
560 tps->irq, status); 512 client->irq, status);
561 i2c_detach_client(&tps->client);
562 goto fail1; 513 goto fail1;
563 } 514 }
564#ifdef CONFIG_ARM
565 /* annoying race here, ideally we'd have an option 515 /* annoying race here, ideally we'd have an option
566 * to claim the irq now and enable it later. 516 * to claim the irq now and enable it later.
517 * FIXME genirq IRQF_NOAUTOEN now solves that ...
567 */ 518 */
568 disable_irq(tps->irq); 519 disable_irq(client->irq);
569 set_bit(FLAG_IRQ_ENABLE, &tps->flags); 520 set_bit(FLAG_IRQ_ENABLE, &tps->flags);
570#endif
571 } else 521 } else
572 printk(KERN_WARNING "%s: IRQ not configured!\n", 522 dev_warn(&client->dev, "IRQ not configured!\n");
573 DRIVER_NAME);
574 523
575 524
576 switch (tps->model) { 525 switch (tps->model) {
@@ -583,23 +532,22 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
583 break; 532 break;
584 /* else CHGCONFIG.POR is replaced by AUA, enabling a WAIT mode */ 533 /* else CHGCONFIG.POR is replaced by AUA, enabling a WAIT mode */
585 } 534 }
586 tps->chgconf = i2c_smbus_read_byte_data(&tps->client, TPS_CHGCONFIG); 535 tps->chgconf = i2c_smbus_read_byte_data(client, TPS_CHGCONFIG);
587 show_chgconfig(tps->por, "conf/init", tps->chgconf); 536 show_chgconfig(tps->por, "conf/init", tps->chgconf);
588 537
589 show_chgstatus("chg/init", 538 show_chgstatus("chg/init",
590 i2c_smbus_read_byte_data(&tps->client, TPS_CHGSTATUS)); 539 i2c_smbus_read_byte_data(client, TPS_CHGSTATUS));
591 show_regstatus("reg/init", 540 show_regstatus("reg/init",
592 i2c_smbus_read_byte_data(&tps->client, TPS_REGSTATUS)); 541 i2c_smbus_read_byte_data(client, TPS_REGSTATUS));
593 542
594 pr_debug("%s: vdcdc1 0x%02x, vdcdc2 %02x, vregs1 %02x\n", DRIVER_NAME, 543 pr_debug("%s: vdcdc1 0x%02x, vdcdc2 %02x, vregs1 %02x\n", DRIVER_NAME,
595 i2c_smbus_read_byte_data(&tps->client, TPS_VDCDC1), 544 i2c_smbus_read_byte_data(client, TPS_VDCDC1),
596 i2c_smbus_read_byte_data(&tps->client, TPS_VDCDC2), 545 i2c_smbus_read_byte_data(client, TPS_VDCDC2),
597 i2c_smbus_read_byte_data(&tps->client, TPS_VREGS1)); 546 i2c_smbus_read_byte_data(client, TPS_VREGS1));
598 pr_debug("%s: defgpio 0x%02x, mask3 0x%02x\n", DRIVER_NAME, 547 pr_debug("%s: defgpio 0x%02x, mask3 0x%02x\n", DRIVER_NAME,
599 i2c_smbus_read_byte_data(&tps->client, TPS_DEFGPIO), 548 i2c_smbus_read_byte_data(client, TPS_DEFGPIO),
600 i2c_smbus_read_byte_data(&tps->client, TPS_MASK3)); 549 i2c_smbus_read_byte_data(client, TPS_MASK3));
601 550
602 tps65010_driver.attach_adapter = tps65010_noscan;
603 the_tps = tps; 551 the_tps = tps;
604 552
605#if defined(CONFIG_USB_GADGET) && !defined(CONFIG_USB_OTG) 553#if defined(CONFIG_USB_GADGET) && !defined(CONFIG_USB_OTG)
@@ -615,15 +563,15 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
615 * registers, and maybe disable VBUS draw. 563 * registers, and maybe disable VBUS draw.
616 */ 564 */
617 tps->nmask1 = ~0; 565 tps->nmask1 = ~0;
618 (void) i2c_smbus_write_byte_data(&tps->client, TPS_MASK1, ~tps->nmask1); 566 (void) i2c_smbus_write_byte_data(client, TPS_MASK1, ~tps->nmask1);
619 567
620 tps->nmask2 = TPS_REG_ONOFF; 568 tps->nmask2 = TPS_REG_ONOFF;
621 if (tps->model == TPS65013) 569 if (tps->model == TPS65013)
622 tps->nmask2 |= TPS_REG_NO_CHG; 570 tps->nmask2 |= TPS_REG_NO_CHG;
623 (void) i2c_smbus_write_byte_data(&tps->client, TPS_MASK2, ~tps->nmask2); 571 (void) i2c_smbus_write_byte_data(client, TPS_MASK2, ~tps->nmask2);
624 572
625 (void) i2c_smbus_write_byte_data(&tps->client, TPS_MASK3, 0x0f 573 (void) i2c_smbus_write_byte_data(client, TPS_MASK3, 0x0f
626 | i2c_smbus_read_byte_data(&tps->client, TPS_MASK3)); 574 | i2c_smbus_read_byte_data(client, TPS_MASK3));
627 575
628 tps65010_work(&tps->work.work); 576 tps65010_work(&tps->work.work);
629 577
@@ -632,22 +580,15 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
632 return 0; 580 return 0;
633fail1: 581fail1:
634 kfree(tps); 582 kfree(tps);
635 return 0; 583 return status;
636}
637
638static int __init tps65010_scan_bus(struct i2c_adapter *bus)
639{
640 if (!i2c_check_functionality(bus, I2C_FUNC_SMBUS_BYTE_DATA))
641 return -EINVAL;
642 return i2c_probe(bus, &addr_data, tps65010_probe);
643} 584}
644 585
645static struct i2c_driver tps65010_driver = { 586static struct i2c_driver tps65010_driver = {
646 .driver = { 587 .driver = {
647 .name = "tps65010", 588 .name = "tps65010",
648 }, 589 },
649 .attach_adapter = tps65010_scan_bus, 590 .probe = tps65010_probe,
650 .detach_client = __exit_p(tps65010_detach_client), 591 .remove = __exit_p(tps65010_remove),
651}; 592};
652 593
653/*-------------------------------------------------------------------------*/ 594/*-------------------------------------------------------------------------*/
@@ -702,7 +643,7 @@ int tps65010_set_gpio_out_value(unsigned gpio, unsigned value)
702 643
703 mutex_lock(&the_tps->lock); 644 mutex_lock(&the_tps->lock);
704 645
705 defgpio = i2c_smbus_read_byte_data(&the_tps->client, TPS_DEFGPIO); 646 defgpio = i2c_smbus_read_byte_data(the_tps->client, TPS_DEFGPIO);
706 647
707 /* Configure GPIO for output */ 648 /* Configure GPIO for output */
708 defgpio |= 1 << (gpio + 3); 649 defgpio |= 1 << (gpio + 3);
@@ -718,12 +659,12 @@ int tps65010_set_gpio_out_value(unsigned gpio, unsigned value)
718 break; 659 break;
719 } 660 }
720 661
721 status = i2c_smbus_write_byte_data(&the_tps->client, 662 status = i2c_smbus_write_byte_data(the_tps->client,
722 TPS_DEFGPIO, defgpio); 663 TPS_DEFGPIO, defgpio);
723 664
724 pr_debug("%s: gpio%dout = %s, defgpio 0x%02x\n", DRIVER_NAME, 665 pr_debug("%s: gpio%dout = %s, defgpio 0x%02x\n", DRIVER_NAME,
725 gpio, value ? "high" : "low", 666 gpio, value ? "high" : "low",
726 i2c_smbus_read_byte_data(&the_tps->client, TPS_DEFGPIO)); 667 i2c_smbus_read_byte_data(the_tps->client, TPS_DEFGPIO));
727 668
728 mutex_unlock(&the_tps->lock); 669 mutex_unlock(&the_tps->lock);
729 return status; 670 return status;
@@ -753,11 +694,11 @@ int tps65010_set_led(unsigned led, unsigned mode)
753 mutex_lock(&the_tps->lock); 694 mutex_lock(&the_tps->lock);
754 695
755 pr_debug("%s: led%i_on 0x%02x\n", DRIVER_NAME, led, 696 pr_debug("%s: led%i_on 0x%02x\n", DRIVER_NAME, led,
756 i2c_smbus_read_byte_data(&the_tps->client, 697 i2c_smbus_read_byte_data(the_tps->client,
757 TPS_LED1_ON + offs)); 698 TPS_LED1_ON + offs));
758 699
759 pr_debug("%s: led%i_per 0x%02x\n", DRIVER_NAME, led, 700 pr_debug("%s: led%i_per 0x%02x\n", DRIVER_NAME, led,
760 i2c_smbus_read_byte_data(&the_tps->client, 701 i2c_smbus_read_byte_data(the_tps->client,
761 TPS_LED1_PER + offs)); 702 TPS_LED1_PER + offs));
762 703
763 switch (mode) { 704 switch (mode) {
@@ -780,7 +721,7 @@ int tps65010_set_led(unsigned led, unsigned mode)
780 return -EINVAL; 721 return -EINVAL;
781 } 722 }
782 723
783 status = i2c_smbus_write_byte_data(&the_tps->client, 724 status = i2c_smbus_write_byte_data(the_tps->client,
784 TPS_LED1_ON + offs, led_on); 725 TPS_LED1_ON + offs, led_on);
785 726
786 if (status != 0) { 727 if (status != 0) {
@@ -791,9 +732,9 @@ int tps65010_set_led(unsigned led, unsigned mode)
791 } 732 }
792 733
793 pr_debug("%s: led%i_on 0x%02x\n", DRIVER_NAME, led, 734 pr_debug("%s: led%i_on 0x%02x\n", DRIVER_NAME, led,
794 i2c_smbus_read_byte_data(&the_tps->client, TPS_LED1_ON + offs)); 735 i2c_smbus_read_byte_data(the_tps->client, TPS_LED1_ON + offs));
795 736
796 status = i2c_smbus_write_byte_data(&the_tps->client, 737 status = i2c_smbus_write_byte_data(the_tps->client,
797 TPS_LED1_PER + offs, led_per); 738 TPS_LED1_PER + offs, led_per);
798 739
799 if (status != 0) { 740 if (status != 0) {
@@ -804,7 +745,7 @@ int tps65010_set_led(unsigned led, unsigned mode)
804 } 745 }
805 746
806 pr_debug("%s: led%i_per 0x%02x\n", DRIVER_NAME, led, 747 pr_debug("%s: led%i_per 0x%02x\n", DRIVER_NAME, led,
807 i2c_smbus_read_byte_data(&the_tps->client, 748 i2c_smbus_read_byte_data(the_tps->client,
808 TPS_LED1_PER + offs)); 749 TPS_LED1_PER + offs));
809 750
810 mutex_unlock(&the_tps->lock); 751 mutex_unlock(&the_tps->lock);
@@ -827,11 +768,11 @@ int tps65010_set_vib(unsigned value)
827 768
828 mutex_lock(&the_tps->lock); 769 mutex_lock(&the_tps->lock);
829 770
830 vdcdc2 = i2c_smbus_read_byte_data(&the_tps->client, TPS_VDCDC2); 771 vdcdc2 = i2c_smbus_read_byte_data(the_tps->client, TPS_VDCDC2);
831 vdcdc2 &= ~(1 << 1); 772 vdcdc2 &= ~(1 << 1);
832 if (value) 773 if (value)
833 vdcdc2 |= (1 << 1); 774 vdcdc2 |= (1 << 1);
834 status = i2c_smbus_write_byte_data(&the_tps->client, 775 status = i2c_smbus_write_byte_data(the_tps->client,
835 TPS_VDCDC2, vdcdc2); 776 TPS_VDCDC2, vdcdc2);
836 777
837 pr_debug("%s: vibrator %s\n", DRIVER_NAME, value ? "on" : "off"); 778 pr_debug("%s: vibrator %s\n", DRIVER_NAME, value ? "on" : "off");
@@ -857,9 +798,9 @@ int tps65010_set_low_pwr(unsigned mode)
857 798
858 pr_debug("%s: %s low_pwr, vdcdc1 0x%02x\n", DRIVER_NAME, 799 pr_debug("%s: %s low_pwr, vdcdc1 0x%02x\n", DRIVER_NAME,
859 mode ? "enable" : "disable", 800 mode ? "enable" : "disable",
860 i2c_smbus_read_byte_data(&the_tps->client, TPS_VDCDC1)); 801 i2c_smbus_read_byte_data(the_tps->client, TPS_VDCDC1));
861 802
862 vdcdc1 = i2c_smbus_read_byte_data(&the_tps->client, TPS_VDCDC1); 803 vdcdc1 = i2c_smbus_read_byte_data(the_tps->client, TPS_VDCDC1);
863 804
864 switch (mode) { 805 switch (mode) {
865 case OFF: 806 case OFF:
@@ -871,7 +812,7 @@ int tps65010_set_low_pwr(unsigned mode)
871 break; 812 break;
872 } 813 }
873 814
874 status = i2c_smbus_write_byte_data(&the_tps->client, 815 status = i2c_smbus_write_byte_data(the_tps->client,
875 TPS_VDCDC1, vdcdc1); 816 TPS_VDCDC1, vdcdc1);
876 817
877 if (status != 0) 818 if (status != 0)
@@ -879,7 +820,7 @@ int tps65010_set_low_pwr(unsigned mode)
879 DRIVER_NAME); 820 DRIVER_NAME);
880 else 821 else
881 pr_debug("%s: vdcdc1 0x%02x\n", DRIVER_NAME, 822 pr_debug("%s: vdcdc1 0x%02x\n", DRIVER_NAME,
882 i2c_smbus_read_byte_data(&the_tps->client, TPS_VDCDC1)); 823 i2c_smbus_read_byte_data(the_tps->client, TPS_VDCDC1));
883 824
884 mutex_unlock(&the_tps->lock); 825 mutex_unlock(&the_tps->lock);
885 826
@@ -902,9 +843,9 @@ int tps65010_config_vregs1(unsigned value)
902 mutex_lock(&the_tps->lock); 843 mutex_lock(&the_tps->lock);
903 844
904 pr_debug("%s: vregs1 0x%02x\n", DRIVER_NAME, 845 pr_debug("%s: vregs1 0x%02x\n", DRIVER_NAME,
905 i2c_smbus_read_byte_data(&the_tps->client, TPS_VREGS1)); 846 i2c_smbus_read_byte_data(the_tps->client, TPS_VREGS1));
906 847
907 status = i2c_smbus_write_byte_data(&the_tps->client, 848 status = i2c_smbus_write_byte_data(the_tps->client,
908 TPS_VREGS1, value); 849 TPS_VREGS1, value);
909 850
910 if (status != 0) 851 if (status != 0)
@@ -912,7 +853,7 @@ int tps65010_config_vregs1(unsigned value)
912 DRIVER_NAME); 853 DRIVER_NAME);
913 else 854 else
914 pr_debug("%s: vregs1 0x%02x\n", DRIVER_NAME, 855 pr_debug("%s: vregs1 0x%02x\n", DRIVER_NAME,
915 i2c_smbus_read_byte_data(&the_tps->client, TPS_VREGS1)); 856 i2c_smbus_read_byte_data(the_tps->client, TPS_VREGS1));
916 857
917 mutex_unlock(&the_tps->lock); 858 mutex_unlock(&the_tps->lock);
918 859
@@ -941,11 +882,11 @@ int tps65013_set_low_pwr(unsigned mode)
941 pr_debug("%s: %s low_pwr, chgconfig 0x%02x vdcdc1 0x%02x\n", 882 pr_debug("%s: %s low_pwr, chgconfig 0x%02x vdcdc1 0x%02x\n",
942 DRIVER_NAME, 883 DRIVER_NAME,
943 mode ? "enable" : "disable", 884 mode ? "enable" : "disable",
944 i2c_smbus_read_byte_data(&the_tps->client, TPS_CHGCONFIG), 885 i2c_smbus_read_byte_data(the_tps->client, TPS_CHGCONFIG),
945 i2c_smbus_read_byte_data(&the_tps->client, TPS_VDCDC1)); 886 i2c_smbus_read_byte_data(the_tps->client, TPS_VDCDC1));
946 887
947 chgconfig = i2c_smbus_read_byte_data(&the_tps->client, TPS_CHGCONFIG); 888 chgconfig = i2c_smbus_read_byte_data(the_tps->client, TPS_CHGCONFIG);
948 vdcdc1 = i2c_smbus_read_byte_data(&the_tps->client, TPS_VDCDC1); 889 vdcdc1 = i2c_smbus_read_byte_data(the_tps->client, TPS_VDCDC1);
949 890
950 switch (mode) { 891 switch (mode) {
951 case OFF: 892 case OFF:
@@ -959,7 +900,7 @@ int tps65013_set_low_pwr(unsigned mode)
959 break; 900 break;
960 } 901 }
961 902
962 status = i2c_smbus_write_byte_data(&the_tps->client, 903 status = i2c_smbus_write_byte_data(the_tps->client,
963 TPS_CHGCONFIG, chgconfig); 904 TPS_CHGCONFIG, chgconfig);
964 if (status != 0) { 905 if (status != 0) {
965 printk(KERN_ERR "%s: Failed to write chconfig register\n", 906 printk(KERN_ERR "%s: Failed to write chconfig register\n",
@@ -968,11 +909,11 @@ int tps65013_set_low_pwr(unsigned mode)
968 return status; 909 return status;
969 } 910 }
970 911
971 chgconfig = i2c_smbus_read_byte_data(&the_tps->client, TPS_CHGCONFIG); 912 chgconfig = i2c_smbus_read_byte_data(the_tps->client, TPS_CHGCONFIG);
972 the_tps->chgconf = chgconfig; 913 the_tps->chgconf = chgconfig;
973 show_chgconfig(0, "chgconf", chgconfig); 914 show_chgconfig(0, "chgconf", chgconfig);
974 915
975 status = i2c_smbus_write_byte_data(&the_tps->client, 916 status = i2c_smbus_write_byte_data(the_tps->client,
976 TPS_VDCDC1, vdcdc1); 917 TPS_VDCDC1, vdcdc1);
977 918
978 if (status != 0) 919 if (status != 0)
@@ -980,7 +921,7 @@ int tps65013_set_low_pwr(unsigned mode)
980 DRIVER_NAME); 921 DRIVER_NAME);
981 else 922 else
982 pr_debug("%s: vdcdc1 0x%02x\n", DRIVER_NAME, 923 pr_debug("%s: vdcdc1 0x%02x\n", DRIVER_NAME,
983 i2c_smbus_read_byte_data(&the_tps->client, TPS_VDCDC1)); 924 i2c_smbus_read_byte_data(the_tps->client, TPS_VDCDC1));
984 925
985 mutex_unlock(&the_tps->lock); 926 mutex_unlock(&the_tps->lock);
986 927
@@ -1011,52 +952,6 @@ static int __init tps_init(void)
1011 msleep(10); 952 msleep(10);
1012 } 953 }
1013 954
1014#ifdef CONFIG_ARM
1015 if (machine_is_omap_osk()) {
1016
1017 // FIXME: More should be placed in the initialization code
1018 // of the submodules (DSP, ethernet, power management,
1019 // board-osk.c). Careful: I2C is initialized "late".
1020
1021 /* Let LED1 (D9) blink */
1022 tps65010_set_led(LED1, BLINK);
1023
1024 /* Disable LED 2 (D2) */
1025 tps65010_set_led(LED2, OFF);
1026
1027 /* Set GPIO 1 HIGH to disable VBUS power supply;
1028 * OHCI driver powers it up/down as needed.
1029 */
1030 tps65010_set_gpio_out_value(GPIO1, HIGH);
1031
1032 /* Set GPIO 2 low to turn on LED D3 */
1033 tps65010_set_gpio_out_value(GPIO2, HIGH);
1034
1035 /* Set GPIO 3 low to take ethernet out of reset */
1036 tps65010_set_gpio_out_value(GPIO3, LOW);
1037
1038 /* gpio4 for VDD_DSP */
1039
1040 /* Enable LOW_PWR */
1041 tps65010_set_low_pwr(ON);
1042
1043 /* Switch VLDO2 to 3.0V for AIC23 */
1044 tps65010_config_vregs1(TPS_LDO2_ENABLE | TPS_VLDO2_3_0V | TPS_LDO1_ENABLE);
1045
1046 } else if (machine_is_omap_h2()) {
1047 /* gpio3 for SD, gpio4 for VDD_DSP */
1048
1049 /* Enable LOW_PWR */
1050 tps65010_set_low_pwr(ON);
1051 } else if (machine_is_omap_h3()) {
1052 /* gpio4 for SD, gpio3 for VDD_DSP */
1053#ifdef CONFIG_PM
1054 /* Enable LOW_PWR */
1055 tps65013_set_low_pwr(ON);
1056#endif
1057 }
1058#endif
1059
1060 return status; 955 return status;
1061} 956}
1062/* NOTE: this MUST be initialized before the other parts of the system 957/* NOTE: this MUST be initialized before the other parts of the system
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index d663e6960d..e73d58c43f 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -67,20 +67,16 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
67#ifdef CONFIG_HOTPLUG 67#ifdef CONFIG_HOTPLUG
68 68
69/* uevent helps with hotplug: modprobe -q $(MODALIAS) */ 69/* uevent helps with hotplug: modprobe -q $(MODALIAS) */
70static int i2c_device_uevent(struct device *dev, char **envp, int num_envp, 70static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
71 char *buffer, int buffer_size)
72{ 71{
73 struct i2c_client *client = to_i2c_client(dev); 72 struct i2c_client *client = to_i2c_client(dev);
74 int i = 0, length = 0;
75 73
76 /* by definition, legacy drivers can't hotplug */ 74 /* by definition, legacy drivers can't hotplug */
77 if (dev->driver || !client->driver_name) 75 if (dev->driver || !client->driver_name)
78 return 0; 76 return 0;
79 77
80 if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, 78 if (add_uevent_var(env, "MODALIAS=%s", client->driver_name))
81 "MODALIAS=%s", client->driver_name))
82 return -ENOMEM; 79 return -ENOMEM;
83 envp[i] = NULL;
84 dev_dbg(dev, "uevent\n"); 80 dev_dbg(dev, "uevent\n");
85 return 0; 81 return 0;
86} 82}
@@ -190,7 +186,7 @@ static struct device_attribute i2c_dev_attrs[] = {
190 { }, 186 { },
191}; 187};
192 188
193struct bus_type i2c_bus_type = { 189static struct bus_type i2c_bus_type = {
194 .name = "i2c", 190 .name = "i2c",
195 .dev_attrs = i2c_dev_attrs, 191 .dev_attrs = i2c_dev_attrs,
196 .match = i2c_device_match, 192 .match = i2c_device_match,
@@ -201,7 +197,6 @@ struct bus_type i2c_bus_type = {
201 .suspend = i2c_device_suspend, 197 .suspend = i2c_device_suspend,
202 .resume = i2c_device_resume, 198 .resume = i2c_device_resume,
203}; 199};
204EXPORT_SYMBOL_GPL(i2c_bus_type);
205 200
206/** 201/**
207 * i2c_new_device - instantiate an i2c device for use with a new style driver 202 * i2c_new_device - instantiate an i2c device for use with a new style driver
@@ -230,7 +225,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
230 client->adapter = adap; 225 client->adapter = adap;
231 226
232 client->dev.platform_data = info->platform_data; 227 client->dev.platform_data = info->platform_data;
233 client->flags = info->flags; 228 device_init_wakeup(&client->dev, info->flags & I2C_CLIENT_WAKE);
229
230 client->flags = info->flags & ~I2C_CLIENT_WAKE;
234 client->addr = info->addr; 231 client->addr = info->addr;
235 client->irq = info->irq; 232 client->irq = info->irq;
236 233
@@ -283,7 +280,7 @@ EXPORT_SYMBOL_GPL(i2c_unregister_device);
283 280
284/* I2C bus adapters -- one roots each I2C or SMBUS segment */ 281/* I2C bus adapters -- one roots each I2C or SMBUS segment */
285 282
286void i2c_adapter_dev_release(struct device *dev) 283static void i2c_adapter_dev_release(struct device *dev)
287{ 284{
288 struct i2c_adapter *adap = to_i2c_adapter(dev); 285 struct i2c_adapter *adap = to_i2c_adapter(dev);
289 complete(&adap->dev_released); 286 complete(&adap->dev_released);
@@ -301,7 +298,7 @@ static struct device_attribute i2c_adapter_attrs[] = {
301 { }, 298 { },
302}; 299};
303 300
304struct class i2c_adapter_class = { 301static struct class i2c_adapter_class = {
305 .owner = THIS_MODULE, 302 .owner = THIS_MODULE,
306 .name = "i2c-adapter", 303 .name = "i2c-adapter",
307 .dev_attrs = i2c_adapter_attrs, 304 .dev_attrs = i2c_adapter_attrs,
@@ -934,28 +931,6 @@ int i2c_master_recv(struct i2c_client *client, char *buf ,int count)
934} 931}
935EXPORT_SYMBOL(i2c_master_recv); 932EXPORT_SYMBOL(i2c_master_recv);
936 933
937int i2c_control(struct i2c_client *client,
938 unsigned int cmd, unsigned long arg)
939{
940 int ret = 0;
941 struct i2c_adapter *adap = client->adapter;
942
943 dev_dbg(&client->adapter->dev, "i2c ioctl, cmd: 0x%x, arg: %#lx\n", cmd, arg);
944 switch (cmd) {
945 case I2C_RETRIES:
946 adap->retries = arg;
947 break;
948 case I2C_TIMEOUT:
949 adap->timeout = arg;
950 break;
951 default:
952 if (adap->algo->algo_control!=NULL)
953 ret = adap->algo->algo_control(adap,cmd,arg);
954 }
955 return ret;
956}
957EXPORT_SYMBOL(i2c_control);
958
959/* ---------------------------------------------------- 934/* ----------------------------------------------------
960 * the i2c address scanning function 935 * the i2c address scanning function
961 * Will not work for 10-bit addresses! 936 * Will not work for 10-bit addresses!
@@ -1310,7 +1285,22 @@ s32 i2c_smbus_write_word_data(struct i2c_client *client, u8 command, u16 value)
1310} 1285}
1311EXPORT_SYMBOL(i2c_smbus_write_word_data); 1286EXPORT_SYMBOL(i2c_smbus_write_word_data);
1312 1287
1313/* Returns the number of read bytes */ 1288/**
1289 * i2c_smbus_read_block_data - SMBus block read request
1290 * @client: Handle to slave device
1291 * @command: Command byte issued to let the slave know what data should
1292 * be returned
1293 * @values: Byte array into which data will be read; big enough to hold
1294 * the data returned by the slave. SMBus allows at most 32 bytes.
1295 *
1296 * Returns the number of bytes read in the slave's response, else a
1297 * negative number to indicate some kind of error.
1298 *
1299 * Note that using this function requires that the client's adapter support
1300 * the I2C_FUNC_SMBUS_READ_BLOCK_DATA functionality. Not all adapter drivers
1301 * support this; its emulation through I2C messaging relies on a specific
1302 * mechanism (I2C_M_RECV_LEN) which may not be implemented.
1303 */
1314s32 i2c_smbus_read_block_data(struct i2c_client *client, u8 command, 1304s32 i2c_smbus_read_block_data(struct i2c_client *client, u8 command,
1315 u8 *values) 1305 u8 *values)
1316{ 1306{
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 64eee9551b..5a15e50748 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -226,8 +226,10 @@ static int i2cdev_ioctl(struct inode *inode, struct file *file,
226 226
227 res = 0; 227 res = 0;
228 for( i=0; i<rdwr_arg.nmsgs; i++ ) { 228 for( i=0; i<rdwr_arg.nmsgs; i++ ) {
229 /* Limit the size of the message to a sane amount */ 229 /* Limit the size of the message to a sane amount;
230 if (rdwr_pa[i].len > 8192) { 230 * and don't let length change either. */
231 if ((rdwr_pa[i].len > 8192) ||
232 (rdwr_pa[i].flags & I2C_M_RECV_LEN)) {
231 res = -EINVAL; 233 res = -EINVAL;
232 break; 234 break;
233 } 235 }
@@ -352,9 +354,19 @@ static int i2cdev_ioctl(struct inode *inode, struct file *file,
352 return -EFAULT; 354 return -EFAULT;
353 } 355 }
354 return res; 356 return res;
355 357 case I2C_RETRIES:
358 client->adapter->retries = arg;
359 break;
360 case I2C_TIMEOUT:
361 client->adapter->timeout = arg;
362 break;
356 default: 363 default:
357 return i2c_control(client,cmd,arg); 364 /* NOTE: returning a fault code here could cause trouble
365 * in buggy userspace code. Some old kernel bugs returned
366 * zero in this case, and userspace code might accidentally
367 * have depended on that bug.
368 */
369 return -ENOTTY;
358 } 370 }
359 return 0; 371 return 0;
360} 372}
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index aa0e0c9f74..8982c09324 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -1074,22 +1074,6 @@ endif
1074config BLK_DEV_IDEDMA 1074config BLK_DEV_IDEDMA
1075 def_bool BLK_DEV_IDEDMA_PCI || BLK_DEV_IDEDMA_PMAC || BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA 1075 def_bool BLK_DEV_IDEDMA_PCI || BLK_DEV_IDEDMA_PMAC || BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
1076 1076
1077config IDEDMA_IVB
1078 bool "IGNORE word93 Validation BITS"
1079 depends on BLK_DEV_IDEDMA_PCI || BLK_DEV_IDEDMA_PMAC || BLK_DEV_IDEDMA_ICS
1080 ---help---
1081 There are unclear terms in ATA-4 and ATA-5 standards how certain
1082 hardware (an 80c ribbon) should be detected. Different interpretations
1083 of the standards have been released in hardware. This causes problems:
1084 for example, a host with Ultra Mode 4 (or higher) will not run
1085 in that mode with an 80c ribbon.
1086
1087 If you are experiencing compatibility or performance problems, you
1088 MAY try to answer Y here. However, it does not necessarily solve
1089 any of your problems, it could even cause more of them.
1090
1091 It is normally safe to answer Y; however, the default is N.
1092
1093endif 1077endif
1094 1078
1095config BLK_DEV_HD_ONLY 1079config BLK_DEV_HD_ONLY
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index 7912a471f1..bd1f5b6703 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -248,7 +248,7 @@ static void icside_build_sglist(ide_drive_t *drive, struct request *rq)
248 * MW1 80 50 50 150 C 248 * MW1 80 50 50 150 C
249 * MW2 70 25 25 120 C 249 * MW2 70 25 25 120 C
250 */ 250 */
251static int icside_set_speed(ide_drive_t *drive, const u8 xfer_mode) 251static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
252{ 252{
253 int cycle_time, use_dma_info = 0; 253 int cycle_time, use_dma_info = 0;
254 254
@@ -273,7 +273,7 @@ static int icside_set_speed(ide_drive_t *drive, const u8 xfer_mode)
273 cycle_time = 480; 273 cycle_time = 480;
274 break; 274 break;
275 default: 275 default:
276 return 1; 276 return;
277 } 277 }
278 278
279 /* 279 /*
@@ -287,8 +287,6 @@ static int icside_set_speed(ide_drive_t *drive, const u8 xfer_mode)
287 287
288 printk("%s: %s selected (peak %dMB/s)\n", drive->name, 288 printk("%s: %s selected (peak %dMB/s)\n", drive->name,
289 ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data); 289 ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data);
290
291 return ide_config_drive_speed(drive, xfer_mode);
292} 290}
293 291
294static void icside_dma_host_off(ide_drive_t *drive) 292static void icside_dma_host_off(ide_drive_t *drive)
@@ -313,41 +311,10 @@ static int icside_dma_on(ide_drive_t *drive)
313 311
314static int icside_dma_check(ide_drive_t *drive) 312static int icside_dma_check(ide_drive_t *drive)
315{ 313{
316 struct hd_driveid *id = drive->id; 314 if (ide_tune_dma(drive))
317 ide_hwif_t *hwif = HWIF(drive); 315 return 0;
318 int xfer_mode = 0;
319
320 if (!(id->capability & 1) || !hwif->autodma)
321 goto out;
322
323 /*
324 * Consult the list of known "bad" drives
325 */
326 if (__ide_dma_bad_drive(drive))
327 goto out;
328
329 /*
330 * Enable DMA on any drive that has multiword DMA
331 */
332 if (id->field_valid & 2) {
333 xfer_mode = ide_max_dma_mode(drive);
334 goto out;
335 }
336
337 /*
338 * Consult the list of known "good" drives
339 */
340 if (__ide_dma_good_drive(drive)) {
341 if (id->eide_dma_time > 150)
342 goto out;
343 xfer_mode = XFER_MW_DMA_1;
344 }
345
346out:
347 if (xfer_mode == 0)
348 return -1;
349 316
350 return icside_set_speed(drive, xfer_mode) ? -1 : 0; 317 return -1;
351} 318}
352 319
353static int icside_dma_end(ide_drive_t *drive) 320static int icside_dma_end(ide_drive_t *drive)
@@ -464,7 +431,7 @@ static void icside_dma_init(ide_hwif_t *hwif)
464 431
465 hwif->dmatable_cpu = NULL; 432 hwif->dmatable_cpu = NULL;
466 hwif->dmatable_dma = 0; 433 hwif->dmatable_dma = 0;
467 hwif->speedproc = icside_set_speed; 434 hwif->set_dma_mode = icside_set_dma_mode;
468 hwif->autodma = 1; 435 hwif->autodma = 1;
469 436
470 hwif->ide_dma_check = icside_dma_check; 437 hwif->ide_dma_check = icside_dma_check;
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index 4bb42b30bf..2b4d2a0ae5 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -716,11 +716,9 @@ static void cris_set_pio_mode(ide_drive_t *drive, const u8 pio)
716 } 716 }
717 717
718 cris_ide_set_speed(TYPE_PIO, setup, strobe, hold); 718 cris_ide_set_speed(TYPE_PIO, setup, strobe, hold);
719
720 (void)ide_config_drive_speed(drive, XFER_PIO_0 + pio);
721} 719}
722 720
723static int speed_cris_ide(ide_drive_t *drive, const u8 speed) 721static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed)
724{ 722{
725 int cyc = 0, dvs = 0, strobe = 0, hold = 0; 723 int cyc = 0, dvs = 0, strobe = 0, hold = 0;
726 724
@@ -759,8 +757,6 @@ static int speed_cris_ide(ide_drive_t *drive, const u8 speed)
759 cris_ide_set_speed(TYPE_UDMA, cyc, dvs, 0); 757 cris_ide_set_speed(TYPE_UDMA, cyc, dvs, 0);
760 else 758 else
761 cris_ide_set_speed(TYPE_DMA, 0, strobe, hold); 759 cris_ide_set_speed(TYPE_DMA, 0, strobe, hold);
762
763 return ide_config_drive_speed(drive, speed);
764} 760}
765 761
766void __init 762void __init
@@ -791,7 +787,7 @@ init_e100_ide (void)
791 hwif->mmio = 1; 787 hwif->mmio = 1;
792 hwif->chipset = ide_etrax100; 788 hwif->chipset = ide_etrax100;
793 hwif->set_pio_mode = &cris_set_pio_mode; 789 hwif->set_pio_mode = &cris_set_pio_mode;
794 hwif->speedproc = &speed_cris_ide; 790 hwif->set_dma_mode = &cris_set_dma_mode;
795 hwif->ata_input_data = &cris_ide_input_data; 791 hwif->ata_input_data = &cris_ide_input_data;
796 hwif->ata_output_data = &cris_ide_output_data; 792 hwif->ata_output_data = &cris_ide_output_data;
797 hwif->atapi_input_bytes = &cris_atapi_input_bytes; 793 hwif->atapi_input_bytes = &cris_atapi_input_bytes;
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 6bff81a58b..1d5f682310 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -649,7 +649,6 @@ void ide_acpi_set_state(ide_hwif_t *hwif, int on)
649 if (!on) 649 if (!on)
650 acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D3); 650 acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D3);
651} 651}
652EXPORT_SYMBOL_GPL(ide_acpi_set_state);
653 652
654/** 653/**
655 * ide_acpi_init - initialize the ACPI link for an IDE interface 654 * ide_acpi_init - initialize the ACPI link for an IDE interface
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 6000c08f51..b453211ee0 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -169,6 +169,11 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
169 169
170EXPORT_SYMBOL_GPL(ide_dma_intr); 170EXPORT_SYMBOL_GPL(ide_dma_intr);
171 171
172static int ide_dma_good_drive(ide_drive_t *drive)
173{
174 return ide_in_drive_list(drive->id, drive_whitelist);
175}
176
172#ifdef CONFIG_BLK_DEV_IDEDMA_PCI 177#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
173/** 178/**
174 * ide_build_sglist - map IDE scatter gather for DMA I/O 179 * ide_build_sglist - map IDE scatter gather for DMA I/O
@@ -357,7 +362,7 @@ static int config_drive_for_dma (ide_drive_t *drive)
357 return 0; 362 return 0;
358 363
359 /* Consult the list of known "good" drives */ 364 /* Consult the list of known "good" drives */
360 if (__ide_dma_good_drive(drive)) 365 if (ide_dma_good_drive(drive))
361 return 0; 366 return 0;
362 } 367 }
363 368
@@ -639,14 +644,6 @@ int __ide_dma_bad_drive (ide_drive_t *drive)
639 644
640EXPORT_SYMBOL(__ide_dma_bad_drive); 645EXPORT_SYMBOL(__ide_dma_bad_drive);
641 646
642int __ide_dma_good_drive (ide_drive_t *drive)
643{
644 struct hd_driveid *id = drive->id;
645 return ide_in_drive_list(id, drive_whitelist);
646}
647
648EXPORT_SYMBOL(__ide_dma_good_drive);
649
650static const u8 xfer_mode_bases[] = { 647static const u8 xfer_mode_bases[] = {
651 XFER_UDMA_0, 648 XFER_UDMA_0,
652 XFER_MW_DMA_0, 649 XFER_MW_DMA_0,
@@ -746,6 +743,14 @@ u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
746 } 743 }
747 } 744 }
748 745
746 if (hwif->chipset == ide_acorn && mode == 0) {
747 /*
748 * is this correct?
749 */
750 if (ide_dma_good_drive(drive) && drive->id->eide_dma_time < 150)
751 mode = XFER_MW_DMA_1;
752 }
753
749 printk(KERN_DEBUG "%s: selected mode 0x%x\n", drive->name, mode); 754 printk(KERN_DEBUG "%s: selected mode 0x%x\n", drive->name, mode);
750 755
751 return min(mode, req_mode); 756 return min(mode, req_mode);
@@ -769,7 +774,10 @@ int ide_tune_dma(ide_drive_t *drive)
769 if (!speed) 774 if (!speed)
770 return 0; 775 return 0;
771 776
772 if (drive->hwif->speedproc(drive, speed)) 777 if (drive->hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
778 return 0;
779
780 if (ide_set_dma_mode(drive, speed))
773 return 0; 781 return 0;
774 782
775 return 1; 783 return 1;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 9560a8f4a8..4cece93011 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -836,9 +836,17 @@ static ide_startstop_t do_special (ide_drive_t *drive)
836 if (set_pio_mode_abuse(drive->hwif, req_pio)) { 836 if (set_pio_mode_abuse(drive->hwif, req_pio)) {
837 if (hwif->set_pio_mode) 837 if (hwif->set_pio_mode)
838 hwif->set_pio_mode(drive, req_pio); 838 hwif->set_pio_mode(drive, req_pio);
839 } else 839 } else {
840 int keep_dma = drive->using_dma;
841
840 ide_set_pio(drive, req_pio); 842 ide_set_pio(drive, req_pio);
841 843
844 if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
845 if (keep_dma)
846 hwif->ide_dma_on(drive);
847 }
848 }
849
842 return ide_stopped; 850 return ide_stopped;
843 } else { 851 } else {
844 if (drive->media == ide_disk) 852 if (drive->media == ide_disk)
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index cf0678b611..aa738833be 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -473,57 +473,22 @@ int drive_is_ready (ide_drive_t *drive)
473EXPORT_SYMBOL(drive_is_ready); 473EXPORT_SYMBOL(drive_is_ready);
474 474
475/* 475/*
476 * Global for All, and taken from ide-pmac.c. Can be called
477 * with spinlock held & IRQs disabled, so don't schedule !
478 */
479int wait_for_ready (ide_drive_t *drive, int timeout)
480{
481 ide_hwif_t *hwif = HWIF(drive);
482 u8 stat = 0;
483
484 while(--timeout) {
485 stat = hwif->INB(IDE_STATUS_REG);
486 if (!(stat & BUSY_STAT)) {
487 if (drive->ready_stat == 0)
488 break;
489 else if ((stat & drive->ready_stat)||(stat & ERR_STAT))
490 break;
491 }
492 mdelay(1);
493 }
494 if ((stat & ERR_STAT) || timeout <= 0) {
495 if (stat & ERR_STAT) {
496 printk(KERN_ERR "%s: wait_for_ready, "
497 "error status: %x\n", drive->name, stat);
498 }
499 return 1;
500 }
501 return 0;
502}
503
504/*
505 * This routine busy-waits for the drive status to be not "busy". 476 * This routine busy-waits for the drive status to be not "busy".
506 * It then checks the status for all of the "good" bits and none 477 * It then checks the status for all of the "good" bits and none
507 * of the "bad" bits, and if all is okay it returns 0. All other 478 * of the "bad" bits, and if all is okay it returns 0. All other
508 * cases return 1 after invoking ide_error() -- caller should just return. 479 * cases return error -- caller may then invoke ide_error().
509 * 480 *
510 * This routine should get fixed to not hog the cpu during extra long waits.. 481 * This routine should get fixed to not hog the cpu during extra long waits..
511 * That could be done by busy-waiting for the first jiffy or two, and then 482 * That could be done by busy-waiting for the first jiffy or two, and then
512 * setting a timer to wake up at half second intervals thereafter, 483 * setting a timer to wake up at half second intervals thereafter,
513 * until timeout is achieved, before timing out. 484 * until timeout is achieved, before timing out.
514 */ 485 */
515int ide_wait_stat (ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout) 486static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat)
516{ 487{
517 ide_hwif_t *hwif = HWIF(drive); 488 ide_hwif_t *hwif = drive->hwif;
518 u8 stat;
519 int i;
520 unsigned long flags; 489 unsigned long flags;
521 490 int i;
522 /* bail early if we've exceeded max_failures */ 491 u8 stat;
523 if (drive->max_failures && (drive->failures > drive->max_failures)) {
524 *startstop = ide_stopped;
525 return 1;
526 }
527 492
528 udelay(1); /* spec allows drive 400ns to assert "BUSY" */ 493 udelay(1); /* spec allows drive 400ns to assert "BUSY" */
529 if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) { 494 if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
@@ -541,8 +506,8 @@ int ide_wait_stat (ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 b
541 break; 506 break;
542 507
543 local_irq_restore(flags); 508 local_irq_restore(flags);
544 *startstop = ide_error(drive, "status timeout", stat); 509 *rstat = stat;
545 return 1; 510 return -EBUSY;
546 } 511 }
547 } 512 }
548 local_irq_restore(flags); 513 local_irq_restore(flags);
@@ -556,11 +521,39 @@ int ide_wait_stat (ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 b
556 */ 521 */
557 for (i = 0; i < 10; i++) { 522 for (i = 0; i < 10; i++) {
558 udelay(1); 523 udelay(1);
559 if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), good, bad)) 524 if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), good, bad)) {
525 *rstat = stat;
560 return 0; 526 return 0;
527 }
561 } 528 }
562 *startstop = ide_error(drive, "status error", stat); 529 *rstat = stat;
563 return 1; 530 return -EFAULT;
531}
532
533/*
534 * In case of error returns error value after doing "*startstop = ide_error()".
535 * The caller should return the updated value of "startstop" in this case,
536 * "startstop" is unchanged when the function returns 0.
537 */
538int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout)
539{
540 int err;
541 u8 stat;
542
543 /* bail early if we've exceeded max_failures */
544 if (drive->max_failures && (drive->failures > drive->max_failures)) {
545 *startstop = ide_stopped;
546 return 1;
547 }
548
549 err = __ide_wait_stat(drive, good, bad, timeout, &stat);
550
551 if (err) {
552 char *s = (err == -EBUSY) ? "status timeout" : "status error";
553 *startstop = ide_error(drive, s, stat);
554 }
555
556 return err;
564} 557}
565 558
566EXPORT_SYMBOL(ide_wait_stat); 559EXPORT_SYMBOL(ide_wait_stat);
@@ -620,15 +613,10 @@ u8 eighty_ninty_three (ide_drive_t *drive)
620 613
621 /* 614 /*
622 * FIXME: 615 * FIXME:
623 * - change master/slave IDENTIFY order
624 * - force bit13 (80c cable present) check also for !ivb devices 616 * - force bit13 (80c cable present) check also for !ivb devices
625 * (unless the slave device is pre-ATA3) 617 * (unless the slave device is pre-ATA3)
626 */ 618 */
627#ifndef CONFIG_IDEDMA_IVB
628 if ((id->hw_config & 0x4000) || (ivb && (id->hw_config & 0x2000))) 619 if ((id->hw_config & 0x4000) || (ivb && (id->hw_config & 0x2000)))
629#else
630 if (id->hw_config & 0x6000)
631#endif
632 return 1; 620 return 1;
633 621
634no_80w: 622no_80w:
@@ -778,15 +766,10 @@ int ide_driveid_update (ide_drive_t *drive)
778#endif 766#endif
779} 767}
780 768
781/* 769int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
782 * Similar to ide_wait_stat(), except it never calls ide_error internally.
783 *
784 * const char *msg == consider adding for verbose errors.
785 */
786int ide_config_drive_speed (ide_drive_t *drive, u8 speed)
787{ 770{
788 ide_hwif_t *hwif = HWIF(drive); 771 ide_hwif_t *hwif = drive->hwif;
789 int i, error = 1; 772 int error;
790 u8 stat; 773 u8 stat;
791 774
792// while (HWGROUP(drive)->busy) 775// while (HWGROUP(drive)->busy)
@@ -826,35 +809,10 @@ int ide_config_drive_speed (ide_drive_t *drive, u8 speed)
826 hwif->OUTBSYNC(drive, WIN_SETFEATURES, IDE_COMMAND_REG); 809 hwif->OUTBSYNC(drive, WIN_SETFEATURES, IDE_COMMAND_REG);
827 if ((IDE_CONTROL_REG) && (drive->quirk_list == 2)) 810 if ((IDE_CONTROL_REG) && (drive->quirk_list == 2))
828 hwif->OUTB(drive->ctl, IDE_CONTROL_REG); 811 hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
829 udelay(1);
830 /*
831 * Wait for drive to become non-BUSY
832 */
833 if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
834 unsigned long flags, timeout;
835 local_irq_set(flags);
836 timeout = jiffies + WAIT_CMD;
837 while ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
838 if (time_after(jiffies, timeout))
839 break;
840 }
841 local_irq_restore(flags);
842 }
843 812
844 /* 813 error = __ide_wait_stat(drive, drive->ready_stat,
845 * Allow status to settle, then read it again. 814 BUSY_STAT|DRQ_STAT|ERR_STAT,
846 * A few rare drives vastly violate the 400ns spec here, 815 WAIT_CMD, &stat);
847 * so we'll wait up to 10usec for a "good" status
848 * rather than expensively fail things immediately.
849 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
850 */
851 for (i = 0; i < 10; i++) {
852 udelay(1);
853 if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), drive->ready_stat, BUSY_STAT|DRQ_STAT|ERR_STAT)) {
854 error = 0;
855 break;
856 }
857 }
858 816
859 SELECT_MASK(drive, 0); 817 SELECT_MASK(drive, 0);
860 818
@@ -899,9 +857,6 @@ int ide_config_drive_speed (ide_drive_t *drive, u8 speed)
899 return error; 857 return error;
900} 858}
901 859
902EXPORT_SYMBOL(ide_config_drive_speed);
903
904
905/* 860/*
906 * This should get invoked any time we exit the driver to 861 * This should get invoked any time we exit the driver to
907 * wait for an interrupt response from a drive. handler() points 862 * wait for an interrupt response from a drive. handler() points
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index d97390c054..0e2562f0f7 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -349,7 +349,7 @@ void ide_set_pio(ide_drive_t *drive, u8 req_pio)
349 drive->name, host_pio, req_pio, 349 drive->name, host_pio, req_pio,
350 req_pio == 255 ? "(auto-tune)" : "", pio); 350 req_pio == 255 ? "(auto-tune)" : "", pio);
351 351
352 hwif->set_pio_mode(drive, pio); 352 (void)ide_set_pio_mode(drive, XFER_PIO_0 + pio);
353} 353}
354 354
355EXPORT_SYMBOL_GPL(ide_set_pio); 355EXPORT_SYMBOL_GPL(ide_set_pio);
@@ -378,39 +378,83 @@ void ide_toggle_bounce(ide_drive_t *drive, int on)
378 blk_queue_bounce_limit(drive->queue, addr); 378 blk_queue_bounce_limit(drive->queue, addr);
379} 379}
380 380
381int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
382{
383 ide_hwif_t *hwif = drive->hwif;
384
385 if (hwif->set_pio_mode == NULL)
386 return -1;
387
388 /*
389 * TODO: temporary hack for some legacy host drivers that didn't
390 * set transfer mode on the device in ->set_pio_mode method...
391 */
392 if (hwif->set_dma_mode == NULL) {
393 hwif->set_pio_mode(drive, mode - XFER_PIO_0);
394 return 0;
395 }
396
397 if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
398 if (ide_config_drive_speed(drive, mode))
399 return -1;
400 hwif->set_pio_mode(drive, mode - XFER_PIO_0);
401 return 0;
402 } else {
403 hwif->set_pio_mode(drive, mode - XFER_PIO_0);
404 return ide_config_drive_speed(drive, mode);
405 }
406}
407
408int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
409{
410 ide_hwif_t *hwif = drive->hwif;
411
412 if (hwif->set_dma_mode == NULL)
413 return -1;
414
415 if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
416 if (ide_config_drive_speed(drive, mode))
417 return -1;
418 hwif->set_dma_mode(drive, mode);
419 return 0;
420 } else {
421 hwif->set_dma_mode(drive, mode);
422 return ide_config_drive_speed(drive, mode);
423 }
424}
425
426EXPORT_SYMBOL_GPL(ide_set_dma_mode);
427
381/** 428/**
382 * ide_set_xfer_rate - set transfer rate 429 * ide_set_xfer_rate - set transfer rate
383 * @drive: drive to set 430 * @drive: drive to set
384 * @speed: speed to attempt to set 431 * @rate: speed to attempt to set
385 * 432 *
386 * General helper for setting the speed of an IDE device. This 433 * General helper for setting the speed of an IDE device. This
387 * function knows about user enforced limits from the configuration 434 * function knows about user enforced limits from the configuration
388 * which speedproc() does not. High level drivers should never 435 * which ->set_pio_mode/->set_dma_mode does not.
389 * invoke speedproc() directly.
390 */ 436 */
391 437
392int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) 438int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
393{ 439{
394 ide_hwif_t *hwif = drive->hwif; 440 ide_hwif_t *hwif = drive->hwif;
395 441
396 if (hwif->speedproc == NULL) 442 if (hwif->set_dma_mode == NULL)
397 return -1; 443 return -1;
398 444
399 rate = ide_rate_filter(drive, rate); 445 rate = ide_rate_filter(drive, rate);
400 446
401 if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5) { 447 if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5)
402 if (hwif->set_pio_mode) 448 return ide_set_pio_mode(drive, rate);
403 hwif->set_pio_mode(drive, rate - XFER_PIO_0);
404 449
405 /* 450 /*
406 * FIXME: this is incorrect to return zero here but 451 * TODO: transfer modes 0x00-0x07 passed from the user-space are
407 * since all users of ide_set_xfer_rate() ignore 452 * currently handled here which needs fixing (please note that such
408 * the return value it is not a problem currently 453 * case could happen iff the transfer mode has already been set on
409 */ 454 * the device by ide-proc.c::set_xfer_rate()).
410 return 0; 455 */
411 }
412 456
413 return hwif->speedproc(drive, rate); 457 return ide_set_dma_mode(drive, rate);
414} 458}
415 459
416static void ide_dump_opcode(ide_drive_t *drive) 460static void ide_dump_opcode(ide_drive_t *drive)
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index b4c9f63a38..d101171260 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -719,9 +719,9 @@ EXPORT_SYMBOL_GPL(ide_undecoded_slave);
719 */ 719 */
720static void probe_hwif(ide_hwif_t *hwif, void (*fixup)(ide_hwif_t *hwif)) 720static void probe_hwif(ide_hwif_t *hwif, void (*fixup)(ide_hwif_t *hwif))
721{ 721{
722 unsigned int unit;
723 unsigned long flags; 722 unsigned long flags;
724 unsigned int irqd; 723 unsigned int irqd;
724 int unit;
725 725
726 if (hwif->noprobe) 726 if (hwif->noprobe)
727 return; 727 return;
@@ -777,10 +777,9 @@ static void probe_hwif(ide_hwif_t *hwif, void (*fixup)(ide_hwif_t *hwif))
777 printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name); 777 printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name);
778 778
779 /* 779 /*
780 * Second drive should only exist if first drive was found, 780 * Need to probe slave device first to make it release PDIAG-.
781 * but a lot of cdrom drives are configured as single slaves.
782 */ 781 */
783 for (unit = 0; unit < MAX_DRIVES; ++unit) { 782 for (unit = MAX_DRIVES - 1; unit >= 0; unit--) {
784 ide_drive_t *drive = &hwif->drives[unit]; 783 ide_drive_t *drive = &hwif->drives[unit];
785 drive->dn = (hwif->channel ? 2 : 0) + unit; 784 drive->dn = (hwif->channel ? 2 : 0) + unit;
786 (void) probe_for_drive(drive); 785 (void) probe_for_drive(drive);
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index e96212ce57..5c0e4078b5 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -397,7 +397,7 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
397#endif 397#endif
398 398
399 hwif->set_pio_mode = tmp_hwif->set_pio_mode; 399 hwif->set_pio_mode = tmp_hwif->set_pio_mode;
400 hwif->speedproc = tmp_hwif->speedproc; 400 hwif->set_dma_mode = tmp_hwif->set_dma_mode;
401 hwif->mdma_filter = tmp_hwif->mdma_filter; 401 hwif->mdma_filter = tmp_hwif->mdma_filter;
402 hwif->udma_filter = tmp_hwif->udma_filter; 402 hwif->udma_filter = tmp_hwif->udma_filter;
403 hwif->selectproc = tmp_hwif->selectproc; 403 hwif->selectproc = tmp_hwif->selectproc;
@@ -1663,20 +1663,13 @@ static struct device_attribute ide_dev_attrs[] = {
1663 __ATTR_NULL 1663 __ATTR_NULL
1664}; 1664};
1665 1665
1666static int ide_uevent(struct device *dev, char **envp, int num_envp, 1666static int ide_uevent(struct device *dev, struct kobj_uevent_env *env)
1667 char *buffer, int buffer_size)
1668{ 1667{
1669 ide_drive_t *drive = to_ide_device(dev); 1668 ide_drive_t *drive = to_ide_device(dev);
1670 int i = 0; 1669
1671 int length = 0; 1670 add_uevent_var(env, "MEDIA=%s", media_string(drive));
1672 1671 add_uevent_var(env, "DRIVENAME=%s", drive->name);
1673 add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, 1672 add_uevent_var(env, "MODALIAS=ide:m-%s", media_string(drive));
1674 "MEDIA=%s", media_string(drive));
1675 add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
1676 "DRIVENAME=%s", drive->name);
1677 add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
1678 "MODALIAS=ide:m-%s", media_string(drive));
1679 envp[i] = NULL;
1680 return 0; 1673 return 0;
1681} 1674}
1682 1675
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
index ccfb9893a4..b992b2b91f 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/legacy/ide_platform.c
@@ -65,7 +65,7 @@ found:
65 hwif->hw.irq = hwif->irq = irq; 65 hwif->hw.irq = hwif->irq = irq;
66 66
67 hwif->hw.dma = NO_DMA; 67 hwif->hw.dma = NO_DMA;
68 hwif->hw.chipset = ide_generic; 68 hwif->chipset = hwif->hw.chipset = ide_generic;
69 69
70 if (mmio) { 70 if (mmio) {
71 hwif->mmio = 1; 71 hwif->mmio = 1;
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 85819ae206..aebde49365 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -101,12 +101,7 @@ void auide_outsw(unsigned long port, void *addr, u32 count)
101 101
102static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio) 102static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
103{ 103{
104 int mem_sttime; 104 int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
105 int mem_stcfg;
106 u8 speed;
107
108 mem_sttime = 0;
109 mem_stcfg = au_readl(MEM_STCFG2);
110 105
111 /* set pio mode! */ 106 /* set pio mode! */
112 switch(pio) { 107 switch(pio) {
@@ -164,18 +159,11 @@ static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
164 159
165 au_writel(mem_sttime,MEM_STTIME2); 160 au_writel(mem_sttime,MEM_STTIME2);
166 au_writel(mem_stcfg,MEM_STCFG2); 161 au_writel(mem_stcfg,MEM_STCFG2);
167
168 speed = pio + XFER_PIO_0;
169 ide_config_drive_speed(drive, speed);
170} 162}
171 163
172static int auide_tune_chipset(ide_drive_t *drive, const u8 speed) 164static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
173{ 165{
174 int mem_sttime; 166 int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
175 int mem_stcfg;
176
177 mem_sttime = 0;
178 mem_stcfg = au_readl(MEM_STCFG2);
179 167
180 switch(speed) { 168 switch(speed) {
181#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA 169#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
@@ -211,16 +199,11 @@ static int auide_tune_chipset(ide_drive_t *drive, const u8 speed)
211 break; 199 break;
212#endif 200#endif
213 default: 201 default:
214 return 1; 202 return;
215 } 203 }
216 204
217 if (ide_config_drive_speed(drive, speed))
218 return 1;
219
220 au_writel(mem_sttime,MEM_STTIME2); 205 au_writel(mem_sttime,MEM_STTIME2);
221 au_writel(mem_stcfg,MEM_STCFG2); 206 au_writel(mem_stcfg,MEM_STCFG2);
222
223 return 0;
224} 207}
225 208
226/* 209/*
@@ -682,6 +665,7 @@ static int au_ide_probe(struct device *dev)
682#endif 665#endif
683 666
684 hwif->pio_mask = ATA_PIO4; 667 hwif->pio_mask = ATA_PIO4;
668 hwif->host_flags = IDE_HFLAG_POST_SET_MODE;
685 669
686 hwif->noprobe = 0; 670 hwif->noprobe = 0;
687 hwif->drives[0].unmask = 1; 671 hwif->drives[0].unmask = 1;
@@ -702,7 +686,7 @@ static int au_ide_probe(struct device *dev)
702#endif 686#endif
703 687
704 hwif->set_pio_mode = &au1xxx_set_pio_mode; 688 hwif->set_pio_mode = &au1xxx_set_pio_mode;
705 hwif->speedproc = &auide_tune_chipset; 689 hwif->set_dma_mode = &auide_set_dma_mode;
706 690
707#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA 691#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
708 hwif->dma_off_quietly = &auide_dma_off_quietly; 692 hwif->dma_off_quietly = &auide_dma_off_quietly;
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index 0d5f62c5df..d6cb2d5143 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -87,7 +87,7 @@ static u8 pci_bus_clock_list_ultra (u8 speed, struct chipset_bus_clock_list_entr
87 return chipset_table->ultra_settings; 87 return chipset_table->ultra_settings;
88} 88}
89 89
90static int aec6210_tune_chipset(ide_drive_t *drive, const u8 speed) 90static void aec6210_set_mode(ide_drive_t *drive, const u8 speed)
91{ 91{
92 ide_hwif_t *hwif = HWIF(drive); 92 ide_hwif_t *hwif = HWIF(drive);
93 struct pci_dev *dev = hwif->pci_dev; 93 struct pci_dev *dev = hwif->pci_dev;
@@ -111,10 +111,9 @@ static int aec6210_tune_chipset(ide_drive_t *drive, const u8 speed)
111 tmp2 = ((ultra_conf << (2*drive->dn)) | (tmp1 & ~(3 << (2*drive->dn)))); 111 tmp2 = ((ultra_conf << (2*drive->dn)) | (tmp1 & ~(3 << (2*drive->dn))));
112 pci_write_config_byte(dev, 0x54, tmp2); 112 pci_write_config_byte(dev, 0x54, tmp2);
113 local_irq_restore(flags); 113 local_irq_restore(flags);
114 return(ide_config_drive_speed(drive, speed));
115} 114}
116 115
117static int aec6260_tune_chipset(ide_drive_t *drive, const u8 speed) 116static void aec6260_set_mode(ide_drive_t *drive, const u8 speed)
118{ 117{
119 ide_hwif_t *hwif = HWIF(drive); 118 ide_hwif_t *hwif = HWIF(drive);
120 struct pci_dev *dev = hwif->pci_dev; 119 struct pci_dev *dev = hwif->pci_dev;
@@ -135,12 +134,11 @@ static int aec6260_tune_chipset(ide_drive_t *drive, const u8 speed)
135 tmp2 = ((ultra_conf << (4*unit)) | (tmp1 & ~(7 << (4*unit)))); 134 tmp2 = ((ultra_conf << (4*unit)) | (tmp1 & ~(7 << (4*unit))));
136 pci_write_config_byte(dev, (0x44|hwif->channel), tmp2); 135 pci_write_config_byte(dev, (0x44|hwif->channel), tmp2);
137 local_irq_restore(flags); 136 local_irq_restore(flags);
138 return(ide_config_drive_speed(drive, speed));
139} 137}
140 138
141static void aec_set_pio_mode(ide_drive_t *drive, const u8 pio) 139static void aec_set_pio_mode(ide_drive_t *drive, const u8 pio)
142{ 140{
143 (void) HWIF(drive)->speedproc(drive, pio + XFER_PIO_0); 141 drive->hwif->set_dma_mode(drive, pio + XFER_PIO_0);
144} 142}
145 143
146static int aec62xx_config_drive_xfer_rate (ide_drive_t *drive) 144static int aec62xx_config_drive_xfer_rate (ide_drive_t *drive)
@@ -205,9 +203,9 @@ static void __devinit init_hwif_aec62xx(ide_hwif_t *hwif)
205 if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF) { 203 if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF) {
206 if(hwif->mate) 204 if(hwif->mate)
207 hwif->mate->serialized = hwif->serialized = 1; 205 hwif->mate->serialized = hwif->serialized = 1;
208 hwif->speedproc = &aec6210_tune_chipset; 206 hwif->set_dma_mode = &aec6210_set_mode;
209 } else 207 } else
210 hwif->speedproc = &aec6260_tune_chipset; 208 hwif->set_dma_mode = &aec6260_set_mode;
211 209
212 if (!hwif->dma_base) { 210 if (!hwif->dma_base) {
213 hwif->drives[0].autotune = hwif->drives[1].autotune = 1; 211 hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index d04b966b43..0b83443bf2 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -283,14 +283,14 @@ static int ali_get_info (char *buffer, char **addr, off_t offset, int count)
283#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */ 283#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
284 284
285/** 285/**
286 * ali_tune_pio - set host controller for PIO mode 286 * ali_set_pio_mode - set host controller for PIO mode
287 * @drive: drive 287 * @drive: drive
288 * @pio: PIO mode number 288 * @pio: PIO mode number
289 * 289 *
290 * Program the controller for the given PIO mode. 290 * Program the controller for the given PIO mode.
291 */ 291 */
292 292
293static void ali_tune_pio(ide_drive_t *drive, const u8 pio) 293static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio)
294{ 294{
295 ide_hwif_t *hwif = HWIF(drive); 295 ide_hwif_t *hwif = HWIF(drive);
296 struct pci_dev *dev = hwif->pci_dev; 296 struct pci_dev *dev = hwif->pci_dev;
@@ -358,21 +358,6 @@ static void ali_tune_pio(ide_drive_t *drive, const u8 pio)
358} 358}
359 359
360/** 360/**
361 * ali_set_pio_mode - set up drive for PIO mode
362 * @drive: drive to tune
363 * @pio: desired mode
364 *
365 * Program the controller with the desired PIO timing for the given drive.
366 * Then set up the drive itself.
367 */
368
369static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio)
370{
371 ali_tune_pio(drive, pio);
372 (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
373}
374
375/**
376 * ali_udma_filter - compute UDMA mask 361 * ali_udma_filter - compute UDMA mask
377 * @drive: IDE device 362 * @drive: IDE device
378 * 363 *
@@ -401,15 +386,14 @@ static u8 ali_udma_filter(ide_drive_t *drive)
401} 386}
402 387
403/** 388/**
404 * ali15x3_tune_chipset - set up chipset/drive for new speed 389 * ali_set_dma_mode - set host controller for DMA mode
405 * @drive: drive to configure for 390 * @drive: drive
406 * @speed: desired speed 391 * @speed: DMA mode
407 * 392 *
408 * Configure the hardware for the desired IDE transfer mode. 393 * Configure the hardware for the desired IDE transfer mode.
409 * We also do the needed drive configuration through helpers
410 */ 394 */
411 395
412static int ali15x3_tune_chipset(ide_drive_t *drive, const u8 speed) 396static void ali_set_dma_mode(ide_drive_t *drive, const u8 speed)
413{ 397{
414 ide_hwif_t *hwif = HWIF(drive); 398 ide_hwif_t *hwif = HWIF(drive);
415 struct pci_dev *dev = hwif->pci_dev; 399 struct pci_dev *dev = hwif->pci_dev;
@@ -419,7 +403,7 @@ static int ali15x3_tune_chipset(ide_drive_t *drive, const u8 speed)
419 int m5229_udma = (hwif->channel) ? 0x57 : 0x56; 403 int m5229_udma = (hwif->channel) ? 0x57 : 0x56;
420 404
421 if (speed < XFER_PIO_0) 405 if (speed < XFER_PIO_0)
422 return 1; 406 return;
423 407
424 if (speed == XFER_UDMA_6) 408 if (speed == XFER_UDMA_6)
425 speed1 = 0x47; 409 speed1 = 0x47;
@@ -450,7 +434,6 @@ static int ali15x3_tune_chipset(ide_drive_t *drive, const u8 speed)
450 pci_write_config_byte(dev, 0x4b, tmpbyte); 434 pci_write_config_byte(dev, 0x4b, tmpbyte);
451 } 435 }
452 } 436 }
453 return (ide_config_drive_speed(drive, speed));
454} 437}
455 438
456/** 439/**
@@ -699,7 +682,7 @@ static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif)
699{ 682{
700 hwif->autodma = 0; 683 hwif->autodma = 0;
701 hwif->set_pio_mode = &ali_set_pio_mode; 684 hwif->set_pio_mode = &ali_set_pio_mode;
702 hwif->speedproc = &ali15x3_tune_chipset; 685 hwif->set_dma_mode = &ali_set_dma_mode;
703 hwif->udma_filter = &ali_udma_filter; 686 hwif->udma_filter = &ali_udma_filter;
704 687
705 /* don't use LBA48 DMA on ALi devices before rev 0xC5 */ 688 /* don't use LBA48 DMA on ALi devices before rev 0xC5 */
@@ -711,6 +694,10 @@ static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif)
711 return; 694 return;
712 } 695 }
713 696
697 /*
698 * check in ->init_dma guarantees m5229_revision >= 0x20 here
699 */
700
714 if (m5229_revision > 0x20) 701 if (m5229_revision > 0x20)
715 hwif->atapi_dma = 1; 702 hwif->atapi_dma = 1;
716 703
@@ -728,18 +715,15 @@ static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif)
728 hwif->mwdma_mask = 0x07; 715 hwif->mwdma_mask = 0x07;
729 hwif->swdma_mask = 0x07; 716 hwif->swdma_mask = 0x07;
730 717
731 if (m5229_revision >= 0x20) { 718 hwif->ide_dma_check = &ali15x3_config_drive_for_dma;
732 /* 719 hwif->dma_setup = &ali15x3_dma_setup;
733 * M1543C or newer for DMAing 720
734 */ 721 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
735 hwif->ide_dma_check = &ali15x3_config_drive_for_dma; 722 hwif->cbl = ata66_ali15x3(hwif);
736 hwif->dma_setup = &ali15x3_dma_setup; 723
737 if (!noautodma) 724 if (!noautodma)
738 hwif->autodma = 1; 725 hwif->autodma = 1;
739 726
740 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
741 hwif->cbl = ata66_ali15x3(hwif);
742 }
743 hwif->drives[0].autodma = hwif->autodma; 727 hwif->drives[0].autodma = hwif->autodma;
744 hwif->drives[1].autodma = hwif->autodma; 728 hwif->drives[1].autodma = hwif->autodma;
745} 729}
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 513205e52a..6ff4089a23 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Version 2.22 2 * Version 2.23
3 * 3 *
4 * AMD 755/756/766/8111 and nVidia nForce/2/2s/3/3s/CK804/MCP04 4 * AMD 755/756/766/8111 and nVidia nForce/2/2s/3/3s/CK804/MCP04
5 * IDE driver for Linux. 5 * IDE driver for Linux.
@@ -229,20 +229,16 @@ static void amd_set_speed(struct pci_dev *dev, unsigned char dn, struct ide_timi
229} 229}
230 230
231/* 231/*
232 * amd_set_drive() computes timing values configures the drive and 232 * amd_set_drive() computes timing values and configures the chipset
233 * the chipset to a desired transfer mode. It also can be called 233 * to a desired transfer mode. It also can be called by upper layers.
234 * by upper layers.
235 */ 234 */
236 235
237static int amd_set_drive(ide_drive_t *drive, const u8 speed) 236static void amd_set_drive(ide_drive_t *drive, const u8 speed)
238{ 237{
239 ide_drive_t *peer = HWIF(drive)->drives + (~drive->dn & 1); 238 ide_drive_t *peer = HWIF(drive)->drives + (~drive->dn & 1);
240 struct ide_timing t, p; 239 struct ide_timing t, p;
241 int T, UT; 240 int T, UT;
242 241
243 if (speed != XFER_PIO_SLOW)
244 ide_config_drive_speed(drive, speed);
245
246 T = 1000000000 / amd_clock; 242 T = 1000000000 / amd_clock;
247 UT = (amd_config->udma_mask == ATA_UDMA2) ? T : (T / 2); 243 UT = (amd_config->udma_mask == ATA_UDMA2) ? T : (T / 2);
248 244
@@ -257,12 +253,6 @@ static int amd_set_drive(ide_drive_t *drive, const u8 speed)
257 if (speed == XFER_UDMA_6 && amd_clock <= 33333) t.udma = 15; 253 if (speed == XFER_UDMA_6 && amd_clock <= 33333) t.udma = 15;
258 254
259 amd_set_speed(HWIF(drive)->pci_dev, drive->dn, &t); 255 amd_set_speed(HWIF(drive)->pci_dev, drive->dn, &t);
260
261 if (!drive->init_speed)
262 drive->init_speed = speed;
263 drive->current_speed = speed;
264
265 return 0;
266} 256}
267 257
268/* 258/*
@@ -399,7 +389,7 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
399 hwif->autodma = 0; 389 hwif->autodma = 0;
400 390
401 hwif->set_pio_mode = &amd_set_pio_mode; 391 hwif->set_pio_mode = &amd_set_pio_mode;
402 hwif->speedproc = &amd_set_drive; 392 hwif->set_dma_mode = &amd_set_drive;
403 393
404 for (i = 0; i < 2; i++) { 394 for (i = 0; i < 2; i++) {
405 hwif->drives[i].io_32bit = 1; 395 hwif->drives[i].io_32bit = 1;
@@ -441,7 +431,8 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
441 .enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, \ 431 .enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, \
442 .bootable = ON_BOARD, \ 432 .bootable = ON_BOARD, \
443 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST \ 433 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST \
444 | IDE_HFLAG_PIO_NO_DOWNGRADE, \ 434 | IDE_HFLAG_PIO_NO_DOWNGRADE \
435 | IDE_HFLAG_POST_SET_MODE, \
445 .pio_mask = ATA_PIO5, \ 436 .pio_mask = ATA_PIO5, \
446 } 437 }
447 438
@@ -454,7 +445,8 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
454 .enablebits = {{0x50,0x02,0x02}, {0x50,0x01,0x01}}, \ 445 .enablebits = {{0x50,0x02,0x02}, {0x50,0x01,0x01}}, \
455 .bootable = ON_BOARD, \ 446 .bootable = ON_BOARD, \
456 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST \ 447 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST \
457 | IDE_HFLAG_PIO_NO_DOWNGRADE, \ 448 | IDE_HFLAG_PIO_NO_DOWNGRADE \
449 | IDE_HFLAG_POST_SET_MODE, \
458 .pio_mask = ATA_PIO5, \ 450 .pio_mask = ATA_PIO5, \
459 } 451 }
460 452
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c
index 178876a3af..0eb97f021d 100644
--- a/drivers/ide/pci/atiixp.c
+++ b/drivers/ide/pci/atiixp.c
@@ -122,14 +122,14 @@ static void atiixp_dma_host_off(ide_drive_t *drive)
122} 122}
123 123
124/** 124/**
125 * atiixp_tune_pio - tune a drive attached to a ATIIXP 125 * atiixp_set_pio_mode - set host controller for PIO mode
126 * @drive: drive to tune 126 * @drive: drive
127 * @pio: desired PIO mode 127 * @pio: PIO mode number
128 * 128 *
129 * Set the interface PIO mode. 129 * Set the interface PIO mode.
130 */ 130 */
131 131
132static void atiixp_tune_pio(ide_drive_t *drive, u8 pio) 132static void atiixp_set_pio_mode(ide_drive_t *drive, const u8 pio)
133{ 133{
134 struct pci_dev *dev = drive->hwif->pci_dev; 134 struct pci_dev *dev = drive->hwif->pci_dev;
135 unsigned long flags; 135 unsigned long flags;
@@ -153,23 +153,16 @@ static void atiixp_tune_pio(ide_drive_t *drive, u8 pio)
153 spin_unlock_irqrestore(&atiixp_lock, flags); 153 spin_unlock_irqrestore(&atiixp_lock, flags);
154} 154}
155 155
156static void atiixp_set_pio_mode(ide_drive_t *drive, const u8 pio)
157{
158 atiixp_tune_pio(drive, pio);
159 (void)ide_config_drive_speed(drive, XFER_PIO_0 + pio);
160}
161
162/** 156/**
163 * atiixp_tune_chipset - tune a ATIIXP interface 157 * atiixp_set_dma_mode - set host controller for DMA mode
164 * @drive: IDE drive to tune 158 * @drive: drive
165 * @speed: speed to configure 159 * @speed: DMA mode
166 * 160 *
167 * Set a ATIIXP interface channel to the desired speeds. This involves 161 * Set a ATIIXP host controller to the desired DMA mode. This involves
168 * requires the right timing data into the ATIIXP configuration space 162 * programming the right timing data into the PCI configuration space.
169 * then setting the drive parameters appropriately
170 */ 163 */
171 164
172static int atiixp_speedproc(ide_drive_t *drive, const u8 speed) 165static void atiixp_set_dma_mode(ide_drive_t *drive, const u8 speed)
173{ 166{
174 struct pci_dev *dev = drive->hwif->pci_dev; 167 struct pci_dev *dev = drive->hwif->pci_dev;
175 unsigned long flags; 168 unsigned long flags;
@@ -204,9 +197,7 @@ static int atiixp_speedproc(ide_drive_t *drive, const u8 speed)
204 else 197 else
205 pio = speed - XFER_PIO_0; 198 pio = speed - XFER_PIO_0;
206 199
207 atiixp_tune_pio(drive, pio); 200 atiixp_set_pio_mode(drive, pio);
208
209 return ide_config_drive_speed(drive, speed);
210} 201}
211 202
212/** 203/**
@@ -249,7 +240,7 @@ static void __devinit init_hwif_atiixp(ide_hwif_t *hwif)
249 240
250 hwif->autodma = 0; 241 hwif->autodma = 0;
251 hwif->set_pio_mode = &atiixp_set_pio_mode; 242 hwif->set_pio_mode = &atiixp_set_pio_mode;
252 hwif->speedproc = &atiixp_speedproc; 243 hwif->set_dma_mode = &atiixp_set_dma_mode;
253 hwif->drives[0].autotune = 1; 244 hwif->drives[0].autotune = 1;
254 hwif->drives[1].autotune = 1; 245 hwif->drives[1].autotune = 1;
255 246
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index 0b568c60f9..d50f15e34b 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -280,10 +280,9 @@ static void cmd64x_set_pio_mode(ide_drive_t *drive, const u8 pio)
280 return; 280 return;
281 281
282 cmd64x_tune_pio(drive, pio); 282 cmd64x_tune_pio(drive, pio);
283 (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
284} 283}
285 284
286static int cmd64x_tune_chipset(ide_drive_t *drive, const u8 speed) 285static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
287{ 286{
288 ide_hwif_t *hwif = HWIF(drive); 287 ide_hwif_t *hwif = HWIF(drive);
289 struct pci_dev *dev = hwif->pci_dev; 288 struct pci_dev *dev = hwif->pci_dev;
@@ -324,13 +323,11 @@ static int cmd64x_tune_chipset(ide_drive_t *drive, const u8 speed)
324 program_cycle_times(drive, 480, 215); 323 program_cycle_times(drive, 480, 215);
325 break; 324 break;
326 default: 325 default:
327 return 1; 326 return;
328 } 327 }
329 328
330 if (speed >= XFER_SW_DMA_0) 329 if (speed >= XFER_SW_DMA_0)
331 (void) pci_write_config_byte(dev, pciU, regU); 330 (void) pci_write_config_byte(dev, pciU, regU);
332
333 return ide_config_drive_speed(drive, speed);
334} 331}
335 332
336static int cmd64x_config_drive_for_dma (ide_drive_t *drive) 333static int cmd64x_config_drive_for_dma (ide_drive_t *drive)
@@ -524,7 +521,7 @@ static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif)
524 pci_read_config_byte(dev, PCI_REVISION_ID, &rev); 521 pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
525 522
526 hwif->set_pio_mode = &cmd64x_set_pio_mode; 523 hwif->set_pio_mode = &cmd64x_set_pio_mode;
527 hwif->speedproc = &cmd64x_tune_chipset; 524 hwif->set_dma_mode = &cmd64x_set_dma_mode;
528 525
529 hwif->drives[0].autotune = hwif->drives[1].autotune = 1; 526 hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
530 527
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
index 1217d2a747..fbce90048a 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/pci/cs5520.c
@@ -96,22 +96,13 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
96 reg = inb(hwif->dma_base + 0x02 + 8*controller); 96 reg = inb(hwif->dma_base + 0x02 + 8*controller);
97 reg |= 1<<((drive->dn&1)+5); 97 reg |= 1<<((drive->dn&1)+5);
98 outb(reg, hwif->dma_base + 0x02 + 8*controller); 98 outb(reg, hwif->dma_base + 0x02 + 8*controller);
99
100 (void)ide_config_drive_speed(drive, XFER_PIO_0 + pio);
101} 99}
102 100
103static int cs5520_tune_chipset(ide_drive_t *drive, const u8 speed) 101static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed)
104{ 102{
105 printk(KERN_ERR "cs55x0: bad ide timing.\n"); 103 printk(KERN_ERR "cs55x0: bad ide timing.\n");
106 104
107 cs5520_set_pio_mode(drive, 0); 105 cs5520_set_pio_mode(drive, 0);
108
109 /*
110 * FIXME: this is incorrect to return zero here but
111 * since all users of ide_set_xfer_rate() ignore
112 * the return value it is not a problem currently
113 */
114 return 0;
115} 106}
116 107
117static int cs5520_config_drive_xfer_rate(ide_drive_t *drive) 108static int cs5520_config_drive_xfer_rate(ide_drive_t *drive)
@@ -150,26 +141,25 @@ static int cs5520_dma_on(ide_drive_t *drive)
150static void __devinit init_hwif_cs5520(ide_hwif_t *hwif) 141static void __devinit init_hwif_cs5520(ide_hwif_t *hwif)
151{ 142{
152 hwif->set_pio_mode = &cs5520_set_pio_mode; 143 hwif->set_pio_mode = &cs5520_set_pio_mode;
153 hwif->speedproc = &cs5520_tune_chipset; 144 hwif->set_dma_mode = &cs5520_set_dma_mode;
154 hwif->ide_dma_check = &cs5520_config_drive_xfer_rate;
155 hwif->ide_dma_on = &cs5520_dma_on;
156 145
157 if(!noautodma) 146 if (hwif->dma_base == 0) {
158 hwif->autodma = 1; 147 hwif->drives[1].autotune = hwif->drives[0].autotune = 1;
159
160 if(!hwif->dma_base)
161 {
162 hwif->drives[0].autotune = 1;
163 hwif->drives[1].autotune = 1;
164 return; 148 return;
165 } 149 }
166 150
151 hwif->ide_dma_check = &cs5520_config_drive_xfer_rate;
152 hwif->ide_dma_on = &cs5520_dma_on;
153
167 /* ATAPI is harder so leave it for now */ 154 /* ATAPI is harder so leave it for now */
168 hwif->atapi_dma = 0; 155 hwif->atapi_dma = 0;
169 hwif->ultra_mask = 0; 156 hwif->ultra_mask = 0;
170 hwif->swdma_mask = 0; 157 hwif->swdma_mask = 0;
171 hwif->mwdma_mask = 0; 158 hwif->mwdma_mask = 0;
172 159
160 if (!noautodma)
161 hwif->autodma = 1;
162
173 hwif->drives[0].autodma = hwif->autodma; 163 hwif->drives[0].autodma = hwif->autodma;
174 hwif->drives[1].autodma = hwif->autodma; 164 hwif->drives[1].autodma = hwif->autodma;
175} 165}
diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/pci/cs5530.c
index 741507b4cd..e4121577ce 100644
--- a/drivers/ide/pci/cs5530.c
+++ b/drivers/ide/pci/cs5530.c
@@ -30,22 +30,6 @@
30#include <asm/io.h> 30#include <asm/io.h>
31#include <asm/irq.h> 31#include <asm/irq.h>
32 32
33/**
34 * cs5530_xfer_set_mode - set a new transfer mode at the drive
35 * @drive: drive to tune
36 * @mode: new mode
37 *
38 * Logging wrapper to the IDE driver speed configuration. This can
39 * probably go away now.
40 */
41
42static int cs5530_set_xfer_mode (ide_drive_t *drive, u8 mode)
43{
44 printk(KERN_DEBUG "%s: cs5530_set_xfer_mode(%s)\n",
45 drive->name, ide_xfer_verbose(mode));
46 return (ide_config_drive_speed(drive, mode));
47}
48
49/* 33/*
50 * Here are the standard PIO mode 0-4 timings for each "format". 34 * Here are the standard PIO mode 0-4 timings for each "format".
51 * Format-0 uses fast data reg timings, with slower command reg timings. 35 * Format-0 uses fast data reg timings, with slower command reg timings.
@@ -62,20 +46,12 @@ static unsigned int cs5530_pio_timings[2][5] = {
62#define CS5530_BAD_PIO(timings) (((timings)&~0x80000000)==0x0000e132) 46#define CS5530_BAD_PIO(timings) (((timings)&~0x80000000)==0x0000e132)
63#define CS5530_BASEREG(hwif) (((hwif)->dma_base & ~0xf) + ((hwif)->channel ? 0x30 : 0x20)) 47#define CS5530_BASEREG(hwif) (((hwif)->dma_base & ~0xf) + ((hwif)->channel ? 0x30 : 0x20))
64 48
65static void cs5530_tunepio(ide_drive_t *drive, u8 pio)
66{
67 unsigned long basereg = CS5530_BASEREG(drive->hwif);
68 unsigned int format = (inl(basereg + 4) >> 31) & 1;
69
70 outl(cs5530_pio_timings[format][pio], basereg + ((drive->dn & 1)<<3));
71}
72
73/** 49/**
74 * cs5530_set_pio_mode - set PIO mode 50 * cs5530_set_pio_mode - set host controller for PIO mode
75 * @drive: drive 51 * @drive: drive
76 * @pio: PIO mode number 52 * @pio: PIO mode number
77 * 53 *
78 * Handles setting of PIO mode for both the chipset and drive. 54 * Handles setting of PIO mode for the chipset.
79 * 55 *
80 * The init_hwif_cs5530() routine guarantees that all drives 56 * The init_hwif_cs5530() routine guarantees that all drives
81 * will have valid default PIO timings set up before we get here. 57 * will have valid default PIO timings set up before we get here.
@@ -83,8 +59,10 @@ static void cs5530_tunepio(ide_drive_t *drive, u8 pio)
83 59
84static void cs5530_set_pio_mode(ide_drive_t *drive, const u8 pio) 60static void cs5530_set_pio_mode(ide_drive_t *drive, const u8 pio)
85{ 61{
86 if (cs5530_set_xfer_mode(drive, XFER_PIO_0 + pio) == 0) 62 unsigned long basereg = CS5530_BASEREG(drive->hwif);
87 cs5530_tunepio(drive, pio); 63 unsigned int format = (inl(basereg + 4) >> 31) & 1;
64
65 outl(cs5530_pio_timings[format][pio], basereg + ((drive->dn & 1)<<3));
88} 66}
89 67
90/** 68/**
@@ -142,20 +120,11 @@ static int cs5530_config_dma(ide_drive_t *drive)
142 return 1; 120 return 1;
143} 121}
144 122
145static int cs5530_tune_chipset(ide_drive_t *drive, const u8 mode) 123static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode)
146{ 124{
147 unsigned long basereg; 125 unsigned long basereg;
148 unsigned int reg, timings = 0; 126 unsigned int reg, timings = 0;
149 127
150 /*
151 * Tell the drive to switch to the new mode; abort on failure.
152 */
153 if (cs5530_set_xfer_mode(drive, mode))
154 return 1; /* failure */
155
156 /*
157 * Now tune the chipset to match the drive:
158 */
159 switch (mode) { 128 switch (mode) {
160 case XFER_UDMA_0: timings = 0x00921250; break; 129 case XFER_UDMA_0: timings = 0x00921250; break;
161 case XFER_UDMA_1: timings = 0x00911140; break; 130 case XFER_UDMA_1: timings = 0x00911140; break;
@@ -180,8 +149,6 @@ static int cs5530_tune_chipset(ide_drive_t *drive, const u8 mode)
180 outl(reg, basereg + 4); /* write drive0 config register */ 149 outl(reg, basereg + 4); /* write drive0 config register */
181 outl(timings, basereg + 12); /* write drive1 config register */ 150 outl(timings, basereg + 12); /* write drive1 config register */
182 } 151 }
183
184 return 0; /* success */
185} 152}
186 153
187/** 154/**
@@ -299,7 +266,7 @@ static void __devinit init_hwif_cs5530 (ide_hwif_t *hwif)
299 hwif->serialized = hwif->mate->serialized = 1; 266 hwif->serialized = hwif->mate->serialized = 1;
300 267
301 hwif->set_pio_mode = &cs5530_set_pio_mode; 268 hwif->set_pio_mode = &cs5530_set_pio_mode;
302 hwif->speedproc = &cs5530_tune_chipset; 269 hwif->set_dma_mode = &cs5530_set_dma_mode;
303 270
304 basereg = CS5530_BASEREG(hwif); 271 basereg = CS5530_BASEREG(hwif);
305 d0_timings = inl(basereg + 0); 272 d0_timings = inl(basereg + 0);
@@ -340,6 +307,7 @@ static ide_pci_device_t cs5530_chipset __devinitdata = {
340 .autodma = AUTODMA, 307 .autodma = AUTODMA,
341 .bootable = ON_BOARD, 308 .bootable = ON_BOARD,
342 .pio_mask = ATA_PIO4, 309 .pio_mask = ATA_PIO4,
310 .host_flags = IDE_HFLAG_POST_SET_MODE,
343}; 311};
344 312
345static int __devinit cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id) 313static int __devinit cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
index 383b7eccbc..257865778f 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/pci/cs5535.c
@@ -131,24 +131,21 @@ static void cs5535_set_speed(ide_drive_t *drive, const u8 speed)
131 } 131 }
132} 132}
133 133
134/**** 134/**
135 * cs5535_set_drive - Configure the drive to the new speed 135 * cs5535_set_dma_mode - set host controller for DMA mode
136 * @drive: Drive to set up 136 * @drive: drive
137 * @speed: desired speed 137 * @speed: DMA mode
138 * 138 *
139 * cs5535_set_drive() configures the drive and the chipset to a 139 * Programs the chipset for DMA mode.
140 * new speed. It also can be called by upper layers.
141 */ 140 */
142static int cs5535_set_drive(ide_drive_t *drive, u8 speed) 141
142static void cs5535_set_dma_mode(ide_drive_t *drive, const u8 speed)
143{ 143{
144 ide_config_drive_speed(drive, speed);
145 cs5535_set_speed(drive, speed); 144 cs5535_set_speed(drive, speed);
146
147 return 0;
148} 145}
149 146
150/** 147/**
151 * cs5535_set_pio_mode - PIO setup 148 * cs5535_set_pio_mode - set host controller for PIO mode
152 * @drive: drive 149 * @drive: drive
153 * @pio: PIO mode number 150 * @pio: PIO mode number
154 * 151 *
@@ -157,7 +154,6 @@ static int cs5535_set_drive(ide_drive_t *drive, u8 speed)
157 154
158static void cs5535_set_pio_mode(ide_drive_t *drive, const u8 pio) 155static void cs5535_set_pio_mode(ide_drive_t *drive, const u8 pio)
159{ 156{
160 ide_config_drive_speed(drive, XFER_PIO_0 + pio);
161 cs5535_set_speed(drive, XFER_PIO_0 + pio); 157 cs5535_set_speed(drive, XFER_PIO_0 + pio);
162} 158}
163 159
@@ -194,12 +190,16 @@ static u8 __devinit cs5535_cable_detect(struct pci_dev *dev)
194 */ 190 */
195static void __devinit init_hwif_cs5535(ide_hwif_t *hwif) 191static void __devinit init_hwif_cs5535(ide_hwif_t *hwif)
196{ 192{
197 int i;
198
199 hwif->autodma = 0; 193 hwif->autodma = 0;
200 194
201 hwif->set_pio_mode = &cs5535_set_pio_mode; 195 hwif->set_pio_mode = &cs5535_set_pio_mode;
202 hwif->speedproc = &cs5535_set_drive; 196 hwif->set_dma_mode = &cs5535_set_dma_mode;
197
198 hwif->drives[1].autotune = hwif->drives[0].autotune = 1;
199
200 if (hwif->dma_base == 0)
201 return;
202
203 hwif->ide_dma_check = &cs5535_dma_check; 203 hwif->ide_dma_check = &cs5535_dma_check;
204 204
205 hwif->atapi_dma = 1; 205 hwif->atapi_dma = 1;
@@ -211,11 +211,7 @@ static void __devinit init_hwif_cs5535(ide_hwif_t *hwif)
211 if (!noautodma) 211 if (!noautodma)
212 hwif->autodma = 1; 212 hwif->autodma = 1;
213 213
214 /* just setting autotune and not worrying about bios timings */ 214 hwif->drives[1].autodma = hwif->drives[0].autodma = hwif->autodma;
215 for (i = 0; i < 2; i++) {
216 hwif->drives[i].autotune = 1;
217 hwif->drives[i].autodma = hwif->autodma;
218 }
219} 215}
220 216
221static ide_pci_device_t cs5535_chipset __devinitdata = { 217static ide_pci_device_t cs5535_chipset __devinitdata = {
@@ -223,7 +219,7 @@ static ide_pci_device_t cs5535_chipset __devinitdata = {
223 .init_hwif = init_hwif_cs5535, 219 .init_hwif = init_hwif_cs5535,
224 .autodma = AUTODMA, 220 .autodma = AUTODMA,
225 .bootable = ON_BOARD, 221 .bootable = ON_BOARD,
226 .host_flags = IDE_HFLAG_SINGLE, 222 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
227 .pio_mask = ATA_PIO4, 223 .pio_mask = ATA_PIO4,
228}; 224};
229 225
diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
index a1bb10188f..218852aaf2 100644
--- a/drivers/ide/pci/hpt34x.c
+++ b/drivers/ide/pci/hpt34x.c
@@ -43,7 +43,7 @@
43 43
44#define HPT343_DEBUG_DRIVE_INFO 0 44#define HPT343_DEBUG_DRIVE_INFO 0
45 45
46static int hpt34x_tune_chipset(ide_drive_t *drive, const u8 speed) 46static void hpt34x_set_mode(ide_drive_t *drive, const u8 speed)
47{ 47{
48 struct pci_dev *dev = HWIF(drive)->pci_dev; 48 struct pci_dev *dev = HWIF(drive)->pci_dev;
49 u32 reg1= 0, tmp1 = 0, reg2 = 0, tmp2 = 0; 49 u32 reg1= 0, tmp1 = 0, reg2 = 0, tmp2 = 0;
@@ -73,13 +73,11 @@ static int hpt34x_tune_chipset(ide_drive_t *drive, const u8 speed)
73 drive->dn, reg1, tmp1, reg2, tmp2, 73 drive->dn, reg1, tmp1, reg2, tmp2,
74 hi_speed, lo_speed); 74 hi_speed, lo_speed);
75#endif /* HPT343_DEBUG_DRIVE_INFO */ 75#endif /* HPT343_DEBUG_DRIVE_INFO */
76
77 return(ide_config_drive_speed(drive, speed));
78} 76}
79 77
80static void hpt34x_set_pio_mode(ide_drive_t *drive, const u8 pio) 78static void hpt34x_set_pio_mode(ide_drive_t *drive, const u8 pio)
81{ 79{
82 (void) hpt34x_tune_chipset(drive, (XFER_PIO_0 + pio)); 80 hpt34x_set_mode(drive, XFER_PIO_0 + pio);
83} 81}
84 82
85static int hpt34x_config_drive_xfer_rate (ide_drive_t *drive) 83static int hpt34x_config_drive_xfer_rate (ide_drive_t *drive)
@@ -145,7 +143,8 @@ static void __devinit init_hwif_hpt34x(ide_hwif_t *hwif)
145 hwif->autodma = 0; 143 hwif->autodma = 0;
146 144
147 hwif->set_pio_mode = &hpt34x_set_pio_mode; 145 hwif->set_pio_mode = &hpt34x_set_pio_mode;
148 hwif->speedproc = &hpt34x_tune_chipset; 146 hwif->set_dma_mode = &hpt34x_set_mode;
147
149 hwif->drives[0].autotune = 1; 148 hwif->drives[0].autotune = 1;
150 hwif->drives[1].autotune = 1; 149 hwif->drives[1].autotune = 1;
151 150
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 0e7d3b60d4..8812a9bb03 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -600,7 +600,7 @@ static u32 get_speed_setting(u8 speed, struct hpt_info *info)
600 return (*info->settings)[i]; 600 return (*info->settings)[i];
601} 601}
602 602
603static int hpt36x_tune_chipset(ide_drive_t *drive, const u8 speed) 603static void hpt36x_set_mode(ide_drive_t *drive, const u8 speed)
604{ 604{
605 ide_hwif_t *hwif = HWIF(drive); 605 ide_hwif_t *hwif = HWIF(drive);
606 struct pci_dev *dev = hwif->pci_dev; 606 struct pci_dev *dev = hwif->pci_dev;
@@ -623,11 +623,9 @@ static int hpt36x_tune_chipset(ide_drive_t *drive, const u8 speed)
623 new_itr &= ~0xc0000000; 623 new_itr &= ~0xc0000000;
624 624
625 pci_write_config_dword(dev, itr_addr, new_itr); 625 pci_write_config_dword(dev, itr_addr, new_itr);
626
627 return ide_config_drive_speed(drive, speed);
628} 626}
629 627
630static int hpt37x_tune_chipset(ide_drive_t *drive, const u8 speed) 628static void hpt37x_set_mode(ide_drive_t *drive, const u8 speed)
631{ 629{
632 ide_hwif_t *hwif = HWIF(drive); 630 ide_hwif_t *hwif = HWIF(drive);
633 struct pci_dev *dev = hwif->pci_dev; 631 struct pci_dev *dev = hwif->pci_dev;
@@ -647,24 +645,22 @@ static int hpt37x_tune_chipset(ide_drive_t *drive, const u8 speed)
647 if (speed < XFER_MW_DMA_0) 645 if (speed < XFER_MW_DMA_0)
648 new_itr &= ~0x80000000; /* Disable on-chip PIO FIFO/buffer */ 646 new_itr &= ~0x80000000; /* Disable on-chip PIO FIFO/buffer */
649 pci_write_config_dword(dev, itr_addr, new_itr); 647 pci_write_config_dword(dev, itr_addr, new_itr);
650
651 return ide_config_drive_speed(drive, speed);
652} 648}
653 649
654static int hpt3xx_tune_chipset(ide_drive_t *drive, u8 speed) 650static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed)
655{ 651{
656 ide_hwif_t *hwif = HWIF(drive); 652 ide_hwif_t *hwif = HWIF(drive);
657 struct hpt_info *info = pci_get_drvdata(hwif->pci_dev); 653 struct hpt_info *info = pci_get_drvdata(hwif->pci_dev);
658 654
659 if (info->chip_type >= HPT370) 655 if (info->chip_type >= HPT370)
660 return hpt37x_tune_chipset(drive, speed); 656 hpt37x_set_mode(drive, speed);
661 else /* hpt368: hpt_minimum_revision(dev, 2) */ 657 else /* hpt368: hpt_minimum_revision(dev, 2) */
662 return hpt36x_tune_chipset(drive, speed); 658 hpt36x_set_mode(drive, speed);
663} 659}
664 660
665static void hpt3xx_set_pio_mode(ide_drive_t *drive, const u8 pio) 661static void hpt3xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
666{ 662{
667 (void) hpt3xx_tune_chipset (drive, XFER_PIO_0 + pio); 663 hpt3xx_set_mode(drive, XFER_PIO_0 + pio);
668} 664}
669 665
670static int hpt3xx_quirkproc(ide_drive_t *drive) 666static int hpt3xx_quirkproc(ide_drive_t *drive)
@@ -1257,7 +1253,7 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1257 hwif->select_data = hwif->channel ? 0x54 : 0x50; 1253 hwif->select_data = hwif->channel ? 0x54 : 0x50;
1258 1254
1259 hwif->set_pio_mode = &hpt3xx_set_pio_mode; 1255 hwif->set_pio_mode = &hpt3xx_set_pio_mode;
1260 hwif->speedproc = &hpt3xx_tune_chipset; 1256 hwif->set_dma_mode = &hpt3xx_set_mode;
1261 hwif->quirkproc = &hpt3xx_quirkproc; 1257 hwif->quirkproc = &hpt3xx_quirkproc;
1262 hwif->intrproc = &hpt3xx_intrproc; 1258 hwif->intrproc = &hpt3xx_intrproc;
1263 hwif->maskproc = &hpt3xx_maskproc; 1259 hwif->maskproc = &hpt3xx_maskproc;
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c
index 76e91ff942..ecf4ce078d 100644
--- a/drivers/ide/pci/it8213.c
+++ b/drivers/ide/pci/it8213.c
@@ -48,15 +48,15 @@ static u8 it8213_dma_2_pio (u8 xfer_rate) {
48 } 48 }
49} 49}
50 50
51/* 51/**
52 * it8213_tune_pio - tune a drive 52 * it8213_set_pio_mode - set host controller for PIO mode
53 * @drive: drive to tune 53 * @drive: drive
54 * @pio: desired PIO mode 54 * @pio: PIO mode number
55 * 55 *
56 * Set the interface PIO mode. 56 * Set the interface PIO mode.
57 */ 57 */
58 58
59static void it8213_tune_pio(ide_drive_t *drive, const u8 pio) 59static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio)
60{ 60{
61 ide_hwif_t *hwif = HWIF(drive); 61 ide_hwif_t *hwif = HWIF(drive);
62 struct pci_dev *dev = hwif->pci_dev; 62 struct pci_dev *dev = hwif->pci_dev;
@@ -105,21 +105,15 @@ static void it8213_tune_pio(ide_drive_t *drive, const u8 pio)
105 spin_unlock_irqrestore(&tune_lock, flags); 105 spin_unlock_irqrestore(&tune_lock, flags);
106} 106}
107 107
108static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio)
109{
110 it8213_tune_pio(drive, pio);
111 ide_config_drive_speed(drive, XFER_PIO_0 + pio);
112}
113
114/** 108/**
115 * it8213_tune_chipset - set controller timings 109 * it8213_set_dma_mode - set host controller for DMA mode
116 * @drive: Drive to set up 110 * @drive: drive
117 * @speed: speed we want to achieve 111 * @speed: DMA mode
118 * 112 *
119 * Tune the ITE chipset for the desired mode. 113 * Tune the ITE chipset for the DMA mode.
120 */ 114 */
121 115
122static int it8213_tune_chipset(ide_drive_t *drive, const u8 speed) 116static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
123{ 117{
124 ide_hwif_t *hwif = HWIF(drive); 118 ide_hwif_t *hwif = HWIF(drive);
125 struct pci_dev *dev = hwif->pci_dev; 119 struct pci_dev *dev = hwif->pci_dev;
@@ -152,7 +146,7 @@ static int it8213_tune_chipset(ide_drive_t *drive, const u8 speed)
152 case XFER_SW_DMA_2: 146 case XFER_SW_DMA_2:
153 break; 147 break;
154 default: 148 default:
155 return -1; 149 return;
156 } 150 }
157 151
158 if (speed >= XFER_UDMA_0) { 152 if (speed >= XFER_UDMA_0) {
@@ -182,9 +176,7 @@ static int it8213_tune_chipset(ide_drive_t *drive, const u8 speed)
182 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); 176 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
183 } 177 }
184 178
185 it8213_tune_pio(drive, it8213_dma_2_pio(speed)); 179 it8213_set_pio_mode(drive, it8213_dma_2_pio(speed));
186
187 return ide_config_drive_speed(drive, speed);
188} 180}
189 181
190/** 182/**
@@ -220,7 +212,7 @@ static void __devinit init_hwif_it8213(ide_hwif_t *hwif)
220{ 212{
221 u8 reg42h = 0; 213 u8 reg42h = 0;
222 214
223 hwif->speedproc = &it8213_tune_chipset; 215 hwif->set_dma_mode = &it8213_set_dma_mode;
224 hwif->set_pio_mode = &it8213_set_pio_mode; 216 hwif->set_pio_mode = &it8213_set_pio_mode;
225 217
226 hwif->autodma = 0; 218 hwif->autodma = 0;
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 758a98230c..1b69d82478 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -229,24 +229,24 @@ static void it821x_clock_strategy(ide_drive_t *drive)
229} 229}
230 230
231/** 231/**
232 * it821x_tunepio - tune a drive 232 * it821x_set_pio_mode - set host controller for PIO mode
233 * @drive: drive to tune 233 * @drive: drive
234 * @pio: the desired PIO mode 234 * @pio: PIO mode number
235 * 235 *
236 * Try to tune the drive/host to the desired PIO mode taking into 236 * Tune the host to the desired PIO mode taking into the consideration
237 * the consideration the maximum PIO mode supported by the other 237 * the maximum PIO mode supported by the other device on the cable.
238 * device on the cable.
239 */ 238 */
240 239
241static int it821x_tunepio(ide_drive_t *drive, u8 set_pio) 240static void it821x_set_pio_mode(ide_drive_t *drive, const u8 pio)
242{ 241{
243 ide_hwif_t *hwif = drive->hwif; 242 ide_hwif_t *hwif = drive->hwif;
244 struct it821x_dev *itdev = ide_get_hwifdata(hwif); 243 struct it821x_dev *itdev = ide_get_hwifdata(hwif);
245 int unit = drive->select.b.unit; 244 int unit = drive->select.b.unit;
246 ide_drive_t *pair = &hwif->drives[1 - unit]; 245 ide_drive_t *pair = &hwif->drives[1 - unit];
246 u8 set_pio = pio;
247 247
248 /* Spec says 89 ref driver uses 88 */ 248 /* Spec says 89 ref driver uses 88 */
249 static u16 pio[] = { 0xAA88, 0xA382, 0xA181, 0x3332, 0x3121 }; 249 static u16 pio_timings[]= { 0xAA88, 0xA382, 0xA181, 0x3332, 0x3121 };
250 static u8 pio_want[] = { ATA_66, ATA_66, ATA_66, ATA_66, ATA_ANY }; 250 static u8 pio_want[] = { ATA_66, ATA_66, ATA_66, ATA_66, ATA_ANY };
251 251
252 /* 252 /*
@@ -261,22 +261,12 @@ static int it821x_tunepio(ide_drive_t *drive, u8 set_pio)
261 set_pio = pair_pio; 261 set_pio = pair_pio;
262 } 262 }
263 263
264 if (itdev->smart)
265 return 0;
266
267 /* We prefer 66Mhz clock for PIO 0-3, don't care for PIO4 */ 264 /* We prefer 66Mhz clock for PIO 0-3, don't care for PIO4 */
268 itdev->want[unit][1] = pio_want[set_pio]; 265 itdev->want[unit][1] = pio_want[set_pio];
269 itdev->want[unit][0] = 1; /* PIO is lowest priority */ 266 itdev->want[unit][0] = 1; /* PIO is lowest priority */
270 itdev->pio[unit] = pio[set_pio]; 267 itdev->pio[unit] = pio_timings[set_pio];
271 it821x_clock_strategy(drive); 268 it821x_clock_strategy(drive);
272 it821x_program(drive, itdev->pio[unit]); 269 it821x_program(drive, itdev->pio[unit]);
273
274 return ide_config_drive_speed(drive, XFER_PIO_0 + set_pio);
275}
276
277static void it821x_set_pio_mode(ide_drive_t *drive, const u8 pio)
278{
279 (void)it821x_tunepio(drive, pio);
280} 270}
281 271
282/** 272/**
@@ -405,47 +395,24 @@ static int it821x_dma_end(ide_drive_t *drive)
405} 395}
406 396
407/** 397/**
408 * it821x_tune_chipset - set controller timings 398 * it821x_set_dma_mode - set host controller for DMA mode
409 * @drive: Drive to set up 399 * @drive: drive
410 * @speed: speed we want to achieve 400 * @speed: DMA mode
411 * 401 *
412 * Tune the ITE chipset for the desired mode. 402 * Tune the ITE chipset for the desired DMA mode.
413 */ 403 */
414 404
415static int it821x_tune_chipset(ide_drive_t *drive, const u8 speed) 405static void it821x_set_dma_mode(ide_drive_t *drive, const u8 speed)
416{ 406{
417 407 /*
418 ide_hwif_t *hwif = drive->hwif; 408 * MWDMA tuning is really hard because our MWDMA and PIO
419 struct it821x_dev *itdev = ide_get_hwifdata(hwif); 409 * timings are kept in the same place. We can switch in the
420 410 * host dma on/off callbacks.
421 if (itdev->smart == 0) { 411 */
422 switch (speed) { 412 if (speed >= XFER_UDMA_0 && speed <= XFER_UDMA_6)
423 /* MWDMA tuning is really hard because our MWDMA and PIO 413 it821x_tune_udma(drive, speed - XFER_UDMA_0);
424 timings are kept in the same place. We can switch in the 414 else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
425 host dma on/off callbacks */ 415 it821x_tune_mwdma(drive, speed - XFER_MW_DMA_0);
426 case XFER_MW_DMA_2:
427 case XFER_MW_DMA_1:
428 case XFER_MW_DMA_0:
429 it821x_tune_mwdma(drive, (speed - XFER_MW_DMA_0));
430 break;
431 case XFER_UDMA_6:
432 case XFER_UDMA_5:
433 case XFER_UDMA_4:
434 case XFER_UDMA_3:
435 case XFER_UDMA_2:
436 case XFER_UDMA_1:
437 case XFER_UDMA_0:
438 it821x_tune_udma(drive, (speed - XFER_UDMA_0));
439 break;
440 default:
441 return 1;
442 }
443
444 return ide_config_drive_speed(drive, speed);
445 }
446
447 /* don't touch anything in the smart mode */
448 return 0;
449} 416}
450 417
451/** 418/**
@@ -629,14 +596,15 @@ static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
629 printk(KERN_WARNING "it821x: Revision 0x10, workarounds activated.\n"); 596 printk(KERN_WARNING "it821x: Revision 0x10, workarounds activated.\n");
630 } 597 }
631 598
632 hwif->speedproc = &it821x_tune_chipset; 599 if (idev->smart == 0) {
633 hwif->set_pio_mode = &it821x_set_pio_mode; 600 hwif->set_pio_mode = &it821x_set_pio_mode;
601 hwif->set_dma_mode = &it821x_set_dma_mode;
634 602
635 /* MWDMA/PIO clock switching for pass through mode */ 603 /* MWDMA/PIO clock switching for pass through mode */
636 if(!idev->smart) {
637 hwif->dma_start = &it821x_dma_start; 604 hwif->dma_start = &it821x_dma_start;
638 hwif->ide_dma_end = &it821x_dma_end; 605 hwif->ide_dma_end = &it821x_dma_end;
639 } 606 } else
607 hwif->host_flags |= IDE_HFLAG_NO_SET_MODE;
640 608
641 hwif->drives[0].autotune = 1; 609 hwif->drives[0].autotune = 1;
642 hwif->drives[1].autotune = 1; 610 hwif->drives[1].autotune = 1;
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c
index d379fbaf67..582b4cae2b 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/pci/jmicron.c
@@ -85,21 +85,18 @@ static u8 __devinit ata66_jmicron(ide_hwif_t *hwif)
85 85
86static void jmicron_set_pio_mode(ide_drive_t *drive, const u8 pio) 86static void jmicron_set_pio_mode(ide_drive_t *drive, const u8 pio)
87{ 87{
88 ide_config_drive_speed(drive, XFER_PIO_0 + pio);
89} 88}
90 89
91/** 90/**
92 * jmicron_tune_chipset - set controller timings 91 * jmicron_set_dma_mode - set host controller for DMA mode
93 * @drive: Drive to set up 92 * @drive: drive
94 * @speed: speed we want to achieve 93 * @mode: DMA mode
95 * 94 *
96 * As the JMicron snoops for timings all we actually need to do is 95 * As the JMicron snoops for timings we don't need to do anything here.
97 * set the transfer mode on the device.
98 */ 96 */
99 97
100static int jmicron_tune_chipset(ide_drive_t *drive, const u8 speed) 98static void jmicron_set_dma_mode(ide_drive_t *drive, const u8 mode)
101{ 99{
102 return ide_config_drive_speed(drive, speed);
103} 100}
104 101
105/** 102/**
@@ -129,8 +126,8 @@ static int jmicron_config_drive_for_dma (ide_drive_t *drive)
129 126
130static void __devinit init_hwif_jmicron(ide_hwif_t *hwif) 127static void __devinit init_hwif_jmicron(ide_hwif_t *hwif)
131{ 128{
132 hwif->speedproc = &jmicron_tune_chipset;
133 hwif->set_pio_mode = &jmicron_set_pio_mode; 129 hwif->set_pio_mode = &jmicron_set_pio_mode;
130 hwif->set_dma_mode = &jmicron_set_dma_mode;
134 131
135 hwif->drives[0].autotune = 1; 132 hwif->drives[0].autotune = 1;
136 hwif->drives[1].autotune = 1; 133 hwif->drives[1].autotune = 1;
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 5fb1eedc81..ad0bdcb0c0 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -146,19 +146,16 @@ static struct udma_timing {
146 { 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */ 146 { 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */
147}; 147};
148 148
149static int pdcnew_tune_chipset(ide_drive_t *drive, const u8 speed) 149static void pdcnew_set_mode(ide_drive_t *drive, const u8 speed)
150{ 150{
151 ide_hwif_t *hwif = HWIF(drive); 151 ide_hwif_t *hwif = HWIF(drive);
152 u8 adj = (drive->dn & 1) ? 0x08 : 0x00; 152 u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
153 int err;
154 153
155 /* 154 /*
156 * Issue SETFEATURES_XFER to the drive first. PDC202xx hardware will 155 * IDE core issues SETFEATURES_XFER to the drive first (thanks to
156 * IDE_HFLAG_POST_SET_MODE in ->host_flags). PDC202xx hardware will
157 * automatically set the timing registers based on 100 MHz PLL output. 157 * automatically set the timing registers based on 100 MHz PLL output.
158 */ 158 *
159 err = ide_config_drive_speed(drive, speed);
160
161 /*
162 * As we set up the PLL to output 133 MHz for UltraDMA/133 capable 159 * As we set up the PLL to output 133 MHz for UltraDMA/133 capable
163 * chips, we must override the default register settings... 160 * chips, we must override the default register settings...
164 */ 161 */
@@ -211,13 +208,11 @@ static int pdcnew_tune_chipset(ide_drive_t *drive, const u8 speed)
211 208
212 set_indexed_reg(hwif, 0x10 + adj, tmp & 0x7f); 209 set_indexed_reg(hwif, 0x10 + adj, tmp & 0x7f);
213 } 210 }
214
215 return err;
216} 211}
217 212
218static void pdcnew_set_pio_mode(ide_drive_t *drive, const u8 pio) 213static void pdcnew_set_pio_mode(ide_drive_t *drive, const u8 pio)
219{ 214{
220 (void)pdcnew_tune_chipset(drive, XFER_PIO_0 + pio); 215 pdcnew_set_mode(drive, XFER_PIO_0 + pio);
221} 216}
222 217
223static u8 pdcnew_cable_detect(ide_hwif_t *hwif) 218static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
@@ -490,9 +485,9 @@ static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
490 hwif->autodma = 0; 485 hwif->autodma = 0;
491 486
492 hwif->set_pio_mode = &pdcnew_set_pio_mode; 487 hwif->set_pio_mode = &pdcnew_set_pio_mode;
488 hwif->set_dma_mode = &pdcnew_set_mode;
493 489
494 hwif->quirkproc = &pdcnew_quirkproc; 490 hwif->quirkproc = &pdcnew_quirkproc;
495 hwif->speedproc = &pdcnew_tune_chipset;
496 hwif->resetproc = &pdcnew_reset; 491 hwif->resetproc = &pdcnew_reset;
497 492
498 hwif->err_stops_fifo = 1; 493 hwif->err_stops_fifo = 1;
@@ -583,6 +578,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
583 .bootable = OFF_BOARD, 578 .bootable = OFF_BOARD,
584 .pio_mask = ATA_PIO4, 579 .pio_mask = ATA_PIO4,
585 .udma_mask = 0x3f, /* udma0-5 */ 580 .udma_mask = 0x3f, /* udma0-5 */
581 .host_flags = IDE_HFLAG_POST_SET_MODE,
586 },{ /* 1 */ 582 },{ /* 1 */
587 .name = "PDC20269", 583 .name = "PDC20269",
588 .init_setup = init_setup_pdcnew, 584 .init_setup = init_setup_pdcnew,
@@ -592,6 +588,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
592 .bootable = OFF_BOARD, 588 .bootable = OFF_BOARD,
593 .pio_mask = ATA_PIO4, 589 .pio_mask = ATA_PIO4,
594 .udma_mask = 0x7f, /* udma0-6*/ 590 .udma_mask = 0x7f, /* udma0-6*/
591 .host_flags = IDE_HFLAG_POST_SET_MODE,
595 },{ /* 2 */ 592 },{ /* 2 */
596 .name = "PDC20270", 593 .name = "PDC20270",
597 .init_setup = init_setup_pdc20270, 594 .init_setup = init_setup_pdc20270,
@@ -601,6 +598,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
601 .bootable = OFF_BOARD, 598 .bootable = OFF_BOARD,
602 .pio_mask = ATA_PIO4, 599 .pio_mask = ATA_PIO4,
603 .udma_mask = 0x3f, /* udma0-5 */ 600 .udma_mask = 0x3f, /* udma0-5 */
601 .host_flags = IDE_HFLAG_POST_SET_MODE,
604 },{ /* 3 */ 602 },{ /* 3 */
605 .name = "PDC20271", 603 .name = "PDC20271",
606 .init_setup = init_setup_pdcnew, 604 .init_setup = init_setup_pdcnew,
@@ -610,6 +608,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
610 .bootable = OFF_BOARD, 608 .bootable = OFF_BOARD,
611 .pio_mask = ATA_PIO4, 609 .pio_mask = ATA_PIO4,
612 .udma_mask = 0x7f, /* udma0-6*/ 610 .udma_mask = 0x7f, /* udma0-6*/
611 .host_flags = IDE_HFLAG_POST_SET_MODE,
613 },{ /* 4 */ 612 },{ /* 4 */
614 .name = "PDC20275", 613 .name = "PDC20275",
615 .init_setup = init_setup_pdcnew, 614 .init_setup = init_setup_pdcnew,
@@ -619,6 +618,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
619 .bootable = OFF_BOARD, 618 .bootable = OFF_BOARD,
620 .pio_mask = ATA_PIO4, 619 .pio_mask = ATA_PIO4,
621 .udma_mask = 0x7f, /* udma0-6*/ 620 .udma_mask = 0x7f, /* udma0-6*/
621 .host_flags = IDE_HFLAG_POST_SET_MODE,
622 },{ /* 5 */ 622 },{ /* 5 */
623 .name = "PDC20276", 623 .name = "PDC20276",
624 .init_setup = init_setup_pdc20276, 624 .init_setup = init_setup_pdc20276,
@@ -628,6 +628,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
628 .bootable = OFF_BOARD, 628 .bootable = OFF_BOARD,
629 .pio_mask = ATA_PIO4, 629 .pio_mask = ATA_PIO4,
630 .udma_mask = 0x7f, /* udma0-6*/ 630 .udma_mask = 0x7f, /* udma0-6*/
631 .host_flags = IDE_HFLAG_POST_SET_MODE,
631 },{ /* 6 */ 632 },{ /* 6 */
632 .name = "PDC20277", 633 .name = "PDC20277",
633 .init_setup = init_setup_pdcnew, 634 .init_setup = init_setup_pdcnew,
@@ -637,6 +638,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
637 .bootable = OFF_BOARD, 638 .bootable = OFF_BOARD,
638 .pio_mask = ATA_PIO4, 639 .pio_mask = ATA_PIO4,
639 .udma_mask = 0x7f, /* udma0-6*/ 640 .udma_mask = 0x7f, /* udma0-6*/
641 .host_flags = IDE_HFLAG_POST_SET_MODE,
640 } 642 }
641}; 643};
642 644
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index b578307fad..8c3e8cf36e 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -63,7 +63,7 @@ static const char *pdc_quirk_drives[] = {
63 63
64static void pdc_old_disable_66MHz_clock(ide_hwif_t *); 64static void pdc_old_disable_66MHz_clock(ide_hwif_t *);
65 65
66static int pdc202xx_tune_chipset(ide_drive_t *drive, const u8 speed) 66static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed)
67{ 67{
68 ide_hwif_t *hwif = HWIF(drive); 68 ide_hwif_t *hwif = HWIF(drive);
69 struct pci_dev *dev = hwif->pci_dev; 69 struct pci_dev *dev = hwif->pci_dev;
@@ -138,13 +138,11 @@ static int pdc202xx_tune_chipset(ide_drive_t *drive, const u8 speed)
138 pci_read_config_dword(dev, drive_pci, &drive_conf); 138 pci_read_config_dword(dev, drive_pci, &drive_conf);
139 printk("0x%08x\n", drive_conf); 139 printk("0x%08x\n", drive_conf);
140#endif 140#endif
141
142 return ide_config_drive_speed(drive, speed);
143} 141}
144 142
145static void pdc202xx_set_pio_mode(ide_drive_t *drive, const u8 pio) 143static void pdc202xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
146{ 144{
147 pdc202xx_tune_chipset(drive, XFER_PIO_0 + pio); 145 pdc202xx_set_mode(drive, XFER_PIO_0 + pio);
148} 146}
149 147
150static u8 pdc202xx_old_cable_detect (ide_hwif_t *hwif) 148static u8 pdc202xx_old_cable_detect (ide_hwif_t *hwif)
@@ -330,14 +328,13 @@ static void __devinit init_hwif_pdc202xx(ide_hwif_t *hwif)
330 hwif->autodma = 0; 328 hwif->autodma = 0;
331 329
332 hwif->set_pio_mode = &pdc202xx_set_pio_mode; 330 hwif->set_pio_mode = &pdc202xx_set_pio_mode;
331 hwif->set_dma_mode = &pdc202xx_set_mode;
333 332
334 hwif->quirkproc = &pdc202xx_quirkproc; 333 hwif->quirkproc = &pdc202xx_quirkproc;
335 334
336 if (hwif->pci_dev->device != PCI_DEVICE_ID_PROMISE_20246) 335 if (hwif->pci_dev->device != PCI_DEVICE_ID_PROMISE_20246)
337 hwif->resetproc = &pdc202xx_reset; 336 hwif->resetproc = &pdc202xx_reset;
338 337
339 hwif->speedproc = &pdc202xx_tune_chipset;
340
341 hwif->err_stops_fifo = 1; 338 hwif->err_stops_fifo = 1;
342 339
343 hwif->drives[0].autotune = hwif->drives[1].autotune = 1; 340 hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index fd8214a7ab..38c91ba649 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -137,13 +137,14 @@ static u8 piix_dma_2_pio (u8 xfer_rate) {
137} 137}
138 138
139/** 139/**
140 * piix_tune_pio - tune PIIX for PIO mode 140 * piix_set_pio_mode - set host controller for PIO mode
141 * @drive: drive to tune 141 * @drive: drive
142 * @pio: desired PIO mode 142 * @pio: PIO mode number
143 * 143 *
144 * Set the interface PIO mode based upon the settings done by AMI BIOS. 144 * Set the interface PIO mode based upon the settings done by AMI BIOS.
145 */ 145 */
146static void piix_tune_pio (ide_drive_t *drive, u8 pio) 146
147static void piix_set_pio_mode(ide_drive_t *drive, const u8 pio)
147{ 148{
148 ide_hwif_t *hwif = HWIF(drive); 149 ide_hwif_t *hwif = HWIF(drive);
149 struct pci_dev *dev = hwif->pci_dev; 150 struct pci_dev *dev = hwif->pci_dev;
@@ -204,31 +205,15 @@ static void piix_tune_pio (ide_drive_t *drive, u8 pio)
204} 205}
205 206
206/** 207/**
207 * piix_set_pio_mode - set PIO mode 208 * piix_set_dma_mode - set host controller for DMA mode
208 * @drive: drive to tune 209 * @drive: drive
209 * @pio: desired PIO mode 210 * @speed: DMA mode
210 *
211 * Set the drive's PIO mode (might be useful if drive is not registered
212 * in CMOS for any reason).
213 */
214
215static void piix_set_pio_mode(ide_drive_t *drive, const u8 pio)
216{
217 piix_tune_pio(drive, pio);
218 (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
219}
220
221/**
222 * piix_tune_chipset - tune a PIIX interface
223 * @drive: IDE drive to tune
224 * @speed: speed to configure
225 * 211 *
226 * Set a PIIX interface channel to the desired speeds. This involves 212 * Set a PIIX host controller to the desired DMA mode. This involves
227 * requires the right timing data into the PIIX configuration space 213 * programming the right timing data into the PCI configuration space.
228 * then setting the drive parameters appropriately
229 */ 214 */
230 215
231static int piix_tune_chipset(ide_drive_t *drive, const u8 speed) 216static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
232{ 217{
233 ide_hwif_t *hwif = HWIF(drive); 218 ide_hwif_t *hwif = HWIF(drive);
234 struct pci_dev *dev = hwif->pci_dev; 219 struct pci_dev *dev = hwif->pci_dev;
@@ -259,7 +244,7 @@ static int piix_tune_chipset(ide_drive_t *drive, const u8 speed)
259 case XFER_MW_DMA_2: 244 case XFER_MW_DMA_2:
260 case XFER_MW_DMA_1: 245 case XFER_MW_DMA_1:
261 case XFER_SW_DMA_2: break; 246 case XFER_SW_DMA_2: break;
262 default: return -1; 247 default: return;
263 } 248 }
264 249
265 if (speed >= XFER_UDMA_0) { 250 if (speed >= XFER_UDMA_0) {
@@ -288,9 +273,7 @@ static int piix_tune_chipset(ide_drive_t *drive, const u8 speed)
288 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); 273 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
289 } 274 }
290 275
291 piix_tune_pio(drive, piix_dma_2_pio(speed)); 276 piix_set_pio_mode(drive, piix_dma_2_pio(speed));
292
293 return ide_config_drive_speed(drive, speed);
294} 277}
295 278
296/** 279/**
@@ -448,7 +431,8 @@ static void __devinit init_hwif_piix(ide_hwif_t *hwif)
448 hwif->autodma = 0; 431 hwif->autodma = 0;
449 432
450 hwif->set_pio_mode = &piix_set_pio_mode; 433 hwif->set_pio_mode = &piix_set_pio_mode;
451 hwif->speedproc = &piix_tune_chipset; 434 hwif->set_dma_mode = &piix_set_dma_mode;
435
452 hwif->drives[0].autotune = 1; 436 hwif->drives[0].autotune = 1;
453 hwif->drives[1].autotune = 1; 437 hwif->drives[1].autotune = 1;
454 438
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c
index 79ecab6894..ee0e3f554d 100644
--- a/drivers/ide/pci/sc1200.c
+++ b/drivers/ide/pci/sc1200.c
@@ -68,17 +68,6 @@ static unsigned short sc1200_get_pci_clock (void)
68 return pci_clock; 68 return pci_clock;
69} 69}
70 70
71extern char *ide_xfer_verbose (byte xfer_rate);
72
73/*
74 * Set a new transfer mode at the drive
75 */
76static int sc1200_set_xfer_mode (ide_drive_t *drive, byte mode)
77{
78 printk("%s: sc1200_set_xfer_mode(%s)\n", drive->name, ide_xfer_verbose(mode));
79 return ide_config_drive_speed(drive, mode);
80}
81
82/* 71/*
83 * Here are the standard PIO mode 0-4 timings for each "format". 72 * Here are the standard PIO mode 0-4 timings for each "format".
84 * Format-0 uses fast data reg timings, with slower command reg timings. 73 * Format-0 uses fast data reg timings, with slower command reg timings.
@@ -138,7 +127,7 @@ out:
138 return mask; 127 return mask;
139} 128}
140 129
141static int sc1200_tune_chipset(ide_drive_t *drive, const u8 mode) 130static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode)
142{ 131{
143 ide_hwif_t *hwif = HWIF(drive); 132 ide_hwif_t *hwif = HWIF(drive);
144 int unit = drive->select.b.unit; 133 int unit = drive->select.b.unit;
@@ -146,17 +135,9 @@ static int sc1200_tune_chipset(ide_drive_t *drive, const u8 mode)
146 unsigned short pci_clock; 135 unsigned short pci_clock;
147 unsigned int basereg = hwif->channel ? 0x50 : 0x40; 136 unsigned int basereg = hwif->channel ? 0x50 : 0x40;
148 137
149 /*
150 * Tell the drive to switch to the new mode; abort on failure.
151 */
152 if (sc1200_set_xfer_mode(drive, mode))
153 return 1; /* failure */
154
155 pci_clock = sc1200_get_pci_clock(); 138 pci_clock = sc1200_get_pci_clock();
156 139
157 /* 140 /*
158 * Now tune the chipset to match the drive:
159 *
160 * Note that each DMA mode has several timings associated with it. 141 * Note that each DMA mode has several timings associated with it.
161 * The correct timing depends on the fast PCI clock freq. 142 * The correct timing depends on the fast PCI clock freq.
162 */ 143 */
@@ -216,8 +197,6 @@ static int sc1200_tune_chipset(ide_drive_t *drive, const u8 mode)
216 } else { 197 } else {
217 pci_write_config_dword(hwif->pci_dev, basereg+12, timings); 198 pci_write_config_dword(hwif->pci_dev, basereg+12, timings);
218 } 199 }
219
220 return 0; /* success */
221} 200}
222 201
223/* 202/*
@@ -286,13 +265,12 @@ static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio)
286 if (mode != -1) { 265 if (mode != -1) {
287 printk("SC1200: %s: changing (U)DMA mode\n", drive->name); 266 printk("SC1200: %s: changing (U)DMA mode\n", drive->name);
288 hwif->dma_off_quietly(drive); 267 hwif->dma_off_quietly(drive);
289 if (sc1200_tune_chipset(drive, mode) == 0) 268 if (ide_set_dma_mode(drive, mode) == 0)
290 hwif->dma_host_on(drive); 269 hwif->dma_host_on(drive);
291 return; 270 return;
292 } 271 }
293 272
294 if (sc1200_set_xfer_mode(drive, XFER_PIO_0 + pio) == 0) 273 sc1200_tunepio(drive, pio);
295 sc1200_tunepio(drive, pio);
296} 274}
297 275
298#ifdef CONFIG_PM 276#ifdef CONFIG_PM
@@ -400,16 +378,20 @@ static void __devinit init_hwif_sc1200 (ide_hwif_t *hwif)
400 if (hwif->mate) 378 if (hwif->mate)
401 hwif->serialized = hwif->mate->serialized = 1; 379 hwif->serialized = hwif->mate->serialized = 1;
402 hwif->autodma = 0; 380 hwif->autodma = 0;
403 if (hwif->dma_base) { 381
404 hwif->udma_filter = sc1200_udma_filter; 382 hwif->set_pio_mode = &sc1200_set_pio_mode;
405 hwif->ide_dma_check = &sc1200_config_dma; 383 hwif->set_dma_mode = &sc1200_set_dma_mode;
406 hwif->ide_dma_end = &sc1200_ide_dma_end; 384
407 if (!noautodma) 385 if (hwif->dma_base == 0)
408 hwif->autodma = 1; 386 return;
409 387
410 hwif->set_pio_mode = &sc1200_set_pio_mode; 388 hwif->udma_filter = sc1200_udma_filter;
411 hwif->speedproc = &sc1200_tune_chipset; 389 hwif->ide_dma_check = &sc1200_config_dma;
412 } 390 hwif->ide_dma_end = &sc1200_ide_dma_end;
391
392 if (!noautodma)
393 hwif->autodma = 1;
394
413 hwif->atapi_dma = 1; 395 hwif->atapi_dma = 1;
414 hwif->ultra_mask = 0x07; 396 hwif->ultra_mask = 0x07;
415 hwif->mwdma_mask = 0x07; 397 hwif->mwdma_mask = 0x07;
@@ -423,7 +405,7 @@ static ide_pci_device_t sc1200_chipset __devinitdata = {
423 .init_hwif = init_hwif_sc1200, 405 .init_hwif = init_hwif_sc1200,
424 .autodma = AUTODMA, 406 .autodma = AUTODMA,
425 .bootable = ON_BOARD, 407 .bootable = ON_BOARD,
426 .host_flags = IDE_HFLAG_ABUSE_DMA_MODES, 408 .host_flags = IDE_HFLAG_ABUSE_DMA_MODES | IDE_HFLAG_POST_SET_MODE,
427 .pio_mask = ATA_PIO4, 409 .pio_mask = ATA_PIO4,
428}; 410};
429 411
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index 66a526e0ec..67f06dd11b 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -190,15 +190,15 @@ scc_ide_outsl(unsigned long port, void *addr, u32 count)
190} 190}
191 191
192/** 192/**
193 * scc_tune_pio - tune a drive PIO mode 193 * scc_set_pio_mode - set host controller for PIO mode
194 * @drive: drive to tune 194 * @drive: drive
195 * @mode_wanted: the target operating mode 195 * @pio: PIO mode number
196 * 196 *
197 * Load the timing settings for this device mode into the 197 * Load the timing settings for this device mode into the
198 * controller. 198 * controller.
199 */ 199 */
200 200
201static void scc_tune_pio(ide_drive_t *drive, const u8 pio) 201static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
202{ 202{
203 ide_hwif_t *hwif = HWIF(drive); 203 ide_hwif_t *hwif = HWIF(drive);
204 struct scc_ports *ports = ide_get_hwifdata(hwif); 204 struct scc_ports *ports = ide_get_hwifdata(hwif);
@@ -221,22 +221,16 @@ static void scc_tune_pio(ide_drive_t *drive, const u8 pio)
221 out_be32((void __iomem *)pioct_port, reg); 221 out_be32((void __iomem *)pioct_port, reg);
222} 222}
223 223
224static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
225{
226 scc_tune_pio(drive, pio);
227 ide_config_drive_speed(drive, XFER_PIO_0 + pio);
228}
229
230/** 224/**
231 * scc_tune_chipset - tune a drive DMA mode 225 * scc_set_dma_mode - set host controller for DMA mode
232 * @drive: Drive to set up 226 * @drive: drive
233 * @speed: speed we want to achieve 227 * @speed: DMA mode
234 * 228 *
235 * Load the timing settings for this device mode into the 229 * Load the timing settings for this device mode into the
236 * controller. 230 * controller.
237 */ 231 */
238 232
239static int scc_tune_chipset(ide_drive_t *drive, const u8 speed) 233static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
240{ 234{
241 ide_hwif_t *hwif = HWIF(drive); 235 ide_hwif_t *hwif = HWIF(drive);
242 struct scc_ports *ports = ide_get_hwifdata(hwif); 236 struct scc_ports *ports = ide_get_hwifdata(hwif);
@@ -271,7 +265,7 @@ static int scc_tune_chipset(ide_drive_t *drive, const u8 speed)
271 idx = speed - XFER_UDMA_0; 265 idx = speed - XFER_UDMA_0;
272 break; 266 break;
273 default: 267 default:
274 return 1; 268 return;
275 } 269 }
276 270
277 jcactsel = JCACTSELtbl[offset][idx]; 271 jcactsel = JCACTSELtbl[offset][idx];
@@ -287,8 +281,6 @@ static int scc_tune_chipset(ide_drive_t *drive, const u8 speed)
287 } 281 }
288 reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]; 282 reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx];
289 out_be32((void __iomem *)udenvt_port, reg); 283 out_be32((void __iomem *)udenvt_port, reg);
290
291 return ide_config_drive_speed(drive, speed);
292} 284}
293 285
294/** 286/**
@@ -708,8 +700,8 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
708 700
709 hwif->dma_setup = scc_dma_setup; 701 hwif->dma_setup = scc_dma_setup;
710 hwif->ide_dma_end = scc_ide_dma_end; 702 hwif->ide_dma_end = scc_ide_dma_end;
711 hwif->speedproc = scc_tune_chipset;
712 hwif->set_pio_mode = scc_set_pio_mode; 703 hwif->set_pio_mode = scc_set_pio_mode;
704 hwif->set_dma_mode = scc_set_dma_mode;
713 hwif->ide_dma_check = scc_config_drive_for_dma; 705 hwif->ide_dma_check = scc_config_drive_for_dma;
714 hwif->ide_dma_test_irq = scc_dma_test_irq; 706 hwif->ide_dma_test_irq = scc_dma_test_irq;
715 hwif->udma_filter = scc_udma_filter; 707 hwif->udma_filter = scc_udma_filter;
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index 0351cf2104..49ec0ac64a 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -124,7 +124,7 @@ static u8 svwks_csb_check (struct pci_dev *dev)
124 return 0; 124 return 0;
125} 125}
126 126
127static void svwks_tune_pio(ide_drive_t *drive, const u8 pio) 127static void svwks_set_pio_mode(ide_drive_t *drive, const u8 pio)
128{ 128{
129 static const u8 pio_modes[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 }; 129 static const u8 pio_modes[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
130 static const u8 drive_pci[] = { 0x41, 0x40, 0x43, 0x42 }; 130 static const u8 drive_pci[] = { 0x41, 0x40, 0x43, 0x42 };
@@ -145,7 +145,7 @@ static void svwks_tune_pio(ide_drive_t *drive, const u8 pio)
145 } 145 }
146} 146}
147 147
148static int svwks_tune_chipset(ide_drive_t *drive, const u8 speed) 148static void svwks_set_dma_mode(ide_drive_t *drive, const u8 speed)
149{ 149{
150 static const u8 udma_modes[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 }; 150 static const u8 udma_modes[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
151 static const u8 dma_modes[] = { 0x77, 0x21, 0x20 }; 151 static const u8 dma_modes[] = { 0x77, 0x21, 0x20 };
@@ -193,14 +193,6 @@ static int svwks_tune_chipset(ide_drive_t *drive, const u8 speed)
193 pci_write_config_byte(dev, drive_pci2[drive->dn], dma_timing); 193 pci_write_config_byte(dev, drive_pci2[drive->dn], dma_timing);
194 pci_write_config_byte(dev, (0x56|hwif->channel), ultra_timing); 194 pci_write_config_byte(dev, (0x56|hwif->channel), ultra_timing);
195 pci_write_config_byte(dev, 0x54, ultra_enable); 195 pci_write_config_byte(dev, 0x54, ultra_enable);
196
197 return (ide_config_drive_speed(drive, speed));
198}
199
200static void svwks_set_pio_mode(ide_drive_t *drive, const u8 pio)
201{
202 svwks_tune_pio(drive, pio);
203 (void)ide_config_drive_speed(drive, XFER_PIO_0 + pio);
204} 196}
205 197
206static int svwks_config_drive_xfer_rate (ide_drive_t *drive) 198static int svwks_config_drive_xfer_rate (ide_drive_t *drive)
@@ -384,7 +376,7 @@ static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
384 hwif->irq = hwif->channel ? 15 : 14; 376 hwif->irq = hwif->channel ? 15 : 14;
385 377
386 hwif->set_pio_mode = &svwks_set_pio_mode; 378 hwif->set_pio_mode = &svwks_set_pio_mode;
387 hwif->speedproc = &svwks_tune_chipset; 379 hwif->set_dma_mode = &svwks_set_dma_mode;
388 hwif->udma_filter = &svwks_udma_filter; 380 hwif->udma_filter = &svwks_udma_filter;
389 381
390 hwif->atapi_dma = 1; 382 hwif->atapi_dma = 1;
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index c292e1de1d..85ffaaa39b 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -291,12 +291,8 @@ static void sgiioc4_dma_off_quietly(ide_drive_t *drive)
291 drive->hwif->dma_host_off(drive); 291 drive->hwif->dma_host_off(drive);
292} 292}
293 293
294static int sgiioc4_speedproc(ide_drive_t *drive, const u8 speed) 294static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed)
295{ 295{
296 if (speed != XFER_MW_DMA_2)
297 return 1;
298
299 return ide_config_drive_speed(drive, speed);
300} 296}
301 297
302static int sgiioc4_ide_dma_check(ide_drive_t *drive) 298static int sgiioc4_ide_dma_check(ide_drive_t *drive)
@@ -591,11 +587,9 @@ static void __devinit
591ide_init_sgiioc4(ide_hwif_t * hwif) 587ide_init_sgiioc4(ide_hwif_t * hwif)
592{ 588{
593 hwif->mmio = 1; 589 hwif->mmio = 1;
594 hwif->atapi_dma = 1;
595 hwif->mwdma_mask = 0x04;
596 hwif->pio_mask = 0x00; 590 hwif->pio_mask = 0x00;
597 hwif->set_pio_mode = NULL; /* Sets timing for PIO mode */ 591 hwif->set_pio_mode = NULL; /* Sets timing for PIO mode */
598 hwif->speedproc = &sgiioc4_speedproc; 592 hwif->set_dma_mode = &sgiioc4_set_dma_mode;
599 hwif->selectproc = NULL;/* Use the default routine to select drive */ 593 hwif->selectproc = NULL;/* Use the default routine to select drive */
600 hwif->reset_poll = NULL;/* No HBA specific reset_poll needed */ 594 hwif->reset_poll = NULL;/* No HBA specific reset_poll needed */
601 hwif->pre_reset = NULL; /* No HBA specific pre_set needed */ 595 hwif->pre_reset = NULL; /* No HBA specific pre_set needed */
@@ -606,6 +600,14 @@ ide_init_sgiioc4(ide_hwif_t * hwif)
606 hwif->quirkproc = NULL; 600 hwif->quirkproc = NULL;
607 hwif->busproc = NULL; 601 hwif->busproc = NULL;
608 602
603 hwif->INB = &sgiioc4_INB;
604
605 if (hwif->dma_base == 0)
606 return;
607
608 hwif->atapi_dma = 1;
609 hwif->mwdma_mask = 0x04;
610
609 hwif->dma_setup = &sgiioc4_ide_dma_setup; 611 hwif->dma_setup = &sgiioc4_ide_dma_setup;
610 hwif->dma_start = &sgiioc4_ide_dma_start; 612 hwif->dma_start = &sgiioc4_ide_dma_start;
611 hwif->ide_dma_end = &sgiioc4_ide_dma_end; 613 hwif->ide_dma_end = &sgiioc4_ide_dma_end;
@@ -617,8 +619,6 @@ ide_init_sgiioc4(ide_hwif_t * hwif)
617 hwif->dma_host_off = &sgiioc4_dma_host_off; 619 hwif->dma_host_off = &sgiioc4_dma_host_off;
618 hwif->dma_lost_irq = &sgiioc4_dma_lost_irq; 620 hwif->dma_lost_irq = &sgiioc4_dma_lost_irq;
619 hwif->dma_timeout = &ide_dma_timeout; 621 hwif->dma_timeout = &ide_dma_timeout;
620
621 hwif->INB = &sgiioc4_INB;
622} 622}
623 623
624static int __devinit 624static int __devinit
@@ -688,8 +688,6 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
688 /* Initializing chipset IRQ Registers */ 688 /* Initializing chipset IRQ Registers */
689 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4)); 689 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
690 690
691 ide_init_sgiioc4(hwif);
692
693 hwif->autodma = 0; 691 hwif->autodma = 0;
694 692
695 if (dma_base && ide_dma_sgiioc4(hwif, dma_base) == 0) { 693 if (dma_base && ide_dma_sgiioc4(hwif, dma_base) == 0) {
@@ -699,6 +697,8 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
699 printk(KERN_INFO "%s: %s Bus-Master DMA disabled\n", 697 printk(KERN_INFO "%s: %s Bus-Master DMA disabled\n",
700 hwif->name, DRV_NAME); 698 hwif->name, DRV_NAME);
701 699
700 ide_init_sgiioc4(hwif);
701
702 if (probe_hwif_init(hwif)) 702 if (probe_hwif_init(hwif))
703 return -EIO; 703 return -EIO;
704 704
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 5d1e5e52a0..ce7784996d 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -165,16 +165,16 @@ out:
165} 165}
166 166
167/** 167/**
168 * sil_tune_pio - tune a drive 168 * sil_set_pio_mode - set host controller for PIO mode
169 * @drive: drive to tune 169 * @drive: drive
170 * @pio: the desired PIO mode 170 * @pio: PIO mode number
171 * 171 *
172 * Load the timing settings for this device mode into the 172 * Load the timing settings for this device mode into the
173 * controller. If we are in PIO mode 3 or 4 turn on IORDY 173 * controller. If we are in PIO mode 3 or 4 turn on IORDY
174 * monitoring (bit 9). The TF timing is bits 31:16 174 * monitoring (bit 9). The TF timing is bits 31:16
175 */ 175 */
176 176
177static void sil_tune_pio(ide_drive_t *drive, u8 pio) 177static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
178{ 178{
179 const u16 tf_speed[] = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 }; 179 const u16 tf_speed[] = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 };
180 const u16 data_speed[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; 180 const u16 data_speed[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
@@ -234,21 +234,15 @@ static void sil_tune_pio(ide_drive_t *drive, u8 pio)
234 } 234 }
235} 235}
236 236
237static void sil_set_pio_mode(ide_drive_t *drive, const u8 pio)
238{
239 sil_tune_pio(drive, pio);
240 (void)ide_config_drive_speed(drive, XFER_PIO_0 + pio);
241}
242
243/** 237/**
244 * siimage_tune_chipset - set controller timings 238 * sil_set_dma_mode - set host controller for DMA mode
245 * @drive: Drive to set up 239 * @drive: drive
246 * @speed: speed we want to achieve 240 * @speed: DMA mode
247 * 241 *
248 * Tune the SII chipset for the desired mode. 242 * Tune the SiI chipset for the desired DMA mode.
249 */ 243 */
250 244
251static int siimage_tune_chipset(ide_drive_t *drive, const u8 speed) 245static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed)
252{ 246{
253 u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 }; 247 u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 };
254 u8 ultra5[] = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 }; 248 u8 ultra5[] = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 };
@@ -303,7 +297,7 @@ static int siimage_tune_chipset(ide_drive_t *drive, const u8 speed)
303 mode |= ((unit) ? 0x30 : 0x03); 297 mode |= ((unit) ? 0x30 : 0x03);
304 break; 298 break;
305 default: 299 default:
306 return 1; 300 return;
307 } 301 }
308 302
309 if (hwif->mmio) { 303 if (hwif->mmio) {
@@ -315,7 +309,6 @@ static int siimage_tune_chipset(ide_drive_t *drive, const u8 speed)
315 pci_write_config_word(hwif->pci_dev, ma, multi); 309 pci_write_config_word(hwif->pci_dev, ma, multi);
316 pci_write_config_word(hwif->pci_dev, ua, ultra); 310 pci_write_config_word(hwif->pci_dev, ua, ultra);
317 } 311 }
318 return (ide_config_drive_speed(drive, speed));
319} 312}
320 313
321/** 314/**
@@ -904,8 +897,8 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
904 hwif->autodma = 0; 897 hwif->autodma = 0;
905 898
906 hwif->resetproc = &siimage_reset; 899 hwif->resetproc = &siimage_reset;
907 hwif->speedproc = &siimage_tune_chipset;
908 hwif->set_pio_mode = &sil_set_pio_mode; 900 hwif->set_pio_mode = &sil_set_pio_mode;
901 hwif->set_dma_mode = &sil_set_dma_mode;
909 hwif->reset_poll = &siimage_reset_poll; 902 hwif->reset_poll = &siimage_reset_poll;
910 hwif->pre_reset = &siimage_pre_reset; 903 hwif->pre_reset = &siimage_pre_reset;
911 hwif->udma_filter = &sil_udma_filter; 904 hwif->udma_filter = &sil_udma_filter;
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index 3e18899de6..b375ee53d6 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -451,7 +451,7 @@ static void config_drive_art_rwp (ide_drive_t *drive)
451} 451}
452 452
453/* Set per-drive active and recovery time */ 453/* Set per-drive active and recovery time */
454static void config_art_rwp_pio (ide_drive_t *drive, u8 pio) 454static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio)
455{ 455{
456 ide_hwif_t *hwif = HWIF(drive); 456 ide_hwif_t *hwif = HWIF(drive);
457 struct pci_dev *dev = hwif->pci_dev; 457 struct pci_dev *dev = hwif->pci_dev;
@@ -519,20 +519,14 @@ static void config_art_rwp_pio (ide_drive_t *drive, u8 pio)
519 } 519 }
520} 520}
521 521
522static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio) 522static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed)
523{
524 config_art_rwp_pio(drive, pio);
525 (void)ide_config_drive_speed(drive, XFER_PIO_0 + pio);
526}
527
528static int sis5513_tune_chipset(ide_drive_t *drive, const u8 speed)
529{ 523{
530 ide_hwif_t *hwif = HWIF(drive); 524 ide_hwif_t *hwif = HWIF(drive);
531 struct pci_dev *dev = hwif->pci_dev; 525 struct pci_dev *dev = hwif->pci_dev;
532 u32 regdw; 526 u32 regdw;
533 u8 drive_pci, reg; 527 u8 drive_pci, reg;
534 528
535 /* See config_art_rwp_pio for drive pci config registers */ 529 /* See sis_set_pio_mode() for drive PCI config registers */
536 drive_pci = 0x40; 530 drive_pci = 0x40;
537 if (chipset_family >= ATA_133) { 531 if (chipset_family >= ATA_133) {
538 u32 reg54h; 532 u32 reg54h;
@@ -600,8 +594,6 @@ static int sis5513_tune_chipset(ide_drive_t *drive, const u8 speed)
600 BUG(); 594 BUG();
601 break; 595 break;
602 } 596 }
603
604 return ide_config_drive_speed(drive, speed);
605} 597}
606 598
607static int sis5513_config_xfer_rate(ide_drive_t *drive) 599static int sis5513_config_xfer_rate(ide_drive_t *drive)
@@ -841,7 +833,7 @@ static void __devinit init_hwif_sis5513 (ide_hwif_t *hwif)
841 hwif->irq = hwif->channel ? 15 : 14; 833 hwif->irq = hwif->channel ? 15 : 14;
842 834
843 hwif->set_pio_mode = &sis_set_pio_mode; 835 hwif->set_pio_mode = &sis_set_pio_mode;
844 hwif->speedproc = &sis5513_tune_chipset; 836 hwif->set_dma_mode = &sis_set_dma_mode;
845 837
846 if (chipset_family >= ATA_133) 838 if (chipset_family >= ATA_133)
847 hwif->udma_filter = sis5513_ata133_udma_filter; 839 hwif->udma_filter = sis5513_ata133_udma_filter;
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
index f492318ba7..2ef26e3f7b 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/pci/sl82c105.c
@@ -75,7 +75,7 @@ static unsigned int get_pio_timings(ide_drive_t *drive, u8 pio)
75/* 75/*
76 * Configure the chipset for PIO mode. 76 * Configure the chipset for PIO mode.
77 */ 77 */
78static void sl82c105_tune_pio(ide_drive_t *drive, const u8 pio) 78static void sl82c105_set_pio_mode(ide_drive_t *drive, const u8 pio)
79{ 79{
80 struct pci_dev *dev = HWIF(drive)->pci_dev; 80 struct pci_dev *dev = HWIF(drive)->pci_dev;
81 int reg = 0x44 + drive->dn * 4; 81 int reg = 0x44 + drive->dn * 4;
@@ -105,9 +105,9 @@ static void sl82c105_tune_pio(ide_drive_t *drive, const u8 pio)
105} 105}
106 106
107/* 107/*
108 * Configure the drive and chipset for a new transfer speed. 108 * Configure the chipset for DMA mode.
109 */ 109 */
110static int sl82c105_tune_chipset(ide_drive_t *drive, const u8 speed) 110static void sl82c105_set_dma_mode(ide_drive_t *drive, const u8 speed)
111{ 111{
112 static u16 mwdma_timings[] = {0x0707, 0x0201, 0x0200}; 112 static u16 mwdma_timings[] = {0x0707, 0x0201, 0x0200};
113 u16 drv_ctrl; 113 u16 drv_ctrl;
@@ -140,10 +140,8 @@ static int sl82c105_tune_chipset(ide_drive_t *drive, const u8 speed)
140 } 140 }
141 break; 141 break;
142 default: 142 default:
143 return -1; 143 return;
144 } 144 }
145
146 return ide_config_drive_speed(drive, speed);
147} 145}
148 146
149/* 147/*
@@ -306,17 +304,6 @@ static void sl82c105_resetproc(ide_drive_t *drive)
306 pci_read_config_dword(dev, 0x40, &val); 304 pci_read_config_dword(dev, 0x40, &val);
307 pci_set_drvdata(dev, (void *)val); 305 pci_set_drvdata(dev, (void *)val);
308} 306}
309
310/*
311 * We only deal with PIO mode here - DMA mode 'using_dma' is not
312 * initialised at the point that this function is called.
313 */
314static void sl82c105_set_pio_mode(ide_drive_t *drive, const u8 pio)
315{
316 sl82c105_tune_pio(drive, pio);
317
318 (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
319}
320 307
321/* 308/*
322 * Return the revision of the Winbond bridge 309 * Return the revision of the Winbond bridge
@@ -383,7 +370,7 @@ static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif)
383 DBG(("init_hwif_sl82c105(hwif: ide%d)\n", hwif->index)); 370 DBG(("init_hwif_sl82c105(hwif: ide%d)\n", hwif->index));
384 371
385 hwif->set_pio_mode = &sl82c105_set_pio_mode; 372 hwif->set_pio_mode = &sl82c105_set_pio_mode;
386 hwif->speedproc = &sl82c105_tune_chipset; 373 hwif->set_dma_mode = &sl82c105_set_dma_mode;
387 hwif->selectproc = &sl82c105_selectproc; 374 hwif->selectproc = &sl82c105_selectproc;
388 hwif->resetproc = &sl82c105_resetproc; 375 hwif->resetproc = &sl82c105_resetproc;
389 376
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index ae8e913245..ebac87f720 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -42,7 +42,7 @@ static u8 slc90e66_dma_2_pio (u8 xfer_rate) {
42 } 42 }
43} 43}
44 44
45static void slc90e66_tune_pio (ide_drive_t *drive, u8 pio) 45static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio)
46{ 46{
47 ide_hwif_t *hwif = HWIF(drive); 47 ide_hwif_t *hwif = HWIF(drive);
48 struct pci_dev *dev = hwif->pci_dev; 48 struct pci_dev *dev = hwif->pci_dev;
@@ -95,13 +95,7 @@ static void slc90e66_tune_pio (ide_drive_t *drive, u8 pio)
95 spin_unlock_irqrestore(&ide_lock, flags); 95 spin_unlock_irqrestore(&ide_lock, flags);
96} 96}
97 97
98static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio) 98static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed)
99{
100 slc90e66_tune_pio(drive, pio);
101 (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
102}
103
104static int slc90e66_tune_chipset(ide_drive_t *drive, const u8 speed)
105{ 99{
106 ide_hwif_t *hwif = HWIF(drive); 100 ide_hwif_t *hwif = HWIF(drive);
107 struct pci_dev *dev = hwif->pci_dev; 101 struct pci_dev *dev = hwif->pci_dev;
@@ -125,7 +119,7 @@ static int slc90e66_tune_chipset(ide_drive_t *drive, const u8 speed)
125 case XFER_MW_DMA_2: 119 case XFER_MW_DMA_2:
126 case XFER_MW_DMA_1: 120 case XFER_MW_DMA_1:
127 case XFER_SW_DMA_2: break; 121 case XFER_SW_DMA_2: break;
128 default: return -1; 122 default: return;
129 } 123 }
130 124
131 if (speed >= XFER_UDMA_0) { 125 if (speed >= XFER_UDMA_0) {
@@ -144,9 +138,7 @@ static int slc90e66_tune_chipset(ide_drive_t *drive, const u8 speed)
144 pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); 138 pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
145 } 139 }
146 140
147 slc90e66_tune_pio(drive, slc90e66_dma_2_pio(speed)); 141 slc90e66_set_pio_mode(drive, slc90e66_dma_2_pio(speed));
148
149 return ide_config_drive_speed(drive, speed);
150} 142}
151 143
152static int slc90e66_config_drive_xfer_rate (ide_drive_t *drive) 144static int slc90e66_config_drive_xfer_rate (ide_drive_t *drive)
@@ -172,8 +164,8 @@ static void __devinit init_hwif_slc90e66 (ide_hwif_t *hwif)
172 if (!hwif->irq) 164 if (!hwif->irq)
173 hwif->irq = hwif->channel ? 15 : 14; 165 hwif->irq = hwif->channel ? 15 : 14;
174 166
175 hwif->speedproc = &slc90e66_tune_chipset;
176 hwif->set_pio_mode = &slc90e66_set_pio_mode; 167 hwif->set_pio_mode = &slc90e66_set_pio_mode;
168 hwif->set_dma_mode = &slc90e66_set_dma_mode;
177 169
178 pci_read_config_byte(hwif->pci_dev, 0x47, &reg47); 170 pci_read_config_byte(hwif->pci_dev, 0x47, &reg47);
179 171
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index e23b9cfb6e..840415d68d 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -13,7 +13,7 @@
13#include <linux/pci.h> 13#include <linux/pci.h>
14#include <linux/ide.h> 14#include <linux/ide.h>
15 15
16static int tc86c001_tune_chipset(ide_drive_t *drive, const u8 speed) 16static void tc86c001_set_mode(ide_drive_t *drive, const u8 speed)
17{ 17{
18 ide_hwif_t *hwif = HWIF(drive); 18 ide_hwif_t *hwif = HWIF(drive);
19 unsigned long scr_port = hwif->config_data + (drive->dn ? 0x02 : 0x00); 19 unsigned long scr_port = hwif->config_data + (drive->dn ? 0x02 : 0x00);
@@ -39,13 +39,11 @@ static int tc86c001_tune_chipset(ide_drive_t *drive, const u8 speed)
39 scr &= (speed < XFER_MW_DMA_0) ? 0xf8ff : 0xff0f; 39 scr &= (speed < XFER_MW_DMA_0) ? 0xf8ff : 0xff0f;
40 scr |= mode; 40 scr |= mode;
41 outw(scr, scr_port); 41 outw(scr, scr_port);
42
43 return ide_config_drive_speed(drive, speed);
44} 42}
45 43
46static void tc86c001_set_pio_mode(ide_drive_t *drive, const u8 pio) 44static void tc86c001_set_pio_mode(ide_drive_t *drive, const u8 pio)
47{ 45{
48 (void) tc86c001_tune_chipset(drive, XFER_PIO_0 + pio); 46 tc86c001_set_mode(drive, XFER_PIO_0 + pio);
49} 47}
50 48
51/* 49/*
@@ -193,7 +191,8 @@ static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
193 hwif->config_data = sc_base; 191 hwif->config_data = sc_base;
194 192
195 hwif->set_pio_mode = &tc86c001_set_pio_mode; 193 hwif->set_pio_mode = &tc86c001_set_pio_mode;
196 hwif->speedproc = &tc86c001_tune_chipset; 194 hwif->set_dma_mode = &tc86c001_set_mode;
195
197 hwif->busproc = &tc86c001_busproc; 196 hwif->busproc = &tc86c001_busproc;
198 197
199 hwif->drives[0].autotune = hwif->drives[1].autotune = 1; 198 hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/pci/triflex.c
index c3ff066eea..54e411d4e5 100644
--- a/drivers/ide/pci/triflex.c
+++ b/drivers/ide/pci/triflex.c
@@ -40,7 +40,7 @@
40#include <linux/ide.h> 40#include <linux/ide.h>
41#include <linux/init.h> 41#include <linux/init.h>
42 42
43static int triflex_tune_chipset(ide_drive_t *drive, const u8 speed) 43static void triflex_set_mode(ide_drive_t *drive, const u8 speed)
44{ 44{
45 ide_hwif_t *hwif = HWIF(drive); 45 ide_hwif_t *hwif = HWIF(drive);
46 struct pci_dev *dev = hwif->pci_dev; 46 struct pci_dev *dev = hwif->pci_dev;
@@ -82,20 +82,18 @@ static int triflex_tune_chipset(ide_drive_t *drive, const u8 speed)
82 timing = 0x0808; 82 timing = 0x0808;
83 break; 83 break;
84 default: 84 default:
85 return -1; 85 return;
86 } 86 }
87 87
88 triflex_timings &= ~(0xFFFF << (16 * unit)); 88 triflex_timings &= ~(0xFFFF << (16 * unit));
89 triflex_timings |= (timing << (16 * unit)); 89 triflex_timings |= (timing << (16 * unit));
90 90
91 pci_write_config_dword(dev, channel_offset, triflex_timings); 91 pci_write_config_dword(dev, channel_offset, triflex_timings);
92
93 return (ide_config_drive_speed(drive, speed));
94} 92}
95 93
96static void triflex_set_pio_mode(ide_drive_t *drive, const u8 pio) 94static void triflex_set_pio_mode(ide_drive_t *drive, const u8 pio)
97{ 95{
98 (void)triflex_tune_chipset(drive, XFER_PIO_0 + pio); 96 triflex_set_mode(drive, XFER_PIO_0 + pio);
99} 97}
100 98
101static int triflex_config_drive_xfer_rate(ide_drive_t *drive) 99static int triflex_config_drive_xfer_rate(ide_drive_t *drive)
@@ -111,7 +109,7 @@ static int triflex_config_drive_xfer_rate(ide_drive_t *drive)
111static void __devinit init_hwif_triflex(ide_hwif_t *hwif) 109static void __devinit init_hwif_triflex(ide_hwif_t *hwif)
112{ 110{
113 hwif->set_pio_mode = &triflex_set_pio_mode; 111 hwif->set_pio_mode = &triflex_set_pio_mode;
114 hwif->speedproc = &triflex_tune_chipset; 112 hwif->set_dma_mode = &triflex_set_mode;
115 113
116 if (hwif->dma_base == 0) 114 if (hwif->dma_base == 0)
117 return; 115 return;
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index 378feb491e..479e496610 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * 2 *
3 * Version 3.48 3 * Version 3.49
4 * 4 *
5 * VIA IDE driver for Linux. Supported southbridges: 5 * VIA IDE driver for Linux. Supported southbridges:
6 * 6 *
@@ -153,21 +153,17 @@ static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing)
153 * @drive: Drive to set up 153 * @drive: Drive to set up
154 * @speed: desired speed 154 * @speed: desired speed
155 * 155 *
156 * via_set_drive() computes timing values configures the drive and 156 * via_set_drive() computes timing values configures the chipset to
157 * the chipset to a desired transfer mode. It also can be called 157 * a desired transfer mode. It also can be called by upper layers.
158 * by upper layers.
159 */ 158 */
160 159
161static int via_set_drive(ide_drive_t *drive, const u8 speed) 160static void via_set_drive(ide_drive_t *drive, const u8 speed)
162{ 161{
163 ide_drive_t *peer = HWIF(drive)->drives + (~drive->dn & 1); 162 ide_drive_t *peer = HWIF(drive)->drives + (~drive->dn & 1);
164 struct via82cxxx_dev *vdev = pci_get_drvdata(drive->hwif->pci_dev); 163 struct via82cxxx_dev *vdev = pci_get_drvdata(drive->hwif->pci_dev);
165 struct ide_timing t, p; 164 struct ide_timing t, p;
166 unsigned int T, UT; 165 unsigned int T, UT;
167 166
168 if (speed != XFER_PIO_SLOW)
169 ide_config_drive_speed(drive, speed);
170
171 T = 1000000000 / via_clock; 167 T = 1000000000 / via_clock;
172 168
173 switch (vdev->via_config->udma_mask) { 169 switch (vdev->via_config->udma_mask) {
@@ -186,16 +182,10 @@ static int via_set_drive(ide_drive_t *drive, const u8 speed)
186 } 182 }
187 183
188 via_set_speed(HWIF(drive), drive->dn, &t); 184 via_set_speed(HWIF(drive), drive->dn, &t);
189
190 if (!drive->init_speed)
191 drive->init_speed = speed;
192 drive->current_speed = speed;
193
194 return 0;
195} 185}
196 186
197/** 187/**
198 * via_set_pio_mode - PIO setup 188 * via_set_pio_mode - set host controller for PIO mode
199 * @drive: drive 189 * @drive: drive
200 * @pio: PIO mode number 190 * @pio: PIO mode number
201 * 191 *
@@ -456,8 +446,7 @@ static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif)
456 hwif->autodma = 0; 446 hwif->autodma = 0;
457 447
458 hwif->set_pio_mode = &via_set_pio_mode; 448 hwif->set_pio_mode = &via_set_pio_mode;
459 hwif->speedproc = &via_set_drive; 449 hwif->set_dma_mode = &via_set_drive;
460
461 450
462#ifdef CONFIG_PPC_CHRP 451#ifdef CONFIG_PPC_CHRP
463 if(machine_is(chrp) && _chrp_type == _CHRP_Pegasos) { 452 if(machine_is(chrp) && _chrp_type == _CHRP_Pegasos) {
@@ -500,7 +489,8 @@ static ide_pci_device_t via82cxxx_chipsets[] __devinitdata = {
500 .enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, 489 .enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}},
501 .bootable = ON_BOARD, 490 .bootable = ON_BOARD,
502 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST 491 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST
503 | IDE_HFLAG_PIO_NO_DOWNGRADE, 492 | IDE_HFLAG_PIO_NO_DOWNGRADE
493 | IDE_HFLAG_POST_SET_MODE,
504 .pio_mask = ATA_PIO5, 494 .pio_mask = ATA_PIO5,
505 },{ /* 1 */ 495 },{ /* 1 */
506 .name = "VP_IDE", 496 .name = "VP_IDE",
@@ -510,7 +500,8 @@ static ide_pci_device_t via82cxxx_chipsets[] __devinitdata = {
510 .enablebits = {{0x00,0x00,0x00}, {0x00,0x00,0x00}}, 500 .enablebits = {{0x00,0x00,0x00}, {0x00,0x00,0x00}},
511 .bootable = ON_BOARD, 501 .bootable = ON_BOARD,
512 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST 502 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST
513 | IDE_HFLAG_PIO_NO_DOWNGRADE, 503 | IDE_HFLAG_PIO_NO_DOWNGRADE
504 | IDE_HFLAG_POST_SET_MODE,
514 .pio_mask = ATA_PIO5, 505 .pio_mask = ATA_PIO5,
515 } 506 }
516}; 507};
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index f759a53978..7d8873839e 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -392,6 +392,7 @@ kauai_lookup_timing(struct kauai_timing* table, int cycle_time)
392 for (i=0; table[i].cycle_time; i++) 392 for (i=0; table[i].cycle_time; i++)
393 if (cycle_time > table[i+1].cycle_time) 393 if (cycle_time > table[i+1].cycle_time)
394 return table[i].timing_reg; 394 return table[i].timing_reg;
395 BUG();
395 return 0; 396 return 0;
396} 397}
397 398
@@ -529,97 +530,12 @@ pmac_outbsync(ide_drive_t *drive, u8 value, unsigned long port)
529} 530}
530 531
531/* 532/*
532 * Send the SET_FEATURE IDE command to the drive and update drive->id with
533 * the new state. We currently don't use the generic routine as it used to
534 * cause various trouble, especially with older mediabays.
535 * This code is sometimes triggering a spurrious interrupt though, I need
536 * to sort that out sooner or later and see if I can finally get the
537 * common version to work properly in all cases
538 */
539static int
540pmac_ide_do_setfeature(ide_drive_t *drive, u8 command)
541{
542 ide_hwif_t *hwif = HWIF(drive);
543 int result = 1;
544
545 disable_irq_nosync(hwif->irq);
546 udelay(1);
547 SELECT_DRIVE(drive);
548 SELECT_MASK(drive, 0);
549 udelay(1);
550 /* Get rid of pending error state */
551 (void) hwif->INB(IDE_STATUS_REG);
552 /* Timeout bumped for some powerbooks */
553 if (wait_for_ready(drive, 2000)) {
554 /* Timeout bumped for some powerbooks */
555 printk(KERN_ERR "%s: pmac_ide_do_setfeature disk not ready "
556 "before SET_FEATURE!\n", drive->name);
557 goto out;
558 }
559 udelay(10);
560 hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
561 hwif->OUTB(command, IDE_NSECTOR_REG);
562 hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG);
563 hwif->OUTBSYNC(drive, WIN_SETFEATURES, IDE_COMMAND_REG);
564 udelay(1);
565 /* Timeout bumped for some powerbooks */
566 result = wait_for_ready(drive, 2000);
567 hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
568 if (result)
569 printk(KERN_ERR "%s: pmac_ide_do_setfeature disk not ready "
570 "after SET_FEATURE !\n", drive->name);
571out:
572 SELECT_MASK(drive, 0);
573 if (result == 0) {
574 drive->id->dma_ultra &= ~0xFF00;
575 drive->id->dma_mword &= ~0x0F00;
576 drive->id->dma_1word &= ~0x0F00;
577 switch(command) {
578 case XFER_UDMA_7:
579 drive->id->dma_ultra |= 0x8080; break;
580 case XFER_UDMA_6:
581 drive->id->dma_ultra |= 0x4040; break;
582 case XFER_UDMA_5:
583 drive->id->dma_ultra |= 0x2020; break;
584 case XFER_UDMA_4:
585 drive->id->dma_ultra |= 0x1010; break;
586 case XFER_UDMA_3:
587 drive->id->dma_ultra |= 0x0808; break;
588 case XFER_UDMA_2:
589 drive->id->dma_ultra |= 0x0404; break;
590 case XFER_UDMA_1:
591 drive->id->dma_ultra |= 0x0202; break;
592 case XFER_UDMA_0:
593 drive->id->dma_ultra |= 0x0101; break;
594 case XFER_MW_DMA_2:
595 drive->id->dma_mword |= 0x0404; break;
596 case XFER_MW_DMA_1:
597 drive->id->dma_mword |= 0x0202; break;
598 case XFER_MW_DMA_0:
599 drive->id->dma_mword |= 0x0101; break;
600 case XFER_SW_DMA_2:
601 drive->id->dma_1word |= 0x0404; break;
602 case XFER_SW_DMA_1:
603 drive->id->dma_1word |= 0x0202; break;
604 case XFER_SW_DMA_0:
605 drive->id->dma_1word |= 0x0101; break;
606 default: break;
607 }
608 if (!drive->init_speed)
609 drive->init_speed = command;
610 drive->current_speed = command;
611 }
612 enable_irq(hwif->irq);
613 return result;
614}
615
616/*
617 * Old tuning functions (called on hdparm -p), sets up drive PIO timings 533 * Old tuning functions (called on hdparm -p), sets up drive PIO timings
618 */ 534 */
619static void 535static void
620pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) 536pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
621{ 537{
622 u32 *timings; 538 u32 *timings, t;
623 unsigned accessTicks, recTicks; 539 unsigned accessTicks, recTicks;
624 unsigned accessTime, recTime; 540 unsigned accessTime, recTime;
625 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 541 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -630,6 +546,7 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
630 546
631 /* which drive is it ? */ 547 /* which drive is it ? */
632 timings = &pmif->timings[drive->select.b.unit & 0x01]; 548 timings = &pmif->timings[drive->select.b.unit & 0x01];
549 t = *timings;
633 550
634 cycle_time = ide_pio_cycle_time(drive, pio); 551 cycle_time = ide_pio_cycle_time(drive, pio);
635 552
@@ -637,18 +554,14 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
637 case controller_sh_ata6: { 554 case controller_sh_ata6: {
638 /* 133Mhz cell */ 555 /* 133Mhz cell */
639 u32 tr = kauai_lookup_timing(shasta_pio_timings, cycle_time); 556 u32 tr = kauai_lookup_timing(shasta_pio_timings, cycle_time);
640 if (tr == 0) 557 t = (t & ~TR_133_PIOREG_PIO_MASK) | tr;
641 return;
642 *timings = ((*timings) & ~TR_133_PIOREG_PIO_MASK) | tr;
643 break; 558 break;
644 } 559 }
645 case controller_un_ata6: 560 case controller_un_ata6:
646 case controller_k2_ata6: { 561 case controller_k2_ata6: {
647 /* 100Mhz cell */ 562 /* 100Mhz cell */
648 u32 tr = kauai_lookup_timing(kauai_pio_timings, cycle_time); 563 u32 tr = kauai_lookup_timing(kauai_pio_timings, cycle_time);
649 if (tr == 0) 564 t = (t & ~TR_100_PIOREG_PIO_MASK) | tr;
650 return;
651 *timings = ((*timings) & ~TR_100_PIOREG_PIO_MASK) | tr;
652 break; 565 break;
653 } 566 }
654 case controller_kl_ata4: 567 case controller_kl_ata4:
@@ -662,9 +575,9 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
662 accessTicks = min(accessTicks, 0x1fU); 575 accessTicks = min(accessTicks, 0x1fU);
663 recTicks = SYSCLK_TICKS_66(recTime); 576 recTicks = SYSCLK_TICKS_66(recTime);
664 recTicks = min(recTicks, 0x1fU); 577 recTicks = min(recTicks, 0x1fU);
665 *timings = ((*timings) & ~TR_66_PIO_MASK) | 578 t = (t & ~TR_66_PIO_MASK) |
666 (accessTicks << TR_66_PIO_ACCESS_SHIFT) | 579 (accessTicks << TR_66_PIO_ACCESS_SHIFT) |
667 (recTicks << TR_66_PIO_RECOVERY_SHIFT); 580 (recTicks << TR_66_PIO_RECOVERY_SHIFT);
668 break; 581 break;
669 default: { 582 default: {
670 /* 33Mhz cell */ 583 /* 33Mhz cell */
@@ -684,11 +597,11 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
684 recTicks--; /* guess, but it's only for PIO0, so... */ 597 recTicks--; /* guess, but it's only for PIO0, so... */
685 ebit = 1; 598 ebit = 1;
686 } 599 }
687 *timings = ((*timings) & ~TR_33_PIO_MASK) | 600 t = (t & ~TR_33_PIO_MASK) |
688 (accessTicks << TR_33_PIO_ACCESS_SHIFT) | 601 (accessTicks << TR_33_PIO_ACCESS_SHIFT) |
689 (recTicks << TR_33_PIO_RECOVERY_SHIFT); 602 (recTicks << TR_33_PIO_RECOVERY_SHIFT);
690 if (ebit) 603 if (ebit)
691 *timings |= TR_33_PIO_E; 604 t |= TR_33_PIO_E;
692 break; 605 break;
693 } 606 }
694 } 607 }
@@ -698,9 +611,7 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
698 drive->name, pio, *timings); 611 drive->name, pio, *timings);
699#endif 612#endif
700 613
701 if (pmac_ide_do_setfeature(drive, XFER_PIO_0 + pio)) 614 *timings = t;
702 return;
703
704 pmac_ide_do_update_timings(drive); 615 pmac_ide_do_update_timings(drive);
705} 616}
706 617
@@ -746,8 +657,6 @@ set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
746 if (speed > XFER_UDMA_5 || t == NULL) 657 if (speed > XFER_UDMA_5 || t == NULL)
747 return 1; 658 return 1;
748 tr = kauai_lookup_timing(kauai_udma_timings, (int)t->udma); 659 tr = kauai_lookup_timing(kauai_udma_timings, (int)t->udma);
749 if (tr == 0)
750 return 1;
751 *ultra_timings = ((*ultra_timings) & ~TR_100_UDMAREG_UDMA_MASK) | tr; 660 *ultra_timings = ((*ultra_timings) & ~TR_100_UDMAREG_UDMA_MASK) | tr;
752 *ultra_timings = (*ultra_timings) | TR_100_UDMAREG_UDMA_EN; 661 *ultra_timings = (*ultra_timings) | TR_100_UDMAREG_UDMA_EN;
753 662
@@ -766,8 +675,6 @@ set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed)
766 if (speed > XFER_UDMA_6 || t == NULL) 675 if (speed > XFER_UDMA_6 || t == NULL)
767 return 1; 676 return 1;
768 tr = kauai_lookup_timing(shasta_udma133_timings, (int)t->udma); 677 tr = kauai_lookup_timing(shasta_udma133_timings, (int)t->udma);
769 if (tr == 0)
770 return 1;
771 *ultra_timings = ((*ultra_timings) & ~TR_133_UDMAREG_UDMA_MASK) | tr; 678 *ultra_timings = ((*ultra_timings) & ~TR_133_UDMAREG_UDMA_MASK) | tr;
772 *ultra_timings = (*ultra_timings) | TR_133_UDMAREG_UDMA_EN; 679 *ultra_timings = (*ultra_timings) | TR_133_UDMAREG_UDMA_EN;
773 680
@@ -777,12 +684,13 @@ set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed)
777/* 684/*
778 * Calculate MDMA timings for all cells 685 * Calculate MDMA timings for all cells
779 */ 686 */
780static int 687static void
781set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2, 688set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
782 u8 speed, int drive_cycle_time) 689 u8 speed)
783{ 690{
784 int cycleTime, accessTime = 0, recTime = 0; 691 int cycleTime, accessTime = 0, recTime = 0;
785 unsigned accessTicks, recTicks; 692 unsigned accessTicks, recTicks;
693 struct hd_driveid *id = drive->id;
786 struct mdma_timings_t* tm = NULL; 694 struct mdma_timings_t* tm = NULL;
787 int i; 695 int i;
788 696
@@ -792,11 +700,14 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
792 case 1: cycleTime = 150; break; 700 case 1: cycleTime = 150; break;
793 case 2: cycleTime = 120; break; 701 case 2: cycleTime = 120; break;
794 default: 702 default:
795 return 1; 703 BUG();
704 break;
796 } 705 }
797 /* Adjust for drive */ 706
798 if (drive_cycle_time && drive_cycle_time > cycleTime) 707 /* Check if drive provides explicit DMA cycle time */
799 cycleTime = drive_cycle_time; 708 if ((id->field_valid & 2) && id->eide_dma_time)
709 cycleTime = max_t(int, id->eide_dma_time, cycleTime);
710
800 /* OHare limits according to some old Apple sources */ 711 /* OHare limits according to some old Apple sources */
801 if ((intf_type == controller_ohare) && (cycleTime < 150)) 712 if ((intf_type == controller_ohare) && (cycleTime < 150))
802 cycleTime = 150; 713 cycleTime = 150;
@@ -824,8 +735,6 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
824 break; 735 break;
825 i++; 736 i++;
826 } 737 }
827 if (i < 0)
828 return 1;
829 cycleTime = tm[i].cycleTime; 738 cycleTime = tm[i].cycleTime;
830 accessTime = tm[i].accessTime; 739 accessTime = tm[i].accessTime;
831 recTime = tm[i].recoveryTime; 740 recTime = tm[i].recoveryTime;
@@ -839,8 +748,6 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
839 case controller_sh_ata6: { 748 case controller_sh_ata6: {
840 /* 133Mhz cell */ 749 /* 133Mhz cell */
841 u32 tr = kauai_lookup_timing(shasta_mdma_timings, cycleTime); 750 u32 tr = kauai_lookup_timing(shasta_mdma_timings, cycleTime);
842 if (tr == 0)
843 return 1;
844 *timings = ((*timings) & ~TR_133_PIOREG_MDMA_MASK) | tr; 751 *timings = ((*timings) & ~TR_133_PIOREG_MDMA_MASK) | tr;
845 *timings2 = (*timings2) & ~TR_133_UDMAREG_UDMA_EN; 752 *timings2 = (*timings2) & ~TR_133_UDMAREG_UDMA_EN;
846 } 753 }
@@ -848,8 +755,6 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
848 case controller_k2_ata6: { 755 case controller_k2_ata6: {
849 /* 100Mhz cell */ 756 /* 100Mhz cell */
850 u32 tr = kauai_lookup_timing(kauai_mdma_timings, cycleTime); 757 u32 tr = kauai_lookup_timing(kauai_mdma_timings, cycleTime);
851 if (tr == 0)
852 return 1;
853 *timings = ((*timings) & ~TR_100_PIOREG_MDMA_MASK) | tr; 758 *timings = ((*timings) & ~TR_100_PIOREG_MDMA_MASK) | tr;
854 *timings2 = (*timings2) & ~TR_100_UDMAREG_UDMA_EN; 759 *timings2 = (*timings2) & ~TR_100_UDMAREG_UDMA_EN;
855 } 760 }
@@ -911,30 +816,23 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
911 printk(KERN_ERR "%s: Set MDMA timing for mode %d, reg: 0x%08x\n", 816 printk(KERN_ERR "%s: Set MDMA timing for mode %d, reg: 0x%08x\n",
912 drive->name, speed & 0xf, *timings); 817 drive->name, speed & 0xf, *timings);
913#endif 818#endif
914 return 0;
915} 819}
916#endif /* #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC */ 820#endif /* #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC */
917 821
918/* 822static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
919 * Speedproc. This function is called by the core to set any of the standard
920 * DMA timing (MDMA or UDMA) to both the drive and the controller.
921 * You may notice we don't use this function on normal "dma check" operation,
922 * our dedicated function is more precise as it uses the drive provided
923 * cycle time value. We should probably fix this one to deal with that too...
924 */
925static int pmac_ide_tune_chipset(ide_drive_t *drive, const u8 speed)
926{ 823{
927 int unit = (drive->select.b.unit & 0x01); 824 int unit = (drive->select.b.unit & 0x01);
928 int ret = 0; 825 int ret = 0;
929 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 826 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
930 u32 *timings, *timings2; 827 u32 *timings, *timings2, tl[2];
931 828
932 if (pmif == NULL)
933 return 1;
934
935 timings = &pmif->timings[unit]; 829 timings = &pmif->timings[unit];
936 timings2 = &pmif->timings[unit+2]; 830 timings2 = &pmif->timings[unit+2];
937 831
832 /* Copy timings to local image */
833 tl[0] = *timings;
834 tl[1] = *timings2;
835
938 switch(speed) { 836 switch(speed) {
939#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC 837#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
940 case XFER_UDMA_6: 838 case XFER_UDMA_6:
@@ -945,38 +843,36 @@ static int pmac_ide_tune_chipset(ide_drive_t *drive, const u8 speed)
945 case XFER_UDMA_1: 843 case XFER_UDMA_1:
946 case XFER_UDMA_0: 844 case XFER_UDMA_0:
947 if (pmif->kind == controller_kl_ata4) 845 if (pmif->kind == controller_kl_ata4)
948 ret = set_timings_udma_ata4(timings, speed); 846 ret = set_timings_udma_ata4(&tl[0], speed);
949 else if (pmif->kind == controller_un_ata6 847 else if (pmif->kind == controller_un_ata6
950 || pmif->kind == controller_k2_ata6) 848 || pmif->kind == controller_k2_ata6)
951 ret = set_timings_udma_ata6(timings, timings2, speed); 849 ret = set_timings_udma_ata6(&tl[0], &tl[1], speed);
952 else if (pmif->kind == controller_sh_ata6) 850 else if (pmif->kind == controller_sh_ata6)
953 ret = set_timings_udma_shasta(timings, timings2, speed); 851 ret = set_timings_udma_shasta(&tl[0], &tl[1], speed);
954 else 852 else
955 ret = 1; 853 ret = 1;
956 break; 854 break;
957 case XFER_MW_DMA_2: 855 case XFER_MW_DMA_2:
958 case XFER_MW_DMA_1: 856 case XFER_MW_DMA_1:
959 case XFER_MW_DMA_0: 857 case XFER_MW_DMA_0:
960 ret = set_timings_mdma(drive, pmif->kind, timings, timings2, speed, 0); 858 set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
961 break; 859 break;
962 case XFER_SW_DMA_2: 860 case XFER_SW_DMA_2:
963 case XFER_SW_DMA_1: 861 case XFER_SW_DMA_1:
964 case XFER_SW_DMA_0: 862 case XFER_SW_DMA_0:
965 return 1; 863 return;
966#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ 864#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
967 default: 865 default:
968 ret = 1; 866 ret = 1;
969 } 867 }
970 if (ret) 868 if (ret)
971 return ret; 869 return;
972 870
973 ret = pmac_ide_do_setfeature(drive, speed); 871 /* Apply timings to controller */
974 if (ret) 872 *timings = tl[0];
975 return ret; 873 *timings2 = tl[1];
976
977 pmac_ide_do_update_timings(drive);
978 874
979 return 0; 875 pmac_ide_do_update_timings(drive);
980} 876}
981 877
982/* 878/*
@@ -1236,6 +1132,10 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
1236 hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40; 1132 hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
1237 hwif->drives[0].unmask = 1; 1133 hwif->drives[0].unmask = 1;
1238 hwif->drives[1].unmask = 1; 1134 hwif->drives[1].unmask = 1;
1135 hwif->drives[0].autotune = IDE_TUNE_AUTO;
1136 hwif->drives[1].autotune = IDE_TUNE_AUTO;
1137 hwif->host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
1138 IDE_HFLAG_POST_SET_MODE;
1239 hwif->pio_mask = ATA_PIO4; 1139 hwif->pio_mask = ATA_PIO4;
1240 hwif->set_pio_mode = pmac_ide_set_pio_mode; 1140 hwif->set_pio_mode = pmac_ide_set_pio_mode;
1241 if (pmif->kind == controller_un_ata6 1141 if (pmif->kind == controller_un_ata6
@@ -1244,7 +1144,7 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
1244 hwif->selectproc = pmac_ide_kauai_selectproc; 1144 hwif->selectproc = pmac_ide_kauai_selectproc;
1245 else 1145 else
1246 hwif->selectproc = pmac_ide_selectproc; 1146 hwif->selectproc = pmac_ide_selectproc;
1247 hwif->speedproc = pmac_ide_tune_chipset; 1147 hwif->set_dma_mode = pmac_ide_set_dma_mode;
1248 1148
1249 printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n", 1149 printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n",
1250 hwif->index, model_name[pmif->kind], pmif->aapl_bus_id, 1150 hwif->index, model_name[pmif->kind], pmif->aapl_bus_id,
@@ -1679,138 +1579,16 @@ pmac_ide_destroy_dmatable (ide_drive_t *drive)
1679} 1579}
1680 1580
1681/* 1581/*
1682 * Pick up best MDMA timing for the drive and apply it
1683 */
1684static int
1685pmac_ide_mdma_enable(ide_drive_t *drive, u16 mode)
1686{
1687 ide_hwif_t *hwif = HWIF(drive);
1688 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1689 int drive_cycle_time;
1690 struct hd_driveid *id = drive->id;
1691 u32 *timings, *timings2;
1692 u32 timing_local[2];
1693 int ret;
1694
1695 /* which drive is it ? */
1696 timings = &pmif->timings[drive->select.b.unit & 0x01];
1697 timings2 = &pmif->timings[(drive->select.b.unit & 0x01) + 2];
1698
1699 /* Check if drive provide explicit cycle time */
1700 if ((id->field_valid & 2) && (id->eide_dma_time))
1701 drive_cycle_time = id->eide_dma_time;
1702 else
1703 drive_cycle_time = 0;
1704
1705 /* Copy timings to local image */
1706 timing_local[0] = *timings;
1707 timing_local[1] = *timings2;
1708
1709 /* Calculate controller timings */
1710 ret = set_timings_mdma( drive, pmif->kind,
1711 &timing_local[0],
1712 &timing_local[1],
1713 mode,
1714 drive_cycle_time);
1715 if (ret)
1716 return 0;
1717
1718 /* Set feature on drive */
1719 printk(KERN_INFO "%s: Enabling MultiWord DMA %d\n", drive->name, mode & 0xf);
1720 ret = pmac_ide_do_setfeature(drive, mode);
1721 if (ret) {
1722 printk(KERN_WARNING "%s: Failed !\n", drive->name);
1723 return 0;
1724 }
1725
1726 /* Apply timings to controller */
1727 *timings = timing_local[0];
1728 *timings2 = timing_local[1];
1729
1730 return 1;
1731}
1732
1733/*
1734 * Pick up best UDMA timing for the drive and apply it
1735 */
1736static int
1737pmac_ide_udma_enable(ide_drive_t *drive, u16 mode)
1738{
1739 ide_hwif_t *hwif = HWIF(drive);
1740 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1741 u32 *timings, *timings2;
1742 u32 timing_local[2];
1743 int ret;
1744
1745 /* which drive is it ? */
1746 timings = &pmif->timings[drive->select.b.unit & 0x01];
1747 timings2 = &pmif->timings[(drive->select.b.unit & 0x01) + 2];
1748
1749 /* Copy timings to local image */
1750 timing_local[0] = *timings;
1751 timing_local[1] = *timings2;
1752
1753 /* Calculate timings for interface */
1754 if (pmif->kind == controller_un_ata6
1755 || pmif->kind == controller_k2_ata6)
1756 ret = set_timings_udma_ata6( &timing_local[0],
1757 &timing_local[1],
1758 mode);
1759 else if (pmif->kind == controller_sh_ata6)
1760 ret = set_timings_udma_shasta( &timing_local[0],
1761 &timing_local[1],
1762 mode);
1763 else
1764 ret = set_timings_udma_ata4(&timing_local[0], mode);
1765 if (ret)
1766 return 0;
1767
1768 /* Set feature on drive */
1769 printk(KERN_INFO "%s: Enabling Ultra DMA %d\n", drive->name, mode & 0x0f);
1770 ret = pmac_ide_do_setfeature(drive, mode);
1771 if (ret) {
1772 printk(KERN_WARNING "%s: Failed !\n", drive->name);
1773 return 0;
1774 }
1775
1776 /* Apply timings to controller */
1777 *timings = timing_local[0];
1778 *timings2 = timing_local[1];
1779
1780 return 1;
1781}
1782
1783/*
1784 * Check what is the best DMA timing setting for the drive and 1582 * Check what is the best DMA timing setting for the drive and
1785 * call appropriate functions to apply it. 1583 * call appropriate functions to apply it.
1786 */ 1584 */
1787static int 1585static int
1788pmac_ide_dma_check(ide_drive_t *drive) 1586pmac_ide_dma_check(ide_drive_t *drive)
1789{ 1587{
1790 struct hd_driveid *id = drive->id; 1588 if (ide_tune_dma(drive))
1791 ide_hwif_t *hwif = HWIF(drive); 1589 return 0;
1792 int enable = 1; 1590
1793 drive->using_dma = 0; 1591 return -1;
1794
1795 if (drive->media == ide_floppy)
1796 enable = 0;
1797 if (((id->capability & 1) == 0) && !__ide_dma_good_drive(drive))
1798 enable = 0;
1799 if (__ide_dma_bad_drive(drive))
1800 enable = 0;
1801
1802 if (enable) {
1803 u8 mode = ide_max_dma_mode(drive);
1804
1805 if (mode >= XFER_UDMA_0)
1806 drive->using_dma = pmac_ide_udma_enable(drive, mode);
1807 else if (mode >= XFER_MW_DMA_0)
1808 drive->using_dma = pmac_ide_mdma_enable(drive, mode);
1809 hwif->OUTB(0, IDE_CONTROL_REG);
1810 /* Apply settings to controller */
1811 pmac_ide_do_update_timings(drive);
1812 }
1813 return 0;
1814} 1592}
1815 1593
1816/* 1594/*
@@ -2044,7 +1822,10 @@ pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
2044 hwif->mwdma_mask = 0x07; 1822 hwif->mwdma_mask = 0x07;
2045 hwif->swdma_mask = 0x00; 1823 hwif->swdma_mask = 0x00;
2046 break; 1824 break;
2047 } 1825 }
1826
1827 hwif->autodma = 1;
1828 hwif->drives[1].autodma = hwif->drives[0].autodma = hwif->autodma;
2048} 1829}
2049 1830
2050#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ 1831#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 2ffd53461d..1939fee616 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -153,8 +153,7 @@ struct host_info {
153}; 153};
154 154
155static int nodemgr_bus_match(struct device * dev, struct device_driver * drv); 155static int nodemgr_bus_match(struct device * dev, struct device_driver * drv);
156static int nodemgr_uevent(struct device *dev, char **envp, int num_envp, 156static int nodemgr_uevent(struct device *dev, struct kobj_uevent_env *env);
157 char *buffer, int buffer_size);
158static void nodemgr_resume_ne(struct node_entry *ne); 157static void nodemgr_resume_ne(struct node_entry *ne);
159static void nodemgr_remove_ne(struct node_entry *ne); 158static void nodemgr_remove_ne(struct node_entry *ne);
160static struct node_entry *find_entry_by_guid(u64 guid); 159static struct node_entry *find_entry_by_guid(u64 guid);
@@ -1160,12 +1159,9 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
1160 1159
1161#ifdef CONFIG_HOTPLUG 1160#ifdef CONFIG_HOTPLUG
1162 1161
1163static int nodemgr_uevent(struct device *dev, char **envp, int num_envp, 1162static int nodemgr_uevent(struct device *dev, struct kobj_uevent_env *env)
1164 char *buffer, int buffer_size)
1165{ 1163{
1166 struct unit_directory *ud; 1164 struct unit_directory *ud;
1167 int i = 0;
1168 int length = 0;
1169 int retval = 0; 1165 int retval = 0;
1170 /* ieee1394:venNmoNspNverN */ 1166 /* ieee1394:venNmoNspNverN */
1171 char buf[8 + 1 + 3 + 8 + 2 + 8 + 2 + 8 + 3 + 8 + 1]; 1167 char buf[8 + 1 + 3 + 8 + 2 + 8 + 2 + 8 + 3 + 8 + 1];
@@ -1180,9 +1176,7 @@ static int nodemgr_uevent(struct device *dev, char **envp, int num_envp,
1180 1176
1181#define PUT_ENVP(fmt,val) \ 1177#define PUT_ENVP(fmt,val) \
1182do { \ 1178do { \
1183 retval = add_uevent_var(envp, num_envp, &i, \ 1179 retval = add_uevent_var(env, fmt, val); \
1184 buffer, buffer_size, &length, \
1185 fmt, val); \
1186 if (retval) \ 1180 if (retval) \
1187 return retval; \ 1181 return retval; \
1188} while (0) 1182} while (0)
@@ -1201,15 +1195,12 @@ do { \
1201 1195
1202#undef PUT_ENVP 1196#undef PUT_ENVP
1203 1197
1204 envp[i] = NULL;
1205
1206 return 0; 1198 return 0;
1207} 1199}
1208 1200
1209#else 1201#else
1210 1202
1211static int nodemgr_uevent(struct device *dev, char **envp, int num_envp, 1203static int nodemgr_uevent(struct device *dev, struct kobj_uevent_env *env)
1212 char *buffer, int buffer_size)
1213{ 1204{
1214 return -ENODEV; 1205 return -ENODEV;
1215} 1206}
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 70b77ae674..3d40506813 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -434,21 +434,18 @@ static void ib_device_release(struct class_device *cdev)
434 kfree(dev); 434 kfree(dev);
435} 435}
436 436
437static int ib_device_uevent(struct class_device *cdev, char **envp, 437static int ib_device_uevent(struct class_device *cdev,
438 int num_envp, char *buf, int size) 438 struct kobj_uevent_env *env)
439{ 439{
440 struct ib_device *dev = container_of(cdev, struct ib_device, class_dev); 440 struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
441 int i = 0, len = 0;
442 441
443 if (add_uevent_var(envp, num_envp, &i, buf, size, &len, 442 if (add_uevent_var(env, "NAME=%s", dev->name))
444 "NAME=%s", dev->name))
445 return -ENOMEM; 443 return -ENOMEM;
446 444
447 /* 445 /*
448 * It would be nice to pass the node GUID with the event... 446 * It would be nice to pass the node GUID with the event...
449 */ 447 */
450 448
451 envp[i] = NULL;
452 return 0; 449 return 0;
453} 450}
454 451
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 5fe7555866..5dc361c954 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -859,87 +859,66 @@ static void input_dev_release(struct device *device)
859 * Input uevent interface - loading event handlers based on 859 * Input uevent interface - loading event handlers based on
860 * device bitfields. 860 * device bitfields.
861 */ 861 */
862static int input_add_uevent_bm_var(char **envp, int num_envp, int *cur_index, 862static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
863 char *buffer, int buffer_size, int *cur_len,
864 const char *name, unsigned long *bitmap, int max) 863 const char *name, unsigned long *bitmap, int max)
865{ 864{
866 if (*cur_index >= num_envp - 1) 865 int len;
867 return -ENOMEM;
868
869 envp[*cur_index] = buffer + *cur_len;
870 866
871 *cur_len += snprintf(buffer + *cur_len, max(buffer_size - *cur_len, 0), name); 867 if (add_uevent_var(env, "%s=", name))
872 if (*cur_len >= buffer_size)
873 return -ENOMEM; 868 return -ENOMEM;
874 869
875 *cur_len += input_print_bitmap(buffer + *cur_len, 870 len = input_print_bitmap(&env->buf[env->buflen - 1],
876 max(buffer_size - *cur_len, 0), 871 sizeof(env->buf) - env->buflen,
877 bitmap, max, 0) + 1; 872 bitmap, max, 0);
878 if (*cur_len > buffer_size) 873 if (len >= (sizeof(env->buf) - env->buflen))
879 return -ENOMEM; 874 return -ENOMEM;
880 875
881 (*cur_index)++; 876 env->buflen += len;
882 return 0; 877 return 0;
883} 878}
884 879
885static int input_add_uevent_modalias_var(char **envp, int num_envp, int *cur_index, 880static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
886 char *buffer, int buffer_size, int *cur_len,
887 struct input_dev *dev) 881 struct input_dev *dev)
888{ 882{
889 if (*cur_index >= num_envp - 1) 883 int len;
890 return -ENOMEM;
891
892 envp[*cur_index] = buffer + *cur_len;
893 884
894 *cur_len += snprintf(buffer + *cur_len, max(buffer_size - *cur_len, 0), 885 if (add_uevent_var(env, "MODALIAS="))
895 "MODALIAS=");
896 if (*cur_len >= buffer_size)
897 return -ENOMEM; 886 return -ENOMEM;
898 887
899 *cur_len += input_print_modalias(buffer + *cur_len, 888 len = input_print_modalias(&env->buf[env->buflen - 1],
900 max(buffer_size - *cur_len, 0), 889 sizeof(env->buf) - env->buflen,
901 dev, 0) + 1; 890 dev, 0);
902 if (*cur_len > buffer_size) 891 if (len >= (sizeof(env->buf) - env->buflen))
903 return -ENOMEM; 892 return -ENOMEM;
904 893
905 (*cur_index)++; 894 env->buflen += len;
906 return 0; 895 return 0;
907} 896}
908 897
909#define INPUT_ADD_HOTPLUG_VAR(fmt, val...) \ 898#define INPUT_ADD_HOTPLUG_VAR(fmt, val...) \
910 do { \ 899 do { \
911 int err = add_uevent_var(envp, num_envp, &i, \ 900 int err = add_uevent_var(env, fmt, val); \
912 buffer, buffer_size, &len, \
913 fmt, val); \
914 if (err) \ 901 if (err) \
915 return err; \ 902 return err; \
916 } while (0) 903 } while (0)
917 904
918#define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max) \ 905#define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max) \
919 do { \ 906 do { \
920 int err = input_add_uevent_bm_var(envp, num_envp, &i, \ 907 int err = input_add_uevent_bm_var(env, name, bm, max); \
921 buffer, buffer_size, &len, \
922 name, bm, max); \
923 if (err) \ 908 if (err) \
924 return err; \ 909 return err; \
925 } while (0) 910 } while (0)
926 911
927#define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev) \ 912#define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev) \
928 do { \ 913 do { \
929 int err = input_add_uevent_modalias_var(envp, \ 914 int err = input_add_uevent_modalias_var(env, dev); \
930 num_envp, &i, \
931 buffer, buffer_size, &len, \
932 dev); \
933 if (err) \ 915 if (err) \
934 return err; \ 916 return err; \
935 } while (0) 917 } while (0)
936 918
937static int input_dev_uevent(struct device *device, char **envp, 919static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
938 int num_envp, char *buffer, int buffer_size)
939{ 920{
940 struct input_dev *dev = to_input_dev(device); 921 struct input_dev *dev = to_input_dev(device);
941 int i = 0;
942 int len = 0;
943 922
944 INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x", 923 INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x",
945 dev->id.bustype, dev->id.vendor, 924 dev->id.bustype, dev->id.vendor,
@@ -971,7 +950,6 @@ static int input_dev_uevent(struct device *device, char **envp,
971 950
972 INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev); 951 INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev);
973 952
974 envp[i] = NULL;
975 return 0; 953 return 0;
976} 954}
977 955
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
index ded1d6ac6f..f948d3a14a 100644
--- a/drivers/input/keyboard/atakbd.c
+++ b/drivers/input/keyboard/atakbd.c
@@ -55,7 +55,140 @@ MODULE_AUTHOR("Michael Schmitz <schmitz@biophys.uni-duesseldorf.de>");
55MODULE_DESCRIPTION("Atari keyboard driver"); 55MODULE_DESCRIPTION("Atari keyboard driver");
56MODULE_LICENSE("GPL"); 56MODULE_LICENSE("GPL");
57 57
58static unsigned char atakbd_keycode[0x72]; 58/*
59 0x47: KP_7 71
60 0x48: KP_8 72
61 0x49: KP_9 73
62 0x62: KP_/ 98
63 0x4b: KP_4 75
64 0x4c: KP_5 76
65 0x4d: KP_6 77
66 0x37: KP_* 55
67 0x4f: KP_1 79
68 0x50: KP_2 80
69 0x51: KP_3 81
70 0x4a: KP_- 74
71 0x52: KP_0 82
72 0x53: KP_. 83
73 0x4e: KP_+ 78
74
75 0x67: Up 103
76 0x6c: Down 108
77 0x69: Left 105
78 0x6a: Right 106
79 */
80
81
82static unsigned char atakbd_keycode[0x72] = { /* American layout */
83 [0] = KEY_GRAVE,
84 [1] = KEY_ESC,
85 [2] = KEY_1,
86 [3] = KEY_2,
87 [4] = KEY_3,
88 [5] = KEY_4,
89 [6] = KEY_5,
90 [7] = KEY_6,
91 [8] = KEY_7,
92 [9] = KEY_8,
93 [10] = KEY_9,
94 [11] = KEY_0,
95 [12] = KEY_MINUS,
96 [13] = KEY_EQUAL,
97 [14] = KEY_BACKSPACE,
98 [15] = KEY_TAB,
99 [16] = KEY_Q,
100 [17] = KEY_W,
101 [18] = KEY_E,
102 [19] = KEY_R,
103 [20] = KEY_T,
104 [21] = KEY_Y,
105 [22] = KEY_U,
106 [23] = KEY_I,
107 [24] = KEY_O,
108 [25] = KEY_P,
109 [26] = KEY_LEFTBRACE,
110 [27] = KEY_RIGHTBRACE,
111 [28] = KEY_ENTER,
112 [29] = KEY_LEFTCTRL,
113 [30] = KEY_A,
114 [31] = KEY_S,
115 [32] = KEY_D,
116 [33] = KEY_F,
117 [34] = KEY_G,
118 [35] = KEY_H,
119 [36] = KEY_J,
120 [37] = KEY_K,
121 [38] = KEY_L,
122 [39] = KEY_SEMICOLON,
123 [40] = KEY_APOSTROPHE,
124 [41] = KEY_BACKSLASH, /* FIXME, '#' */
125 [42] = KEY_LEFTSHIFT,
126 [43] = KEY_GRAVE, /* FIXME: '~' */
127 [44] = KEY_Z,
128 [45] = KEY_X,
129 [46] = KEY_C,
130 [47] = KEY_V,
131 [48] = KEY_B,
132 [49] = KEY_N,
133 [50] = KEY_M,
134 [51] = KEY_COMMA,
135 [52] = KEY_DOT,
136 [53] = KEY_SLASH,
137 [54] = KEY_RIGHTSHIFT,
138 [55] = KEY_KPASTERISK,
139 [56] = KEY_LEFTALT,
140 [57] = KEY_SPACE,
141 [58] = KEY_CAPSLOCK,
142 [59] = KEY_F1,
143 [60] = KEY_F2,
144 [61] = KEY_F3,
145 [62] = KEY_F4,
146 [63] = KEY_F5,
147 [64] = KEY_F6,
148 [65] = KEY_F7,
149 [66] = KEY_F8,
150 [67] = KEY_F9,
151 [68] = KEY_F10,
152 [69] = KEY_ESC,
153 [70] = KEY_DELETE,
154 [71] = KEY_KP7,
155 [72] = KEY_KP8,
156 [73] = KEY_KP9,
157 [74] = KEY_KPMINUS,
158 [75] = KEY_KP4,
159 [76] = KEY_KP5,
160 [77] = KEY_KP6,
161 [78] = KEY_KPPLUS,
162 [79] = KEY_KP1,
163 [80] = KEY_KP2,
164 [81] = KEY_KP3,
165 [82] = KEY_KP0,
166 [83] = KEY_KPDOT,
167 [90] = KEY_KPLEFTPAREN,
168 [91] = KEY_KPRIGHTPAREN,
169 [92] = KEY_KPASTERISK, /* FIXME */
170 [93] = KEY_KPASTERISK,
171 [94] = KEY_KPPLUS,
172 [95] = KEY_HELP,
173 [96] = KEY_BACKSLASH, /* FIXME: '<' */
174 [97] = KEY_KPASTERISK, /* FIXME */
175 [98] = KEY_KPSLASH,
176 [99] = KEY_KPLEFTPAREN,
177 [100] = KEY_KPRIGHTPAREN,
178 [101] = KEY_KPSLASH,
179 [102] = KEY_KPASTERISK,
180 [103] = KEY_UP,
181 [104] = KEY_KPASTERISK, /* FIXME */
182 [105] = KEY_LEFT,
183 [106] = KEY_RIGHT,
184 [107] = KEY_KPASTERISK, /* FIXME */
185 [108] = KEY_DOWN,
186 [109] = KEY_KPASTERISK, /* FIXME */
187 [110] = KEY_KPASTERISK, /* FIXME */
188 [111] = KEY_KPASTERISK, /* FIXME */
189 [112] = KEY_KPASTERISK, /* FIXME */
190 [113] = KEY_KPASTERISK /* FIXME */
191};
59 192
60static struct input_dev *atakbd_dev; 193static struct input_dev *atakbd_dev;
61 194
@@ -86,21 +219,20 @@ static int __init atakbd_init(void)
86{ 219{
87 int i; 220 int i;
88 221
89 if (!ATARIHW_PRESENT(ST_MFP)) 222 if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ST_MFP))
90 return -EIO; 223 return -EIO;
91 224
92 // TODO: request_mem_region if not done in arch code
93
94 if (!(atakbd_dev = input_allocate_device()))
95 return -ENOMEM;
96
97 // need to init core driver if not already done so 225 // need to init core driver if not already done so
98 if (atari_keyb_init()) 226 if (atari_keyb_init())
99 return -ENODEV; 227 return -ENODEV;
100 228
229 atakbd_dev = input_allocate_device();
230 if (!atakbd_dev)
231 return -ENOMEM;
232
101 atakbd_dev->name = "Atari Keyboard"; 233 atakbd_dev->name = "Atari Keyboard";
102 atakbd_dev->phys = "atakbd/input0"; 234 atakbd_dev->phys = "atakbd/input0";
103 atakbd_dev->id.bustype = BUS_ATARI; 235 atakbd_dev->id.bustype = BUS_HOST;
104 atakbd_dev->id.vendor = 0x0001; 236 atakbd_dev->id.vendor = 0x0001;
105 atakbd_dev->id.product = 0x0001; 237 atakbd_dev->id.product = 0x0001;
106 atakbd_dev->id.version = 0x0100; 238 atakbd_dev->id.version = 0x0100;
@@ -111,16 +243,17 @@ static int __init atakbd_init(void)
111 atakbd_dev->keycodemax = ARRAY_SIZE(atakbd_keycode); 243 atakbd_dev->keycodemax = ARRAY_SIZE(atakbd_keycode);
112 244
113 for (i = 1; i < 0x72; i++) { 245 for (i = 1; i < 0x72; i++) {
114 atakbd_keycode[i] = i;
115 set_bit(atakbd_keycode[i], atakbd_dev->keybit); 246 set_bit(atakbd_keycode[i], atakbd_dev->keybit);
116 } 247 }
117 248
118 input_register_device(atakbd_dev); 249 /* error check */
250 if (input_register_device(atakbd_dev)) {
251 input_free_device(atakbd_dev);
252 return -ENOMEM;
253 }
119 254
120 atari_input_keyboard_interrupt_hook = atakbd_interrupt; 255 atari_input_keyboard_interrupt_hook = atakbd_interrupt;
121 256
122 printk(KERN_INFO "input: %s at IKBD ACIA\n", atakbd_dev->name);
123
124 return 0; 257 return 0;
125} 258}
126 259
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 906bf5e8de..c19f77fbaf 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -17,17 +17,18 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/input.h> 18#include <linux/input.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <asm/8253pit.h>
21#include <asm/io.h> 20#include <asm/io.h>
22 21
23MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); 22MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
24MODULE_DESCRIPTION("PC Speaker beeper driver"); 23MODULE_DESCRIPTION("PC Speaker beeper driver");
25MODULE_LICENSE("GPL"); 24MODULE_LICENSE("GPL");
25MODULE_ALIAS("platform:pcspkr");
26 26
27#ifdef CONFIG_X86 27#ifdef CONFIG_X86
28/* Use the global PIT lock ! */ 28/* Use the global PIT lock ! */
29#include <asm/i8253.h> 29#include <asm/i8253.h>
30#else 30#else
31#include <asm/8253pit.h>
31static DEFINE_SPINLOCK(i8253_lock); 32static DEFINE_SPINLOCK(i8253_lock);
32#endif 33#endif
33 34
diff --git a/drivers/input/mouse/atarimouse.c b/drivers/input/mouse/atarimouse.c
index 43ab6566fb..c8c7244b48 100644
--- a/drivers/input/mouse/atarimouse.c
+++ b/drivers/input/mouse/atarimouse.c
@@ -73,14 +73,11 @@ static void atamouse_interrupt(char *buf)
73{ 73{
74 int buttons, dx, dy; 74 int buttons, dx, dy;
75 75
76/* ikbd_mouse_disable(); */
77
78 buttons = (buf[0] & 1) | ((buf[0] & 2) << 1); 76 buttons = (buf[0] & 1) | ((buf[0] & 2) << 1);
79#ifdef FIXED_ATARI_JOYSTICK 77#ifdef FIXED_ATARI_JOYSTICK
80 buttons |= atari_mouse_buttons & 2; 78 buttons |= atari_mouse_buttons & 2;
81 atari_mouse_buttons = buttons; 79 atari_mouse_buttons = buttons;
82#endif 80#endif
83/* ikbd_mouse_rel_pos(); */
84 81
85 /* only relative events get here */ 82 /* only relative events get here */
86 dx = buf[1]; 83 dx = buf[1];
@@ -126,15 +123,16 @@ static int __init atamouse_init(void)
126 if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ST_MFP)) 123 if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ST_MFP))
127 return -ENODEV; 124 return -ENODEV;
128 125
129 if (!(atamouse_dev = input_allocate_device()))
130 return -ENOMEM;
131
132 if (!(atari_keyb_init())) 126 if (!(atari_keyb_init()))
133 return -ENODEV; 127 return -ENODEV;
134 128
129 atamouse_dev = input_allocate_device();
130 if (!atamouse_dev)
131 return -ENOMEM;
132
135 atamouse_dev->name = "Atari mouse"; 133 atamouse_dev->name = "Atari mouse";
136 atamouse_dev->phys = "atamouse/input0"; 134 atamouse_dev->phys = "atamouse/input0";
137 atamouse_dev->id.bustype = BUS_ATARI; 135 atamouse_dev->id.bustype = BUS_HOST;
138 atamouse_dev->id.vendor = 0x0001; 136 atamouse_dev->id.vendor = 0x0001;
139 atamouse_dev->id.product = 0x0002; 137 atamouse_dev->id.product = 0x0002;
140 atamouse_dev->id.version = 0x0100; 138 atamouse_dev->id.version = 0x0100;
@@ -145,9 +143,11 @@ static int __init atamouse_init(void)
145 atamouse_dev->open = atamouse_open; 143 atamouse_dev->open = atamouse_open;
146 atamouse_dev->close = atamouse_close; 144 atamouse_dev->close = atamouse_close;
147 145
148 input_register_device(atamouse_dev); 146 if (input_register_device(atamouse_dev)) {
147 input_free_device(atamouse_dev);
148 return -ENOMEM;
149 }
149 150
150 printk(KERN_INFO "input: %s at keyboard ACIA\n", atamouse_dev->name);
151 return 0; 151 return 0;
152} 152}
153 153
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 372ca49311..b3bc15acd3 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -876,18 +876,14 @@ static int serio_bus_match(struct device *dev, struct device_driver *drv)
876 876
877#define SERIO_ADD_UEVENT_VAR(fmt, val...) \ 877#define SERIO_ADD_UEVENT_VAR(fmt, val...) \
878 do { \ 878 do { \
879 int err = add_uevent_var(envp, num_envp, &i, \ 879 int err = add_uevent_var(env, fmt, val); \
880 buffer, buffer_size, &len, \
881 fmt, val); \
882 if (err) \ 880 if (err) \
883 return err; \ 881 return err; \
884 } while (0) 882 } while (0)
885 883
886static int serio_uevent(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size) 884static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
887{ 885{
888 struct serio *serio; 886 struct serio *serio;
889 int i = 0;
890 int len = 0;
891 887
892 if (!dev) 888 if (!dev)
893 return -ENODEV; 889 return -ENODEV;
@@ -900,7 +896,6 @@ static int serio_uevent(struct device *dev, char **envp, int num_envp, char *buf
900 SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra); 896 SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);
901 SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X", 897 SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
902 serio->id.type, serio->id.proto, serio->id.id, serio->id.extra); 898 serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
903 envp[i] = NULL;
904 899
905 return 0; 900 return 0;
906} 901}
@@ -908,7 +903,7 @@ static int serio_uevent(struct device *dev, char **envp, int num_envp, char *buf
908 903
909#else 904#else
910 905
911static int serio_uevent(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size) 906static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
912{ 907{
913 return -ENODEV; 908 return -ENODEV;
914} 909}
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index b04a178e50..f8b79783c8 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -20,7 +20,6 @@
20#include <linux/isapnp.h> 20#include <linux/isapnp.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22 22
23extern const char *CardType[];
24static const char *avm_pci_rev = "$Revision: 1.29.2.4 $"; 23static const char *avm_pci_rev = "$Revision: 1.29.2.4 $";
25 24
26#define AVM_FRITZ_PCI 1 25#define AVM_FRITZ_PCI 1
@@ -726,100 +725,15 @@ AVM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
726 return(0); 725 return(0);
727} 726}
728 727
729#ifdef CONFIG_PCI 728static int __devinit avm_setup_rest(struct IsdnCardState *cs)
730static struct pci_dev *dev_avm __devinitdata = NULL;
731#endif
732#ifdef __ISAPNP__
733static struct pnp_card *pnp_avm_c __devinitdata = NULL;
734#endif
735
736int __devinit
737setup_avm_pcipnp(struct IsdnCard *card)
738{ 729{
739 u_int val, ver; 730 u_int val, ver;
740 struct IsdnCardState *cs = card->cs;
741 char tmp[64];
742 731
743 strcpy(tmp, avm_pci_rev);
744 printk(KERN_INFO "HiSax: AVM PCI driver Rev. %s\n", HiSax_getrev(tmp));
745 if (cs->typ != ISDN_CTYPE_FRITZPCI)
746 return (0);
747 if (card->para[1]) {
748 /* old manual method */
749 cs->hw.avm.cfg_reg = card->para[1];
750 cs->irq = card->para[0];
751 cs->subtyp = AVM_FRITZ_PNP;
752 goto ready;
753 }
754#ifdef __ISAPNP__
755 if (isapnp_present()) {
756 struct pnp_dev *pnp_avm_d = NULL;
757 if ((pnp_avm_c = pnp_find_card(
758 ISAPNP_VENDOR('A', 'V', 'M'),
759 ISAPNP_FUNCTION(0x0900), pnp_avm_c))) {
760 if ((pnp_avm_d = pnp_find_dev(pnp_avm_c,
761 ISAPNP_VENDOR('A', 'V', 'M'),
762 ISAPNP_FUNCTION(0x0900), pnp_avm_d))) {
763 int err;
764
765 pnp_disable_dev(pnp_avm_d);
766 err = pnp_activate_dev(pnp_avm_d);
767 if (err<0) {
768 printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
769 __FUNCTION__, err);
770 return(0);
771 }
772 cs->hw.avm.cfg_reg =
773 pnp_port_start(pnp_avm_d, 0);
774 cs->irq = pnp_irq(pnp_avm_d, 0);
775 if (!cs->irq) {
776 printk(KERN_ERR "FritzPnP:No IRQ\n");
777 return(0);
778 }
779 if (!cs->hw.avm.cfg_reg) {
780 printk(KERN_ERR "FritzPnP:No IO address\n");
781 return(0);
782 }
783 cs->subtyp = AVM_FRITZ_PNP;
784 goto ready;
785 }
786 }
787 } else {
788 printk(KERN_INFO "FritzPnP: no ISA PnP present\n");
789 }
790#endif
791#ifdef CONFIG_PCI
792 if ((dev_avm = pci_find_device(PCI_VENDOR_ID_AVM,
793 PCI_DEVICE_ID_AVM_A1, dev_avm))) {
794 if (pci_enable_device(dev_avm))
795 return(0);
796 cs->irq = dev_avm->irq;
797 if (!cs->irq) {
798 printk(KERN_ERR "FritzPCI: No IRQ for PCI card found\n");
799 return(0);
800 }
801 cs->hw.avm.cfg_reg = pci_resource_start(dev_avm, 1);
802 if (!cs->hw.avm.cfg_reg) {
803 printk(KERN_ERR "FritzPCI: No IO-Adr for PCI card found\n");
804 return(0);
805 }
806 cs->subtyp = AVM_FRITZ_PCI;
807 } else {
808 printk(KERN_WARNING "FritzPCI: No PCI card found\n");
809 return(0);
810 }
811 cs->irq_flags |= IRQF_SHARED;
812#else
813 printk(KERN_WARNING "FritzPCI: NO_PCI_BIOS\n");
814 return (0);
815#endif /* CONFIG_PCI */
816ready:
817 cs->hw.avm.isac = cs->hw.avm.cfg_reg + 0x10; 732 cs->hw.avm.isac = cs->hw.avm.cfg_reg + 0x10;
818 if (!request_region(cs->hw.avm.cfg_reg, 32, 733 if (!request_region(cs->hw.avm.cfg_reg, 32,
819 (cs->subtyp == AVM_FRITZ_PCI) ? "avm PCI" : "avm PnP")) { 734 (cs->subtyp == AVM_FRITZ_PCI) ? "avm PCI" : "avm PnP")) {
820 printk(KERN_WARNING 735 printk(KERN_WARNING
821 "HiSax: %s config port %x-%x already in use\n", 736 "HiSax: Fritz!PCI/PNP config port %x-%x already in use\n",
822 CardType[card->typ],
823 cs->hw.avm.cfg_reg, 737 cs->hw.avm.cfg_reg,
824 cs->hw.avm.cfg_reg + 31); 738 cs->hw.avm.cfg_reg + 31);
825 return (0); 739 return (0);
@@ -860,3 +774,137 @@ ready:
860 ISACVersion(cs, (cs->subtyp == AVM_FRITZ_PCI) ? "AVM PCI:" : "AVM PnP:"); 774 ISACVersion(cs, (cs->subtyp == AVM_FRITZ_PCI) ? "AVM PCI:" : "AVM PnP:");
861 return (1); 775 return (1);
862} 776}
777
778#ifndef __ISAPNP__
779
780static int __devinit avm_pnp_setup(struct IsdnCardState *cs)
781{
782 return(1); /* no-op: success */
783}
784
785#else
786
787static struct pnp_card *pnp_avm_c __devinitdata = NULL;
788
789static int __devinit avm_pnp_setup(struct IsdnCardState *cs)
790{
791 struct pnp_dev *pnp_avm_d = NULL;
792
793 if (!isapnp_present())
794 return(1); /* no-op: success */
795
796 if ((pnp_avm_c = pnp_find_card(
797 ISAPNP_VENDOR('A', 'V', 'M'),
798 ISAPNP_FUNCTION(0x0900), pnp_avm_c))) {
799 if ((pnp_avm_d = pnp_find_dev(pnp_avm_c,
800 ISAPNP_VENDOR('A', 'V', 'M'),
801 ISAPNP_FUNCTION(0x0900), pnp_avm_d))) {
802 int err;
803
804 pnp_disable_dev(pnp_avm_d);
805 err = pnp_activate_dev(pnp_avm_d);
806 if (err<0) {
807 printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
808 __FUNCTION__, err);
809 return(0);
810 }
811 cs->hw.avm.cfg_reg =
812 pnp_port_start(pnp_avm_d, 0);
813 cs->irq = pnp_irq(pnp_avm_d, 0);
814 if (!cs->irq) {
815 printk(KERN_ERR "FritzPnP:No IRQ\n");
816 return(0);
817 }
818 if (!cs->hw.avm.cfg_reg) {
819 printk(KERN_ERR "FritzPnP:No IO address\n");
820 return(0);
821 }
822 cs->subtyp = AVM_FRITZ_PNP;
823
824 return (2); /* goto 'ready' label */
825 }
826 }
827
828 return (1);
829}
830
831#endif /* __ISAPNP__ */
832
833#ifndef CONFIG_PCI
834
835static int __devinit avm_pci_setup(struct IsdnCardState *cs)
836{
837 return(1); /* no-op: success */
838}
839
840#else
841
842static struct pci_dev *dev_avm __devinitdata = NULL;
843
844static int __devinit avm_pci_setup(struct IsdnCardState *cs)
845{
846 if ((dev_avm = pci_find_device(PCI_VENDOR_ID_AVM,
847 PCI_DEVICE_ID_AVM_A1, dev_avm))) {
848
849 if (pci_enable_device(dev_avm))
850 return(0);
851
852 cs->irq = dev_avm->irq;
853 if (!cs->irq) {
854 printk(KERN_ERR "FritzPCI: No IRQ for PCI card found\n");
855 return(0);
856 }
857
858 cs->hw.avm.cfg_reg = pci_resource_start(dev_avm, 1);
859 if (!cs->hw.avm.cfg_reg) {
860 printk(KERN_ERR "FritzPCI: No IO-Adr for PCI card found\n");
861 return(0);
862 }
863
864 cs->subtyp = AVM_FRITZ_PCI;
865 } else {
866 printk(KERN_WARNING "FritzPCI: No PCI card found\n");
867 return(0);
868 }
869
870 cs->irq_flags |= IRQF_SHARED;
871
872 return (1);
873}
874
875#endif /* CONFIG_PCI */
876
877int __devinit
878setup_avm_pcipnp(struct IsdnCard *card)
879{
880 struct IsdnCardState *cs = card->cs;
881 char tmp[64];
882 int rc;
883
884 strcpy(tmp, avm_pci_rev);
885 printk(KERN_INFO "HiSax: AVM PCI driver Rev. %s\n", HiSax_getrev(tmp));
886
887 if (cs->typ != ISDN_CTYPE_FRITZPCI)
888 return (0);
889
890 if (card->para[1]) {
891 /* old manual method */
892 cs->hw.avm.cfg_reg = card->para[1];
893 cs->irq = card->para[0];
894 cs->subtyp = AVM_FRITZ_PNP;
895 goto ready;
896 }
897
898 rc = avm_pnp_setup(cs);
899 if (rc < 1)
900 return (0);
901 if (rc == 2)
902 goto ready;
903
904 rc = avm_pci_setup(cs);
905 if (rc < 1)
906 return (0);
907
908ready:
909 return avm_setup_rest(cs);
910}
diff --git a/drivers/isdn/hisax/bkm_a8.c b/drivers/isdn/hisax/bkm_a8.c
index 6339bb443f..99ef3b43fc 100644
--- a/drivers/isdn/hisax/bkm_a8.c
+++ b/drivers/isdn/hisax/bkm_a8.c
@@ -20,8 +20,6 @@
20#include <linux/pci.h> 20#include <linux/pci.h>
21#include "bkm_ax.h" 21#include "bkm_ax.h"
22 22
23#ifdef CONFIG_PCI
24
25#define ATTEMPT_PCI_REMAPPING /* Required for PLX rev 1 */ 23#define ATTEMPT_PCI_REMAPPING /* Required for PLX rev 1 */
26 24
27extern const char *CardType[]; 25extern const char *CardType[];
@@ -279,12 +277,9 @@ static u_char pci_bus __devinitdata = 0;
279static u_char pci_device_fn __devinitdata = 0; 277static u_char pci_device_fn __devinitdata = 0;
280static u_char pci_irq __devinitdata = 0; 278static u_char pci_irq __devinitdata = 0;
281 279
282#endif /* CONFIG_PCI */
283
284int __devinit 280int __devinit
285setup_sct_quadro(struct IsdnCard *card) 281setup_sct_quadro(struct IsdnCard *card)
286{ 282{
287#ifdef CONFIG_PCI
288 struct IsdnCardState *cs = card->cs; 283 struct IsdnCardState *cs = card->cs;
289 char tmp[64]; 284 char tmp[64];
290 u_int found = 0; 285 u_int found = 0;
@@ -442,7 +437,4 @@ setup_sct_quadro(struct IsdnCard *card)
442 sct_quadro_subtypes[cs->subtyp], 437 sct_quadro_subtypes[cs->subtyp],
443 readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ID)); 438 readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ID));
444 return (1); 439 return (1);
445#else
446 printk(KERN_ERR "HiSax: bkm_a8 only supported on PCI Systems\n");
447#endif /* CONFIG_PCI */
448} 440}
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
index 6eebeb441b..8267450787 100644
--- a/drivers/isdn/hisax/diva.c
+++ b/drivers/isdn/hisax/diva.c
@@ -25,8 +25,6 @@
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <linux/isapnp.h> 26#include <linux/isapnp.h>
27 27
28extern const char *CardType[];
29
30static const char *Diva_revision = "$Revision: 1.33.2.6 $"; 28static const char *Diva_revision = "$Revision: 1.33.2.6 $";
31 29
32#define byteout(addr,val) outb(val,addr) 30#define byteout(addr,val) outb(val,addr)
@@ -906,225 +904,15 @@ Diva_card_msg(struct IsdnCardState *cs, int mt, void *arg)
906 return(0); 904 return(0);
907} 905}
908 906
909static struct pci_dev *dev_diva __devinitdata = NULL; 907static int __devinit setup_diva_common(struct IsdnCardState *cs)
910static struct pci_dev *dev_diva_u __devinitdata = NULL;
911static struct pci_dev *dev_diva201 __devinitdata = NULL;
912static struct pci_dev *dev_diva202 __devinitdata = NULL;
913
914#ifdef __ISAPNP__
915static struct isapnp_device_id diva_ids[] __devinitdata = {
916 { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x51),
917 ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x51),
918 (unsigned long) "Diva picola" },
919 { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x51),
920 ISAPNP_VENDOR('E', 'I', 'C'), ISAPNP_FUNCTION(0x51),
921 (unsigned long) "Diva picola" },
922 { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x71),
923 ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x71),
924 (unsigned long) "Diva 2.0" },
925 { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x71),
926 ISAPNP_VENDOR('E', 'I', 'C'), ISAPNP_FUNCTION(0x71),
927 (unsigned long) "Diva 2.0" },
928 { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0xA1),
929 ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0xA1),
930 (unsigned long) "Diva 2.01" },
931 { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0xA1),
932 ISAPNP_VENDOR('E', 'I', 'C'), ISAPNP_FUNCTION(0xA1),
933 (unsigned long) "Diva 2.01" },
934 { 0, }
935};
936
937static struct isapnp_device_id *ipid __devinitdata = &diva_ids[0];
938static struct pnp_card *pnp_c __devinitdata = NULL;
939#endif
940
941
942int __devinit
943setup_diva(struct IsdnCard *card)
944{ 908{
945 int bytecnt = 8; 909 int bytecnt;
946 u_char val; 910 u_char val;
947 struct IsdnCardState *cs = card->cs;
948 char tmp[64];
949
950 strcpy(tmp, Diva_revision);
951 printk(KERN_INFO "HiSax: Eicon.Diehl Diva driver Rev. %s\n", HiSax_getrev(tmp));
952 if (cs->typ != ISDN_CTYPE_DIEHLDIVA)
953 return(0);
954 cs->hw.diva.status = 0;
955 if (card->para[1]) {
956 cs->hw.diva.ctrl_reg = 0;
957 cs->hw.diva.cfg_reg = card->para[1];
958 val = readreg(cs->hw.diva.cfg_reg + DIVA_IPAC_ADR,
959 cs->hw.diva.cfg_reg + DIVA_IPAC_DATA, IPAC_ID);
960 printk(KERN_INFO "Diva: IPAC version %x\n", val);
961 if ((val == 1) || (val==2)) {
962 cs->subtyp = DIVA_IPAC_ISA;
963 cs->hw.diva.ctrl = 0;
964 cs->hw.diva.isac = card->para[1] + DIVA_IPAC_DATA;
965 cs->hw.diva.hscx = card->para[1] + DIVA_IPAC_DATA;
966 cs->hw.diva.isac_adr = card->para[1] + DIVA_IPAC_ADR;
967 cs->hw.diva.hscx_adr = card->para[1] + DIVA_IPAC_ADR;
968 test_and_set_bit(HW_IPAC, &cs->HW_Flags);
969 } else {
970 cs->subtyp = DIVA_ISA;
971 cs->hw.diva.ctrl = card->para[1] + DIVA_ISA_CTRL;
972 cs->hw.diva.isac = card->para[1] + DIVA_ISA_ISAC_DATA;
973 cs->hw.diva.hscx = card->para[1] + DIVA_HSCX_DATA;
974 cs->hw.diva.isac_adr = card->para[1] + DIVA_ISA_ISAC_ADR;
975 cs->hw.diva.hscx_adr = card->para[1] + DIVA_HSCX_ADR;
976 }
977 cs->irq = card->para[0];
978 } else {
979#ifdef __ISAPNP__
980 if (isapnp_present()) {
981 struct pnp_dev *pnp_d;
982 while(ipid->card_vendor) {
983 if ((pnp_c = pnp_find_card(ipid->card_vendor,
984 ipid->card_device, pnp_c))) {
985 pnp_d = NULL;
986 if ((pnp_d = pnp_find_dev(pnp_c,
987 ipid->vendor, ipid->function, pnp_d))) {
988 int err;
989
990 printk(KERN_INFO "HiSax: %s detected\n",
991 (char *)ipid->driver_data);
992 pnp_disable_dev(pnp_d);
993 err = pnp_activate_dev(pnp_d);
994 if (err<0) {
995 printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
996 __FUNCTION__, err);
997 return(0);
998 }
999 card->para[1] = pnp_port_start(pnp_d, 0);
1000 card->para[0] = pnp_irq(pnp_d, 0);
1001 if (!card->para[0] || !card->para[1]) {
1002 printk(KERN_ERR "Diva PnP:some resources are missing %ld/%lx\n",
1003 card->para[0], card->para[1]);
1004 pnp_disable_dev(pnp_d);
1005 return(0);
1006 }
1007 cs->hw.diva.cfg_reg = card->para[1];
1008 cs->irq = card->para[0];
1009 if (ipid->function == ISAPNP_FUNCTION(0xA1)) {
1010 cs->subtyp = DIVA_IPAC_ISA;
1011 cs->hw.diva.ctrl = 0;
1012 cs->hw.diva.isac =
1013 card->para[1] + DIVA_IPAC_DATA;
1014 cs->hw.diva.hscx =
1015 card->para[1] + DIVA_IPAC_DATA;
1016 cs->hw.diva.isac_adr =
1017 card->para[1] + DIVA_IPAC_ADR;
1018 cs->hw.diva.hscx_adr =
1019 card->para[1] + DIVA_IPAC_ADR;
1020 test_and_set_bit(HW_IPAC, &cs->HW_Flags);
1021 } else {
1022 cs->subtyp = DIVA_ISA;
1023 cs->hw.diva.ctrl =
1024 card->para[1] + DIVA_ISA_CTRL;
1025 cs->hw.diva.isac =
1026 card->para[1] + DIVA_ISA_ISAC_DATA;
1027 cs->hw.diva.hscx =
1028 card->para[1] + DIVA_HSCX_DATA;
1029 cs->hw.diva.isac_adr =
1030 card->para[1] + DIVA_ISA_ISAC_ADR;
1031 cs->hw.diva.hscx_adr =
1032 card->para[1] + DIVA_HSCX_ADR;
1033 }
1034 goto ready;
1035 } else {
1036 printk(KERN_ERR "Diva PnP: PnP error card found, no device\n");
1037 return(0);
1038 }
1039 }
1040 ipid++;
1041 pnp_c=NULL;
1042 }
1043 if (!ipid->card_vendor) {
1044 printk(KERN_INFO "Diva PnP: no ISAPnP card found\n");
1045 }
1046 }
1047#endif
1048#ifdef CONFIG_PCI
1049 cs->subtyp = 0;
1050 if ((dev_diva = pci_find_device(PCI_VENDOR_ID_EICON,
1051 PCI_DEVICE_ID_EICON_DIVA20, dev_diva))) {
1052 if (pci_enable_device(dev_diva))
1053 return(0);
1054 cs->subtyp = DIVA_PCI;
1055 cs->irq = dev_diva->irq;
1056 cs->hw.diva.cfg_reg = pci_resource_start(dev_diva, 2);
1057 } else if ((dev_diva_u = pci_find_device(PCI_VENDOR_ID_EICON,
1058 PCI_DEVICE_ID_EICON_DIVA20_U, dev_diva_u))) {
1059 if (pci_enable_device(dev_diva_u))
1060 return(0);
1061 cs->subtyp = DIVA_PCI;
1062 cs->irq = dev_diva_u->irq;
1063 cs->hw.diva.cfg_reg = pci_resource_start(dev_diva_u, 2);
1064 } else if ((dev_diva201 = pci_find_device(PCI_VENDOR_ID_EICON,
1065 PCI_DEVICE_ID_EICON_DIVA201, dev_diva201))) {
1066 if (pci_enable_device(dev_diva201))
1067 return(0);
1068 cs->subtyp = DIVA_IPAC_PCI;
1069 cs->irq = dev_diva201->irq;
1070 cs->hw.diva.pci_cfg =
1071 (ulong) ioremap(pci_resource_start(dev_diva201, 0), 4096);
1072 cs->hw.diva.cfg_reg =
1073 (ulong) ioremap(pci_resource_start(dev_diva201, 1), 4096);
1074 } else if ((dev_diva202 = pci_find_device(PCI_VENDOR_ID_EICON,
1075 PCI_DEVICE_ID_EICON_DIVA202, dev_diva202))) {
1076 if (pci_enable_device(dev_diva202))
1077 return(0);
1078 cs->subtyp = DIVA_IPACX_PCI;
1079 cs->irq = dev_diva202->irq;
1080 cs->hw.diva.pci_cfg =
1081 (ulong) ioremap(pci_resource_start(dev_diva202, 0), 4096);
1082 cs->hw.diva.cfg_reg =
1083 (ulong) ioremap(pci_resource_start(dev_diva202, 1), 4096);
1084 } else {
1085 printk(KERN_WARNING "Diva: No PCI card found\n");
1086 return(0);
1087 }
1088
1089 if (!cs->irq) {
1090 printk(KERN_WARNING "Diva: No IRQ for PCI card found\n");
1091 iounmap_diva(cs);
1092 return(0);
1093 }
1094
1095 if (!cs->hw.diva.cfg_reg) {
1096 printk(KERN_WARNING "Diva: No IO-Adr for PCI card found\n");
1097 iounmap_diva(cs);
1098 return(0);
1099 }
1100 cs->irq_flags |= IRQF_SHARED;
1101#else
1102 printk(KERN_WARNING "Diva: cfgreg 0 and NO_PCI_BIOS\n");
1103 printk(KERN_WARNING "Diva: unable to config DIVA PCI\n");
1104 return (0);
1105#endif /* CONFIG_PCI */
1106 if ((cs->subtyp == DIVA_IPAC_PCI) ||
1107 (cs->subtyp == DIVA_IPACX_PCI) ) {
1108 cs->hw.diva.ctrl = 0;
1109 cs->hw.diva.isac = 0;
1110 cs->hw.diva.hscx = 0;
1111 cs->hw.diva.isac_adr = 0;
1112 cs->hw.diva.hscx_adr = 0;
1113 test_and_set_bit(HW_IPAC, &cs->HW_Flags);
1114 bytecnt = 0;
1115 } else {
1116 cs->hw.diva.ctrl = cs->hw.diva.cfg_reg + DIVA_PCI_CTRL;
1117 cs->hw.diva.isac = cs->hw.diva.cfg_reg + DIVA_PCI_ISAC_DATA;
1118 cs->hw.diva.hscx = cs->hw.diva.cfg_reg + DIVA_HSCX_DATA;
1119 cs->hw.diva.isac_adr = cs->hw.diva.cfg_reg + DIVA_PCI_ISAC_ADR;
1120 cs->hw.diva.hscx_adr = cs->hw.diva.cfg_reg + DIVA_HSCX_ADR;
1121 bytecnt = 32;
1122 }
1123 }
1124 911
1125#ifdef __ISAPNP__ 912 if ((cs->subtyp == DIVA_ISA) || (cs->subtyp == DIVA_IPAC_ISA))
1126ready: 913 bytecnt = 8;
1127#endif 914 else
915 bytecnt = 32;
1128 916
1129 printk(KERN_INFO 917 printk(KERN_INFO
1130 "Diva: %s card configured at %#lx IRQ %d\n", 918 "Diva: %s card configured at %#lx IRQ %d\n",
@@ -1145,7 +933,7 @@ ready:
1145 if (!request_region(cs->hw.diva.cfg_reg, bytecnt, "diva isdn")) { 933 if (!request_region(cs->hw.diva.cfg_reg, bytecnt, "diva isdn")) {
1146 printk(KERN_WARNING 934 printk(KERN_WARNING
1147 "HiSax: %s config port %lx-%lx already in use\n", 935 "HiSax: %s config port %lx-%lx already in use\n",
1148 CardType[card->typ], 936 "diva",
1149 cs->hw.diva.cfg_reg, 937 cs->hw.diva.cfg_reg,
1150 cs->hw.diva.cfg_reg + bytecnt); 938 cs->hw.diva.cfg_reg + bytecnt);
1151 iounmap_diva(cs); 939 iounmap_diva(cs);
@@ -1206,3 +994,290 @@ ready:
1206 } 994 }
1207 return (1); 995 return (1);
1208} 996}
997
998#ifdef CONFIG_ISA
999
1000static int __devinit setup_diva_isa(struct IsdnCard *card)
1001{
1002 struct IsdnCardState *cs = card->cs;
1003 u_char val;
1004
1005 if (!card->para[1])
1006 return (-1); /* card not found; continue search */
1007
1008 cs->hw.diva.ctrl_reg = 0;
1009 cs->hw.diva.cfg_reg = card->para[1];
1010 val = readreg(cs->hw.diva.cfg_reg + DIVA_IPAC_ADR,
1011 cs->hw.diva.cfg_reg + DIVA_IPAC_DATA, IPAC_ID);
1012 printk(KERN_INFO "Diva: IPAC version %x\n", val);
1013 if ((val == 1) || (val==2)) {
1014 cs->subtyp = DIVA_IPAC_ISA;
1015 cs->hw.diva.ctrl = 0;
1016 cs->hw.diva.isac = card->para[1] + DIVA_IPAC_DATA;
1017 cs->hw.diva.hscx = card->para[1] + DIVA_IPAC_DATA;
1018 cs->hw.diva.isac_adr = card->para[1] + DIVA_IPAC_ADR;
1019 cs->hw.diva.hscx_adr = card->para[1] + DIVA_IPAC_ADR;
1020 test_and_set_bit(HW_IPAC, &cs->HW_Flags);
1021 } else {
1022 cs->subtyp = DIVA_ISA;
1023 cs->hw.diva.ctrl = card->para[1] + DIVA_ISA_CTRL;
1024 cs->hw.diva.isac = card->para[1] + DIVA_ISA_ISAC_DATA;
1025 cs->hw.diva.hscx = card->para[1] + DIVA_HSCX_DATA;
1026 cs->hw.diva.isac_adr = card->para[1] + DIVA_ISA_ISAC_ADR;
1027 cs->hw.diva.hscx_adr = card->para[1] + DIVA_HSCX_ADR;
1028 }
1029 cs->irq = card->para[0];
1030
1031 return (1); /* card found */
1032}
1033
1034#else /* if !CONFIG_ISA */
1035
1036static int __devinit setup_diva_isa(struct IsdnCard *card)
1037{
1038 return (-1); /* card not found; continue search */
1039}
1040
1041#endif /* CONFIG_ISA */
1042
1043#ifdef __ISAPNP__
1044static struct isapnp_device_id diva_ids[] __devinitdata = {
1045 { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x51),
1046 ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x51),
1047 (unsigned long) "Diva picola" },
1048 { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x51),
1049 ISAPNP_VENDOR('E', 'I', 'C'), ISAPNP_FUNCTION(0x51),
1050 (unsigned long) "Diva picola" },
1051 { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x71),
1052 ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x71),
1053 (unsigned long) "Diva 2.0" },
1054 { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x71),
1055 ISAPNP_VENDOR('E', 'I', 'C'), ISAPNP_FUNCTION(0x71),
1056 (unsigned long) "Diva 2.0" },
1057 { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0xA1),
1058 ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0xA1),
1059 (unsigned long) "Diva 2.01" },
1060 { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0xA1),
1061 ISAPNP_VENDOR('E', 'I', 'C'), ISAPNP_FUNCTION(0xA1),
1062 (unsigned long) "Diva 2.01" },
1063 { 0, }
1064};
1065
1066static struct isapnp_device_id *ipid __devinitdata = &diva_ids[0];
1067static struct pnp_card *pnp_c __devinitdata = NULL;
1068
1069static int __devinit setup_diva_isapnp(struct IsdnCard *card)
1070{
1071 struct IsdnCardState *cs = card->cs;
1072 struct pnp_dev *pnp_d;
1073
1074 if (!isapnp_present())
1075 return (-1); /* card not found; continue search */
1076
1077 while(ipid->card_vendor) {
1078 if ((pnp_c = pnp_find_card(ipid->card_vendor,
1079 ipid->card_device, pnp_c))) {
1080 pnp_d = NULL;
1081 if ((pnp_d = pnp_find_dev(pnp_c,
1082 ipid->vendor, ipid->function, pnp_d))) {
1083 int err;
1084
1085 printk(KERN_INFO "HiSax: %s detected\n",
1086 (char *)ipid->driver_data);
1087 pnp_disable_dev(pnp_d);
1088 err = pnp_activate_dev(pnp_d);
1089 if (err<0) {
1090 printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
1091 __FUNCTION__, err);
1092 return(0);
1093 }
1094 card->para[1] = pnp_port_start(pnp_d, 0);
1095 card->para[0] = pnp_irq(pnp_d, 0);
1096 if (!card->para[0] || !card->para[1]) {
1097 printk(KERN_ERR "Diva PnP:some resources are missing %ld/%lx\n",
1098 card->para[0], card->para[1]);
1099 pnp_disable_dev(pnp_d);
1100 return(0);
1101 }
1102 cs->hw.diva.cfg_reg = card->para[1];
1103 cs->irq = card->para[0];
1104 if (ipid->function == ISAPNP_FUNCTION(0xA1)) {
1105 cs->subtyp = DIVA_IPAC_ISA;
1106 cs->hw.diva.ctrl = 0;
1107 cs->hw.diva.isac =
1108 card->para[1] + DIVA_IPAC_DATA;
1109 cs->hw.diva.hscx =
1110 card->para[1] + DIVA_IPAC_DATA;
1111 cs->hw.diva.isac_adr =
1112 card->para[1] + DIVA_IPAC_ADR;
1113 cs->hw.diva.hscx_adr =
1114 card->para[1] + DIVA_IPAC_ADR;
1115 test_and_set_bit(HW_IPAC, &cs->HW_Flags);
1116 } else {
1117 cs->subtyp = DIVA_ISA;
1118 cs->hw.diva.ctrl =
1119 card->para[1] + DIVA_ISA_CTRL;
1120 cs->hw.diva.isac =
1121 card->para[1] + DIVA_ISA_ISAC_DATA;
1122 cs->hw.diva.hscx =
1123 card->para[1] + DIVA_HSCX_DATA;
1124 cs->hw.diva.isac_adr =
1125 card->para[1] + DIVA_ISA_ISAC_ADR;
1126 cs->hw.diva.hscx_adr =
1127 card->para[1] + DIVA_HSCX_ADR;
1128 }
1129 return (1); /* card found */
1130 } else {
1131 printk(KERN_ERR "Diva PnP: PnP error card found, no device\n");
1132 return(0);
1133 }
1134 }
1135 ipid++;
1136 pnp_c=NULL;
1137 }
1138
1139 return (-1); /* card not found; continue search */
1140}
1141
1142#else /* if !ISAPNP */
1143
1144static int __devinit setup_diva_isapnp(struct IsdnCard *card)
1145{
1146 return (-1); /* card not found; continue search */
1147}
1148
1149#endif /* ISAPNP */
1150
1151#ifdef CONFIG_PCI
1152static struct pci_dev *dev_diva __devinitdata = NULL;
1153static struct pci_dev *dev_diva_u __devinitdata = NULL;
1154static struct pci_dev *dev_diva201 __devinitdata = NULL;
1155static struct pci_dev *dev_diva202 __devinitdata = NULL;
1156
1157static int __devinit setup_diva_pci(struct IsdnCard *card)
1158{
1159 struct IsdnCardState *cs = card->cs;
1160
1161 cs->subtyp = 0;
1162 if ((dev_diva = pci_find_device(PCI_VENDOR_ID_EICON,
1163 PCI_DEVICE_ID_EICON_DIVA20, dev_diva))) {
1164 if (pci_enable_device(dev_diva))
1165 return(0);
1166 cs->subtyp = DIVA_PCI;
1167 cs->irq = dev_diva->irq;
1168 cs->hw.diva.cfg_reg = pci_resource_start(dev_diva, 2);
1169 } else if ((dev_diva_u = pci_find_device(PCI_VENDOR_ID_EICON,
1170 PCI_DEVICE_ID_EICON_DIVA20_U, dev_diva_u))) {
1171 if (pci_enable_device(dev_diva_u))
1172 return(0);
1173 cs->subtyp = DIVA_PCI;
1174 cs->irq = dev_diva_u->irq;
1175 cs->hw.diva.cfg_reg = pci_resource_start(dev_diva_u, 2);
1176 } else if ((dev_diva201 = pci_find_device(PCI_VENDOR_ID_EICON,
1177 PCI_DEVICE_ID_EICON_DIVA201, dev_diva201))) {
1178 if (pci_enable_device(dev_diva201))
1179 return(0);
1180 cs->subtyp = DIVA_IPAC_PCI;
1181 cs->irq = dev_diva201->irq;
1182 cs->hw.diva.pci_cfg =
1183 (ulong) ioremap(pci_resource_start(dev_diva201, 0), 4096);
1184 cs->hw.diva.cfg_reg =
1185 (ulong) ioremap(pci_resource_start(dev_diva201, 1), 4096);
1186 } else if ((dev_diva202 = pci_find_device(PCI_VENDOR_ID_EICON,
1187 PCI_DEVICE_ID_EICON_DIVA202, dev_diva202))) {
1188 if (pci_enable_device(dev_diva202))
1189 return(0);
1190 cs->subtyp = DIVA_IPACX_PCI;
1191 cs->irq = dev_diva202->irq;
1192 cs->hw.diva.pci_cfg =
1193 (ulong) ioremap(pci_resource_start(dev_diva202, 0), 4096);
1194 cs->hw.diva.cfg_reg =
1195 (ulong) ioremap(pci_resource_start(dev_diva202, 1), 4096);
1196 } else {
1197 return (-1); /* card not found; continue search */
1198 }
1199
1200 if (!cs->irq) {
1201 printk(KERN_WARNING "Diva: No IRQ for PCI card found\n");
1202 iounmap_diva(cs);
1203 return(0);
1204 }
1205
1206 if (!cs->hw.diva.cfg_reg) {
1207 printk(KERN_WARNING "Diva: No IO-Adr for PCI card found\n");
1208 iounmap_diva(cs);
1209 return(0);
1210 }
1211 cs->irq_flags |= IRQF_SHARED;
1212
1213 if ((cs->subtyp == DIVA_IPAC_PCI) ||
1214 (cs->subtyp == DIVA_IPACX_PCI) ) {
1215 cs->hw.diva.ctrl = 0;
1216 cs->hw.diva.isac = 0;
1217 cs->hw.diva.hscx = 0;
1218 cs->hw.diva.isac_adr = 0;
1219 cs->hw.diva.hscx_adr = 0;
1220 test_and_set_bit(HW_IPAC, &cs->HW_Flags);
1221 } else {
1222 cs->hw.diva.ctrl = cs->hw.diva.cfg_reg + DIVA_PCI_CTRL;
1223 cs->hw.diva.isac = cs->hw.diva.cfg_reg + DIVA_PCI_ISAC_DATA;
1224 cs->hw.diva.hscx = cs->hw.diva.cfg_reg + DIVA_HSCX_DATA;
1225 cs->hw.diva.isac_adr = cs->hw.diva.cfg_reg + DIVA_PCI_ISAC_ADR;
1226 cs->hw.diva.hscx_adr = cs->hw.diva.cfg_reg + DIVA_HSCX_ADR;
1227 }
1228
1229 return (1); /* card found */
1230}
1231
1232#else /* if !CONFIG_PCI */
1233
1234static int __devinit setup_diva_pci(struct IsdnCard *card)
1235{
1236 return (-1); /* card not found; continue search */
1237}
1238
1239#endif /* CONFIG_PCI */
1240
1241int __devinit
1242setup_diva(struct IsdnCard *card)
1243{
1244 int rc, have_card = 0;
1245 struct IsdnCardState *cs = card->cs;
1246 char tmp[64];
1247
1248 strcpy(tmp, Diva_revision);
1249 printk(KERN_INFO "HiSax: Eicon.Diehl Diva driver Rev. %s\n", HiSax_getrev(tmp));
1250 if (cs->typ != ISDN_CTYPE_DIEHLDIVA)
1251 return(0);
1252 cs->hw.diva.status = 0;
1253
1254 rc = setup_diva_isa(card);
1255 if (!rc)
1256 return rc;
1257 if (rc > 0) {
1258 have_card = 1;
1259 goto ready;
1260 }
1261
1262 rc = setup_diva_isapnp(card);
1263 if (!rc)
1264 return rc;
1265 if (rc > 0) {
1266 have_card = 1;
1267 goto ready;
1268 }
1269
1270 rc = setup_diva_pci(card);
1271 if (!rc)
1272 return rc;
1273 if (rc > 0)
1274 have_card = 1;
1275
1276ready:
1277 if (!have_card) {
1278 printk(KERN_WARNING "Diva: No ISA, ISAPNP or PCI card found\n");
1279 return(0);
1280 }
1281
1282 return setup_diva_common(card->cs);
1283}
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index fab3e4ea05..948a9b290f 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -30,8 +30,6 @@
30#include <linux/serial.h> 30#include <linux/serial.h>
31#include <linux/serial_reg.h> 31#include <linux/serial_reg.h>
32 32
33extern const char *CardType[];
34
35static const char *Elsa_revision = "$Revision: 2.32.2.4 $"; 33static const char *Elsa_revision = "$Revision: 2.32.2.4 $";
36static const char *Elsa_Types[] = 34static const char *Elsa_Types[] =
37{"None", "PC", "PCC-8", "PCC-16", "PCF", "PCF-Pro", 35{"None", "PC", "PCC-8", "PCC-16", "PCF", "PCF-Pro",
@@ -832,8 +830,75 @@ probe_elsa(struct IsdnCardState *cs)
832 return (CARD_portlist[i]); 830 return (CARD_portlist[i]);
833} 831}
834 832
835static struct pci_dev *dev_qs1000 __devinitdata = NULL; 833static int __devinit
836static struct pci_dev *dev_qs3000 __devinitdata = NULL; 834setup_elsa_isa(struct IsdnCard *card)
835{
836 struct IsdnCardState *cs = card->cs;
837 u_char val;
838
839 cs->hw.elsa.base = card->para[0];
840 printk(KERN_INFO "Elsa: Microlink IO probing\n");
841 if (cs->hw.elsa.base) {
842 if (!(cs->subtyp = probe_elsa_adr(cs->hw.elsa.base,
843 cs->typ))) {
844 printk(KERN_WARNING
845 "Elsa: no Elsa Microlink at %#lx\n",
846 cs->hw.elsa.base);
847 return (0);
848 }
849 } else
850 cs->hw.elsa.base = probe_elsa(cs);
851
852 if (!cs->hw.elsa.base) {
853 printk(KERN_WARNING
854 "No Elsa Microlink found\n");
855 return (0);
856 }
857
858 cs->hw.elsa.cfg = cs->hw.elsa.base + ELSA_CONFIG;
859 cs->hw.elsa.ctrl = cs->hw.elsa.base + ELSA_CONTROL;
860 cs->hw.elsa.ale = cs->hw.elsa.base + ELSA_ALE;
861 cs->hw.elsa.isac = cs->hw.elsa.base + ELSA_ISAC;
862 cs->hw.elsa.itac = cs->hw.elsa.base + ELSA_ITAC;
863 cs->hw.elsa.hscx = cs->hw.elsa.base + ELSA_HSCX;
864 cs->hw.elsa.trig = cs->hw.elsa.base + ELSA_TRIG_IRQ;
865 cs->hw.elsa.timer = cs->hw.elsa.base + ELSA_START_TIMER;
866 val = bytein(cs->hw.elsa.cfg);
867 if (cs->subtyp == ELSA_PC) {
868 const u_char CARD_IrqTab[8] =
869 {7, 3, 5, 9, 0, 0, 0, 0};
870 cs->irq = CARD_IrqTab[(val & ELSA_IRQ_IDX_PC) >> 2];
871 } else if (cs->subtyp == ELSA_PCC8) {
872 const u_char CARD_IrqTab[8] =
873 {7, 3, 5, 9, 0, 0, 0, 0};
874 cs->irq = CARD_IrqTab[(val & ELSA_IRQ_IDX_PCC8) >> 4];
875 } else {
876 const u_char CARD_IrqTab[8] =
877 {15, 10, 15, 3, 11, 5, 11, 9};
878 cs->irq = CARD_IrqTab[(val & ELSA_IRQ_IDX) >> 3];
879 }
880 val = bytein(cs->hw.elsa.ale) & ELSA_HW_RELEASE;
881 if (val < 3)
882 val |= 8;
883 val += 'A' - 3;
884 if (val == 'B' || val == 'C')
885 val ^= 1;
886 if ((cs->subtyp == ELSA_PCFPRO) && (val = 'G'))
887 val = 'C';
888 printk(KERN_INFO
889 "Elsa: %s found at %#lx Rev.:%c IRQ %d\n",
890 Elsa_Types[cs->subtyp],
891 cs->hw.elsa.base,
892 val, cs->irq);
893 val = bytein(cs->hw.elsa.ale) & ELSA_S0_POWER_BAD;
894 if (val) {
895 printk(KERN_WARNING
896 "Elsa: Microlink S0 bus power bad\n");
897 cs->hw.elsa.status |= ELSA_BAD_PWR;
898 }
899
900 return (1);
901}
837 902
838#ifdef __ISAPNP__ 903#ifdef __ISAPNP__
839static struct isapnp_device_id elsa_ids[] __devinitdata = { 904static struct isapnp_device_id elsa_ids[] __devinitdata = {
@@ -848,233 +913,194 @@ static struct isapnp_device_id elsa_ids[] __devinitdata = {
848 913
849static struct isapnp_device_id *ipid __devinitdata = &elsa_ids[0]; 914static struct isapnp_device_id *ipid __devinitdata = &elsa_ids[0];
850static struct pnp_card *pnp_c __devinitdata = NULL; 915static struct pnp_card *pnp_c __devinitdata = NULL;
851#endif 916#endif /* __ISAPNP__ */
852 917
853int __devinit 918static int __devinit
854setup_elsa(struct IsdnCard *card) 919setup_elsa_isapnp(struct IsdnCard *card)
855{ 920{
856 int bytecnt;
857 u_char val;
858 struct IsdnCardState *cs = card->cs; 921 struct IsdnCardState *cs = card->cs;
859 char tmp[64];
860 922
861 strcpy(tmp, Elsa_revision);
862 printk(KERN_INFO "HiSax: Elsa driver Rev. %s\n", HiSax_getrev(tmp));
863 cs->hw.elsa.ctrl_reg = 0;
864 cs->hw.elsa.status = 0;
865 cs->hw.elsa.MFlag = 0;
866 cs->subtyp = 0;
867 if (cs->typ == ISDN_CTYPE_ELSA) {
868 cs->hw.elsa.base = card->para[0];
869 printk(KERN_INFO "Elsa: Microlink IO probing\n");
870 if (cs->hw.elsa.base) {
871 if (!(cs->subtyp = probe_elsa_adr(cs->hw.elsa.base,
872 cs->typ))) {
873 printk(KERN_WARNING
874 "Elsa: no Elsa Microlink at %#lx\n",
875 cs->hw.elsa.base);
876 return (0);
877 }
878 } else
879 cs->hw.elsa.base = probe_elsa(cs);
880 if (cs->hw.elsa.base) {
881 cs->hw.elsa.cfg = cs->hw.elsa.base + ELSA_CONFIG;
882 cs->hw.elsa.ctrl = cs->hw.elsa.base + ELSA_CONTROL;
883 cs->hw.elsa.ale = cs->hw.elsa.base + ELSA_ALE;
884 cs->hw.elsa.isac = cs->hw.elsa.base + ELSA_ISAC;
885 cs->hw.elsa.itac = cs->hw.elsa.base + ELSA_ITAC;
886 cs->hw.elsa.hscx = cs->hw.elsa.base + ELSA_HSCX;
887 cs->hw.elsa.trig = cs->hw.elsa.base + ELSA_TRIG_IRQ;
888 cs->hw.elsa.timer = cs->hw.elsa.base + ELSA_START_TIMER;
889 val = bytein(cs->hw.elsa.cfg);
890 if (cs->subtyp == ELSA_PC) {
891 const u_char CARD_IrqTab[8] =
892 {7, 3, 5, 9, 0, 0, 0, 0};
893 cs->irq = CARD_IrqTab[(val & ELSA_IRQ_IDX_PC) >> 2];
894 } else if (cs->subtyp == ELSA_PCC8) {
895 const u_char CARD_IrqTab[8] =
896 {7, 3, 5, 9, 0, 0, 0, 0};
897 cs->irq = CARD_IrqTab[(val & ELSA_IRQ_IDX_PCC8) >> 4];
898 } else {
899 const u_char CARD_IrqTab[8] =
900 {15, 10, 15, 3, 11, 5, 11, 9};
901 cs->irq = CARD_IrqTab[(val & ELSA_IRQ_IDX) >> 3];
902 }
903 val = bytein(cs->hw.elsa.ale) & ELSA_HW_RELEASE;
904 if (val < 3)
905 val |= 8;
906 val += 'A' - 3;
907 if (val == 'B' || val == 'C')
908 val ^= 1;
909 if ((cs->subtyp == ELSA_PCFPRO) && (val = 'G'))
910 val = 'C';
911 printk(KERN_INFO
912 "Elsa: %s found at %#lx Rev.:%c IRQ %d\n",
913 Elsa_Types[cs->subtyp],
914 cs->hw.elsa.base,
915 val, cs->irq);
916 val = bytein(cs->hw.elsa.ale) & ELSA_S0_POWER_BAD;
917 if (val) {
918 printk(KERN_WARNING
919 "Elsa: Microlink S0 bus power bad\n");
920 cs->hw.elsa.status |= ELSA_BAD_PWR;
921 }
922 } else {
923 printk(KERN_WARNING
924 "No Elsa Microlink found\n");
925 return (0);
926 }
927 } else if (cs->typ == ISDN_CTYPE_ELSA_PNP) {
928#ifdef __ISAPNP__ 923#ifdef __ISAPNP__
929 if (!card->para[1] && isapnp_present()) { 924 if (!card->para[1] && isapnp_present()) {
930 struct pnp_dev *pnp_d; 925 struct pnp_dev *pnp_d;
931 while(ipid->card_vendor) { 926 while(ipid->card_vendor) {
932 if ((pnp_c = pnp_find_card(ipid->card_vendor, 927 if ((pnp_c = pnp_find_card(ipid->card_vendor,
933 ipid->card_device, pnp_c))) { 928 ipid->card_device, pnp_c))) {
934 pnp_d = NULL; 929 pnp_d = NULL;
935 if ((pnp_d = pnp_find_dev(pnp_c, 930 if ((pnp_d = pnp_find_dev(pnp_c,
936 ipid->vendor, ipid->function, pnp_d))) { 931 ipid->vendor, ipid->function, pnp_d))) {
937 int err; 932 int err;
938 933
939 printk(KERN_INFO "HiSax: %s detected\n", 934 printk(KERN_INFO "HiSax: %s detected\n",
940 (char *)ipid->driver_data); 935 (char *)ipid->driver_data);
936 pnp_disable_dev(pnp_d);
937 err = pnp_activate_dev(pnp_d);
938 if (err<0) {
939 printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
940 __FUNCTION__, err);
941 return(0);
942 }
943 card->para[1] = pnp_port_start(pnp_d, 0);
944 card->para[0] = pnp_irq(pnp_d, 0);
945
946 if (!card->para[0] || !card->para[1]) {
947 printk(KERN_ERR "Elsa PnP:some resources are missing %ld/%lx\n",
948 card->para[0], card->para[1]);
941 pnp_disable_dev(pnp_d); 949 pnp_disable_dev(pnp_d);
942 err = pnp_activate_dev(pnp_d);
943 if (err<0) {
944 printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
945 __FUNCTION__, err);
946 return(0);
947 }
948 card->para[1] = pnp_port_start(pnp_d, 0);
949 card->para[0] = pnp_irq(pnp_d, 0);
950
951 if (!card->para[0] || !card->para[1]) {
952 printk(KERN_ERR "Elsa PnP:some resources are missing %ld/%lx\n",
953 card->para[0], card->para[1]);
954 pnp_disable_dev(pnp_d);
955 return(0);
956 }
957 if (ipid->function == ISAPNP_FUNCTION(0x133))
958 cs->subtyp = ELSA_QS1000;
959 else
960 cs->subtyp = ELSA_QS3000;
961 break;
962 } else {
963 printk(KERN_ERR "Elsa PnP: PnP error card found, no device\n");
964 return(0); 950 return(0);
965 } 951 }
952 if (ipid->function == ISAPNP_FUNCTION(0x133))
953 cs->subtyp = ELSA_QS1000;
954 else
955 cs->subtyp = ELSA_QS3000;
956 break;
957 } else {
958 printk(KERN_ERR "Elsa PnP: PnP error card found, no device\n");
959 return(0);
966 } 960 }
967 ipid++;
968 pnp_c=NULL;
969 }
970 if (!ipid->card_vendor) {
971 printk(KERN_INFO "Elsa PnP: no ISAPnP card found\n");
972 return(0);
973 } 961 }
962 ipid++;
963 pnp_c=NULL;
964 }
965 if (!ipid->card_vendor) {
966 printk(KERN_INFO "Elsa PnP: no ISAPnP card found\n");
967 return(0);
974 } 968 }
975#endif 969 }
976 if (card->para[1] && card->para[0]) { 970#endif /* __ISAPNP__ */
977 cs->hw.elsa.base = card->para[1]; 971
978 cs->irq = card->para[0]; 972 if (card->para[1] && card->para[0]) {
979 if (!cs->subtyp)
980 cs->subtyp = ELSA_QS1000;
981 } else {
982 printk(KERN_ERR "Elsa PnP: no parameter\n");
983 }
984 cs->hw.elsa.cfg = cs->hw.elsa.base + ELSA_CONFIG;
985 cs->hw.elsa.ale = cs->hw.elsa.base + ELSA_ALE;
986 cs->hw.elsa.isac = cs->hw.elsa.base + ELSA_ISAC;
987 cs->hw.elsa.hscx = cs->hw.elsa.base + ELSA_HSCX;
988 cs->hw.elsa.trig = cs->hw.elsa.base + ELSA_TRIG_IRQ;
989 cs->hw.elsa.timer = cs->hw.elsa.base + ELSA_START_TIMER;
990 cs->hw.elsa.ctrl = cs->hw.elsa.base + ELSA_CONTROL;
991 printk(KERN_INFO
992 "Elsa: %s defined at %#lx IRQ %d\n",
993 Elsa_Types[cs->subtyp],
994 cs->hw.elsa.base,
995 cs->irq);
996 } else if (cs->typ == ISDN_CTYPE_ELSA_PCMCIA) {
997 cs->hw.elsa.base = card->para[1]; 973 cs->hw.elsa.base = card->para[1];
998 cs->irq = card->para[0]; 974 cs->irq = card->para[0];
999 val = readreg(cs->hw.elsa.base + 0, cs->hw.elsa.base + 2, IPAC_ID); 975 if (!cs->subtyp)
1000 if ((val == 1) || (val == 2)) { /* IPAC version 1.1/1.2 */ 976 cs->subtyp = ELSA_QS1000;
1001 cs->subtyp = ELSA_PCMCIA_IPAC; 977 } else {
1002 cs->hw.elsa.ale = cs->hw.elsa.base + 0; 978 printk(KERN_ERR "Elsa PnP: no parameter\n");
1003 cs->hw.elsa.isac = cs->hw.elsa.base + 2; 979 }
1004 cs->hw.elsa.hscx = cs->hw.elsa.base + 2; 980 cs->hw.elsa.cfg = cs->hw.elsa.base + ELSA_CONFIG;
1005 test_and_set_bit(HW_IPAC, &cs->HW_Flags); 981 cs->hw.elsa.ale = cs->hw.elsa.base + ELSA_ALE;
1006 } else { 982 cs->hw.elsa.isac = cs->hw.elsa.base + ELSA_ISAC;
1007 cs->subtyp = ELSA_PCMCIA; 983 cs->hw.elsa.hscx = cs->hw.elsa.base + ELSA_HSCX;
1008 cs->hw.elsa.ale = cs->hw.elsa.base + ELSA_ALE_PCM; 984 cs->hw.elsa.trig = cs->hw.elsa.base + ELSA_TRIG_IRQ;
1009 cs->hw.elsa.isac = cs->hw.elsa.base + ELSA_ISAC_PCM; 985 cs->hw.elsa.timer = cs->hw.elsa.base + ELSA_START_TIMER;
1010 cs->hw.elsa.hscx = cs->hw.elsa.base + ELSA_HSCX; 986 cs->hw.elsa.ctrl = cs->hw.elsa.base + ELSA_CONTROL;
1011 } 987 printk(KERN_INFO
1012 cs->hw.elsa.timer = 0; 988 "Elsa: %s defined at %#lx IRQ %d\n",
1013 cs->hw.elsa.trig = 0; 989 Elsa_Types[cs->subtyp],
1014 cs->hw.elsa.ctrl = 0; 990 cs->hw.elsa.base,
1015 cs->irq_flags |= IRQF_SHARED; 991 cs->irq);
1016 printk(KERN_INFO 992
1017 "Elsa: %s defined at %#lx IRQ %d\n", 993 return (1);
1018 Elsa_Types[cs->subtyp], 994}
1019 cs->hw.elsa.base, 995
1020 cs->irq); 996static void __devinit
1021 } else if (cs->typ == ISDN_CTYPE_ELSA_PCI) { 997setup_elsa_pcmcia(struct IsdnCard *card)
998{
999 struct IsdnCardState *cs = card->cs;
1000 u_char val;
1001
1002 cs->hw.elsa.base = card->para[1];
1003 cs->irq = card->para[0];
1004 val = readreg(cs->hw.elsa.base + 0, cs->hw.elsa.base + 2, IPAC_ID);
1005 if ((val == 1) || (val == 2)) { /* IPAC version 1.1/1.2 */
1006 cs->subtyp = ELSA_PCMCIA_IPAC;
1007 cs->hw.elsa.ale = cs->hw.elsa.base + 0;
1008 cs->hw.elsa.isac = cs->hw.elsa.base + 2;
1009 cs->hw.elsa.hscx = cs->hw.elsa.base + 2;
1010 test_and_set_bit(HW_IPAC, &cs->HW_Flags);
1011 } else {
1012 cs->subtyp = ELSA_PCMCIA;
1013 cs->hw.elsa.ale = cs->hw.elsa.base + ELSA_ALE_PCM;
1014 cs->hw.elsa.isac = cs->hw.elsa.base + ELSA_ISAC_PCM;
1015 cs->hw.elsa.hscx = cs->hw.elsa.base + ELSA_HSCX;
1016 }
1017 cs->hw.elsa.timer = 0;
1018 cs->hw.elsa.trig = 0;
1019 cs->hw.elsa.ctrl = 0;
1020 cs->irq_flags |= IRQF_SHARED;
1021 printk(KERN_INFO
1022 "Elsa: %s defined at %#lx IRQ %d\n",
1023 Elsa_Types[cs->subtyp],
1024 cs->hw.elsa.base,
1025 cs->irq);
1026}
1027
1022#ifdef CONFIG_PCI 1028#ifdef CONFIG_PCI
1023 cs->subtyp = 0; 1029static struct pci_dev *dev_qs1000 __devinitdata = NULL;
1024 if ((dev_qs1000 = pci_find_device(PCI_VENDOR_ID_ELSA, 1030static struct pci_dev *dev_qs3000 __devinitdata = NULL;
1025 PCI_DEVICE_ID_ELSA_MICROLINK, dev_qs1000))) { 1031
1026 if (pci_enable_device(dev_qs1000)) 1032static int __devinit
1027 return(0); 1033setup_elsa_pci(struct IsdnCard *card)
1028 cs->subtyp = ELSA_QS1000PCI; 1034{
1029 cs->irq = dev_qs1000->irq; 1035 struct IsdnCardState *cs = card->cs;
1030 cs->hw.elsa.cfg = pci_resource_start(dev_qs1000, 1); 1036
1031 cs->hw.elsa.base = pci_resource_start(dev_qs1000, 3); 1037 cs->subtyp = 0;
1032 } else if ((dev_qs3000 = pci_find_device(PCI_VENDOR_ID_ELSA, 1038 if ((dev_qs1000 = pci_find_device(PCI_VENDOR_ID_ELSA,
1033 PCI_DEVICE_ID_ELSA_QS3000, dev_qs3000))) { 1039 PCI_DEVICE_ID_ELSA_MICROLINK, dev_qs1000))) {
1034 if (pci_enable_device(dev_qs3000)) 1040 if (pci_enable_device(dev_qs1000))
1035 return(0);
1036 cs->subtyp = ELSA_QS3000PCI;
1037 cs->irq = dev_qs3000->irq;
1038 cs->hw.elsa.cfg = pci_resource_start(dev_qs3000, 1);
1039 cs->hw.elsa.base = pci_resource_start(dev_qs3000, 3);
1040 } else {
1041 printk(KERN_WARNING "Elsa: No PCI card found\n");
1042 return(0); 1041 return(0);
1043 } 1042 cs->subtyp = ELSA_QS1000PCI;
1044 if (!cs->irq) { 1043 cs->irq = dev_qs1000->irq;
1045 printk(KERN_WARNING "Elsa: No IRQ for PCI card found\n"); 1044 cs->hw.elsa.cfg = pci_resource_start(dev_qs1000, 1);
1045 cs->hw.elsa.base = pci_resource_start(dev_qs1000, 3);
1046 } else if ((dev_qs3000 = pci_find_device(PCI_VENDOR_ID_ELSA,
1047 PCI_DEVICE_ID_ELSA_QS3000, dev_qs3000))) {
1048 if (pci_enable_device(dev_qs3000))
1046 return(0); 1049 return(0);
1047 } 1050 cs->subtyp = ELSA_QS3000PCI;
1051 cs->irq = dev_qs3000->irq;
1052 cs->hw.elsa.cfg = pci_resource_start(dev_qs3000, 1);
1053 cs->hw.elsa.base = pci_resource_start(dev_qs3000, 3);
1054 } else {
1055 printk(KERN_WARNING "Elsa: No PCI card found\n");
1056 return(0);
1057 }
1058 if (!cs->irq) {
1059 printk(KERN_WARNING "Elsa: No IRQ for PCI card found\n");
1060 return(0);
1061 }
1062
1063 if (!(cs->hw.elsa.base && cs->hw.elsa.cfg)) {
1064 printk(KERN_WARNING "Elsa: No IO-Adr for PCI card found\n");
1065 return(0);
1066 }
1067 if ((cs->hw.elsa.cfg & 0xff) || (cs->hw.elsa.base & 0xf)) {
1068 printk(KERN_WARNING "Elsa: You may have a wrong PCI bios\n");
1069 printk(KERN_WARNING "Elsa: If your system hangs now, read\n");
1070 printk(KERN_WARNING "Elsa: Documentation/isdn/README.HiSax\n");
1071 }
1072 cs->hw.elsa.ale = cs->hw.elsa.base;
1073 cs->hw.elsa.isac = cs->hw.elsa.base +1;
1074 cs->hw.elsa.hscx = cs->hw.elsa.base +1;
1075 test_and_set_bit(HW_IPAC, &cs->HW_Flags);
1076 cs->hw.elsa.timer = 0;
1077 cs->hw.elsa.trig = 0;
1078 cs->irq_flags |= IRQF_SHARED;
1079 printk(KERN_INFO
1080 "Elsa: %s defined at %#lx/0x%x IRQ %d\n",
1081 Elsa_Types[cs->subtyp],
1082 cs->hw.elsa.base,
1083 cs->hw.elsa.cfg,
1084 cs->irq);
1085
1086 return (1);
1087}
1048 1088
1049 if (!(cs->hw.elsa.base && cs->hw.elsa.cfg)) {
1050 printk(KERN_WARNING "Elsa: No IO-Adr for PCI card found\n");
1051 return(0);
1052 }
1053 if ((cs->hw.elsa.cfg & 0xff) || (cs->hw.elsa.base & 0xf)) {
1054 printk(KERN_WARNING "Elsa: You may have a wrong PCI bios\n");
1055 printk(KERN_WARNING "Elsa: If your system hangs now, read\n");
1056 printk(KERN_WARNING "Elsa: Documentation/isdn/README.HiSax\n");
1057 }
1058 cs->hw.elsa.ale = cs->hw.elsa.base;
1059 cs->hw.elsa.isac = cs->hw.elsa.base +1;
1060 cs->hw.elsa.hscx = cs->hw.elsa.base +1;
1061 test_and_set_bit(HW_IPAC, &cs->HW_Flags);
1062 cs->hw.elsa.timer = 0;
1063 cs->hw.elsa.trig = 0;
1064 cs->irq_flags |= IRQF_SHARED;
1065 printk(KERN_INFO
1066 "Elsa: %s defined at %#lx/0x%x IRQ %d\n",
1067 Elsa_Types[cs->subtyp],
1068 cs->hw.elsa.base,
1069 cs->hw.elsa.cfg,
1070 cs->irq);
1071#else 1089#else
1072 printk(KERN_WARNING "Elsa: Elsa PCI and NO_PCI_BIOS\n"); 1090
1073 printk(KERN_WARNING "Elsa: unable to config Elsa PCI\n"); 1091static int __devinit
1074 return (0); 1092setup_elsa_pci(struct IsdnCard *card)
1093{
1094 return (1);
1095}
1075#endif /* CONFIG_PCI */ 1096#endif /* CONFIG_PCI */
1076 } else 1097
1077 return (0); 1098static int __devinit
1099setup_elsa_common(struct IsdnCard *card)
1100{
1101 struct IsdnCardState *cs = card->cs;
1102 u_char val;
1103 int bytecnt;
1078 1104
1079 switch (cs->subtyp) { 1105 switch (cs->subtyp) {
1080 case ELSA_PC: 1106 case ELSA_PC:
@@ -1104,8 +1130,7 @@ setup_elsa(struct IsdnCard *card)
1104 here, it would fail. */ 1130 here, it would fail. */
1105 if (cs->typ != ISDN_CTYPE_ELSA_PCMCIA && !request_region(cs->hw.elsa.base, bytecnt, "elsa isdn")) { 1131 if (cs->typ != ISDN_CTYPE_ELSA_PCMCIA && !request_region(cs->hw.elsa.base, bytecnt, "elsa isdn")) {
1106 printk(KERN_WARNING 1132 printk(KERN_WARNING
1107 "HiSax: %s config port %#lx-%#lx already in use\n", 1133 "HiSax: ELSA config port %#lx-%#lx already in use\n",
1108 CardType[card->typ],
1109 cs->hw.elsa.base, 1134 cs->hw.elsa.base,
1110 cs->hw.elsa.base + bytecnt); 1135 cs->hw.elsa.base + bytecnt);
1111 return (0); 1136 return (0);
@@ -1113,8 +1138,7 @@ setup_elsa(struct IsdnCard *card)
1113 if ((cs->subtyp == ELSA_QS1000PCI) || (cs->subtyp == ELSA_QS3000PCI)) { 1138 if ((cs->subtyp == ELSA_QS1000PCI) || (cs->subtyp == ELSA_QS3000PCI)) {
1114 if (!request_region(cs->hw.elsa.cfg, 0x80, "elsa isdn pci")) { 1139 if (!request_region(cs->hw.elsa.cfg, 0x80, "elsa isdn pci")) {
1115 printk(KERN_WARNING 1140 printk(KERN_WARNING
1116 "HiSax: %s pci port %x-%x already in use\n", 1141 "HiSax: ELSA pci port %x-%x already in use\n",
1117 CardType[card->typ],
1118 cs->hw.elsa.cfg, 1142 cs->hw.elsa.cfg,
1119 cs->hw.elsa.cfg + 0x80); 1143 cs->hw.elsa.cfg + 0x80);
1120 release_region(cs->hw.elsa.base, bytecnt); 1144 release_region(cs->hw.elsa.base, bytecnt);
@@ -1186,3 +1210,41 @@ setup_elsa(struct IsdnCard *card)
1186 } 1210 }
1187 return (1); 1211 return (1);
1188} 1212}
1213
1214int __devinit
1215setup_elsa(struct IsdnCard *card)
1216{
1217 int rc;
1218 struct IsdnCardState *cs = card->cs;
1219 char tmp[64];
1220
1221 strcpy(tmp, Elsa_revision);
1222 printk(KERN_INFO "HiSax: Elsa driver Rev. %s\n", HiSax_getrev(tmp));
1223 cs->hw.elsa.ctrl_reg = 0;
1224 cs->hw.elsa.status = 0;
1225 cs->hw.elsa.MFlag = 0;
1226 cs->subtyp = 0;
1227
1228 if (cs->typ == ISDN_CTYPE_ELSA) {
1229 rc = setup_elsa_isa(card);
1230 if (!rc)
1231 return (0);
1232
1233 } else if (cs->typ == ISDN_CTYPE_ELSA_PNP) {
1234 rc = setup_elsa_isapnp(card);
1235 if (!rc)
1236 return (0);
1237
1238 } else if (cs->typ == ISDN_CTYPE_ELSA_PCMCIA)
1239 setup_elsa_pcmcia(card);
1240
1241 else if (cs->typ == ISDN_CTYPE_ELSA_PCI) {
1242 rc = setup_elsa_pci(card);
1243 if (!rc)
1244 return (0);
1245
1246 } else
1247 return (0);
1248
1249 return setup_elsa_common(card);
1250}
diff --git a/drivers/isdn/hisax/sedlbauer.c b/drivers/isdn/hisax/sedlbauer.c
index ad06f3cc60..03dfc32166 100644
--- a/drivers/isdn/hisax/sedlbauer.c
+++ b/drivers/isdn/hisax/sedlbauer.c
@@ -518,8 +518,6 @@ Sedl_card_msg(struct IsdnCardState *cs, int mt, void *arg)
518 return(0); 518 return(0);
519} 519}
520 520
521static struct pci_dev *dev_sedl __devinitdata = NULL;
522
523#ifdef __ISAPNP__ 521#ifdef __ISAPNP__
524static struct isapnp_device_id sedl_ids[] __devinitdata = { 522static struct isapnp_device_id sedl_ids[] __devinitdata = {
525 { ISAPNP_VENDOR('S', 'A', 'G'), ISAPNP_FUNCTION(0x01), 523 { ISAPNP_VENDOR('S', 'A', 'G'), ISAPNP_FUNCTION(0x01),
@@ -533,15 +531,158 @@ static struct isapnp_device_id sedl_ids[] __devinitdata = {
533 531
534static struct isapnp_device_id *ipid __devinitdata = &sedl_ids[0]; 532static struct isapnp_device_id *ipid __devinitdata = &sedl_ids[0];
535static struct pnp_card *pnp_c __devinitdata = NULL; 533static struct pnp_card *pnp_c __devinitdata = NULL;
536#endif 534
535static int __devinit
536setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
537{
538 struct IsdnCardState *cs = card->cs;
539 struct pnp_dev *pnp_d;
540
541 if (!isapnp_present())
542 return -1;
543
544 while(ipid->card_vendor) {
545 if ((pnp_c = pnp_find_card(ipid->card_vendor,
546 ipid->card_device, pnp_c))) {
547 pnp_d = NULL;
548 if ((pnp_d = pnp_find_dev(pnp_c,
549 ipid->vendor, ipid->function, pnp_d))) {
550 int err;
551
552 printk(KERN_INFO "HiSax: %s detected\n",
553 (char *)ipid->driver_data);
554 pnp_disable_dev(pnp_d);
555 err = pnp_activate_dev(pnp_d);
556 if (err<0) {
557 printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
558 __FUNCTION__, err);
559 return(0);
560 }
561 card->para[1] = pnp_port_start(pnp_d, 0);
562 card->para[0] = pnp_irq(pnp_d, 0);
563
564 if (!card->para[0] || !card->para[1]) {
565 printk(KERN_ERR "Sedlbauer PnP:some resources are missing %ld/%lx\n",
566 card->para[0], card->para[1]);
567 pnp_disable_dev(pnp_d);
568 return(0);
569 }
570 cs->hw.sedl.cfg_reg = card->para[1];
571 cs->irq = card->para[0];
572 if (ipid->function == ISAPNP_FUNCTION(0x2)) {
573 cs->subtyp = SEDL_SPEED_FAX;
574 cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
575 *bytecnt = 16;
576 } else {
577 cs->subtyp = SEDL_SPEED_CARD_WIN;
578 cs->hw.sedl.chip = SEDL_CHIP_TEST;
579 }
580
581 return (1);
582 } else {
583 printk(KERN_ERR "Sedlbauer PnP: PnP error card found, no device\n");
584 return(0);
585 }
586 }
587 ipid++;
588 pnp_c = NULL;
589 }
590
591 printk(KERN_INFO "Sedlbauer PnP: no ISAPnP card found\n");
592 return -1;
593}
594#else
595
596static int __devinit
597setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
598{
599 return -1;
600}
601#endif /* __ISAPNP__ */
602
603#ifdef CONFIG_PCI
604static struct pci_dev *dev_sedl __devinitdata = NULL;
605
606static int __devinit
607setup_sedlbauer_pci(struct IsdnCard *card)
608{
609 struct IsdnCardState *cs = card->cs;
610 u16 sub_vendor_id, sub_id;
611
612 if ((dev_sedl = pci_find_device(PCI_VENDOR_ID_TIGERJET,
613 PCI_DEVICE_ID_TIGERJET_100, dev_sedl))) {
614 if (pci_enable_device(dev_sedl))
615 return(0);
616 cs->irq = dev_sedl->irq;
617 if (!cs->irq) {
618 printk(KERN_WARNING "Sedlbauer: No IRQ for PCI card found\n");
619 return(0);
620 }
621 cs->hw.sedl.cfg_reg = pci_resource_start(dev_sedl, 0);
622 } else {
623 printk(KERN_WARNING "Sedlbauer: No PCI card found\n");
624 return(0);
625 }
626 cs->irq_flags |= IRQF_SHARED;
627 cs->hw.sedl.bus = SEDL_BUS_PCI;
628 sub_vendor_id = dev_sedl->subsystem_vendor;
629 sub_id = dev_sedl->subsystem_device;
630 printk(KERN_INFO "Sedlbauer: PCI subvendor:%x subid %x\n",
631 sub_vendor_id, sub_id);
632 printk(KERN_INFO "Sedlbauer: PCI base adr %#x\n",
633 cs->hw.sedl.cfg_reg);
634 if (sub_id != PCI_SUB_ID_SEDLBAUER) {
635 printk(KERN_ERR "Sedlbauer: unknown sub id %#x\n", sub_id);
636 return(0);
637 }
638 if (sub_vendor_id == PCI_SUBVENDOR_SPEEDFAX_PYRAMID) {
639 cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
640 cs->subtyp = SEDL_SPEEDFAX_PYRAMID;
641 } else if (sub_vendor_id == PCI_SUBVENDOR_SPEEDFAX_PCI) {
642 cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
643 cs->subtyp = SEDL_SPEEDFAX_PCI;
644 } else if (sub_vendor_id == PCI_SUBVENDOR_HST_SAPHIR3) {
645 cs->hw.sedl.chip = SEDL_CHIP_IPAC;
646 cs->subtyp = HST_SAPHIR3;
647 } else if (sub_vendor_id == PCI_SUBVENDOR_SEDLBAUER_PCI) {
648 cs->hw.sedl.chip = SEDL_CHIP_IPAC;
649 cs->subtyp = SEDL_SPEED_PCI;
650 } else {
651 printk(KERN_ERR "Sedlbauer: unknown sub vendor id %#x\n",
652 sub_vendor_id);
653 return(0);
654 }
655
656 cs->hw.sedl.reset_on = SEDL_ISAR_PCI_ISAR_RESET_ON;
657 cs->hw.sedl.reset_off = SEDL_ISAR_PCI_ISAR_RESET_OFF;
658 byteout(cs->hw.sedl.cfg_reg, 0xff);
659 byteout(cs->hw.sedl.cfg_reg, 0x00);
660 byteout(cs->hw.sedl.cfg_reg+ 2, 0xdd);
661 byteout(cs->hw.sedl.cfg_reg+ 5, 0); /* disable all IRQ */
662 byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_on);
663 mdelay(2);
664 byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_off);
665 mdelay(10);
666
667 return (1);
668}
669
670#else
671
672static int __devinit
673setup_sedlbauer_pci(struct IsdnCard *card)
674{
675 return (1);
676}
677
678#endif /* CONFIG_PCI */
537 679
538int __devinit 680int __devinit
539setup_sedlbauer(struct IsdnCard *card) 681setup_sedlbauer(struct IsdnCard *card)
540{ 682{
541 int bytecnt, ver, val; 683 int bytecnt = 8, ver, val, rc;
542 struct IsdnCardState *cs = card->cs; 684 struct IsdnCardState *cs = card->cs;
543 char tmp[64]; 685 char tmp[64];
544 u16 sub_vendor_id, sub_id;
545 686
546 strcpy(tmp, Sedlbauer_revision); 687 strcpy(tmp, Sedlbauer_revision);
547 printk(KERN_INFO "HiSax: Sedlbauer driver Rev. %s\n", HiSax_getrev(tmp)); 688 printk(KERN_INFO "HiSax: Sedlbauer driver Rev. %s\n", HiSax_getrev(tmp));
@@ -569,124 +710,21 @@ setup_sedlbauer(struct IsdnCard *card)
569 bytecnt = 16; 710 bytecnt = 16;
570 } 711 }
571 } else { 712 } else {
572#ifdef __ISAPNP__ 713 rc = setup_sedlbauer_isapnp(card, &bytecnt);
573 if (isapnp_present()) { 714 if (!rc)
574 struct pnp_dev *pnp_d; 715 return (0);
575 while(ipid->card_vendor) { 716 if (rc > 0)
576 if ((pnp_c = pnp_find_card(ipid->card_vendor, 717 goto ready;
577 ipid->card_device, pnp_c))) { 718
578 pnp_d = NULL; 719 /* Probe for Sedlbauer speed pci */
579 if ((pnp_d = pnp_find_dev(pnp_c, 720 rc = setup_sedlbauer_pci(card);
580 ipid->vendor, ipid->function, pnp_d))) { 721 if (!rc)
581 int err; 722 return (0);
582 723
583 printk(KERN_INFO "HiSax: %s detected\n",
584 (char *)ipid->driver_data);
585 pnp_disable_dev(pnp_d);
586 err = pnp_activate_dev(pnp_d);
587 if (err<0) {
588 printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
589 __FUNCTION__, err);
590 return(0);
591 }
592 card->para[1] = pnp_port_start(pnp_d, 0);
593 card->para[0] = pnp_irq(pnp_d, 0);
594
595 if (!card->para[0] || !card->para[1]) {
596 printk(KERN_ERR "Sedlbauer PnP:some resources are missing %ld/%lx\n",
597 card->para[0], card->para[1]);
598 pnp_disable_dev(pnp_d);
599 return(0);
600 }
601 cs->hw.sedl.cfg_reg = card->para[1];
602 cs->irq = card->para[0];
603 if (ipid->function == ISAPNP_FUNCTION(0x2)) {
604 cs->subtyp = SEDL_SPEED_FAX;
605 cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
606 bytecnt = 16;
607 } else {
608 cs->subtyp = SEDL_SPEED_CARD_WIN;
609 cs->hw.sedl.chip = SEDL_CHIP_TEST;
610 }
611 goto ready;
612 } else {
613 printk(KERN_ERR "Sedlbauer PnP: PnP error card found, no device\n");
614 return(0);
615 }
616 }
617 ipid++;
618 pnp_c = NULL;
619 }
620 if (!ipid->card_vendor) {
621 printk(KERN_INFO "Sedlbauer PnP: no ISAPnP card found\n");
622 }
623 }
624#endif
625/* Probe for Sedlbauer speed pci */
626#ifdef CONFIG_PCI
627 if ((dev_sedl = pci_find_device(PCI_VENDOR_ID_TIGERJET,
628 PCI_DEVICE_ID_TIGERJET_100, dev_sedl))) {
629 if (pci_enable_device(dev_sedl))
630 return(0);
631 cs->irq = dev_sedl->irq;
632 if (!cs->irq) {
633 printk(KERN_WARNING "Sedlbauer: No IRQ for PCI card found\n");
634 return(0);
635 }
636 cs->hw.sedl.cfg_reg = pci_resource_start(dev_sedl, 0);
637 } else {
638 printk(KERN_WARNING "Sedlbauer: No PCI card found\n");
639 return(0);
640 }
641 cs->irq_flags |= IRQF_SHARED;
642 cs->hw.sedl.bus = SEDL_BUS_PCI;
643 sub_vendor_id = dev_sedl->subsystem_vendor;
644 sub_id = dev_sedl->subsystem_device;
645 printk(KERN_INFO "Sedlbauer: PCI subvendor:%x subid %x\n",
646 sub_vendor_id, sub_id);
647 printk(KERN_INFO "Sedlbauer: PCI base adr %#x\n",
648 cs->hw.sedl.cfg_reg);
649 if (sub_id != PCI_SUB_ID_SEDLBAUER) {
650 printk(KERN_ERR "Sedlbauer: unknown sub id %#x\n", sub_id);
651 return(0);
652 }
653 if (sub_vendor_id == PCI_SUBVENDOR_SPEEDFAX_PYRAMID) {
654 cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
655 cs->subtyp = SEDL_SPEEDFAX_PYRAMID;
656 } else if (sub_vendor_id == PCI_SUBVENDOR_SPEEDFAX_PCI) {
657 cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
658 cs->subtyp = SEDL_SPEEDFAX_PCI;
659 } else if (sub_vendor_id == PCI_SUBVENDOR_HST_SAPHIR3) {
660 cs->hw.sedl.chip = SEDL_CHIP_IPAC;
661 cs->subtyp = HST_SAPHIR3;
662 } else if (sub_vendor_id == PCI_SUBVENDOR_SEDLBAUER_PCI) {
663 cs->hw.sedl.chip = SEDL_CHIP_IPAC;
664 cs->subtyp = SEDL_SPEED_PCI;
665 } else {
666 printk(KERN_ERR "Sedlbauer: unknown sub vendor id %#x\n",
667 sub_vendor_id);
668 return(0);
669 }
670 bytecnt = 256; 724 bytecnt = 256;
671 cs->hw.sedl.reset_on = SEDL_ISAR_PCI_ISAR_RESET_ON;
672 cs->hw.sedl.reset_off = SEDL_ISAR_PCI_ISAR_RESET_OFF;
673 byteout(cs->hw.sedl.cfg_reg, 0xff);
674 byteout(cs->hw.sedl.cfg_reg, 0x00);
675 byteout(cs->hw.sedl.cfg_reg+ 2, 0xdd);
676 byteout(cs->hw.sedl.cfg_reg+ 5, 0); /* disable all IRQ */
677 byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_on);
678 mdelay(2);
679 byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_off);
680 mdelay(10);
681#else
682 printk(KERN_WARNING "Sedlbauer: NO_PCI_BIOS\n");
683 return (0);
684#endif /* CONFIG_PCI */
685 } 725 }
686 726
687#ifdef __ISAPNP__
688ready: 727ready:
689#endif
690 728
691 /* In case of the sedlbauer pcmcia card, this region is in use, 729 /* In case of the sedlbauer pcmcia card, this region is in use,
692 * reserved for us by the card manager. So we do not check it 730 * reserved for us by the card manager. So we do not check it
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c
index d09f6d033f..4393003ae1 100644
--- a/drivers/isdn/hisax/telespci.c
+++ b/drivers/isdn/hisax/telespci.c
@@ -295,11 +295,12 @@ setup_telespci(struct IsdnCard *card)
295#ifdef __BIG_ENDIAN 295#ifdef __BIG_ENDIAN
296#error "not running on big endian machines now" 296#error "not running on big endian machines now"
297#endif 297#endif
298
298 strcpy(tmp, telespci_revision); 299 strcpy(tmp, telespci_revision);
299 printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp)); 300 printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
300 if (cs->typ != ISDN_CTYPE_TELESPCI) 301 if (cs->typ != ISDN_CTYPE_TELESPCI)
301 return (0); 302 return (0);
302#ifdef CONFIG_PCI 303
303 if ((dev_tel = pci_find_device (PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36120, dev_tel))) { 304 if ((dev_tel = pci_find_device (PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36120, dev_tel))) {
304 if (pci_enable_device(dev_tel)) 305 if (pci_enable_device(dev_tel))
305 return(0); 306 return(0);
@@ -317,11 +318,6 @@ setup_telespci(struct IsdnCard *card)
317 printk(KERN_WARNING "TelesPCI: No PCI card found\n"); 318 printk(KERN_WARNING "TelesPCI: No PCI card found\n");
318 return(0); 319 return(0);
319 } 320 }
320#else
321 printk(KERN_WARNING "HiSax: Teles/PCI and NO_PCI_BIOS\n");
322 printk(KERN_WARNING "HiSax: Teles/PCI unable to config\n");
323 return (0);
324#endif /* CONFIG_PCI */
325 321
326 /* Initialize Zoran PCI controller */ 322 /* Initialize Zoran PCI controller */
327 writel(0x00000000, cs->hw.teles0.membase + 0x28); 323 writel(0x00000000, cs->hw.teles0.membase + 0x28);
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index 3aeceaf976..39129b94f8 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -1009,7 +1009,7 @@ setup_w6692(struct IsdnCard *card)
1009 printk(KERN_INFO "HiSax: W6692 driver Rev. %s\n", HiSax_getrev(tmp)); 1009 printk(KERN_INFO "HiSax: W6692 driver Rev. %s\n", HiSax_getrev(tmp));
1010 if (cs->typ != ISDN_CTYPE_W6692) 1010 if (cs->typ != ISDN_CTYPE_W6692)
1011 return (0); 1011 return (0);
1012#ifdef CONFIG_PCI 1012
1013 while (id_list[id_idx].vendor_id) { 1013 while (id_list[id_idx].vendor_id) {
1014 dev_w6692 = pci_find_device(id_list[id_idx].vendor_id, 1014 dev_w6692 = pci_find_device(id_list[id_idx].vendor_id,
1015 id_list[id_idx].device_id, 1015 id_list[id_idx].device_id,
@@ -1061,11 +1061,6 @@ setup_w6692(struct IsdnCard *card)
1061 cs->hw.w6692.iobase + 255); 1061 cs->hw.w6692.iobase + 255);
1062 return (0); 1062 return (0);
1063 } 1063 }
1064#else
1065 printk(KERN_WARNING "HiSax: W6692 and NO_PCI_BIOS\n");
1066 printk(KERN_WARNING "HiSax: W6692 unable to config\n");
1067 return (0);
1068#endif /* CONFIG_PCI */
1069 1064
1070 printk(KERN_INFO 1065 printk(KERN_INFO
1071 "HiSax: %s config irq:%d I/O:%x\n", 1066 "HiSax: %s config irq:%d I/O:%x\n",
diff --git a/drivers/isdn/hysdn/hysdn_init.c b/drivers/isdn/hysdn/hysdn_init.c
index 9e01748a17..b7cc5c2f08 100644
--- a/drivers/isdn/hysdn/hysdn_init.c
+++ b/drivers/isdn/hysdn/hysdn_init.c
@@ -20,10 +20,15 @@
20#include "hysdn_defs.h" 20#include "hysdn_defs.h"
21 21
22static struct pci_device_id hysdn_pci_tbl[] = { 22static struct pci_device_id hysdn_pci_tbl[] = {
23 {PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX, PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_METRO}, 23 { PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
24 {PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX, PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2}, 24 PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_METRO, 0, 0, BD_METRO },
25 {PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX, PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_ERGO}, 25 { PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
26 {PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX, PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO}, 26 PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2, 0, 0, BD_CHAMP2 },
27 { PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
28 PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_ERGO, 0, 0, BD_ERGO },
29 { PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
30 PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO, 0, 0, BD_ERGO },
31
27 { } /* Terminating entry */ 32 { } /* Terminating entry */
28}; 33};
29MODULE_DEVICE_TABLE(pci, hysdn_pci_tbl); 34MODULE_DEVICE_TABLE(pci, hysdn_pci_tbl);
@@ -34,128 +39,7 @@ MODULE_LICENSE("GPL");
34static char *hysdn_init_revision = "$Revision: 1.6.6.6 $"; 39static char *hysdn_init_revision = "$Revision: 1.6.6.6 $";
35static int cardmax; /* number of found cards */ 40static int cardmax; /* number of found cards */
36hysdn_card *card_root = NULL; /* pointer to first card */ 41hysdn_card *card_root = NULL; /* pointer to first card */
37 42static hysdn_card *card_last = NULL; /* pointer to first card */
38/**********************************************/
39/* table assigning PCI-sub ids to board types */
40/* the last entry contains all 0 */
41/**********************************************/
42static struct {
43 unsigned short subid; /* PCI sub id */
44 unsigned char cardtyp; /* card type assigned */
45} pci_subid_map[] = {
46
47 {
48 PCI_SUBDEVICE_ID_HYPERCOPE_METRO, BD_METRO
49 },
50 {
51 PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2, BD_CHAMP2
52 },
53 {
54 PCI_SUBDEVICE_ID_HYPERCOPE_ERGO, BD_ERGO
55 },
56 {
57 PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO, BD_ERGO
58 },
59 {
60 0, 0
61 } /* terminating entry */
62};
63
64
65/*********************************************************************/
66/* search_cards searches for available cards in the pci config data. */
67/* If a card is found, the card structure is allocated and the cards */
68/* ressources are reserved. cardmax is incremented. */
69/*********************************************************************/
70static void
71search_cards(void)
72{
73 struct pci_dev *akt_pcidev = NULL;
74 hysdn_card *card, *card_last;
75 int i;
76
77 card_root = NULL;
78 card_last = NULL;
79 while ((akt_pcidev = pci_find_device(PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
80 akt_pcidev)) != NULL) {
81 if (pci_enable_device(akt_pcidev))
82 continue;
83
84 if (!(card = kzalloc(sizeof(hysdn_card), GFP_KERNEL))) {
85 printk(KERN_ERR "HYSDN: unable to alloc device mem \n");
86 return;
87 }
88 card->myid = cardmax; /* set own id */
89 card->bus = akt_pcidev->bus->number;
90 card->devfn = akt_pcidev->devfn; /* slot + function */
91 card->subsysid = akt_pcidev->subsystem_device;
92 card->irq = akt_pcidev->irq;
93 card->iobase = pci_resource_start(akt_pcidev, PCI_REG_PLX_IO_BASE);
94 card->plxbase = pci_resource_start(akt_pcidev, PCI_REG_PLX_MEM_BASE);
95 card->membase = pci_resource_start(akt_pcidev, PCI_REG_MEMORY_BASE);
96 card->brdtype = BD_NONE; /* unknown */
97 card->debug_flags = DEF_DEB_FLAGS; /* set default debug */
98 card->faxchans = 0; /* default no fax channels */
99 card->bchans = 2; /* and 2 b-channels */
100 for (i = 0; pci_subid_map[i].subid; i++)
101 if (pci_subid_map[i].subid == card->subsysid) {
102 card->brdtype = pci_subid_map[i].cardtyp;
103 break;
104 }
105 if (card->brdtype != BD_NONE) {
106 if (ergo_inithardware(card)) {
107 printk(KERN_WARNING "HYSDN: card at io 0x%04x already in use\n", card->iobase);
108 kfree(card);
109 continue;
110 }
111 } else {
112 printk(KERN_WARNING "HYSDN: unknown card id 0x%04x\n", card->subsysid);
113 kfree(card); /* release mem */
114 continue;
115 }
116 cardmax++;
117 card->next = NULL; /*end of chain */
118 if (card_last)
119 card_last->next = card; /* pointer to next card */
120 else
121 card_root = card;
122 card_last = card; /* new chain end */
123 } /* device found */
124} /* search_cards */
125
126/************************************************************************************/
127/* free_resources frees the acquired PCI resources and returns the allocated memory */
128/************************************************************************************/
129static void
130free_resources(void)
131{
132 hysdn_card *card;
133
134 while (card_root) {
135 card = card_root;
136 if (card->releasehardware)
137 card->releasehardware(card); /* free all hardware resources */
138 card_root = card_root->next; /* remove card from chain */
139 kfree(card); /* return mem */
140
141 } /* while card_root */
142} /* free_resources */
143
144/**************************************************************************/
145/* stop_cards disables (hardware resets) all cards and disables interrupt */
146/**************************************************************************/
147static void
148stop_cards(void)
149{
150 hysdn_card *card;
151
152 card = card_root; /* first in chain */
153 while (card) {
154 if (card->stopcard)
155 card->stopcard(card);
156 card = card->next; /* remove card from chain */
157 } /* while card */
158} /* stop_cards */
159 43
160 44
161/****************************************************************************/ 45/****************************************************************************/
@@ -191,31 +75,138 @@ hysdn_getrev(const char *revision)
191/* and the module is added to the list in /proc/modules, otherwise an error */ 75/* and the module is added to the list in /proc/modules, otherwise an error */
192/* is assumed and the module will not be kept in memory. */ 76/* is assumed and the module will not be kept in memory. */
193/****************************************************************************/ 77/****************************************************************************/
78
79static int __devinit hysdn_pci_init_one(struct pci_dev *akt_pcidev,
80 const struct pci_device_id *ent)
81{
82 hysdn_card *card;
83 int rc;
84
85 rc = pci_enable_device(akt_pcidev);
86 if (rc)
87 return rc;
88
89 if (!(card = kzalloc(sizeof(hysdn_card), GFP_KERNEL))) {
90 printk(KERN_ERR "HYSDN: unable to alloc device mem \n");
91 rc = -ENOMEM;
92 goto err_out;
93 }
94 card->myid = cardmax; /* set own id */
95 card->bus = akt_pcidev->bus->number;
96 card->devfn = akt_pcidev->devfn; /* slot + function */
97 card->subsysid = akt_pcidev->subsystem_device;
98 card->irq = akt_pcidev->irq;
99 card->iobase = pci_resource_start(akt_pcidev, PCI_REG_PLX_IO_BASE);
100 card->plxbase = pci_resource_start(akt_pcidev, PCI_REG_PLX_MEM_BASE);
101 card->membase = pci_resource_start(akt_pcidev, PCI_REG_MEMORY_BASE);
102 card->brdtype = BD_NONE; /* unknown */
103 card->debug_flags = DEF_DEB_FLAGS; /* set default debug */
104 card->faxchans = 0; /* default no fax channels */
105 card->bchans = 2; /* and 2 b-channels */
106 card->brdtype = ent->driver_data;
107
108 if (ergo_inithardware(card)) {
109 printk(KERN_WARNING "HYSDN: card at io 0x%04x already in use\n", card->iobase);
110 rc = -EBUSY;
111 goto err_out_card;
112 }
113
114 cardmax++;
115 card->next = NULL; /*end of chain */
116 if (card_last)
117 card_last->next = card; /* pointer to next card */
118 else
119 card_root = card;
120 card_last = card; /* new chain end */
121
122 pci_set_drvdata(akt_pcidev, card);
123 return 0;
124
125err_out_card:
126 kfree(card);
127err_out:
128 pci_disable_device(akt_pcidev);
129 return rc;
130}
131
132static void __devexit hysdn_pci_remove_one(struct pci_dev *akt_pcidev)
133{
134 hysdn_card *card = pci_get_drvdata(akt_pcidev);
135
136 pci_set_drvdata(akt_pcidev, NULL);
137
138 if (card->stopcard)
139 card->stopcard(card);
140
141#ifdef CONFIG_HYSDN_CAPI
142 hycapi_capi_release(card);
143#endif
144
145 if (card->releasehardware)
146 card->releasehardware(card); /* free all hardware resources */
147
148 if (card == card_root) {
149 card_root = card_root->next;
150 if (!card_root)
151 card_last = NULL;
152 } else {
153 hysdn_card *tmp = card_root;
154 while (tmp) {
155 if (tmp->next == card)
156 tmp->next = card->next;
157 card_last = tmp;
158 tmp = tmp->next;
159 }
160 }
161
162 kfree(card);
163 pci_disable_device(akt_pcidev);
164}
165
166static struct pci_driver hysdn_pci_driver = {
167 .name = "hysdn",
168 .id_table = hysdn_pci_tbl,
169 .probe = hysdn_pci_init_one,
170 .remove = __devexit_p(hysdn_pci_remove_one),
171};
172
173static int hysdn_have_procfs;
174
194static int __init 175static int __init
195hysdn_init(void) 176hysdn_init(void)
196{ 177{
197 char tmp[50]; 178 char tmp[50];
179 int rc;
198 180
199 strcpy(tmp, hysdn_init_revision); 181 strcpy(tmp, hysdn_init_revision);
200 printk(KERN_NOTICE "HYSDN: module Rev: %s loaded\n", hysdn_getrev(tmp)); 182 printk(KERN_NOTICE "HYSDN: module Rev: %s loaded\n", hysdn_getrev(tmp));
201 strcpy(tmp, hysdn_net_revision); 183 strcpy(tmp, hysdn_net_revision);
202 printk(KERN_NOTICE "HYSDN: network interface Rev: %s \n", hysdn_getrev(tmp)); 184 printk(KERN_NOTICE "HYSDN: network interface Rev: %s \n", hysdn_getrev(tmp));
203 search_cards(); 185
186 rc = pci_register_driver(&hysdn_pci_driver);
187 if (rc)
188 return rc;
189
204 printk(KERN_INFO "HYSDN: %d card(s) found.\n", cardmax); 190 printk(KERN_INFO "HYSDN: %d card(s) found.\n", cardmax);
205 191
206 if (hysdn_procconf_init()) { 192 if (!hysdn_procconf_init())
207 free_resources(); /* proc file_sys not created */ 193 hysdn_have_procfs = 1;
208 return (-1); 194
209 }
210#ifdef CONFIG_HYSDN_CAPI 195#ifdef CONFIG_HYSDN_CAPI
211 if(cardmax > 0) { 196 if(cardmax > 0) {
212 if(hycapi_init()) { 197 if(hycapi_init()) {
213 printk(KERN_ERR "HYCAPI: init failed\n"); 198 printk(KERN_ERR "HYCAPI: init failed\n");
214 return(-1); 199
200 if (hysdn_have_procfs)
201 hysdn_procconf_release();
202
203 pci_unregister_driver(&hysdn_pci_driver);
204 return -ESPIPE;
215 } 205 }
216 } 206 }
217#endif /* CONFIG_HYSDN_CAPI */ 207#endif /* CONFIG_HYSDN_CAPI */
218 return (0); /* no error */ 208
209 return 0; /* no error */
219} /* init_module */ 210} /* init_module */
220 211
221 212
@@ -230,20 +221,15 @@ hysdn_init(void)
230static void __exit 221static void __exit
231hysdn_exit(void) 222hysdn_exit(void)
232{ 223{
224 if (hysdn_have_procfs)
225 hysdn_procconf_release();
226
227 pci_unregister_driver(&hysdn_pci_driver);
228
233#ifdef CONFIG_HYSDN_CAPI 229#ifdef CONFIG_HYSDN_CAPI
234 hysdn_card *card;
235#endif /* CONFIG_HYSDN_CAPI */
236 stop_cards();
237#ifdef CONFIG_HYSDN_CAPI
238 card = card_root; /* first in chain */
239 while (card) {
240 hycapi_capi_release(card);
241 card = card->next; /* remove card from chain */
242 } /* while card */
243 hycapi_cleanup(); 230 hycapi_cleanup();
244#endif /* CONFIG_HYSDN_CAPI */ 231#endif /* CONFIG_HYSDN_CAPI */
245 hysdn_procconf_release(); 232
246 free_resources();
247 printk(KERN_NOTICE "HYSDN: module unloaded\n"); 233 printk(KERN_NOTICE "HYSDN: module unloaded\n");
248} /* cleanup_module */ 234} /* cleanup_module */
249 235
diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
index 0a419a0de6..8749fa4ffc 100644
--- a/drivers/kvm/Kconfig
+++ b/drivers/kvm/Kconfig
@@ -17,6 +17,7 @@ if VIRTUALIZATION
17config KVM 17config KVM
18 tristate "Kernel-based Virtual Machine (KVM) support" 18 tristate "Kernel-based Virtual Machine (KVM) support"
19 depends on X86 && EXPERIMENTAL 19 depends on X86 && EXPERIMENTAL
20 select PREEMPT_NOTIFIERS
20 select ANON_INODES 21 select ANON_INODES
21 ---help--- 22 ---help---
22 Support hosting fully virtualized guest machines using hardware 23 Support hosting fully virtualized guest machines using hardware
diff --git a/drivers/kvm/Makefile b/drivers/kvm/Makefile
index c0a789fa9d..e5a8f4d3e9 100644
--- a/drivers/kvm/Makefile
+++ b/drivers/kvm/Makefile
@@ -2,7 +2,7 @@
2# Makefile for Kernel-based Virtual Machine module 2# Makefile for Kernel-based Virtual Machine module
3# 3#
4 4
5kvm-objs := kvm_main.o mmu.o x86_emulate.o 5kvm-objs := kvm_main.o mmu.o x86_emulate.o i8259.o irq.o lapic.o ioapic.o
6obj-$(CONFIG_KVM) += kvm.o 6obj-$(CONFIG_KVM) += kvm.o
7kvm-intel-objs = vmx.o 7kvm-intel-objs = vmx.o
8obj-$(CONFIG_KVM_INTEL) += kvm-intel.o 8obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
diff --git a/drivers/kvm/i8259.c b/drivers/kvm/i8259.c
new file mode 100644
index 0000000000..a679157bc5
--- /dev/null
+++ b/drivers/kvm/i8259.c
@@ -0,0 +1,450 @@
1/*
2 * 8259 interrupt controller emulation
3 *
4 * Copyright (c) 2003-2004 Fabrice Bellard
5 * Copyright (c) 2007 Intel Corporation
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 * Authors:
25 * Yaozu (Eddie) Dong <Eddie.dong@intel.com>
26 * Port from Qemu.
27 */
28#include <linux/mm.h>
29#include "irq.h"
30
31/*
32 * set irq level. If an edge is detected, then the IRR is set to 1
33 */
34static inline void pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
35{
36 int mask;
37 mask = 1 << irq;
38 if (s->elcr & mask) /* level triggered */
39 if (level) {
40 s->irr |= mask;
41 s->last_irr |= mask;
42 } else {
43 s->irr &= ~mask;
44 s->last_irr &= ~mask;
45 }
46 else /* edge triggered */
47 if (level) {
48 if ((s->last_irr & mask) == 0)
49 s->irr |= mask;
50 s->last_irr |= mask;
51 } else
52 s->last_irr &= ~mask;
53}
54
55/*
56 * return the highest priority found in mask (highest = smallest
57 * number). Return 8 if no irq
58 */
59static inline int get_priority(struct kvm_kpic_state *s, int mask)
60{
61 int priority;
62 if (mask == 0)
63 return 8;
64 priority = 0;
65 while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
66 priority++;
67 return priority;
68}
69
70/*
71 * return the pic wanted interrupt. return -1 if none
72 */
73static int pic_get_irq(struct kvm_kpic_state *s)
74{
75 int mask, cur_priority, priority;
76
77 mask = s->irr & ~s->imr;
78 priority = get_priority(s, mask);
79 if (priority == 8)
80 return -1;
81 /*
82 * compute current priority. If special fully nested mode on the
83 * master, the IRQ coming from the slave is not taken into account
84 * for the priority computation.
85 */
86 mask = s->isr;
87 if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
88 mask &= ~(1 << 2);
89 cur_priority = get_priority(s, mask);
90 if (priority < cur_priority)
91 /*
92 * higher priority found: an irq should be generated
93 */
94 return (priority + s->priority_add) & 7;
95 else
96 return -1;
97}
98
99/*
100 * raise irq to CPU if necessary. must be called every time the active
101 * irq may change
102 */
103static void pic_update_irq(struct kvm_pic *s)
104{
105 int irq2, irq;
106
107 irq2 = pic_get_irq(&s->pics[1]);
108 if (irq2 >= 0) {
109 /*
110 * if irq request by slave pic, signal master PIC
111 */
112 pic_set_irq1(&s->pics[0], 2, 1);
113 pic_set_irq1(&s->pics[0], 2, 0);
114 }
115 irq = pic_get_irq(&s->pics[0]);
116 if (irq >= 0)
117 s->irq_request(s->irq_request_opaque, 1);
118 else
119 s->irq_request(s->irq_request_opaque, 0);
120}
121
122void kvm_pic_update_irq(struct kvm_pic *s)
123{
124 pic_update_irq(s);
125}
126
127void kvm_pic_set_irq(void *opaque, int irq, int level)
128{
129 struct kvm_pic *s = opaque;
130
131 pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
132 pic_update_irq(s);
133}
134
135/*
136 * acknowledge interrupt 'irq'
137 */
138static inline void pic_intack(struct kvm_kpic_state *s, int irq)
139{
140 if (s->auto_eoi) {
141 if (s->rotate_on_auto_eoi)
142 s->priority_add = (irq + 1) & 7;
143 } else
144 s->isr |= (1 << irq);
145 /*
146 * We don't clear a level sensitive interrupt here
147 */
148 if (!(s->elcr & (1 << irq)))
149 s->irr &= ~(1 << irq);
150}
151
152int kvm_pic_read_irq(struct kvm_pic *s)
153{
154 int irq, irq2, intno;
155
156 irq = pic_get_irq(&s->pics[0]);
157 if (irq >= 0) {
158 pic_intack(&s->pics[0], irq);
159 if (irq == 2) {
160 irq2 = pic_get_irq(&s->pics[1]);
161 if (irq2 >= 0)
162 pic_intack(&s->pics[1], irq2);
163 else
164 /*
165 * spurious IRQ on slave controller
166 */
167 irq2 = 7;
168 intno = s->pics[1].irq_base + irq2;
169 irq = irq2 + 8;
170 } else
171 intno = s->pics[0].irq_base + irq;
172 } else {
173 /*
174 * spurious IRQ on host controller
175 */
176 irq = 7;
177 intno = s->pics[0].irq_base + irq;
178 }
179 pic_update_irq(s);
180
181 return intno;
182}
183
184static void pic_reset(void *opaque)
185{
186 struct kvm_kpic_state *s = opaque;
187
188 s->last_irr = 0;
189 s->irr = 0;
190 s->imr = 0;
191 s->isr = 0;
192 s->priority_add = 0;
193 s->irq_base = 0;
194 s->read_reg_select = 0;
195 s->poll = 0;
196 s->special_mask = 0;
197 s->init_state = 0;
198 s->auto_eoi = 0;
199 s->rotate_on_auto_eoi = 0;
200 s->special_fully_nested_mode = 0;
201 s->init4 = 0;
202}
203
204static void pic_ioport_write(void *opaque, u32 addr, u32 val)
205{
206 struct kvm_kpic_state *s = opaque;
207 int priority, cmd, irq;
208
209 addr &= 1;
210 if (addr == 0) {
211 if (val & 0x10) {
212 pic_reset(s); /* init */
213 /*
214 * deassert a pending interrupt
215 */
216 s->pics_state->irq_request(s->pics_state->
217 irq_request_opaque, 0);
218 s->init_state = 1;
219 s->init4 = val & 1;
220 if (val & 0x02)
221 printk(KERN_ERR "single mode not supported");
222 if (val & 0x08)
223 printk(KERN_ERR
224 "level sensitive irq not supported");
225 } else if (val & 0x08) {
226 if (val & 0x04)
227 s->poll = 1;
228 if (val & 0x02)
229 s->read_reg_select = val & 1;
230 if (val & 0x40)
231 s->special_mask = (val >> 5) & 1;
232 } else {
233 cmd = val >> 5;
234 switch (cmd) {
235 case 0:
236 case 4:
237 s->rotate_on_auto_eoi = cmd >> 2;
238 break;
239 case 1: /* end of interrupt */
240 case 5:
241 priority = get_priority(s, s->isr);
242 if (priority != 8) {
243 irq = (priority + s->priority_add) & 7;
244 s->isr &= ~(1 << irq);
245 if (cmd == 5)
246 s->priority_add = (irq + 1) & 7;
247 pic_update_irq(s->pics_state);
248 }
249 break;
250 case 3:
251 irq = val & 7;
252 s->isr &= ~(1 << irq);
253 pic_update_irq(s->pics_state);
254 break;
255 case 6:
256 s->priority_add = (val + 1) & 7;
257 pic_update_irq(s->pics_state);
258 break;
259 case 7:
260 irq = val & 7;
261 s->isr &= ~(1 << irq);
262 s->priority_add = (irq + 1) & 7;
263 pic_update_irq(s->pics_state);
264 break;
265 default:
266 break; /* no operation */
267 }
268 }
269 } else
270 switch (s->init_state) {
271 case 0: /* normal mode */
272 s->imr = val;
273 pic_update_irq(s->pics_state);
274 break;
275 case 1:
276 s->irq_base = val & 0xf8;
277 s->init_state = 2;
278 break;
279 case 2:
280 if (s->init4)
281 s->init_state = 3;
282 else
283 s->init_state = 0;
284 break;
285 case 3:
286 s->special_fully_nested_mode = (val >> 4) & 1;
287 s->auto_eoi = (val >> 1) & 1;
288 s->init_state = 0;
289 break;
290 }
291}
292
293static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
294{
295 int ret;
296
297 ret = pic_get_irq(s);
298 if (ret >= 0) {
299 if (addr1 >> 7) {
300 s->pics_state->pics[0].isr &= ~(1 << 2);
301 s->pics_state->pics[0].irr &= ~(1 << 2);
302 }
303 s->irr &= ~(1 << ret);
304 s->isr &= ~(1 << ret);
305 if (addr1 >> 7 || ret != 2)
306 pic_update_irq(s->pics_state);
307 } else {
308 ret = 0x07;
309 pic_update_irq(s->pics_state);
310 }
311
312 return ret;
313}
314
315static u32 pic_ioport_read(void *opaque, u32 addr1)
316{
317 struct kvm_kpic_state *s = opaque;
318 unsigned int addr;
319 int ret;
320
321 addr = addr1;
322 addr &= 1;
323 if (s->poll) {
324 ret = pic_poll_read(s, addr1);
325 s->poll = 0;
326 } else
327 if (addr == 0)
328 if (s->read_reg_select)
329 ret = s->isr;
330 else
331 ret = s->irr;
332 else
333 ret = s->imr;
334 return ret;
335}
336
337static void elcr_ioport_write(void *opaque, u32 addr, u32 val)
338{
339 struct kvm_kpic_state *s = opaque;
340 s->elcr = val & s->elcr_mask;
341}
342
343static u32 elcr_ioport_read(void *opaque, u32 addr1)
344{
345 struct kvm_kpic_state *s = opaque;
346 return s->elcr;
347}
348
349static int picdev_in_range(struct kvm_io_device *this, gpa_t addr)
350{
351 switch (addr) {
352 case 0x20:
353 case 0x21:
354 case 0xa0:
355 case 0xa1:
356 case 0x4d0:
357 case 0x4d1:
358 return 1;
359 default:
360 return 0;
361 }
362}
363
364static void picdev_write(struct kvm_io_device *this,
365 gpa_t addr, int len, const void *val)
366{
367 struct kvm_pic *s = this->private;
368 unsigned char data = *(unsigned char *)val;
369
370 if (len != 1) {
371 if (printk_ratelimit())
372 printk(KERN_ERR "PIC: non byte write\n");
373 return;
374 }
375 switch (addr) {
376 case 0x20:
377 case 0x21:
378 case 0xa0:
379 case 0xa1:
380 pic_ioport_write(&s->pics[addr >> 7], addr, data);
381 break;
382 case 0x4d0:
383 case 0x4d1:
384 elcr_ioport_write(&s->pics[addr & 1], addr, data);
385 break;
386 }
387}
388
389static void picdev_read(struct kvm_io_device *this,
390 gpa_t addr, int len, void *val)
391{
392 struct kvm_pic *s = this->private;
393 unsigned char data = 0;
394
395 if (len != 1) {
396 if (printk_ratelimit())
397 printk(KERN_ERR "PIC: non byte read\n");
398 return;
399 }
400 switch (addr) {
401 case 0x20:
402 case 0x21:
403 case 0xa0:
404 case 0xa1:
405 data = pic_ioport_read(&s->pics[addr >> 7], addr);
406 break;
407 case 0x4d0:
408 case 0x4d1:
409 data = elcr_ioport_read(&s->pics[addr & 1], addr);
410 break;
411 }
412 *(unsigned char *)val = data;
413}
414
415/*
416 * callback when PIC0 irq status changed
417 */
418static void pic_irq_request(void *opaque, int level)
419{
420 struct kvm *kvm = opaque;
421 struct kvm_vcpu *vcpu = kvm->vcpus[0];
422
423 pic_irqchip(kvm)->output = level;
424 if (vcpu)
425 kvm_vcpu_kick(vcpu);
426}
427
428struct kvm_pic *kvm_create_pic(struct kvm *kvm)
429{
430 struct kvm_pic *s;
431 s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
432 if (!s)
433 return NULL;
434 s->pics[0].elcr_mask = 0xf8;
435 s->pics[1].elcr_mask = 0xde;
436 s->irq_request = pic_irq_request;
437 s->irq_request_opaque = kvm;
438 s->pics[0].pics_state = s;
439 s->pics[1].pics_state = s;
440
441 /*
442 * Initialize PIO device
443 */
444 s->dev.read = picdev_read;
445 s->dev.write = picdev_write;
446 s->dev.in_range = picdev_in_range;
447 s->dev.private = s;
448 kvm_io_bus_register_dev(&kvm->pio_bus, &s->dev);
449 return s;
450}
diff --git a/drivers/kvm/ioapic.c b/drivers/kvm/ioapic.c
new file mode 100644
index 0000000000..c7992e667f
--- /dev/null
+++ b/drivers/kvm/ioapic.c
@@ -0,0 +1,388 @@
1/*
2 * Copyright (C) 2001 MandrakeSoft S.A.
3 *
4 * MandrakeSoft S.A.
5 * 43, rue d'Aboukir
6 * 75002 Paris - France
7 * http://www.linux-mandrake.com/
8 * http://www.mandrakesoft.com/
9 *
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Yunhong Jiang <yunhong.jiang@intel.com>
25 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
26 * Based on Xen 3.1 code.
27 */
28
29#include "kvm.h"
30#include <linux/kvm.h>
31#include <linux/mm.h>
32#include <linux/highmem.h>
33#include <linux/smp.h>
34#include <linux/hrtimer.h>
35#include <linux/io.h>
36#include <asm/processor.h>
37#include <asm/msr.h>
38#include <asm/page.h>
39#include <asm/current.h>
40#include <asm/apicdef.h>
41#include <asm/io_apic.h>
42#include "irq.h"
43/* #define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
44#define ioapic_debug(fmt, arg...)
45static void ioapic_deliver(struct kvm_ioapic *vioapic, int irq);
46
47static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
48 unsigned long addr,
49 unsigned long length)
50{
51 unsigned long result = 0;
52
53 switch (ioapic->ioregsel) {
54 case IOAPIC_REG_VERSION:
55 result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
56 | (IOAPIC_VERSION_ID & 0xff));
57 break;
58
59 case IOAPIC_REG_APIC_ID:
60 case IOAPIC_REG_ARB_ID:
61 result = ((ioapic->id & 0xf) << 24);
62 break;
63
64 default:
65 {
66 u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
67 u64 redir_content;
68
69 ASSERT(redir_index < IOAPIC_NUM_PINS);
70
71 redir_content = ioapic->redirtbl[redir_index].bits;
72 result = (ioapic->ioregsel & 0x1) ?
73 (redir_content >> 32) & 0xffffffff :
74 redir_content & 0xffffffff;
75 break;
76 }
77 }
78
79 return result;
80}
81
82static void ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
83{
84 union ioapic_redir_entry *pent;
85
86 pent = &ioapic->redirtbl[idx];
87
88 if (!pent->fields.mask) {
89 ioapic_deliver(ioapic, idx);
90 if (pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
91 pent->fields.remote_irr = 1;
92 }
93 if (!pent->fields.trig_mode)
94 ioapic->irr &= ~(1 << idx);
95}
96
97static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
98{
99 unsigned index;
100
101 switch (ioapic->ioregsel) {
102 case IOAPIC_REG_VERSION:
103 /* Writes are ignored. */
104 break;
105
106 case IOAPIC_REG_APIC_ID:
107 ioapic->id = (val >> 24) & 0xf;
108 break;
109
110 case IOAPIC_REG_ARB_ID:
111 break;
112
113 default:
114 index = (ioapic->ioregsel - 0x10) >> 1;
115
116 ioapic_debug("change redir index %x val %x", index, val);
117 if (index >= IOAPIC_NUM_PINS)
118 return;
119 if (ioapic->ioregsel & 1) {
120 ioapic->redirtbl[index].bits &= 0xffffffff;
121 ioapic->redirtbl[index].bits |= (u64) val << 32;
122 } else {
123 ioapic->redirtbl[index].bits &= ~0xffffffffULL;
124 ioapic->redirtbl[index].bits |= (u32) val;
125 ioapic->redirtbl[index].fields.remote_irr = 0;
126 }
127 if (ioapic->irr & (1 << index))
128 ioapic_service(ioapic, index);
129 break;
130 }
131}
132
133static void ioapic_inj_irq(struct kvm_ioapic *ioapic,
134 struct kvm_lapic *target,
135 u8 vector, u8 trig_mode, u8 delivery_mode)
136{
137 ioapic_debug("irq %d trig %d deliv %d", vector, trig_mode,
138 delivery_mode);
139
140 ASSERT((delivery_mode == dest_Fixed) ||
141 (delivery_mode == dest_LowestPrio));
142
143 kvm_apic_set_irq(target, vector, trig_mode);
144}
145
146static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
147 u8 dest_mode)
148{
149 u32 mask = 0;
150 int i;
151 struct kvm *kvm = ioapic->kvm;
152 struct kvm_vcpu *vcpu;
153
154 ioapic_debug("dest %d dest_mode %d", dest, dest_mode);
155
156 if (dest_mode == 0) { /* Physical mode. */
157 if (dest == 0xFF) { /* Broadcast. */
158 for (i = 0; i < KVM_MAX_VCPUS; ++i)
159 if (kvm->vcpus[i] && kvm->vcpus[i]->apic)
160 mask |= 1 << i;
161 return mask;
162 }
163 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
164 vcpu = kvm->vcpus[i];
165 if (!vcpu)
166 continue;
167 if (kvm_apic_match_physical_addr(vcpu->apic, dest)) {
168 if (vcpu->apic)
169 mask = 1 << i;
170 break;
171 }
172 }
173 } else if (dest != 0) /* Logical mode, MDA non-zero. */
174 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
175 vcpu = kvm->vcpus[i];
176 if (!vcpu)
177 continue;
178 if (vcpu->apic &&
179 kvm_apic_match_logical_addr(vcpu->apic, dest))
180 mask |= 1 << vcpu->vcpu_id;
181 }
182 ioapic_debug("mask %x", mask);
183 return mask;
184}
185
186static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
187{
188 u8 dest = ioapic->redirtbl[irq].fields.dest_id;
189 u8 dest_mode = ioapic->redirtbl[irq].fields.dest_mode;
190 u8 delivery_mode = ioapic->redirtbl[irq].fields.delivery_mode;
191 u8 vector = ioapic->redirtbl[irq].fields.vector;
192 u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode;
193 u32 deliver_bitmask;
194 struct kvm_lapic *target;
195 struct kvm_vcpu *vcpu;
196 int vcpu_id;
197
198 ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
199 "vector=%x trig_mode=%x",
200 dest, dest_mode, delivery_mode, vector, trig_mode);
201
202 deliver_bitmask = ioapic_get_delivery_bitmask(ioapic, dest, dest_mode);
203 if (!deliver_bitmask) {
204 ioapic_debug("no target on destination");
205 return;
206 }
207
208 switch (delivery_mode) {
209 case dest_LowestPrio:
210 target =
211 kvm_apic_round_robin(ioapic->kvm, vector, deliver_bitmask);
212 if (target != NULL)
213 ioapic_inj_irq(ioapic, target, vector,
214 trig_mode, delivery_mode);
215 else
216 ioapic_debug("null round robin: "
217 "mask=%x vector=%x delivery_mode=%x",
218 deliver_bitmask, vector, dest_LowestPrio);
219 break;
220 case dest_Fixed:
221 for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
222 if (!(deliver_bitmask & (1 << vcpu_id)))
223 continue;
224 deliver_bitmask &= ~(1 << vcpu_id);
225 vcpu = ioapic->kvm->vcpus[vcpu_id];
226 if (vcpu) {
227 target = vcpu->apic;
228 ioapic_inj_irq(ioapic, target, vector,
229 trig_mode, delivery_mode);
230 }
231 }
232 break;
233
234 /* TODO: NMI */
235 default:
236 printk(KERN_WARNING "Unsupported delivery mode %d\n",
237 delivery_mode);
238 break;
239 }
240}
241
242void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
243{
244 u32 old_irr = ioapic->irr;
245 u32 mask = 1 << irq;
246 union ioapic_redir_entry entry;
247
248 if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
249 entry = ioapic->redirtbl[irq];
250 level ^= entry.fields.polarity;
251 if (!level)
252 ioapic->irr &= ~mask;
253 else {
254 ioapic->irr |= mask;
255 if ((!entry.fields.trig_mode && old_irr != ioapic->irr)
256 || !entry.fields.remote_irr)
257 ioapic_service(ioapic, irq);
258 }
259 }
260}
261
262static int get_eoi_gsi(struct kvm_ioapic *ioapic, int vector)
263{
264 int i;
265
266 for (i = 0; i < IOAPIC_NUM_PINS; i++)
267 if (ioapic->redirtbl[i].fields.vector == vector)
268 return i;
269 return -1;
270}
271
272void kvm_ioapic_update_eoi(struct kvm *kvm, int vector)
273{
274 struct kvm_ioapic *ioapic = kvm->vioapic;
275 union ioapic_redir_entry *ent;
276 int gsi;
277
278 gsi = get_eoi_gsi(ioapic, vector);
279 if (gsi == -1) {
280 printk(KERN_WARNING "Can't find redir item for %d EOI\n",
281 vector);
282 return;
283 }
284
285 ent = &ioapic->redirtbl[gsi];
286 ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
287
288 ent->fields.remote_irr = 0;
289 if (!ent->fields.mask && (ioapic->irr & (1 << gsi)))
290 ioapic_deliver(ioapic, gsi);
291}
292
293static int ioapic_in_range(struct kvm_io_device *this, gpa_t addr)
294{
295 struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
296
297 return ((addr >= ioapic->base_address &&
298 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
299}
300
301static void ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
302 void *val)
303{
304 struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
305 u32 result;
306
307 ioapic_debug("addr %lx", (unsigned long)addr);
308 ASSERT(!(addr & 0xf)); /* check alignment */
309
310 addr &= 0xff;
311 switch (addr) {
312 case IOAPIC_REG_SELECT:
313 result = ioapic->ioregsel;
314 break;
315
316 case IOAPIC_REG_WINDOW:
317 result = ioapic_read_indirect(ioapic, addr, len);
318 break;
319
320 default:
321 result = 0;
322 break;
323 }
324 switch (len) {
325 case 8:
326 *(u64 *) val = result;
327 break;
328 case 1:
329 case 2:
330 case 4:
331 memcpy(val, (char *)&result, len);
332 break;
333 default:
334 printk(KERN_WARNING "ioapic: wrong length %d\n", len);
335 }
336}
337
338static void ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
339 const void *val)
340{
341 struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
342 u32 data;
343
344 ioapic_debug("ioapic_mmio_write addr=%lx len=%d val=%p\n",
345 addr, len, val);
346 ASSERT(!(addr & 0xf)); /* check alignment */
347 if (len == 4 || len == 8)
348 data = *(u32 *) val;
349 else {
350 printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
351 return;
352 }
353
354 addr &= 0xff;
355 switch (addr) {
356 case IOAPIC_REG_SELECT:
357 ioapic->ioregsel = data;
358 break;
359
360 case IOAPIC_REG_WINDOW:
361 ioapic_write_indirect(ioapic, data);
362 break;
363
364 default:
365 break;
366 }
367}
368
369int kvm_ioapic_init(struct kvm *kvm)
370{
371 struct kvm_ioapic *ioapic;
372 int i;
373
374 ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
375 if (!ioapic)
376 return -ENOMEM;
377 kvm->vioapic = ioapic;
378 for (i = 0; i < IOAPIC_NUM_PINS; i++)
379 ioapic->redirtbl[i].fields.mask = 1;
380 ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
381 ioapic->dev.read = ioapic_mmio_read;
382 ioapic->dev.write = ioapic_mmio_write;
383 ioapic->dev.in_range = ioapic_in_range;
384 ioapic->dev.private = ioapic;
385 ioapic->kvm = kvm;
386 kvm_io_bus_register_dev(&kvm->mmio_bus, &ioapic->dev);
387 return 0;
388}
diff --git a/drivers/kvm/irq.c b/drivers/kvm/irq.c
new file mode 100644
index 0000000000..7628c7ff62
--- /dev/null
+++ b/drivers/kvm/irq.c
@@ -0,0 +1,98 @@
1/*
2 * irq.c: API for in kernel interrupt controller
3 * Copyright (c) 2007, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Authors:
18 * Yaozu (Eddie) Dong <Eddie.dong@intel.com>
19 *
20 */
21
22#include <linux/module.h>
23
24#include "kvm.h"
25#include "irq.h"
26
27/*
28 * check if there is pending interrupt without
29 * intack.
30 */
31int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
32{
33 struct kvm_pic *s;
34
35 if (kvm_apic_has_interrupt(v) == -1) { /* LAPIC */
36 if (kvm_apic_accept_pic_intr(v)) {
37 s = pic_irqchip(v->kvm); /* PIC */
38 return s->output;
39 } else
40 return 0;
41 }
42 return 1;
43}
44EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
45
46/*
47 * Read pending interrupt vector and intack.
48 */
49int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
50{
51 struct kvm_pic *s;
52 int vector;
53
54 vector = kvm_get_apic_interrupt(v); /* APIC */
55 if (vector == -1) {
56 if (kvm_apic_accept_pic_intr(v)) {
57 s = pic_irqchip(v->kvm);
58 s->output = 0; /* PIC */
59 vector = kvm_pic_read_irq(s);
60 }
61 }
62 return vector;
63}
64EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
65
/*
 * IPI handler used to bounce a remote CPU out of guest mode; the VM
 * exit caused by the IPI itself is the point, so the body is empty
 * apart from optional debug tracing.
 */
static void vcpu_kick_intr(void *info)
{
#ifdef DEBUG
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
	printk(KERN_DEBUG "vcpu_kick_intr %p \n", vcpu);
#endif
}
73
74void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
75{
76 int ipi_pcpu = vcpu->cpu;
77
78 if (waitqueue_active(&vcpu->wq)) {
79 wake_up_interruptible(&vcpu->wq);
80 ++vcpu->stat.halt_wakeup;
81 }
82 if (vcpu->guest_mode)
83 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
84}
85
/*
 * Inject any timer interrupts that accumulated while the vcpu was
 * not running.  Only the LAPIC timer is emulated in-kernel today.
 */
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
{
	kvm_inject_apic_timer_irqs(vcpu);
	/* TODO: PIT, RTC etc. */
}
EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
92
/*
 * Post-injection hook for timer interrupts: lets the LAPIC timer
 * account for vector @vec having been delivered.
 */
void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
{
	kvm_apic_timer_intr_post(vcpu, vec);
	/* TODO: PIT, RTC etc. */
}
EXPORT_SYMBOL_GPL(kvm_timer_intr_post);
diff --git a/drivers/kvm/irq.h b/drivers/kvm/irq.h
new file mode 100644
index 0000000000..11fc014e2b
--- /dev/null
+++ b/drivers/kvm/irq.h
@@ -0,0 +1,165 @@
1/*
2 * irq.h: in kernel interrupt controller related definitions
3 * Copyright (c) 2007, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Authors:
18 * Yaozu (Eddie) Dong <Eddie.dong@intel.com>
19 *
20 */
21
22#ifndef __IRQ_H
23#define __IRQ_H
24
25#include "kvm.h"
26
27typedef void irq_request_func(void *opaque, int level);
28
/*
 * Software state of one i8259 interrupt controller (master or slave
 * of the cascaded pair in struct kvm_pic).
 */
struct kvm_kpic_state {
	u8 last_irr;	/* edge detection */
	u8 irr;		/* interrupt request register */
	u8 imr;		/* interrupt mask register */
	u8 isr;		/* interrupt service register */
	u8 priority_add;	/* highest irq priority */
	u8 irq_base;		/* vector base for this chip's irqs */
	u8 read_reg_select;	/* which register the next port read returns */
	u8 poll;		/* poll-mode flag */
	u8 special_mask;	/* special mask mode enabled */
	u8 init_state;		/* init (ICW) sequence progress */
	u8 auto_eoi;		/* automatic EOI mode */
	u8 rotate_on_auto_eoi;
	u8 special_fully_nested_mode;
	u8 init4;		/* true if 4 byte init */
	u8 elcr;		/* PIIX edge/trigger selection */
	u8 elcr_mask;		/* writable bits of elcr (set in kvm_create_pic) */
	struct kvm_pic *pics_state;	/* back-pointer to the owning kvm_pic */
};
48
/*
 * In-kernel model of the PC's cascaded i8259 pair, exposed to the
 * guest as a PIO device (ports 0x20/0x21, 0xa0/0xa1 and the ELCR
 * ports 0x4d0/0x4d1 -- see picdev_read/picdev_write).
 */
struct kvm_pic {
	struct kvm_kpic_state pics[2]; /* 0 is master pic, 1 is slave pic */
	irq_request_func *irq_request;	/* called when the INT output changes */
	void *irq_request_opaque;	/* cookie handed back to irq_request */
	int output;		/* intr from master PIC */
	struct kvm_io_device dev;	/* PIO-bus registration */
};
56
57struct kvm_pic *kvm_create_pic(struct kvm *kvm);
58void kvm_pic_set_irq(void *opaque, int irq, int level);
59int kvm_pic_read_irq(struct kvm_pic *s);
60int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
61int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
62void kvm_pic_update_irq(struct kvm_pic *s);
63
64#define IOAPIC_NUM_PINS KVM_IOAPIC_NUM_PINS
65#define IOAPIC_VERSION_ID 0x11 /* IOAPIC version */
66#define IOAPIC_EDGE_TRIG 0
67#define IOAPIC_LEVEL_TRIG 1
68
69#define IOAPIC_DEFAULT_BASE_ADDRESS 0xfec00000
70#define IOAPIC_MEM_LENGTH 0x100
71
72/* Direct registers. */
73#define IOAPIC_REG_SELECT 0x00
74#define IOAPIC_REG_WINDOW 0x10
75#define IOAPIC_REG_EOI 0x40 /* IA64 IOSAPIC only */
76
77/* Indirect registers. */
78#define IOAPIC_REG_APIC_ID 0x00 /* x86 IOAPIC only */
79#define IOAPIC_REG_VERSION 0x01
80#define IOAPIC_REG_ARB_ID 0x02 /* x86 IOAPIC only */
81
/*
 * In-kernel IOAPIC model.  Registered as an MMIO device covering
 * IOAPIC_MEM_LENGTH bytes at base_address (see ioapic_mmio_read/
 * ioapic_mmio_write in ioapic.c).
 */
struct kvm_ioapic {
	u64 base_address;	/* guest-physical base of the MMIO window */
	u32 ioregsel;		/* currently selected indirect register */
	u32 id;			/* IOAPIC ID register (low 4 bits used) */
	u32 irr;		/* one pending-interrupt bit per pin */
	u32 pad;
	/* One 64-bit redirection table entry, raw or field-by-field.
	 * NOTE(review): the bitfield layout mirrors the hardware
	 * register format; do not reorder fields. */
	union ioapic_redir_entry {
		u64 bits;
		struct {
			u8 vector;
			u8 delivery_mode:3;
			u8 dest_mode:1;	/* 0 = physical, 1 = logical */
			u8 delivery_status:1;
			u8 polarity:1;
			u8 remote_irr:1;
			u8 trig_mode:1;	/* 0 = edge, 1 = level */
			u8 mask:1;
			u8 reserve:7;
			u8 reserved[4];
			u8 dest_id;
		} fields;
	} redirtbl[IOAPIC_NUM_PINS];
	struct kvm_io_device dev;	/* MMIO-bus registration */
	struct kvm *kvm;		/* owning VM */
};
107
/*
 * In-kernel local APIC state, one per vcpu.
 */
struct kvm_lapic {
	unsigned long base_address;	/* guest-physical MMIO base */
	struct kvm_io_device dev;	/* MMIO-bus registration */
	struct {
		atomic_t pending;	/* timer ticks not yet injected */
		s64 period;		/* unit: ns */
		u32 divide_count;
		ktime_t last_update;
		struct hrtimer dev;	/* host timer backing the APIC timer */
	} timer;
	struct kvm_vcpu *vcpu;		/* back-pointer to owning vcpu */
	struct page *regs_page;		/* backing page for regs */
	void *regs;			/* mapped APIC register area */
};
122
/*
 * Debug-only assertion: BUG() with file/line and the failed
 * expression when DEBUG is defined; compiled out entirely otherwise
 * (so side effects in the condition must be avoided).
 */
#ifdef DEBUG
#define ASSERT(x)  \
do { \
	if (!(x)) { \
		printk(KERN_EMERG "assertion failed %s: %d: %s\n", \
		       __FILE__, __LINE__, #x); \
		BUG(); \
	} \
} while (0)
#else
#define ASSERT(x) do { } while (0)
#endif
135
136void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
137int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
138int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
139int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
140int kvm_create_lapic(struct kvm_vcpu *vcpu);
141void kvm_lapic_reset(struct kvm_vcpu *vcpu);
142void kvm_free_apic(struct kvm_lapic *apic);
143u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
144void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
145void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
146struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
147 unsigned long bitmap);
148u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
149void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
150int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
151void kvm_ioapic_update_eoi(struct kvm *kvm, int vector);
152int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
153int kvm_apic_set_irq(struct kvm_lapic *apic, u8 vec, u8 trig);
154void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu);
155int kvm_ioapic_init(struct kvm *kvm);
156void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
157int kvm_lapic_enabled(struct kvm_vcpu *vcpu);
158int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
159void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
160void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
161void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
162void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
163void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
164
165#endif
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 336be86c6f..ad0813843a 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -13,60 +13,38 @@
13#include <linux/signal.h> 13#include <linux/signal.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/preempt.h>
16#include <asm/signal.h> 17#include <asm/signal.h>
17 18
18#include "vmx.h"
19#include <linux/kvm.h> 19#include <linux/kvm.h>
20#include <linux/kvm_para.h> 20#include <linux/kvm_para.h>
21 21
22#define CR0_PE_MASK (1ULL << 0) 22#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
23#define CR0_MP_MASK (1ULL << 1) 23#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
24#define CR0_TS_MASK (1ULL << 3) 24#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)
25#define CR0_NE_MASK (1ULL << 5)
26#define CR0_WP_MASK (1ULL << 16)
27#define CR0_NW_MASK (1ULL << 29)
28#define CR0_CD_MASK (1ULL << 30)
29#define CR0_PG_MASK (1ULL << 31)
30
31#define CR3_WPT_MASK (1ULL << 3)
32#define CR3_PCD_MASK (1ULL << 4)
33
34#define CR3_RESEVED_BITS 0x07ULL
35#define CR3_L_MODE_RESEVED_BITS (~((1ULL << 40) - 1) | 0x0fe7ULL)
36#define CR3_FLAGS_MASK ((1ULL << 5) - 1)
37
38#define CR4_VME_MASK (1ULL << 0)
39#define CR4_PSE_MASK (1ULL << 4)
40#define CR4_PAE_MASK (1ULL << 5)
41#define CR4_PGE_MASK (1ULL << 7)
42#define CR4_VMXE_MASK (1ULL << 13)
43 25
44#define KVM_GUEST_CR0_MASK \ 26#define KVM_GUEST_CR0_MASK \
45 (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \ 27 (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
46 | CR0_NW_MASK | CR0_CD_MASK) 28 | X86_CR0_NW | X86_CR0_CD)
47#define KVM_VM_CR0_ALWAYS_ON \ 29#define KVM_VM_CR0_ALWAYS_ON \
48 (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK | CR0_TS_MASK \ 30 (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
49 | CR0_MP_MASK) 31 | X86_CR0_MP)
50#define KVM_GUEST_CR4_MASK \ 32#define KVM_GUEST_CR4_MASK \
51 (CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK) 33 (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
52#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK) 34#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
53#define KVM_RMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK | CR4_VME_MASK) 35#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
54 36
55#define INVALID_PAGE (~(hpa_t)0) 37#define INVALID_PAGE (~(hpa_t)0)
56#define UNMAPPED_GVA (~(gpa_t)0) 38#define UNMAPPED_GVA (~(gpa_t)0)
57 39
58#define KVM_MAX_VCPUS 4 40#define KVM_MAX_VCPUS 4
59#define KVM_ALIAS_SLOTS 4 41#define KVM_ALIAS_SLOTS 4
60#define KVM_MEMORY_SLOTS 4 42#define KVM_MEMORY_SLOTS 8
61#define KVM_NUM_MMU_PAGES 1024 43#define KVM_NUM_MMU_PAGES 1024
62#define KVM_MIN_FREE_MMU_PAGES 5 44#define KVM_MIN_FREE_MMU_PAGES 5
63#define KVM_REFILL_PAGES 25 45#define KVM_REFILL_PAGES 25
64#define KVM_MAX_CPUID_ENTRIES 40 46#define KVM_MAX_CPUID_ENTRIES 40
65 47
66#define FX_IMAGE_SIZE 512
67#define FX_IMAGE_ALIGN 16
68#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)
69
70#define DE_VECTOR 0 48#define DE_VECTOR 0
71#define NM_VECTOR 7 49#define NM_VECTOR 7
72#define DF_VECTOR 8 50#define DF_VECTOR 8
@@ -158,15 +136,8 @@ struct kvm_mmu_page {
158 }; 136 };
159}; 137};
160 138
161struct vmcs {
162 u32 revision_id;
163 u32 abort;
164 char data[0];
165};
166
167#define vmx_msr_entry kvm_msr_entry
168
169struct kvm_vcpu; 139struct kvm_vcpu;
140extern struct kmem_cache *kvm_vcpu_cache;
170 141
171/* 142/*
172 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level 143 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
@@ -260,6 +231,7 @@ struct kvm_stat {
260 u32 signal_exits; 231 u32 signal_exits;
261 u32 irq_window_exits; 232 u32 irq_window_exits;
262 u32 halt_exits; 233 u32 halt_exits;
234 u32 halt_wakeup;
263 u32 request_irq_exits; 235 u32 request_irq_exits;
264 u32 irq_exits; 236 u32 irq_exits;
265 u32 light_exits; 237 u32 light_exits;
@@ -328,21 +300,17 @@ void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
328 300
329struct kvm_vcpu { 301struct kvm_vcpu {
330 struct kvm *kvm; 302 struct kvm *kvm;
331 union { 303 struct preempt_notifier preempt_notifier;
332 struct vmcs *vmcs; 304 int vcpu_id;
333 struct vcpu_svm *svm;
334 };
335 struct mutex mutex; 305 struct mutex mutex;
336 int cpu; 306 int cpu;
337 int launched;
338 u64 host_tsc; 307 u64 host_tsc;
339 struct kvm_run *run; 308 struct kvm_run *run;
340 int interrupt_window_open; 309 int interrupt_window_open;
341 int guest_mode; 310 int guest_mode;
342 unsigned long requests; 311 unsigned long requests;
343 unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */ 312 unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
344#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long) 313 DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
345 unsigned long irq_pending[NR_IRQ_WORDS];
346 unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */ 314 unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
347 unsigned long rip; /* needs vcpu_load_rsp_rip() */ 315 unsigned long rip; /* needs vcpu_load_rsp_rip() */
348 316
@@ -357,15 +325,15 @@ struct kvm_vcpu {
357 u64 pdptrs[4]; /* pae */ 325 u64 pdptrs[4]; /* pae */
358 u64 shadow_efer; 326 u64 shadow_efer;
359 u64 apic_base; 327 u64 apic_base;
328 struct kvm_lapic *apic; /* kernel irqchip context */
329#define VCPU_MP_STATE_RUNNABLE 0
330#define VCPU_MP_STATE_UNINITIALIZED 1
331#define VCPU_MP_STATE_INIT_RECEIVED 2
332#define VCPU_MP_STATE_SIPI_RECEIVED 3
333#define VCPU_MP_STATE_HALTED 4
334 int mp_state;
335 int sipi_vector;
360 u64 ia32_misc_enable_msr; 336 u64 ia32_misc_enable_msr;
361 int nmsrs;
362 int save_nmsrs;
363 int msr_offset_efer;
364#ifdef CONFIG_X86_64
365 int msr_offset_kernel_gs_base;
366#endif
367 struct vmx_msr_entry *guest_msrs;
368 struct vmx_msr_entry *host_msrs;
369 337
370 struct kvm_mmu mmu; 338 struct kvm_mmu mmu;
371 339
@@ -379,16 +347,10 @@ struct kvm_vcpu {
379 347
380 struct kvm_guest_debug guest_debug; 348 struct kvm_guest_debug guest_debug;
381 349
382 char fx_buf[FX_BUF_SIZE]; 350 struct i387_fxsave_struct host_fx_image;
383 char *host_fx_image; 351 struct i387_fxsave_struct guest_fx_image;
384 char *guest_fx_image;
385 int fpu_active; 352 int fpu_active;
386 int guest_fpu_loaded; 353 int guest_fpu_loaded;
387 struct vmx_host_state {
388 int loaded;
389 u16 fs_sel, gs_sel, ldt_sel;
390 int fs_gs_ldt_reload_needed;
391 } vmx_host_state;
392 354
393 int mmio_needed; 355 int mmio_needed;
394 int mmio_read_completed; 356 int mmio_read_completed;
@@ -399,6 +361,7 @@ struct kvm_vcpu {
399 gva_t mmio_fault_cr2; 361 gva_t mmio_fault_cr2;
400 struct kvm_pio_request pio; 362 struct kvm_pio_request pio;
401 void *pio_data; 363 void *pio_data;
364 wait_queue_head_t wq;
402 365
403 int sigset_active; 366 int sigset_active;
404 sigset_t sigset; 367 sigset_t sigset;
@@ -436,7 +399,7 @@ struct kvm_memory_slot {
436}; 399};
437 400
438struct kvm { 401struct kvm {
439 spinlock_t lock; /* protects everything except vcpus */ 402 struct mutex lock; /* protects everything except vcpus */
440 int naliases; 403 int naliases;
441 struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS]; 404 struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
442 int nmemslots; 405 int nmemslots;
@@ -447,39 +410,59 @@ struct kvm {
447 struct list_head active_mmu_pages; 410 struct list_head active_mmu_pages;
448 int n_free_mmu_pages; 411 int n_free_mmu_pages;
449 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; 412 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
450 int nvcpus; 413 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
451 struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
452 int memory_config_version;
453 int busy;
454 unsigned long rmap_overflow; 414 unsigned long rmap_overflow;
455 struct list_head vm_list; 415 struct list_head vm_list;
456 struct file *filp; 416 struct file *filp;
457 struct kvm_io_bus mmio_bus; 417 struct kvm_io_bus mmio_bus;
458 struct kvm_io_bus pio_bus; 418 struct kvm_io_bus pio_bus;
419 struct kvm_pic *vpic;
420 struct kvm_ioapic *vioapic;
421 int round_robin_prev_vcpu;
459}; 422};
460 423
424static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
425{
426 return kvm->vpic;
427}
428
429static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
430{
431 return kvm->vioapic;
432}
433
434static inline int irqchip_in_kernel(struct kvm *kvm)
435{
436 return pic_irqchip(kvm) != 0;
437}
438
461struct descriptor_table { 439struct descriptor_table {
462 u16 limit; 440 u16 limit;
463 unsigned long base; 441 unsigned long base;
464} __attribute__((packed)); 442} __attribute__((packed));
465 443
466struct kvm_arch_ops { 444struct kvm_x86_ops {
467 int (*cpu_has_kvm_support)(void); /* __init */ 445 int (*cpu_has_kvm_support)(void); /* __init */
468 int (*disabled_by_bios)(void); /* __init */ 446 int (*disabled_by_bios)(void); /* __init */
469 void (*hardware_enable)(void *dummy); /* __init */ 447 void (*hardware_enable)(void *dummy); /* __init */
470 void (*hardware_disable)(void *dummy); 448 void (*hardware_disable)(void *dummy);
449 void (*check_processor_compatibility)(void *rtn);
471 int (*hardware_setup)(void); /* __init */ 450 int (*hardware_setup)(void); /* __init */
472 void (*hardware_unsetup)(void); /* __exit */ 451 void (*hardware_unsetup)(void); /* __exit */
473 452
474 int (*vcpu_create)(struct kvm_vcpu *vcpu); 453 /* Create, but do not attach this VCPU */
454 struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
475 void (*vcpu_free)(struct kvm_vcpu *vcpu); 455 void (*vcpu_free)(struct kvm_vcpu *vcpu);
456 void (*vcpu_reset)(struct kvm_vcpu *vcpu);
476 457
477 void (*vcpu_load)(struct kvm_vcpu *vcpu); 458 void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
459 void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
478 void (*vcpu_put)(struct kvm_vcpu *vcpu); 460 void (*vcpu_put)(struct kvm_vcpu *vcpu);
479 void (*vcpu_decache)(struct kvm_vcpu *vcpu); 461 void (*vcpu_decache)(struct kvm_vcpu *vcpu);
480 462
481 int (*set_guest_debug)(struct kvm_vcpu *vcpu, 463 int (*set_guest_debug)(struct kvm_vcpu *vcpu,
482 struct kvm_debug_guest *dbg); 464 struct kvm_debug_guest *dbg);
465 void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
483 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); 466 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
484 int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); 467 int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
485 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); 468 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
@@ -505,27 +488,43 @@ struct kvm_arch_ops {
505 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); 488 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
506 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); 489 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
507 490
508 void (*invlpg)(struct kvm_vcpu *vcpu, gva_t addr);
509 void (*tlb_flush)(struct kvm_vcpu *vcpu); 491 void (*tlb_flush)(struct kvm_vcpu *vcpu);
510 void (*inject_page_fault)(struct kvm_vcpu *vcpu, 492 void (*inject_page_fault)(struct kvm_vcpu *vcpu,
511 unsigned long addr, u32 err_code); 493 unsigned long addr, u32 err_code);
512 494
513 void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code); 495 void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);
514 496
515 int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run); 497 void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
516 int (*vcpu_setup)(struct kvm_vcpu *vcpu); 498 int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
517 void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); 499 void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
518 void (*patch_hypercall)(struct kvm_vcpu *vcpu, 500 void (*patch_hypercall)(struct kvm_vcpu *vcpu,
519 unsigned char *hypercall_addr); 501 unsigned char *hypercall_addr);
502 int (*get_irq)(struct kvm_vcpu *vcpu);
503 void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
504 void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
505 void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
506 struct kvm_run *run);
520}; 507};
521 508
522extern struct kvm_arch_ops *kvm_arch_ops; 509extern struct kvm_x86_ops *kvm_x86_ops;
510
511/* The guest did something we don't support. */
512#define pr_unimpl(vcpu, fmt, ...) \
513 do { \
514 if (printk_ratelimit()) \
515 printk(KERN_ERR "kvm: %i: cpu%i " fmt, \
516 current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
517 } while(0)
523 518
524#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt) 519#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
525#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt) 520#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
526 521
527int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module); 522int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
528void kvm_exit_arch(void); 523void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
524
525int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
526 struct module *module);
527void kvm_exit_x86(void);
529 528
530int kvm_mmu_module_init(void); 529int kvm_mmu_module_init(void);
531void kvm_mmu_module_exit(void); 530void kvm_mmu_module_exit(void);
@@ -545,8 +544,6 @@ static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
545hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva); 544hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
546struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva); 545struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
547 546
548void kvm_emulator_want_group7_invlpg(void);
549
550extern hpa_t bad_page_address; 547extern hpa_t bad_page_address;
551 548
552struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); 549struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
@@ -561,6 +558,7 @@ enum emulation_result {
561 558
562int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run, 559int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
563 unsigned long cr2, u16 error_code); 560 unsigned long cr2, u16 error_code);
561void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context);
564void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address); 562void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
565void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address); 563void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
566void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw, 564void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
@@ -574,9 +572,11 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
574 572
575struct x86_emulate_ctxt; 573struct x86_emulate_ctxt;
576 574
577int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, 575int kvm_emulate_pio (struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
578 int size, unsigned long count, int string, int down, 576 int size, unsigned port);
579 gva_t address, int rep, unsigned port); 577int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
578 int size, unsigned long count, int down,
579 gva_t address, int rep, unsigned port);
580void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); 580void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
581int kvm_emulate_halt(struct kvm_vcpu *vcpu); 581int kvm_emulate_halt(struct kvm_vcpu *vcpu);
582int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address); 582int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
@@ -590,34 +590,33 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
590void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0); 590void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0);
591void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0); 591void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
592void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0); 592void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0);
593unsigned long get_cr8(struct kvm_vcpu *vcpu);
593void lmsw(struct kvm_vcpu *vcpu, unsigned long msw); 594void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
595void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
594 596
595int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); 597int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
596int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data); 598int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
597 599
598void fx_init(struct kvm_vcpu *vcpu); 600void fx_init(struct kvm_vcpu *vcpu);
599 601
600void load_msrs(struct vmx_msr_entry *e, int n);
601void save_msrs(struct vmx_msr_entry *e, int n);
602void kvm_resched(struct kvm_vcpu *vcpu); 602void kvm_resched(struct kvm_vcpu *vcpu);
603void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); 603void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
604void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); 604void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
605void kvm_flush_remote_tlbs(struct kvm *kvm); 605void kvm_flush_remote_tlbs(struct kvm *kvm);
606 606
607int kvm_read_guest(struct kvm_vcpu *vcpu, 607int emulator_read_std(unsigned long addr,
608 gva_t addr, 608 void *val,
609 unsigned long size, 609 unsigned int bytes,
610 void *dest); 610 struct kvm_vcpu *vcpu);
611 611int emulator_write_emulated(unsigned long addr,
612int kvm_write_guest(struct kvm_vcpu *vcpu, 612 const void *val,
613 gva_t addr, 613 unsigned int bytes,
614 unsigned long size, 614 struct kvm_vcpu *vcpu);
615 void *data);
616 615
617unsigned long segment_base(u16 selector); 616unsigned long segment_base(u16 selector);
618 617
619void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, 618void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
620 const u8 *old, const u8 *new, int bytes); 619 const u8 *new, int bytes);
621int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); 620int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
622void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); 621void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
623int kvm_mmu_load(struct kvm_vcpu *vcpu); 622int kvm_mmu_load(struct kvm_vcpu *vcpu);
@@ -656,17 +655,17 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
656 655
657static inline int is_pae(struct kvm_vcpu *vcpu) 656static inline int is_pae(struct kvm_vcpu *vcpu)
658{ 657{
659 return vcpu->cr4 & CR4_PAE_MASK; 658 return vcpu->cr4 & X86_CR4_PAE;
660} 659}
661 660
662static inline int is_pse(struct kvm_vcpu *vcpu) 661static inline int is_pse(struct kvm_vcpu *vcpu)
663{ 662{
664 return vcpu->cr4 & CR4_PSE_MASK; 663 return vcpu->cr4 & X86_CR4_PSE;
665} 664}
666 665
667static inline int is_paging(struct kvm_vcpu *vcpu) 666static inline int is_paging(struct kvm_vcpu *vcpu)
668{ 667{
669 return vcpu->cr0 & CR0_PG_MASK; 668 return vcpu->cr0 & X86_CR0_PG;
670} 669}
671 670
672static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot) 671static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
@@ -746,12 +745,12 @@ static inline unsigned long read_msr(unsigned long msr)
746} 745}
747#endif 746#endif
748 747
749static inline void fx_save(void *image) 748static inline void fx_save(struct i387_fxsave_struct *image)
750{ 749{
751 asm ("fxsave (%0)":: "r" (image)); 750 asm ("fxsave (%0)":: "r" (image));
752} 751}
753 752
754static inline void fx_restore(void *image) 753static inline void fx_restore(struct i387_fxsave_struct *image)
755{ 754{
756 asm ("fxrstor (%0)":: "r" (image)); 755 asm ("fxrstor (%0)":: "r" (image));
757} 756}
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index cd0557954e..353e58527d 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -18,6 +18,7 @@
18#include "kvm.h" 18#include "kvm.h"
19#include "x86_emulate.h" 19#include "x86_emulate.h"
20#include "segment_descriptor.h" 20#include "segment_descriptor.h"
21#include "irq.h"
21 22
22#include <linux/kvm.h> 23#include <linux/kvm.h>
23#include <linux/module.h> 24#include <linux/module.h>
@@ -37,6 +38,7 @@
37#include <linux/cpumask.h> 38#include <linux/cpumask.h>
38#include <linux/smp.h> 39#include <linux/smp.h>
39#include <linux/anon_inodes.h> 40#include <linux/anon_inodes.h>
41#include <linux/profile.h>
40 42
41#include <asm/processor.h> 43#include <asm/processor.h>
42#include <asm/msr.h> 44#include <asm/msr.h>
@@ -52,9 +54,11 @@ static LIST_HEAD(vm_list);
52 54
53static cpumask_t cpus_hardware_enabled; 55static cpumask_t cpus_hardware_enabled;
54 56
55struct kvm_arch_ops *kvm_arch_ops; 57struct kvm_x86_ops *kvm_x86_ops;
58struct kmem_cache *kvm_vcpu_cache;
59EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
56 60
57static void hardware_disable(void *ignored); 61static __read_mostly struct preempt_ops kvm_preempt_ops;
58 62
59#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x) 63#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
60 64
@@ -73,6 +77,7 @@ static struct kvm_stats_debugfs_item {
73 { "signal_exits", STAT_OFFSET(signal_exits) }, 77 { "signal_exits", STAT_OFFSET(signal_exits) },
74 { "irq_window", STAT_OFFSET(irq_window_exits) }, 78 { "irq_window", STAT_OFFSET(irq_window_exits) },
75 { "halt_exits", STAT_OFFSET(halt_exits) }, 79 { "halt_exits", STAT_OFFSET(halt_exits) },
80 { "halt_wakeup", STAT_OFFSET(halt_wakeup) },
76 { "request_irq", STAT_OFFSET(request_irq_exits) }, 81 { "request_irq", STAT_OFFSET(request_irq_exits) },
77 { "irq_exits", STAT_OFFSET(irq_exits) }, 82 { "irq_exits", STAT_OFFSET(irq_exits) },
78 { "light_exits", STAT_OFFSET(light_exits) }, 83 { "light_exits", STAT_OFFSET(light_exits) },
@@ -84,10 +89,17 @@ static struct dentry *debugfs_dir;
84 89
85#define MAX_IO_MSRS 256 90#define MAX_IO_MSRS 256
86 91
87#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL 92#define CR0_RESERVED_BITS \
88#define LMSW_GUEST_MASK 0x0eULL 93 (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
89#define CR4_RESEVED_BITS (~((1ULL << 11) - 1)) 94 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
90#define CR8_RESEVED_BITS (~0x0fULL) 95 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
96#define CR4_RESERVED_BITS \
97 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
98 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
99 | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
100 | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
101
102#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
91#define EFER_RESERVED_BITS 0xfffffffffffff2fe 103#define EFER_RESERVED_BITS 0xfffffffffffff2fe
92 104
93#ifdef CONFIG_X86_64 105#ifdef CONFIG_X86_64
@@ -139,82 +151,14 @@ static inline int valid_vcpu(int n)
139 return likely(n >= 0 && n < KVM_MAX_VCPUS); 151 return likely(n >= 0 && n < KVM_MAX_VCPUS);
140} 152}
141 153
142int kvm_read_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
143 void *dest)
144{
145 unsigned char *host_buf = dest;
146 unsigned long req_size = size;
147
148 while (size) {
149 hpa_t paddr;
150 unsigned now;
151 unsigned offset;
152 hva_t guest_buf;
153
154 paddr = gva_to_hpa(vcpu, addr);
155
156 if (is_error_hpa(paddr))
157 break;
158
159 guest_buf = (hva_t)kmap_atomic(
160 pfn_to_page(paddr >> PAGE_SHIFT),
161 KM_USER0);
162 offset = addr & ~PAGE_MASK;
163 guest_buf |= offset;
164 now = min(size, PAGE_SIZE - offset);
165 memcpy(host_buf, (void*)guest_buf, now);
166 host_buf += now;
167 addr += now;
168 size -= now;
169 kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
170 }
171 return req_size - size;
172}
173EXPORT_SYMBOL_GPL(kvm_read_guest);
174
175int kvm_write_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
176 void *data)
177{
178 unsigned char *host_buf = data;
179 unsigned long req_size = size;
180
181 while (size) {
182 hpa_t paddr;
183 unsigned now;
184 unsigned offset;
185 hva_t guest_buf;
186 gfn_t gfn;
187
188 paddr = gva_to_hpa(vcpu, addr);
189
190 if (is_error_hpa(paddr))
191 break;
192
193 gfn = vcpu->mmu.gva_to_gpa(vcpu, addr) >> PAGE_SHIFT;
194 mark_page_dirty(vcpu->kvm, gfn);
195 guest_buf = (hva_t)kmap_atomic(
196 pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
197 offset = addr & ~PAGE_MASK;
198 guest_buf |= offset;
199 now = min(size, PAGE_SIZE - offset);
200 memcpy((void*)guest_buf, host_buf, now);
201 host_buf += now;
202 addr += now;
203 size -= now;
204 kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
205 }
206 return req_size - size;
207}
208EXPORT_SYMBOL_GPL(kvm_write_guest);
209
210void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) 154void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
211{ 155{
212 if (!vcpu->fpu_active || vcpu->guest_fpu_loaded) 156 if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
213 return; 157 return;
214 158
215 vcpu->guest_fpu_loaded = 1; 159 vcpu->guest_fpu_loaded = 1;
216 fx_save(vcpu->host_fx_image); 160 fx_save(&vcpu->host_fx_image);
217 fx_restore(vcpu->guest_fx_image); 161 fx_restore(&vcpu->guest_fx_image);
218} 162}
219EXPORT_SYMBOL_GPL(kvm_load_guest_fpu); 163EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
220 164
@@ -224,8 +168,8 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
224 return; 168 return;
225 169
226 vcpu->guest_fpu_loaded = 0; 170 vcpu->guest_fpu_loaded = 0;
227 fx_save(vcpu->guest_fx_image); 171 fx_save(&vcpu->guest_fx_image);
228 fx_restore(vcpu->host_fx_image); 172 fx_restore(&vcpu->host_fx_image);
229} 173}
230EXPORT_SYMBOL_GPL(kvm_put_guest_fpu); 174EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
231 175
@@ -234,13 +178,21 @@ EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
234 */ 178 */
235static void vcpu_load(struct kvm_vcpu *vcpu) 179static void vcpu_load(struct kvm_vcpu *vcpu)
236{ 180{
181 int cpu;
182
237 mutex_lock(&vcpu->mutex); 183 mutex_lock(&vcpu->mutex);
238 kvm_arch_ops->vcpu_load(vcpu); 184 cpu = get_cpu();
185 preempt_notifier_register(&vcpu->preempt_notifier);
186 kvm_x86_ops->vcpu_load(vcpu, cpu);
187 put_cpu();
239} 188}
240 189
241static void vcpu_put(struct kvm_vcpu *vcpu) 190static void vcpu_put(struct kvm_vcpu *vcpu)
242{ 191{
243 kvm_arch_ops->vcpu_put(vcpu); 192 preempt_disable();
193 kvm_x86_ops->vcpu_put(vcpu);
194 preempt_notifier_unregister(&vcpu->preempt_notifier);
195 preempt_enable();
244 mutex_unlock(&vcpu->mutex); 196 mutex_unlock(&vcpu->mutex);
245} 197}
246 198
@@ -261,8 +213,10 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
261 atomic_set(&completed, 0); 213 atomic_set(&completed, 0);
262 cpus_clear(cpus); 214 cpus_clear(cpus);
263 needed = 0; 215 needed = 0;
264 for (i = 0; i < kvm->nvcpus; ++i) { 216 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
265 vcpu = &kvm->vcpus[i]; 217 vcpu = kvm->vcpus[i];
218 if (!vcpu)
219 continue;
266 if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests)) 220 if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
267 continue; 221 continue;
268 cpu = vcpu->cpu; 222 cpu = vcpu->cpu;
@@ -286,37 +240,79 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
286 } 240 }
287} 241}
288 242
243int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
244{
245 struct page *page;
246 int r;
247
248 mutex_init(&vcpu->mutex);
249 vcpu->cpu = -1;
250 vcpu->mmu.root_hpa = INVALID_PAGE;
251 vcpu->kvm = kvm;
252 vcpu->vcpu_id = id;
253 if (!irqchip_in_kernel(kvm) || id == 0)
254 vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
255 else
256 vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
257 init_waitqueue_head(&vcpu->wq);
258
259 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
260 if (!page) {
261 r = -ENOMEM;
262 goto fail;
263 }
264 vcpu->run = page_address(page);
265
266 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
267 if (!page) {
268 r = -ENOMEM;
269 goto fail_free_run;
270 }
271 vcpu->pio_data = page_address(page);
272
273 r = kvm_mmu_create(vcpu);
274 if (r < 0)
275 goto fail_free_pio_data;
276
277 return 0;
278
279fail_free_pio_data:
280 free_page((unsigned long)vcpu->pio_data);
281fail_free_run:
282 free_page((unsigned long)vcpu->run);
283fail:
284 return -ENOMEM;
285}
286EXPORT_SYMBOL_GPL(kvm_vcpu_init);
287
288void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
289{
290 kvm_mmu_destroy(vcpu);
291 if (vcpu->apic)
292 hrtimer_cancel(&vcpu->apic->timer.dev);
293 kvm_free_apic(vcpu->apic);
294 free_page((unsigned long)vcpu->pio_data);
295 free_page((unsigned long)vcpu->run);
296}
297EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
298
289static struct kvm *kvm_create_vm(void) 299static struct kvm *kvm_create_vm(void)
290{ 300{
291 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); 301 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
292 int i;
293 302
294 if (!kvm) 303 if (!kvm)
295 return ERR_PTR(-ENOMEM); 304 return ERR_PTR(-ENOMEM);
296 305
297 kvm_io_bus_init(&kvm->pio_bus); 306 kvm_io_bus_init(&kvm->pio_bus);
298 spin_lock_init(&kvm->lock); 307 mutex_init(&kvm->lock);
299 INIT_LIST_HEAD(&kvm->active_mmu_pages); 308 INIT_LIST_HEAD(&kvm->active_mmu_pages);
300 kvm_io_bus_init(&kvm->mmio_bus); 309 kvm_io_bus_init(&kvm->mmio_bus);
301 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
302 struct kvm_vcpu *vcpu = &kvm->vcpus[i];
303
304 mutex_init(&vcpu->mutex);
305 vcpu->cpu = -1;
306 vcpu->kvm = kvm;
307 vcpu->mmu.root_hpa = INVALID_PAGE;
308 }
309 spin_lock(&kvm_lock); 310 spin_lock(&kvm_lock);
310 list_add(&kvm->vm_list, &vm_list); 311 list_add(&kvm->vm_list, &vm_list);
311 spin_unlock(&kvm_lock); 312 spin_unlock(&kvm_lock);
312 return kvm; 313 return kvm;
313} 314}
314 315
315static int kvm_dev_open(struct inode *inode, struct file *filp)
316{
317 return 0;
318}
319
320/* 316/*
321 * Free any memory in @free but not in @dont. 317 * Free any memory in @free but not in @dont.
322 */ 318 */
@@ -353,7 +349,7 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
353{ 349{
354 int i; 350 int i;
355 351
356 for (i = 0; i < 2; ++i) 352 for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
357 if (vcpu->pio.guest_pages[i]) { 353 if (vcpu->pio.guest_pages[i]) {
358 __free_page(vcpu->pio.guest_pages[i]); 354 __free_page(vcpu->pio.guest_pages[i]);
359 vcpu->pio.guest_pages[i] = NULL; 355 vcpu->pio.guest_pages[i] = NULL;
@@ -362,30 +358,11 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
362 358
363static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) 359static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
364{ 360{
365 if (!vcpu->vmcs)
366 return;
367
368 vcpu_load(vcpu); 361 vcpu_load(vcpu);
369 kvm_mmu_unload(vcpu); 362 kvm_mmu_unload(vcpu);
370 vcpu_put(vcpu); 363 vcpu_put(vcpu);
371} 364}
372 365
373static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
374{
375 if (!vcpu->vmcs)
376 return;
377
378 vcpu_load(vcpu);
379 kvm_mmu_destroy(vcpu);
380 vcpu_put(vcpu);
381 kvm_arch_ops->vcpu_free(vcpu);
382 free_page((unsigned long)vcpu->run);
383 vcpu->run = NULL;
384 free_page((unsigned long)vcpu->pio_data);
385 vcpu->pio_data = NULL;
386 free_pio_guest_pages(vcpu);
387}
388
389static void kvm_free_vcpus(struct kvm *kvm) 366static void kvm_free_vcpus(struct kvm *kvm)
390{ 367{
391 unsigned int i; 368 unsigned int i;
@@ -394,14 +371,15 @@ static void kvm_free_vcpus(struct kvm *kvm)
394 * Unpin any mmu pages first. 371 * Unpin any mmu pages first.
395 */ 372 */
396 for (i = 0; i < KVM_MAX_VCPUS; ++i) 373 for (i = 0; i < KVM_MAX_VCPUS; ++i)
397 kvm_unload_vcpu_mmu(&kvm->vcpus[i]); 374 if (kvm->vcpus[i])
398 for (i = 0; i < KVM_MAX_VCPUS; ++i) 375 kvm_unload_vcpu_mmu(kvm->vcpus[i]);
399 kvm_free_vcpu(&kvm->vcpus[i]); 376 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
400} 377 if (kvm->vcpus[i]) {
378 kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
379 kvm->vcpus[i] = NULL;
380 }
381 }
401 382
402static int kvm_dev_release(struct inode *inode, struct file *filp)
403{
404 return 0;
405} 383}
406 384
407static void kvm_destroy_vm(struct kvm *kvm) 385static void kvm_destroy_vm(struct kvm *kvm)
@@ -411,6 +389,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
411 spin_unlock(&kvm_lock); 389 spin_unlock(&kvm_lock);
412 kvm_io_bus_destroy(&kvm->pio_bus); 390 kvm_io_bus_destroy(&kvm->pio_bus);
413 kvm_io_bus_destroy(&kvm->mmio_bus); 391 kvm_io_bus_destroy(&kvm->mmio_bus);
392 kfree(kvm->vpic);
393 kfree(kvm->vioapic);
414 kvm_free_vcpus(kvm); 394 kvm_free_vcpus(kvm);
415 kvm_free_physmem(kvm); 395 kvm_free_physmem(kvm);
416 kfree(kvm); 396 kfree(kvm);
@@ -426,7 +406,7 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
426 406
427static void inject_gp(struct kvm_vcpu *vcpu) 407static void inject_gp(struct kvm_vcpu *vcpu)
428{ 408{
429 kvm_arch_ops->inject_gp(vcpu, 0); 409 kvm_x86_ops->inject_gp(vcpu, 0);
430} 410}
431 411
432/* 412/*
@@ -437,58 +417,60 @@ static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
437 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; 417 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
438 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; 418 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
439 int i; 419 int i;
440 u64 pdpte;
441 u64 *pdpt; 420 u64 *pdpt;
442 int ret; 421 int ret;
443 struct page *page; 422 struct page *page;
423 u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
444 424
445 spin_lock(&vcpu->kvm->lock); 425 mutex_lock(&vcpu->kvm->lock);
446 page = gfn_to_page(vcpu->kvm, pdpt_gfn); 426 page = gfn_to_page(vcpu->kvm, pdpt_gfn);
447 /* FIXME: !page - emulate? 0xff? */ 427 if (!page) {
428 ret = 0;
429 goto out;
430 }
431
448 pdpt = kmap_atomic(page, KM_USER0); 432 pdpt = kmap_atomic(page, KM_USER0);
433 memcpy(pdpte, pdpt+offset, sizeof(pdpte));
434 kunmap_atomic(pdpt, KM_USER0);
449 435
450 ret = 1; 436 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
451 for (i = 0; i < 4; ++i) { 437 if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
452 pdpte = pdpt[offset + i];
453 if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) {
454 ret = 0; 438 ret = 0;
455 goto out; 439 goto out;
456 } 440 }
457 } 441 }
442 ret = 1;
458 443
459 for (i = 0; i < 4; ++i) 444 memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
460 vcpu->pdptrs[i] = pdpt[offset + i];
461
462out: 445out:
463 kunmap_atomic(pdpt, KM_USER0); 446 mutex_unlock(&vcpu->kvm->lock);
464 spin_unlock(&vcpu->kvm->lock);
465 447
466 return ret; 448 return ret;
467} 449}
468 450
469void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 451void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
470{ 452{
471 if (cr0 & CR0_RESEVED_BITS) { 453 if (cr0 & CR0_RESERVED_BITS) {
472 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n", 454 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
473 cr0, vcpu->cr0); 455 cr0, vcpu->cr0);
474 inject_gp(vcpu); 456 inject_gp(vcpu);
475 return; 457 return;
476 } 458 }
477 459
478 if ((cr0 & CR0_NW_MASK) && !(cr0 & CR0_CD_MASK)) { 460 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
479 printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n"); 461 printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
480 inject_gp(vcpu); 462 inject_gp(vcpu);
481 return; 463 return;
482 } 464 }
483 465
484 if ((cr0 & CR0_PG_MASK) && !(cr0 & CR0_PE_MASK)) { 466 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
485 printk(KERN_DEBUG "set_cr0: #GP, set PG flag " 467 printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
486 "and a clear PE flag\n"); 468 "and a clear PE flag\n");
487 inject_gp(vcpu); 469 inject_gp(vcpu);
488 return; 470 return;
489 } 471 }
490 472
491 if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) { 473 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
492#ifdef CONFIG_X86_64 474#ifdef CONFIG_X86_64
493 if ((vcpu->shadow_efer & EFER_LME)) { 475 if ((vcpu->shadow_efer & EFER_LME)) {
494 int cs_db, cs_l; 476 int cs_db, cs_l;
@@ -499,7 +481,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
499 inject_gp(vcpu); 481 inject_gp(vcpu);
500 return; 482 return;
501 } 483 }
502 kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 484 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
503 if (cs_l) { 485 if (cs_l) {
504 printk(KERN_DEBUG "set_cr0: #GP, start paging " 486 printk(KERN_DEBUG "set_cr0: #GP, start paging "
505 "in long mode while CS.L == 1\n"); 487 "in long mode while CS.L == 1\n");
@@ -518,12 +500,12 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
518 500
519 } 501 }
520 502
521 kvm_arch_ops->set_cr0(vcpu, cr0); 503 kvm_x86_ops->set_cr0(vcpu, cr0);
522 vcpu->cr0 = cr0; 504 vcpu->cr0 = cr0;
523 505
524 spin_lock(&vcpu->kvm->lock); 506 mutex_lock(&vcpu->kvm->lock);
525 kvm_mmu_reset_context(vcpu); 507 kvm_mmu_reset_context(vcpu);
526 spin_unlock(&vcpu->kvm->lock); 508 mutex_unlock(&vcpu->kvm->lock);
527 return; 509 return;
528} 510}
529EXPORT_SYMBOL_GPL(set_cr0); 511EXPORT_SYMBOL_GPL(set_cr0);
@@ -536,62 +518,72 @@ EXPORT_SYMBOL_GPL(lmsw);
536 518
537void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 519void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
538{ 520{
539 if (cr4 & CR4_RESEVED_BITS) { 521 if (cr4 & CR4_RESERVED_BITS) {
540 printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n"); 522 printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
541 inject_gp(vcpu); 523 inject_gp(vcpu);
542 return; 524 return;
543 } 525 }
544 526
545 if (is_long_mode(vcpu)) { 527 if (is_long_mode(vcpu)) {
546 if (!(cr4 & CR4_PAE_MASK)) { 528 if (!(cr4 & X86_CR4_PAE)) {
547 printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while " 529 printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
548 "in long mode\n"); 530 "in long mode\n");
549 inject_gp(vcpu); 531 inject_gp(vcpu);
550 return; 532 return;
551 } 533 }
552 } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK) 534 } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
553 && !load_pdptrs(vcpu, vcpu->cr3)) { 535 && !load_pdptrs(vcpu, vcpu->cr3)) {
554 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n"); 536 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
555 inject_gp(vcpu); 537 inject_gp(vcpu);
538 return;
556 } 539 }
557 540
558 if (cr4 & CR4_VMXE_MASK) { 541 if (cr4 & X86_CR4_VMXE) {
559 printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n"); 542 printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
560 inject_gp(vcpu); 543 inject_gp(vcpu);
561 return; 544 return;
562 } 545 }
563 kvm_arch_ops->set_cr4(vcpu, cr4); 546 kvm_x86_ops->set_cr4(vcpu, cr4);
564 spin_lock(&vcpu->kvm->lock); 547 vcpu->cr4 = cr4;
548 mutex_lock(&vcpu->kvm->lock);
565 kvm_mmu_reset_context(vcpu); 549 kvm_mmu_reset_context(vcpu);
566 spin_unlock(&vcpu->kvm->lock); 550 mutex_unlock(&vcpu->kvm->lock);
567} 551}
568EXPORT_SYMBOL_GPL(set_cr4); 552EXPORT_SYMBOL_GPL(set_cr4);
569 553
570void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 554void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
571{ 555{
572 if (is_long_mode(vcpu)) { 556 if (is_long_mode(vcpu)) {
573 if (cr3 & CR3_L_MODE_RESEVED_BITS) { 557 if (cr3 & CR3_L_MODE_RESERVED_BITS) {
574 printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n"); 558 printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
575 inject_gp(vcpu); 559 inject_gp(vcpu);
576 return; 560 return;
577 } 561 }
578 } else { 562 } else {
579 if (cr3 & CR3_RESEVED_BITS) { 563 if (is_pae(vcpu)) {
580 printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n"); 564 if (cr3 & CR3_PAE_RESERVED_BITS) {
581 inject_gp(vcpu); 565 printk(KERN_DEBUG
582 return; 566 "set_cr3: #GP, reserved bits\n");
583 } 567 inject_gp(vcpu);
584 if (is_paging(vcpu) && is_pae(vcpu) && 568 return;
585 !load_pdptrs(vcpu, cr3)) { 569 }
586 printk(KERN_DEBUG "set_cr3: #GP, pdptrs " 570 if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
587 "reserved bits\n"); 571 printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
588 inject_gp(vcpu); 572 "reserved bits\n");
589 return; 573 inject_gp(vcpu);
574 return;
575 }
576 } else {
577 if (cr3 & CR3_NONPAE_RESERVED_BITS) {
578 printk(KERN_DEBUG
579 "set_cr3: #GP, reserved bits\n");
580 inject_gp(vcpu);
581 return;
582 }
590 } 583 }
591 } 584 }
592 585
593 vcpu->cr3 = cr3; 586 mutex_lock(&vcpu->kvm->lock);
594 spin_lock(&vcpu->kvm->lock);
595 /* 587 /*
596 * Does the new cr3 value map to physical memory? (Note, we 588 * Does the new cr3 value map to physical memory? (Note, we
597 * catch an invalid cr3 even in real-mode, because it would 589 * catch an invalid cr3 even in real-mode, because it would
@@ -603,46 +595,73 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
603 */ 595 */
604 if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT))) 596 if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
605 inject_gp(vcpu); 597 inject_gp(vcpu);
606 else 598 else {
599 vcpu->cr3 = cr3;
607 vcpu->mmu.new_cr3(vcpu); 600 vcpu->mmu.new_cr3(vcpu);
608 spin_unlock(&vcpu->kvm->lock); 601 }
602 mutex_unlock(&vcpu->kvm->lock);
609} 603}
610EXPORT_SYMBOL_GPL(set_cr3); 604EXPORT_SYMBOL_GPL(set_cr3);
611 605
612void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) 606void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
613{ 607{
614 if ( cr8 & CR8_RESEVED_BITS) { 608 if (cr8 & CR8_RESERVED_BITS) {
615 printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8); 609 printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
616 inject_gp(vcpu); 610 inject_gp(vcpu);
617 return; 611 return;
618 } 612 }
619 vcpu->cr8 = cr8; 613 if (irqchip_in_kernel(vcpu->kvm))
614 kvm_lapic_set_tpr(vcpu, cr8);
615 else
616 vcpu->cr8 = cr8;
620} 617}
621EXPORT_SYMBOL_GPL(set_cr8); 618EXPORT_SYMBOL_GPL(set_cr8);
622 619
623void fx_init(struct kvm_vcpu *vcpu) 620unsigned long get_cr8(struct kvm_vcpu *vcpu)
621{
622 if (irqchip_in_kernel(vcpu->kvm))
623 return kvm_lapic_get_cr8(vcpu);
624 else
625 return vcpu->cr8;
626}
627EXPORT_SYMBOL_GPL(get_cr8);
628
629u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
624{ 630{
625 struct __attribute__ ((__packed__)) fx_image_s { 631 if (irqchip_in_kernel(vcpu->kvm))
626 u16 control; //fcw 632 return vcpu->apic_base;
627 u16 status; //fsw 633 else
628 u16 tag; // ftw 634 return vcpu->apic_base;
629 u16 opcode; //fop 635}
630 u64 ip; // fpu ip 636EXPORT_SYMBOL_GPL(kvm_get_apic_base);
631 u64 operand;// fpu dp
632 u32 mxcsr;
633 u32 mxcsr_mask;
634 637
635 } *fx_image; 638void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
639{
640 /* TODO: reserve bits check */
641 if (irqchip_in_kernel(vcpu->kvm))
642 kvm_lapic_set_base(vcpu, data);
643 else
644 vcpu->apic_base = data;
645}
646EXPORT_SYMBOL_GPL(kvm_set_apic_base);
647
648void fx_init(struct kvm_vcpu *vcpu)
649{
650 unsigned after_mxcsr_mask;
636 651
637 fx_save(vcpu->host_fx_image); 652 /* Initialize guest FPU by resetting ours and saving into guest's */
653 preempt_disable();
654 fx_save(&vcpu->host_fx_image);
638 fpu_init(); 655 fpu_init();
639 fx_save(vcpu->guest_fx_image); 656 fx_save(&vcpu->guest_fx_image);
640 fx_restore(vcpu->host_fx_image); 657 fx_restore(&vcpu->host_fx_image);
658 preempt_enable();
641 659
642 fx_image = (struct fx_image_s *)vcpu->guest_fx_image; 660 vcpu->cr0 |= X86_CR0_ET;
643 fx_image->mxcsr = 0x1f80; 661 after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
644 memset(vcpu->guest_fx_image + sizeof(struct fx_image_s), 662 vcpu->guest_fx_image.mxcsr = 0x1f80;
645 0, FX_IMAGE_SIZE - sizeof(struct fx_image_s)); 663 memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
664 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
646} 665}
647EXPORT_SYMBOL_GPL(fx_init); 666EXPORT_SYMBOL_GPL(fx_init);
648 667
@@ -661,7 +680,6 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
661 unsigned long i; 680 unsigned long i;
662 struct kvm_memory_slot *memslot; 681 struct kvm_memory_slot *memslot;
663 struct kvm_memory_slot old, new; 682 struct kvm_memory_slot old, new;
664 int memory_config_version;
665 683
666 r = -EINVAL; 684 r = -EINVAL;
667 /* General sanity checks */ 685 /* General sanity checks */
@@ -681,10 +699,8 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
681 if (!npages) 699 if (!npages)
682 mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES; 700 mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
683 701
684raced: 702 mutex_lock(&kvm->lock);
685 spin_lock(&kvm->lock);
686 703
687 memory_config_version = kvm->memory_config_version;
688 new = old = *memslot; 704 new = old = *memslot;
689 705
690 new.base_gfn = base_gfn; 706 new.base_gfn = base_gfn;
@@ -707,11 +723,6 @@ raced:
707 (base_gfn >= s->base_gfn + s->npages))) 723 (base_gfn >= s->base_gfn + s->npages)))
708 goto out_unlock; 724 goto out_unlock;
709 } 725 }
710 /*
711 * Do memory allocations outside lock. memory_config_version will
712 * detect any races.
713 */
714 spin_unlock(&kvm->lock);
715 726
716 /* Deallocate if slot is being removed */ 727 /* Deallocate if slot is being removed */
717 if (!npages) 728 if (!npages)
@@ -728,14 +739,14 @@ raced:
728 new.phys_mem = vmalloc(npages * sizeof(struct page *)); 739 new.phys_mem = vmalloc(npages * sizeof(struct page *));
729 740
730 if (!new.phys_mem) 741 if (!new.phys_mem)
731 goto out_free; 742 goto out_unlock;
732 743
733 memset(new.phys_mem, 0, npages * sizeof(struct page *)); 744 memset(new.phys_mem, 0, npages * sizeof(struct page *));
734 for (i = 0; i < npages; ++i) { 745 for (i = 0; i < npages; ++i) {
735 new.phys_mem[i] = alloc_page(GFP_HIGHUSER 746 new.phys_mem[i] = alloc_page(GFP_HIGHUSER
736 | __GFP_ZERO); 747 | __GFP_ZERO);
737 if (!new.phys_mem[i]) 748 if (!new.phys_mem[i])
738 goto out_free; 749 goto out_unlock;
739 set_page_private(new.phys_mem[i],0); 750 set_page_private(new.phys_mem[i],0);
740 } 751 }
741 } 752 }
@@ -746,39 +757,25 @@ raced:
746 757
747 new.dirty_bitmap = vmalloc(dirty_bytes); 758 new.dirty_bitmap = vmalloc(dirty_bytes);
748 if (!new.dirty_bitmap) 759 if (!new.dirty_bitmap)
749 goto out_free; 760 goto out_unlock;
750 memset(new.dirty_bitmap, 0, dirty_bytes); 761 memset(new.dirty_bitmap, 0, dirty_bytes);
751 } 762 }
752 763
753 spin_lock(&kvm->lock);
754
755 if (memory_config_version != kvm->memory_config_version) {
756 spin_unlock(&kvm->lock);
757 kvm_free_physmem_slot(&new, &old);
758 goto raced;
759 }
760
761 r = -EAGAIN;
762 if (kvm->busy)
763 goto out_unlock;
764
765 if (mem->slot >= kvm->nmemslots) 764 if (mem->slot >= kvm->nmemslots)
766 kvm->nmemslots = mem->slot + 1; 765 kvm->nmemslots = mem->slot + 1;
767 766
768 *memslot = new; 767 *memslot = new;
769 ++kvm->memory_config_version;
770 768
771 kvm_mmu_slot_remove_write_access(kvm, mem->slot); 769 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
772 kvm_flush_remote_tlbs(kvm); 770 kvm_flush_remote_tlbs(kvm);
773 771
774 spin_unlock(&kvm->lock); 772 mutex_unlock(&kvm->lock);
775 773
776 kvm_free_physmem_slot(&old, &new); 774 kvm_free_physmem_slot(&old, &new);
777 return 0; 775 return 0;
778 776
779out_unlock: 777out_unlock:
780 spin_unlock(&kvm->lock); 778 mutex_unlock(&kvm->lock);
781out_free:
782 kvm_free_physmem_slot(&new, &old); 779 kvm_free_physmem_slot(&new, &old);
783out: 780out:
784 return r; 781 return r;
@@ -795,14 +792,8 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
795 int n; 792 int n;
796 unsigned long any = 0; 793 unsigned long any = 0;
797 794
798 spin_lock(&kvm->lock); 795 mutex_lock(&kvm->lock);
799 796
800 /*
801 * Prevent changes to guest memory configuration even while the lock
802 * is not taken.
803 */
804 ++kvm->busy;
805 spin_unlock(&kvm->lock);
806 r = -EINVAL; 797 r = -EINVAL;
807 if (log->slot >= KVM_MEMORY_SLOTS) 798 if (log->slot >= KVM_MEMORY_SLOTS)
808 goto out; 799 goto out;
@@ -821,18 +812,17 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
821 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) 812 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
822 goto out; 813 goto out;
823 814
824 spin_lock(&kvm->lock); 815 /* If nothing is dirty, don't bother messing with page tables. */
825 kvm_mmu_slot_remove_write_access(kvm, log->slot); 816 if (any) {
826 kvm_flush_remote_tlbs(kvm); 817 kvm_mmu_slot_remove_write_access(kvm, log->slot);
827 memset(memslot->dirty_bitmap, 0, n); 818 kvm_flush_remote_tlbs(kvm);
828 spin_unlock(&kvm->lock); 819 memset(memslot->dirty_bitmap, 0, n);
820 }
829 821
830 r = 0; 822 r = 0;
831 823
832out: 824out:
833 spin_lock(&kvm->lock); 825 mutex_unlock(&kvm->lock);
834 --kvm->busy;
835 spin_unlock(&kvm->lock);
836 return r; 826 return r;
837} 827}
838 828
@@ -862,7 +852,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
862 < alias->target_phys_addr) 852 < alias->target_phys_addr)
863 goto out; 853 goto out;
864 854
865 spin_lock(&kvm->lock); 855 mutex_lock(&kvm->lock);
866 856
867 p = &kvm->aliases[alias->slot]; 857 p = &kvm->aliases[alias->slot];
868 p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT; 858 p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
@@ -876,7 +866,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
876 866
877 kvm_mmu_zap_all(kvm); 867 kvm_mmu_zap_all(kvm);
878 868
879 spin_unlock(&kvm->lock); 869 mutex_unlock(&kvm->lock);
880 870
881 return 0; 871 return 0;
882 872
@@ -884,6 +874,63 @@ out:
884 return r; 874 return r;
885} 875}
886 876
877static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
878{
879 int r;
880
881 r = 0;
882 switch (chip->chip_id) {
883 case KVM_IRQCHIP_PIC_MASTER:
884 memcpy (&chip->chip.pic,
885 &pic_irqchip(kvm)->pics[0],
886 sizeof(struct kvm_pic_state));
887 break;
888 case KVM_IRQCHIP_PIC_SLAVE:
889 memcpy (&chip->chip.pic,
890 &pic_irqchip(kvm)->pics[1],
891 sizeof(struct kvm_pic_state));
892 break;
893 case KVM_IRQCHIP_IOAPIC:
894 memcpy (&chip->chip.ioapic,
895 ioapic_irqchip(kvm),
896 sizeof(struct kvm_ioapic_state));
897 break;
898 default:
899 r = -EINVAL;
900 break;
901 }
902 return r;
903}
904
905static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
906{
907 int r;
908
909 r = 0;
910 switch (chip->chip_id) {
911 case KVM_IRQCHIP_PIC_MASTER:
912 memcpy (&pic_irqchip(kvm)->pics[0],
913 &chip->chip.pic,
914 sizeof(struct kvm_pic_state));
915 break;
916 case KVM_IRQCHIP_PIC_SLAVE:
917 memcpy (&pic_irqchip(kvm)->pics[1],
918 &chip->chip.pic,
919 sizeof(struct kvm_pic_state));
920 break;
921 case KVM_IRQCHIP_IOAPIC:
922 memcpy (ioapic_irqchip(kvm),
923 &chip->chip.ioapic,
924 sizeof(struct kvm_ioapic_state));
925 break;
926 default:
927 r = -EINVAL;
928 break;
929 }
930 kvm_pic_update_irq(pic_irqchip(kvm));
931 return r;
932}
933
887static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) 934static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
888{ 935{
889 int i; 936 int i;
@@ -930,37 +977,26 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
930} 977}
931EXPORT_SYMBOL_GPL(gfn_to_page); 978EXPORT_SYMBOL_GPL(gfn_to_page);
932 979
980/* WARNING: Does not work on aliased pages. */
933void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 981void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
934{ 982{
935 int i;
936 struct kvm_memory_slot *memslot; 983 struct kvm_memory_slot *memslot;
937 unsigned long rel_gfn;
938 984
939 for (i = 0; i < kvm->nmemslots; ++i) { 985 memslot = __gfn_to_memslot(kvm, gfn);
940 memslot = &kvm->memslots[i]; 986 if (memslot && memslot->dirty_bitmap) {
941 987 unsigned long rel_gfn = gfn - memslot->base_gfn;
942 if (gfn >= memslot->base_gfn
943 && gfn < memslot->base_gfn + memslot->npages) {
944 988
945 if (!memslot->dirty_bitmap) 989 /* avoid RMW */
946 return; 990 if (!test_bit(rel_gfn, memslot->dirty_bitmap))
947 991 set_bit(rel_gfn, memslot->dirty_bitmap);
948 rel_gfn = gfn - memslot->base_gfn;
949
950 /* avoid RMW */
951 if (!test_bit(rel_gfn, memslot->dirty_bitmap))
952 set_bit(rel_gfn, memslot->dirty_bitmap);
953 return;
954 }
955 } 992 }
956} 993}
957 994
958static int emulator_read_std(unsigned long addr, 995int emulator_read_std(unsigned long addr,
959 void *val, 996 void *val,
960 unsigned int bytes, 997 unsigned int bytes,
961 struct x86_emulate_ctxt *ctxt) 998 struct kvm_vcpu *vcpu)
962{ 999{
963 struct kvm_vcpu *vcpu = ctxt->vcpu;
964 void *data = val; 1000 void *data = val;
965 1001
966 while (bytes) { 1002 while (bytes) {
@@ -990,26 +1026,42 @@ static int emulator_read_std(unsigned long addr,
990 1026
991 return X86EMUL_CONTINUE; 1027 return X86EMUL_CONTINUE;
992} 1028}
1029EXPORT_SYMBOL_GPL(emulator_read_std);
993 1030
994static int emulator_write_std(unsigned long addr, 1031static int emulator_write_std(unsigned long addr,
995 const void *val, 1032 const void *val,
996 unsigned int bytes, 1033 unsigned int bytes,
997 struct x86_emulate_ctxt *ctxt) 1034 struct kvm_vcpu *vcpu)
998{ 1035{
999 printk(KERN_ERR "emulator_write_std: addr %lx n %d\n", 1036 pr_unimpl(vcpu, "emulator_write_std: addr %lx n %d\n", addr, bytes);
1000 addr, bytes);
1001 return X86EMUL_UNHANDLEABLE; 1037 return X86EMUL_UNHANDLEABLE;
1002} 1038}
1003 1039
1040/*
1041 * Only apic need an MMIO device hook, so shortcut now..
1042 */
1043static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
1044 gpa_t addr)
1045{
1046 struct kvm_io_device *dev;
1047
1048 if (vcpu->apic) {
1049 dev = &vcpu->apic->dev;
1050 if (dev->in_range(dev, addr))
1051 return dev;
1052 }
1053 return NULL;
1054}
1055
1004static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, 1056static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
1005 gpa_t addr) 1057 gpa_t addr)
1006{ 1058{
1007 /* 1059 struct kvm_io_device *dev;
1008 * Note that its important to have this wrapper function because 1060
1009 * in the very near future we will be checking for MMIOs against 1061 dev = vcpu_find_pervcpu_dev(vcpu, addr);
1010 * the LAPIC as well as the general MMIO bus 1062 if (dev == NULL)
1011 */ 1063 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
1012 return kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr); 1064 return dev;
1013} 1065}
1014 1066
1015static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu, 1067static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
@@ -1021,9 +1073,8 @@ static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
1021static int emulator_read_emulated(unsigned long addr, 1073static int emulator_read_emulated(unsigned long addr,
1022 void *val, 1074 void *val,
1023 unsigned int bytes, 1075 unsigned int bytes,
1024 struct x86_emulate_ctxt *ctxt) 1076 struct kvm_vcpu *vcpu)
1025{ 1077{
1026 struct kvm_vcpu *vcpu = ctxt->vcpu;
1027 struct kvm_io_device *mmio_dev; 1078 struct kvm_io_device *mmio_dev;
1028 gpa_t gpa; 1079 gpa_t gpa;
1029 1080
@@ -1031,7 +1082,7 @@ static int emulator_read_emulated(unsigned long addr,
1031 memcpy(val, vcpu->mmio_data, bytes); 1082 memcpy(val, vcpu->mmio_data, bytes);
1032 vcpu->mmio_read_completed = 0; 1083 vcpu->mmio_read_completed = 0;
1033 return X86EMUL_CONTINUE; 1084 return X86EMUL_CONTINUE;
1034 } else if (emulator_read_std(addr, val, bytes, ctxt) 1085 } else if (emulator_read_std(addr, val, bytes, vcpu)
1035 == X86EMUL_CONTINUE) 1086 == X86EMUL_CONTINUE)
1036 return X86EMUL_CONTINUE; 1087 return X86EMUL_CONTINUE;
1037 1088
@@ -1061,7 +1112,6 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1061{ 1112{
1062 struct page *page; 1113 struct page *page;
1063 void *virt; 1114 void *virt;
1064 unsigned offset = offset_in_page(gpa);
1065 1115
1066 if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT)) 1116 if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
1067 return 0; 1117 return 0;
@@ -1070,7 +1120,7 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1070 return 0; 1120 return 0;
1071 mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT); 1121 mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
1072 virt = kmap_atomic(page, KM_USER0); 1122 virt = kmap_atomic(page, KM_USER0);
1073 kvm_mmu_pte_write(vcpu, gpa, virt + offset, val, bytes); 1123 kvm_mmu_pte_write(vcpu, gpa, val, bytes);
1074 memcpy(virt + offset_in_page(gpa), val, bytes); 1124 memcpy(virt + offset_in_page(gpa), val, bytes);
1075 kunmap_atomic(virt, KM_USER0); 1125 kunmap_atomic(virt, KM_USER0);
1076 return 1; 1126 return 1;
@@ -1079,14 +1129,13 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1079static int emulator_write_emulated_onepage(unsigned long addr, 1129static int emulator_write_emulated_onepage(unsigned long addr,
1080 const void *val, 1130 const void *val,
1081 unsigned int bytes, 1131 unsigned int bytes,
1082 struct x86_emulate_ctxt *ctxt) 1132 struct kvm_vcpu *vcpu)
1083{ 1133{
1084 struct kvm_vcpu *vcpu = ctxt->vcpu;
1085 struct kvm_io_device *mmio_dev; 1134 struct kvm_io_device *mmio_dev;
1086 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); 1135 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1087 1136
1088 if (gpa == UNMAPPED_GVA) { 1137 if (gpa == UNMAPPED_GVA) {
1089 kvm_arch_ops->inject_page_fault(vcpu, addr, 2); 1138 kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
1090 return X86EMUL_PROPAGATE_FAULT; 1139 return X86EMUL_PROPAGATE_FAULT;
1091 } 1140 }
1092 1141
@@ -1111,31 +1160,32 @@ static int emulator_write_emulated_onepage(unsigned long addr,
1111 return X86EMUL_CONTINUE; 1160 return X86EMUL_CONTINUE;
1112} 1161}
1113 1162
1114static int emulator_write_emulated(unsigned long addr, 1163int emulator_write_emulated(unsigned long addr,
1115 const void *val, 1164 const void *val,
1116 unsigned int bytes, 1165 unsigned int bytes,
1117 struct x86_emulate_ctxt *ctxt) 1166 struct kvm_vcpu *vcpu)
1118{ 1167{
1119 /* Crossing a page boundary? */ 1168 /* Crossing a page boundary? */
1120 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { 1169 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
1121 int rc, now; 1170 int rc, now;
1122 1171
1123 now = -addr & ~PAGE_MASK; 1172 now = -addr & ~PAGE_MASK;
1124 rc = emulator_write_emulated_onepage(addr, val, now, ctxt); 1173 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
1125 if (rc != X86EMUL_CONTINUE) 1174 if (rc != X86EMUL_CONTINUE)
1126 return rc; 1175 return rc;
1127 addr += now; 1176 addr += now;
1128 val += now; 1177 val += now;
1129 bytes -= now; 1178 bytes -= now;
1130 } 1179 }
1131 return emulator_write_emulated_onepage(addr, val, bytes, ctxt); 1180 return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
1132} 1181}
1182EXPORT_SYMBOL_GPL(emulator_write_emulated);
1133 1183
1134static int emulator_cmpxchg_emulated(unsigned long addr, 1184static int emulator_cmpxchg_emulated(unsigned long addr,
1135 const void *old, 1185 const void *old,
1136 const void *new, 1186 const void *new,
1137 unsigned int bytes, 1187 unsigned int bytes,
1138 struct x86_emulate_ctxt *ctxt) 1188 struct kvm_vcpu *vcpu)
1139{ 1189{
1140 static int reported; 1190 static int reported;
1141 1191
@@ -1143,12 +1193,12 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
1143 reported = 1; 1193 reported = 1;
1144 printk(KERN_WARNING "kvm: emulating exchange as write\n"); 1194 printk(KERN_WARNING "kvm: emulating exchange as write\n");
1145 } 1195 }
1146 return emulator_write_emulated(addr, new, bytes, ctxt); 1196 return emulator_write_emulated(addr, new, bytes, vcpu);
1147} 1197}
1148 1198
1149static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) 1199static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
1150{ 1200{
1151 return kvm_arch_ops->get_segment_base(vcpu, seg); 1201 return kvm_x86_ops->get_segment_base(vcpu, seg);
1152} 1202}
1153 1203
1154int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address) 1204int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
@@ -1158,10 +1208,8 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
1158 1208
1159int emulate_clts(struct kvm_vcpu *vcpu) 1209int emulate_clts(struct kvm_vcpu *vcpu)
1160{ 1210{
1161 unsigned long cr0; 1211 vcpu->cr0 &= ~X86_CR0_TS;
1162 1212 kvm_x86_ops->set_cr0(vcpu, vcpu->cr0);
1163 cr0 = vcpu->cr0 & ~CR0_TS_MASK;
1164 kvm_arch_ops->set_cr0(vcpu, cr0);
1165 return X86EMUL_CONTINUE; 1213 return X86EMUL_CONTINUE;
1166} 1214}
1167 1215
@@ -1171,11 +1219,10 @@ int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr, unsigned long *dest)
1171 1219
1172 switch (dr) { 1220 switch (dr) {
1173 case 0 ... 3: 1221 case 0 ... 3:
1174 *dest = kvm_arch_ops->get_dr(vcpu, dr); 1222 *dest = kvm_x86_ops->get_dr(vcpu, dr);
1175 return X86EMUL_CONTINUE; 1223 return X86EMUL_CONTINUE;
1176 default: 1224 default:
1177 printk(KERN_DEBUG "%s: unexpected dr %u\n", 1225 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
1178 __FUNCTION__, dr);
1179 return X86EMUL_UNHANDLEABLE; 1226 return X86EMUL_UNHANDLEABLE;
1180 } 1227 }
1181} 1228}
@@ -1185,7 +1232,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
1185 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U; 1232 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
1186 int exception; 1233 int exception;
1187 1234
1188 kvm_arch_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception); 1235 kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
1189 if (exception) { 1236 if (exception) {
1190 /* FIXME: better handling */ 1237 /* FIXME: better handling */
1191 return X86EMUL_UNHANDLEABLE; 1238 return X86EMUL_UNHANDLEABLE;
@@ -1193,25 +1240,25 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
1193 return X86EMUL_CONTINUE; 1240 return X86EMUL_CONTINUE;
1194} 1241}
1195 1242
1196static void report_emulation_failure(struct x86_emulate_ctxt *ctxt) 1243void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
1197{ 1244{
1198 static int reported; 1245 static int reported;
1199 u8 opcodes[4]; 1246 u8 opcodes[4];
1200 unsigned long rip = ctxt->vcpu->rip; 1247 unsigned long rip = vcpu->rip;
1201 unsigned long rip_linear; 1248 unsigned long rip_linear;
1202 1249
1203 rip_linear = rip + get_segment_base(ctxt->vcpu, VCPU_SREG_CS); 1250 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
1204 1251
1205 if (reported) 1252 if (reported)
1206 return; 1253 return;
1207 1254
1208 emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt); 1255 emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
1209 1256
1210 printk(KERN_ERR "emulation failed but !mmio_needed?" 1257 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
1211 " rip %lx %02x %02x %02x %02x\n", 1258 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
1212 rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
1213 reported = 1; 1259 reported = 1;
1214} 1260}
1261EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
1215 1262
1216struct x86_emulate_ops emulate_ops = { 1263struct x86_emulate_ops emulate_ops = {
1217 .read_std = emulator_read_std, 1264 .read_std = emulator_read_std,
@@ -1231,12 +1278,12 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
1231 int cs_db, cs_l; 1278 int cs_db, cs_l;
1232 1279
1233 vcpu->mmio_fault_cr2 = cr2; 1280 vcpu->mmio_fault_cr2 = cr2;
1234 kvm_arch_ops->cache_regs(vcpu); 1281 kvm_x86_ops->cache_regs(vcpu);
1235 1282
1236 kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 1283 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
1237 1284
1238 emulate_ctxt.vcpu = vcpu; 1285 emulate_ctxt.vcpu = vcpu;
1239 emulate_ctxt.eflags = kvm_arch_ops->get_rflags(vcpu); 1286 emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
1240 emulate_ctxt.cr2 = cr2; 1287 emulate_ctxt.cr2 = cr2;
1241 emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM) 1288 emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
1242 ? X86EMUL_MODE_REAL : cs_l 1289 ? X86EMUL_MODE_REAL : cs_l
@@ -1259,9 +1306,13 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
1259 emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS); 1306 emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);
1260 1307
1261 vcpu->mmio_is_write = 0; 1308 vcpu->mmio_is_write = 0;
1309 vcpu->pio.string = 0;
1262 r = x86_emulate_memop(&emulate_ctxt, &emulate_ops); 1310 r = x86_emulate_memop(&emulate_ctxt, &emulate_ops);
1311 if (vcpu->pio.string)
1312 return EMULATE_DO_MMIO;
1263 1313
1264 if ((r || vcpu->mmio_is_write) && run) { 1314 if ((r || vcpu->mmio_is_write) && run) {
1315 run->exit_reason = KVM_EXIT_MMIO;
1265 run->mmio.phys_addr = vcpu->mmio_phys_addr; 1316 run->mmio.phys_addr = vcpu->mmio_phys_addr;
1266 memcpy(run->mmio.data, vcpu->mmio_data, 8); 1317 memcpy(run->mmio.data, vcpu->mmio_data, 8);
1267 run->mmio.len = vcpu->mmio_size; 1318 run->mmio.len = vcpu->mmio_size;
@@ -1272,14 +1323,14 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
1272 if (kvm_mmu_unprotect_page_virt(vcpu, cr2)) 1323 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
1273 return EMULATE_DONE; 1324 return EMULATE_DONE;
1274 if (!vcpu->mmio_needed) { 1325 if (!vcpu->mmio_needed) {
1275 report_emulation_failure(&emulate_ctxt); 1326 kvm_report_emulation_failure(vcpu, "mmio");
1276 return EMULATE_FAIL; 1327 return EMULATE_FAIL;
1277 } 1328 }
1278 return EMULATE_DO_MMIO; 1329 return EMULATE_DO_MMIO;
1279 } 1330 }
1280 1331
1281 kvm_arch_ops->decache_regs(vcpu); 1332 kvm_x86_ops->decache_regs(vcpu);
1282 kvm_arch_ops->set_rflags(vcpu, emulate_ctxt.eflags); 1333 kvm_x86_ops->set_rflags(vcpu, emulate_ctxt.eflags);
1283 1334
1284 if (vcpu->mmio_is_write) { 1335 if (vcpu->mmio_is_write) {
1285 vcpu->mmio_needed = 0; 1336 vcpu->mmio_needed = 0;
@@ -1290,14 +1341,45 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
1290} 1341}
1291EXPORT_SYMBOL_GPL(emulate_instruction); 1342EXPORT_SYMBOL_GPL(emulate_instruction);
1292 1343
1293int kvm_emulate_halt(struct kvm_vcpu *vcpu) 1344/*
1345 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
1346 */
1347static void kvm_vcpu_block(struct kvm_vcpu *vcpu)
1294{ 1348{
1295 if (vcpu->irq_summary) 1349 DECLARE_WAITQUEUE(wait, current);
1296 return 1;
1297 1350
1298 vcpu->run->exit_reason = KVM_EXIT_HLT; 1351 add_wait_queue(&vcpu->wq, &wait);
1352
1353 /*
1354 * We will block until either an interrupt or a signal wakes us up
1355 */
1356 while (!kvm_cpu_has_interrupt(vcpu)
1357 && !signal_pending(current)
1358 && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
1359 && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
1360 set_current_state(TASK_INTERRUPTIBLE);
1361 vcpu_put(vcpu);
1362 schedule();
1363 vcpu_load(vcpu);
1364 }
1365
1366 __set_current_state(TASK_RUNNING);
1367 remove_wait_queue(&vcpu->wq, &wait);
1368}
1369
1370int kvm_emulate_halt(struct kvm_vcpu *vcpu)
1371{
1299 ++vcpu->stat.halt_exits; 1372 ++vcpu->stat.halt_exits;
1300 return 0; 1373 if (irqchip_in_kernel(vcpu->kvm)) {
1374 vcpu->mp_state = VCPU_MP_STATE_HALTED;
1375 kvm_vcpu_block(vcpu);
1376 if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
1377 return -EINTR;
1378 return 1;
1379 } else {
1380 vcpu->run->exit_reason = KVM_EXIT_HLT;
1381 return 0;
1382 }
1301} 1383}
1302EXPORT_SYMBOL_GPL(kvm_emulate_halt); 1384EXPORT_SYMBOL_GPL(kvm_emulate_halt);
1303 1385
@@ -1305,7 +1387,7 @@ int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
1305{ 1387{
1306 unsigned long nr, a0, a1, a2, a3, a4, a5, ret; 1388 unsigned long nr, a0, a1, a2, a3, a4, a5, ret;
1307 1389
1308 kvm_arch_ops->cache_regs(vcpu); 1390 kvm_x86_ops->cache_regs(vcpu);
1309 ret = -KVM_EINVAL; 1391 ret = -KVM_EINVAL;
1310#ifdef CONFIG_X86_64 1392#ifdef CONFIG_X86_64
1311 if (is_long_mode(vcpu)) { 1393 if (is_long_mode(vcpu)) {
@@ -1329,6 +1411,7 @@ int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
1329 } 1411 }
1330 switch (nr) { 1412 switch (nr) {
1331 default: 1413 default:
1414 run->hypercall.nr = nr;
1332 run->hypercall.args[0] = a0; 1415 run->hypercall.args[0] = a0;
1333 run->hypercall.args[1] = a1; 1416 run->hypercall.args[1] = a1;
1334 run->hypercall.args[2] = a2; 1417 run->hypercall.args[2] = a2;
@@ -1337,11 +1420,11 @@ int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
1337 run->hypercall.args[5] = a5; 1420 run->hypercall.args[5] = a5;
1338 run->hypercall.ret = ret; 1421 run->hypercall.ret = ret;
1339 run->hypercall.longmode = is_long_mode(vcpu); 1422 run->hypercall.longmode = is_long_mode(vcpu);
1340 kvm_arch_ops->decache_regs(vcpu); 1423 kvm_x86_ops->decache_regs(vcpu);
1341 return 0; 1424 return 0;
1342 } 1425 }
1343 vcpu->regs[VCPU_REGS_RAX] = ret; 1426 vcpu->regs[VCPU_REGS_RAX] = ret;
1344 kvm_arch_ops->decache_regs(vcpu); 1427 kvm_x86_ops->decache_regs(vcpu);
1345 return 1; 1428 return 1;
1346} 1429}
1347EXPORT_SYMBOL_GPL(kvm_hypercall); 1430EXPORT_SYMBOL_GPL(kvm_hypercall);
@@ -1355,26 +1438,26 @@ void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
1355{ 1438{
1356 struct descriptor_table dt = { limit, base }; 1439 struct descriptor_table dt = { limit, base };
1357 1440
1358 kvm_arch_ops->set_gdt(vcpu, &dt); 1441 kvm_x86_ops->set_gdt(vcpu, &dt);
1359} 1442}
1360 1443
1361void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base) 1444void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
1362{ 1445{
1363 struct descriptor_table dt = { limit, base }; 1446 struct descriptor_table dt = { limit, base };
1364 1447
1365 kvm_arch_ops->set_idt(vcpu, &dt); 1448 kvm_x86_ops->set_idt(vcpu, &dt);
1366} 1449}
1367 1450
1368void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw, 1451void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
1369 unsigned long *rflags) 1452 unsigned long *rflags)
1370{ 1453{
1371 lmsw(vcpu, msw); 1454 lmsw(vcpu, msw);
1372 *rflags = kvm_arch_ops->get_rflags(vcpu); 1455 *rflags = kvm_x86_ops->get_rflags(vcpu);
1373} 1456}
1374 1457
1375unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr) 1458unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
1376{ 1459{
1377 kvm_arch_ops->decache_cr4_guest_bits(vcpu); 1460 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
1378 switch (cr) { 1461 switch (cr) {
1379 case 0: 1462 case 0:
1380 return vcpu->cr0; 1463 return vcpu->cr0;
@@ -1396,7 +1479,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
1396 switch (cr) { 1479 switch (cr) {
1397 case 0: 1480 case 0:
1398 set_cr0(vcpu, mk_cr_64(vcpu->cr0, val)); 1481 set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
1399 *rflags = kvm_arch_ops->get_rflags(vcpu); 1482 *rflags = kvm_x86_ops->get_rflags(vcpu);
1400 break; 1483 break;
1401 case 2: 1484 case 2:
1402 vcpu->cr2 = val; 1485 vcpu->cr2 = val;
@@ -1439,7 +1522,7 @@ static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
1439 1522
1440 mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT); 1523 mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT);
1441 para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT); 1524 para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
1442 para_state = kmap_atomic(para_state_page, KM_USER0); 1525 para_state = kmap(para_state_page);
1443 1526
1444 printk(KERN_DEBUG ".... guest version: %d\n", para_state->guest_version); 1527 printk(KERN_DEBUG ".... guest version: %d\n", para_state->guest_version);
1445 printk(KERN_DEBUG ".... size: %d\n", para_state->size); 1528 printk(KERN_DEBUG ".... size: %d\n", para_state->size);
@@ -1470,12 +1553,12 @@ static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
1470 mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT); 1553 mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT);
1471 hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT), 1554 hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
1472 KM_USER1) + (hypercall_hpa & ~PAGE_MASK); 1555 KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
1473 kvm_arch_ops->patch_hypercall(vcpu, hypercall); 1556 kvm_x86_ops->patch_hypercall(vcpu, hypercall);
1474 kunmap_atomic(hypercall, KM_USER1); 1557 kunmap_atomic(hypercall, KM_USER1);
1475 1558
1476 para_state->ret = 0; 1559 para_state->ret = 0;
1477err_kunmap_skip: 1560err_kunmap_skip:
1478 kunmap_atomic(para_state, KM_USER0); 1561 kunmap(para_state_page);
1479 return 0; 1562 return 0;
1480err_gp: 1563err_gp:
1481 return 1; 1564 return 1;
@@ -1511,7 +1594,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1511 data = 3; 1594 data = 3;
1512 break; 1595 break;
1513 case MSR_IA32_APICBASE: 1596 case MSR_IA32_APICBASE:
1514 data = vcpu->apic_base; 1597 data = kvm_get_apic_base(vcpu);
1515 break; 1598 break;
1516 case MSR_IA32_MISC_ENABLE: 1599 case MSR_IA32_MISC_ENABLE:
1517 data = vcpu->ia32_misc_enable_msr; 1600 data = vcpu->ia32_misc_enable_msr;
@@ -1522,7 +1605,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1522 break; 1605 break;
1523#endif 1606#endif
1524 default: 1607 default:
1525 printk(KERN_ERR "kvm: unhandled rdmsr: 0x%x\n", msr); 1608 pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
1526 return 1; 1609 return 1;
1527 } 1610 }
1528 *pdata = data; 1611 *pdata = data;
@@ -1537,7 +1620,7 @@ EXPORT_SYMBOL_GPL(kvm_get_msr_common);
1537 */ 1620 */
1538int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) 1621int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1539{ 1622{
1540 return kvm_arch_ops->get_msr(vcpu, msr_index, pdata); 1623 return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
1541} 1624}
1542 1625
1543#ifdef CONFIG_X86_64 1626#ifdef CONFIG_X86_64
@@ -1558,7 +1641,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
1558 return; 1641 return;
1559 } 1642 }
1560 1643
1561 kvm_arch_ops->set_efer(vcpu, efer); 1644 kvm_x86_ops->set_efer(vcpu, efer);
1562 1645
1563 efer &= ~EFER_LMA; 1646 efer &= ~EFER_LMA;
1564 efer |= vcpu->shadow_efer & EFER_LMA; 1647 efer |= vcpu->shadow_efer & EFER_LMA;
@@ -1577,11 +1660,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1577 break; 1660 break;
1578#endif 1661#endif
1579 case MSR_IA32_MC0_STATUS: 1662 case MSR_IA32_MC0_STATUS:
1580 printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n", 1663 pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
1581 __FUNCTION__, data); 1664 __FUNCTION__, data);
1582 break; 1665 break;
1583 case MSR_IA32_MCG_STATUS: 1666 case MSR_IA32_MCG_STATUS:
1584 printk(KERN_WARNING "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n", 1667 pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
1585 __FUNCTION__, data); 1668 __FUNCTION__, data);
1586 break; 1669 break;
1587 case MSR_IA32_UCODE_REV: 1670 case MSR_IA32_UCODE_REV:
@@ -1589,7 +1672,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1589 case 0x200 ... 0x2ff: /* MTRRs */ 1672 case 0x200 ... 0x2ff: /* MTRRs */
1590 break; 1673 break;
1591 case MSR_IA32_APICBASE: 1674 case MSR_IA32_APICBASE:
1592 vcpu->apic_base = data; 1675 kvm_set_apic_base(vcpu, data);
1593 break; 1676 break;
1594 case MSR_IA32_MISC_ENABLE: 1677 case MSR_IA32_MISC_ENABLE:
1595 vcpu->ia32_misc_enable_msr = data; 1678 vcpu->ia32_misc_enable_msr = data;
@@ -1601,7 +1684,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1601 return vcpu_register_para(vcpu, data); 1684 return vcpu_register_para(vcpu, data);
1602 1685
1603 default: 1686 default:
1604 printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr); 1687 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
1605 return 1; 1688 return 1;
1606 } 1689 }
1607 return 0; 1690 return 0;
@@ -1615,44 +1698,24 @@ EXPORT_SYMBOL_GPL(kvm_set_msr_common);
1615 */ 1698 */
1616int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) 1699int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1617{ 1700{
1618 return kvm_arch_ops->set_msr(vcpu, msr_index, data); 1701 return kvm_x86_ops->set_msr(vcpu, msr_index, data);
1619} 1702}
1620 1703
1621void kvm_resched(struct kvm_vcpu *vcpu) 1704void kvm_resched(struct kvm_vcpu *vcpu)
1622{ 1705{
1623 if (!need_resched()) 1706 if (!need_resched())
1624 return; 1707 return;
1625 vcpu_put(vcpu);
1626 cond_resched(); 1708 cond_resched();
1627 vcpu_load(vcpu);
1628} 1709}
1629EXPORT_SYMBOL_GPL(kvm_resched); 1710EXPORT_SYMBOL_GPL(kvm_resched);
1630 1711
1631void load_msrs(struct vmx_msr_entry *e, int n)
1632{
1633 int i;
1634
1635 for (i = 0; i < n; ++i)
1636 wrmsrl(e[i].index, e[i].data);
1637}
1638EXPORT_SYMBOL_GPL(load_msrs);
1639
1640void save_msrs(struct vmx_msr_entry *e, int n)
1641{
1642 int i;
1643
1644 for (i = 0; i < n; ++i)
1645 rdmsrl(e[i].index, e[i].data);
1646}
1647EXPORT_SYMBOL_GPL(save_msrs);
1648
1649void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) 1712void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
1650{ 1713{
1651 int i; 1714 int i;
1652 u32 function; 1715 u32 function;
1653 struct kvm_cpuid_entry *e, *best; 1716 struct kvm_cpuid_entry *e, *best;
1654 1717
1655 kvm_arch_ops->cache_regs(vcpu); 1718 kvm_x86_ops->cache_regs(vcpu);
1656 function = vcpu->regs[VCPU_REGS_RAX]; 1719 function = vcpu->regs[VCPU_REGS_RAX];
1657 vcpu->regs[VCPU_REGS_RAX] = 0; 1720 vcpu->regs[VCPU_REGS_RAX] = 0;
1658 vcpu->regs[VCPU_REGS_RBX] = 0; 1721 vcpu->regs[VCPU_REGS_RBX] = 0;
@@ -1678,8 +1741,8 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
1678 vcpu->regs[VCPU_REGS_RCX] = best->ecx; 1741 vcpu->regs[VCPU_REGS_RCX] = best->ecx;
1679 vcpu->regs[VCPU_REGS_RDX] = best->edx; 1742 vcpu->regs[VCPU_REGS_RDX] = best->edx;
1680 } 1743 }
1681 kvm_arch_ops->decache_regs(vcpu); 1744 kvm_x86_ops->decache_regs(vcpu);
1682 kvm_arch_ops->skip_emulated_instruction(vcpu); 1745 kvm_x86_ops->skip_emulated_instruction(vcpu);
1683} 1746}
1684EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); 1747EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
1685 1748
@@ -1690,11 +1753,9 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
1690 unsigned bytes; 1753 unsigned bytes;
1691 int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1; 1754 int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;
1692 1755
1693 kvm_arch_ops->vcpu_put(vcpu);
1694 q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE, 1756 q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
1695 PAGE_KERNEL); 1757 PAGE_KERNEL);
1696 if (!q) { 1758 if (!q) {
1697 kvm_arch_ops->vcpu_load(vcpu);
1698 free_pio_guest_pages(vcpu); 1759 free_pio_guest_pages(vcpu);
1699 return -ENOMEM; 1760 return -ENOMEM;
1700 } 1761 }
@@ -1706,7 +1767,6 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
1706 memcpy(p, q, bytes); 1767 memcpy(p, q, bytes);
1707 q -= vcpu->pio.guest_page_offset; 1768 q -= vcpu->pio.guest_page_offset;
1708 vunmap(q); 1769 vunmap(q);
1709 kvm_arch_ops->vcpu_load(vcpu);
1710 free_pio_guest_pages(vcpu); 1770 free_pio_guest_pages(vcpu);
1711 return 0; 1771 return 0;
1712} 1772}
@@ -1717,7 +1777,7 @@ static int complete_pio(struct kvm_vcpu *vcpu)
1717 long delta; 1777 long delta;
1718 int r; 1778 int r;
1719 1779
1720 kvm_arch_ops->cache_regs(vcpu); 1780 kvm_x86_ops->cache_regs(vcpu);
1721 1781
1722 if (!io->string) { 1782 if (!io->string) {
1723 if (io->in) 1783 if (io->in)
@@ -1727,7 +1787,7 @@ static int complete_pio(struct kvm_vcpu *vcpu)
1727 if (io->in) { 1787 if (io->in) {
1728 r = pio_copy_data(vcpu); 1788 r = pio_copy_data(vcpu);
1729 if (r) { 1789 if (r) {
1730 kvm_arch_ops->cache_regs(vcpu); 1790 kvm_x86_ops->cache_regs(vcpu);
1731 return r; 1791 return r;
1732 } 1792 }
1733 } 1793 }
@@ -1750,79 +1810,109 @@ static int complete_pio(struct kvm_vcpu *vcpu)
1750 vcpu->regs[VCPU_REGS_RSI] += delta; 1810 vcpu->regs[VCPU_REGS_RSI] += delta;
1751 } 1811 }
1752 1812
1753 kvm_arch_ops->decache_regs(vcpu); 1813 kvm_x86_ops->decache_regs(vcpu);
1754 1814
1755 io->count -= io->cur_count; 1815 io->count -= io->cur_count;
1756 io->cur_count = 0; 1816 io->cur_count = 0;
1757 1817
1758 if (!io->count)
1759 kvm_arch_ops->skip_emulated_instruction(vcpu);
1760 return 0; 1818 return 0;
1761} 1819}
1762 1820
1763void kernel_pio(struct kvm_io_device *pio_dev, struct kvm_vcpu *vcpu) 1821static void kernel_pio(struct kvm_io_device *pio_dev,
1822 struct kvm_vcpu *vcpu,
1823 void *pd)
1764{ 1824{
1765 /* TODO: String I/O for in kernel device */ 1825 /* TODO: String I/O for in kernel device */
1766 1826
1827 mutex_lock(&vcpu->kvm->lock);
1767 if (vcpu->pio.in) 1828 if (vcpu->pio.in)
1768 kvm_iodevice_read(pio_dev, vcpu->pio.port, 1829 kvm_iodevice_read(pio_dev, vcpu->pio.port,
1769 vcpu->pio.size, 1830 vcpu->pio.size,
1770 vcpu->pio_data); 1831 pd);
1771 else 1832 else
1772 kvm_iodevice_write(pio_dev, vcpu->pio.port, 1833 kvm_iodevice_write(pio_dev, vcpu->pio.port,
1773 vcpu->pio.size, 1834 vcpu->pio.size,
1774 vcpu->pio_data); 1835 pd);
1836 mutex_unlock(&vcpu->kvm->lock);
1837}
1838
1839static void pio_string_write(struct kvm_io_device *pio_dev,
1840 struct kvm_vcpu *vcpu)
1841{
1842 struct kvm_pio_request *io = &vcpu->pio;
1843 void *pd = vcpu->pio_data;
1844 int i;
1845
1846 mutex_lock(&vcpu->kvm->lock);
1847 for (i = 0; i < io->cur_count; i++) {
1848 kvm_iodevice_write(pio_dev, io->port,
1849 io->size,
1850 pd);
1851 pd += io->size;
1852 }
1853 mutex_unlock(&vcpu->kvm->lock);
1775} 1854}
1776 1855
1777int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, 1856int kvm_emulate_pio (struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1778 int size, unsigned long count, int string, int down, 1857 int size, unsigned port)
1858{
1859 struct kvm_io_device *pio_dev;
1860
1861 vcpu->run->exit_reason = KVM_EXIT_IO;
1862 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
1863 vcpu->run->io.size = vcpu->pio.size = size;
1864 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
1865 vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1;
1866 vcpu->run->io.port = vcpu->pio.port = port;
1867 vcpu->pio.in = in;
1868 vcpu->pio.string = 0;
1869 vcpu->pio.down = 0;
1870 vcpu->pio.guest_page_offset = 0;
1871 vcpu->pio.rep = 0;
1872
1873 kvm_x86_ops->cache_regs(vcpu);
1874 memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
1875 kvm_x86_ops->decache_regs(vcpu);
1876
1877 kvm_x86_ops->skip_emulated_instruction(vcpu);
1878
1879 pio_dev = vcpu_find_pio_dev(vcpu, port);
1880 if (pio_dev) {
1881 kernel_pio(pio_dev, vcpu, vcpu->pio_data);
1882 complete_pio(vcpu);
1883 return 1;
1884 }
1885 return 0;
1886}
1887EXPORT_SYMBOL_GPL(kvm_emulate_pio);
1888
1889int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1890 int size, unsigned long count, int down,
1779 gva_t address, int rep, unsigned port) 1891 gva_t address, int rep, unsigned port)
1780{ 1892{
1781 unsigned now, in_page; 1893 unsigned now, in_page;
1782 int i; 1894 int i, ret = 0;
1783 int nr_pages = 1; 1895 int nr_pages = 1;
1784 struct page *page; 1896 struct page *page;
1785 struct kvm_io_device *pio_dev; 1897 struct kvm_io_device *pio_dev;
1786 1898
1787 vcpu->run->exit_reason = KVM_EXIT_IO; 1899 vcpu->run->exit_reason = KVM_EXIT_IO;
1788 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 1900 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
1789 vcpu->run->io.size = size; 1901 vcpu->run->io.size = vcpu->pio.size = size;
1790 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; 1902 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
1791 vcpu->run->io.count = count; 1903 vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count;
1792 vcpu->run->io.port = port; 1904 vcpu->run->io.port = vcpu->pio.port = port;
1793 vcpu->pio.count = count;
1794 vcpu->pio.cur_count = count;
1795 vcpu->pio.size = size;
1796 vcpu->pio.in = in; 1905 vcpu->pio.in = in;
1797 vcpu->pio.port = port; 1906 vcpu->pio.string = 1;
1798 vcpu->pio.string = string;
1799 vcpu->pio.down = down; 1907 vcpu->pio.down = down;
1800 vcpu->pio.guest_page_offset = offset_in_page(address); 1908 vcpu->pio.guest_page_offset = offset_in_page(address);
1801 vcpu->pio.rep = rep; 1909 vcpu->pio.rep = rep;
1802 1910
1803 pio_dev = vcpu_find_pio_dev(vcpu, port);
1804 if (!string) {
1805 kvm_arch_ops->cache_regs(vcpu);
1806 memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
1807 kvm_arch_ops->decache_regs(vcpu);
1808 if (pio_dev) {
1809 kernel_pio(pio_dev, vcpu);
1810 complete_pio(vcpu);
1811 return 1;
1812 }
1813 return 0;
1814 }
1815 /* TODO: String I/O for in kernel device */
1816 if (pio_dev)
1817 printk(KERN_ERR "kvm_setup_pio: no string io support\n");
1818
1819 if (!count) { 1911 if (!count) {
1820 kvm_arch_ops->skip_emulated_instruction(vcpu); 1912 kvm_x86_ops->skip_emulated_instruction(vcpu);
1821 return 1; 1913 return 1;
1822 } 1914 }
1823 1915
1824 now = min(count, PAGE_SIZE / size);
1825
1826 if (!down) 1916 if (!down)
1827 in_page = PAGE_SIZE - offset_in_page(address); 1917 in_page = PAGE_SIZE - offset_in_page(address);
1828 else 1918 else
@@ -1841,20 +1931,23 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1841 /* 1931 /*
1842 * String I/O in reverse. Yuck. Kill the guest, fix later. 1932 * String I/O in reverse. Yuck. Kill the guest, fix later.
1843 */ 1933 */
1844 printk(KERN_ERR "kvm: guest string pio down\n"); 1934 pr_unimpl(vcpu, "guest string pio down\n");
1845 inject_gp(vcpu); 1935 inject_gp(vcpu);
1846 return 1; 1936 return 1;
1847 } 1937 }
1848 vcpu->run->io.count = now; 1938 vcpu->run->io.count = now;
1849 vcpu->pio.cur_count = now; 1939 vcpu->pio.cur_count = now;
1850 1940
1941 if (vcpu->pio.cur_count == vcpu->pio.count)
1942 kvm_x86_ops->skip_emulated_instruction(vcpu);
1943
1851 for (i = 0; i < nr_pages; ++i) { 1944 for (i = 0; i < nr_pages; ++i) {
1852 spin_lock(&vcpu->kvm->lock); 1945 mutex_lock(&vcpu->kvm->lock);
1853 page = gva_to_page(vcpu, address + i * PAGE_SIZE); 1946 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
1854 if (page) 1947 if (page)
1855 get_page(page); 1948 get_page(page);
1856 vcpu->pio.guest_pages[i] = page; 1949 vcpu->pio.guest_pages[i] = page;
1857 spin_unlock(&vcpu->kvm->lock); 1950 mutex_unlock(&vcpu->kvm->lock);
1858 if (!page) { 1951 if (!page) {
1859 inject_gp(vcpu); 1952 inject_gp(vcpu);
1860 free_pio_guest_pages(vcpu); 1953 free_pio_guest_pages(vcpu);
@@ -1862,11 +1955,145 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1862 } 1955 }
1863 } 1956 }
1864 1957
1865 if (!vcpu->pio.in) 1958 pio_dev = vcpu_find_pio_dev(vcpu, port);
1866 return pio_copy_data(vcpu); 1959 if (!vcpu->pio.in) {
1867 return 0; 1960 /* string PIO write */
1961 ret = pio_copy_data(vcpu);
1962 if (ret >= 0 && pio_dev) {
1963 pio_string_write(pio_dev, vcpu);
1964 complete_pio(vcpu);
1965 if (vcpu->pio.count == 0)
1966 ret = 1;
1967 }
1968 } else if (pio_dev)
1969 pr_unimpl(vcpu, "no string pio read support yet, "
1970 "port %x size %d count %ld\n",
1971 port, size, count);
1972
1973 return ret;
1974}
1975EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
1976
1977/*
1978 * Check if userspace requested an interrupt window, and that the
1979 * interrupt window is open.
1980 *
1981 * No need to exit to userspace if we already have an interrupt queued.
1982 */
1983static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
1984 struct kvm_run *kvm_run)
1985{
1986 return (!vcpu->irq_summary &&
1987 kvm_run->request_interrupt_window &&
1988 vcpu->interrupt_window_open &&
1989 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
1990}
1991
1992static void post_kvm_run_save(struct kvm_vcpu *vcpu,
1993 struct kvm_run *kvm_run)
1994{
1995 kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
1996 kvm_run->cr8 = get_cr8(vcpu);
1997 kvm_run->apic_base = kvm_get_apic_base(vcpu);
1998 if (irqchip_in_kernel(vcpu->kvm))
1999 kvm_run->ready_for_interrupt_injection = 1;
2000 else
2001 kvm_run->ready_for_interrupt_injection =
2002 (vcpu->interrupt_window_open &&
2003 vcpu->irq_summary == 0);
2004}
2005
2006static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2007{
2008 int r;
2009
2010 if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
2011 printk("vcpu %d received sipi with vector # %x\n",
2012 vcpu->vcpu_id, vcpu->sipi_vector);
2013 kvm_lapic_reset(vcpu);
2014 kvm_x86_ops->vcpu_reset(vcpu);
2015 vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
2016 }
2017
2018preempted:
2019 if (vcpu->guest_debug.enabled)
2020 kvm_x86_ops->guest_debug_pre(vcpu);
2021
2022again:
2023 r = kvm_mmu_reload(vcpu);
2024 if (unlikely(r))
2025 goto out;
2026
2027 preempt_disable();
2028
2029 kvm_x86_ops->prepare_guest_switch(vcpu);
2030 kvm_load_guest_fpu(vcpu);
2031
2032 local_irq_disable();
2033
2034 if (signal_pending(current)) {
2035 local_irq_enable();
2036 preempt_enable();
2037 r = -EINTR;
2038 kvm_run->exit_reason = KVM_EXIT_INTR;
2039 ++vcpu->stat.signal_exits;
2040 goto out;
2041 }
2042
2043 if (irqchip_in_kernel(vcpu->kvm))
2044 kvm_x86_ops->inject_pending_irq(vcpu);
2045 else if (!vcpu->mmio_read_completed)
2046 kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
2047
2048 vcpu->guest_mode = 1;
2049
2050 if (vcpu->requests)
2051 if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
2052 kvm_x86_ops->tlb_flush(vcpu);
2053
2054 kvm_x86_ops->run(vcpu, kvm_run);
2055
2056 vcpu->guest_mode = 0;
2057 local_irq_enable();
2058
2059 ++vcpu->stat.exits;
2060
2061 preempt_enable();
2062
2063 /*
2064 * Profile KVM exit RIPs:
2065 */
2066 if (unlikely(prof_on == KVM_PROFILING)) {
2067 kvm_x86_ops->cache_regs(vcpu);
2068 profile_hit(KVM_PROFILING, (void *)vcpu->rip);
2069 }
2070
2071 r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
2072
2073 if (r > 0) {
2074 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
2075 r = -EINTR;
2076 kvm_run->exit_reason = KVM_EXIT_INTR;
2077 ++vcpu->stat.request_irq_exits;
2078 goto out;
2079 }
2080 if (!need_resched()) {
2081 ++vcpu->stat.light_exits;
2082 goto again;
2083 }
2084 }
2085
2086out:
2087 if (r > 0) {
2088 kvm_resched(vcpu);
2089 goto preempted;
2090 }
2091
2092 post_kvm_run_save(vcpu, kvm_run);
2093
2094 return r;
1868} 2095}
1869EXPORT_SYMBOL_GPL(kvm_setup_pio); 2096
1870 2097
1871static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 2098static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1872{ 2099{
@@ -1875,11 +2102,18 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1875 2102
1876 vcpu_load(vcpu); 2103 vcpu_load(vcpu);
1877 2104
2105 if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
2106 kvm_vcpu_block(vcpu);
2107 vcpu_put(vcpu);
2108 return -EAGAIN;
2109 }
2110
1878 if (vcpu->sigset_active) 2111 if (vcpu->sigset_active)
1879 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 2112 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1880 2113
1881 /* re-sync apic's tpr */ 2114 /* re-sync apic's tpr */
1882 vcpu->cr8 = kvm_run->cr8; 2115 if (!irqchip_in_kernel(vcpu->kvm))
2116 set_cr8(vcpu, kvm_run->cr8);
1883 2117
1884 if (vcpu->pio.cur_count) { 2118 if (vcpu->pio.cur_count) {
1885 r = complete_pio(vcpu); 2119 r = complete_pio(vcpu);
@@ -1897,19 +2131,18 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1897 /* 2131 /*
1898 * Read-modify-write. Back to userspace. 2132 * Read-modify-write. Back to userspace.
1899 */ 2133 */
1900 kvm_run->exit_reason = KVM_EXIT_MMIO;
1901 r = 0; 2134 r = 0;
1902 goto out; 2135 goto out;
1903 } 2136 }
1904 } 2137 }
1905 2138
1906 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) { 2139 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
1907 kvm_arch_ops->cache_regs(vcpu); 2140 kvm_x86_ops->cache_regs(vcpu);
1908 vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret; 2141 vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
1909 kvm_arch_ops->decache_regs(vcpu); 2142 kvm_x86_ops->decache_regs(vcpu);
1910 } 2143 }
1911 2144
1912 r = kvm_arch_ops->run(vcpu, kvm_run); 2145 r = __vcpu_run(vcpu, kvm_run);
1913 2146
1914out: 2147out:
1915 if (vcpu->sigset_active) 2148 if (vcpu->sigset_active)
@@ -1924,7 +2157,7 @@ static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
1924{ 2157{
1925 vcpu_load(vcpu); 2158 vcpu_load(vcpu);
1926 2159
1927 kvm_arch_ops->cache_regs(vcpu); 2160 kvm_x86_ops->cache_regs(vcpu);
1928 2161
1929 regs->rax = vcpu->regs[VCPU_REGS_RAX]; 2162 regs->rax = vcpu->regs[VCPU_REGS_RAX];
1930 regs->rbx = vcpu->regs[VCPU_REGS_RBX]; 2163 regs->rbx = vcpu->regs[VCPU_REGS_RBX];
@@ -1946,7 +2179,7 @@ static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
1946#endif 2179#endif
1947 2180
1948 regs->rip = vcpu->rip; 2181 regs->rip = vcpu->rip;
1949 regs->rflags = kvm_arch_ops->get_rflags(vcpu); 2182 regs->rflags = kvm_x86_ops->get_rflags(vcpu);
1950 2183
1951 /* 2184 /*
1952 * Don't leak debug flags in case they were set for guest debugging 2185 * Don't leak debug flags in case they were set for guest debugging
@@ -1984,9 +2217,9 @@ static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
1984#endif 2217#endif
1985 2218
1986 vcpu->rip = regs->rip; 2219 vcpu->rip = regs->rip;
1987 kvm_arch_ops->set_rflags(vcpu, regs->rflags); 2220 kvm_x86_ops->set_rflags(vcpu, regs->rflags);
1988 2221
1989 kvm_arch_ops->decache_regs(vcpu); 2222 kvm_x86_ops->decache_regs(vcpu);
1990 2223
1991 vcpu_put(vcpu); 2224 vcpu_put(vcpu);
1992 2225
@@ -1996,13 +2229,14 @@ static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
1996static void get_segment(struct kvm_vcpu *vcpu, 2229static void get_segment(struct kvm_vcpu *vcpu,
1997 struct kvm_segment *var, int seg) 2230 struct kvm_segment *var, int seg)
1998{ 2231{
1999 return kvm_arch_ops->get_segment(vcpu, var, seg); 2232 return kvm_x86_ops->get_segment(vcpu, var, seg);
2000} 2233}
2001 2234
2002static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 2235static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2003 struct kvm_sregs *sregs) 2236 struct kvm_sregs *sregs)
2004{ 2237{
2005 struct descriptor_table dt; 2238 struct descriptor_table dt;
2239 int pending_vec;
2006 2240
2007 vcpu_load(vcpu); 2241 vcpu_load(vcpu);
2008 2242
@@ -2016,24 +2250,31 @@ static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2016 get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 2250 get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2017 get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 2251 get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2018 2252
2019 kvm_arch_ops->get_idt(vcpu, &dt); 2253 kvm_x86_ops->get_idt(vcpu, &dt);
2020 sregs->idt.limit = dt.limit; 2254 sregs->idt.limit = dt.limit;
2021 sregs->idt.base = dt.base; 2255 sregs->idt.base = dt.base;
2022 kvm_arch_ops->get_gdt(vcpu, &dt); 2256 kvm_x86_ops->get_gdt(vcpu, &dt);
2023 sregs->gdt.limit = dt.limit; 2257 sregs->gdt.limit = dt.limit;
2024 sregs->gdt.base = dt.base; 2258 sregs->gdt.base = dt.base;
2025 2259
2026 kvm_arch_ops->decache_cr4_guest_bits(vcpu); 2260 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2027 sregs->cr0 = vcpu->cr0; 2261 sregs->cr0 = vcpu->cr0;
2028 sregs->cr2 = vcpu->cr2; 2262 sregs->cr2 = vcpu->cr2;
2029 sregs->cr3 = vcpu->cr3; 2263 sregs->cr3 = vcpu->cr3;
2030 sregs->cr4 = vcpu->cr4; 2264 sregs->cr4 = vcpu->cr4;
2031 sregs->cr8 = vcpu->cr8; 2265 sregs->cr8 = get_cr8(vcpu);
2032 sregs->efer = vcpu->shadow_efer; 2266 sregs->efer = vcpu->shadow_efer;
2033 sregs->apic_base = vcpu->apic_base; 2267 sregs->apic_base = kvm_get_apic_base(vcpu);
2034 2268
2035 memcpy(sregs->interrupt_bitmap, vcpu->irq_pending, 2269 if (irqchip_in_kernel(vcpu->kvm)) {
2036 sizeof sregs->interrupt_bitmap); 2270 memset(sregs->interrupt_bitmap, 0,
2271 sizeof sregs->interrupt_bitmap);
2272 pending_vec = kvm_x86_ops->get_irq(vcpu);
2273 if (pending_vec >= 0)
2274 set_bit(pending_vec, (unsigned long *)sregs->interrupt_bitmap);
2275 } else
2276 memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
2277 sizeof sregs->interrupt_bitmap);
2037 2278
2038 vcpu_put(vcpu); 2279 vcpu_put(vcpu);
2039 2280
@@ -2043,56 +2284,69 @@ static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2043static void set_segment(struct kvm_vcpu *vcpu, 2284static void set_segment(struct kvm_vcpu *vcpu,
2044 struct kvm_segment *var, int seg) 2285 struct kvm_segment *var, int seg)
2045{ 2286{
2046 return kvm_arch_ops->set_segment(vcpu, var, seg); 2287 return kvm_x86_ops->set_segment(vcpu, var, seg);
2047} 2288}
2048 2289
2049static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 2290static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2050 struct kvm_sregs *sregs) 2291 struct kvm_sregs *sregs)
2051{ 2292{
2052 int mmu_reset_needed = 0; 2293 int mmu_reset_needed = 0;
2053 int i; 2294 int i, pending_vec, max_bits;
2054 struct descriptor_table dt; 2295 struct descriptor_table dt;
2055 2296
2056 vcpu_load(vcpu); 2297 vcpu_load(vcpu);
2057 2298
2058 dt.limit = sregs->idt.limit; 2299 dt.limit = sregs->idt.limit;
2059 dt.base = sregs->idt.base; 2300 dt.base = sregs->idt.base;
2060 kvm_arch_ops->set_idt(vcpu, &dt); 2301 kvm_x86_ops->set_idt(vcpu, &dt);
2061 dt.limit = sregs->gdt.limit; 2302 dt.limit = sregs->gdt.limit;
2062 dt.base = sregs->gdt.base; 2303 dt.base = sregs->gdt.base;
2063 kvm_arch_ops->set_gdt(vcpu, &dt); 2304 kvm_x86_ops->set_gdt(vcpu, &dt);
2064 2305
2065 vcpu->cr2 = sregs->cr2; 2306 vcpu->cr2 = sregs->cr2;
2066 mmu_reset_needed |= vcpu->cr3 != sregs->cr3; 2307 mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
2067 vcpu->cr3 = sregs->cr3; 2308 vcpu->cr3 = sregs->cr3;
2068 2309
2069 vcpu->cr8 = sregs->cr8; 2310 set_cr8(vcpu, sregs->cr8);
2070 2311
2071 mmu_reset_needed |= vcpu->shadow_efer != sregs->efer; 2312 mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
2072#ifdef CONFIG_X86_64 2313#ifdef CONFIG_X86_64
2073 kvm_arch_ops->set_efer(vcpu, sregs->efer); 2314 kvm_x86_ops->set_efer(vcpu, sregs->efer);
2074#endif 2315#endif
2075 vcpu->apic_base = sregs->apic_base; 2316 kvm_set_apic_base(vcpu, sregs->apic_base);
2076 2317
2077 kvm_arch_ops->decache_cr4_guest_bits(vcpu); 2318 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2078 2319
2079 mmu_reset_needed |= vcpu->cr0 != sregs->cr0; 2320 mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
2080 kvm_arch_ops->set_cr0(vcpu, sregs->cr0); 2321 vcpu->cr0 = sregs->cr0;
2322 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
2081 2323
2082 mmu_reset_needed |= vcpu->cr4 != sregs->cr4; 2324 mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
2083 kvm_arch_ops->set_cr4(vcpu, sregs->cr4); 2325 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
2084 if (!is_long_mode(vcpu) && is_pae(vcpu)) 2326 if (!is_long_mode(vcpu) && is_pae(vcpu))
2085 load_pdptrs(vcpu, vcpu->cr3); 2327 load_pdptrs(vcpu, vcpu->cr3);
2086 2328
2087 if (mmu_reset_needed) 2329 if (mmu_reset_needed)
2088 kvm_mmu_reset_context(vcpu); 2330 kvm_mmu_reset_context(vcpu);
2089 2331
2090 memcpy(vcpu->irq_pending, sregs->interrupt_bitmap, 2332 if (!irqchip_in_kernel(vcpu->kvm)) {
2091 sizeof vcpu->irq_pending); 2333 memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
2092 vcpu->irq_summary = 0; 2334 sizeof vcpu->irq_pending);
2093 for (i = 0; i < NR_IRQ_WORDS; ++i) 2335 vcpu->irq_summary = 0;
2094 if (vcpu->irq_pending[i]) 2336 for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
2095 __set_bit(i, &vcpu->irq_summary); 2337 if (vcpu->irq_pending[i])
2338 __set_bit(i, &vcpu->irq_summary);
2339 } else {
2340 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
2341 pending_vec = find_first_bit(
2342 (const unsigned long *)sregs->interrupt_bitmap,
2343 max_bits);
2344 /* Only pending external irq is handled here */
2345 if (pending_vec < max_bits) {
2346 kvm_x86_ops->set_irq(vcpu, pending_vec);
2347 printk("Set back pending irq %d\n", pending_vec);
2348 }
2349 }
2096 2350
2097 set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 2351 set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2098 set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 2352 set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
@@ -2109,6 +2363,16 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2109 return 0; 2363 return 0;
2110} 2364}
2111 2365
2366void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
2367{
2368 struct kvm_segment cs;
2369
2370 get_segment(vcpu, &cs, VCPU_SREG_CS);
2371 *db = cs.db;
2372 *l = cs.l;
2373}
2374EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
2375
2112/* 2376/*
2113 * List of msr numbers which we expose to userspace through KVM_GET_MSRS 2377 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
2114 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. 2378 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
@@ -2236,13 +2500,13 @@ static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2236 gpa_t gpa; 2500 gpa_t gpa;
2237 2501
2238 vcpu_load(vcpu); 2502 vcpu_load(vcpu);
2239 spin_lock(&vcpu->kvm->lock); 2503 mutex_lock(&vcpu->kvm->lock);
2240 gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr); 2504 gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
2241 tr->physical_address = gpa; 2505 tr->physical_address = gpa;
2242 tr->valid = gpa != UNMAPPED_GVA; 2506 tr->valid = gpa != UNMAPPED_GVA;
2243 tr->writeable = 1; 2507 tr->writeable = 1;
2244 tr->usermode = 0; 2508 tr->usermode = 0;
2245 spin_unlock(&vcpu->kvm->lock); 2509 mutex_unlock(&vcpu->kvm->lock);
2246 vcpu_put(vcpu); 2510 vcpu_put(vcpu);
2247 2511
2248 return 0; 2512 return 0;
@@ -2253,6 +2517,8 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2253{ 2517{
2254 if (irq->irq < 0 || irq->irq >= 256) 2518 if (irq->irq < 0 || irq->irq >= 256)
2255 return -EINVAL; 2519 return -EINVAL;
2520 if (irqchip_in_kernel(vcpu->kvm))
2521 return -ENXIO;
2256 vcpu_load(vcpu); 2522 vcpu_load(vcpu);
2257 2523
2258 set_bit(irq->irq, vcpu->irq_pending); 2524 set_bit(irq->irq, vcpu->irq_pending);
@@ -2270,7 +2536,7 @@ static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
2270 2536
2271 vcpu_load(vcpu); 2537 vcpu_load(vcpu);
2272 2538
2273 r = kvm_arch_ops->set_guest_debug(vcpu, dbg); 2539 r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
2274 2540
2275 vcpu_put(vcpu); 2541 vcpu_put(vcpu);
2276 2542
@@ -2285,7 +2551,6 @@ static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
2285 unsigned long pgoff; 2551 unsigned long pgoff;
2286 struct page *page; 2552 struct page *page;
2287 2553
2288 *type = VM_FAULT_MINOR;
2289 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 2554 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2290 if (pgoff == 0) 2555 if (pgoff == 0)
2291 page = virt_to_page(vcpu->run); 2556 page = virt_to_page(vcpu->run);
@@ -2294,6 +2559,9 @@ static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
2294 else 2559 else
2295 return NOPAGE_SIGBUS; 2560 return NOPAGE_SIGBUS;
2296 get_page(page); 2561 get_page(page);
2562 if (type != NULL)
2563 *type = VM_FAULT_MINOR;
2564
2297 return page; 2565 return page;
2298} 2566}
2299 2567
@@ -2346,74 +2614,52 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
2346{ 2614{
2347 int r; 2615 int r;
2348 struct kvm_vcpu *vcpu; 2616 struct kvm_vcpu *vcpu;
2349 struct page *page;
2350 2617
2351 r = -EINVAL;
2352 if (!valid_vcpu(n)) 2618 if (!valid_vcpu(n))
2353 goto out; 2619 return -EINVAL;
2354
2355 vcpu = &kvm->vcpus[n];
2356
2357 mutex_lock(&vcpu->mutex);
2358
2359 if (vcpu->vmcs) {
2360 mutex_unlock(&vcpu->mutex);
2361 return -EEXIST;
2362 }
2363
2364 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2365 r = -ENOMEM;
2366 if (!page)
2367 goto out_unlock;
2368 vcpu->run = page_address(page);
2369
2370 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2371 r = -ENOMEM;
2372 if (!page)
2373 goto out_free_run;
2374 vcpu->pio_data = page_address(page);
2375 2620
2376 vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf, 2621 vcpu = kvm_x86_ops->vcpu_create(kvm, n);
2377 FX_IMAGE_ALIGN); 2622 if (IS_ERR(vcpu))
2378 vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE; 2623 return PTR_ERR(vcpu);
2379 vcpu->cr0 = 0x10;
2380 2624
2381 r = kvm_arch_ops->vcpu_create(vcpu); 2625 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
2382 if (r < 0)
2383 goto out_free_vcpus;
2384 2626
2385 r = kvm_mmu_create(vcpu); 2627 /* We do fxsave: this must be aligned. */
2386 if (r < 0) 2628 BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
2387 goto out_free_vcpus;
2388 2629
2389 kvm_arch_ops->vcpu_load(vcpu); 2630 vcpu_load(vcpu);
2390 r = kvm_mmu_setup(vcpu); 2631 r = kvm_mmu_setup(vcpu);
2391 if (r >= 0)
2392 r = kvm_arch_ops->vcpu_setup(vcpu);
2393 vcpu_put(vcpu); 2632 vcpu_put(vcpu);
2394
2395 if (r < 0) 2633 if (r < 0)
2396 goto out_free_vcpus; 2634 goto free_vcpu;
2397 2635
2636 mutex_lock(&kvm->lock);
2637 if (kvm->vcpus[n]) {
2638 r = -EEXIST;
2639 mutex_unlock(&kvm->lock);
2640 goto mmu_unload;
2641 }
2642 kvm->vcpus[n] = vcpu;
2643 mutex_unlock(&kvm->lock);
2644
2645 /* Now it's all set up, let userspace reach it */
2398 r = create_vcpu_fd(vcpu); 2646 r = create_vcpu_fd(vcpu);
2399 if (r < 0) 2647 if (r < 0)
2400 goto out_free_vcpus; 2648 goto unlink;
2649 return r;
2401 2650
2402 spin_lock(&kvm_lock); 2651unlink:
2403 if (n >= kvm->nvcpus) 2652 mutex_lock(&kvm->lock);
2404 kvm->nvcpus = n + 1; 2653 kvm->vcpus[n] = NULL;
2405 spin_unlock(&kvm_lock); 2654 mutex_unlock(&kvm->lock);
2406 2655
2407 return r; 2656mmu_unload:
2657 vcpu_load(vcpu);
2658 kvm_mmu_unload(vcpu);
2659 vcpu_put(vcpu);
2408 2660
2409out_free_vcpus: 2661free_vcpu:
2410 kvm_free_vcpu(vcpu); 2662 kvm_x86_ops->vcpu_free(vcpu);
2411out_free_run:
2412 free_page((unsigned long)vcpu->run);
2413 vcpu->run = NULL;
2414out_unlock:
2415 mutex_unlock(&vcpu->mutex);
2416out:
2417 return r; 2663 return r;
2418} 2664}
2419 2665
@@ -2493,7 +2739,7 @@ struct fxsave {
2493 2739
2494static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 2740static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2495{ 2741{
2496 struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image; 2742 struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
2497 2743
2498 vcpu_load(vcpu); 2744 vcpu_load(vcpu);
2499 2745
@@ -2513,7 +2759,7 @@ static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2513 2759
2514static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 2760static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2515{ 2761{
2516 struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image; 2762 struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
2517 2763
2518 vcpu_load(vcpu); 2764 vcpu_load(vcpu);
2519 2765
@@ -2531,6 +2777,27 @@ static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2531 return 0; 2777 return 0;
2532} 2778}
2533 2779
2780static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2781 struct kvm_lapic_state *s)
2782{
2783 vcpu_load(vcpu);
2784 memcpy(s->regs, vcpu->apic->regs, sizeof *s);
2785 vcpu_put(vcpu);
2786
2787 return 0;
2788}
2789
2790static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2791 struct kvm_lapic_state *s)
2792{
2793 vcpu_load(vcpu);
2794 memcpy(vcpu->apic->regs, s->regs, sizeof *s);
2795 kvm_apic_post_state_restore(vcpu);
2796 vcpu_put(vcpu);
2797
2798 return 0;
2799}
2800
2534static long kvm_vcpu_ioctl(struct file *filp, 2801static long kvm_vcpu_ioctl(struct file *filp,
2535 unsigned int ioctl, unsigned long arg) 2802 unsigned int ioctl, unsigned long arg)
2536{ 2803{
@@ -2700,6 +2967,31 @@ static long kvm_vcpu_ioctl(struct file *filp,
2700 r = 0; 2967 r = 0;
2701 break; 2968 break;
2702 } 2969 }
2970 case KVM_GET_LAPIC: {
2971 struct kvm_lapic_state lapic;
2972
2973 memset(&lapic, 0, sizeof lapic);
2974 r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
2975 if (r)
2976 goto out;
2977 r = -EFAULT;
2978 if (copy_to_user(argp, &lapic, sizeof lapic))
2979 goto out;
2980 r = 0;
2981 break;
2982 }
2983 case KVM_SET_LAPIC: {
2984 struct kvm_lapic_state lapic;
2985
2986 r = -EFAULT;
2987 if (copy_from_user(&lapic, argp, sizeof lapic))
2988 goto out;
2989 r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);;
2990 if (r)
2991 goto out;
2992 r = 0;
2993 break;
2994 }
2703 default: 2995 default:
2704 ; 2996 ;
2705 } 2997 }
@@ -2753,6 +3045,75 @@ static long kvm_vm_ioctl(struct file *filp,
2753 goto out; 3045 goto out;
2754 break; 3046 break;
2755 } 3047 }
3048 case KVM_CREATE_IRQCHIP:
3049 r = -ENOMEM;
3050 kvm->vpic = kvm_create_pic(kvm);
3051 if (kvm->vpic) {
3052 r = kvm_ioapic_init(kvm);
3053 if (r) {
3054 kfree(kvm->vpic);
3055 kvm->vpic = NULL;
3056 goto out;
3057 }
3058 }
3059 else
3060 goto out;
3061 break;
3062 case KVM_IRQ_LINE: {
3063 struct kvm_irq_level irq_event;
3064
3065 r = -EFAULT;
3066 if (copy_from_user(&irq_event, argp, sizeof irq_event))
3067 goto out;
3068 if (irqchip_in_kernel(kvm)) {
3069 mutex_lock(&kvm->lock);
3070 if (irq_event.irq < 16)
3071 kvm_pic_set_irq(pic_irqchip(kvm),
3072 irq_event.irq,
3073 irq_event.level);
3074 kvm_ioapic_set_irq(kvm->vioapic,
3075 irq_event.irq,
3076 irq_event.level);
3077 mutex_unlock(&kvm->lock);
3078 r = 0;
3079 }
3080 break;
3081 }
3082 case KVM_GET_IRQCHIP: {
3083 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3084 struct kvm_irqchip chip;
3085
3086 r = -EFAULT;
3087 if (copy_from_user(&chip, argp, sizeof chip))
3088 goto out;
3089 r = -ENXIO;
3090 if (!irqchip_in_kernel(kvm))
3091 goto out;
3092 r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
3093 if (r)
3094 goto out;
3095 r = -EFAULT;
3096 if (copy_to_user(argp, &chip, sizeof chip))
3097 goto out;
3098 r = 0;
3099 break;
3100 }
3101 case KVM_SET_IRQCHIP: {
3102 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3103 struct kvm_irqchip chip;
3104
3105 r = -EFAULT;
3106 if (copy_from_user(&chip, argp, sizeof chip))
3107 goto out;
3108 r = -ENXIO;
3109 if (!irqchip_in_kernel(kvm))
3110 goto out;
3111 r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
3112 if (r)
3113 goto out;
3114 r = 0;
3115 break;
3116 }
2756 default: 3117 default:
2757 ; 3118 ;
2758 } 3119 }
@@ -2768,12 +3129,14 @@ static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
2768 unsigned long pgoff; 3129 unsigned long pgoff;
2769 struct page *page; 3130 struct page *page;
2770 3131
2771 *type = VM_FAULT_MINOR;
2772 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 3132 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2773 page = gfn_to_page(kvm, pgoff); 3133 page = gfn_to_page(kvm, pgoff);
2774 if (!page) 3134 if (!page)
2775 return NOPAGE_SIGBUS; 3135 return NOPAGE_SIGBUS;
2776 get_page(page); 3136 get_page(page);
3137 if (type != NULL)
3138 *type = VM_FAULT_MINOR;
3139
2777 return page; 3140 return page;
2778} 3141}
2779 3142
@@ -2861,12 +3224,20 @@ static long kvm_dev_ioctl(struct file *filp,
2861 r = 0; 3224 r = 0;
2862 break; 3225 break;
2863 } 3226 }
2864 case KVM_CHECK_EXTENSION: 3227 case KVM_CHECK_EXTENSION: {
2865 /* 3228 int ext = (long)argp;
2866 * No extensions defined at present. 3229
2867 */ 3230 switch (ext) {
2868 r = 0; 3231 case KVM_CAP_IRQCHIP:
3232 case KVM_CAP_HLT:
3233 r = 1;
3234 break;
3235 default:
3236 r = 0;
3237 break;
3238 }
2869 break; 3239 break;
3240 }
2870 case KVM_GET_VCPU_MMAP_SIZE: 3241 case KVM_GET_VCPU_MMAP_SIZE:
2871 r = -EINVAL; 3242 r = -EINVAL;
2872 if (arg) 3243 if (arg)
@@ -2881,8 +3252,6 @@ out:
2881} 3252}
2882 3253
2883static struct file_operations kvm_chardev_ops = { 3254static struct file_operations kvm_chardev_ops = {
2884 .open = kvm_dev_open,
2885 .release = kvm_dev_release,
2886 .unlocked_ioctl = kvm_dev_ioctl, 3255 .unlocked_ioctl = kvm_dev_ioctl,
2887 .compat_ioctl = kvm_dev_ioctl, 3256 .compat_ioctl = kvm_dev_ioctl,
2888}; 3257};
@@ -2893,25 +3262,6 @@ static struct miscdevice kvm_dev = {
2893 &kvm_chardev_ops, 3262 &kvm_chardev_ops,
2894}; 3263};
2895 3264
2896static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
2897 void *v)
2898{
2899 if (val == SYS_RESTART) {
2900 /*
2901 * Some (well, at least mine) BIOSes hang on reboot if
2902 * in vmx root mode.
2903 */
2904 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
2905 on_each_cpu(hardware_disable, NULL, 0, 1);
2906 }
2907 return NOTIFY_OK;
2908}
2909
2910static struct notifier_block kvm_reboot_notifier = {
2911 .notifier_call = kvm_reboot,
2912 .priority = 0,
2913};
2914
2915/* 3265/*
2916 * Make sure that a cpu that is being hot-unplugged does not have any vcpus 3266 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
2917 * cached on it. 3267 * cached on it.
@@ -2925,7 +3275,9 @@ static void decache_vcpus_on_cpu(int cpu)
2925 spin_lock(&kvm_lock); 3275 spin_lock(&kvm_lock);
2926 list_for_each_entry(vm, &vm_list, vm_list) 3276 list_for_each_entry(vm, &vm_list, vm_list)
2927 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 3277 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
2928 vcpu = &vm->vcpus[i]; 3278 vcpu = vm->vcpus[i];
3279 if (!vcpu)
3280 continue;
2929 /* 3281 /*
2930 * If the vcpu is locked, then it is running on some 3282 * If the vcpu is locked, then it is running on some
2931 * other cpu and therefore it is not cached on the 3283 * other cpu and therefore it is not cached on the
@@ -2936,7 +3288,7 @@ static void decache_vcpus_on_cpu(int cpu)
2936 */ 3288 */
2937 if (mutex_trylock(&vcpu->mutex)) { 3289 if (mutex_trylock(&vcpu->mutex)) {
2938 if (vcpu->cpu == cpu) { 3290 if (vcpu->cpu == cpu) {
2939 kvm_arch_ops->vcpu_decache(vcpu); 3291 kvm_x86_ops->vcpu_decache(vcpu);
2940 vcpu->cpu = -1; 3292 vcpu->cpu = -1;
2941 } 3293 }
2942 mutex_unlock(&vcpu->mutex); 3294 mutex_unlock(&vcpu->mutex);
@@ -2952,7 +3304,7 @@ static void hardware_enable(void *junk)
2952 if (cpu_isset(cpu, cpus_hardware_enabled)) 3304 if (cpu_isset(cpu, cpus_hardware_enabled))
2953 return; 3305 return;
2954 cpu_set(cpu, cpus_hardware_enabled); 3306 cpu_set(cpu, cpus_hardware_enabled);
2955 kvm_arch_ops->hardware_enable(NULL); 3307 kvm_x86_ops->hardware_enable(NULL);
2956} 3308}
2957 3309
2958static void hardware_disable(void *junk) 3310static void hardware_disable(void *junk)
@@ -2963,7 +3315,7 @@ static void hardware_disable(void *junk)
2963 return; 3315 return;
2964 cpu_clear(cpu, cpus_hardware_enabled); 3316 cpu_clear(cpu, cpus_hardware_enabled);
2965 decache_vcpus_on_cpu(cpu); 3317 decache_vcpus_on_cpu(cpu);
2966 kvm_arch_ops->hardware_disable(NULL); 3318 kvm_x86_ops->hardware_disable(NULL);
2967} 3319}
2968 3320
2969static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, 3321static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
@@ -2994,6 +3346,25 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2994 return NOTIFY_OK; 3346 return NOTIFY_OK;
2995} 3347}
2996 3348
3349static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
3350 void *v)
3351{
3352 if (val == SYS_RESTART) {
3353 /*
3354 * Some (well, at least mine) BIOSes hang on reboot if
3355 * in vmx root mode.
3356 */
3357 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
3358 on_each_cpu(hardware_disable, NULL, 0, 1);
3359 }
3360 return NOTIFY_OK;
3361}
3362
3363static struct notifier_block kvm_reboot_notifier = {
3364 .notifier_call = kvm_reboot,
3365 .priority = 0,
3366};
3367
2997void kvm_io_bus_init(struct kvm_io_bus *bus) 3368void kvm_io_bus_init(struct kvm_io_bus *bus)
2998{ 3369{
2999 memset(bus, 0, sizeof(*bus)); 3370 memset(bus, 0, sizeof(*bus));
@@ -3047,18 +3418,15 @@ static u64 stat_get(void *_offset)
3047 spin_lock(&kvm_lock); 3418 spin_lock(&kvm_lock);
3048 list_for_each_entry(kvm, &vm_list, vm_list) 3419 list_for_each_entry(kvm, &vm_list, vm_list)
3049 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 3420 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
3050 vcpu = &kvm->vcpus[i]; 3421 vcpu = kvm->vcpus[i];
3051 total += *(u32 *)((void *)vcpu + offset); 3422 if (vcpu)
3423 total += *(u32 *)((void *)vcpu + offset);
3052 } 3424 }
3053 spin_unlock(&kvm_lock); 3425 spin_unlock(&kvm_lock);
3054 return total; 3426 return total;
3055} 3427}
3056 3428
3057static void stat_set(void *offset, u64 val) 3429DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, NULL, "%llu\n");
3058{
3059}
3060
3061DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, stat_set, "%llu\n");
3062 3430
3063static __init void kvm_init_debug(void) 3431static __init void kvm_init_debug(void)
3064{ 3432{
@@ -3105,11 +3473,34 @@ static struct sys_device kvm_sysdev = {
3105 3473
3106hpa_t bad_page_address; 3474hpa_t bad_page_address;
3107 3475
3108int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module) 3476static inline
3477struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
3478{
3479 return container_of(pn, struct kvm_vcpu, preempt_notifier);
3480}
3481
3482static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
3483{
3484 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
3485
3486 kvm_x86_ops->vcpu_load(vcpu, cpu);
3487}
3488
3489static void kvm_sched_out(struct preempt_notifier *pn,
3490 struct task_struct *next)
3491{
3492 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
3493
3494 kvm_x86_ops->vcpu_put(vcpu);
3495}
3496
3497int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
3498 struct module *module)
3109{ 3499{
3110 int r; 3500 int r;
3501 int cpu;
3111 3502
3112 if (kvm_arch_ops) { 3503 if (kvm_x86_ops) {
3113 printk(KERN_ERR "kvm: already loaded the other module\n"); 3504 printk(KERN_ERR "kvm: already loaded the other module\n");
3114 return -EEXIST; 3505 return -EEXIST;
3115 } 3506 }
@@ -3123,12 +3514,20 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
3123 return -EOPNOTSUPP; 3514 return -EOPNOTSUPP;
3124 } 3515 }
3125 3516
3126 kvm_arch_ops = ops; 3517 kvm_x86_ops = ops;
3127 3518
3128 r = kvm_arch_ops->hardware_setup(); 3519 r = kvm_x86_ops->hardware_setup();
3129 if (r < 0) 3520 if (r < 0)
3130 goto out; 3521 goto out;
3131 3522
3523 for_each_online_cpu(cpu) {
3524 smp_call_function_single(cpu,
3525 kvm_x86_ops->check_processor_compatibility,
3526 &r, 0, 1);
3527 if (r < 0)
3528 goto out_free_0;
3529 }
3530
3132 on_each_cpu(hardware_enable, NULL, 0, 1); 3531 on_each_cpu(hardware_enable, NULL, 0, 1);
3133 r = register_cpu_notifier(&kvm_cpu_notifier); 3532 r = register_cpu_notifier(&kvm_cpu_notifier);
3134 if (r) 3533 if (r)
@@ -3143,6 +3542,14 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
3143 if (r) 3542 if (r)
3144 goto out_free_3; 3543 goto out_free_3;
3145 3544
3545 /* A kmem cache lets us meet the alignment requirements of fx_save. */
3546 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
3547 __alignof__(struct kvm_vcpu), 0, 0);
3548 if (!kvm_vcpu_cache) {
3549 r = -ENOMEM;
3550 goto out_free_4;
3551 }
3552
3146 kvm_chardev_ops.owner = module; 3553 kvm_chardev_ops.owner = module;
3147 3554
3148 r = misc_register(&kvm_dev); 3555 r = misc_register(&kvm_dev);
@@ -3151,9 +3558,14 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
3151 goto out_free; 3558 goto out_free;
3152 } 3559 }
3153 3560
3561 kvm_preempt_ops.sched_in = kvm_sched_in;
3562 kvm_preempt_ops.sched_out = kvm_sched_out;
3563
3154 return r; 3564 return r;
3155 3565
3156out_free: 3566out_free:
3567 kmem_cache_destroy(kvm_vcpu_cache);
3568out_free_4:
3157 sysdev_unregister(&kvm_sysdev); 3569 sysdev_unregister(&kvm_sysdev);
3158out_free_3: 3570out_free_3:
3159 sysdev_class_unregister(&kvm_sysdev_class); 3571 sysdev_class_unregister(&kvm_sysdev_class);
@@ -3162,22 +3574,24 @@ out_free_2:
3162 unregister_cpu_notifier(&kvm_cpu_notifier); 3574 unregister_cpu_notifier(&kvm_cpu_notifier);
3163out_free_1: 3575out_free_1:
3164 on_each_cpu(hardware_disable, NULL, 0, 1); 3576 on_each_cpu(hardware_disable, NULL, 0, 1);
3165 kvm_arch_ops->hardware_unsetup(); 3577out_free_0:
3578 kvm_x86_ops->hardware_unsetup();
3166out: 3579out:
3167 kvm_arch_ops = NULL; 3580 kvm_x86_ops = NULL;
3168 return r; 3581 return r;
3169} 3582}
3170 3583
3171void kvm_exit_arch(void) 3584void kvm_exit_x86(void)
3172{ 3585{
3173 misc_deregister(&kvm_dev); 3586 misc_deregister(&kvm_dev);
3587 kmem_cache_destroy(kvm_vcpu_cache);
3174 sysdev_unregister(&kvm_sysdev); 3588 sysdev_unregister(&kvm_sysdev);
3175 sysdev_class_unregister(&kvm_sysdev_class); 3589 sysdev_class_unregister(&kvm_sysdev_class);
3176 unregister_reboot_notifier(&kvm_reboot_notifier); 3590 unregister_reboot_notifier(&kvm_reboot_notifier);
3177 unregister_cpu_notifier(&kvm_cpu_notifier); 3591 unregister_cpu_notifier(&kvm_cpu_notifier);
3178 on_each_cpu(hardware_disable, NULL, 0, 1); 3592 on_each_cpu(hardware_disable, NULL, 0, 1);
3179 kvm_arch_ops->hardware_unsetup(); 3593 kvm_x86_ops->hardware_unsetup();
3180 kvm_arch_ops = NULL; 3594 kvm_x86_ops = NULL;
3181} 3595}
3182 3596
3183static __init int kvm_init(void) 3597static __init int kvm_init(void)
@@ -3220,5 +3634,5 @@ static __exit void kvm_exit(void)
3220module_init(kvm_init) 3634module_init(kvm_init)
3221module_exit(kvm_exit) 3635module_exit(kvm_exit)
3222 3636
3223EXPORT_SYMBOL_GPL(kvm_init_arch); 3637EXPORT_SYMBOL_GPL(kvm_init_x86);
3224EXPORT_SYMBOL_GPL(kvm_exit_arch); 3638EXPORT_SYMBOL_GPL(kvm_exit_x86);
diff --git a/drivers/kvm/kvm_svm.h b/drivers/kvm/kvm_svm.h
index a869983d68..a0e415daef 100644
--- a/drivers/kvm/kvm_svm.h
+++ b/drivers/kvm/kvm_svm.h
@@ -20,7 +20,10 @@ static const u32 host_save_user_msrs[] = {
20#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs) 20#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
21#define NUM_DB_REGS 4 21#define NUM_DB_REGS 4
22 22
23struct kvm_vcpu;
24
23struct vcpu_svm { 25struct vcpu_svm {
26 struct kvm_vcpu vcpu;
24 struct vmcb *vmcb; 27 struct vmcb *vmcb;
25 unsigned long vmcb_pa; 28 unsigned long vmcb_pa;
26 struct svm_cpu_data *svm_data; 29 struct svm_cpu_data *svm_data;
diff --git a/drivers/kvm/lapic.c b/drivers/kvm/lapic.c
new file mode 100644
index 0000000000..a190587cf6
--- /dev/null
+++ b/drivers/kvm/lapic.c
@@ -0,0 +1,1064 @@
1
2/*
3 * Local APIC virtualization
4 *
5 * Copyright (C) 2006 Qumranet, Inc.
6 * Copyright (C) 2007 Novell
7 * Copyright (C) 2007 Intel
8 *
9 * Authors:
10 * Dor Laor <dor.laor@qumranet.com>
11 * Gregory Haskins <ghaskins@novell.com>
12 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
13 *
14 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
15 *
16 * This work is licensed under the terms of the GNU GPL, version 2. See
17 * the COPYING file in the top-level directory.
18 */
19
20#include "kvm.h"
21#include <linux/kvm.h>
22#include <linux/mm.h>
23#include <linux/highmem.h>
24#include <linux/smp.h>
25#include <linux/hrtimer.h>
26#include <linux/io.h>
27#include <linux/module.h>
28#include <asm/processor.h>
29#include <asm/msr.h>
30#include <asm/page.h>
31#include <asm/current.h>
32#include <asm/apicdef.h>
33#include <asm/atomic.h>
34#include <asm/div64.h>
35#include "irq.h"
36
37#define PRId64 "d"
38#define PRIx64 "llx"
39#define PRIu64 "u"
40#define PRIo64 "o"
41
42#define APIC_BUS_CYCLE_NS 1
43
44/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
45#define apic_debug(fmt, arg...)
46
47#define APIC_LVT_NUM 6
48/* 14 is the version for Xeon and Pentium 8.4.8*/
49#define APIC_VERSION (0x14UL | ((APIC_LVT_NUM - 1) << 16))
50#define LAPIC_MMIO_LENGTH (1 << 12)
51/* followed define is not in apicdef.h */
52#define APIC_SHORT_MASK 0xc0000
53#define APIC_DEST_NOSHORT 0x0
54#define APIC_DEST_MASK 0x800
55#define MAX_APIC_VECTOR 256
56
57#define VEC_POS(v) ((v) & (32 - 1))
58#define REG_POS(v) (((v) >> 5) << 4)
59static inline u32 apic_get_reg(struct kvm_lapic *apic, int reg_off)
60{
61 return *((u32 *) (apic->regs + reg_off));
62}
63
64static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
65{
66 *((u32 *) (apic->regs + reg_off)) = val;
67}
68
69static inline int apic_test_and_set_vector(int vec, void *bitmap)
70{
71 return test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
72}
73
74static inline int apic_test_and_clear_vector(int vec, void *bitmap)
75{
76 return test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
77}
78
79static inline void apic_set_vector(int vec, void *bitmap)
80{
81 set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
82}
83
84static inline void apic_clear_vector(int vec, void *bitmap)
85{
86 clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
87}
88
89static inline int apic_hw_enabled(struct kvm_lapic *apic)
90{
91 return (apic)->vcpu->apic_base & MSR_IA32_APICBASE_ENABLE;
92}
93
94static inline int apic_sw_enabled(struct kvm_lapic *apic)
95{
96 return apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED;
97}
98
99static inline int apic_enabled(struct kvm_lapic *apic)
100{
101 return apic_sw_enabled(apic) && apic_hw_enabled(apic);
102}
103
104#define LVT_MASK \
105 (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
106
107#define LINT_MASK \
108 (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
109 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
110
111static inline int kvm_apic_id(struct kvm_lapic *apic)
112{
113 return (apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
114}
115
116static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
117{
118 return !(apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
119}
120
121static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
122{
123 return apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
124}
125
126static inline int apic_lvtt_period(struct kvm_lapic *apic)
127{
128 return apic_get_reg(apic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC;
129}
130
131static unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
132 LVT_MASK | APIC_LVT_TIMER_PERIODIC, /* LVTT */
133 LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */
134 LVT_MASK | APIC_MODE_MASK, /* LVTPC */
135 LINT_MASK, LINT_MASK, /* LVT0-1 */
136 LVT_MASK /* LVTERR */
137};
138
139static int find_highest_vector(void *bitmap)
140{
141 u32 *word = bitmap;
142 int word_offset = MAX_APIC_VECTOR >> 5;
143
144 while ((word_offset != 0) && (word[(--word_offset) << 2] == 0))
145 continue;
146
147 if (likely(!word_offset && !word[0]))
148 return -1;
149 else
150 return fls(word[word_offset << 2]) - 1 + (word_offset << 5);
151}
152
153static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
154{
155 return apic_test_and_set_vector(vec, apic->regs + APIC_IRR);
156}
157
158static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
159{
160 apic_clear_vector(vec, apic->regs + APIC_IRR);
161}
162
163static inline int apic_find_highest_irr(struct kvm_lapic *apic)
164{
165 int result;
166
167 result = find_highest_vector(apic->regs + APIC_IRR);
168 ASSERT(result == -1 || result >= 16);
169
170 return result;
171}
172
173int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
174{
175 struct kvm_lapic *apic = (struct kvm_lapic *)vcpu->apic;
176 int highest_irr;
177
178 if (!apic)
179 return 0;
180 highest_irr = apic_find_highest_irr(apic);
181
182 return highest_irr;
183}
184EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
185
186int kvm_apic_set_irq(struct kvm_lapic *apic, u8 vec, u8 trig)
187{
188 if (!apic_test_and_set_irr(vec, apic)) {
189 /* a new pending irq is set in IRR */
190 if (trig)
191 apic_set_vector(vec, apic->regs + APIC_TMR);
192 else
193 apic_clear_vector(vec, apic->regs + APIC_TMR);
194 kvm_vcpu_kick(apic->vcpu);
195 return 1;
196 }
197 return 0;
198}
199
200static inline int apic_find_highest_isr(struct kvm_lapic *apic)
201{
202 int result;
203
204 result = find_highest_vector(apic->regs + APIC_ISR);
205 ASSERT(result == -1 || result >= 16);
206
207 return result;
208}
209
210static void apic_update_ppr(struct kvm_lapic *apic)
211{
212 u32 tpr, isrv, ppr;
213 int isr;
214
215 tpr = apic_get_reg(apic, APIC_TASKPRI);
216 isr = apic_find_highest_isr(apic);
217 isrv = (isr != -1) ? isr : 0;
218
219 if ((tpr & 0xf0) >= (isrv & 0xf0))
220 ppr = tpr & 0xff;
221 else
222 ppr = isrv & 0xf0;
223
224 apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
225 apic, ppr, isr, isrv);
226
227 apic_set_reg(apic, APIC_PROCPRI, ppr);
228}
229
230static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
231{
232 apic_set_reg(apic, APIC_TASKPRI, tpr);
233 apic_update_ppr(apic);
234}
235
236int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
237{
238 return kvm_apic_id(apic) == dest;
239}
240
241int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
242{
243 int result = 0;
244 u8 logical_id;
245
246 logical_id = GET_APIC_LOGICAL_ID(apic_get_reg(apic, APIC_LDR));
247
248 switch (apic_get_reg(apic, APIC_DFR)) {
249 case APIC_DFR_FLAT:
250 if (logical_id & mda)
251 result = 1;
252 break;
253 case APIC_DFR_CLUSTER:
254 if (((logical_id >> 4) == (mda >> 0x4))
255 && (logical_id & mda & 0xf))
256 result = 1;
257 break;
258 default:
259 printk(KERN_WARNING "Bad DFR vcpu %d: %08x\n",
260 apic->vcpu->vcpu_id, apic_get_reg(apic, APIC_DFR));
261 break;
262 }
263
264 return result;
265}
266
267static int apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
268 int short_hand, int dest, int dest_mode)
269{
270 int result = 0;
271 struct kvm_lapic *target = vcpu->apic;
272
273 apic_debug("target %p, source %p, dest 0x%x, "
274 "dest_mode 0x%x, short_hand 0x%x",
275 target, source, dest, dest_mode, short_hand);
276
277 ASSERT(!target);
278 switch (short_hand) {
279 case APIC_DEST_NOSHORT:
280 if (dest_mode == 0) {
281 /* Physical mode. */
282 if ((dest == 0xFF) || (dest == kvm_apic_id(target)))
283 result = 1;
284 } else
285 /* Logical mode. */
286 result = kvm_apic_match_logical_addr(target, dest);
287 break;
288 case APIC_DEST_SELF:
289 if (target == source)
290 result = 1;
291 break;
292 case APIC_DEST_ALLINC:
293 result = 1;
294 break;
295 case APIC_DEST_ALLBUT:
296 if (target != source)
297 result = 1;
298 break;
299 default:
300 printk(KERN_WARNING "Bad dest shorthand value %x\n",
301 short_hand);
302 break;
303 }
304
305 return result;
306}
307
308/*
309 * Add a pending IRQ into lapic.
310 * Return 1 if successfully added and 0 if discarded.
311 */
312static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
313 int vector, int level, int trig_mode)
314{
315 int orig_irr, result = 0;
316 struct kvm_vcpu *vcpu = apic->vcpu;
317
318 switch (delivery_mode) {
319 case APIC_DM_FIXED:
320 case APIC_DM_LOWEST:
321 /* FIXME add logic for vcpu on reset */
322 if (unlikely(!apic_enabled(apic)))
323 break;
324
325 orig_irr = apic_test_and_set_irr(vector, apic);
326 if (orig_irr && trig_mode) {
327 apic_debug("level trig mode repeatedly for vector %d",
328 vector);
329 break;
330 }
331
332 if (trig_mode) {
333 apic_debug("level trig mode for vector %d", vector);
334 apic_set_vector(vector, apic->regs + APIC_TMR);
335 } else
336 apic_clear_vector(vector, apic->regs + APIC_TMR);
337
338 if (vcpu->mp_state == VCPU_MP_STATE_RUNNABLE)
339 kvm_vcpu_kick(vcpu);
340 else if (vcpu->mp_state == VCPU_MP_STATE_HALTED) {
341 vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
342 if (waitqueue_active(&vcpu->wq))
343 wake_up_interruptible(&vcpu->wq);
344 }
345
346 result = (orig_irr == 0);
347 break;
348
349 case APIC_DM_REMRD:
350 printk(KERN_DEBUG "Ignoring delivery mode 3\n");
351 break;
352
353 case APIC_DM_SMI:
354 printk(KERN_DEBUG "Ignoring guest SMI\n");
355 break;
356 case APIC_DM_NMI:
357 printk(KERN_DEBUG "Ignoring guest NMI\n");
358 break;
359
360 case APIC_DM_INIT:
361 if (level) {
362 if (vcpu->mp_state == VCPU_MP_STATE_RUNNABLE)
363 printk(KERN_DEBUG
364 "INIT on a runnable vcpu %d\n",
365 vcpu->vcpu_id);
366 vcpu->mp_state = VCPU_MP_STATE_INIT_RECEIVED;
367 kvm_vcpu_kick(vcpu);
368 } else {
369 printk(KERN_DEBUG
370 "Ignoring de-assert INIT to vcpu %d\n",
371 vcpu->vcpu_id);
372 }
373
374 break;
375
376 case APIC_DM_STARTUP:
377 printk(KERN_DEBUG "SIPI to vcpu %d vector 0x%02x\n",
378 vcpu->vcpu_id, vector);
379 if (vcpu->mp_state == VCPU_MP_STATE_INIT_RECEIVED) {
380 vcpu->sipi_vector = vector;
381 vcpu->mp_state = VCPU_MP_STATE_SIPI_RECEIVED;
382 if (waitqueue_active(&vcpu->wq))
383 wake_up_interruptible(&vcpu->wq);
384 }
385 break;
386
387 default:
388 printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
389 delivery_mode);
390 break;
391 }
392 return result;
393}
394
395struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
396 unsigned long bitmap)
397{
398 int vcpu_id;
399 int last;
400 int next;
401 struct kvm_lapic *apic;
402
403 last = kvm->round_robin_prev_vcpu;
404 next = last;
405
406 do {
407 if (++next == KVM_MAX_VCPUS)
408 next = 0;
409 if (kvm->vcpus[next] == NULL || !test_bit(next, &bitmap))
410 continue;
411 apic = kvm->vcpus[next]->apic;
412 if (apic && apic_enabled(apic))
413 break;
414 apic = NULL;
415 } while (next != last);
416 kvm->round_robin_prev_vcpu = next;
417
418 if (!apic) {
419 vcpu_id = ffs(bitmap) - 1;
420 if (vcpu_id < 0) {
421 vcpu_id = 0;
422 printk(KERN_DEBUG "vcpu not ready for apic_round_robin\n");
423 }
424 apic = kvm->vcpus[vcpu_id]->apic;
425 }
426
427 return apic;
428}
429
430static void apic_set_eoi(struct kvm_lapic *apic)
431{
432 int vector = apic_find_highest_isr(apic);
433
434 /*
435 * Not every write EOI will has corresponding ISR,
436 * one example is when Kernel check timer on setup_IO_APIC
437 */
438 if (vector == -1)
439 return;
440
441 apic_clear_vector(vector, apic->regs + APIC_ISR);
442 apic_update_ppr(apic);
443
444 if (apic_test_and_clear_vector(vector, apic->regs + APIC_TMR))
445 kvm_ioapic_update_eoi(apic->vcpu->kvm, vector);
446}
447
448static void apic_send_ipi(struct kvm_lapic *apic)
449{
450 u32 icr_low = apic_get_reg(apic, APIC_ICR);
451 u32 icr_high = apic_get_reg(apic, APIC_ICR2);
452
453 unsigned int dest = GET_APIC_DEST_FIELD(icr_high);
454 unsigned int short_hand = icr_low & APIC_SHORT_MASK;
455 unsigned int trig_mode = icr_low & APIC_INT_LEVELTRIG;
456 unsigned int level = icr_low & APIC_INT_ASSERT;
457 unsigned int dest_mode = icr_low & APIC_DEST_MASK;
458 unsigned int delivery_mode = icr_low & APIC_MODE_MASK;
459 unsigned int vector = icr_low & APIC_VECTOR_MASK;
460
461 struct kvm_lapic *target;
462 struct kvm_vcpu *vcpu;
463 unsigned long lpr_map = 0;
464 int i;
465
466 apic_debug("icr_high 0x%x, icr_low 0x%x, "
467 "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
468 "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n",
469 icr_high, icr_low, short_hand, dest,
470 trig_mode, level, dest_mode, delivery_mode, vector);
471
472 for (i = 0; i < KVM_MAX_VCPUS; i++) {
473 vcpu = apic->vcpu->kvm->vcpus[i];
474 if (!vcpu)
475 continue;
476
477 if (vcpu->apic &&
478 apic_match_dest(vcpu, apic, short_hand, dest, dest_mode)) {
479 if (delivery_mode == APIC_DM_LOWEST)
480 set_bit(vcpu->vcpu_id, &lpr_map);
481 else
482 __apic_accept_irq(vcpu->apic, delivery_mode,
483 vector, level, trig_mode);
484 }
485 }
486
487 if (delivery_mode == APIC_DM_LOWEST) {
488 target = kvm_apic_round_robin(vcpu->kvm, vector, lpr_map);
489 if (target != NULL)
490 __apic_accept_irq(target, delivery_mode,
491 vector, level, trig_mode);
492 }
493}
494
495static u32 apic_get_tmcct(struct kvm_lapic *apic)
496{
497 u32 counter_passed;
498 ktime_t passed, now = apic->timer.dev.base->get_time();
499 u32 tmcct = apic_get_reg(apic, APIC_TMICT);
500
501 ASSERT(apic != NULL);
502
503 if (unlikely(ktime_to_ns(now) <=
504 ktime_to_ns(apic->timer.last_update))) {
505 /* Wrap around */
506 passed = ktime_add(( {
507 (ktime_t) {
508 .tv64 = KTIME_MAX -
509 (apic->timer.last_update).tv64}; }
510 ), now);
511 apic_debug("time elapsed\n");
512 } else
513 passed = ktime_sub(now, apic->timer.last_update);
514
515 counter_passed = div64_64(ktime_to_ns(passed),
516 (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
517 tmcct -= counter_passed;
518
519 if (tmcct <= 0) {
520 if (unlikely(!apic_lvtt_period(apic)))
521 tmcct = 0;
522 else
523 do {
524 tmcct += apic_get_reg(apic, APIC_TMICT);
525 } while (tmcct <= 0);
526 }
527
528 return tmcct;
529}
530
531static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
532{
533 u32 val = 0;
534
535 if (offset >= LAPIC_MMIO_LENGTH)
536 return 0;
537
538 switch (offset) {
539 case APIC_ARBPRI:
540 printk(KERN_WARNING "Access APIC ARBPRI register "
541 "which is for P6\n");
542 break;
543
544 case APIC_TMCCT: /* Timer CCR */
545 val = apic_get_tmcct(apic);
546 break;
547
548 default:
549 apic_update_ppr(apic);
550 val = apic_get_reg(apic, offset);
551 break;
552 }
553
554 return val;
555}
556
557static void apic_mmio_read(struct kvm_io_device *this,
558 gpa_t address, int len, void *data)
559{
560 struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
561 unsigned int offset = address - apic->base_address;
562 unsigned char alignment = offset & 0xf;
563 u32 result;
564
565 if ((alignment + len) > 4) {
566 printk(KERN_ERR "KVM_APIC_READ: alignment error %lx %d",
567 (unsigned long)address, len);
568 return;
569 }
570 result = __apic_read(apic, offset & ~0xf);
571
572 switch (len) {
573 case 1:
574 case 2:
575 case 4:
576 memcpy(data, (char *)&result + alignment, len);
577 break;
578 default:
579 printk(KERN_ERR "Local APIC read with len = %x, "
580 "should be 1,2, or 4 instead\n", len);
581 break;
582 }
583}
584
585static void update_divide_count(struct kvm_lapic *apic)
586{
587 u32 tmp1, tmp2, tdcr;
588
589 tdcr = apic_get_reg(apic, APIC_TDCR);
590 tmp1 = tdcr & 0xf;
591 tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
592 apic->timer.divide_count = 0x1 << (tmp2 & 0x7);
593
594 apic_debug("timer divide count is 0x%x\n",
595 apic->timer.divide_count);
596}
597
598static void start_apic_timer(struct kvm_lapic *apic)
599{
600 ktime_t now = apic->timer.dev.base->get_time();
601
602 apic->timer.last_update = now;
603
604 apic->timer.period = apic_get_reg(apic, APIC_TMICT) *
605 APIC_BUS_CYCLE_NS * apic->timer.divide_count;
606 atomic_set(&apic->timer.pending, 0);
607 hrtimer_start(&apic->timer.dev,
608 ktime_add_ns(now, apic->timer.period),
609 HRTIMER_MODE_ABS);
610
611 apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
612 PRIx64 ", "
613 "timer initial count 0x%x, period %lldns, "
614 "expire @ 0x%016" PRIx64 ".\n", __FUNCTION__,
615 APIC_BUS_CYCLE_NS, ktime_to_ns(now),
616 apic_get_reg(apic, APIC_TMICT),
617 apic->timer.period,
618 ktime_to_ns(ktime_add_ns(now,
619 apic->timer.period)));
620}
621
622static void apic_mmio_write(struct kvm_io_device *this,
623 gpa_t address, int len, const void *data)
624{
625 struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
626 unsigned int offset = address - apic->base_address;
627 unsigned char alignment = offset & 0xf;
628 u32 val;
629
630 /*
631 * APIC register must be aligned on 128-bits boundary.
632 * 32/64/128 bits registers must be accessed thru 32 bits.
633 * Refer SDM 8.4.1
634 */
635 if (len != 4 || alignment) {
636 if (printk_ratelimit())
637 printk(KERN_ERR "apic write: bad size=%d %lx\n",
638 len, (long)address);
639 return;
640 }
641
642 val = *(u32 *) data;
643
644 /* too common printing */
645 if (offset != APIC_EOI)
646 apic_debug("%s: offset 0x%x with length 0x%x, and value is "
647 "0x%x\n", __FUNCTION__, offset, len, val);
648
649 offset &= 0xff0;
650
651 switch (offset) {
652 case APIC_ID: /* Local APIC ID */
653 apic_set_reg(apic, APIC_ID, val);
654 break;
655
656 case APIC_TASKPRI:
657 apic_set_tpr(apic, val & 0xff);
658 break;
659
660 case APIC_EOI:
661 apic_set_eoi(apic);
662 break;
663
664 case APIC_LDR:
665 apic_set_reg(apic, APIC_LDR, val & APIC_LDR_MASK);
666 break;
667
668 case APIC_DFR:
669 apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
670 break;
671
672 case APIC_SPIV:
673 apic_set_reg(apic, APIC_SPIV, val & 0x3ff);
674 if (!(val & APIC_SPIV_APIC_ENABLED)) {
675 int i;
676 u32 lvt_val;
677
678 for (i = 0; i < APIC_LVT_NUM; i++) {
679 lvt_val = apic_get_reg(apic,
680 APIC_LVTT + 0x10 * i);
681 apic_set_reg(apic, APIC_LVTT + 0x10 * i,
682 lvt_val | APIC_LVT_MASKED);
683 }
684 atomic_set(&apic->timer.pending, 0);
685
686 }
687 break;
688
689 case APIC_ICR:
690 /* No delay here, so we always clear the pending bit */
691 apic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
692 apic_send_ipi(apic);
693 break;
694
695 case APIC_ICR2:
696 apic_set_reg(apic, APIC_ICR2, val & 0xff000000);
697 break;
698
699 case APIC_LVTT:
700 case APIC_LVTTHMR:
701 case APIC_LVTPC:
702 case APIC_LVT0:
703 case APIC_LVT1:
704 case APIC_LVTERR:
705 /* TODO: Check vector */
706 if (!apic_sw_enabled(apic))
707 val |= APIC_LVT_MASKED;
708
709 val &= apic_lvt_mask[(offset - APIC_LVTT) >> 4];
710 apic_set_reg(apic, offset, val);
711
712 break;
713
714 case APIC_TMICT:
715 hrtimer_cancel(&apic->timer.dev);
716 apic_set_reg(apic, APIC_TMICT, val);
717 start_apic_timer(apic);
718 return;
719
720 case APIC_TDCR:
721 if (val & 4)
722 printk(KERN_ERR "KVM_WRITE:TDCR %x\n", val);
723 apic_set_reg(apic, APIC_TDCR, val);
724 update_divide_count(apic);
725 break;
726
727 default:
728 apic_debug("Local APIC Write to read-only register %x\n",
729 offset);
730 break;
731 }
732
733}
734
735static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr)
736{
737 struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
738 int ret = 0;
739
740
741 if (apic_hw_enabled(apic) &&
742 (addr >= apic->base_address) &&
743 (addr < (apic->base_address + LAPIC_MMIO_LENGTH)))
744 ret = 1;
745
746 return ret;
747}
748
749void kvm_free_apic(struct kvm_lapic *apic)
750{
751 if (!apic)
752 return;
753
754 hrtimer_cancel(&apic->timer.dev);
755
756 if (apic->regs_page) {
757 __free_page(apic->regs_page);
758 apic->regs_page = 0;
759 }
760
761 kfree(apic);
762}
763
764/*
765 *----------------------------------------------------------------------
766 * LAPIC interface
767 *----------------------------------------------------------------------
768 */
769
770void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
771{
772 struct kvm_lapic *apic = (struct kvm_lapic *)vcpu->apic;
773
774 if (!apic)
775 return;
776 apic_set_tpr(apic, ((cr8 & 0x0f) << 4));
777}
778
779u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
780{
781 struct kvm_lapic *apic = (struct kvm_lapic *)vcpu->apic;
782 u64 tpr;
783
784 if (!apic)
785 return 0;
786 tpr = (u64) apic_get_reg(apic, APIC_TASKPRI);
787
788 return (tpr & 0xf0) >> 4;
789}
790EXPORT_SYMBOL_GPL(kvm_lapic_get_cr8);
791
792void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
793{
794 struct kvm_lapic *apic = (struct kvm_lapic *)vcpu->apic;
795
796 if (!apic) {
797 value |= MSR_IA32_APICBASE_BSP;
798 vcpu->apic_base = value;
799 return;
800 }
801 if (apic->vcpu->vcpu_id)
802 value &= ~MSR_IA32_APICBASE_BSP;
803
804 vcpu->apic_base = value;
805 apic->base_address = apic->vcpu->apic_base &
806 MSR_IA32_APICBASE_BASE;
807
808 /* with FSB delivery interrupt, we can restart APIC functionality */
809 apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
810 "0x%lx.\n", apic->apic_base, apic->base_address);
811
812}
813
814u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu)
815{
816 return vcpu->apic_base;
817}
818EXPORT_SYMBOL_GPL(kvm_lapic_get_base);
819
/*
 * Put the local APIC into its power-on/reset register state.
 *
 * Stops any running APIC timer first (this may be a reset of an active
 * APIC), writes the architectural reset values into the register page,
 * clears the IRR/ISR/TMR banks, and recomputes the processor priority.
 * The BSP bit is forced on for vcpu 0's cached APICBASE MSR.
 */
void kvm_lapic_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic;
	int i;

	apic_debug("%s\n", __FUNCTION__);

	ASSERT(vcpu);
	apic = vcpu->apic;
	ASSERT(apic != NULL);

	/* Stop the timer in case it's a reset to an active apic */
	hrtimer_cancel(&apic->timer.dev);

	/* APIC ID lives in bits 31:24 of the ID register. */
	apic_set_reg(apic, APIC_ID, vcpu->vcpu_id << 24);
	apic_set_reg(apic, APIC_LVR, APIC_VERSION);

	/* Mask every local vector table entry... */
	for (i = 0; i < APIC_LVT_NUM; i++)
		apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
	/* ...then route LINT0 as ExtINT (legacy PIC passthrough). */
	apic_set_reg(apic, APIC_LVT0,
		     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));

	/* Flat destination format; spurious vector register reset value. */
	apic_set_reg(apic, APIC_DFR, 0xffffffffU);
	apic_set_reg(apic, APIC_SPIV, 0xff);
	apic_set_reg(apic, APIC_TASKPRI, 0);
	apic_set_reg(apic, APIC_LDR, 0);
	apic_set_reg(apic, APIC_ESR, 0);
	apic_set_reg(apic, APIC_ICR, 0);
	apic_set_reg(apic, APIC_ICR2, 0);
	apic_set_reg(apic, APIC_TDCR, 0);
	apic_set_reg(apic, APIC_TMICT, 0);
	/* Clear all 256 bits of the IRR, ISR and TMR banks (8 x 32 bits). */
	for (i = 0; i < 8; i++) {
		apic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
	}
	apic->timer.divide_count = 0;
	atomic_set(&apic->timer.pending, 0);
	/* vcpu 0 is always the bootstrap processor. */
	if (vcpu->vcpu_id == 0)
		vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
	/* Recompute PPR now that TPR/ISR are back to reset values. */
	apic_update_ppr(apic);

	apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __FUNCTION__,
		   vcpu, kvm_apic_id(apic),
		   vcpu->apic_base, apic->base_address);
}
EXPORT_SYMBOL_GPL(kvm_lapic_reset);
868
869int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
870{
871 struct kvm_lapic *apic = (struct kvm_lapic *)vcpu->apic;
872 int ret = 0;
873
874 if (!apic)
875 return 0;
876 ret = apic_enabled(apic);
877
878 return ret;
879}
880EXPORT_SYMBOL_GPL(kvm_lapic_enabled);
881
882/*
883 *----------------------------------------------------------------------
884 * timer interface
885 *----------------------------------------------------------------------
886 */
887
888/* TODO: make sure __apic_timer_fn runs in current pCPU */
889static int __apic_timer_fn(struct kvm_lapic *apic)
890{
891 int result = 0;
892 wait_queue_head_t *q = &apic->vcpu->wq;
893
894 atomic_inc(&apic->timer.pending);
895 if (waitqueue_active(q))
896 {
897 apic->vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
898 wake_up_interruptible(q);
899 }
900 if (apic_lvtt_period(apic)) {
901 result = 1;
902 apic->timer.dev.expires = ktime_add_ns(
903 apic->timer.dev.expires,
904 apic->timer.period);
905 }
906 return result;
907}
908
909static int __inject_apic_timer_irq(struct kvm_lapic *apic)
910{
911 int vector;
912
913 vector = apic_lvt_vector(apic, APIC_LVTT);
914 return __apic_accept_irq(apic, APIC_DM_FIXED, vector, 1, 0);
915}
916
917static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
918{
919 struct kvm_lapic *apic;
920 int restart_timer = 0;
921
922 apic = container_of(data, struct kvm_lapic, timer.dev);
923
924 restart_timer = __apic_timer_fn(apic);
925
926 if (restart_timer)
927 return HRTIMER_RESTART;
928 else
929 return HRTIMER_NORESTART;
930}
931
932int kvm_create_lapic(struct kvm_vcpu *vcpu)
933{
934 struct kvm_lapic *apic;
935
936 ASSERT(vcpu != NULL);
937 apic_debug("apic_init %d\n", vcpu->vcpu_id);
938
939 apic = kzalloc(sizeof(*apic), GFP_KERNEL);
940 if (!apic)
941 goto nomem;
942
943 vcpu->apic = apic;
944
945 apic->regs_page = alloc_page(GFP_KERNEL);
946 if (apic->regs_page == NULL) {
947 printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
948 vcpu->vcpu_id);
949 goto nomem;
950 }
951 apic->regs = page_address(apic->regs_page);
952 memset(apic->regs, 0, PAGE_SIZE);
953 apic->vcpu = vcpu;
954
955 hrtimer_init(&apic->timer.dev, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
956 apic->timer.dev.function = apic_timer_fn;
957 apic->base_address = APIC_DEFAULT_PHYS_BASE;
958 vcpu->apic_base = APIC_DEFAULT_PHYS_BASE;
959
960 kvm_lapic_reset(vcpu);
961 apic->dev.read = apic_mmio_read;
962 apic->dev.write = apic_mmio_write;
963 apic->dev.in_range = apic_mmio_range;
964 apic->dev.private = apic;
965
966 return 0;
967nomem:
968 kvm_free_apic(apic);
969 return -ENOMEM;
970}
971EXPORT_SYMBOL_GPL(kvm_create_lapic);
972
973int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
974{
975 struct kvm_lapic *apic = vcpu->apic;
976 int highest_irr;
977
978 if (!apic || !apic_enabled(apic))
979 return -1;
980
981 apic_update_ppr(apic);
982 highest_irr = apic_find_highest_irr(apic);
983 if ((highest_irr == -1) ||
984 ((highest_irr & 0xF0) <= apic_get_reg(apic, APIC_PROCPRI)))
985 return -1;
986 return highest_irr;
987}
988
989int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
990{
991 u32 lvt0 = apic_get_reg(vcpu->apic, APIC_LVT0);
992 int r = 0;
993
994 if (vcpu->vcpu_id == 0) {
995 if (!apic_hw_enabled(vcpu->apic))
996 r = 1;
997 if ((lvt0 & APIC_LVT_MASKED) == 0 &&
998 GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
999 r = 1;
1000 }
1001 return r;
1002}
1003
1004void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
1005{
1006 struct kvm_lapic *apic = vcpu->apic;
1007
1008 if (apic && apic_lvt_enabled(apic, APIC_LVTT) &&
1009 atomic_read(&apic->timer.pending) > 0) {
1010 if (__inject_apic_timer_irq(apic))
1011 atomic_dec(&apic->timer.pending);
1012 }
1013}
1014
1015void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
1016{
1017 struct kvm_lapic *apic = vcpu->apic;
1018
1019 if (apic && apic_lvt_vector(apic, APIC_LVTT) == vec)
1020 apic->timer.last_update = ktime_add_ns(
1021 apic->timer.last_update,
1022 apic->timer.period);
1023}
1024
/*
 * Acknowledge and return the highest-priority pending APIC interrupt,
 * or -1 if none is deliverable (see kvm_apic_has_interrupt()).
 *
 * On delivery the vector moves from IRR to ISR and the processor
 * priority is recomputed, mirroring hardware interrupt acceptance;
 * the order of these three updates is intentional.
 */
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->apic;

	if (vector == -1)
		return -1;

	/* Mark in service, raise PPR, then retire the request bit. */
	apic_set_vector(vector, apic->regs + APIC_ISR);
	apic_update_ppr(apic);
	apic_clear_irr(vector, apic);
	return vector;
}
1038
1039void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
1040{
1041 struct kvm_lapic *apic = vcpu->apic;
1042
1043 apic->base_address = vcpu->apic_base &
1044 MSR_IA32_APICBASE_BASE;
1045 apic_set_reg(apic, APIC_LVR, APIC_VERSION);
1046 apic_update_ppr(apic);
1047 hrtimer_cancel(&apic->timer.dev);
1048 update_divide_count(apic);
1049 start_apic_timer(apic);
1050}
1051
1052void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
1053{
1054 struct kvm_lapic *apic = vcpu->apic;
1055 struct hrtimer *timer;
1056
1057 if (!apic)
1058 return;
1059
1060 timer = &apic->timer.dev;
1061 if (hrtimer_cancel(timer))
1062 hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
1063}
1064EXPORT_SYMBOL_GPL(kvm_migrate_apic_timer);
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 23965aa5ee..6d84d30f5e 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -158,7 +158,7 @@ static struct kmem_cache *mmu_page_header_cache;
158 158
159static int is_write_protection(struct kvm_vcpu *vcpu) 159static int is_write_protection(struct kvm_vcpu *vcpu)
160{ 160{
161 return vcpu->cr0 & CR0_WP_MASK; 161 return vcpu->cr0 & X86_CR0_WP;
162} 162}
163 163
164static int is_cpuid_PSE36(void) 164static int is_cpuid_PSE36(void)
@@ -202,15 +202,14 @@ static void set_shadow_pte(u64 *sptep, u64 spte)
202} 202}
203 203
204static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, 204static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
205 struct kmem_cache *base_cache, int min, 205 struct kmem_cache *base_cache, int min)
206 gfp_t gfp_flags)
207{ 206{
208 void *obj; 207 void *obj;
209 208
210 if (cache->nobjs >= min) 209 if (cache->nobjs >= min)
211 return 0; 210 return 0;
212 while (cache->nobjs < ARRAY_SIZE(cache->objects)) { 211 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
213 obj = kmem_cache_zalloc(base_cache, gfp_flags); 212 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
214 if (!obj) 213 if (!obj)
215 return -ENOMEM; 214 return -ENOMEM;
216 cache->objects[cache->nobjs++] = obj; 215 cache->objects[cache->nobjs++] = obj;
@@ -225,14 +224,14 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
225} 224}
226 225
227static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache, 226static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
228 int min, gfp_t gfp_flags) 227 int min)
229{ 228{
230 struct page *page; 229 struct page *page;
231 230
232 if (cache->nobjs >= min) 231 if (cache->nobjs >= min)
233 return 0; 232 return 0;
234 while (cache->nobjs < ARRAY_SIZE(cache->objects)) { 233 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
235 page = alloc_page(gfp_flags); 234 page = alloc_page(GFP_KERNEL);
236 if (!page) 235 if (!page)
237 return -ENOMEM; 236 return -ENOMEM;
238 set_page_private(page, 0); 237 set_page_private(page, 0);
@@ -247,44 +246,28 @@ static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
247 free_page((unsigned long)mc->objects[--mc->nobjs]); 246 free_page((unsigned long)mc->objects[--mc->nobjs]);
248} 247}
249 248
250static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags) 249static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
251{ 250{
252 int r; 251 int r;
253 252
253 kvm_mmu_free_some_pages(vcpu);
254 r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache, 254 r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
255 pte_chain_cache, 4, gfp_flags); 255 pte_chain_cache, 4);
256 if (r) 256 if (r)
257 goto out; 257 goto out;
258 r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache, 258 r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
259 rmap_desc_cache, 1, gfp_flags); 259 rmap_desc_cache, 1);
260 if (r) 260 if (r)
261 goto out; 261 goto out;
262 r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 4, gfp_flags); 262 r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 4);
263 if (r) 263 if (r)
264 goto out; 264 goto out;
265 r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache, 265 r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
266 mmu_page_header_cache, 4, gfp_flags); 266 mmu_page_header_cache, 4);
267out: 267out:
268 return r; 268 return r;
269} 269}
270 270
271static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
272{
273 int r;
274
275 r = __mmu_topup_memory_caches(vcpu, GFP_NOWAIT);
276 kvm_mmu_free_some_pages(vcpu);
277 if (r < 0) {
278 spin_unlock(&vcpu->kvm->lock);
279 kvm_arch_ops->vcpu_put(vcpu);
280 r = __mmu_topup_memory_caches(vcpu, GFP_KERNEL);
281 kvm_arch_ops->vcpu_load(vcpu);
282 spin_lock(&vcpu->kvm->lock);
283 kvm_mmu_free_some_pages(vcpu);
284 }
285 return r;
286}
287
288static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) 271static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
289{ 272{
290 mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache); 273 mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
@@ -969,7 +952,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
969static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu) 952static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
970{ 953{
971 ++vcpu->stat.tlb_flush; 954 ++vcpu->stat.tlb_flush;
972 kvm_arch_ops->tlb_flush(vcpu); 955 kvm_x86_ops->tlb_flush(vcpu);
973} 956}
974 957
975static void paging_new_cr3(struct kvm_vcpu *vcpu) 958static void paging_new_cr3(struct kvm_vcpu *vcpu)
@@ -982,7 +965,7 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
982 u64 addr, 965 u64 addr,
983 u32 err_code) 966 u32 err_code)
984{ 967{
985 kvm_arch_ops->inject_page_fault(vcpu, addr, err_code); 968 kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
986} 969}
987 970
988static void paging_free(struct kvm_vcpu *vcpu) 971static void paging_free(struct kvm_vcpu *vcpu)
@@ -1071,15 +1054,15 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
1071{ 1054{
1072 int r; 1055 int r;
1073 1056
1074 spin_lock(&vcpu->kvm->lock); 1057 mutex_lock(&vcpu->kvm->lock);
1075 r = mmu_topup_memory_caches(vcpu); 1058 r = mmu_topup_memory_caches(vcpu);
1076 if (r) 1059 if (r)
1077 goto out; 1060 goto out;
1078 mmu_alloc_roots(vcpu); 1061 mmu_alloc_roots(vcpu);
1079 kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa); 1062 kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
1080 kvm_mmu_flush_tlb(vcpu); 1063 kvm_mmu_flush_tlb(vcpu);
1081out: 1064out:
1082 spin_unlock(&vcpu->kvm->lock); 1065 mutex_unlock(&vcpu->kvm->lock);
1083 return r; 1066 return r;
1084} 1067}
1085EXPORT_SYMBOL_GPL(kvm_mmu_load); 1068EXPORT_SYMBOL_GPL(kvm_mmu_load);
@@ -1124,7 +1107,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
1124} 1107}
1125 1108
1126void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, 1109void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1127 const u8 *old, const u8 *new, int bytes) 1110 const u8 *new, int bytes)
1128{ 1111{
1129 gfn_t gfn = gpa >> PAGE_SHIFT; 1112 gfn_t gfn = gpa >> PAGE_SHIFT;
1130 struct kvm_mmu_page *page; 1113 struct kvm_mmu_page *page;
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 4b5391c717..6b094b44f8 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -58,7 +58,10 @@ struct guest_walker {
58 int level; 58 int level;
59 gfn_t table_gfn[PT_MAX_FULL_LEVELS]; 59 gfn_t table_gfn[PT_MAX_FULL_LEVELS];
60 pt_element_t *table; 60 pt_element_t *table;
61 pt_element_t pte;
61 pt_element_t *ptep; 62 pt_element_t *ptep;
63 struct page *page;
64 int index;
62 pt_element_t inherited_ar; 65 pt_element_t inherited_ar;
63 gfn_t gfn; 66 gfn_t gfn;
64 u32 error_code; 67 u32 error_code;
@@ -80,11 +83,14 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
80 pgprintk("%s: addr %lx\n", __FUNCTION__, addr); 83 pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
81 walker->level = vcpu->mmu.root_level; 84 walker->level = vcpu->mmu.root_level;
82 walker->table = NULL; 85 walker->table = NULL;
86 walker->page = NULL;
87 walker->ptep = NULL;
83 root = vcpu->cr3; 88 root = vcpu->cr3;
84#if PTTYPE == 64 89#if PTTYPE == 64
85 if (!is_long_mode(vcpu)) { 90 if (!is_long_mode(vcpu)) {
86 walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3]; 91 walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
87 root = *walker->ptep; 92 root = *walker->ptep;
93 walker->pte = root;
88 if (!(root & PT_PRESENT_MASK)) 94 if (!(root & PT_PRESENT_MASK))
89 goto not_present; 95 goto not_present;
90 --walker->level; 96 --walker->level;
@@ -96,10 +102,11 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
96 walker->level - 1, table_gfn); 102 walker->level - 1, table_gfn);
97 slot = gfn_to_memslot(vcpu->kvm, table_gfn); 103 slot = gfn_to_memslot(vcpu->kvm, table_gfn);
98 hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK); 104 hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
99 walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0); 105 walker->page = pfn_to_page(hpa >> PAGE_SHIFT);
106 walker->table = kmap_atomic(walker->page, KM_USER0);
100 107
101 ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) || 108 ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
102 (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0); 109 (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
103 110
104 walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK; 111 walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;
105 112
@@ -108,6 +115,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
108 hpa_t paddr; 115 hpa_t paddr;
109 116
110 ptep = &walker->table[index]; 117 ptep = &walker->table[index];
118 walker->index = index;
111 ASSERT(((unsigned long)walker->table & PAGE_MASK) == 119 ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
112 ((unsigned long)ptep & PAGE_MASK)); 120 ((unsigned long)ptep & PAGE_MASK));
113 121
@@ -148,16 +156,20 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
148 156
149 walker->inherited_ar &= walker->table[index]; 157 walker->inherited_ar &= walker->table[index];
150 table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT; 158 table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
151 paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK);
152 kunmap_atomic(walker->table, KM_USER0); 159 kunmap_atomic(walker->table, KM_USER0);
153 walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT), 160 paddr = safe_gpa_to_hpa(vcpu, table_gfn << PAGE_SHIFT);
154 KM_USER0); 161 walker->page = pfn_to_page(paddr >> PAGE_SHIFT);
162 walker->table = kmap_atomic(walker->page, KM_USER0);
155 --walker->level; 163 --walker->level;
156 walker->table_gfn[walker->level - 1 ] = table_gfn; 164 walker->table_gfn[walker->level - 1 ] = table_gfn;
157 pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__, 165 pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
158 walker->level - 1, table_gfn); 166 walker->level - 1, table_gfn);
159 } 167 }
160 walker->ptep = ptep; 168 walker->pte = *ptep;
169 if (walker->page)
170 walker->ptep = NULL;
171 if (walker->table)
172 kunmap_atomic(walker->table, KM_USER0);
161 pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep); 173 pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
162 return 1; 174 return 1;
163 175
@@ -175,13 +187,9 @@ err:
175 walker->error_code |= PFERR_USER_MASK; 187 walker->error_code |= PFERR_USER_MASK;
176 if (fetch_fault) 188 if (fetch_fault)
177 walker->error_code |= PFERR_FETCH_MASK; 189 walker->error_code |= PFERR_FETCH_MASK;
178 return 0;
179}
180
181static void FNAME(release_walker)(struct guest_walker *walker)
182{
183 if (walker->table) 190 if (walker->table)
184 kunmap_atomic(walker->table, KM_USER0); 191 kunmap_atomic(walker->table, KM_USER0);
192 return 0;
185} 193}
186 194
187static void FNAME(mark_pagetable_dirty)(struct kvm *kvm, 195static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
@@ -193,7 +201,7 @@ static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
193static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu, 201static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
194 u64 *shadow_pte, 202 u64 *shadow_pte,
195 gpa_t gaddr, 203 gpa_t gaddr,
196 pt_element_t *gpte, 204 pt_element_t gpte,
197 u64 access_bits, 205 u64 access_bits,
198 int user_fault, 206 int user_fault,
199 int write_fault, 207 int write_fault,
@@ -202,23 +210,34 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
202 gfn_t gfn) 210 gfn_t gfn)
203{ 211{
204 hpa_t paddr; 212 hpa_t paddr;
205 int dirty = *gpte & PT_DIRTY_MASK; 213 int dirty = gpte & PT_DIRTY_MASK;
206 u64 spte = *shadow_pte; 214 u64 spte = *shadow_pte;
207 int was_rmapped = is_rmap_pte(spte); 215 int was_rmapped = is_rmap_pte(spte);
208 216
209 pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d" 217 pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
210 " user_fault %d gfn %lx\n", 218 " user_fault %d gfn %lx\n",
211 __FUNCTION__, spte, (u64)*gpte, access_bits, 219 __FUNCTION__, spte, (u64)gpte, access_bits,
212 write_fault, user_fault, gfn); 220 write_fault, user_fault, gfn);
213 221
214 if (write_fault && !dirty) { 222 if (write_fault && !dirty) {
215 *gpte |= PT_DIRTY_MASK; 223 pt_element_t *guest_ent, *tmp = NULL;
224
225 if (walker->ptep)
226 guest_ent = walker->ptep;
227 else {
228 tmp = kmap_atomic(walker->page, KM_USER0);
229 guest_ent = &tmp[walker->index];
230 }
231
232 *guest_ent |= PT_DIRTY_MASK;
233 if (!walker->ptep)
234 kunmap_atomic(tmp, KM_USER0);
216 dirty = 1; 235 dirty = 1;
217 FNAME(mark_pagetable_dirty)(vcpu->kvm, walker); 236 FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
218 } 237 }
219 238
220 spte |= PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK; 239 spte |= PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK;
221 spte |= *gpte & PT64_NX_MASK; 240 spte |= gpte & PT64_NX_MASK;
222 if (!dirty) 241 if (!dirty)
223 access_bits &= ~PT_WRITABLE_MASK; 242 access_bits &= ~PT_WRITABLE_MASK;
224 243
@@ -255,7 +274,7 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
255 access_bits &= ~PT_WRITABLE_MASK; 274 access_bits &= ~PT_WRITABLE_MASK;
256 if (is_writeble_pte(spte)) { 275 if (is_writeble_pte(spte)) {
257 spte &= ~PT_WRITABLE_MASK; 276 spte &= ~PT_WRITABLE_MASK;
258 kvm_arch_ops->tlb_flush(vcpu); 277 kvm_x86_ops->tlb_flush(vcpu);
259 } 278 }
260 if (write_fault) 279 if (write_fault)
261 *ptwrite = 1; 280 *ptwrite = 1;
@@ -273,13 +292,13 @@ unshadowed:
273 rmap_add(vcpu, shadow_pte); 292 rmap_add(vcpu, shadow_pte);
274} 293}
275 294
276static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t *gpte, 295static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
277 u64 *shadow_pte, u64 access_bits, 296 u64 *shadow_pte, u64 access_bits,
278 int user_fault, int write_fault, int *ptwrite, 297 int user_fault, int write_fault, int *ptwrite,
279 struct guest_walker *walker, gfn_t gfn) 298 struct guest_walker *walker, gfn_t gfn)
280{ 299{
281 access_bits &= *gpte; 300 access_bits &= gpte;
282 FNAME(set_pte_common)(vcpu, shadow_pte, *gpte & PT_BASE_ADDR_MASK, 301 FNAME(set_pte_common)(vcpu, shadow_pte, gpte & PT_BASE_ADDR_MASK,
283 gpte, access_bits, user_fault, write_fault, 302 gpte, access_bits, user_fault, write_fault,
284 ptwrite, walker, gfn); 303 ptwrite, walker, gfn);
285} 304}
@@ -295,22 +314,22 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
295 if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) 314 if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
296 return; 315 return;
297 pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte); 316 pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
298 FNAME(set_pte)(vcpu, &gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0, 317 FNAME(set_pte)(vcpu, gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
299 0, NULL, NULL, 318 0, NULL, NULL,
300 (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT); 319 (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
301} 320}
302 321
303static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t *gpde, 322static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t gpde,
304 u64 *shadow_pte, u64 access_bits, 323 u64 *shadow_pte, u64 access_bits,
305 int user_fault, int write_fault, int *ptwrite, 324 int user_fault, int write_fault, int *ptwrite,
306 struct guest_walker *walker, gfn_t gfn) 325 struct guest_walker *walker, gfn_t gfn)
307{ 326{
308 gpa_t gaddr; 327 gpa_t gaddr;
309 328
310 access_bits &= *gpde; 329 access_bits &= gpde;
311 gaddr = (gpa_t)gfn << PAGE_SHIFT; 330 gaddr = (gpa_t)gfn << PAGE_SHIFT;
312 if (PTTYPE == 32 && is_cpuid_PSE36()) 331 if (PTTYPE == 32 && is_cpuid_PSE36())
313 gaddr |= (*gpde & PT32_DIR_PSE36_MASK) << 332 gaddr |= (gpde & PT32_DIR_PSE36_MASK) <<
314 (32 - PT32_DIR_PSE36_SHIFT); 333 (32 - PT32_DIR_PSE36_SHIFT);
315 FNAME(set_pte_common)(vcpu, shadow_pte, gaddr, 334 FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
316 gpde, access_bits, user_fault, write_fault, 335 gpde, access_bits, user_fault, write_fault,
@@ -328,9 +347,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
328 int level; 347 int level;
329 u64 *shadow_ent; 348 u64 *shadow_ent;
330 u64 *prev_shadow_ent = NULL; 349 u64 *prev_shadow_ent = NULL;
331 pt_element_t *guest_ent = walker->ptep;
332 350
333 if (!is_present_pte(*guest_ent)) 351 if (!is_present_pte(walker->pte))
334 return NULL; 352 return NULL;
335 353
336 shadow_addr = vcpu->mmu.root_hpa; 354 shadow_addr = vcpu->mmu.root_hpa;
@@ -364,12 +382,12 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
364 if (level - 1 == PT_PAGE_TABLE_LEVEL 382 if (level - 1 == PT_PAGE_TABLE_LEVEL
365 && walker->level == PT_DIRECTORY_LEVEL) { 383 && walker->level == PT_DIRECTORY_LEVEL) {
366 metaphysical = 1; 384 metaphysical = 1;
367 hugepage_access = *guest_ent; 385 hugepage_access = walker->pte;
368 hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK; 386 hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
369 if (*guest_ent & PT64_NX_MASK) 387 if (walker->pte & PT64_NX_MASK)
370 hugepage_access |= (1 << 2); 388 hugepage_access |= (1 << 2);
371 hugepage_access >>= PT_WRITABLE_SHIFT; 389 hugepage_access >>= PT_WRITABLE_SHIFT;
372 table_gfn = (*guest_ent & PT_BASE_ADDR_MASK) 390 table_gfn = (walker->pte & PT_BASE_ADDR_MASK)
373 >> PAGE_SHIFT; 391 >> PAGE_SHIFT;
374 } else { 392 } else {
375 metaphysical = 0; 393 metaphysical = 0;
@@ -386,12 +404,12 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
386 } 404 }
387 405
388 if (walker->level == PT_DIRECTORY_LEVEL) { 406 if (walker->level == PT_DIRECTORY_LEVEL) {
389 FNAME(set_pde)(vcpu, guest_ent, shadow_ent, 407 FNAME(set_pde)(vcpu, walker->pte, shadow_ent,
390 walker->inherited_ar, user_fault, write_fault, 408 walker->inherited_ar, user_fault, write_fault,
391 ptwrite, walker, walker->gfn); 409 ptwrite, walker, walker->gfn);
392 } else { 410 } else {
393 ASSERT(walker->level == PT_PAGE_TABLE_LEVEL); 411 ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
394 FNAME(set_pte)(vcpu, guest_ent, shadow_ent, 412 FNAME(set_pte)(vcpu, walker->pte, shadow_ent,
395 walker->inherited_ar, user_fault, write_fault, 413 walker->inherited_ar, user_fault, write_fault,
396 ptwrite, walker, walker->gfn); 414 ptwrite, walker, walker->gfn);
397 } 415 }
@@ -442,7 +460,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
442 if (!r) { 460 if (!r) {
443 pgprintk("%s: guest page fault\n", __FUNCTION__); 461 pgprintk("%s: guest page fault\n", __FUNCTION__);
444 inject_page_fault(vcpu, addr, walker.error_code); 462 inject_page_fault(vcpu, addr, walker.error_code);
445 FNAME(release_walker)(&walker);
446 vcpu->last_pt_write_count = 0; /* reset fork detector */ 463 vcpu->last_pt_write_count = 0; /* reset fork detector */
447 return 0; 464 return 0;
448 } 465 }
@@ -452,8 +469,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
452 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__, 469 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
453 shadow_pte, *shadow_pte, write_pt); 470 shadow_pte, *shadow_pte, write_pt);
454 471
455 FNAME(release_walker)(&walker);
456
457 if (!write_pt) 472 if (!write_pt)
458 vcpu->last_pt_write_count = 0; /* reset fork detector */ 473 vcpu->last_pt_write_count = 0; /* reset fork detector */
459 474
@@ -482,7 +497,6 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
482 gpa |= vaddr & ~PAGE_MASK; 497 gpa |= vaddr & ~PAGE_MASK;
483 } 498 }
484 499
485 FNAME(release_walker)(&walker);
486 return gpa; 500 return gpa;
487} 501}
488 502
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index bc818cc126..729f1cd936 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -16,12 +16,12 @@
16 16
17#include "kvm_svm.h" 17#include "kvm_svm.h"
18#include "x86_emulate.h" 18#include "x86_emulate.h"
19#include "irq.h"
19 20
20#include <linux/module.h> 21#include <linux/module.h>
21#include <linux/kernel.h> 22#include <linux/kernel.h>
22#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
23#include <linux/highmem.h> 24#include <linux/highmem.h>
24#include <linux/profile.h>
25#include <linux/sched.h> 25#include <linux/sched.h>
26 26
27#include <asm/desc.h> 27#include <asm/desc.h>
@@ -38,7 +38,6 @@ MODULE_LICENSE("GPL");
38 38
39#define DR7_GD_MASK (1 << 13) 39#define DR7_GD_MASK (1 << 13)
40#define DR6_BD_MASK (1 << 13) 40#define DR6_BD_MASK (1 << 13)
41#define CR4_DE_MASK (1UL << 3)
42 41
43#define SEG_TYPE_LDT 2 42#define SEG_TYPE_LDT 2
44#define SEG_TYPE_BUSY_TSS16 3 43#define SEG_TYPE_BUSY_TSS16 3
@@ -50,6 +49,13 @@ MODULE_LICENSE("GPL");
50#define SVM_FEATURE_LBRV (1 << 1) 49#define SVM_FEATURE_LBRV (1 << 1)
51#define SVM_DEATURE_SVML (1 << 2) 50#define SVM_DEATURE_SVML (1 << 2)
52 51
52static void kvm_reput_irq(struct vcpu_svm *svm);
53
54static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
55{
56 return container_of(vcpu, struct vcpu_svm, vcpu);
57}
58
53unsigned long iopm_base; 59unsigned long iopm_base;
54unsigned long msrpm_base; 60unsigned long msrpm_base;
55 61
@@ -94,20 +100,6 @@ static inline u32 svm_has(u32 feat)
94 return svm_features & feat; 100 return svm_features & feat;
95} 101}
96 102
97static unsigned get_addr_size(struct kvm_vcpu *vcpu)
98{
99 struct vmcb_save_area *sa = &vcpu->svm->vmcb->save;
100 u16 cs_attrib;
101
102 if (!(sa->cr0 & CR0_PE_MASK) || (sa->rflags & X86_EFLAGS_VM))
103 return 2;
104
105 cs_attrib = sa->cs.attrib;
106
107 return (cs_attrib & SVM_SELECTOR_L_MASK) ? 8 :
108 (cs_attrib & SVM_SELECTOR_DB_MASK) ? 4 : 2;
109}
110
111static inline u8 pop_irq(struct kvm_vcpu *vcpu) 103static inline u8 pop_irq(struct kvm_vcpu *vcpu)
112{ 104{
113 int word_index = __ffs(vcpu->irq_summary); 105 int word_index = __ffs(vcpu->irq_summary);
@@ -182,7 +174,7 @@ static inline void write_dr7(unsigned long val)
182 174
183static inline void force_new_asid(struct kvm_vcpu *vcpu) 175static inline void force_new_asid(struct kvm_vcpu *vcpu)
184{ 176{
185 vcpu->svm->asid_generation--; 177 to_svm(vcpu)->asid_generation--;
186} 178}
187 179
188static inline void flush_guest_tlb(struct kvm_vcpu *vcpu) 180static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
@@ -195,22 +187,24 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
195 if (!(efer & KVM_EFER_LMA)) 187 if (!(efer & KVM_EFER_LMA))
196 efer &= ~KVM_EFER_LME; 188 efer &= ~KVM_EFER_LME;
197 189
198 vcpu->svm->vmcb->save.efer = efer | MSR_EFER_SVME_MASK; 190 to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
199 vcpu->shadow_efer = efer; 191 vcpu->shadow_efer = efer;
200} 192}
201 193
202static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code) 194static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
203{ 195{
204 vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | 196 struct vcpu_svm *svm = to_svm(vcpu);
197
198 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
205 SVM_EVTINJ_VALID_ERR | 199 SVM_EVTINJ_VALID_ERR |
206 SVM_EVTINJ_TYPE_EXEPT | 200 SVM_EVTINJ_TYPE_EXEPT |
207 GP_VECTOR; 201 GP_VECTOR;
208 vcpu->svm->vmcb->control.event_inj_err = error_code; 202 svm->vmcb->control.event_inj_err = error_code;
209} 203}
210 204
211static void inject_ud(struct kvm_vcpu *vcpu) 205static void inject_ud(struct kvm_vcpu *vcpu)
212{ 206{
213 vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | 207 to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
214 SVM_EVTINJ_TYPE_EXEPT | 208 SVM_EVTINJ_TYPE_EXEPT |
215 UD_VECTOR; 209 UD_VECTOR;
216} 210}
@@ -229,19 +223,21 @@ static int is_external_interrupt(u32 info)
229 223
230static void skip_emulated_instruction(struct kvm_vcpu *vcpu) 224static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
231{ 225{
232 if (!vcpu->svm->next_rip) { 226 struct vcpu_svm *svm = to_svm(vcpu);
227
228 if (!svm->next_rip) {
233 printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__); 229 printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
234 return; 230 return;
235 } 231 }
236 if (vcpu->svm->next_rip - vcpu->svm->vmcb->save.rip > 15) { 232 if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE) {
237 printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n", 233 printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
238 __FUNCTION__, 234 __FUNCTION__,
239 vcpu->svm->vmcb->save.rip, 235 svm->vmcb->save.rip,
240 vcpu->svm->next_rip); 236 svm->next_rip);
241 } 237 }
242 238
243 vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip; 239 vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
244 vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; 240 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
245 241
246 vcpu->interrupt_window_open = 1; 242 vcpu->interrupt_window_open = 1;
247} 243}
@@ -351,8 +347,8 @@ err_1:
351 347
352} 348}
353 349
354static int set_msr_interception(u32 *msrpm, unsigned msr, 350static void set_msr_interception(u32 *msrpm, unsigned msr,
355 int read, int write) 351 int read, int write)
356{ 352{
357 int i; 353 int i;
358 354
@@ -367,11 +363,10 @@ static int set_msr_interception(u32 *msrpm, unsigned msr,
367 u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1); 363 u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
368 *base = (*base & ~(0x3 << msr_shift)) | 364 *base = (*base & ~(0x3 << msr_shift)) |
369 (mask << msr_shift); 365 (mask << msr_shift);
370 return 1; 366 return;
371 } 367 }
372 } 368 }
373 printk(KERN_DEBUG "%s: not found 0x%x\n", __FUNCTION__, msr); 369 BUG();
374 return 0;
375} 370}
376 371
377static __init int svm_hardware_setup(void) 372static __init int svm_hardware_setup(void)
@@ -382,8 +377,6 @@ static __init int svm_hardware_setup(void)
382 void *iopm_va, *msrpm_va; 377 void *iopm_va, *msrpm_va;
383 int r; 378 int r;
384 379
385 kvm_emulator_want_group7_invlpg();
386
387 iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER); 380 iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
388 381
389 if (!iopm_pages) 382 if (!iopm_pages)
@@ -458,11 +451,6 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
458 seg->base = 0; 451 seg->base = 0;
459} 452}
460 453
461static int svm_vcpu_setup(struct kvm_vcpu *vcpu)
462{
463 return 0;
464}
465
466static void init_vmcb(struct vmcb *vmcb) 454static void init_vmcb(struct vmcb *vmcb)
467{ 455{
468 struct vmcb_control_area *control = &vmcb->control; 456 struct vmcb_control_area *control = &vmcb->control;
@@ -563,59 +551,83 @@ static void init_vmcb(struct vmcb *vmcb)
563 * cr0 val on cpu init should be 0x60000010, we enable cpu 551 * cr0 val on cpu init should be 0x60000010, we enable cpu
564 * cache by default. the orderly way is to enable cache in bios. 552 * cache by default. the orderly way is to enable cache in bios.
565 */ 553 */
566 save->cr0 = 0x00000010 | CR0_PG_MASK | CR0_WP_MASK; 554 save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
567 save->cr4 = CR4_PAE_MASK; 555 save->cr4 = X86_CR4_PAE;
568 /* rdx = ?? */ 556 /* rdx = ?? */
569} 557}
570 558
571static int svm_create_vcpu(struct kvm_vcpu *vcpu) 559static void svm_vcpu_reset(struct kvm_vcpu *vcpu)
560{
561 struct vcpu_svm *svm = to_svm(vcpu);
562
563 init_vmcb(svm->vmcb);
564}
565
566static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
572{ 567{
568 struct vcpu_svm *svm;
573 struct page *page; 569 struct page *page;
574 int r; 570 int err;
571
572 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
573 if (!svm) {
574 err = -ENOMEM;
575 goto out;
576 }
577
578 err = kvm_vcpu_init(&svm->vcpu, kvm, id);
579 if (err)
580 goto free_svm;
581
582 if (irqchip_in_kernel(kvm)) {
583 err = kvm_create_lapic(&svm->vcpu);
584 if (err < 0)
585 goto free_svm;
586 }
575 587
576 r = -ENOMEM;
577 vcpu->svm = kzalloc(sizeof *vcpu->svm, GFP_KERNEL);
578 if (!vcpu->svm)
579 goto out1;
580 page = alloc_page(GFP_KERNEL); 588 page = alloc_page(GFP_KERNEL);
581 if (!page) 589 if (!page) {
582 goto out2; 590 err = -ENOMEM;
583 591 goto uninit;
584 vcpu->svm->vmcb = page_address(page); 592 }
585 clear_page(vcpu->svm->vmcb);
586 vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
587 vcpu->svm->asid_generation = 0;
588 memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
589 init_vmcb(vcpu->svm->vmcb);
590
591 fx_init(vcpu);
592 vcpu->fpu_active = 1;
593 vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
594 if (vcpu == &vcpu->kvm->vcpus[0])
595 vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
596 593
597 return 0; 594 svm->vmcb = page_address(page);
595 clear_page(svm->vmcb);
596 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
597 svm->asid_generation = 0;
598 memset(svm->db_regs, 0, sizeof(svm->db_regs));
599 init_vmcb(svm->vmcb);
598 600
599out2: 601 fx_init(&svm->vcpu);
600 kfree(vcpu->svm); 602 svm->vcpu.fpu_active = 1;
601out1: 603 svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
602 return r; 604 if (svm->vcpu.vcpu_id == 0)
605 svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;
606
607 return &svm->vcpu;
608
609uninit:
610 kvm_vcpu_uninit(&svm->vcpu);
611free_svm:
612 kmem_cache_free(kvm_vcpu_cache, svm);
613out:
614 return ERR_PTR(err);
603} 615}
604 616
605static void svm_free_vcpu(struct kvm_vcpu *vcpu) 617static void svm_free_vcpu(struct kvm_vcpu *vcpu)
606{ 618{
607 if (!vcpu->svm) 619 struct vcpu_svm *svm = to_svm(vcpu);
608 return; 620
609 if (vcpu->svm->vmcb) 621 __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
610 __free_page(pfn_to_page(vcpu->svm->vmcb_pa >> PAGE_SHIFT)); 622 kvm_vcpu_uninit(vcpu);
611 kfree(vcpu->svm); 623 kmem_cache_free(kvm_vcpu_cache, svm);
612} 624}
613 625
614static void svm_vcpu_load(struct kvm_vcpu *vcpu) 626static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
615{ 627{
616 int cpu, i; 628 struct vcpu_svm *svm = to_svm(vcpu);
629 int i;
617 630
618 cpu = get_cpu();
619 if (unlikely(cpu != vcpu->cpu)) { 631 if (unlikely(cpu != vcpu->cpu)) {
620 u64 tsc_this, delta; 632 u64 tsc_this, delta;
621 633
@@ -625,23 +637,24 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu)
625 */ 637 */
626 rdtscll(tsc_this); 638 rdtscll(tsc_this);
627 delta = vcpu->host_tsc - tsc_this; 639 delta = vcpu->host_tsc - tsc_this;
628 vcpu->svm->vmcb->control.tsc_offset += delta; 640 svm->vmcb->control.tsc_offset += delta;
629 vcpu->cpu = cpu; 641 vcpu->cpu = cpu;
642 kvm_migrate_apic_timer(vcpu);
630 } 643 }
631 644
632 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) 645 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
633 rdmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]); 646 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
634} 647}
635 648
636static void svm_vcpu_put(struct kvm_vcpu *vcpu) 649static void svm_vcpu_put(struct kvm_vcpu *vcpu)
637{ 650{
651 struct vcpu_svm *svm = to_svm(vcpu);
638 int i; 652 int i;
639 653
640 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) 654 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
641 wrmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]); 655 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
642 656
643 rdtscll(vcpu->host_tsc); 657 rdtscll(vcpu->host_tsc);
644 put_cpu();
645} 658}
646 659
647static void svm_vcpu_decache(struct kvm_vcpu *vcpu) 660static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
@@ -650,31 +663,34 @@ static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
650 663
651static void svm_cache_regs(struct kvm_vcpu *vcpu) 664static void svm_cache_regs(struct kvm_vcpu *vcpu)
652{ 665{
653 vcpu->regs[VCPU_REGS_RAX] = vcpu->svm->vmcb->save.rax; 666 struct vcpu_svm *svm = to_svm(vcpu);
654 vcpu->regs[VCPU_REGS_RSP] = vcpu->svm->vmcb->save.rsp; 667
655 vcpu->rip = vcpu->svm->vmcb->save.rip; 668 vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
669 vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
670 vcpu->rip = svm->vmcb->save.rip;
656} 671}
657 672
658static void svm_decache_regs(struct kvm_vcpu *vcpu) 673static void svm_decache_regs(struct kvm_vcpu *vcpu)
659{ 674{
660 vcpu->svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX]; 675 struct vcpu_svm *svm = to_svm(vcpu);
661 vcpu->svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP]; 676 svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
662 vcpu->svm->vmcb->save.rip = vcpu->rip; 677 svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
678 svm->vmcb->save.rip = vcpu->rip;
663} 679}
664 680
665static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) 681static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
666{ 682{
667 return vcpu->svm->vmcb->save.rflags; 683 return to_svm(vcpu)->vmcb->save.rflags;
668} 684}
669 685
670static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 686static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
671{ 687{
672 vcpu->svm->vmcb->save.rflags = rflags; 688 to_svm(vcpu)->vmcb->save.rflags = rflags;
673} 689}
674 690
675static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) 691static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
676{ 692{
677 struct vmcb_save_area *save = &vcpu->svm->vmcb->save; 693 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
678 694
679 switch (seg) { 695 switch (seg) {
680 case VCPU_SREG_CS: return &save->cs; 696 case VCPU_SREG_CS: return &save->cs;
@@ -716,36 +732,36 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
716 var->unusable = !var->present; 732 var->unusable = !var->present;
717} 733}
718 734
719static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
720{
721 struct vmcb_seg *s = svm_seg(vcpu, VCPU_SREG_CS);
722
723 *db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
724 *l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
725}
726
727static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) 735static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
728{ 736{
729 dt->limit = vcpu->svm->vmcb->save.idtr.limit; 737 struct vcpu_svm *svm = to_svm(vcpu);
730 dt->base = vcpu->svm->vmcb->save.idtr.base; 738
739 dt->limit = svm->vmcb->save.idtr.limit;
740 dt->base = svm->vmcb->save.idtr.base;
731} 741}
732 742
733static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) 743static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
734{ 744{
735 vcpu->svm->vmcb->save.idtr.limit = dt->limit; 745 struct vcpu_svm *svm = to_svm(vcpu);
736 vcpu->svm->vmcb->save.idtr.base = dt->base ; 746
747 svm->vmcb->save.idtr.limit = dt->limit;
748 svm->vmcb->save.idtr.base = dt->base ;
737} 749}
738 750
739static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) 751static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
740{ 752{
741 dt->limit = vcpu->svm->vmcb->save.gdtr.limit; 753 struct vcpu_svm *svm = to_svm(vcpu);
742 dt->base = vcpu->svm->vmcb->save.gdtr.base; 754
755 dt->limit = svm->vmcb->save.gdtr.limit;
756 dt->base = svm->vmcb->save.gdtr.base;
743} 757}
744 758
745static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) 759static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
746{ 760{
747 vcpu->svm->vmcb->save.gdtr.limit = dt->limit; 761 struct vcpu_svm *svm = to_svm(vcpu);
748 vcpu->svm->vmcb->save.gdtr.base = dt->base ; 762
763 svm->vmcb->save.gdtr.limit = dt->limit;
764 svm->vmcb->save.gdtr.base = dt->base ;
749} 765}
750 766
751static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) 767static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
@@ -754,39 +770,42 @@ static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
754 770
755static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 771static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
756{ 772{
773 struct vcpu_svm *svm = to_svm(vcpu);
774
757#ifdef CONFIG_X86_64 775#ifdef CONFIG_X86_64
758 if (vcpu->shadow_efer & KVM_EFER_LME) { 776 if (vcpu->shadow_efer & KVM_EFER_LME) {
759 if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) { 777 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
760 vcpu->shadow_efer |= KVM_EFER_LMA; 778 vcpu->shadow_efer |= KVM_EFER_LMA;
761 vcpu->svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME; 779 svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
762 } 780 }
763 781
764 if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK) ) { 782 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG) ) {
765 vcpu->shadow_efer &= ~KVM_EFER_LMA; 783 vcpu->shadow_efer &= ~KVM_EFER_LMA;
766 vcpu->svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME); 784 svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
767 } 785 }
768 } 786 }
769#endif 787#endif
770 if ((vcpu->cr0 & CR0_TS_MASK) && !(cr0 & CR0_TS_MASK)) { 788 if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
771 vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); 789 svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
772 vcpu->fpu_active = 1; 790 vcpu->fpu_active = 1;
773 } 791 }
774 792
775 vcpu->cr0 = cr0; 793 vcpu->cr0 = cr0;
776 cr0 |= CR0_PG_MASK | CR0_WP_MASK; 794 cr0 |= X86_CR0_PG | X86_CR0_WP;
777 cr0 &= ~(CR0_CD_MASK | CR0_NW_MASK); 795 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
778 vcpu->svm->vmcb->save.cr0 = cr0; 796 svm->vmcb->save.cr0 = cr0;
779} 797}
780 798
781static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 799static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
782{ 800{
783 vcpu->cr4 = cr4; 801 vcpu->cr4 = cr4;
784 vcpu->svm->vmcb->save.cr4 = cr4 | CR4_PAE_MASK; 802 to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
785} 803}
786 804
787static void svm_set_segment(struct kvm_vcpu *vcpu, 805static void svm_set_segment(struct kvm_vcpu *vcpu,
788 struct kvm_segment *var, int seg) 806 struct kvm_segment *var, int seg)
789{ 807{
808 struct vcpu_svm *svm = to_svm(vcpu);
790 struct vmcb_seg *s = svm_seg(vcpu, seg); 809 struct vmcb_seg *s = svm_seg(vcpu, seg);
791 810
792 s->base = var->base; 811 s->base = var->base;
@@ -805,16 +824,16 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
805 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; 824 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
806 } 825 }
807 if (seg == VCPU_SREG_CS) 826 if (seg == VCPU_SREG_CS)
808 vcpu->svm->vmcb->save.cpl 827 svm->vmcb->save.cpl
809 = (vcpu->svm->vmcb->save.cs.attrib 828 = (svm->vmcb->save.cs.attrib
810 >> SVM_SELECTOR_DPL_SHIFT) & 3; 829 >> SVM_SELECTOR_DPL_SHIFT) & 3;
811 830
812} 831}
813 832
814/* FIXME: 833/* FIXME:
815 834
816 vcpu->svm->vmcb->control.int_ctl &= ~V_TPR_MASK; 835 svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
817 vcpu->svm->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK); 836 svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);
818 837
819*/ 838*/
820 839
@@ -823,61 +842,68 @@ static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
823 return -EOPNOTSUPP; 842 return -EOPNOTSUPP;
824} 843}
825 844
845static int svm_get_irq(struct kvm_vcpu *vcpu)
846{
847 struct vcpu_svm *svm = to_svm(vcpu);
848 u32 exit_int_info = svm->vmcb->control.exit_int_info;
849
850 if (is_external_interrupt(exit_int_info))
851 return exit_int_info & SVM_EVTINJ_VEC_MASK;
852 return -1;
853}
854
826static void load_host_msrs(struct kvm_vcpu *vcpu) 855static void load_host_msrs(struct kvm_vcpu *vcpu)
827{ 856{
828#ifdef CONFIG_X86_64 857#ifdef CONFIG_X86_64
829 wrmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base); 858 wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
830#endif 859#endif
831} 860}
832 861
833static void save_host_msrs(struct kvm_vcpu *vcpu) 862static void save_host_msrs(struct kvm_vcpu *vcpu)
834{ 863{
835#ifdef CONFIG_X86_64 864#ifdef CONFIG_X86_64
836 rdmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base); 865 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
837#endif 866#endif
838} 867}
839 868
840static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data) 869static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
841{ 870{
842 if (svm_data->next_asid > svm_data->max_asid) { 871 if (svm_data->next_asid > svm_data->max_asid) {
843 ++svm_data->asid_generation; 872 ++svm_data->asid_generation;
844 svm_data->next_asid = 1; 873 svm_data->next_asid = 1;
845 vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; 874 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
846 } 875 }
847 876
848 vcpu->cpu = svm_data->cpu; 877 svm->vcpu.cpu = svm_data->cpu;
849 vcpu->svm->asid_generation = svm_data->asid_generation; 878 svm->asid_generation = svm_data->asid_generation;
850 vcpu->svm->vmcb->control.asid = svm_data->next_asid++; 879 svm->vmcb->control.asid = svm_data->next_asid++;
851}
852
853static void svm_invlpg(struct kvm_vcpu *vcpu, gva_t address)
854{
855 invlpga(address, vcpu->svm->vmcb->control.asid); // is needed?
856} 880}
857 881
858static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr) 882static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
859{ 883{
860 return vcpu->svm->db_regs[dr]; 884 return to_svm(vcpu)->db_regs[dr];
861} 885}
862 886
863static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value, 887static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
864 int *exception) 888 int *exception)
865{ 889{
890 struct vcpu_svm *svm = to_svm(vcpu);
891
866 *exception = 0; 892 *exception = 0;
867 893
868 if (vcpu->svm->vmcb->save.dr7 & DR7_GD_MASK) { 894 if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
869 vcpu->svm->vmcb->save.dr7 &= ~DR7_GD_MASK; 895 svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
870 vcpu->svm->vmcb->save.dr6 |= DR6_BD_MASK; 896 svm->vmcb->save.dr6 |= DR6_BD_MASK;
871 *exception = DB_VECTOR; 897 *exception = DB_VECTOR;
872 return; 898 return;
873 } 899 }
874 900
875 switch (dr) { 901 switch (dr) {
876 case 0 ... 3: 902 case 0 ... 3:
877 vcpu->svm->db_regs[dr] = value; 903 svm->db_regs[dr] = value;
878 return; 904 return;
879 case 4 ... 5: 905 case 4 ... 5:
880 if (vcpu->cr4 & CR4_DE_MASK) { 906 if (vcpu->cr4 & X86_CR4_DE) {
881 *exception = UD_VECTOR; 907 *exception = UD_VECTOR;
882 return; 908 return;
883 } 909 }
@@ -886,7 +912,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
886 *exception = GP_VECTOR; 912 *exception = GP_VECTOR;
887 return; 913 return;
888 } 914 }
889 vcpu->svm->vmcb->save.dr7 = value; 915 svm->vmcb->save.dr7 = value;
890 return; 916 return;
891 } 917 }
892 default: 918 default:
@@ -897,42 +923,44 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
897 } 923 }
898} 924}
899 925
900static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 926static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
901{ 927{
902 u32 exit_int_info = vcpu->svm->vmcb->control.exit_int_info; 928 u32 exit_int_info = svm->vmcb->control.exit_int_info;
929 struct kvm *kvm = svm->vcpu.kvm;
903 u64 fault_address; 930 u64 fault_address;
904 u32 error_code; 931 u32 error_code;
905 enum emulation_result er; 932 enum emulation_result er;
906 int r; 933 int r;
907 934
908 if (is_external_interrupt(exit_int_info)) 935 if (!irqchip_in_kernel(kvm) &&
909 push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK); 936 is_external_interrupt(exit_int_info))
937 push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
910 938
911 spin_lock(&vcpu->kvm->lock); 939 mutex_lock(&kvm->lock);
912 940
913 fault_address = vcpu->svm->vmcb->control.exit_info_2; 941 fault_address = svm->vmcb->control.exit_info_2;
914 error_code = vcpu->svm->vmcb->control.exit_info_1; 942 error_code = svm->vmcb->control.exit_info_1;
915 r = kvm_mmu_page_fault(vcpu, fault_address, error_code); 943 r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
916 if (r < 0) { 944 if (r < 0) {
917 spin_unlock(&vcpu->kvm->lock); 945 mutex_unlock(&kvm->lock);
918 return r; 946 return r;
919 } 947 }
920 if (!r) { 948 if (!r) {
921 spin_unlock(&vcpu->kvm->lock); 949 mutex_unlock(&kvm->lock);
922 return 1; 950 return 1;
923 } 951 }
924 er = emulate_instruction(vcpu, kvm_run, fault_address, error_code); 952 er = emulate_instruction(&svm->vcpu, kvm_run, fault_address,
925 spin_unlock(&vcpu->kvm->lock); 953 error_code);
954 mutex_unlock(&kvm->lock);
926 955
927 switch (er) { 956 switch (er) {
928 case EMULATE_DONE: 957 case EMULATE_DONE:
929 return 1; 958 return 1;
930 case EMULATE_DO_MMIO: 959 case EMULATE_DO_MMIO:
931 ++vcpu->stat.mmio_exits; 960 ++svm->vcpu.stat.mmio_exits;
932 kvm_run->exit_reason = KVM_EXIT_MMIO;
933 return 0; 961 return 0;
934 case EMULATE_FAIL: 962 case EMULATE_FAIL:
935 vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__); 963 kvm_report_emulation_failure(&svm->vcpu, "pagetable");
936 break; 964 break;
937 default: 965 default:
938 BUG(); 966 BUG();
@@ -942,252 +970,142 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
942 return 0; 970 return 0;
943} 971}
944 972
945static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 973static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
946{ 974{
947 vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); 975 svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
948 if (!(vcpu->cr0 & CR0_TS_MASK)) 976 if (!(svm->vcpu.cr0 & X86_CR0_TS))
949 vcpu->svm->vmcb->save.cr0 &= ~CR0_TS_MASK; 977 svm->vmcb->save.cr0 &= ~X86_CR0_TS;
950 vcpu->fpu_active = 1; 978 svm->vcpu.fpu_active = 1;
951 979
952 return 1; 980 return 1;
953} 981}
954 982
955static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 983static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
956{ 984{
957 /* 985 /*
958 * VMCB is undefined after a SHUTDOWN intercept 986 * VMCB is undefined after a SHUTDOWN intercept
959 * so reinitialize it. 987 * so reinitialize it.
960 */ 988 */
961 clear_page(vcpu->svm->vmcb); 989 clear_page(svm->vmcb);
962 init_vmcb(vcpu->svm->vmcb); 990 init_vmcb(svm->vmcb);
963 991
964 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; 992 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
965 return 0; 993 return 0;
966} 994}
967 995
968static int io_get_override(struct kvm_vcpu *vcpu, 996static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
969 struct vmcb_seg **seg,
970 int *addr_override)
971{
972 u8 inst[MAX_INST_SIZE];
973 unsigned ins_length;
974 gva_t rip;
975 int i;
976
977 rip = vcpu->svm->vmcb->save.rip;
978 ins_length = vcpu->svm->next_rip - rip;
979 rip += vcpu->svm->vmcb->save.cs.base;
980
981 if (ins_length > MAX_INST_SIZE)
982 printk(KERN_DEBUG
983 "%s: inst length err, cs base 0x%llx rip 0x%llx "
984 "next rip 0x%llx ins_length %u\n",
985 __FUNCTION__,
986 vcpu->svm->vmcb->save.cs.base,
987 vcpu->svm->vmcb->save.rip,
988 vcpu->svm->vmcb->control.exit_info_2,
989 ins_length);
990
991 if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
992 /* #PF */
993 return 0;
994
995 *addr_override = 0;
996 *seg = NULL;
997 for (i = 0; i < ins_length; i++)
998 switch (inst[i]) {
999 case 0xf0:
1000 case 0xf2:
1001 case 0xf3:
1002 case 0x66:
1003 continue;
1004 case 0x67:
1005 *addr_override = 1;
1006 continue;
1007 case 0x2e:
1008 *seg = &vcpu->svm->vmcb->save.cs;
1009 continue;
1010 case 0x36:
1011 *seg = &vcpu->svm->vmcb->save.ss;
1012 continue;
1013 case 0x3e:
1014 *seg = &vcpu->svm->vmcb->save.ds;
1015 continue;
1016 case 0x26:
1017 *seg = &vcpu->svm->vmcb->save.es;
1018 continue;
1019 case 0x64:
1020 *seg = &vcpu->svm->vmcb->save.fs;
1021 continue;
1022 case 0x65:
1023 *seg = &vcpu->svm->vmcb->save.gs;
1024 continue;
1025 default:
1026 return 1;
1027 }
1028 printk(KERN_DEBUG "%s: unexpected\n", __FUNCTION__);
1029 return 0;
1030}
1031
1032static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
1033{ 997{
1034 unsigned long addr_mask; 998 u32 io_info = svm->vmcb->control.exit_info_1; //address size bug?
1035 unsigned long *reg; 999 int size, down, in, string, rep;
1036 struct vmcb_seg *seg; 1000 unsigned port;
1037 int addr_override;
1038 struct vmcb_save_area *save_area = &vcpu->svm->vmcb->save;
1039 u16 cs_attrib = save_area->cs.attrib;
1040 unsigned addr_size = get_addr_size(vcpu);
1041
1042 if (!io_get_override(vcpu, &seg, &addr_override))
1043 return 0;
1044
1045 if (addr_override)
1046 addr_size = (addr_size == 2) ? 4: (addr_size >> 1);
1047 1001
1048 if (ins) { 1002 ++svm->vcpu.stat.io_exits;
1049 reg = &vcpu->regs[VCPU_REGS_RDI];
1050 seg = &vcpu->svm->vmcb->save.es;
1051 } else {
1052 reg = &vcpu->regs[VCPU_REGS_RSI];
1053 seg = (seg) ? seg : &vcpu->svm->vmcb->save.ds;
1054 }
1055 1003
1056 addr_mask = ~0ULL >> (64 - (addr_size * 8)); 1004 svm->next_rip = svm->vmcb->control.exit_info_2;
1057 1005
1058 if ((cs_attrib & SVM_SELECTOR_L_MASK) && 1006 string = (io_info & SVM_IOIO_STR_MASK) != 0;
1059 !(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_VM)) {
1060 *address = (*reg & addr_mask);
1061 return addr_mask;
1062 }
1063 1007
1064 if (!(seg->attrib & SVM_SELECTOR_P_SHIFT)) { 1008 if (string) {
1065 svm_inject_gp(vcpu, 0); 1009 if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO)
1066 return 0; 1010 return 0;
1011 return 1;
1067 } 1012 }
1068 1013
1069 *address = (*reg & addr_mask) + seg->base;
1070 return addr_mask;
1071}
1072
1073static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1074{
1075 u32 io_info = vcpu->svm->vmcb->control.exit_info_1; //address size bug?
1076 int size, down, in, string, rep;
1077 unsigned port;
1078 unsigned long count;
1079 gva_t address = 0;
1080
1081 ++vcpu->stat.io_exits;
1082
1083 vcpu->svm->next_rip = vcpu->svm->vmcb->control.exit_info_2;
1084
1085 in = (io_info & SVM_IOIO_TYPE_MASK) != 0; 1014 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
1086 port = io_info >> 16; 1015 port = io_info >> 16;
1087 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; 1016 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
1088 string = (io_info & SVM_IOIO_STR_MASK) != 0;
1089 rep = (io_info & SVM_IOIO_REP_MASK) != 0; 1017 rep = (io_info & SVM_IOIO_REP_MASK) != 0;
1090 count = 1; 1018 down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
1091 down = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
1092 1019
1093 if (string) { 1020 return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
1094 unsigned addr_mask;
1095
1096 addr_mask = io_adress(vcpu, in, &address);
1097 if (!addr_mask) {
1098 printk(KERN_DEBUG "%s: get io address failed\n",
1099 __FUNCTION__);
1100 return 1;
1101 }
1102
1103 if (rep)
1104 count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
1105 }
1106 return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
1107 address, rep, port);
1108} 1021}
1109 1022
1110static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1023static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1111{ 1024{
1112 return 1; 1025 return 1;
1113} 1026}
1114 1027
1115static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1028static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1116{ 1029{
1117 vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1; 1030 svm->next_rip = svm->vmcb->save.rip + 1;
1118 skip_emulated_instruction(vcpu); 1031 skip_emulated_instruction(&svm->vcpu);
1119 return kvm_emulate_halt(vcpu); 1032 return kvm_emulate_halt(&svm->vcpu);
1120} 1033}
1121 1034
1122static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1035static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1123{ 1036{
1124 vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 3; 1037 svm->next_rip = svm->vmcb->save.rip + 3;
1125 skip_emulated_instruction(vcpu); 1038 skip_emulated_instruction(&svm->vcpu);
1126 return kvm_hypercall(vcpu, kvm_run); 1039 return kvm_hypercall(&svm->vcpu, kvm_run);
1127} 1040}
1128 1041
1129static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1042static int invalid_op_interception(struct vcpu_svm *svm,
1043 struct kvm_run *kvm_run)
1130{ 1044{
1131 inject_ud(vcpu); 1045 inject_ud(&svm->vcpu);
1132 return 1; 1046 return 1;
1133} 1047}
1134 1048
1135static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1049static int task_switch_interception(struct vcpu_svm *svm,
1050 struct kvm_run *kvm_run)
1136{ 1051{
1137 printk(KERN_DEBUG "%s: task swiche is unsupported\n", __FUNCTION__); 1052 pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __FUNCTION__);
1138 kvm_run->exit_reason = KVM_EXIT_UNKNOWN; 1053 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
1139 return 0; 1054 return 0;
1140} 1055}
1141 1056
1142static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1057static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1143{ 1058{
1144 vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2; 1059 svm->next_rip = svm->vmcb->save.rip + 2;
1145 kvm_emulate_cpuid(vcpu); 1060 kvm_emulate_cpuid(&svm->vcpu);
1146 return 1; 1061 return 1;
1147} 1062}
1148 1063
1149static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1064static int emulate_on_interception(struct vcpu_svm *svm,
1065 struct kvm_run *kvm_run)
1150{ 1066{
1151 if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE) 1067 if (emulate_instruction(&svm->vcpu, NULL, 0, 0) != EMULATE_DONE)
1152 printk(KERN_ERR "%s: failed\n", __FUNCTION__); 1068 pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__);
1153 return 1; 1069 return 1;
1154} 1070}
1155 1071
1156static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) 1072static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
1157{ 1073{
1074 struct vcpu_svm *svm = to_svm(vcpu);
1075
1158 switch (ecx) { 1076 switch (ecx) {
1159 case MSR_IA32_TIME_STAMP_COUNTER: { 1077 case MSR_IA32_TIME_STAMP_COUNTER: {
1160 u64 tsc; 1078 u64 tsc;
1161 1079
1162 rdtscll(tsc); 1080 rdtscll(tsc);
1163 *data = vcpu->svm->vmcb->control.tsc_offset + tsc; 1081 *data = svm->vmcb->control.tsc_offset + tsc;
1164 break; 1082 break;
1165 } 1083 }
1166 case MSR_K6_STAR: 1084 case MSR_K6_STAR:
1167 *data = vcpu->svm->vmcb->save.star; 1085 *data = svm->vmcb->save.star;
1168 break; 1086 break;
1169#ifdef CONFIG_X86_64 1087#ifdef CONFIG_X86_64
1170 case MSR_LSTAR: 1088 case MSR_LSTAR:
1171 *data = vcpu->svm->vmcb->save.lstar; 1089 *data = svm->vmcb->save.lstar;
1172 break; 1090 break;
1173 case MSR_CSTAR: 1091 case MSR_CSTAR:
1174 *data = vcpu->svm->vmcb->save.cstar; 1092 *data = svm->vmcb->save.cstar;
1175 break; 1093 break;
1176 case MSR_KERNEL_GS_BASE: 1094 case MSR_KERNEL_GS_BASE:
1177 *data = vcpu->svm->vmcb->save.kernel_gs_base; 1095 *data = svm->vmcb->save.kernel_gs_base;
1178 break; 1096 break;
1179 case MSR_SYSCALL_MASK: 1097 case MSR_SYSCALL_MASK:
1180 *data = vcpu->svm->vmcb->save.sfmask; 1098 *data = svm->vmcb->save.sfmask;
1181 break; 1099 break;
1182#endif 1100#endif
1183 case MSR_IA32_SYSENTER_CS: 1101 case MSR_IA32_SYSENTER_CS:
1184 *data = vcpu->svm->vmcb->save.sysenter_cs; 1102 *data = svm->vmcb->save.sysenter_cs;
1185 break; 1103 break;
1186 case MSR_IA32_SYSENTER_EIP: 1104 case MSR_IA32_SYSENTER_EIP:
1187 *data = vcpu->svm->vmcb->save.sysenter_eip; 1105 *data = svm->vmcb->save.sysenter_eip;
1188 break; 1106 break;
1189 case MSR_IA32_SYSENTER_ESP: 1107 case MSR_IA32_SYSENTER_ESP:
1190 *data = vcpu->svm->vmcb->save.sysenter_esp; 1108 *data = svm->vmcb->save.sysenter_esp;
1191 break; 1109 break;
1192 default: 1110 default:
1193 return kvm_get_msr_common(vcpu, ecx, data); 1111 return kvm_get_msr_common(vcpu, ecx, data);
@@ -1195,57 +1113,59 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
1195 return 0; 1113 return 0;
1196} 1114}
1197 1115
1198static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1116static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1199{ 1117{
1200 u32 ecx = vcpu->regs[VCPU_REGS_RCX]; 1118 u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
1201 u64 data; 1119 u64 data;
1202 1120
1203 if (svm_get_msr(vcpu, ecx, &data)) 1121 if (svm_get_msr(&svm->vcpu, ecx, &data))
1204 svm_inject_gp(vcpu, 0); 1122 svm_inject_gp(&svm->vcpu, 0);
1205 else { 1123 else {
1206 vcpu->svm->vmcb->save.rax = data & 0xffffffff; 1124 svm->vmcb->save.rax = data & 0xffffffff;
1207 vcpu->regs[VCPU_REGS_RDX] = data >> 32; 1125 svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
1208 vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2; 1126 svm->next_rip = svm->vmcb->save.rip + 2;
1209 skip_emulated_instruction(vcpu); 1127 skip_emulated_instruction(&svm->vcpu);
1210 } 1128 }
1211 return 1; 1129 return 1;
1212} 1130}
1213 1131
1214static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) 1132static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
1215{ 1133{
1134 struct vcpu_svm *svm = to_svm(vcpu);
1135
1216 switch (ecx) { 1136 switch (ecx) {
1217 case MSR_IA32_TIME_STAMP_COUNTER: { 1137 case MSR_IA32_TIME_STAMP_COUNTER: {
1218 u64 tsc; 1138 u64 tsc;
1219 1139
1220 rdtscll(tsc); 1140 rdtscll(tsc);
1221 vcpu->svm->vmcb->control.tsc_offset = data - tsc; 1141 svm->vmcb->control.tsc_offset = data - tsc;
1222 break; 1142 break;
1223 } 1143 }
1224 case MSR_K6_STAR: 1144 case MSR_K6_STAR:
1225 vcpu->svm->vmcb->save.star = data; 1145 svm->vmcb->save.star = data;
1226 break; 1146 break;
1227#ifdef CONFIG_X86_64 1147#ifdef CONFIG_X86_64
1228 case MSR_LSTAR: 1148 case MSR_LSTAR:
1229 vcpu->svm->vmcb->save.lstar = data; 1149 svm->vmcb->save.lstar = data;
1230 break; 1150 break;
1231 case MSR_CSTAR: 1151 case MSR_CSTAR:
1232 vcpu->svm->vmcb->save.cstar = data; 1152 svm->vmcb->save.cstar = data;
1233 break; 1153 break;
1234 case MSR_KERNEL_GS_BASE: 1154 case MSR_KERNEL_GS_BASE:
1235 vcpu->svm->vmcb->save.kernel_gs_base = data; 1155 svm->vmcb->save.kernel_gs_base = data;
1236 break; 1156 break;
1237 case MSR_SYSCALL_MASK: 1157 case MSR_SYSCALL_MASK:
1238 vcpu->svm->vmcb->save.sfmask = data; 1158 svm->vmcb->save.sfmask = data;
1239 break; 1159 break;
1240#endif 1160#endif
1241 case MSR_IA32_SYSENTER_CS: 1161 case MSR_IA32_SYSENTER_CS:
1242 vcpu->svm->vmcb->save.sysenter_cs = data; 1162 svm->vmcb->save.sysenter_cs = data;
1243 break; 1163 break;
1244 case MSR_IA32_SYSENTER_EIP: 1164 case MSR_IA32_SYSENTER_EIP:
1245 vcpu->svm->vmcb->save.sysenter_eip = data; 1165 svm->vmcb->save.sysenter_eip = data;
1246 break; 1166 break;
1247 case MSR_IA32_SYSENTER_ESP: 1167 case MSR_IA32_SYSENTER_ESP:
1248 vcpu->svm->vmcb->save.sysenter_esp = data; 1168 svm->vmcb->save.sysenter_esp = data;
1249 break; 1169 break;
1250 default: 1170 default:
1251 return kvm_set_msr_common(vcpu, ecx, data); 1171 return kvm_set_msr_common(vcpu, ecx, data);
@@ -1253,37 +1173,39 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
1253 return 0; 1173 return 0;
1254} 1174}
1255 1175
1256static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1176static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1257{ 1177{
1258 u32 ecx = vcpu->regs[VCPU_REGS_RCX]; 1178 u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
1259 u64 data = (vcpu->svm->vmcb->save.rax & -1u) 1179 u64 data = (svm->vmcb->save.rax & -1u)
1260 | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32); 1180 | ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
1261 vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2; 1181 svm->next_rip = svm->vmcb->save.rip + 2;
1262 if (svm_set_msr(vcpu, ecx, data)) 1182 if (svm_set_msr(&svm->vcpu, ecx, data))
1263 svm_inject_gp(vcpu, 0); 1183 svm_inject_gp(&svm->vcpu, 0);
1264 else 1184 else
1265 skip_emulated_instruction(vcpu); 1185 skip_emulated_instruction(&svm->vcpu);
1266 return 1; 1186 return 1;
1267} 1187}
1268 1188
1269static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1189static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1270{ 1190{
1271 if (vcpu->svm->vmcb->control.exit_info_1) 1191 if (svm->vmcb->control.exit_info_1)
1272 return wrmsr_interception(vcpu, kvm_run); 1192 return wrmsr_interception(svm, kvm_run);
1273 else 1193 else
1274 return rdmsr_interception(vcpu, kvm_run); 1194 return rdmsr_interception(svm, kvm_run);
1275} 1195}
1276 1196
1277static int interrupt_window_interception(struct kvm_vcpu *vcpu, 1197static int interrupt_window_interception(struct vcpu_svm *svm,
1278 struct kvm_run *kvm_run) 1198 struct kvm_run *kvm_run)
1279{ 1199{
1200 svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
1201 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
1280 /* 1202 /*
1281 * If the user space waits to inject interrupts, exit as soon as 1203 * If the user space waits to inject interrupts, exit as soon as
1282 * possible 1204 * possible
1283 */ 1205 */
1284 if (kvm_run->request_interrupt_window && 1206 if (kvm_run->request_interrupt_window &&
1285 !vcpu->irq_summary) { 1207 !svm->vcpu.irq_summary) {
1286 ++vcpu->stat.irq_window_exits; 1208 ++svm->vcpu.stat.irq_window_exits;
1287 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; 1209 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
1288 return 0; 1210 return 0;
1289 } 1211 }
@@ -1291,7 +1213,7 @@ static int interrupt_window_interception(struct kvm_vcpu *vcpu,
1291 return 1; 1213 return 1;
1292} 1214}
1293 1215
1294static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu, 1216static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
1295 struct kvm_run *kvm_run) = { 1217 struct kvm_run *kvm_run) = {
1296 [SVM_EXIT_READ_CR0] = emulate_on_interception, 1218 [SVM_EXIT_READ_CR0] = emulate_on_interception,
1297 [SVM_EXIT_READ_CR3] = emulate_on_interception, 1219 [SVM_EXIT_READ_CR3] = emulate_on_interception,
@@ -1338,15 +1260,25 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
1338}; 1260};
1339 1261
1340 1262
1341static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1263static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1342{ 1264{
1343 u32 exit_code = vcpu->svm->vmcb->control.exit_code; 1265 struct vcpu_svm *svm = to_svm(vcpu);
1266 u32 exit_code = svm->vmcb->control.exit_code;
1267
1268 kvm_reput_irq(svm);
1344 1269
1345 if (is_external_interrupt(vcpu->svm->vmcb->control.exit_int_info) && 1270 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
1271 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
1272 kvm_run->fail_entry.hardware_entry_failure_reason
1273 = svm->vmcb->control.exit_code;
1274 return 0;
1275 }
1276
1277 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
1346 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR) 1278 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
1347 printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x " 1279 printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
1348 "exit_code 0x%x\n", 1280 "exit_code 0x%x\n",
1349 __FUNCTION__, vcpu->svm->vmcb->control.exit_int_info, 1281 __FUNCTION__, svm->vmcb->control.exit_int_info,
1350 exit_code); 1282 exit_code);
1351 1283
1352 if (exit_code >= ARRAY_SIZE(svm_exit_handlers) 1284 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
@@ -1356,7 +1288,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1356 return 0; 1288 return 0;
1357 } 1289 }
1358 1290
1359 return svm_exit_handlers[exit_code](vcpu, kvm_run); 1291 return svm_exit_handlers[exit_code](svm, kvm_run);
1360} 1292}
1361 1293
1362static void reload_tss(struct kvm_vcpu *vcpu) 1294static void reload_tss(struct kvm_vcpu *vcpu)
@@ -1368,93 +1300,126 @@ static void reload_tss(struct kvm_vcpu *vcpu)
1368 load_TR_desc(); 1300 load_TR_desc();
1369} 1301}
1370 1302
1371static void pre_svm_run(struct kvm_vcpu *vcpu) 1303static void pre_svm_run(struct vcpu_svm *svm)
1372{ 1304{
1373 int cpu = raw_smp_processor_id(); 1305 int cpu = raw_smp_processor_id();
1374 1306
1375 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu); 1307 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
1376 1308
1377 vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; 1309 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
1378 if (vcpu->cpu != cpu || 1310 if (svm->vcpu.cpu != cpu ||
1379 vcpu->svm->asid_generation != svm_data->asid_generation) 1311 svm->asid_generation != svm_data->asid_generation)
1380 new_asid(vcpu, svm_data); 1312 new_asid(svm, svm_data);
1381} 1313}
1382 1314
1383 1315
1384static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu) 1316static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
1385{ 1317{
1386 struct vmcb_control_area *control; 1318 struct vmcb_control_area *control;
1387 1319
1388 control = &vcpu->svm->vmcb->control; 1320 control = &svm->vmcb->control;
1389 control->int_vector = pop_irq(vcpu); 1321 control->int_vector = irq;
1390 control->int_ctl &= ~V_INTR_PRIO_MASK; 1322 control->int_ctl &= ~V_INTR_PRIO_MASK;
1391 control->int_ctl |= V_IRQ_MASK | 1323 control->int_ctl |= V_IRQ_MASK |
1392 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); 1324 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
1393} 1325}
1394 1326
1395static void kvm_reput_irq(struct kvm_vcpu *vcpu) 1327static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
1328{
1329 struct vcpu_svm *svm = to_svm(vcpu);
1330
1331 svm_inject_irq(svm, irq);
1332}
1333
1334static void svm_intr_assist(struct kvm_vcpu *vcpu)
1335{
1336 struct vcpu_svm *svm = to_svm(vcpu);
1337 struct vmcb *vmcb = svm->vmcb;
1338 int intr_vector = -1;
1339
1340 kvm_inject_pending_timer_irqs(vcpu);
1341 if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
1342 ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
1343 intr_vector = vmcb->control.exit_int_info &
1344 SVM_EVTINJ_VEC_MASK;
1345 vmcb->control.exit_int_info = 0;
1346 svm_inject_irq(svm, intr_vector);
1347 return;
1348 }
1349
1350 if (vmcb->control.int_ctl & V_IRQ_MASK)
1351 return;
1352
1353 if (!kvm_cpu_has_interrupt(vcpu))
1354 return;
1355
1356 if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
1357 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
1358 (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
1359 /* unable to deliver irq, set pending irq */
1360 vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
1361 svm_inject_irq(svm, 0x0);
1362 return;
1363 }
1364 /* Okay, we can deliver the interrupt: grab it and update PIC state. */
1365 intr_vector = kvm_cpu_get_interrupt(vcpu);
1366 svm_inject_irq(svm, intr_vector);
1367 kvm_timer_intr_post(vcpu, intr_vector);
1368}
1369
1370static void kvm_reput_irq(struct vcpu_svm *svm)
1396{ 1371{
1397 struct vmcb_control_area *control = &vcpu->svm->vmcb->control; 1372 struct vmcb_control_area *control = &svm->vmcb->control;
1398 1373
1399 if (control->int_ctl & V_IRQ_MASK) { 1374 if ((control->int_ctl & V_IRQ_MASK)
1375 && !irqchip_in_kernel(svm->vcpu.kvm)) {
1400 control->int_ctl &= ~V_IRQ_MASK; 1376 control->int_ctl &= ~V_IRQ_MASK;
1401 push_irq(vcpu, control->int_vector); 1377 push_irq(&svm->vcpu, control->int_vector);
1402 } 1378 }
1403 1379
1404 vcpu->interrupt_window_open = 1380 svm->vcpu.interrupt_window_open =
1405 !(control->int_state & SVM_INTERRUPT_SHADOW_MASK); 1381 !(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
1406} 1382}
1407 1383
1384static void svm_do_inject_vector(struct vcpu_svm *svm)
1385{
1386 struct kvm_vcpu *vcpu = &svm->vcpu;
1387 int word_index = __ffs(vcpu->irq_summary);
1388 int bit_index = __ffs(vcpu->irq_pending[word_index]);
1389 int irq = word_index * BITS_PER_LONG + bit_index;
1390
1391 clear_bit(bit_index, &vcpu->irq_pending[word_index]);
1392 if (!vcpu->irq_pending[word_index])
1393 clear_bit(word_index, &vcpu->irq_summary);
1394 svm_inject_irq(svm, irq);
1395}
1396
1408static void do_interrupt_requests(struct kvm_vcpu *vcpu, 1397static void do_interrupt_requests(struct kvm_vcpu *vcpu,
1409 struct kvm_run *kvm_run) 1398 struct kvm_run *kvm_run)
1410{ 1399{
1411 struct vmcb_control_area *control = &vcpu->svm->vmcb->control; 1400 struct vcpu_svm *svm = to_svm(vcpu);
1401 struct vmcb_control_area *control = &svm->vmcb->control;
1412 1402
1413 vcpu->interrupt_window_open = 1403 svm->vcpu.interrupt_window_open =
1414 (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) && 1404 (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
1415 (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF)); 1405 (svm->vmcb->save.rflags & X86_EFLAGS_IF));
1416 1406
1417 if (vcpu->interrupt_window_open && vcpu->irq_summary) 1407 if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary)
1418 /* 1408 /*
1419 * If interrupts enabled, and not blocked by sti or mov ss. Good. 1409 * If interrupts enabled, and not blocked by sti or mov ss. Good.
1420 */ 1410 */
1421 kvm_do_inject_irq(vcpu); 1411 svm_do_inject_vector(svm);
1422 1412
1423 /* 1413 /*
1424 * Interrupts blocked. Wait for unblock. 1414 * Interrupts blocked. Wait for unblock.
1425 */ 1415 */
1426 if (!vcpu->interrupt_window_open && 1416 if (!svm->vcpu.interrupt_window_open &&
1427 (vcpu->irq_summary || kvm_run->request_interrupt_window)) { 1417 (svm->vcpu.irq_summary || kvm_run->request_interrupt_window)) {
1428 control->intercept |= 1ULL << INTERCEPT_VINTR; 1418 control->intercept |= 1ULL << INTERCEPT_VINTR;
1429 } else 1419 } else
1430 control->intercept &= ~(1ULL << INTERCEPT_VINTR); 1420 control->intercept &= ~(1ULL << INTERCEPT_VINTR);
1431} 1421}
1432 1422
1433static void post_kvm_run_save(struct kvm_vcpu *vcpu,
1434 struct kvm_run *kvm_run)
1435{
1436 kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
1437 vcpu->irq_summary == 0);
1438 kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
1439 kvm_run->cr8 = vcpu->cr8;
1440 kvm_run->apic_base = vcpu->apic_base;
1441}
1442
1443/*
1444 * Check if userspace requested an interrupt window, and that the
1445 * interrupt window is open.
1446 *
1447 * No need to exit to userspace if we already have an interrupt queued.
1448 */
1449static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
1450 struct kvm_run *kvm_run)
1451{
1452 return (!vcpu->irq_summary &&
1453 kvm_run->request_interrupt_window &&
1454 vcpu->interrupt_window_open &&
1455 (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
1456}
1457
1458static void save_db_regs(unsigned long *db_regs) 1423static void save_db_regs(unsigned long *db_regs)
1459{ 1424{
1460 asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0])); 1425 asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
@@ -1476,49 +1441,37 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu)
1476 force_new_asid(vcpu); 1441 force_new_asid(vcpu);
1477} 1442}
1478 1443
1479static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1444static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
1445{
1446}
1447
1448static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1480{ 1449{
1450 struct vcpu_svm *svm = to_svm(vcpu);
1481 u16 fs_selector; 1451 u16 fs_selector;
1482 u16 gs_selector; 1452 u16 gs_selector;
1483 u16 ldt_selector; 1453 u16 ldt_selector;
1484 int r;
1485
1486again:
1487 r = kvm_mmu_reload(vcpu);
1488 if (unlikely(r))
1489 return r;
1490
1491 if (!vcpu->mmio_read_completed)
1492 do_interrupt_requests(vcpu, kvm_run);
1493 1454
1494 clgi(); 1455 pre_svm_run(svm);
1495
1496 vcpu->guest_mode = 1;
1497 if (vcpu->requests)
1498 if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
1499 svm_flush_tlb(vcpu);
1500
1501 pre_svm_run(vcpu);
1502 1456
1503 save_host_msrs(vcpu); 1457 save_host_msrs(vcpu);
1504 fs_selector = read_fs(); 1458 fs_selector = read_fs();
1505 gs_selector = read_gs(); 1459 gs_selector = read_gs();
1506 ldt_selector = read_ldt(); 1460 ldt_selector = read_ldt();
1507 vcpu->svm->host_cr2 = kvm_read_cr2(); 1461 svm->host_cr2 = kvm_read_cr2();
1508 vcpu->svm->host_dr6 = read_dr6(); 1462 svm->host_dr6 = read_dr6();
1509 vcpu->svm->host_dr7 = read_dr7(); 1463 svm->host_dr7 = read_dr7();
1510 vcpu->svm->vmcb->save.cr2 = vcpu->cr2; 1464 svm->vmcb->save.cr2 = vcpu->cr2;
1511 1465
1512 if (vcpu->svm->vmcb->save.dr7 & 0xff) { 1466 if (svm->vmcb->save.dr7 & 0xff) {
1513 write_dr7(0); 1467 write_dr7(0);
1514 save_db_regs(vcpu->svm->host_db_regs); 1468 save_db_regs(svm->host_db_regs);
1515 load_db_regs(vcpu->svm->db_regs); 1469 load_db_regs(svm->db_regs);
1516 } 1470 }
1517 1471
1518 if (vcpu->fpu_active) { 1472 clgi();
1519 fx_save(vcpu->host_fx_image); 1473
1520 fx_restore(vcpu->guest_fx_image); 1474 local_irq_enable();
1521 }
1522 1475
1523 asm volatile ( 1476 asm volatile (
1524#ifdef CONFIG_X86_64 1477#ifdef CONFIG_X86_64
@@ -1532,34 +1485,33 @@ again:
1532#endif 1485#endif
1533 1486
1534#ifdef CONFIG_X86_64 1487#ifdef CONFIG_X86_64
1535 "mov %c[rbx](%[vcpu]), %%rbx \n\t" 1488 "mov %c[rbx](%[svm]), %%rbx \n\t"
1536 "mov %c[rcx](%[vcpu]), %%rcx \n\t" 1489 "mov %c[rcx](%[svm]), %%rcx \n\t"
1537 "mov %c[rdx](%[vcpu]), %%rdx \n\t" 1490 "mov %c[rdx](%[svm]), %%rdx \n\t"
1538 "mov %c[rsi](%[vcpu]), %%rsi \n\t" 1491 "mov %c[rsi](%[svm]), %%rsi \n\t"
1539 "mov %c[rdi](%[vcpu]), %%rdi \n\t" 1492 "mov %c[rdi](%[svm]), %%rdi \n\t"
1540 "mov %c[rbp](%[vcpu]), %%rbp \n\t" 1493 "mov %c[rbp](%[svm]), %%rbp \n\t"
1541 "mov %c[r8](%[vcpu]), %%r8 \n\t" 1494 "mov %c[r8](%[svm]), %%r8 \n\t"
1542 "mov %c[r9](%[vcpu]), %%r9 \n\t" 1495 "mov %c[r9](%[svm]), %%r9 \n\t"
1543 "mov %c[r10](%[vcpu]), %%r10 \n\t" 1496 "mov %c[r10](%[svm]), %%r10 \n\t"
1544 "mov %c[r11](%[vcpu]), %%r11 \n\t" 1497 "mov %c[r11](%[svm]), %%r11 \n\t"
1545 "mov %c[r12](%[vcpu]), %%r12 \n\t" 1498 "mov %c[r12](%[svm]), %%r12 \n\t"
1546 "mov %c[r13](%[vcpu]), %%r13 \n\t" 1499 "mov %c[r13](%[svm]), %%r13 \n\t"
1547 "mov %c[r14](%[vcpu]), %%r14 \n\t" 1500 "mov %c[r14](%[svm]), %%r14 \n\t"
1548 "mov %c[r15](%[vcpu]), %%r15 \n\t" 1501 "mov %c[r15](%[svm]), %%r15 \n\t"
1549#else 1502#else
1550 "mov %c[rbx](%[vcpu]), %%ebx \n\t" 1503 "mov %c[rbx](%[svm]), %%ebx \n\t"
1551 "mov %c[rcx](%[vcpu]), %%ecx \n\t" 1504 "mov %c[rcx](%[svm]), %%ecx \n\t"
1552 "mov %c[rdx](%[vcpu]), %%edx \n\t" 1505 "mov %c[rdx](%[svm]), %%edx \n\t"
1553 "mov %c[rsi](%[vcpu]), %%esi \n\t" 1506 "mov %c[rsi](%[svm]), %%esi \n\t"
1554 "mov %c[rdi](%[vcpu]), %%edi \n\t" 1507 "mov %c[rdi](%[svm]), %%edi \n\t"
1555 "mov %c[rbp](%[vcpu]), %%ebp \n\t" 1508 "mov %c[rbp](%[svm]), %%ebp \n\t"
1556#endif 1509#endif
1557 1510
1558#ifdef CONFIG_X86_64 1511#ifdef CONFIG_X86_64
1559 /* Enter guest mode */ 1512 /* Enter guest mode */
1560 "push %%rax \n\t" 1513 "push %%rax \n\t"
1561 "mov %c[svm](%[vcpu]), %%rax \n\t" 1514 "mov %c[vmcb](%[svm]), %%rax \n\t"
1562 "mov %c[vmcb](%%rax), %%rax \n\t"
1563 SVM_VMLOAD "\n\t" 1515 SVM_VMLOAD "\n\t"
1564 SVM_VMRUN "\n\t" 1516 SVM_VMRUN "\n\t"
1565 SVM_VMSAVE "\n\t" 1517 SVM_VMSAVE "\n\t"
@@ -1567,8 +1519,7 @@ again:
1567#else 1519#else
1568 /* Enter guest mode */ 1520 /* Enter guest mode */
1569 "push %%eax \n\t" 1521 "push %%eax \n\t"
1570 "mov %c[svm](%[vcpu]), %%eax \n\t" 1522 "mov %c[vmcb](%[svm]), %%eax \n\t"
1571 "mov %c[vmcb](%%eax), %%eax \n\t"
1572 SVM_VMLOAD "\n\t" 1523 SVM_VMLOAD "\n\t"
1573 SVM_VMRUN "\n\t" 1524 SVM_VMRUN "\n\t"
1574 SVM_VMSAVE "\n\t" 1525 SVM_VMSAVE "\n\t"
@@ -1577,73 +1528,69 @@ again:
1577 1528
1578 /* Save guest registers, load host registers */ 1529 /* Save guest registers, load host registers */
1579#ifdef CONFIG_X86_64 1530#ifdef CONFIG_X86_64
1580 "mov %%rbx, %c[rbx](%[vcpu]) \n\t" 1531 "mov %%rbx, %c[rbx](%[svm]) \n\t"
1581 "mov %%rcx, %c[rcx](%[vcpu]) \n\t" 1532 "mov %%rcx, %c[rcx](%[svm]) \n\t"
1582 "mov %%rdx, %c[rdx](%[vcpu]) \n\t" 1533 "mov %%rdx, %c[rdx](%[svm]) \n\t"
1583 "mov %%rsi, %c[rsi](%[vcpu]) \n\t" 1534 "mov %%rsi, %c[rsi](%[svm]) \n\t"
1584 "mov %%rdi, %c[rdi](%[vcpu]) \n\t" 1535 "mov %%rdi, %c[rdi](%[svm]) \n\t"
1585 "mov %%rbp, %c[rbp](%[vcpu]) \n\t" 1536 "mov %%rbp, %c[rbp](%[svm]) \n\t"
1586 "mov %%r8, %c[r8](%[vcpu]) \n\t" 1537 "mov %%r8, %c[r8](%[svm]) \n\t"
1587 "mov %%r9, %c[r9](%[vcpu]) \n\t" 1538 "mov %%r9, %c[r9](%[svm]) \n\t"
1588 "mov %%r10, %c[r10](%[vcpu]) \n\t" 1539 "mov %%r10, %c[r10](%[svm]) \n\t"
1589 "mov %%r11, %c[r11](%[vcpu]) \n\t" 1540 "mov %%r11, %c[r11](%[svm]) \n\t"
1590 "mov %%r12, %c[r12](%[vcpu]) \n\t" 1541 "mov %%r12, %c[r12](%[svm]) \n\t"
1591 "mov %%r13, %c[r13](%[vcpu]) \n\t" 1542 "mov %%r13, %c[r13](%[svm]) \n\t"
1592 "mov %%r14, %c[r14](%[vcpu]) \n\t" 1543 "mov %%r14, %c[r14](%[svm]) \n\t"
1593 "mov %%r15, %c[r15](%[vcpu]) \n\t" 1544 "mov %%r15, %c[r15](%[svm]) \n\t"
1594 1545
1595 "pop %%r15; pop %%r14; pop %%r13; pop %%r12;" 1546 "pop %%r15; pop %%r14; pop %%r13; pop %%r12;"
1596 "pop %%r11; pop %%r10; pop %%r9; pop %%r8;" 1547 "pop %%r11; pop %%r10; pop %%r9; pop %%r8;"
1597 "pop %%rbp; pop %%rdi; pop %%rsi;" 1548 "pop %%rbp; pop %%rdi; pop %%rsi;"
1598 "pop %%rdx; pop %%rcx; pop %%rbx; \n\t" 1549 "pop %%rdx; pop %%rcx; pop %%rbx; \n\t"
1599#else 1550#else
1600 "mov %%ebx, %c[rbx](%[vcpu]) \n\t" 1551 "mov %%ebx, %c[rbx](%[svm]) \n\t"
1601 "mov %%ecx, %c[rcx](%[vcpu]) \n\t" 1552 "mov %%ecx, %c[rcx](%[svm]) \n\t"
1602 "mov %%edx, %c[rdx](%[vcpu]) \n\t" 1553 "mov %%edx, %c[rdx](%[svm]) \n\t"
1603 "mov %%esi, %c[rsi](%[vcpu]) \n\t" 1554 "mov %%esi, %c[rsi](%[svm]) \n\t"
1604 "mov %%edi, %c[rdi](%[vcpu]) \n\t" 1555 "mov %%edi, %c[rdi](%[svm]) \n\t"
1605 "mov %%ebp, %c[rbp](%[vcpu]) \n\t" 1556 "mov %%ebp, %c[rbp](%[svm]) \n\t"
1606 1557
1607 "pop %%ebp; pop %%edi; pop %%esi;" 1558 "pop %%ebp; pop %%edi; pop %%esi;"
1608 "pop %%edx; pop %%ecx; pop %%ebx; \n\t" 1559 "pop %%edx; pop %%ecx; pop %%ebx; \n\t"
1609#endif 1560#endif
1610 : 1561 :
1611 : [vcpu]"a"(vcpu), 1562 : [svm]"a"(svm),
1612 [svm]"i"(offsetof(struct kvm_vcpu, svm)),
1613 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)), 1563 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
1614 [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])), 1564 [rbx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RBX])),
1615 [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])), 1565 [rcx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RCX])),
1616 [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])), 1566 [rdx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RDX])),
1617 [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])), 1567 [rsi]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RSI])),
1618 [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])), 1568 [rdi]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RDI])),
1619 [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])) 1569 [rbp]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RBP]))
1620#ifdef CONFIG_X86_64 1570#ifdef CONFIG_X86_64
1621 ,[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])), 1571 ,[r8 ]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R8])),
1622 [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])), 1572 [r9 ]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R9 ])),
1623 [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])), 1573 [r10]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R10])),
1624 [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])), 1574 [r11]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R11])),
1625 [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])), 1575 [r12]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R12])),
1626 [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])), 1576 [r13]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R13])),
1627 [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])), 1577 [r14]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R14])),
1628 [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])) 1578 [r15]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R15]))
1629#endif 1579#endif
1630 : "cc", "memory" ); 1580 : "cc", "memory" );
1631 1581
1632 vcpu->guest_mode = 0; 1582 local_irq_disable();
1633 1583
1634 if (vcpu->fpu_active) { 1584 stgi();
1635 fx_save(vcpu->guest_fx_image);
1636 fx_restore(vcpu->host_fx_image);
1637 }
1638 1585
1639 if ((vcpu->svm->vmcb->save.dr7 & 0xff)) 1586 if ((svm->vmcb->save.dr7 & 0xff))
1640 load_db_regs(vcpu->svm->host_db_regs); 1587 load_db_regs(svm->host_db_regs);
1641 1588
1642 vcpu->cr2 = vcpu->svm->vmcb->save.cr2; 1589 vcpu->cr2 = svm->vmcb->save.cr2;
1643 1590
1644 write_dr6(vcpu->svm->host_dr6); 1591 write_dr6(svm->host_dr6);
1645 write_dr7(vcpu->svm->host_dr7); 1592 write_dr7(svm->host_dr7);
1646 kvm_write_cr2(vcpu->svm->host_cr2); 1593 kvm_write_cr2(svm->host_cr2);
1647 1594
1648 load_fs(fs_selector); 1595 load_fs(fs_selector);
1649 load_gs(gs_selector); 1596 load_gs(gs_selector);
@@ -1652,57 +1599,19 @@ again:
1652 1599
1653 reload_tss(vcpu); 1600 reload_tss(vcpu);
1654 1601
1655 /* 1602 svm->next_rip = 0;
1656 * Profile KVM exit RIPs:
1657 */
1658 if (unlikely(prof_on == KVM_PROFILING))
1659 profile_hit(KVM_PROFILING,
1660 (void *)(unsigned long)vcpu->svm->vmcb->save.rip);
1661
1662 stgi();
1663
1664 kvm_reput_irq(vcpu);
1665
1666 vcpu->svm->next_rip = 0;
1667
1668 if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
1669 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
1670 kvm_run->fail_entry.hardware_entry_failure_reason
1671 = vcpu->svm->vmcb->control.exit_code;
1672 post_kvm_run_save(vcpu, kvm_run);
1673 return 0;
1674 }
1675
1676 r = handle_exit(vcpu, kvm_run);
1677 if (r > 0) {
1678 if (signal_pending(current)) {
1679 ++vcpu->stat.signal_exits;
1680 post_kvm_run_save(vcpu, kvm_run);
1681 kvm_run->exit_reason = KVM_EXIT_INTR;
1682 return -EINTR;
1683 }
1684
1685 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
1686 ++vcpu->stat.request_irq_exits;
1687 post_kvm_run_save(vcpu, kvm_run);
1688 kvm_run->exit_reason = KVM_EXIT_INTR;
1689 return -EINTR;
1690 }
1691 kvm_resched(vcpu);
1692 goto again;
1693 }
1694 post_kvm_run_save(vcpu, kvm_run);
1695 return r;
1696} 1603}
1697 1604
1698static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root) 1605static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
1699{ 1606{
1700 vcpu->svm->vmcb->save.cr3 = root; 1607 struct vcpu_svm *svm = to_svm(vcpu);
1608
1609 svm->vmcb->save.cr3 = root;
1701 force_new_asid(vcpu); 1610 force_new_asid(vcpu);
1702 1611
1703 if (vcpu->fpu_active) { 1612 if (vcpu->fpu_active) {
1704 vcpu->svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR); 1613 svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
1705 vcpu->svm->vmcb->save.cr0 |= CR0_TS_MASK; 1614 svm->vmcb->save.cr0 |= X86_CR0_TS;
1706 vcpu->fpu_active = 0; 1615 vcpu->fpu_active = 0;
1707 } 1616 }
1708} 1617}
@@ -1711,26 +1620,27 @@ static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
1711 unsigned long addr, 1620 unsigned long addr,
1712 uint32_t err_code) 1621 uint32_t err_code)
1713{ 1622{
1714 uint32_t exit_int_info = vcpu->svm->vmcb->control.exit_int_info; 1623 struct vcpu_svm *svm = to_svm(vcpu);
1624 uint32_t exit_int_info = svm->vmcb->control.exit_int_info;
1715 1625
1716 ++vcpu->stat.pf_guest; 1626 ++vcpu->stat.pf_guest;
1717 1627
1718 if (is_page_fault(exit_int_info)) { 1628 if (is_page_fault(exit_int_info)) {
1719 1629
1720 vcpu->svm->vmcb->control.event_inj_err = 0; 1630 svm->vmcb->control.event_inj_err = 0;
1721 vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | 1631 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
1722 SVM_EVTINJ_VALID_ERR | 1632 SVM_EVTINJ_VALID_ERR |
1723 SVM_EVTINJ_TYPE_EXEPT | 1633 SVM_EVTINJ_TYPE_EXEPT |
1724 DF_VECTOR; 1634 DF_VECTOR;
1725 return; 1635 return;
1726 } 1636 }
1727 vcpu->cr2 = addr; 1637 vcpu->cr2 = addr;
1728 vcpu->svm->vmcb->save.cr2 = addr; 1638 svm->vmcb->save.cr2 = addr;
1729 vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | 1639 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
1730 SVM_EVTINJ_VALID_ERR | 1640 SVM_EVTINJ_VALID_ERR |
1731 SVM_EVTINJ_TYPE_EXEPT | 1641 SVM_EVTINJ_TYPE_EXEPT |
1732 PF_VECTOR; 1642 PF_VECTOR;
1733 vcpu->svm->vmcb->control.event_inj_err = err_code; 1643 svm->vmcb->control.event_inj_err = err_code;
1734} 1644}
1735 1645
1736 1646
@@ -1757,17 +1667,25 @@ svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
1757 hypercall[3] = 0xc3; 1667 hypercall[3] = 0xc3;
1758} 1668}
1759 1669
1760static struct kvm_arch_ops svm_arch_ops = { 1670static void svm_check_processor_compat(void *rtn)
1671{
1672 *(int *)rtn = 0;
1673}
1674
1675static struct kvm_x86_ops svm_x86_ops = {
1761 .cpu_has_kvm_support = has_svm, 1676 .cpu_has_kvm_support = has_svm,
1762 .disabled_by_bios = is_disabled, 1677 .disabled_by_bios = is_disabled,
1763 .hardware_setup = svm_hardware_setup, 1678 .hardware_setup = svm_hardware_setup,
1764 .hardware_unsetup = svm_hardware_unsetup, 1679 .hardware_unsetup = svm_hardware_unsetup,
1680 .check_processor_compatibility = svm_check_processor_compat,
1765 .hardware_enable = svm_hardware_enable, 1681 .hardware_enable = svm_hardware_enable,
1766 .hardware_disable = svm_hardware_disable, 1682 .hardware_disable = svm_hardware_disable,
1767 1683
1768 .vcpu_create = svm_create_vcpu, 1684 .vcpu_create = svm_create_vcpu,
1769 .vcpu_free = svm_free_vcpu, 1685 .vcpu_free = svm_free_vcpu,
1686 .vcpu_reset = svm_vcpu_reset,
1770 1687
1688 .prepare_guest_switch = svm_prepare_guest_switch,
1771 .vcpu_load = svm_vcpu_load, 1689 .vcpu_load = svm_vcpu_load,
1772 .vcpu_put = svm_vcpu_put, 1690 .vcpu_put = svm_vcpu_put,
1773 .vcpu_decache = svm_vcpu_decache, 1691 .vcpu_decache = svm_vcpu_decache,
@@ -1778,7 +1696,7 @@ static struct kvm_arch_ops svm_arch_ops = {
1778 .get_segment_base = svm_get_segment_base, 1696 .get_segment_base = svm_get_segment_base,
1779 .get_segment = svm_get_segment, 1697 .get_segment = svm_get_segment,
1780 .set_segment = svm_set_segment, 1698 .set_segment = svm_set_segment,
1781 .get_cs_db_l_bits = svm_get_cs_db_l_bits, 1699 .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
1782 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits, 1700 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
1783 .set_cr0 = svm_set_cr0, 1701 .set_cr0 = svm_set_cr0,
1784 .set_cr3 = svm_set_cr3, 1702 .set_cr3 = svm_set_cr3,
@@ -1795,26 +1713,30 @@ static struct kvm_arch_ops svm_arch_ops = {
1795 .get_rflags = svm_get_rflags, 1713 .get_rflags = svm_get_rflags,
1796 .set_rflags = svm_set_rflags, 1714 .set_rflags = svm_set_rflags,
1797 1715
1798 .invlpg = svm_invlpg,
1799 .tlb_flush = svm_flush_tlb, 1716 .tlb_flush = svm_flush_tlb,
1800 .inject_page_fault = svm_inject_page_fault, 1717 .inject_page_fault = svm_inject_page_fault,
1801 1718
1802 .inject_gp = svm_inject_gp, 1719 .inject_gp = svm_inject_gp,
1803 1720
1804 .run = svm_vcpu_run, 1721 .run = svm_vcpu_run,
1722 .handle_exit = handle_exit,
1805 .skip_emulated_instruction = skip_emulated_instruction, 1723 .skip_emulated_instruction = skip_emulated_instruction,
1806 .vcpu_setup = svm_vcpu_setup,
1807 .patch_hypercall = svm_patch_hypercall, 1724 .patch_hypercall = svm_patch_hypercall,
1725 .get_irq = svm_get_irq,
1726 .set_irq = svm_set_irq,
1727 .inject_pending_irq = svm_intr_assist,
1728 .inject_pending_vectors = do_interrupt_requests,
1808}; 1729};
1809 1730
1810static int __init svm_init(void) 1731static int __init svm_init(void)
1811{ 1732{
1812 return kvm_init_arch(&svm_arch_ops, THIS_MODULE); 1733 return kvm_init_x86(&svm_x86_ops, sizeof(struct vcpu_svm),
1734 THIS_MODULE);
1813} 1735}
1814 1736
1815static void __exit svm_exit(void) 1737static void __exit svm_exit(void)
1816{ 1738{
1817 kvm_exit_arch(); 1739 kvm_exit_x86();
1818} 1740}
1819 1741
1820module_init(svm_init) 1742module_init(svm_init)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 80628f6991..4f115a8e45 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -16,6 +16,8 @@
16 */ 16 */
17 17
18#include "kvm.h" 18#include "kvm.h"
19#include "x86_emulate.h"
20#include "irq.h"
19#include "vmx.h" 21#include "vmx.h"
20#include "segment_descriptor.h" 22#include "segment_descriptor.h"
21 23
@@ -23,7 +25,6 @@
23#include <linux/kernel.h> 25#include <linux/kernel.h>
24#include <linux/mm.h> 26#include <linux/mm.h>
25#include <linux/highmem.h> 27#include <linux/highmem.h>
26#include <linux/profile.h>
27#include <linux/sched.h> 28#include <linux/sched.h>
28 29
29#include <asm/io.h> 30#include <asm/io.h>
@@ -32,6 +33,39 @@
32MODULE_AUTHOR("Qumranet"); 33MODULE_AUTHOR("Qumranet");
33MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
34 35
36struct vmcs {
37 u32 revision_id;
38 u32 abort;
39 char data[0];
40};
41
42struct vcpu_vmx {
43 struct kvm_vcpu vcpu;
44 int launched;
45 u8 fail;
46 struct kvm_msr_entry *guest_msrs;
47 struct kvm_msr_entry *host_msrs;
48 int nmsrs;
49 int save_nmsrs;
50 int msr_offset_efer;
51#ifdef CONFIG_X86_64
52 int msr_offset_kernel_gs_base;
53#endif
54 struct vmcs *vmcs;
55 struct {
56 int loaded;
57 u16 fs_sel, gs_sel, ldt_sel;
58 int gs_ldt_reload_needed;
59 int fs_reload_needed;
60 }host_state;
61
62};
63
64static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
65{
66 return container_of(vcpu, struct vcpu_vmx, vcpu);
67}
68
35static int init_rmode_tss(struct kvm *kvm); 69static int init_rmode_tss(struct kvm *kvm);
36 70
37static DEFINE_PER_CPU(struct vmcs *, vmxarea); 71static DEFINE_PER_CPU(struct vmcs *, vmxarea);
@@ -40,18 +74,17 @@ static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
40static struct page *vmx_io_bitmap_a; 74static struct page *vmx_io_bitmap_a;
41static struct page *vmx_io_bitmap_b; 75static struct page *vmx_io_bitmap_b;
42 76
43#ifdef CONFIG_X86_64
44#define HOST_IS_64 1
45#else
46#define HOST_IS_64 0
47#endif
48#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE) 77#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)
49 78
50static struct vmcs_descriptor { 79static struct vmcs_config {
51 int size; 80 int size;
52 int order; 81 int order;
53 u32 revision_id; 82 u32 revision_id;
54} vmcs_descriptor; 83 u32 pin_based_exec_ctrl;
84 u32 cpu_based_exec_ctrl;
85 u32 vmexit_ctrl;
86 u32 vmentry_ctrl;
87} vmcs_config;
55 88
56#define VMX_SEGMENT_FIELD(seg) \ 89#define VMX_SEGMENT_FIELD(seg) \
57 [VCPU_SREG_##seg] = { \ 90 [VCPU_SREG_##seg] = { \
@@ -89,16 +122,32 @@ static const u32 vmx_msr_index[] = {
89}; 122};
90#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) 123#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
91 124
92static inline u64 msr_efer_save_restore_bits(struct vmx_msr_entry msr) 125static void load_msrs(struct kvm_msr_entry *e, int n)
126{
127 int i;
128
129 for (i = 0; i < n; ++i)
130 wrmsrl(e[i].index, e[i].data);
131}
132
133static void save_msrs(struct kvm_msr_entry *e, int n)
134{
135 int i;
136
137 for (i = 0; i < n; ++i)
138 rdmsrl(e[i].index, e[i].data);
139}
140
141static inline u64 msr_efer_save_restore_bits(struct kvm_msr_entry msr)
93{ 142{
94 return (u64)msr.data & EFER_SAVE_RESTORE_BITS; 143 return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
95} 144}
96 145
97static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu) 146static inline int msr_efer_need_save_restore(struct vcpu_vmx *vmx)
98{ 147{
99 int efer_offset = vcpu->msr_offset_efer; 148 int efer_offset = vmx->msr_offset_efer;
100 return msr_efer_save_restore_bits(vcpu->host_msrs[efer_offset]) != 149 return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
101 msr_efer_save_restore_bits(vcpu->guest_msrs[efer_offset]); 150 msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
102} 151}
103 152
104static inline int is_page_fault(u32 intr_info) 153static inline int is_page_fault(u32 intr_info)
@@ -121,23 +170,33 @@ static inline int is_external_interrupt(u32 intr_info)
121 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); 170 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
122} 171}
123 172
124static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr) 173static inline int cpu_has_vmx_tpr_shadow(void)
174{
175 return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
176}
177
178static inline int vm_need_tpr_shadow(struct kvm *kvm)
179{
180 return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)));
181}
182
183static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
125{ 184{
126 int i; 185 int i;
127 186
128 for (i = 0; i < vcpu->nmsrs; ++i) 187 for (i = 0; i < vmx->nmsrs; ++i)
129 if (vcpu->guest_msrs[i].index == msr) 188 if (vmx->guest_msrs[i].index == msr)
130 return i; 189 return i;
131 return -1; 190 return -1;
132} 191}
133 192
134static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr) 193static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
135{ 194{
136 int i; 195 int i;
137 196
138 i = __find_msr_index(vcpu, msr); 197 i = __find_msr_index(vmx, msr);
139 if (i >= 0) 198 if (i >= 0)
140 return &vcpu->guest_msrs[i]; 199 return &vmx->guest_msrs[i];
141 return NULL; 200 return NULL;
142} 201}
143 202
@@ -156,23 +215,24 @@ static void vmcs_clear(struct vmcs *vmcs)
156 215
157static void __vcpu_clear(void *arg) 216static void __vcpu_clear(void *arg)
158{ 217{
159 struct kvm_vcpu *vcpu = arg; 218 struct vcpu_vmx *vmx = arg;
160 int cpu = raw_smp_processor_id(); 219 int cpu = raw_smp_processor_id();
161 220
162 if (vcpu->cpu == cpu) 221 if (vmx->vcpu.cpu == cpu)
163 vmcs_clear(vcpu->vmcs); 222 vmcs_clear(vmx->vmcs);
164 if (per_cpu(current_vmcs, cpu) == vcpu->vmcs) 223 if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
165 per_cpu(current_vmcs, cpu) = NULL; 224 per_cpu(current_vmcs, cpu) = NULL;
166 rdtscll(vcpu->host_tsc); 225 rdtscll(vmx->vcpu.host_tsc);
167} 226}
168 227
169static void vcpu_clear(struct kvm_vcpu *vcpu) 228static void vcpu_clear(struct vcpu_vmx *vmx)
170{ 229{
171 if (vcpu->cpu != raw_smp_processor_id() && vcpu->cpu != -1) 230 if (vmx->vcpu.cpu != raw_smp_processor_id() && vmx->vcpu.cpu != -1)
172 smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1); 231 smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear,
232 vmx, 0, 1);
173 else 233 else
174 __vcpu_clear(vcpu); 234 __vcpu_clear(vmx);
175 vcpu->launched = 0; 235 vmx->launched = 0;
176} 236}
177 237
178static unsigned long vmcs_readl(unsigned long field) 238static unsigned long vmcs_readl(unsigned long field)
@@ -282,121 +342,122 @@ static void reload_tss(void)
282#endif 342#endif
283} 343}
284 344
285static void load_transition_efer(struct kvm_vcpu *vcpu) 345static void load_transition_efer(struct vcpu_vmx *vmx)
286{ 346{
287 u64 trans_efer; 347 u64 trans_efer;
288 int efer_offset = vcpu->msr_offset_efer; 348 int efer_offset = vmx->msr_offset_efer;
289 349
290 trans_efer = vcpu->host_msrs[efer_offset].data; 350 trans_efer = vmx->host_msrs[efer_offset].data;
291 trans_efer &= ~EFER_SAVE_RESTORE_BITS; 351 trans_efer &= ~EFER_SAVE_RESTORE_BITS;
292 trans_efer |= msr_efer_save_restore_bits( 352 trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
293 vcpu->guest_msrs[efer_offset]);
294 wrmsrl(MSR_EFER, trans_efer); 353 wrmsrl(MSR_EFER, trans_efer);
295 vcpu->stat.efer_reload++; 354 vmx->vcpu.stat.efer_reload++;
296} 355}
297 356
298static void vmx_save_host_state(struct kvm_vcpu *vcpu) 357static void vmx_save_host_state(struct kvm_vcpu *vcpu)
299{ 358{
300 struct vmx_host_state *hs = &vcpu->vmx_host_state; 359 struct vcpu_vmx *vmx = to_vmx(vcpu);
301 360
302 if (hs->loaded) 361 if (vmx->host_state.loaded)
303 return; 362 return;
304 363
305 hs->loaded = 1; 364 vmx->host_state.loaded = 1;
306 /* 365 /*
307 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not 366 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
308 * allow segment selectors with cpl > 0 or ti == 1. 367 * allow segment selectors with cpl > 0 or ti == 1.
309 */ 368 */
310 hs->ldt_sel = read_ldt(); 369 vmx->host_state.ldt_sel = read_ldt();
311 hs->fs_gs_ldt_reload_needed = hs->ldt_sel; 370 vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
312 hs->fs_sel = read_fs(); 371 vmx->host_state.fs_sel = read_fs();
313 if (!(hs->fs_sel & 7)) 372 if (!(vmx->host_state.fs_sel & 7)) {
314 vmcs_write16(HOST_FS_SELECTOR, hs->fs_sel); 373 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
315 else { 374 vmx->host_state.fs_reload_needed = 0;
375 } else {
316 vmcs_write16(HOST_FS_SELECTOR, 0); 376 vmcs_write16(HOST_FS_SELECTOR, 0);
317 hs->fs_gs_ldt_reload_needed = 1; 377 vmx->host_state.fs_reload_needed = 1;
318 } 378 }
319 hs->gs_sel = read_gs(); 379 vmx->host_state.gs_sel = read_gs();
320 if (!(hs->gs_sel & 7)) 380 if (!(vmx->host_state.gs_sel & 7))
321 vmcs_write16(HOST_GS_SELECTOR, hs->gs_sel); 381 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
322 else { 382 else {
323 vmcs_write16(HOST_GS_SELECTOR, 0); 383 vmcs_write16(HOST_GS_SELECTOR, 0);
324 hs->fs_gs_ldt_reload_needed = 1; 384 vmx->host_state.gs_ldt_reload_needed = 1;
325 } 385 }
326 386
327#ifdef CONFIG_X86_64 387#ifdef CONFIG_X86_64
328 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); 388 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
329 vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); 389 vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
330#else 390#else
331 vmcs_writel(HOST_FS_BASE, segment_base(hs->fs_sel)); 391 vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
332 vmcs_writel(HOST_GS_BASE, segment_base(hs->gs_sel)); 392 vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
333#endif 393#endif
334 394
335#ifdef CONFIG_X86_64 395#ifdef CONFIG_X86_64
336 if (is_long_mode(vcpu)) { 396 if (is_long_mode(&vmx->vcpu)) {
337 save_msrs(vcpu->host_msrs + vcpu->msr_offset_kernel_gs_base, 1); 397 save_msrs(vmx->host_msrs +
398 vmx->msr_offset_kernel_gs_base, 1);
338 } 399 }
339#endif 400#endif
340 load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs); 401 load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
341 if (msr_efer_need_save_restore(vcpu)) 402 if (msr_efer_need_save_restore(vmx))
342 load_transition_efer(vcpu); 403 load_transition_efer(vmx);
343} 404}
344 405
345static void vmx_load_host_state(struct kvm_vcpu *vcpu) 406static void vmx_load_host_state(struct vcpu_vmx *vmx)
346{ 407{
347 struct vmx_host_state *hs = &vcpu->vmx_host_state; 408 unsigned long flags;
348 409
349 if (!hs->loaded) 410 if (!vmx->host_state.loaded)
350 return; 411 return;
351 412
352 hs->loaded = 0; 413 vmx->host_state.loaded = 0;
353 if (hs->fs_gs_ldt_reload_needed) { 414 if (vmx->host_state.fs_reload_needed)
354 load_ldt(hs->ldt_sel); 415 load_fs(vmx->host_state.fs_sel);
355 load_fs(hs->fs_sel); 416 if (vmx->host_state.gs_ldt_reload_needed) {
417 load_ldt(vmx->host_state.ldt_sel);
356 /* 418 /*
357 * If we have to reload gs, we must take care to 419 * If we have to reload gs, we must take care to
358 * preserve our gs base. 420 * preserve our gs base.
359 */ 421 */
360 local_irq_disable(); 422 local_irq_save(flags);
361 load_gs(hs->gs_sel); 423 load_gs(vmx->host_state.gs_sel);
362#ifdef CONFIG_X86_64 424#ifdef CONFIG_X86_64
363 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); 425 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
364#endif 426#endif
365 local_irq_enable(); 427 local_irq_restore(flags);
366
367 reload_tss();
368 } 428 }
369 save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs); 429 reload_tss();
370 load_msrs(vcpu->host_msrs, vcpu->save_nmsrs); 430 save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
371 if (msr_efer_need_save_restore(vcpu)) 431 load_msrs(vmx->host_msrs, vmx->save_nmsrs);
372 load_msrs(vcpu->host_msrs + vcpu->msr_offset_efer, 1); 432 if (msr_efer_need_save_restore(vmx))
433 load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
373} 434}
374 435
375/* 436/*
376 * Switches to specified vcpu, until a matching vcpu_put(), but assumes 437 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
377 * vcpu mutex is already taken. 438 * vcpu mutex is already taken.
378 */ 439 */
379static void vmx_vcpu_load(struct kvm_vcpu *vcpu) 440static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
380{ 441{
381 u64 phys_addr = __pa(vcpu->vmcs); 442 struct vcpu_vmx *vmx = to_vmx(vcpu);
382 int cpu; 443 u64 phys_addr = __pa(vmx->vmcs);
383 u64 tsc_this, delta; 444 u64 tsc_this, delta;
384 445
385 cpu = get_cpu(); 446 if (vcpu->cpu != cpu) {
386 447 vcpu_clear(vmx);
387 if (vcpu->cpu != cpu) 448 kvm_migrate_apic_timer(vcpu);
388 vcpu_clear(vcpu); 449 }
389 450
390 if (per_cpu(current_vmcs, cpu) != vcpu->vmcs) { 451 if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
391 u8 error; 452 u8 error;
392 453
393 per_cpu(current_vmcs, cpu) = vcpu->vmcs; 454 per_cpu(current_vmcs, cpu) = vmx->vmcs;
394 asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0" 455 asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
395 : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) 456 : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
396 : "cc"); 457 : "cc");
397 if (error) 458 if (error)
398 printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n", 459 printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
399 vcpu->vmcs, phys_addr); 460 vmx->vmcs, phys_addr);
400 } 461 }
401 462
402 if (vcpu->cpu != cpu) { 463 if (vcpu->cpu != cpu) {
@@ -426,9 +487,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
426 487
427static void vmx_vcpu_put(struct kvm_vcpu *vcpu) 488static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
428{ 489{
429 vmx_load_host_state(vcpu); 490 vmx_load_host_state(to_vmx(vcpu));
430 kvm_put_guest_fpu(vcpu); 491 kvm_put_guest_fpu(vcpu);
431 put_cpu();
432} 492}
433 493
434static void vmx_fpu_activate(struct kvm_vcpu *vcpu) 494static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
@@ -436,9 +496,9 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
436 if (vcpu->fpu_active) 496 if (vcpu->fpu_active)
437 return; 497 return;
438 vcpu->fpu_active = 1; 498 vcpu->fpu_active = 1;
439 vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK); 499 vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
440 if (vcpu->cr0 & CR0_TS_MASK) 500 if (vcpu->cr0 & X86_CR0_TS)
441 vmcs_set_bits(GUEST_CR0, CR0_TS_MASK); 501 vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
442 update_exception_bitmap(vcpu); 502 update_exception_bitmap(vcpu);
443} 503}
444 504
@@ -447,13 +507,13 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
447 if (!vcpu->fpu_active) 507 if (!vcpu->fpu_active)
448 return; 508 return;
449 vcpu->fpu_active = 0; 509 vcpu->fpu_active = 0;
450 vmcs_set_bits(GUEST_CR0, CR0_TS_MASK); 510 vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
451 update_exception_bitmap(vcpu); 511 update_exception_bitmap(vcpu);
452} 512}
453 513
454static void vmx_vcpu_decache(struct kvm_vcpu *vcpu) 514static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
455{ 515{
456 vcpu_clear(vcpu); 516 vcpu_clear(to_vmx(vcpu));
457} 517}
458 518
459static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) 519static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
@@ -501,59 +561,62 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
501/* 561/*
502 * Swap MSR entry in host/guest MSR entry array. 562 * Swap MSR entry in host/guest MSR entry array.
503 */ 563 */
504void move_msr_up(struct kvm_vcpu *vcpu, int from, int to) 564#ifdef CONFIG_X86_64
565static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
505{ 566{
506 struct vmx_msr_entry tmp; 567 struct kvm_msr_entry tmp;
507 tmp = vcpu->guest_msrs[to]; 568
508 vcpu->guest_msrs[to] = vcpu->guest_msrs[from]; 569 tmp = vmx->guest_msrs[to];
509 vcpu->guest_msrs[from] = tmp; 570 vmx->guest_msrs[to] = vmx->guest_msrs[from];
510 tmp = vcpu->host_msrs[to]; 571 vmx->guest_msrs[from] = tmp;
511 vcpu->host_msrs[to] = vcpu->host_msrs[from]; 572 tmp = vmx->host_msrs[to];
512 vcpu->host_msrs[from] = tmp; 573 vmx->host_msrs[to] = vmx->host_msrs[from];
574 vmx->host_msrs[from] = tmp;
513} 575}
576#endif
514 577
515/* 578/*
516 * Set up the vmcs to automatically save and restore system 579 * Set up the vmcs to automatically save and restore system
517 * msrs. Don't touch the 64-bit msrs if the guest is in legacy 580 * msrs. Don't touch the 64-bit msrs if the guest is in legacy
518 * mode, as fiddling with msrs is very expensive. 581 * mode, as fiddling with msrs is very expensive.
519 */ 582 */
520static void setup_msrs(struct kvm_vcpu *vcpu) 583static void setup_msrs(struct vcpu_vmx *vmx)
521{ 584{
522 int save_nmsrs; 585 int save_nmsrs;
523 586
524 save_nmsrs = 0; 587 save_nmsrs = 0;
525#ifdef CONFIG_X86_64 588#ifdef CONFIG_X86_64
526 if (is_long_mode(vcpu)) { 589 if (is_long_mode(&vmx->vcpu)) {
527 int index; 590 int index;
528 591
529 index = __find_msr_index(vcpu, MSR_SYSCALL_MASK); 592 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
530 if (index >= 0) 593 if (index >= 0)
531 move_msr_up(vcpu, index, save_nmsrs++); 594 move_msr_up(vmx, index, save_nmsrs++);
532 index = __find_msr_index(vcpu, MSR_LSTAR); 595 index = __find_msr_index(vmx, MSR_LSTAR);
533 if (index >= 0) 596 if (index >= 0)
534 move_msr_up(vcpu, index, save_nmsrs++); 597 move_msr_up(vmx, index, save_nmsrs++);
535 index = __find_msr_index(vcpu, MSR_CSTAR); 598 index = __find_msr_index(vmx, MSR_CSTAR);
536 if (index >= 0) 599 if (index >= 0)
537 move_msr_up(vcpu, index, save_nmsrs++); 600 move_msr_up(vmx, index, save_nmsrs++);
538 index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE); 601 index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
539 if (index >= 0) 602 if (index >= 0)
540 move_msr_up(vcpu, index, save_nmsrs++); 603 move_msr_up(vmx, index, save_nmsrs++);
541 /* 604 /*
542 * MSR_K6_STAR is only needed on long mode guests, and only 605 * MSR_K6_STAR is only needed on long mode guests, and only
543 * if efer.sce is enabled. 606 * if efer.sce is enabled.
544 */ 607 */
545 index = __find_msr_index(vcpu, MSR_K6_STAR); 608 index = __find_msr_index(vmx, MSR_K6_STAR);
546 if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE)) 609 if ((index >= 0) && (vmx->vcpu.shadow_efer & EFER_SCE))
547 move_msr_up(vcpu, index, save_nmsrs++); 610 move_msr_up(vmx, index, save_nmsrs++);
548 } 611 }
549#endif 612#endif
550 vcpu->save_nmsrs = save_nmsrs; 613 vmx->save_nmsrs = save_nmsrs;
551 614
552#ifdef CONFIG_X86_64 615#ifdef CONFIG_X86_64
553 vcpu->msr_offset_kernel_gs_base = 616 vmx->msr_offset_kernel_gs_base =
554 __find_msr_index(vcpu, MSR_KERNEL_GS_BASE); 617 __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
555#endif 618#endif
556 vcpu->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER); 619 vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
557} 620}
558 621
559/* 622/*
@@ -589,7 +652,7 @@ static void guest_write_tsc(u64 guest_tsc)
589static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) 652static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
590{ 653{
591 u64 data; 654 u64 data;
592 struct vmx_msr_entry *msr; 655 struct kvm_msr_entry *msr;
593 656
594 if (!pdata) { 657 if (!pdata) {
595 printk(KERN_ERR "BUG: get_msr called with NULL pdata\n"); 658 printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
@@ -620,7 +683,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
620 data = vmcs_readl(GUEST_SYSENTER_ESP); 683 data = vmcs_readl(GUEST_SYSENTER_ESP);
621 break; 684 break;
622 default: 685 default:
623 msr = find_msr_entry(vcpu, msr_index); 686 msr = find_msr_entry(to_vmx(vcpu), msr_index);
624 if (msr) { 687 if (msr) {
625 data = msr->data; 688 data = msr->data;
626 break; 689 break;
@@ -639,15 +702,16 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
639 */ 702 */
640static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) 703static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
641{ 704{
642 struct vmx_msr_entry *msr; 705 struct vcpu_vmx *vmx = to_vmx(vcpu);
706 struct kvm_msr_entry *msr;
643 int ret = 0; 707 int ret = 0;
644 708
645 switch (msr_index) { 709 switch (msr_index) {
646#ifdef CONFIG_X86_64 710#ifdef CONFIG_X86_64
647 case MSR_EFER: 711 case MSR_EFER:
648 ret = kvm_set_msr_common(vcpu, msr_index, data); 712 ret = kvm_set_msr_common(vcpu, msr_index, data);
649 if (vcpu->vmx_host_state.loaded) 713 if (vmx->host_state.loaded)
650 load_transition_efer(vcpu); 714 load_transition_efer(vmx);
651 break; 715 break;
652 case MSR_FS_BASE: 716 case MSR_FS_BASE:
653 vmcs_writel(GUEST_FS_BASE, data); 717 vmcs_writel(GUEST_FS_BASE, data);
@@ -669,11 +733,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
669 guest_write_tsc(data); 733 guest_write_tsc(data);
670 break; 734 break;
671 default: 735 default:
672 msr = find_msr_entry(vcpu, msr_index); 736 msr = find_msr_entry(vmx, msr_index);
673 if (msr) { 737 if (msr) {
674 msr->data = data; 738 msr->data = data;
675 if (vcpu->vmx_host_state.loaded) 739 if (vmx->host_state.loaded)
676 load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs); 740 load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
677 break; 741 break;
678 } 742 }
679 ret = kvm_set_msr_common(vcpu, msr_index, data); 743 ret = kvm_set_msr_common(vcpu, msr_index, data);
@@ -740,6 +804,20 @@ static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
740 return 0; 804 return 0;
741} 805}
742 806
807static int vmx_get_irq(struct kvm_vcpu *vcpu)
808{
809 u32 idtv_info_field;
810
811 idtv_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);
812 if (idtv_info_field & INTR_INFO_VALID_MASK) {
813 if (is_external_interrupt(idtv_info_field))
814 return idtv_info_field & VECTORING_INFO_VECTOR_MASK;
815 else
816 printk("pending exception: not handled yet\n");
817 }
818 return -1;
819}
820
743static __init int cpu_has_kvm_support(void) 821static __init int cpu_has_kvm_support(void)
744{ 822{
745 unsigned long ecx = cpuid_ecx(1); 823 unsigned long ecx = cpuid_ecx(1);
@@ -751,7 +829,10 @@ static __init int vmx_disabled_by_bios(void)
751 u64 msr; 829 u64 msr;
752 830
753 rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); 831 rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
754 return (msr & 5) == 1; /* locked but not enabled */ 832 return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED |
833 MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
834 == MSR_IA32_FEATURE_CONTROL_LOCKED;
835 /* locked but not enabled */
755} 836}
756 837
757static void hardware_enable(void *garbage) 838static void hardware_enable(void *garbage)
@@ -761,10 +842,15 @@ static void hardware_enable(void *garbage)
761 u64 old; 842 u64 old;
762 843
763 rdmsrl(MSR_IA32_FEATURE_CONTROL, old); 844 rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
764 if ((old & 5) != 5) 845 if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED |
846 MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
847 != (MSR_IA32_FEATURE_CONTROL_LOCKED |
848 MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
765 /* enable and lock */ 849 /* enable and lock */
766 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | 5); 850 wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
767 write_cr4(read_cr4() | CR4_VMXE); /* FIXME: not cpu hotplug safe */ 851 MSR_IA32_FEATURE_CONTROL_LOCKED |
852 MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED);
853 write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
768 asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr) 854 asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
769 : "memory", "cc"); 855 : "memory", "cc");
770} 856}
@@ -774,14 +860,102 @@ static void hardware_disable(void *garbage)
774 asm volatile (ASM_VMX_VMXOFF : : : "cc"); 860 asm volatile (ASM_VMX_VMXOFF : : : "cc");
775} 861}
776 862
777static __init void setup_vmcs_descriptor(void) 863static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
864 u32 msr, u32* result)
865{
866 u32 vmx_msr_low, vmx_msr_high;
867 u32 ctl = ctl_min | ctl_opt;
868
869 rdmsr(msr, vmx_msr_low, vmx_msr_high);
870
871 ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
872 ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */
873
874 /* Ensure minimum (required) set of control bits are supported. */
875 if (ctl_min & ~ctl)
876 return -EIO;
877
878 *result = ctl;
879 return 0;
880}
881
882static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
778{ 883{
779 u32 vmx_msr_low, vmx_msr_high; 884 u32 vmx_msr_low, vmx_msr_high;
885 u32 min, opt;
886 u32 _pin_based_exec_control = 0;
887 u32 _cpu_based_exec_control = 0;
888 u32 _vmexit_control = 0;
889 u32 _vmentry_control = 0;
890
891 min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
892 opt = 0;
893 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
894 &_pin_based_exec_control) < 0)
895 return -EIO;
896
897 min = CPU_BASED_HLT_EXITING |
898#ifdef CONFIG_X86_64
899 CPU_BASED_CR8_LOAD_EXITING |
900 CPU_BASED_CR8_STORE_EXITING |
901#endif
902 CPU_BASED_USE_IO_BITMAPS |
903 CPU_BASED_MOV_DR_EXITING |
904 CPU_BASED_USE_TSC_OFFSETING;
905#ifdef CONFIG_X86_64
906 opt = CPU_BASED_TPR_SHADOW;
907#else
908 opt = 0;
909#endif
910 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
911 &_cpu_based_exec_control) < 0)
912 return -EIO;
913#ifdef CONFIG_X86_64
914 if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
915 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
916 ~CPU_BASED_CR8_STORE_EXITING;
917#endif
918
919 min = 0;
920#ifdef CONFIG_X86_64
921 min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
922#endif
923 opt = 0;
924 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
925 &_vmexit_control) < 0)
926 return -EIO;
927
928 min = opt = 0;
929 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
930 &_vmentry_control) < 0)
931 return -EIO;
780 932
781 rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); 933 rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
782 vmcs_descriptor.size = vmx_msr_high & 0x1fff; 934
783 vmcs_descriptor.order = get_order(vmcs_descriptor.size); 935 /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
784 vmcs_descriptor.revision_id = vmx_msr_low; 936 if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
937 return -EIO;
938
939#ifdef CONFIG_X86_64
940 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
941 if (vmx_msr_high & (1u<<16))
942 return -EIO;
943#endif
944
945 /* Require Write-Back (WB) memory type for VMCS accesses. */
946 if (((vmx_msr_high >> 18) & 15) != 6)
947 return -EIO;
948
949 vmcs_conf->size = vmx_msr_high & 0x1fff;
950 vmcs_conf->order = get_order(vmcs_config.size);
951 vmcs_conf->revision_id = vmx_msr_low;
952
953 vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
954 vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
955 vmcs_conf->vmexit_ctrl = _vmexit_control;
956 vmcs_conf->vmentry_ctrl = _vmentry_control;
957
958 return 0;
785} 959}
786 960
787static struct vmcs *alloc_vmcs_cpu(int cpu) 961static struct vmcs *alloc_vmcs_cpu(int cpu)
@@ -790,12 +964,12 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
790 struct page *pages; 964 struct page *pages;
791 struct vmcs *vmcs; 965 struct vmcs *vmcs;
792 966
793 pages = alloc_pages_node(node, GFP_KERNEL, vmcs_descriptor.order); 967 pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
794 if (!pages) 968 if (!pages)
795 return NULL; 969 return NULL;
796 vmcs = page_address(pages); 970 vmcs = page_address(pages);
797 memset(vmcs, 0, vmcs_descriptor.size); 971 memset(vmcs, 0, vmcs_config.size);
798 vmcs->revision_id = vmcs_descriptor.revision_id; /* vmcs revision id */ 972 vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
799 return vmcs; 973 return vmcs;
800} 974}
801 975
@@ -806,7 +980,7 @@ static struct vmcs *alloc_vmcs(void)
806 980
807static void free_vmcs(struct vmcs *vmcs) 981static void free_vmcs(struct vmcs *vmcs)
808{ 982{
809 free_pages((unsigned long)vmcs, vmcs_descriptor.order); 983 free_pages((unsigned long)vmcs, vmcs_config.order);
810} 984}
811 985
812static void free_kvm_area(void) 986static void free_kvm_area(void)
@@ -817,8 +991,6 @@ static void free_kvm_area(void)
817 free_vmcs(per_cpu(vmxarea, cpu)); 991 free_vmcs(per_cpu(vmxarea, cpu));
818} 992}
819 993
820extern struct vmcs *alloc_vmcs_cpu(int cpu);
821
822static __init int alloc_kvm_area(void) 994static __init int alloc_kvm_area(void)
823{ 995{
824 int cpu; 996 int cpu;
@@ -839,7 +1011,8 @@ static __init int alloc_kvm_area(void)
839 1011
840static __init int hardware_setup(void) 1012static __init int hardware_setup(void)
841{ 1013{
842 setup_vmcs_descriptor(); 1014 if (setup_vmcs_config(&vmcs_config) < 0)
1015 return -EIO;
843 return alloc_kvm_area(); 1016 return alloc_kvm_area();
844} 1017}
845 1018
@@ -879,8 +1052,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
879 flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT); 1052 flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
880 vmcs_writel(GUEST_RFLAGS, flags); 1053 vmcs_writel(GUEST_RFLAGS, flags);
881 1054
882 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~CR4_VME_MASK) | 1055 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
883 (vmcs_readl(CR4_READ_SHADOW) & CR4_VME_MASK)); 1056 (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
884 1057
885 update_exception_bitmap(vcpu); 1058 update_exception_bitmap(vcpu);
886 1059
@@ -897,7 +1070,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
897 vmcs_write32(GUEST_CS_AR_BYTES, 0x9b); 1070 vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
898} 1071}
899 1072
900static int rmode_tss_base(struct kvm* kvm) 1073static gva_t rmode_tss_base(struct kvm* kvm)
901{ 1074{
902 gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3; 1075 gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
903 return base_gfn << PAGE_SHIFT; 1076 return base_gfn << PAGE_SHIFT;
@@ -937,7 +1110,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
937 flags |= IOPL_MASK | X86_EFLAGS_VM; 1110 flags |= IOPL_MASK | X86_EFLAGS_VM;
938 1111
939 vmcs_writel(GUEST_RFLAGS, flags); 1112 vmcs_writel(GUEST_RFLAGS, flags);
940 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | CR4_VME_MASK); 1113 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
941 update_exception_bitmap(vcpu); 1114 update_exception_bitmap(vcpu);
942 1115
943 vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4); 1116 vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
@@ -975,10 +1148,10 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
975 1148
976 vcpu->shadow_efer |= EFER_LMA; 1149 vcpu->shadow_efer |= EFER_LMA;
977 1150
978 find_msr_entry(vcpu, MSR_EFER)->data |= EFER_LMA | EFER_LME; 1151 find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
979 vmcs_write32(VM_ENTRY_CONTROLS, 1152 vmcs_write32(VM_ENTRY_CONTROLS,
980 vmcs_read32(VM_ENTRY_CONTROLS) 1153 vmcs_read32(VM_ENTRY_CONTROLS)
981 | VM_ENTRY_CONTROLS_IA32E_MASK); 1154 | VM_ENTRY_IA32E_MODE);
982} 1155}
983 1156
984static void exit_lmode(struct kvm_vcpu *vcpu) 1157static void exit_lmode(struct kvm_vcpu *vcpu)
@@ -987,7 +1160,7 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
987 1160
988 vmcs_write32(VM_ENTRY_CONTROLS, 1161 vmcs_write32(VM_ENTRY_CONTROLS,
989 vmcs_read32(VM_ENTRY_CONTROLS) 1162 vmcs_read32(VM_ENTRY_CONTROLS)
990 & ~VM_ENTRY_CONTROLS_IA32E_MASK); 1163 & ~VM_ENTRY_IA32E_MODE);
991} 1164}
992 1165
993#endif 1166#endif
@@ -1002,17 +1175,17 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1002{ 1175{
1003 vmx_fpu_deactivate(vcpu); 1176 vmx_fpu_deactivate(vcpu);
1004 1177
1005 if (vcpu->rmode.active && (cr0 & CR0_PE_MASK)) 1178 if (vcpu->rmode.active && (cr0 & X86_CR0_PE))
1006 enter_pmode(vcpu); 1179 enter_pmode(vcpu);
1007 1180
1008 if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK)) 1181 if (!vcpu->rmode.active && !(cr0 & X86_CR0_PE))
1009 enter_rmode(vcpu); 1182 enter_rmode(vcpu);
1010 1183
1011#ifdef CONFIG_X86_64 1184#ifdef CONFIG_X86_64
1012 if (vcpu->shadow_efer & EFER_LME) { 1185 if (vcpu->shadow_efer & EFER_LME) {
1013 if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) 1186 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
1014 enter_lmode(vcpu); 1187 enter_lmode(vcpu);
1015 if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK)) 1188 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
1016 exit_lmode(vcpu); 1189 exit_lmode(vcpu);
1017 } 1190 }
1018#endif 1191#endif
@@ -1022,14 +1195,14 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1022 (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON); 1195 (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
1023 vcpu->cr0 = cr0; 1196 vcpu->cr0 = cr0;
1024 1197
1025 if (!(cr0 & CR0_TS_MASK) || !(cr0 & CR0_PE_MASK)) 1198 if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
1026 vmx_fpu_activate(vcpu); 1199 vmx_fpu_activate(vcpu);
1027} 1200}
1028 1201
1029static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 1202static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1030{ 1203{
1031 vmcs_writel(GUEST_CR3, cr3); 1204 vmcs_writel(GUEST_CR3, cr3);
1032 if (vcpu->cr0 & CR0_PE_MASK) 1205 if (vcpu->cr0 & X86_CR0_PE)
1033 vmx_fpu_deactivate(vcpu); 1206 vmx_fpu_deactivate(vcpu);
1034} 1207}
1035 1208
@@ -1045,23 +1218,24 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1045 1218
1046static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) 1219static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
1047{ 1220{
1048 struct vmx_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER); 1221 struct vcpu_vmx *vmx = to_vmx(vcpu);
1222 struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
1049 1223
1050 vcpu->shadow_efer = efer; 1224 vcpu->shadow_efer = efer;
1051 if (efer & EFER_LMA) { 1225 if (efer & EFER_LMA) {
1052 vmcs_write32(VM_ENTRY_CONTROLS, 1226 vmcs_write32(VM_ENTRY_CONTROLS,
1053 vmcs_read32(VM_ENTRY_CONTROLS) | 1227 vmcs_read32(VM_ENTRY_CONTROLS) |
1054 VM_ENTRY_CONTROLS_IA32E_MASK); 1228 VM_ENTRY_IA32E_MODE);
1055 msr->data = efer; 1229 msr->data = efer;
1056 1230
1057 } else { 1231 } else {
1058 vmcs_write32(VM_ENTRY_CONTROLS, 1232 vmcs_write32(VM_ENTRY_CONTROLS,
1059 vmcs_read32(VM_ENTRY_CONTROLS) & 1233 vmcs_read32(VM_ENTRY_CONTROLS) &
1060 ~VM_ENTRY_CONTROLS_IA32E_MASK); 1234 ~VM_ENTRY_IA32E_MODE);
1061 1235
1062 msr->data = efer & ~EFER_LME; 1236 msr->data = efer & ~EFER_LME;
1063 } 1237 }
1064 setup_msrs(vcpu); 1238 setup_msrs(vmx);
1065} 1239}
1066 1240
1067#endif 1241#endif
@@ -1210,17 +1384,6 @@ static int init_rmode_tss(struct kvm* kvm)
1210 return 1; 1384 return 1;
1211} 1385}
1212 1386
1213static void vmcs_write32_fixedbits(u32 msr, u32 vmcs_field, u32 val)
1214{
1215 u32 msr_high, msr_low;
1216
1217 rdmsr(msr, msr_low, msr_high);
1218
1219 val &= msr_high;
1220 val |= msr_low;
1221 vmcs_write32(vmcs_field, val);
1222}
1223
1224static void seg_setup(int seg) 1387static void seg_setup(int seg)
1225{ 1388{
1226 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 1389 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -1234,7 +1397,7 @@ static void seg_setup(int seg)
1234/* 1397/*
1235 * Sets up the vmcs for emulated real mode. 1398 * Sets up the vmcs for emulated real mode.
1236 */ 1399 */
1237static int vmx_vcpu_setup(struct kvm_vcpu *vcpu) 1400static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
1238{ 1401{
1239 u32 host_sysenter_cs; 1402 u32 host_sysenter_cs;
1240 u32 junk; 1403 u32 junk;
@@ -1243,27 +1406,36 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1243 int i; 1406 int i;
1244 int ret = 0; 1407 int ret = 0;
1245 unsigned long kvm_vmx_return; 1408 unsigned long kvm_vmx_return;
1409 u64 msr;
1410 u32 exec_control;
1246 1411
1247 if (!init_rmode_tss(vcpu->kvm)) { 1412 if (!init_rmode_tss(vmx->vcpu.kvm)) {
1248 ret = -ENOMEM; 1413 ret = -ENOMEM;
1249 goto out; 1414 goto out;
1250 } 1415 }
1251 1416
1252 memset(vcpu->regs, 0, sizeof(vcpu->regs)); 1417 vmx->vcpu.rmode.active = 0;
1253 vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
1254 vcpu->cr8 = 0;
1255 vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
1256 if (vcpu == &vcpu->kvm->vcpus[0])
1257 vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
1258 1418
1259 fx_init(vcpu); 1419 vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
1420 set_cr8(&vmx->vcpu, 0);
1421 msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
1422 if (vmx->vcpu.vcpu_id == 0)
1423 msr |= MSR_IA32_APICBASE_BSP;
1424 kvm_set_apic_base(&vmx->vcpu, msr);
1425
1426 fx_init(&vmx->vcpu);
1260 1427
1261 /* 1428 /*
1262 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode 1429 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
1263 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh. 1430 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
1264 */ 1431 */
1265 vmcs_write16(GUEST_CS_SELECTOR, 0xf000); 1432 if (vmx->vcpu.vcpu_id == 0) {
1266 vmcs_writel(GUEST_CS_BASE, 0x000f0000); 1433 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
1434 vmcs_writel(GUEST_CS_BASE, 0x000f0000);
1435 } else {
1436 vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.sipi_vector << 8);
1437 vmcs_writel(GUEST_CS_BASE, vmx->vcpu.sipi_vector << 12);
1438 }
1267 vmcs_write32(GUEST_CS_LIMIT, 0xffff); 1439 vmcs_write32(GUEST_CS_LIMIT, 0xffff);
1268 vmcs_write32(GUEST_CS_AR_BYTES, 0x9b); 1440 vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
1269 1441
@@ -1288,7 +1460,10 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1288 vmcs_writel(GUEST_SYSENTER_EIP, 0); 1460 vmcs_writel(GUEST_SYSENTER_EIP, 0);
1289 1461
1290 vmcs_writel(GUEST_RFLAGS, 0x02); 1462 vmcs_writel(GUEST_RFLAGS, 0x02);
1291 vmcs_writel(GUEST_RIP, 0xfff0); 1463 if (vmx->vcpu.vcpu_id == 0)
1464 vmcs_writel(GUEST_RIP, 0xfff0);
1465 else
1466 vmcs_writel(GUEST_RIP, 0);
1292 vmcs_writel(GUEST_RSP, 0); 1467 vmcs_writel(GUEST_RSP, 0);
1293 1468
1294 //todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 1469 //todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0
@@ -1316,20 +1491,18 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1316 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 1491 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
1317 1492
1318 /* Control */ 1493 /* Control */
1319 vmcs_write32_fixedbits(MSR_IA32_VMX_PINBASED_CTLS, 1494 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
1320 PIN_BASED_VM_EXEC_CONTROL, 1495 vmcs_config.pin_based_exec_ctrl);
1321 PIN_BASED_EXT_INTR_MASK /* 20.6.1 */ 1496
1322 | PIN_BASED_NMI_EXITING /* 20.6.1 */ 1497 exec_control = vmcs_config.cpu_based_exec_ctrl;
1323 ); 1498 if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
1324 vmcs_write32_fixedbits(MSR_IA32_VMX_PROCBASED_CTLS, 1499 exec_control &= ~CPU_BASED_TPR_SHADOW;
1325 CPU_BASED_VM_EXEC_CONTROL, 1500#ifdef CONFIG_X86_64
1326 CPU_BASED_HLT_EXITING /* 20.6.2 */ 1501 exec_control |= CPU_BASED_CR8_STORE_EXITING |
1327 | CPU_BASED_CR8_LOAD_EXITING /* 20.6.2 */ 1502 CPU_BASED_CR8_LOAD_EXITING;
1328 | CPU_BASED_CR8_STORE_EXITING /* 20.6.2 */ 1503#endif
1329 | CPU_BASED_ACTIVATE_IO_BITMAP /* 20.6.2 */ 1504 }
1330 | CPU_BASED_MOV_DR_EXITING 1505 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
1331 | CPU_BASED_USE_TSC_OFFSETING /* 21.3 */
1332 );
1333 1506
1334 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); 1507 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
1335 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); 1508 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
@@ -1377,46 +1550,48 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1377 u32 index = vmx_msr_index[i]; 1550 u32 index = vmx_msr_index[i];
1378 u32 data_low, data_high; 1551 u32 data_low, data_high;
1379 u64 data; 1552 u64 data;
1380 int j = vcpu->nmsrs; 1553 int j = vmx->nmsrs;
1381 1554
1382 if (rdmsr_safe(index, &data_low, &data_high) < 0) 1555 if (rdmsr_safe(index, &data_low, &data_high) < 0)
1383 continue; 1556 continue;
1384 if (wrmsr_safe(index, data_low, data_high) < 0) 1557 if (wrmsr_safe(index, data_low, data_high) < 0)
1385 continue; 1558 continue;
1386 data = data_low | ((u64)data_high << 32); 1559 data = data_low | ((u64)data_high << 32);
1387 vcpu->host_msrs[j].index = index; 1560 vmx->host_msrs[j].index = index;
1388 vcpu->host_msrs[j].reserved = 0; 1561 vmx->host_msrs[j].reserved = 0;
1389 vcpu->host_msrs[j].data = data; 1562 vmx->host_msrs[j].data = data;
1390 vcpu->guest_msrs[j] = vcpu->host_msrs[j]; 1563 vmx->guest_msrs[j] = vmx->host_msrs[j];
1391 ++vcpu->nmsrs; 1564 ++vmx->nmsrs;
1392 } 1565 }
1393 1566
1394 setup_msrs(vcpu); 1567 setup_msrs(vmx);
1395 1568
1396 vmcs_write32_fixedbits(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_CONTROLS, 1569 vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
1397 (HOST_IS_64 << 9)); /* 22.2,1, 20.7.1 */
1398 1570
1399 /* 22.2.1, 20.8.1 */ 1571 /* 22.2.1, 20.8.1 */
1400 vmcs_write32_fixedbits(MSR_IA32_VMX_ENTRY_CTLS, 1572 vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
1401 VM_ENTRY_CONTROLS, 0); 1573
1402 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ 1574 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
1403 1575
1404#ifdef CONFIG_X86_64 1576#ifdef CONFIG_X86_64
1405 vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0); 1577 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
1406 vmcs_writel(TPR_THRESHOLD, 0); 1578 if (vm_need_tpr_shadow(vmx->vcpu.kvm))
1579 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
1580 page_to_phys(vmx->vcpu.apic->regs_page));
1581 vmcs_write32(TPR_THRESHOLD, 0);
1407#endif 1582#endif
1408 1583
1409 vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); 1584 vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
1410 vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK); 1585 vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
1411 1586
1412 vcpu->cr0 = 0x60000010; 1587 vmx->vcpu.cr0 = 0x60000010;
1413 vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode 1588 vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); // enter rmode
1414 vmx_set_cr4(vcpu, 0); 1589 vmx_set_cr4(&vmx->vcpu, 0);
1415#ifdef CONFIG_X86_64 1590#ifdef CONFIG_X86_64
1416 vmx_set_efer(vcpu, 0); 1591 vmx_set_efer(&vmx->vcpu, 0);
1417#endif 1592#endif
1418 vmx_fpu_activate(vcpu); 1593 vmx_fpu_activate(&vmx->vcpu);
1419 update_exception_bitmap(vcpu); 1594 update_exception_bitmap(&vmx->vcpu);
1420 1595
1421 return 0; 1596 return 0;
1422 1597
@@ -1424,6 +1599,13 @@ out:
1424 return ret; 1599 return ret;
1425} 1600}
1426 1601
1602static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
1603{
1604 struct vcpu_vmx *vmx = to_vmx(vcpu);
1605
1606 vmx_vcpu_setup(vmx);
1607}
1608
1427static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq) 1609static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
1428{ 1610{
1429 u16 ent[2]; 1611 u16 ent[2];
@@ -1443,8 +1625,8 @@ static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
1443 return; 1625 return;
1444 } 1626 }
1445 1627
1446 if (kvm_read_guest(vcpu, irq * sizeof(ent), sizeof(ent), &ent) != 1628 if (emulator_read_std(irq * sizeof(ent), &ent, sizeof(ent), vcpu) !=
1447 sizeof(ent)) { 1629 X86EMUL_CONTINUE) {
1448 vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__); 1630 vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
1449 return; 1631 return;
1450 } 1632 }
@@ -1454,9 +1636,9 @@ static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
1454 ip = vmcs_readl(GUEST_RIP); 1636 ip = vmcs_readl(GUEST_RIP);
1455 1637
1456 1638
1457 if (kvm_write_guest(vcpu, ss_base + sp - 2, 2, &flags) != 2 || 1639 if (emulator_write_emulated(ss_base + sp - 2, &flags, 2, vcpu) != X86EMUL_CONTINUE ||
1458 kvm_write_guest(vcpu, ss_base + sp - 4, 2, &cs) != 2 || 1640 emulator_write_emulated(ss_base + sp - 4, &cs, 2, vcpu) != X86EMUL_CONTINUE ||
1459 kvm_write_guest(vcpu, ss_base + sp - 6, 2, &ip) != 2) { 1641 emulator_write_emulated(ss_base + sp - 6, &ip, 2, vcpu) != X86EMUL_CONTINUE) {
1460 vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__); 1642 vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
1461 return; 1643 return;
1462 } 1644 }
@@ -1469,6 +1651,16 @@ static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
1469 vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6)); 1651 vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6));
1470} 1652}
1471 1653
1654static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
1655{
1656 if (vcpu->rmode.active) {
1657 inject_rmode_irq(vcpu, irq);
1658 return;
1659 }
1660 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
1661 irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1662}
1663
1472static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) 1664static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
1473{ 1665{
1474 int word_index = __ffs(vcpu->irq_summary); 1666 int word_index = __ffs(vcpu->irq_summary);
@@ -1478,13 +1670,7 @@ static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
1478 clear_bit(bit_index, &vcpu->irq_pending[word_index]); 1670 clear_bit(bit_index, &vcpu->irq_pending[word_index]);
1479 if (!vcpu->irq_pending[word_index]) 1671 if (!vcpu->irq_pending[word_index])
1480 clear_bit(word_index, &vcpu->irq_summary); 1672 clear_bit(word_index, &vcpu->irq_summary);
1481 1673 vmx_inject_irq(vcpu, irq);
1482 if (vcpu->rmode.active) {
1483 inject_rmode_irq(vcpu, irq);
1484 return;
1485 }
1486 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
1487 irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1488} 1674}
1489 1675
1490 1676
@@ -1568,7 +1754,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1568 "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info); 1754 "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
1569 } 1755 }
1570 1756
1571 if (is_external_interrupt(vect_info)) { 1757 if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
1572 int irq = vect_info & VECTORING_INFO_VECTOR_MASK; 1758 int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
1573 set_bit(irq, vcpu->irq_pending); 1759 set_bit(irq, vcpu->irq_pending);
1574 set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary); 1760 set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
@@ -1591,29 +1777,28 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1591 if (is_page_fault(intr_info)) { 1777 if (is_page_fault(intr_info)) {
1592 cr2 = vmcs_readl(EXIT_QUALIFICATION); 1778 cr2 = vmcs_readl(EXIT_QUALIFICATION);
1593 1779
1594 spin_lock(&vcpu->kvm->lock); 1780 mutex_lock(&vcpu->kvm->lock);
1595 r = kvm_mmu_page_fault(vcpu, cr2, error_code); 1781 r = kvm_mmu_page_fault(vcpu, cr2, error_code);
1596 if (r < 0) { 1782 if (r < 0) {
1597 spin_unlock(&vcpu->kvm->lock); 1783 mutex_unlock(&vcpu->kvm->lock);
1598 return r; 1784 return r;
1599 } 1785 }
1600 if (!r) { 1786 if (!r) {
1601 spin_unlock(&vcpu->kvm->lock); 1787 mutex_unlock(&vcpu->kvm->lock);
1602 return 1; 1788 return 1;
1603 } 1789 }
1604 1790
1605 er = emulate_instruction(vcpu, kvm_run, cr2, error_code); 1791 er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
1606 spin_unlock(&vcpu->kvm->lock); 1792 mutex_unlock(&vcpu->kvm->lock);
1607 1793
1608 switch (er) { 1794 switch (er) {
1609 case EMULATE_DONE: 1795 case EMULATE_DONE:
1610 return 1; 1796 return 1;
1611 case EMULATE_DO_MMIO: 1797 case EMULATE_DO_MMIO:
1612 ++vcpu->stat.mmio_exits; 1798 ++vcpu->stat.mmio_exits;
1613 kvm_run->exit_reason = KVM_EXIT_MMIO;
1614 return 0; 1799 return 0;
1615 case EMULATE_FAIL: 1800 case EMULATE_FAIL:
1616 vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__); 1801 kvm_report_emulation_failure(vcpu, "pagetable");
1617 break; 1802 break;
1618 default: 1803 default:
1619 BUG(); 1804 BUG();
@@ -1653,80 +1838,29 @@ static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1653 return 0; 1838 return 0;
1654} 1839}
1655 1840
1656static int get_io_count(struct kvm_vcpu *vcpu, unsigned long *count)
1657{
1658 u64 inst;
1659 gva_t rip;
1660 int countr_size;
1661 int i, n;
1662
1663 if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_VM)) {
1664 countr_size = 2;
1665 } else {
1666 u32 cs_ar = vmcs_read32(GUEST_CS_AR_BYTES);
1667
1668 countr_size = (cs_ar & AR_L_MASK) ? 8:
1669 (cs_ar & AR_DB_MASK) ? 4: 2;
1670 }
1671
1672 rip = vmcs_readl(GUEST_RIP);
1673 if (countr_size != 8)
1674 rip += vmcs_readl(GUEST_CS_BASE);
1675
1676 n = kvm_read_guest(vcpu, rip, sizeof(inst), &inst);
1677
1678 for (i = 0; i < n; i++) {
1679 switch (((u8*)&inst)[i]) {
1680 case 0xf0:
1681 case 0xf2:
1682 case 0xf3:
1683 case 0x2e:
1684 case 0x36:
1685 case 0x3e:
1686 case 0x26:
1687 case 0x64:
1688 case 0x65:
1689 case 0x66:
1690 break;
1691 case 0x67:
1692 countr_size = (countr_size == 2) ? 4: (countr_size >> 1);
1693 default:
1694 goto done;
1695 }
1696 }
1697 return 0;
1698done:
1699 countr_size *= 8;
1700 *count = vcpu->regs[VCPU_REGS_RCX] & (~0ULL >> (64 - countr_size));
1701 //printk("cx: %lx\n", vcpu->regs[VCPU_REGS_RCX]);
1702 return 1;
1703}
1704
1705static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1841static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1706{ 1842{
1707 u64 exit_qualification; 1843 unsigned long exit_qualification;
1708 int size, down, in, string, rep; 1844 int size, down, in, string, rep;
1709 unsigned port; 1845 unsigned port;
1710 unsigned long count;
1711 gva_t address;
1712 1846
1713 ++vcpu->stat.io_exits; 1847 ++vcpu->stat.io_exits;
1714 exit_qualification = vmcs_read64(EXIT_QUALIFICATION); 1848 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
1715 in = (exit_qualification & 8) != 0;
1716 size = (exit_qualification & 7) + 1;
1717 string = (exit_qualification & 16) != 0; 1849 string = (exit_qualification & 16) != 0;
1850
1851 if (string) {
1852 if (emulate_instruction(vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO)
1853 return 0;
1854 return 1;
1855 }
1856
1857 size = (exit_qualification & 7) + 1;
1858 in = (exit_qualification & 8) != 0;
1718 down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0; 1859 down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
1719 count = 1;
1720 rep = (exit_qualification & 32) != 0; 1860 rep = (exit_qualification & 32) != 0;
1721 port = exit_qualification >> 16; 1861 port = exit_qualification >> 16;
1722 address = 0; 1862
1723 if (string) { 1863 return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
1724 if (rep && !get_io_count(vcpu, &count))
1725 return 1;
1726 address = vmcs_readl(GUEST_LINEAR_ADDRESS);
1727 }
1728 return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
1729 address, rep, port);
1730} 1864}
1731 1865
1732static void 1866static void
@@ -1743,11 +1877,11 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
1743 1877
1744static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1878static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1745{ 1879{
1746 u64 exit_qualification; 1880 unsigned long exit_qualification;
1747 int cr; 1881 int cr;
1748 int reg; 1882 int reg;
1749 1883
1750 exit_qualification = vmcs_read64(EXIT_QUALIFICATION); 1884 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
1751 cr = exit_qualification & 15; 1885 cr = exit_qualification & 15;
1752 reg = (exit_qualification >> 8) & 15; 1886 reg = (exit_qualification >> 8) & 15;
1753 switch ((exit_qualification >> 4) & 3) { 1887 switch ((exit_qualification >> 4) & 3) {
@@ -1772,13 +1906,14 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1772 vcpu_load_rsp_rip(vcpu); 1906 vcpu_load_rsp_rip(vcpu);
1773 set_cr8(vcpu, vcpu->regs[reg]); 1907 set_cr8(vcpu, vcpu->regs[reg]);
1774 skip_emulated_instruction(vcpu); 1908 skip_emulated_instruction(vcpu);
1775 return 1; 1909 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
1910 return 0;
1776 }; 1911 };
1777 break; 1912 break;
1778 case 2: /* clts */ 1913 case 2: /* clts */
1779 vcpu_load_rsp_rip(vcpu); 1914 vcpu_load_rsp_rip(vcpu);
1780 vmx_fpu_deactivate(vcpu); 1915 vmx_fpu_deactivate(vcpu);
1781 vcpu->cr0 &= ~CR0_TS_MASK; 1916 vcpu->cr0 &= ~X86_CR0_TS;
1782 vmcs_writel(CR0_READ_SHADOW, vcpu->cr0); 1917 vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
1783 vmx_fpu_activate(vcpu); 1918 vmx_fpu_activate(vcpu);
1784 skip_emulated_instruction(vcpu); 1919 skip_emulated_instruction(vcpu);
@@ -1793,7 +1928,7 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1793 return 1; 1928 return 1;
1794 case 8: 1929 case 8:
1795 vcpu_load_rsp_rip(vcpu); 1930 vcpu_load_rsp_rip(vcpu);
1796 vcpu->regs[reg] = vcpu->cr8; 1931 vcpu->regs[reg] = get_cr8(vcpu);
1797 vcpu_put_rsp_rip(vcpu); 1932 vcpu_put_rsp_rip(vcpu);
1798 skip_emulated_instruction(vcpu); 1933 skip_emulated_instruction(vcpu);
1799 return 1; 1934 return 1;
@@ -1808,14 +1943,14 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1808 break; 1943 break;
1809 } 1944 }
1810 kvm_run->exit_reason = 0; 1945 kvm_run->exit_reason = 0;
1811 printk(KERN_ERR "kvm: unhandled control register: op %d cr %d\n", 1946 pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
1812 (int)(exit_qualification >> 4) & 3, cr); 1947 (int)(exit_qualification >> 4) & 3, cr);
1813 return 0; 1948 return 0;
1814} 1949}
1815 1950
1816static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1951static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1817{ 1952{
1818 u64 exit_qualification; 1953 unsigned long exit_qualification;
1819 unsigned long val; 1954 unsigned long val;
1820 int dr, reg; 1955 int dr, reg;
1821 1956
@@ -1823,7 +1958,7 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1823 * FIXME: this code assumes the host is debugging the guest. 1958 * FIXME: this code assumes the host is debugging the guest.
1824 * need to deal with guest debugging itself too. 1959 * need to deal with guest debugging itself too.
1825 */ 1960 */
1826 exit_qualification = vmcs_read64(EXIT_QUALIFICATION); 1961 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
1827 dr = exit_qualification & 7; 1962 dr = exit_qualification & 7;
1828 reg = (exit_qualification >> 8) & 15; 1963 reg = (exit_qualification >> 8) & 15;
1829 vcpu_load_rsp_rip(vcpu); 1964 vcpu_load_rsp_rip(vcpu);
@@ -1886,19 +2021,21 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1886 return 1; 2021 return 1;
1887} 2022}
1888 2023
1889static void post_kvm_run_save(struct kvm_vcpu *vcpu, 2024static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
1890 struct kvm_run *kvm_run) 2025 struct kvm_run *kvm_run)
1891{ 2026{
1892 kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0; 2027 return 1;
1893 kvm_run->cr8 = vcpu->cr8;
1894 kvm_run->apic_base = vcpu->apic_base;
1895 kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
1896 vcpu->irq_summary == 0);
1897} 2028}
1898 2029
1899static int handle_interrupt_window(struct kvm_vcpu *vcpu, 2030static int handle_interrupt_window(struct kvm_vcpu *vcpu,
1900 struct kvm_run *kvm_run) 2031 struct kvm_run *kvm_run)
1901{ 2032{
2033 u32 cpu_based_vm_exec_control;
2034
2035 /* clear pending irq */
2036 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2037 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
2038 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
1902 /* 2039 /*
1903 * If the user space waits to inject interrupts, exit as soon as 2040 * If the user space waits to inject interrupts, exit as soon as
1904 * possible 2041 * possible
@@ -1943,6 +2080,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
1943 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, 2080 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
1944 [EXIT_REASON_HLT] = handle_halt, 2081 [EXIT_REASON_HLT] = handle_halt,
1945 [EXIT_REASON_VMCALL] = handle_vmcall, 2082 [EXIT_REASON_VMCALL] = handle_vmcall,
2083 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold
1946}; 2084};
1947 2085
1948static const int kvm_vmx_max_exit_handlers = 2086static const int kvm_vmx_max_exit_handlers =
@@ -1956,6 +2094,14 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1956{ 2094{
1957 u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); 2095 u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
1958 u32 exit_reason = vmcs_read32(VM_EXIT_REASON); 2096 u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
2097 struct vcpu_vmx *vmx = to_vmx(vcpu);
2098
2099 if (unlikely(vmx->fail)) {
2100 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2101 kvm_run->fail_entry.hardware_entry_failure_reason
2102 = vmcs_read32(VM_INSTRUCTION_ERROR);
2103 return 0;
2104 }
1959 2105
1960 if ( (vectoring_info & VECTORING_INFO_VALID_MASK) && 2106 if ( (vectoring_info & VECTORING_INFO_VALID_MASK) &&
1961 exit_reason != EXIT_REASON_EXCEPTION_NMI ) 2107 exit_reason != EXIT_REASON_EXCEPTION_NMI )
@@ -1971,57 +2117,91 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1971 return 0; 2117 return 0;
1972} 2118}
1973 2119
1974/* 2120static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
1975 * Check if userspace requested an interrupt window, and that the
1976 * interrupt window is open.
1977 *
1978 * No need to exit to userspace if we already have an interrupt queued.
1979 */
1980static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
1981 struct kvm_run *kvm_run)
1982{ 2121{
1983 return (!vcpu->irq_summary &&
1984 kvm_run->request_interrupt_window &&
1985 vcpu->interrupt_window_open &&
1986 (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
1987} 2122}
1988 2123
1989static void vmx_flush_tlb(struct kvm_vcpu *vcpu) 2124static void update_tpr_threshold(struct kvm_vcpu *vcpu)
1990{ 2125{
2126 int max_irr, tpr;
2127
2128 if (!vm_need_tpr_shadow(vcpu->kvm))
2129 return;
2130
2131 if (!kvm_lapic_enabled(vcpu) ||
2132 ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
2133 vmcs_write32(TPR_THRESHOLD, 0);
2134 return;
2135 }
2136
2137 tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
2138 vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
1991} 2139}
1992 2140
1993static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 2141static void enable_irq_window(struct kvm_vcpu *vcpu)
1994{ 2142{
1995 u8 fail; 2143 u32 cpu_based_vm_exec_control;
1996 int r;
1997 2144
1998preempted: 2145 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
1999 if (vcpu->guest_debug.enabled) 2146 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
2000 kvm_guest_debug_pre(vcpu); 2147 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2148}
2001 2149
2002again: 2150static void vmx_intr_assist(struct kvm_vcpu *vcpu)
2003 if (!vcpu->mmio_read_completed) 2151{
2004 do_interrupt_requests(vcpu, kvm_run); 2152 u32 idtv_info_field, intr_info_field;
2153 int has_ext_irq, interrupt_window_open;
2154 int vector;
2005 2155
2006 vmx_save_host_state(vcpu); 2156 kvm_inject_pending_timer_irqs(vcpu);
2007 kvm_load_guest_fpu(vcpu); 2157 update_tpr_threshold(vcpu);
2008 2158
2009 r = kvm_mmu_reload(vcpu); 2159 has_ext_irq = kvm_cpu_has_interrupt(vcpu);
2010 if (unlikely(r)) 2160 intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
2011 goto out; 2161 idtv_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);
2162 if (intr_info_field & INTR_INFO_VALID_MASK) {
2163 if (idtv_info_field & INTR_INFO_VALID_MASK) {
2164 /* TODO: fault when IDT_Vectoring */
2165 printk(KERN_ERR "Fault when IDT_Vectoring\n");
2166 }
2167 if (has_ext_irq)
2168 enable_irq_window(vcpu);
2169 return;
2170 }
2171 if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
2172 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
2173 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2174 vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
2175
2176 if (unlikely(idtv_info_field & INTR_INFO_DELIEVER_CODE_MASK))
2177 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2178 vmcs_read32(IDT_VECTORING_ERROR_CODE));
2179 if (unlikely(has_ext_irq))
2180 enable_irq_window(vcpu);
2181 return;
2182 }
2183 if (!has_ext_irq)
2184 return;
2185 interrupt_window_open =
2186 ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
2187 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
2188 if (interrupt_window_open) {
2189 vector = kvm_cpu_get_interrupt(vcpu);
2190 vmx_inject_irq(vcpu, vector);
2191 kvm_timer_intr_post(vcpu, vector);
2192 } else
2193 enable_irq_window(vcpu);
2194}
2195
2196static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2197{
2198 struct vcpu_vmx *vmx = to_vmx(vcpu);
2012 2199
2013 /* 2200 /*
2014 * Loading guest fpu may have cleared host cr0.ts 2201 * Loading guest fpu may have cleared host cr0.ts
2015 */ 2202 */
2016 vmcs_writel(HOST_CR0, read_cr0()); 2203 vmcs_writel(HOST_CR0, read_cr0());
2017 2204
2018 local_irq_disable();
2019
2020 vcpu->guest_mode = 1;
2021 if (vcpu->requests)
2022 if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
2023 vmx_flush_tlb(vcpu);
2024
2025 asm ( 2205 asm (
2026 /* Store host registers */ 2206 /* Store host registers */
2027#ifdef CONFIG_X86_64 2207#ifdef CONFIG_X86_64
@@ -2115,8 +2295,8 @@ again:
2115 "pop %%ecx; popa \n\t" 2295 "pop %%ecx; popa \n\t"
2116#endif 2296#endif
2117 "setbe %0 \n\t" 2297 "setbe %0 \n\t"
2118 : "=q" (fail) 2298 : "=q" (vmx->fail)
2119 : "r"(vcpu->launched), "d"((unsigned long)HOST_RSP), 2299 : "r"(vmx->launched), "d"((unsigned long)HOST_RSP),
2120 "c"(vcpu), 2300 "c"(vcpu),
2121 [rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])), 2301 [rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
2122 [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])), 2302 [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
@@ -2138,59 +2318,10 @@ again:
2138 [cr2]"i"(offsetof(struct kvm_vcpu, cr2)) 2318 [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
2139 : "cc", "memory" ); 2319 : "cc", "memory" );
2140 2320
2141 vcpu->guest_mode = 0;
2142 local_irq_enable();
2143
2144 ++vcpu->stat.exits;
2145
2146 vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0; 2321 vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
2147 2322
2148 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); 2323 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
2149 2324 vmx->launched = 1;
2150 if (unlikely(fail)) {
2151 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2152 kvm_run->fail_entry.hardware_entry_failure_reason
2153 = vmcs_read32(VM_INSTRUCTION_ERROR);
2154 r = 0;
2155 goto out;
2156 }
2157 /*
2158 * Profile KVM exit RIPs:
2159 */
2160 if (unlikely(prof_on == KVM_PROFILING))
2161 profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
2162
2163 vcpu->launched = 1;
2164 r = kvm_handle_exit(kvm_run, vcpu);
2165 if (r > 0) {
2166 /* Give scheduler a change to reschedule. */
2167 if (signal_pending(current)) {
2168 r = -EINTR;
2169 kvm_run->exit_reason = KVM_EXIT_INTR;
2170 ++vcpu->stat.signal_exits;
2171 goto out;
2172 }
2173
2174 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
2175 r = -EINTR;
2176 kvm_run->exit_reason = KVM_EXIT_INTR;
2177 ++vcpu->stat.request_irq_exits;
2178 goto out;
2179 }
2180 if (!need_resched()) {
2181 ++vcpu->stat.light_exits;
2182 goto again;
2183 }
2184 }
2185
2186out:
2187 if (r > 0) {
2188 kvm_resched(vcpu);
2189 goto preempted;
2190 }
2191
2192 post_kvm_run_save(vcpu, kvm_run);
2193 return r;
2194} 2325}
2195 2326
2196static void vmx_inject_page_fault(struct kvm_vcpu *vcpu, 2327static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
@@ -2225,67 +2356,118 @@ static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
2225 2356
2226static void vmx_free_vmcs(struct kvm_vcpu *vcpu) 2357static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
2227{ 2358{
2228 if (vcpu->vmcs) { 2359 struct vcpu_vmx *vmx = to_vmx(vcpu);
2229 on_each_cpu(__vcpu_clear, vcpu, 0, 1); 2360
2230 free_vmcs(vcpu->vmcs); 2361 if (vmx->vmcs) {
2231 vcpu->vmcs = NULL; 2362 on_each_cpu(__vcpu_clear, vmx, 0, 1);
2363 free_vmcs(vmx->vmcs);
2364 vmx->vmcs = NULL;
2232 } 2365 }
2233} 2366}
2234 2367
2235static void vmx_free_vcpu(struct kvm_vcpu *vcpu) 2368static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
2236{ 2369{
2370 struct vcpu_vmx *vmx = to_vmx(vcpu);
2371
2237 vmx_free_vmcs(vcpu); 2372 vmx_free_vmcs(vcpu);
2373 kfree(vmx->host_msrs);
2374 kfree(vmx->guest_msrs);
2375 kvm_vcpu_uninit(vcpu);
2376 kmem_cache_free(kvm_vcpu_cache, vmx);
2238} 2377}
2239 2378
2240static int vmx_create_vcpu(struct kvm_vcpu *vcpu) 2379static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
2241{ 2380{
2242 struct vmcs *vmcs; 2381 int err;
2382 struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
2383 int cpu;
2243 2384
2244 vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); 2385 if (!vmx)
2245 if (!vcpu->guest_msrs) 2386 return ERR_PTR(-ENOMEM);
2246 return -ENOMEM;
2247 2387
2248 vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); 2388 err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
2249 if (!vcpu->host_msrs) 2389 if (err)
2250 goto out_free_guest_msrs; 2390 goto free_vcpu;
2251 2391
2252 vmcs = alloc_vmcs(); 2392 if (irqchip_in_kernel(kvm)) {
2253 if (!vmcs) 2393 err = kvm_create_lapic(&vmx->vcpu);
2254 goto out_free_msrs; 2394 if (err < 0)
2395 goto free_vcpu;
2396 }
2255 2397
2256 vmcs_clear(vmcs); 2398 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2257 vcpu->vmcs = vmcs; 2399 if (!vmx->guest_msrs) {
2258 vcpu->launched = 0; 2400 err = -ENOMEM;
2401 goto uninit_vcpu;
2402 }
2259 2403
2260 return 0; 2404 vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2405 if (!vmx->host_msrs)
2406 goto free_guest_msrs;
2261 2407
2262out_free_msrs: 2408 vmx->vmcs = alloc_vmcs();
2263 kfree(vcpu->host_msrs); 2409 if (!vmx->vmcs)
2264 vcpu->host_msrs = NULL; 2410 goto free_msrs;
2265 2411
2266out_free_guest_msrs: 2412 vmcs_clear(vmx->vmcs);
2267 kfree(vcpu->guest_msrs);
2268 vcpu->guest_msrs = NULL;
2269 2413
2270 return -ENOMEM; 2414 cpu = get_cpu();
2415 vmx_vcpu_load(&vmx->vcpu, cpu);
2416 err = vmx_vcpu_setup(vmx);
2417 vmx_vcpu_put(&vmx->vcpu);
2418 put_cpu();
2419 if (err)
2420 goto free_vmcs;
2421
2422 return &vmx->vcpu;
2423
2424free_vmcs:
2425 free_vmcs(vmx->vmcs);
2426free_msrs:
2427 kfree(vmx->host_msrs);
2428free_guest_msrs:
2429 kfree(vmx->guest_msrs);
2430uninit_vcpu:
2431 kvm_vcpu_uninit(&vmx->vcpu);
2432free_vcpu:
2433 kmem_cache_free(kvm_vcpu_cache, vmx);
2434 return ERR_PTR(err);
2435}
2436
2437static void __init vmx_check_processor_compat(void *rtn)
2438{
2439 struct vmcs_config vmcs_conf;
2440
2441 *(int *)rtn = 0;
2442 if (setup_vmcs_config(&vmcs_conf) < 0)
2443 *(int *)rtn = -EIO;
2444 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
2445 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
2446 smp_processor_id());
2447 *(int *)rtn = -EIO;
2448 }
2271} 2449}
2272 2450
2273static struct kvm_arch_ops vmx_arch_ops = { 2451static struct kvm_x86_ops vmx_x86_ops = {
2274 .cpu_has_kvm_support = cpu_has_kvm_support, 2452 .cpu_has_kvm_support = cpu_has_kvm_support,
2275 .disabled_by_bios = vmx_disabled_by_bios, 2453 .disabled_by_bios = vmx_disabled_by_bios,
2276 .hardware_setup = hardware_setup, 2454 .hardware_setup = hardware_setup,
2277 .hardware_unsetup = hardware_unsetup, 2455 .hardware_unsetup = hardware_unsetup,
2456 .check_processor_compatibility = vmx_check_processor_compat,
2278 .hardware_enable = hardware_enable, 2457 .hardware_enable = hardware_enable,
2279 .hardware_disable = hardware_disable, 2458 .hardware_disable = hardware_disable,
2280 2459
2281 .vcpu_create = vmx_create_vcpu, 2460 .vcpu_create = vmx_create_vcpu,
2282 .vcpu_free = vmx_free_vcpu, 2461 .vcpu_free = vmx_free_vcpu,
2462 .vcpu_reset = vmx_vcpu_reset,
2283 2463
2464 .prepare_guest_switch = vmx_save_host_state,
2284 .vcpu_load = vmx_vcpu_load, 2465 .vcpu_load = vmx_vcpu_load,
2285 .vcpu_put = vmx_vcpu_put, 2466 .vcpu_put = vmx_vcpu_put,
2286 .vcpu_decache = vmx_vcpu_decache, 2467 .vcpu_decache = vmx_vcpu_decache,
2287 2468
2288 .set_guest_debug = set_guest_debug, 2469 .set_guest_debug = set_guest_debug,
2470 .guest_debug_pre = kvm_guest_debug_pre,
2289 .get_msr = vmx_get_msr, 2471 .get_msr = vmx_get_msr,
2290 .set_msr = vmx_set_msr, 2472 .set_msr = vmx_set_msr,
2291 .get_segment_base = vmx_get_segment_base, 2473 .get_segment_base = vmx_get_segment_base,
@@ -2314,9 +2496,13 @@ static struct kvm_arch_ops vmx_arch_ops = {
2314 .inject_gp = vmx_inject_gp, 2496 .inject_gp = vmx_inject_gp,
2315 2497
2316 .run = vmx_vcpu_run, 2498 .run = vmx_vcpu_run,
2499 .handle_exit = kvm_handle_exit,
2317 .skip_emulated_instruction = skip_emulated_instruction, 2500 .skip_emulated_instruction = skip_emulated_instruction,
2318 .vcpu_setup = vmx_vcpu_setup,
2319 .patch_hypercall = vmx_patch_hypercall, 2501 .patch_hypercall = vmx_patch_hypercall,
2502 .get_irq = vmx_get_irq,
2503 .set_irq = vmx_inject_irq,
2504 .inject_pending_irq = vmx_intr_assist,
2505 .inject_pending_vectors = do_interrupt_requests,
2320}; 2506};
2321 2507
2322static int __init vmx_init(void) 2508static int __init vmx_init(void)
@@ -2347,7 +2533,7 @@ static int __init vmx_init(void)
2347 memset(iova, 0xff, PAGE_SIZE); 2533 memset(iova, 0xff, PAGE_SIZE);
2348 kunmap(vmx_io_bitmap_b); 2534 kunmap(vmx_io_bitmap_b);
2349 2535
2350 r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE); 2536 r = kvm_init_x86(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
2351 if (r) 2537 if (r)
2352 goto out1; 2538 goto out1;
2353 2539
@@ -2365,7 +2551,7 @@ static void __exit vmx_exit(void)
2365 __free_page(vmx_io_bitmap_b); 2551 __free_page(vmx_io_bitmap_b);
2366 __free_page(vmx_io_bitmap_a); 2552 __free_page(vmx_io_bitmap_a);
2367 2553
2368 kvm_exit_arch(); 2554 kvm_exit_x86();
2369} 2555}
2370 2556
2371module_init(vmx_init) 2557module_init(vmx_init)
diff --git a/drivers/kvm/vmx.h b/drivers/kvm/vmx.h
index d0dc93df41..fd4e146660 100644
--- a/drivers/kvm/vmx.h
+++ b/drivers/kvm/vmx.h
@@ -25,29 +25,36 @@
25 * 25 *
26 */ 26 */
27 27
28#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 28#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004
29#define CPU_BASED_USE_TSC_OFFSETING 0x00000008 29#define CPU_BASED_USE_TSC_OFFSETING 0x00000008
30#define CPU_BASED_HLT_EXITING 0x00000080 30#define CPU_BASED_HLT_EXITING 0x00000080
31#define CPU_BASED_INVDPG_EXITING 0x00000200 31#define CPU_BASED_INVLPG_EXITING 0x00000200
32#define CPU_BASED_MWAIT_EXITING 0x00000400 32#define CPU_BASED_MWAIT_EXITING 0x00000400
33#define CPU_BASED_RDPMC_EXITING 0x00000800 33#define CPU_BASED_RDPMC_EXITING 0x00000800
34#define CPU_BASED_RDTSC_EXITING 0x00001000 34#define CPU_BASED_RDTSC_EXITING 0x00001000
35#define CPU_BASED_CR8_LOAD_EXITING 0x00080000 35#define CPU_BASED_CR8_LOAD_EXITING 0x00080000
36#define CPU_BASED_CR8_STORE_EXITING 0x00100000 36#define CPU_BASED_CR8_STORE_EXITING 0x00100000
37#define CPU_BASED_TPR_SHADOW 0x00200000 37#define CPU_BASED_TPR_SHADOW 0x00200000
38#define CPU_BASED_MOV_DR_EXITING 0x00800000 38#define CPU_BASED_MOV_DR_EXITING 0x00800000
39#define CPU_BASED_UNCOND_IO_EXITING 0x01000000 39#define CPU_BASED_UNCOND_IO_EXITING 0x01000000
40#define CPU_BASED_ACTIVATE_IO_BITMAP 0x02000000 40#define CPU_BASED_USE_IO_BITMAPS 0x02000000
41#define CPU_BASED_MSR_BITMAPS 0x10000000 41#define CPU_BASED_USE_MSR_BITMAPS 0x10000000
42#define CPU_BASED_MONITOR_EXITING 0x20000000 42#define CPU_BASED_MONITOR_EXITING 0x20000000
43#define CPU_BASED_PAUSE_EXITING 0x40000000 43#define CPU_BASED_PAUSE_EXITING 0x40000000
44#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000
44 45
45#define PIN_BASED_EXT_INTR_MASK 0x1 46#define PIN_BASED_EXT_INTR_MASK 0x00000001
46#define PIN_BASED_NMI_EXITING 0x8 47#define PIN_BASED_NMI_EXITING 0x00000008
48#define PIN_BASED_VIRTUAL_NMIS 0x00000020
47 49
48#define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 50#define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200
49#define VM_EXIT_HOST_ADD_SPACE_SIZE 0x00000200 51#define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
50 52
53#define VM_ENTRY_IA32E_MODE 0x00000200
54#define VM_ENTRY_SMM 0x00000400
55#define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800
56
57#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
51 58
52/* VMCS Encodings */ 59/* VMCS Encodings */
53enum vmcs_field { 60enum vmcs_field {
@@ -206,6 +213,7 @@ enum vmcs_field {
206#define EXIT_REASON_MSR_READ 31 213#define EXIT_REASON_MSR_READ 31
207#define EXIT_REASON_MSR_WRITE 32 214#define EXIT_REASON_MSR_WRITE 32
208#define EXIT_REASON_MWAIT_INSTRUCTION 36 215#define EXIT_REASON_MWAIT_INSTRUCTION 36
216#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
209 217
210/* 218/*
211 * Interruption-information format 219 * Interruption-information format
@@ -261,9 +269,6 @@ enum vmcs_field {
261/* segment AR */ 269/* segment AR */
262#define SEGMENT_AR_L_MASK (1 << 13) 270#define SEGMENT_AR_L_MASK (1 << 13)
263 271
264/* entry controls */
265#define VM_ENTRY_CONTROLS_IA32E_MASK (1 << 9)
266
267#define AR_TYPE_ACCESSES_MASK 1 272#define AR_TYPE_ACCESSES_MASK 1
268#define AR_TYPE_READABLE_MASK (1 << 1) 273#define AR_TYPE_READABLE_MASK (1 << 1)
269#define AR_TYPE_WRITEABLE_MASK (1 << 2) 274#define AR_TYPE_WRITEABLE_MASK (1 << 2)
@@ -285,13 +290,21 @@ enum vmcs_field {
285 290
286#define AR_RESERVD_MASK 0xfffe0f00 291#define AR_RESERVD_MASK 0xfffe0f00
287 292
288#define CR4_VMXE 0x2000 293#define MSR_IA32_VMX_BASIC 0x480
294#define MSR_IA32_VMX_PINBASED_CTLS 0x481
295#define MSR_IA32_VMX_PROCBASED_CTLS 0x482
296#define MSR_IA32_VMX_EXIT_CTLS 0x483
297#define MSR_IA32_VMX_ENTRY_CTLS 0x484
298#define MSR_IA32_VMX_MISC 0x485
299#define MSR_IA32_VMX_CR0_FIXED0 0x486
300#define MSR_IA32_VMX_CR0_FIXED1 0x487
301#define MSR_IA32_VMX_CR4_FIXED0 0x488
302#define MSR_IA32_VMX_CR4_FIXED1 0x489
303#define MSR_IA32_VMX_VMCS_ENUM 0x48a
304#define MSR_IA32_VMX_PROCBASED_CTLS2 0x48b
289 305
290#define MSR_IA32_VMX_BASIC 0x480 306#define MSR_IA32_FEATURE_CONTROL 0x3a
291#define MSR_IA32_FEATURE_CONTROL 0x03a 307#define MSR_IA32_FEATURE_CONTROL_LOCKED 0x1
292#define MSR_IA32_VMX_PINBASED_CTLS 0x481 308#define MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED 0x4
293#define MSR_IA32_VMX_PROCBASED_CTLS 0x482
294#define MSR_IA32_VMX_EXIT_CTLS 0x483
295#define MSR_IA32_VMX_ENTRY_CTLS 0x484
296 309
297#endif 310#endif
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index 4b8a0cc966..9737c3b2f4 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -6,7 +6,7 @@
6 * Copyright (c) 2005 Keir Fraser 6 * Copyright (c) 2005 Keir Fraser
7 * 7 *
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode 8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privieged instructions: 9 * privileged instructions:
10 * 10 *
11 * Copyright (C) 2006 Qumranet 11 * Copyright (C) 2006 Qumranet
12 * 12 *
@@ -83,7 +83,7 @@ static u8 opcode_table[256] = {
83 /* 0x20 - 0x27 */ 83 /* 0x20 - 0x27 */
84 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 84 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
85 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, 85 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
86 0, 0, 0, 0, 86 SrcImmByte, SrcImm, 0, 0,
87 /* 0x28 - 0x2F */ 87 /* 0x28 - 0x2F */
88 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 88 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
89 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, 89 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
@@ -99,15 +99,24 @@ static u8 opcode_table[256] = {
99 /* 0x40 - 0x4F */ 99 /* 0x40 - 0x4F */
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 /* 0x50 - 0x57 */ 101 /* 0x50 - 0x57 */
102 0, 0, 0, 0, 0, 0, 0, 0, 102 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
103 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
103 /* 0x58 - 0x5F */ 104 /* 0x58 - 0x5F */
104 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 105 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
105 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 106 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
106 /* 0x60 - 0x6F */ 107 /* 0x60 - 0x67 */
107 0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ , 108 0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 109 0, 0, 0, 0,
109 /* 0x70 - 0x7F */ 110 /* 0x68 - 0x6F */
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 111 0, 0, ImplicitOps|Mov, 0,
112 SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* insb, insw/insd */
113 SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* outsb, outsw/outsd */
114 /* 0x70 - 0x77 */
115 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
116 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
117 /* 0x78 - 0x7F */
118 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
119 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
111 /* 0x80 - 0x87 */ 120 /* 0x80 - 0x87 */
112 ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM, 121 ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
113 ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM, 122 ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
@@ -116,9 +125,9 @@ static u8 opcode_table[256] = {
116 /* 0x88 - 0x8F */ 125 /* 0x88 - 0x8F */
117 ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov, 126 ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
118 ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, 127 ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
119 0, 0, 0, DstMem | SrcNone | ModRM | Mov, 128 0, ModRM | DstReg, 0, DstMem | SrcNone | ModRM | Mov,
120 /* 0x90 - 0x9F */ 129 /* 0x90 - 0x9F */
121 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 130 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps, ImplicitOps, 0, 0,
122 /* 0xA0 - 0xA7 */ 131 /* 0xA0 - 0xA7 */
123 ByteOp | DstReg | SrcMem | Mov, DstReg | SrcMem | Mov, 132 ByteOp | DstReg | SrcMem | Mov, DstReg | SrcMem | Mov,
124 ByteOp | DstMem | SrcReg | Mov, DstMem | SrcReg | Mov, 133 ByteOp | DstMem | SrcReg | Mov, DstMem | SrcReg | Mov,
@@ -142,8 +151,10 @@ static u8 opcode_table[256] = {
142 0, 0, 0, 0, 151 0, 0, 0, 0,
143 /* 0xD8 - 0xDF */ 152 /* 0xD8 - 0xDF */
144 0, 0, 0, 0, 0, 0, 0, 0, 153 0, 0, 0, 0, 0, 0, 0, 0,
145 /* 0xE0 - 0xEF */ 154 /* 0xE0 - 0xE7 */
146 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 155 0, 0, 0, 0, 0, 0, 0, 0,
156 /* 0xE8 - 0xEF */
157 ImplicitOps, SrcImm|ImplicitOps, 0, SrcImmByte|ImplicitOps, 0, 0, 0, 0,
147 /* 0xF0 - 0xF7 */ 158 /* 0xF0 - 0xF7 */
148 0, 0, 0, 0, 159 0, 0, 0, 0,
149 ImplicitOps, 0, 160 ImplicitOps, 0,
@@ -181,7 +192,10 @@ static u16 twobyte_table[256] = {
181 /* 0x70 - 0x7F */ 192 /* 0x70 - 0x7F */
182 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 193 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
183 /* 0x80 - 0x8F */ 194 /* 0x80 - 0x8F */
184 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 195 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
196 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
197 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
198 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
185 /* 0x90 - 0x9F */ 199 /* 0x90 - 0x9F */
186 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 200 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
187 /* 0xA0 - 0xA7 */ 201 /* 0xA0 - 0xA7 */
@@ -207,19 +221,6 @@ static u16 twobyte_table[256] = {
207 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 221 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
208}; 222};
209 223
210/*
211 * Tell the emulator that of the Group 7 instructions (sgdt, lidt, etc.) we
212 * are interested only in invlpg and not in any of the rest.
213 *
214 * invlpg is a special instruction in that the data it references may not
215 * be mapped.
216 */
217void kvm_emulator_want_group7_invlpg(void)
218{
219 twobyte_table[1] &= ~SrcMem;
220}
221EXPORT_SYMBOL_GPL(kvm_emulator_want_group7_invlpg);
222
223/* Type, address-of, and value of an instruction's operand. */ 224/* Type, address-of, and value of an instruction's operand. */
224struct operand { 225struct operand {
225 enum { OP_REG, OP_MEM, OP_IMM } type; 226 enum { OP_REG, OP_MEM, OP_IMM } type;
@@ -420,7 +421,7 @@ struct operand {
420#define insn_fetch(_type, _size, _eip) \ 421#define insn_fetch(_type, _size, _eip) \
421({ unsigned long _x; \ 422({ unsigned long _x; \
422 rc = ops->read_std((unsigned long)(_eip) + ctxt->cs_base, &_x, \ 423 rc = ops->read_std((unsigned long)(_eip) + ctxt->cs_base, &_x, \
423 (_size), ctxt); \ 424 (_size), ctxt->vcpu); \
424 if ( rc != 0 ) \ 425 if ( rc != 0 ) \
425 goto done; \ 426 goto done; \
426 (_eip) += (_size); \ 427 (_eip) += (_size); \
@@ -428,10 +429,11 @@ struct operand {
428}) 429})
429 430
430/* Access/update address held in a register, based on addressing mode. */ 431/* Access/update address held in a register, based on addressing mode. */
432#define address_mask(reg) \
433 ((ad_bytes == sizeof(unsigned long)) ? \
434 (reg) : ((reg) & ((1UL << (ad_bytes << 3)) - 1)))
431#define register_address(base, reg) \ 435#define register_address(base, reg) \
432 ((base) + ((ad_bytes == sizeof(unsigned long)) ? (reg) : \ 436 ((base) + address_mask(reg))
433 ((reg) & ((1UL << (ad_bytes << 3)) - 1))))
434
435#define register_address_increment(reg, inc) \ 437#define register_address_increment(reg, inc) \
436 do { \ 438 do { \
437 /* signed type ensures sign extension to long */ \ 439 /* signed type ensures sign extension to long */ \
@@ -443,8 +445,19 @@ struct operand {
443 (((reg) + _inc) & ((1UL << (ad_bytes << 3)) - 1)); \ 445 (((reg) + _inc) & ((1UL << (ad_bytes << 3)) - 1)); \
444 } while (0) 446 } while (0)
445 447
446void *decode_register(u8 modrm_reg, unsigned long *regs, 448#define JMP_REL(rel) \
447 int highbyte_regs) 449 do { \
450 _eip += (int)(rel); \
451 _eip = ((op_bytes == 2) ? (uint16_t)_eip : (uint32_t)_eip); \
452 } while (0)
453
454/*
455 * Given the 'reg' portion of a ModRM byte, and a register block, return a
456 * pointer into the block that addresses the relevant register.
457 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
458 */
459static void *decode_register(u8 modrm_reg, unsigned long *regs,
460 int highbyte_regs)
448{ 461{
449 void *p; 462 void *p;
450 463
@@ -464,13 +477,50 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
464 if (op_bytes == 2) 477 if (op_bytes == 2)
465 op_bytes = 3; 478 op_bytes = 3;
466 *address = 0; 479 *address = 0;
467 rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2, ctxt); 480 rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
481 ctxt->vcpu);
468 if (rc) 482 if (rc)
469 return rc; 483 return rc;
470 rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes, ctxt); 484 rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
485 ctxt->vcpu);
471 return rc; 486 return rc;
472} 487}
473 488
489static int test_cc(unsigned int condition, unsigned int flags)
490{
491 int rc = 0;
492
493 switch ((condition & 15) >> 1) {
494 case 0: /* o */
495 rc |= (flags & EFLG_OF);
496 break;
497 case 1: /* b/c/nae */
498 rc |= (flags & EFLG_CF);
499 break;
500 case 2: /* z/e */
501 rc |= (flags & EFLG_ZF);
502 break;
503 case 3: /* be/na */
504 rc |= (flags & (EFLG_CF|EFLG_ZF));
505 break;
506 case 4: /* s */
507 rc |= (flags & EFLG_SF);
508 break;
509 case 5: /* p/pe */
510 rc |= (flags & EFLG_PF);
511 break;
512 case 7: /* le/ng */
513 rc |= (flags & EFLG_ZF);
514 /* fall through */
515 case 6: /* l/nge */
516 rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
517 break;
518 }
519
520 /* Odd condition identifiers (lsb == 1) have inverted sense. */
521 return (!!rc ^ (condition & 1));
522}
523
474int 524int
475x86_emulate_memop(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) 525x86_emulate_memop(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
476{ 526{
@@ -771,11 +821,15 @@ done_prefixes:
771 goto srcmem_common; 821 goto srcmem_common;
772 case SrcMem: 822 case SrcMem:
773 src.bytes = (d & ByteOp) ? 1 : op_bytes; 823 src.bytes = (d & ByteOp) ? 1 : op_bytes;
824 /* Don't fetch the address for invlpg: it could be unmapped. */
825 if (twobyte && b == 0x01 && modrm_reg == 7)
826 break;
774 srcmem_common: 827 srcmem_common:
775 src.type = OP_MEM; 828 src.type = OP_MEM;
776 src.ptr = (unsigned long *)cr2; 829 src.ptr = (unsigned long *)cr2;
830 src.val = 0;
777 if ((rc = ops->read_emulated((unsigned long)src.ptr, 831 if ((rc = ops->read_emulated((unsigned long)src.ptr,
778 &src.val, src.bytes, ctxt)) != 0) 832 &src.val, src.bytes, ctxt->vcpu)) != 0)
779 goto done; 833 goto done;
780 src.orig_val = src.val; 834 src.orig_val = src.val;
781 break; 835 break;
@@ -814,7 +868,7 @@ done_prefixes:
814 case DstReg: 868 case DstReg:
815 dst.type = OP_REG; 869 dst.type = OP_REG;
816 if ((d & ByteOp) 870 if ((d & ByteOp)
817 && !(twobyte_table && (b == 0xb6 || b == 0xb7))) { 871 && !(twobyte && (b == 0xb6 || b == 0xb7))) {
818 dst.ptr = decode_register(modrm_reg, _regs, 872 dst.ptr = decode_register(modrm_reg, _regs,
819 (rex_prefix == 0)); 873 (rex_prefix == 0));
820 dst.val = *(u8 *) dst.ptr; 874 dst.val = *(u8 *) dst.ptr;
@@ -838,6 +892,7 @@ done_prefixes:
838 dst.type = OP_MEM; 892 dst.type = OP_MEM;
839 dst.ptr = (unsigned long *)cr2; 893 dst.ptr = (unsigned long *)cr2;
840 dst.bytes = (d & ByteOp) ? 1 : op_bytes; 894 dst.bytes = (d & ByteOp) ? 1 : op_bytes;
895 dst.val = 0;
841 if (d & BitOp) { 896 if (d & BitOp) {
842 unsigned long mask = ~(dst.bytes * 8 - 1); 897 unsigned long mask = ~(dst.bytes * 8 - 1);
843 898
@@ -845,7 +900,7 @@ done_prefixes:
845 } 900 }
846 if (!(d & Mov) && /* optimisation - avoid slow emulated read */ 901 if (!(d & Mov) && /* optimisation - avoid slow emulated read */
847 ((rc = ops->read_emulated((unsigned long)dst.ptr, 902 ((rc = ops->read_emulated((unsigned long)dst.ptr,
848 &dst.val, dst.bytes, ctxt)) != 0)) 903 &dst.val, dst.bytes, ctxt->vcpu)) != 0))
849 goto done; 904 goto done;
850 break; 905 break;
851 } 906 }
@@ -871,10 +926,27 @@ done_prefixes:
871 sbb: /* sbb */ 926 sbb: /* sbb */
872 emulate_2op_SrcV("sbb", src, dst, _eflags); 927 emulate_2op_SrcV("sbb", src, dst, _eflags);
873 break; 928 break;
874 case 0x20 ... 0x25: 929 case 0x20 ... 0x23:
875 and: /* and */ 930 and: /* and */
876 emulate_2op_SrcV("and", src, dst, _eflags); 931 emulate_2op_SrcV("and", src, dst, _eflags);
877 break; 932 break;
933 case 0x24: /* and al imm8 */
934 dst.type = OP_REG;
935 dst.ptr = &_regs[VCPU_REGS_RAX];
936 dst.val = *(u8 *)dst.ptr;
937 dst.bytes = 1;
938 dst.orig_val = dst.val;
939 goto and;
940 case 0x25: /* and ax imm16, or eax imm32 */
941 dst.type = OP_REG;
942 dst.bytes = op_bytes;
943 dst.ptr = &_regs[VCPU_REGS_RAX];
944 if (op_bytes == 2)
945 dst.val = *(u16 *)dst.ptr;
946 else
947 dst.val = *(u32 *)dst.ptr;
948 dst.orig_val = dst.val;
949 goto and;
878 case 0x28 ... 0x2d: 950 case 0x28 ... 0x2d:
879 sub: /* sub */ 951 sub: /* sub */
880 emulate_2op_SrcV("sub", src, dst, _eflags); 952 emulate_2op_SrcV("sub", src, dst, _eflags);
@@ -892,6 +964,17 @@ done_prefixes:
892 goto cannot_emulate; 964 goto cannot_emulate;
893 dst.val = (s32) src.val; 965 dst.val = (s32) src.val;
894 break; 966 break;
967 case 0x6a: /* push imm8 */
968 src.val = 0L;
969 src.val = insn_fetch(s8, 1, _eip);
970push:
971 dst.type = OP_MEM;
972 dst.bytes = op_bytes;
973 dst.val = src.val;
974 register_address_increment(_regs[VCPU_REGS_RSP], -op_bytes);
975 dst.ptr = (void *) register_address(ctxt->ss_base,
976 _regs[VCPU_REGS_RSP]);
977 break;
895 case 0x80 ... 0x83: /* Grp1 */ 978 case 0x80 ... 0x83: /* Grp1 */
896 switch (modrm_reg) { 979 switch (modrm_reg) {
897 case 0: 980 case 0:
@@ -939,18 +1022,10 @@ done_prefixes:
939 dst.val = src.val; 1022 dst.val = src.val;
940 lock_prefix = 1; 1023 lock_prefix = 1;
941 break; 1024 break;
942 case 0xa0 ... 0xa1: /* mov */
943 dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX];
944 dst.val = src.val;
945 _eip += ad_bytes; /* skip src displacement */
946 break;
947 case 0xa2 ... 0xa3: /* mov */
948 dst.val = (unsigned long)_regs[VCPU_REGS_RAX];
949 _eip += ad_bytes; /* skip dst displacement */
950 break;
951 case 0x88 ... 0x8b: /* mov */ 1025 case 0x88 ... 0x8b: /* mov */
952 case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */ 1026 goto mov;
953 dst.val = src.val; 1027 case 0x8d: /* lea r16/r32, m */
1028 dst.val = modrm_val;
954 break; 1029 break;
955 case 0x8f: /* pop (sole member of Grp1a) */ 1030 case 0x8f: /* pop (sole member of Grp1a) */
956 /* 64-bit mode: POP always pops a 64-bit operand. */ 1031 /* 64-bit mode: POP always pops a 64-bit operand. */
@@ -958,10 +1033,19 @@ done_prefixes:
958 dst.bytes = 8; 1033 dst.bytes = 8;
959 if ((rc = ops->read_std(register_address(ctxt->ss_base, 1034 if ((rc = ops->read_std(register_address(ctxt->ss_base,
960 _regs[VCPU_REGS_RSP]), 1035 _regs[VCPU_REGS_RSP]),
961 &dst.val, dst.bytes, ctxt)) != 0) 1036 &dst.val, dst.bytes, ctxt->vcpu)) != 0)
962 goto done; 1037 goto done;
963 register_address_increment(_regs[VCPU_REGS_RSP], dst.bytes); 1038 register_address_increment(_regs[VCPU_REGS_RSP], dst.bytes);
964 break; 1039 break;
1040 case 0xa0 ... 0xa1: /* mov */
1041 dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX];
1042 dst.val = src.val;
1043 _eip += ad_bytes; /* skip src displacement */
1044 break;
1045 case 0xa2 ... 0xa3: /* mov */
1046 dst.val = (unsigned long)_regs[VCPU_REGS_RAX];
1047 _eip += ad_bytes; /* skip dst displacement */
1048 break;
965 case 0xc0 ... 0xc1: 1049 case 0xc0 ... 0xc1:
966 grp2: /* Grp2 */ 1050 grp2: /* Grp2 */
967 switch (modrm_reg) { 1051 switch (modrm_reg) {
@@ -989,12 +1073,41 @@ done_prefixes:
989 break; 1073 break;
990 } 1074 }
991 break; 1075 break;
1076 case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
1077 mov:
1078 dst.val = src.val;
1079 break;
992 case 0xd0 ... 0xd1: /* Grp2 */ 1080 case 0xd0 ... 0xd1: /* Grp2 */
993 src.val = 1; 1081 src.val = 1;
994 goto grp2; 1082 goto grp2;
995 case 0xd2 ... 0xd3: /* Grp2 */ 1083 case 0xd2 ... 0xd3: /* Grp2 */
996 src.val = _regs[VCPU_REGS_RCX]; 1084 src.val = _regs[VCPU_REGS_RCX];
997 goto grp2; 1085 goto grp2;
1086 case 0xe8: /* call (near) */ {
1087 long int rel;
1088 switch (op_bytes) {
1089 case 2:
1090 rel = insn_fetch(s16, 2, _eip);
1091 break;
1092 case 4:
1093 rel = insn_fetch(s32, 4, _eip);
1094 break;
1095 case 8:
1096 rel = insn_fetch(s64, 8, _eip);
1097 break;
1098 default:
1099 DPRINTF("Call: Invalid op_bytes\n");
1100 goto cannot_emulate;
1101 }
1102 src.val = (unsigned long) _eip;
1103 JMP_REL(rel);
1104 goto push;
1105 }
1106 case 0xe9: /* jmp rel */
1107 case 0xeb: /* jmp rel short */
1108 JMP_REL(src.val);
1109 no_wb = 1; /* Disable writeback. */
1110 break;
998 case 0xf6 ... 0xf7: /* Grp3 */ 1111 case 0xf6 ... 0xf7: /* Grp3 */
999 switch (modrm_reg) { 1112 switch (modrm_reg) {
1000 case 0 ... 1: /* test */ 1113 case 0 ... 1: /* test */
@@ -1037,13 +1150,19 @@ done_prefixes:
1037 case 1: /* dec */ 1150 case 1: /* dec */
1038 emulate_1op("dec", dst, _eflags); 1151 emulate_1op("dec", dst, _eflags);
1039 break; 1152 break;
1153 case 4: /* jmp abs */
1154 if (b == 0xff)
1155 _eip = dst.val;
1156 else
1157 goto cannot_emulate;
1158 break;
1040 case 6: /* push */ 1159 case 6: /* push */
1041 /* 64-bit mode: PUSH always pushes a 64-bit operand. */ 1160 /* 64-bit mode: PUSH always pushes a 64-bit operand. */
1042 if (mode == X86EMUL_MODE_PROT64) { 1161 if (mode == X86EMUL_MODE_PROT64) {
1043 dst.bytes = 8; 1162 dst.bytes = 8;
1044 if ((rc = ops->read_std((unsigned long)dst.ptr, 1163 if ((rc = ops->read_std((unsigned long)dst.ptr,
1045 &dst.val, 8, 1164 &dst.val, 8,
1046 ctxt)) != 0) 1165 ctxt->vcpu)) != 0)
1047 goto done; 1166 goto done;
1048 } 1167 }
1049 register_address_increment(_regs[VCPU_REGS_RSP], 1168 register_address_increment(_regs[VCPU_REGS_RSP],
@@ -1051,7 +1170,7 @@ done_prefixes:
1051 if ((rc = ops->write_std( 1170 if ((rc = ops->write_std(
1052 register_address(ctxt->ss_base, 1171 register_address(ctxt->ss_base,
1053 _regs[VCPU_REGS_RSP]), 1172 _regs[VCPU_REGS_RSP]),
1054 &dst.val, dst.bytes, ctxt)) != 0) 1173 &dst.val, dst.bytes, ctxt->vcpu)) != 0)
1055 goto done; 1174 goto done;
1056 no_wb = 1; 1175 no_wb = 1;
1057 break; 1176 break;
@@ -1086,11 +1205,11 @@ writeback:
1086 rc = ops->cmpxchg_emulated((unsigned long)dst. 1205 rc = ops->cmpxchg_emulated((unsigned long)dst.
1087 ptr, &dst.orig_val, 1206 ptr, &dst.orig_val,
1088 &dst.val, dst.bytes, 1207 &dst.val, dst.bytes,
1089 ctxt); 1208 ctxt->vcpu);
1090 else 1209 else
1091 rc = ops->write_emulated((unsigned long)dst.ptr, 1210 rc = ops->write_emulated((unsigned long)dst.ptr,
1092 &dst.val, dst.bytes, 1211 &dst.val, dst.bytes,
1093 ctxt); 1212 ctxt->vcpu);
1094 if (rc != 0) 1213 if (rc != 0)
1095 goto done; 1214 goto done;
1096 default: 1215 default:
@@ -1109,6 +1228,81 @@ done:
1109special_insn: 1228special_insn:
1110 if (twobyte) 1229 if (twobyte)
1111 goto twobyte_special_insn; 1230 goto twobyte_special_insn;
1231 switch(b) {
1232 case 0x50 ... 0x57: /* push reg */
1233 if (op_bytes == 2)
1234 src.val = (u16) _regs[b & 0x7];
1235 else
1236 src.val = (u32) _regs[b & 0x7];
1237 dst.type = OP_MEM;
1238 dst.bytes = op_bytes;
1239 dst.val = src.val;
1240 register_address_increment(_regs[VCPU_REGS_RSP], -op_bytes);
1241 dst.ptr = (void *) register_address(
1242 ctxt->ss_base, _regs[VCPU_REGS_RSP]);
1243 break;
1244 case 0x58 ... 0x5f: /* pop reg */
1245 dst.ptr = (unsigned long *)&_regs[b & 0x7];
1246 pop_instruction:
1247 if ((rc = ops->read_std(register_address(ctxt->ss_base,
1248 _regs[VCPU_REGS_RSP]), dst.ptr, op_bytes, ctxt->vcpu))
1249 != 0)
1250 goto done;
1251
1252 register_address_increment(_regs[VCPU_REGS_RSP], op_bytes);
1253 no_wb = 1; /* Disable writeback. */
1254 break;
1255 case 0x6c: /* insb */
1256 case 0x6d: /* insw/insd */
1257 if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
1258 1, /* in */
1259 (d & ByteOp) ? 1 : op_bytes, /* size */
1260 rep_prefix ?
1261 address_mask(_regs[VCPU_REGS_RCX]) : 1, /* count */
1262 (_eflags & EFLG_DF), /* down */
1263 register_address(ctxt->es_base,
1264 _regs[VCPU_REGS_RDI]), /* address */
1265 rep_prefix,
1266 _regs[VCPU_REGS_RDX] /* port */
1267 ) == 0)
1268 return -1;
1269 return 0;
1270 case 0x6e: /* outsb */
1271 case 0x6f: /* outsw/outsd */
1272 if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
1273 0, /* in */
1274 (d & ByteOp) ? 1 : op_bytes, /* size */
1275 rep_prefix ?
1276 address_mask(_regs[VCPU_REGS_RCX]) : 1, /* count */
1277 (_eflags & EFLG_DF), /* down */
1278 register_address(override_base ?
1279 *override_base : ctxt->ds_base,
1280 _regs[VCPU_REGS_RSI]), /* address */
1281 rep_prefix,
1282 _regs[VCPU_REGS_RDX] /* port */
1283 ) == 0)
1284 return -1;
1285 return 0;
1286 case 0x70 ... 0x7f: /* jcc (short) */ {
1287 int rel = insn_fetch(s8, 1, _eip);
1288
1289 if (test_cc(b, _eflags))
1290 JMP_REL(rel);
1291 break;
1292 }
1293 case 0x9c: /* pushf */
1294 src.val = (unsigned long) _eflags;
1295 goto push;
1296 case 0x9d: /* popf */
1297 dst.ptr = (unsigned long *) &_eflags;
1298 goto pop_instruction;
1299 case 0xc3: /* ret */
1300 dst.ptr = &_eip;
1301 goto pop_instruction;
1302 case 0xf4: /* hlt */
1303 ctxt->vcpu->halt_request = 1;
1304 goto done;
1305 }
1112 if (rep_prefix) { 1306 if (rep_prefix) {
1113 if (_regs[VCPU_REGS_RCX] == 0) { 1307 if (_regs[VCPU_REGS_RCX] == 0) {
1114 ctxt->vcpu->rip = _eip; 1308 ctxt->vcpu->rip = _eip;
@@ -1125,7 +1319,7 @@ special_insn:
1125 _regs[VCPU_REGS_RDI]); 1319 _regs[VCPU_REGS_RDI]);
1126 if ((rc = ops->read_emulated(register_address( 1320 if ((rc = ops->read_emulated(register_address(
1127 override_base ? *override_base : ctxt->ds_base, 1321 override_base ? *override_base : ctxt->ds_base,
1128 _regs[VCPU_REGS_RSI]), &dst.val, dst.bytes, ctxt)) != 0) 1322 _regs[VCPU_REGS_RSI]), &dst.val, dst.bytes, ctxt->vcpu)) != 0)
1129 goto done; 1323 goto done;
1130 register_address_increment(_regs[VCPU_REGS_RSI], 1324 register_address_increment(_regs[VCPU_REGS_RSI],
1131 (_eflags & EFLG_DF) ? -dst.bytes : dst.bytes); 1325 (_eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
@@ -1147,7 +1341,8 @@ special_insn:
1147 dst.type = OP_REG; 1341 dst.type = OP_REG;
1148 dst.bytes = (d & ByteOp) ? 1 : op_bytes; 1342 dst.bytes = (d & ByteOp) ? 1 : op_bytes;
1149 dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX]; 1343 dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX];
1150 if ((rc = ops->read_emulated(cr2, &dst.val, dst.bytes, ctxt)) != 0) 1344 if ((rc = ops->read_emulated(cr2, &dst.val, dst.bytes,
1345 ctxt->vcpu)) != 0)
1151 goto done; 1346 goto done;
1152 register_address_increment(_regs[VCPU_REGS_RSI], 1347 register_address_increment(_regs[VCPU_REGS_RSI],
1153 (_eflags & EFLG_DF) ? -dst.bytes : dst.bytes); 1348 (_eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
@@ -1155,23 +1350,7 @@ special_insn:
1155 case 0xae ... 0xaf: /* scas */ 1350 case 0xae ... 0xaf: /* scas */
1156 DPRINTF("Urk! I don't handle SCAS.\n"); 1351 DPRINTF("Urk! I don't handle SCAS.\n");
1157 goto cannot_emulate; 1352 goto cannot_emulate;
1158 case 0xf4: /* hlt */
1159 ctxt->vcpu->halt_request = 1;
1160 goto done;
1161 case 0xc3: /* ret */
1162 dst.ptr = &_eip;
1163 goto pop_instruction;
1164 case 0x58 ... 0x5f: /* pop reg */
1165 dst.ptr = (unsigned long *)&_regs[b & 0x7];
1166 1353
1167pop_instruction:
1168 if ((rc = ops->read_std(register_address(ctxt->ss_base,
1169 _regs[VCPU_REGS_RSP]), dst.ptr, op_bytes, ctxt)) != 0)
1170 goto done;
1171
1172 register_address_increment(_regs[VCPU_REGS_RSP], op_bytes);
1173 no_wb = 1; /* Disable writeback. */
1174 break;
1175 } 1354 }
1176 goto writeback; 1355 goto writeback;
1177 1356
@@ -1230,40 +1409,50 @@ twobyte_insn:
1230 break; 1409 break;
1231 case 0x40 ... 0x4f: /* cmov */ 1410 case 0x40 ... 0x4f: /* cmov */
1232 dst.val = dst.orig_val = src.val; 1411 dst.val = dst.orig_val = src.val;
1233 d &= ~Mov; /* default to no move */ 1412 no_wb = 1;
1234 /* 1413 /*
1235 * First, assume we're decoding an even cmov opcode 1414 * First, assume we're decoding an even cmov opcode
1236 * (lsb == 0). 1415 * (lsb == 0).
1237 */ 1416 */
1238 switch ((b & 15) >> 1) { 1417 switch ((b & 15) >> 1) {
1239 case 0: /* cmovo */ 1418 case 0: /* cmovo */
1240 d |= (_eflags & EFLG_OF) ? Mov : 0; 1419 no_wb = (_eflags & EFLG_OF) ? 0 : 1;
1241 break; 1420 break;
1242 case 1: /* cmovb/cmovc/cmovnae */ 1421 case 1: /* cmovb/cmovc/cmovnae */
1243 d |= (_eflags & EFLG_CF) ? Mov : 0; 1422 no_wb = (_eflags & EFLG_CF) ? 0 : 1;
1244 break; 1423 break;
1245 case 2: /* cmovz/cmove */ 1424 case 2: /* cmovz/cmove */
1246 d |= (_eflags & EFLG_ZF) ? Mov : 0; 1425 no_wb = (_eflags & EFLG_ZF) ? 0 : 1;
1247 break; 1426 break;
1248 case 3: /* cmovbe/cmovna */ 1427 case 3: /* cmovbe/cmovna */
1249 d |= (_eflags & (EFLG_CF | EFLG_ZF)) ? Mov : 0; 1428 no_wb = (_eflags & (EFLG_CF | EFLG_ZF)) ? 0 : 1;
1250 break; 1429 break;
1251 case 4: /* cmovs */ 1430 case 4: /* cmovs */
1252 d |= (_eflags & EFLG_SF) ? Mov : 0; 1431 no_wb = (_eflags & EFLG_SF) ? 0 : 1;
1253 break; 1432 break;
1254 case 5: /* cmovp/cmovpe */ 1433 case 5: /* cmovp/cmovpe */
1255 d |= (_eflags & EFLG_PF) ? Mov : 0; 1434 no_wb = (_eflags & EFLG_PF) ? 0 : 1;
1256 break; 1435 break;
1257 case 7: /* cmovle/cmovng */ 1436 case 7: /* cmovle/cmovng */
1258 d |= (_eflags & EFLG_ZF) ? Mov : 0; 1437 no_wb = (_eflags & EFLG_ZF) ? 0 : 1;
1259 /* fall through */ 1438 /* fall through */
1260 case 6: /* cmovl/cmovnge */ 1439 case 6: /* cmovl/cmovnge */
1261 d |= (!(_eflags & EFLG_SF) != 1440 no_wb &= (!(_eflags & EFLG_SF) !=
1262 !(_eflags & EFLG_OF)) ? Mov : 0; 1441 !(_eflags & EFLG_OF)) ? 0 : 1;
1263 break; 1442 break;
1264 } 1443 }
1265 /* Odd cmov opcodes (lsb == 1) have inverted sense. */ 1444 /* Odd cmov opcodes (lsb == 1) have inverted sense. */
1266 d ^= (b & 1) ? Mov : 0; 1445 no_wb ^= b & 1;
1446 break;
1447 case 0xa3:
1448 bt: /* bt */
1449 src.val &= (dst.bytes << 3) - 1; /* only subword offset */
1450 emulate_2op_SrcV_nobyte("bt", src, dst, _eflags);
1451 break;
1452 case 0xab:
1453 bts: /* bts */
1454 src.val &= (dst.bytes << 3) - 1; /* only subword offset */
1455 emulate_2op_SrcV_nobyte("bts", src, dst, _eflags);
1267 break; 1456 break;
1268 case 0xb0 ... 0xb1: /* cmpxchg */ 1457 case 0xb0 ... 0xb1: /* cmpxchg */
1269 /* 1458 /*
@@ -1273,8 +1462,6 @@ twobyte_insn:
1273 src.orig_val = src.val; 1462 src.orig_val = src.val;
1274 src.val = _regs[VCPU_REGS_RAX]; 1463 src.val = _regs[VCPU_REGS_RAX];
1275 emulate_2op_SrcV("cmp", src, dst, _eflags); 1464 emulate_2op_SrcV("cmp", src, dst, _eflags);
1276 /* Always write back. The question is: where to? */
1277 d |= Mov;
1278 if (_eflags & EFLG_ZF) { 1465 if (_eflags & EFLG_ZF) {
1279 /* Success: write back to memory. */ 1466 /* Success: write back to memory. */
1280 dst.val = src.orig_val; 1467 dst.val = src.orig_val;
@@ -1284,30 +1471,15 @@ twobyte_insn:
1284 dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX]; 1471 dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX];
1285 } 1472 }
1286 break; 1473 break;
1287 case 0xa3:
1288 bt: /* bt */
1289 src.val &= (dst.bytes << 3) - 1; /* only subword offset */
1290 emulate_2op_SrcV_nobyte("bt", src, dst, _eflags);
1291 break;
1292 case 0xb3: 1474 case 0xb3:
1293 btr: /* btr */ 1475 btr: /* btr */
1294 src.val &= (dst.bytes << 3) - 1; /* only subword offset */ 1476 src.val &= (dst.bytes << 3) - 1; /* only subword offset */
1295 emulate_2op_SrcV_nobyte("btr", src, dst, _eflags); 1477 emulate_2op_SrcV_nobyte("btr", src, dst, _eflags);
1296 break; 1478 break;
1297 case 0xab:
1298 bts: /* bts */
1299 src.val &= (dst.bytes << 3) - 1; /* only subword offset */
1300 emulate_2op_SrcV_nobyte("bts", src, dst, _eflags);
1301 break;
1302 case 0xb6 ... 0xb7: /* movzx */ 1479 case 0xb6 ... 0xb7: /* movzx */
1303 dst.bytes = op_bytes; 1480 dst.bytes = op_bytes;
1304 dst.val = (d & ByteOp) ? (u8) src.val : (u16) src.val; 1481 dst.val = (d & ByteOp) ? (u8) src.val : (u16) src.val;
1305 break; 1482 break;
1306 case 0xbb:
1307 btc: /* btc */
1308 src.val &= (dst.bytes << 3) - 1; /* only subword offset */
1309 emulate_2op_SrcV_nobyte("btc", src, dst, _eflags);
1310 break;
1311 case 0xba: /* Grp8 */ 1483 case 0xba: /* Grp8 */
1312 switch (modrm_reg & 3) { 1484 switch (modrm_reg & 3) {
1313 case 0: 1485 case 0:
@@ -1320,6 +1492,11 @@ twobyte_insn:
1320 goto btc; 1492 goto btc;
1321 } 1493 }
1322 break; 1494 break;
1495 case 0xbb:
1496 btc: /* btc */
1497 src.val &= (dst.bytes << 3) - 1; /* only subword offset */
1498 emulate_2op_SrcV_nobyte("btc", src, dst, _eflags);
1499 break;
1323 case 0xbe ... 0xbf: /* movsx */ 1500 case 0xbe ... 0xbf: /* movsx */
1324 dst.bytes = op_bytes; 1501 dst.bytes = op_bytes;
1325 dst.val = (d & ByteOp) ? (s8) src.val : (s16) src.val; 1502 dst.val = (d & ByteOp) ? (s8) src.val : (s16) src.val;
@@ -1331,14 +1508,14 @@ twobyte_special_insn:
1331 /* Disable writeback. */ 1508 /* Disable writeback. */
1332 no_wb = 1; 1509 no_wb = 1;
1333 switch (b) { 1510 switch (b) {
1511 case 0x06:
1512 emulate_clts(ctxt->vcpu);
1513 break;
1334 case 0x09: /* wbinvd */ 1514 case 0x09: /* wbinvd */
1335 break; 1515 break;
1336 case 0x0d: /* GrpP (prefetch) */ 1516 case 0x0d: /* GrpP (prefetch) */
1337 case 0x18: /* Grp16 (prefetch/nop) */ 1517 case 0x18: /* Grp16 (prefetch/nop) */
1338 break; 1518 break;
1339 case 0x06:
1340 emulate_clts(ctxt->vcpu);
1341 break;
1342 case 0x20: /* mov cr, reg */ 1519 case 0x20: /* mov cr, reg */
1343 if (modrm_mod != 3) 1520 if (modrm_mod != 3)
1344 goto cannot_emulate; 1521 goto cannot_emulate;
@@ -1355,7 +1532,7 @@ twobyte_special_insn:
1355 | ((u64)_regs[VCPU_REGS_RDX] << 32); 1532 | ((u64)_regs[VCPU_REGS_RDX] << 32);
1356 rc = kvm_set_msr(ctxt->vcpu, _regs[VCPU_REGS_RCX], msr_data); 1533 rc = kvm_set_msr(ctxt->vcpu, _regs[VCPU_REGS_RCX], msr_data);
1357 if (rc) { 1534 if (rc) {
1358 kvm_arch_ops->inject_gp(ctxt->vcpu, 0); 1535 kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
1359 _eip = ctxt->vcpu->rip; 1536 _eip = ctxt->vcpu->rip;
1360 } 1537 }
1361 rc = X86EMUL_CONTINUE; 1538 rc = X86EMUL_CONTINUE;
@@ -1364,7 +1541,7 @@ twobyte_special_insn:
1364 /* rdmsr */ 1541 /* rdmsr */
1365 rc = kvm_get_msr(ctxt->vcpu, _regs[VCPU_REGS_RCX], &msr_data); 1542 rc = kvm_get_msr(ctxt->vcpu, _regs[VCPU_REGS_RCX], &msr_data);
1366 if (rc) { 1543 if (rc) {
1367 kvm_arch_ops->inject_gp(ctxt->vcpu, 0); 1544 kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
1368 _eip = ctxt->vcpu->rip; 1545 _eip = ctxt->vcpu->rip;
1369 } else { 1546 } else {
1370 _regs[VCPU_REGS_RAX] = (u32)msr_data; 1547 _regs[VCPU_REGS_RAX] = (u32)msr_data;
@@ -1372,10 +1549,32 @@ twobyte_special_insn:
1372 } 1549 }
1373 rc = X86EMUL_CONTINUE; 1550 rc = X86EMUL_CONTINUE;
1374 break; 1551 break;
1552 case 0x80 ... 0x8f: /* jnz rel, etc*/ {
1553 long int rel;
1554
1555 switch (op_bytes) {
1556 case 2:
1557 rel = insn_fetch(s16, 2, _eip);
1558 break;
1559 case 4:
1560 rel = insn_fetch(s32, 4, _eip);
1561 break;
1562 case 8:
1563 rel = insn_fetch(s64, 8, _eip);
1564 break;
1565 default:
1566 DPRINTF("jnz: Invalid op_bytes\n");
1567 goto cannot_emulate;
1568 }
1569 if (test_cc(b, _eflags))
1570 JMP_REL(rel);
1571 break;
1572 }
1375 case 0xc7: /* Grp9 (cmpxchg8b) */ 1573 case 0xc7: /* Grp9 (cmpxchg8b) */
1376 { 1574 {
1377 u64 old, new; 1575 u64 old, new;
1378 if ((rc = ops->read_emulated(cr2, &old, 8, ctxt)) != 0) 1576 if ((rc = ops->read_emulated(cr2, &old, 8, ctxt->vcpu))
1577 != 0)
1379 goto done; 1578 goto done;
1380 if (((u32) (old >> 0) != (u32) _regs[VCPU_REGS_RAX]) || 1579 if (((u32) (old >> 0) != (u32) _regs[VCPU_REGS_RAX]) ||
1381 ((u32) (old >> 32) != (u32) _regs[VCPU_REGS_RDX])) { 1580 ((u32) (old >> 32) != (u32) _regs[VCPU_REGS_RDX])) {
@@ -1386,7 +1585,7 @@ twobyte_special_insn:
1386 new = ((u64)_regs[VCPU_REGS_RCX] << 32) 1585 new = ((u64)_regs[VCPU_REGS_RCX] << 32)
1387 | (u32) _regs[VCPU_REGS_RBX]; 1586 | (u32) _regs[VCPU_REGS_RBX];
1388 if ((rc = ops->cmpxchg_emulated(cr2, &old, 1587 if ((rc = ops->cmpxchg_emulated(cr2, &old,
1389 &new, 8, ctxt)) != 0) 1588 &new, 8, ctxt->vcpu)) != 0)
1390 goto done; 1589 goto done;
1391 _eflags |= EFLG_ZF; 1590 _eflags |= EFLG_ZF;
1392 } 1591 }
diff --git a/drivers/kvm/x86_emulate.h b/drivers/kvm/x86_emulate.h
index ea3407d7fe..92c73aa7f9 100644
--- a/drivers/kvm/x86_emulate.h
+++ b/drivers/kvm/x86_emulate.h
@@ -60,7 +60,7 @@ struct x86_emulate_ops {
60 * @bytes: [IN ] Number of bytes to read from memory. 60 * @bytes: [IN ] Number of bytes to read from memory.
61 */ 61 */
62 int (*read_std)(unsigned long addr, void *val, 62 int (*read_std)(unsigned long addr, void *val,
63 unsigned int bytes, struct x86_emulate_ctxt * ctxt); 63 unsigned int bytes, struct kvm_vcpu *vcpu);
64 64
65 /* 65 /*
66 * write_std: Write bytes of standard (non-emulated/special) memory. 66 * write_std: Write bytes of standard (non-emulated/special) memory.
@@ -71,7 +71,7 @@ struct x86_emulate_ops {
71 * @bytes: [IN ] Number of bytes to write to memory. 71 * @bytes: [IN ] Number of bytes to write to memory.
72 */ 72 */
73 int (*write_std)(unsigned long addr, const void *val, 73 int (*write_std)(unsigned long addr, const void *val,
74 unsigned int bytes, struct x86_emulate_ctxt * ctxt); 74 unsigned int bytes, struct kvm_vcpu *vcpu);
75 75
76 /* 76 /*
77 * read_emulated: Read bytes from emulated/special memory area. 77 * read_emulated: Read bytes from emulated/special memory area.
@@ -82,7 +82,7 @@ struct x86_emulate_ops {
82 int (*read_emulated) (unsigned long addr, 82 int (*read_emulated) (unsigned long addr,
83 void *val, 83 void *val,
84 unsigned int bytes, 84 unsigned int bytes,
85 struct x86_emulate_ctxt * ctxt); 85 struct kvm_vcpu *vcpu);
86 86
87 /* 87 /*
88 * write_emulated: Read bytes from emulated/special memory area. 88 * write_emulated: Read bytes from emulated/special memory area.
@@ -94,7 +94,7 @@ struct x86_emulate_ops {
94 int (*write_emulated) (unsigned long addr, 94 int (*write_emulated) (unsigned long addr,
95 const void *val, 95 const void *val,
96 unsigned int bytes, 96 unsigned int bytes,
97 struct x86_emulate_ctxt * ctxt); 97 struct kvm_vcpu *vcpu);
98 98
99 /* 99 /*
100 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an 100 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
@@ -108,12 +108,10 @@ struct x86_emulate_ops {
108 const void *old, 108 const void *old,
109 const void *new, 109 const void *new,
110 unsigned int bytes, 110 unsigned int bytes,
111 struct x86_emulate_ctxt * ctxt); 111 struct kvm_vcpu *vcpu);
112 112
113}; 113};
114 114
115struct cpu_user_regs;
116
117struct x86_emulate_ctxt { 115struct x86_emulate_ctxt {
118 /* Register state before/after emulation. */ 116 /* Register state before/after emulation. */
119 struct kvm_vcpu *vcpu; 117 struct kvm_vcpu *vcpu;
@@ -154,12 +152,4 @@ struct x86_emulate_ctxt {
154int x86_emulate_memop(struct x86_emulate_ctxt *ctxt, 152int x86_emulate_memop(struct x86_emulate_ctxt *ctxt,
155 struct x86_emulate_ops *ops); 153 struct x86_emulate_ops *ops);
156 154
157/*
158 * Given the 'reg' portion of a ModRM byte, and a register block, return a
159 * pointer into the block that addresses the relevant register.
160 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
161 */
162void *decode_register(u8 modrm_reg, unsigned long *regs,
163 int highbyte_regs);
164
165#endif /* __X86_EMULATE_H__ */ 155#endif /* __X86_EMULATE_H__ */
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
index a2191a4fcf..342517261e 100644
--- a/drivers/md/dm-emc.c
+++ b/drivers/md/dm-emc.c
@@ -54,8 +54,6 @@ static void emc_endio(struct bio *bio, int error)
54 54
55 /* request is freed in block layer */ 55 /* request is freed in block layer */
56 free_bio(bio); 56 free_bio(bio);
57
58 return 0;
59} 57}
60 58
61static struct bio *get_failover_bio(struct dm_path *path, unsigned data_size) 59static struct bio *get_failover_bio(struct dm_path *path, unsigned data_size)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e8f102ea9b..acf1b81b47 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3076,8 +3076,7 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
3076 mddev->gendisk = disk; 3076 mddev->gendisk = disk;
3077 mutex_unlock(&disks_mutex); 3077 mutex_unlock(&disks_mutex);
3078 mddev->kobj.parent = &disk->kobj; 3078 mddev->kobj.parent = &disk->kobj;
3079 mddev->kobj.k_name = NULL; 3079 kobject_set_name(&mddev->kobj, "%s", "md");
3080 snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
3081 mddev->kobj.ktype = &md_ktype; 3080 mddev->kobj.ktype = &md_ktype;
3082 if (kobject_register(&mddev->kobj)) 3081 if (kobject_register(&mddev->kobj))
3083 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 3082 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index 56231d8edc..18738faecb 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -103,10 +103,7 @@ static struct file_operations dvb_device_fops =
103 .open = dvb_device_open, 103 .open = dvb_device_open,
104}; 104};
105 105
106static struct cdev dvb_device_cdev = { 106static struct cdev dvb_device_cdev;
107 .kobj = {.name = "dvb", },
108 .owner = THIS_MODULE,
109};
110 107
111int dvb_generic_open(struct inode *inode, struct file *file) 108int dvb_generic_open(struct inode *inode, struct file *file)
112{ 109{
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c
index 844f1762c4..4d5b8035e4 100644
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -124,12 +124,6 @@ static struct i2c_adapter bttv_i2c_adap_sw_template = {
124/* ----------------------------------------------------------------------- */ 124/* ----------------------------------------------------------------------- */
125/* I2C functions - hardware i2c */ 125/* I2C functions - hardware i2c */
126 126
127static int algo_control(struct i2c_adapter *adapter,
128 unsigned int cmd, unsigned long arg)
129{
130 return 0;
131}
132
133static u32 functionality(struct i2c_adapter *adap) 127static u32 functionality(struct i2c_adapter *adap)
134{ 128{
135 return I2C_FUNC_SMBUS_EMUL; 129 return I2C_FUNC_SMBUS_EMUL;
@@ -278,7 +272,6 @@ static int bttv_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int
278 272
279static struct i2c_algorithm bttv_algo = { 273static struct i2c_algorithm bttv_algo = {
280 .master_xfer = bttv_i2c_xfer, 274 .master_xfer = bttv_i2c_xfer,
281 .algo_control = algo_control,
282 .functionality = functionality, 275 .functionality = functionality,
283}; 276};
284 277
diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c
index b517c8b5a5..71da528932 100644
--- a/drivers/media/video/cx23885/cx23885-i2c.c
+++ b/drivers/media/video/cx23885/cx23885-i2c.c
@@ -272,12 +272,6 @@ void cx23885_call_i2c_clients(struct cx23885_i2c *bus,
272 i2c_clients_command(&bus->i2c_adap, cmd, arg); 272 i2c_clients_command(&bus->i2c_adap, cmd, arg);
273} 273}
274 274
275static int cx23885_algo_control(struct i2c_adapter *adap,
276 unsigned int cmd, unsigned long arg)
277{
278 return 0;
279}
280
281static u32 cx23885_functionality(struct i2c_adapter *adap) 275static u32 cx23885_functionality(struct i2c_adapter *adap)
282{ 276{
283 return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C; 277 return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C;
@@ -285,7 +279,6 @@ static u32 cx23885_functionality(struct i2c_adapter *adap)
285 279
286static struct i2c_algorithm cx23885_i2c_algo_template = { 280static struct i2c_algorithm cx23885_i2c_algo_template = {
287 .master_xfer = i2c_xfer, 281 .master_xfer = i2c_xfer,
288 .algo_control = cx23885_algo_control,
289 .functionality = cx23885_functionality, 282 .functionality = cx23885_functionality,
290}; 283};
291 284
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index 54ccc6e1f9..997d067e32 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -383,15 +383,6 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned char *eedata, int len)
383/* ----------------------------------------------------------- */ 383/* ----------------------------------------------------------- */
384 384
385/* 385/*
386 * algo_control()
387 */
388static int algo_control(struct i2c_adapter *adapter,
389 unsigned int cmd, unsigned long arg)
390{
391 return 0;
392}
393
394/*
395 * functionality() 386 * functionality()
396 */ 387 */
397static u32 functionality(struct i2c_adapter *adap) 388static u32 functionality(struct i2c_adapter *adap)
@@ -475,7 +466,6 @@ static int attach_inform(struct i2c_client *client)
475 466
476static struct i2c_algorithm em28xx_algo = { 467static struct i2c_algorithm em28xx_algo = {
477 .master_xfer = em28xx_i2c_xfer, 468 .master_xfer = em28xx_i2c_xfer,
478 .algo_control = algo_control,
479 .functionality = functionality, 469 .functionality = functionality,
480}; 470};
481 471
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
index 898c9d2e4c..c817c864e6 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
@@ -520,12 +520,6 @@ static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
520 return ret; 520 return ret;
521} 521}
522 522
523static int pvr2_i2c_control(struct i2c_adapter *adapter,
524 unsigned int cmd, unsigned long arg)
525{
526 return 0;
527}
528
529static u32 pvr2_i2c_functionality(struct i2c_adapter *adap) 523static u32 pvr2_i2c_functionality(struct i2c_adapter *adap)
530{ 524{
531 return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C; 525 return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C;
@@ -942,7 +936,6 @@ static int pvr2_i2c_detach_inform(struct i2c_client *client)
942 936
943static struct i2c_algorithm pvr2_i2c_algo_template = { 937static struct i2c_algorithm pvr2_i2c_algo_template = {
944 .master_xfer = pvr2_i2c_xfer, 938 .master_xfer = pvr2_i2c_xfer,
945 .algo_control = pvr2_i2c_control,
946 .functionality = pvr2_i2c_functionality, 939 .functionality = pvr2_i2c_functionality,
947}; 940};
948 941
diff --git a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
index 7a78d6b347..2ee3c3049e 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
@@ -905,8 +905,8 @@ struct pvr2_sysfs *pvr2_sysfs_create(struct pvr2_context *mp,
905} 905}
906 906
907 907
908static int pvr2_sysfs_hotplug(struct device *cd,char **envp, 908static int pvr2_sysfs_hotplug(struct device *d,
909 int numenvp,char *buf,int size) 909 struct kobj_uevent_env *env)
910{ 910{
911 /* Even though we don't do anything here, we still need this function 911 /* Even though we don't do anything here, we still need this function
912 because sysfs will still try to call it. */ 912 because sysfs will still try to call it. */
diff --git a/drivers/media/video/saa7134/saa7134-i2c.c b/drivers/media/video/saa7134/saa7134-i2c.c
index cc87f5855a..6deaad1a54 100644
--- a/drivers/media/video/saa7134/saa7134-i2c.c
+++ b/drivers/media/video/saa7134/saa7134-i2c.c
@@ -314,12 +314,6 @@ static int saa7134_i2c_xfer(struct i2c_adapter *i2c_adap,
314 314
315/* ----------------------------------------------------------- */ 315/* ----------------------------------------------------------- */
316 316
317static int algo_control(struct i2c_adapter *adapter,
318 unsigned int cmd, unsigned long arg)
319{
320 return 0;
321}
322
323static u32 functionality(struct i2c_adapter *adap) 317static u32 functionality(struct i2c_adapter *adap)
324{ 318{
325 return I2C_FUNC_SMBUS_EMUL; 319 return I2C_FUNC_SMBUS_EMUL;
@@ -387,7 +381,6 @@ static int attach_inform(struct i2c_client *client)
387 381
388static struct i2c_algorithm saa7134_algo = { 382static struct i2c_algorithm saa7134_algo = {
389 .master_xfer = saa7134_i2c_xfer, 383 .master_xfer = saa7134_i2c_xfer,
390 .algo_control = algo_control,
391 .functionality = functionality, 384 .functionality = functionality,
392}; 385};
393 386
diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c
index c66aef6391..aabc42cae9 100644
--- a/drivers/media/video/usbvision/usbvision-i2c.c
+++ b/drivers/media/video/usbvision/usbvision-i2c.c
@@ -183,11 +183,6 @@ usbvision_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num)
183 return num; 183 return num;
184} 184}
185 185
186static int algo_control(struct i2c_adapter *adapter, unsigned int cmd, unsigned long arg)
187{
188 return 0;
189}
190
191static u32 functionality(struct i2c_adapter *adap) 186static u32 functionality(struct i2c_adapter *adap)
192{ 187{
193 return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR; 188 return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR;
@@ -199,7 +194,6 @@ static u32 functionality(struct i2c_adapter *adap)
199static struct i2c_algorithm usbvision_algo = { 194static struct i2c_algorithm usbvision_algo = {
200 .master_xfer = usbvision_i2c_xfer, 195 .master_xfer = usbvision_i2c_xfer,
201 .smbus_xfer = NULL, 196 .smbus_xfer = NULL,
202 .algo_control = algo_control,
203 .functionality = functionality, 197 .functionality = functionality,
204}; 198};
205 199
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index c606332512..5599a36490 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -674,7 +674,7 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
674 } 674 }
675 675
676 /* Copy to userspace */ 676 /* Copy to userspace */
677 retval=CALL(q,copy_to_user,q,data,count,nonblocking); 677 retval=CALL(q,video_copy_to_user,q,data,count,nonblocking);
678 if (retval<0) 678 if (retval<0)
679 goto done; 679 goto done;
680 680
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index 8bb7fdd306..3eb6123227 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -670,7 +670,7 @@ static struct videobuf_qtype_ops pci_ops = {
670 .sync = __videobuf_sync, 670 .sync = __videobuf_sync,
671 .mmap_free = __videobuf_mmap_free, 671 .mmap_free = __videobuf_mmap_free,
672 .mmap_mapper = __videobuf_mmap_mapper, 672 .mmap_mapper = __videobuf_mmap_mapper,
673 .copy_to_user = __videobuf_copy_to_user, 673 .video_copy_to_user = __videobuf_copy_to_user,
674 .copy_stream = __videobuf_copy_stream, 674 .copy_stream = __videobuf_copy_stream,
675}; 675};
676 676
diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
index 2e3689a12a..cd74341c98 100644
--- a/drivers/media/video/videobuf-vmalloc.c
+++ b/drivers/media/video/videobuf-vmalloc.c
@@ -320,7 +320,7 @@ static struct videobuf_qtype_ops qops = {
320 .sync = __videobuf_sync, 320 .sync = __videobuf_sync,
321 .mmap_free = __videobuf_mmap_free, 321 .mmap_free = __videobuf_mmap_free,
322 .mmap_mapper = __videobuf_mmap_mapper, 322 .mmap_mapper = __videobuf_mmap_mapper,
323 .copy_to_user = __videobuf_copy_to_user, 323 .video_copy_to_user = __videobuf_copy_to_user,
324 .copy_stream = __videobuf_copy_stream, 324 .copy_stream = __videobuf_copy_stream,
325}; 325};
326 326
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c
index 5a1b5f5a7d..9e7f3e685d 100644
--- a/drivers/media/video/w9968cf.c
+++ b/drivers/media/video/w9968cf.c
@@ -444,8 +444,6 @@ static int w9968cf_i2c_smbus_xfer(struct i2c_adapter*, u16 addr,
444static u32 w9968cf_i2c_func(struct i2c_adapter*); 444static u32 w9968cf_i2c_func(struct i2c_adapter*);
445static int w9968cf_i2c_attach_inform(struct i2c_client*); 445static int w9968cf_i2c_attach_inform(struct i2c_client*);
446static int w9968cf_i2c_detach_inform(struct i2c_client*); 446static int w9968cf_i2c_detach_inform(struct i2c_client*);
447static int w9968cf_i2c_control(struct i2c_adapter*, unsigned int cmd,
448 unsigned long arg);
449 447
450/* Memory management */ 448/* Memory management */
451static void* rvmalloc(unsigned long size); 449static void* rvmalloc(unsigned long size);
@@ -1543,21 +1541,12 @@ static int w9968cf_i2c_detach_inform(struct i2c_client* client)
1543} 1541}
1544 1542
1545 1543
1546static int
1547w9968cf_i2c_control(struct i2c_adapter* adapter, unsigned int cmd,
1548 unsigned long arg)
1549{
1550 return 0;
1551}
1552
1553
1554static int w9968cf_i2c_init(struct w9968cf_device* cam) 1544static int w9968cf_i2c_init(struct w9968cf_device* cam)
1555{ 1545{
1556 int err = 0; 1546 int err = 0;
1557 1547
1558 static struct i2c_algorithm algo = { 1548 static struct i2c_algorithm algo = {
1559 .smbus_xfer = w9968cf_i2c_smbus_xfer, 1549 .smbus_xfer = w9968cf_i2c_smbus_xfer,
1560 .algo_control = w9968cf_i2c_control,
1561 .functionality = w9968cf_i2c_func, 1550 .functionality = w9968cf_i2c_func,
1562 }; 1551 };
1563 1552
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index d195fb088f..8f77949f93 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -57,16 +57,11 @@ static int tifm_bus_match(struct device *dev, struct device_driver *drv)
57 return 0; 57 return 0;
58} 58}
59 59
60static int tifm_uevent(struct device *dev, char **envp, int num_envp, 60static int tifm_uevent(struct device *dev, struct kobj_uevent_env *env)
61 char *buffer, int buffer_size)
62{ 61{
63 struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); 62 struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
64 int i = 0;
65 int length = 0;
66 63
67 if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, 64 if (add_uevent_var(env, "TIFM_CARD_TYPE=%s", tifm_media_type_name(sock->type, 1)))
68 "TIFM_CARD_TYPE=%s",
69 tifm_media_type_name(sock->type, 1)))
70 return -ENOMEM; 65 return -ENOMEM;
71 66
72 return 0; 67 return 0;
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 8d6f601487..b0c22cad94 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -58,12 +58,11 @@ static int mmc_bus_match(struct device *dev, struct device_driver *drv)
58} 58}
59 59
60static int 60static int
61mmc_bus_uevent(struct device *dev, char **envp, int num_envp, char *buf, 61mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
62 int buf_size)
63{ 62{
64 struct mmc_card *card = dev_to_mmc_card(dev); 63 struct mmc_card *card = dev_to_mmc_card(dev);
65 const char *type; 64 const char *type;
66 int i = 0, length = 0; 65 int retval = 0;
67 66
68 switch (card->type) { 67 switch (card->type) {
69 case MMC_TYPE_MMC: 68 case MMC_TYPE_MMC:
@@ -80,20 +79,14 @@ mmc_bus_uevent(struct device *dev, char **envp, int num_envp, char *buf,
80 } 79 }
81 80
82 if (type) { 81 if (type) {
83 if (add_uevent_var(envp, num_envp, &i, 82 retval = add_uevent_var(env, "MMC_TYPE=%s", type);
84 buf, buf_size, &length, 83 if (retval)
85 "MMC_TYPE=%s", type)) 84 return retval;
86 return -ENOMEM;
87 } 85 }
88 86
89 if (add_uevent_var(envp, num_envp, &i, 87 retval = add_uevent_var(env, "MMC_NAME=%s", mmc_card_name(card));
90 buf, buf_size, &length,
91 "MMC_NAME=%s", mmc_card_name(card)))
92 return -ENOMEM;
93 88
94 envp[i] = NULL; 89 return retval;
95
96 return 0;
97} 90}
98 91
99static int mmc_bus_probe(struct device *dev) 92static int mmc_bus_probe(struct device *dev)
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 64fbc9759a..c65d203a84 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -143,7 +143,7 @@ void mmc_remove_host(struct mmc_host *host)
143 143
144 device_del(&host->class_dev); 144 device_del(&host->class_dev);
145 145
146 led_trigger_unregister(host->led); 146 led_trigger_unregister_simple(host->led);
147 147
148 spin_lock(&mmc_host_lock); 148 spin_lock(&mmc_host_lock);
149 idr_remove(&mmc_host_idr, host->index); 149 idr_remove(&mmc_host_idr, host->index);
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index fbec8cd55e..8848e8ac70 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -278,6 +278,14 @@ config SSFDC
278 This enables read only access to SmartMedia formatted NAND 278 This enables read only access to SmartMedia formatted NAND
279 flash. You can mount it with FAT file system. 279 flash. You can mount it with FAT file system.
280 280
281config MTD_OOPS
282 tristate "Log panic/oops to an MTD buffer"
283 depends on MTD
284 help
285 This enables panic and oops messages to be logged to a circular
286 buffer in a flash partition where it can be read back at some
287 later point.
288
281source "drivers/mtd/chips/Kconfig" 289source "drivers/mtd/chips/Kconfig"
282 290
283source "drivers/mtd/maps/Kconfig" 291source "drivers/mtd/maps/Kconfig"
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 6d958a4566..7f0b04b4ca 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_NFTL) += nftl.o
22obj-$(CONFIG_INFTL) += inftl.o 22obj-$(CONFIG_INFTL) += inftl.o
23obj-$(CONFIG_RFD_FTL) += rfd_ftl.o 23obj-$(CONFIG_RFD_FTL) += rfd_ftl.o
24obj-$(CONFIG_SSFDC) += ssfdc.o 24obj-$(CONFIG_SSFDC) += ssfdc.o
25obj-$(CONFIG_MTD_OOPS) += mtdoops.o
25 26
26nftl-objs := nftlcore.o nftlmount.o 27nftl-objs := nftlcore.o nftlmount.o
27inftl-objs := inftlcore.o inftlmount.o 28inftl-objs := inftlcore.o inftlmount.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 2f19fa78d2..3aa3dca56a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -526,7 +526,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
526 struct cfi_pri_intelext *extp = cfi->cmdset_priv; 526 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
527 527
528 /* 528 /*
529 * Probing of multi-partition flash ships. 529 * Probing of multi-partition flash chips.
530 * 530 *
531 * To support multiple partitions when available, we simply arrange 531 * To support multiple partitions when available, we simply arrange
532 * for each of them to have their own flchip structure even if they 532 * for each of them to have their own flchip structure even if they
@@ -653,7 +653,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
653 resettime: 653 resettime:
654 timeo = jiffies + HZ; 654 timeo = jiffies + HZ;
655 retry: 655 retry:
656 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) { 656 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
657 /* 657 /*
658 * OK. We have possibility for contension on the write/erase 658 * OK. We have possibility for contension on the write/erase
659 * operations which are global to the real chip and not per 659 * operations which are global to the real chip and not per
@@ -798,6 +798,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
798 if (mode == FL_READY && chip->oldstate == FL_READY) 798 if (mode == FL_READY && chip->oldstate == FL_READY)
799 return 0; 799 return 0;
800 800
801 case FL_SHUTDOWN:
802 /* The machine is rebooting now,so no one can get chip anymore */
803 return -EIO;
801 default: 804 default:
802 sleep: 805 sleep:
803 set_current_state(TASK_UNINTERRUPTIBLE); 806 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1166,28 +1169,34 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, si
1166{ 1169{
1167 struct map_info *map = mtd->priv; 1170 struct map_info *map = mtd->priv;
1168 struct cfi_private *cfi = map->fldrv_priv; 1171 struct cfi_private *cfi = map->fldrv_priv;
1169 unsigned long ofs; 1172 unsigned long ofs, last_end = 0;
1170 int chipnum; 1173 int chipnum;
1171 int ret = 0; 1174 int ret = 0;
1172 1175
1173 if (!map->virt || (from + len > mtd->size)) 1176 if (!map->virt || (from + len > mtd->size))
1174 return -EINVAL; 1177 return -EINVAL;
1175 1178
1176 *mtdbuf = (void *)map->virt + from;
1177 *retlen = 0;
1178
1179 /* Now lock the chip(s) to POINT state */ 1179 /* Now lock the chip(s) to POINT state */
1180 1180
1181 /* ofs: offset within the first chip that the first read should start */ 1181 /* ofs: offset within the first chip that the first read should start */
1182 chipnum = (from >> cfi->chipshift); 1182 chipnum = (from >> cfi->chipshift);
1183 ofs = from - (chipnum << cfi->chipshift); 1183 ofs = from - (chipnum << cfi->chipshift);
1184 1184
1185 *mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs;
1186 *retlen = 0;
1187
1185 while (len) { 1188 while (len) {
1186 unsigned long thislen; 1189 unsigned long thislen;
1187 1190
1188 if (chipnum >= cfi->numchips) 1191 if (chipnum >= cfi->numchips)
1189 break; 1192 break;
1190 1193
1194 /* We cannot point across chips that are virtually disjoint */
1195 if (!last_end)
1196 last_end = cfi->chips[chipnum].start;
1197 else if (cfi->chips[chipnum].start != last_end)
1198 break;
1199
1191 if ((len + ofs -1) >> cfi->chipshift) 1200 if ((len + ofs -1) >> cfi->chipshift)
1192 thislen = (1<<cfi->chipshift) - ofs; 1201 thislen = (1<<cfi->chipshift) - ofs;
1193 else 1202 else
@@ -1201,6 +1210,7 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, si
1201 len -= thislen; 1210 len -= thislen;
1202 1211
1203 ofs = 0; 1212 ofs = 0;
1213 last_end += 1 << cfi->chipshift;
1204 chipnum++; 1214 chipnum++;
1205 } 1215 }
1206 return 0; 1216 return 0;
@@ -1780,7 +1790,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1780 return ret; 1790 return ret;
1781} 1791}
1782 1792
1783int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) 1793static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1784{ 1794{
1785 unsigned long ofs, len; 1795 unsigned long ofs, len;
1786 int ret; 1796 int ret;
@@ -1930,7 +1940,7 @@ static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1930 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n", 1940 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1931 __FUNCTION__, ofs, len); 1941 __FUNCTION__, ofs, len);
1932 cfi_varsize_frob(mtd, do_printlockstatus_oneblock, 1942 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1933 ofs, len, 0); 1943 ofs, len, NULL);
1934#endif 1944#endif
1935 1945
1936 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock, 1946 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
@@ -1940,7 +1950,7 @@ static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1940 printk(KERN_DEBUG "%s: lock status after, ret=%d\n", 1950 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1941 __FUNCTION__, ret); 1951 __FUNCTION__, ret);
1942 cfi_varsize_frob(mtd, do_printlockstatus_oneblock, 1952 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1943 ofs, len, 0); 1953 ofs, len, NULL);
1944#endif 1954#endif
1945 1955
1946 return ret; 1956 return ret;
@@ -1954,7 +1964,7 @@ static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1954 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n", 1964 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1955 __FUNCTION__, ofs, len); 1965 __FUNCTION__, ofs, len);
1956 cfi_varsize_frob(mtd, do_printlockstatus_oneblock, 1966 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1957 ofs, len, 0); 1967 ofs, len, NULL);
1958#endif 1968#endif
1959 1969
1960 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock, 1970 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
@@ -1964,7 +1974,7 @@ static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1964 printk(KERN_DEBUG "%s: lock status after, ret=%d\n", 1974 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1965 __FUNCTION__, ret); 1975 __FUNCTION__, ret);
1966 cfi_varsize_frob(mtd, do_printlockstatus_oneblock, 1976 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1967 ofs, len, 0); 1977 ofs, len, NULL);
1968#endif 1978#endif
1969 1979
1970 return ret; 1980 return ret;
@@ -2255,7 +2265,7 @@ static void cfi_intelext_save_locks(struct mtd_info *mtd)
2255 adr = region->offset + block * len; 2265 adr = region->offset + block * len;
2256 2266
2257 status = cfi_varsize_frob(mtd, 2267 status = cfi_varsize_frob(mtd,
2258 do_getlockstatus_oneblock, adr, len, 0); 2268 do_getlockstatus_oneblock, adr, len, NULL);
2259 if (status) 2269 if (status)
2260 set_bit(block, region->lockmap); 2270 set_bit(block, region->lockmap);
2261 else 2271 else
@@ -2402,10 +2412,10 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
2402 and switch to array mode so any bootloader in 2412 and switch to array mode so any bootloader in
2403 flash is accessible for soft reboot. */ 2413 flash is accessible for soft reboot. */
2404 spin_lock(chip->mutex); 2414 spin_lock(chip->mutex);
2405 ret = get_chip(map, chip, chip->start, FL_SYNCING); 2415 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2406 if (!ret) { 2416 if (!ret) {
2407 map_write(map, CMD(0xff), chip->start); 2417 map_write(map, CMD(0xff), chip->start);
2408 chip->state = FL_READY; 2418 chip->state = FL_SHUTDOWN;
2409 } 2419 }
2410 spin_unlock(chip->mutex); 2420 spin_unlock(chip->mutex);
2411 } 2421 }
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 1f64458404..389acc600f 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1609,7 +1609,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1609} 1609}
1610 1610
1611 1611
1612int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) 1612static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1613{ 1613{
1614 unsigned long ofs, len; 1614 unsigned long ofs, len;
1615 int ret; 1615 int ret;
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 58e561e876..a67b23b87f 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -17,7 +17,6 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/init.h>
21 20
22#include <linux/mtd/mtd.h> 21#include <linux/mtd/mtd.h>
23#include <linux/mtd/map.h> 22#include <linux/mtd/map.h>
@@ -70,6 +69,7 @@
70 69
71/* Fujitsu */ 70/* Fujitsu */
72#define MBM29F040C 0x00A4 71#define MBM29F040C 0x00A4
72#define MBM29F800BA 0x2258
73#define MBM29LV650UE 0x22D7 73#define MBM29LV650UE 0x22D7
74#define MBM29LV320TE 0x22F6 74#define MBM29LV320TE 0x22F6
75#define MBM29LV320BE 0x22F9 75#define MBM29LV320BE 0x22F9
@@ -129,6 +129,7 @@
129#define LH28F640BF 0x00b0 129#define LH28F640BF 0x00b0
130 130
131/* ST - www.st.com */ 131/* ST - www.st.com */
132#define M29F800AB 0x0058
132#define M29W800DT 0x00D7 133#define M29W800DT 0x00D7
133#define M29W800DB 0x005B 134#define M29W800DB 0x005B
134#define M29W160DT 0x22C4 135#define M29W160DT 0x22C4
@@ -646,6 +647,23 @@ static const struct amd_flash_info jedec_table[] = {
646 } 647 }
647 }, { 648 }, {
648 .mfr_id = MANUFACTURER_FUJITSU, 649 .mfr_id = MANUFACTURER_FUJITSU,
650 .dev_id = MBM29F800BA,
651 .name = "Fujitsu MBM29F800BA",
652 .uaddr = {
653 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
654 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
655 },
656 .DevSize = SIZE_1MiB,
657 .CmdSet = P_ID_AMD_STD,
658 .NumEraseRegions= 4,
659 .regions = {
660 ERASEINFO(0x04000,1),
661 ERASEINFO(0x02000,2),
662 ERASEINFO(0x08000,1),
663 ERASEINFO(0x10000,15),
664 }
665 }, {
666 .mfr_id = MANUFACTURER_FUJITSU,
649 .dev_id = MBM29LV650UE, 667 .dev_id = MBM29LV650UE,
650 .name = "Fujitsu MBM29LV650UE", 668 .name = "Fujitsu MBM29LV650UE",
651 .uaddr = { 669 .uaddr = {
@@ -1510,6 +1528,23 @@ static const struct amd_flash_info jedec_table[] = {
1510 ERASEINFO(0x1000,256) 1528 ERASEINFO(0x1000,256)
1511 } 1529 }
1512 1530
1531 }, {
1532 .mfr_id = MANUFACTURER_ST,
1533 .dev_id = M29F800AB,
1534 .name = "ST M29F800AB",
1535 .uaddr = {
1536 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1537 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
1538 },
1539 .DevSize = SIZE_1MiB,
1540 .CmdSet = P_ID_AMD_STD,
1541 .NumEraseRegions= 4,
1542 .regions = {
1543 ERASEINFO(0x04000,1),
1544 ERASEINFO(0x02000,2),
1545 ERASEINFO(0x08000,1),
1546 ERASEINFO(0x10000,15),
1547 }
1513 }, { 1548 }, {
1514 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1549 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1515 .dev_id = M29W800DT, 1550 .dev_id = M29W800DT,
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index ff642f8fbe..811d56fd89 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -60,21 +60,22 @@ config MTD_DATAFLASH
60 Sometimes DataFlash chips are packaged inside MMC-format 60 Sometimes DataFlash chips are packaged inside MMC-format
61 cards; at this writing, the MMC stack won't handle those. 61 cards; at this writing, the MMC stack won't handle those.
62 62
63config MTD_DATAFLASH26
64 tristate "AT91RM9200 DataFlash AT26xxx"
65 depends on MTD && ARCH_AT91RM9200 && AT91_SPI
66 help
67 This enables access to the DataFlash chip (AT26xxx) on an
68 AT91RM9200-based board.
69 If you have such a board and such a DataFlash, say 'Y'.
70
71config MTD_M25P80 63config MTD_M25P80
72 tristate "Support for M25 SPI Flash" 64 tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)"
73 depends on SPI_MASTER && EXPERIMENTAL 65 depends on SPI_MASTER && EXPERIMENTAL
74 help 66 help
75 This enables access to ST M25P80 and similar SPI flash chips, 67 This enables access to most modern SPI flash chips, used for
76 used for program and data storage. Set up your spi devices 68 program and data storage. Series supported include Atmel AT26DF,
77 with the right board-specific platform data. 69 Spansion S25SL, SST 25VF, ST M25P, and Winbond W25X. Other chips
70 are supported as well. See the driver source for the current list,
71 or to add other chips.
72
73 Note that the original DataFlash chips (AT45 series, not AT26DF),
74 need an entirely different driver.
75
76 Set up your spi devices with the right board-specific platform data,
77 if you want to specify device partitioning or to use a device which
78 doesn't support the JEDEC ID instruction.
78 79
79config MTD_SLRAM 80config MTD_SLRAM
80 tristate "Uncached system RAM" 81 tristate "Uncached system RAM"
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 8ab568b3f5..0f788d5c4b 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -16,5 +16,4 @@ obj-$(CONFIG_MTD_MTDRAM) += mtdram.o
16obj-$(CONFIG_MTD_LART) += lart.o 16obj-$(CONFIG_MTD_LART) += lart.o
17obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o 17obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
18obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o 18obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
19obj-$(CONFIG_MTD_DATAFLASH26) += at91_dataflash26.o
20obj-$(CONFIG_MTD_M25P80) += m25p80.o 19obj-$(CONFIG_MTD_M25P80) += m25p80.o
diff --git a/drivers/mtd/devices/at91_dataflash26.c b/drivers/mtd/devices/at91_dataflash26.c
deleted file mode 100644
index 64ce37f986..0000000000
--- a/drivers/mtd/devices/at91_dataflash26.c
+++ /dev/null
@@ -1,485 +0,0 @@
1/*
2 * Atmel DataFlash driver for Atmel AT91RM9200 (Thunder)
3 * This is a largely modified version of at91_dataflash.c that
4 * supports AT26xxx dataflash chips. The original driver supports
5 * AT45xxx chips.
6 *
7 * Note: This driver was only tested with an AT26F004. It should be
8 * easy to make it work with other AT26xxx dataflash devices, though.
9 *
10 * Copyright (C) 2007 Hans J. Koch <hjk@linutronix.de>
11 * original Copyright (C) SAN People (Pty) Ltd
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * version 2 as published by the Free Software Foundation.
16*/
17
18#include <linux/config.h>
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/mtd/mtd.h>
22
23#include <asm/arch/at91_spi.h>
24
25#define DATAFLASH_MAX_DEVICES 4 /* max number of dataflash devices */
26
27#define MANUFACTURER_ID_ATMEL 0x1F
28
29/* command codes */
30
31#define AT26_OP_READ_STATUS 0x05
32#define AT26_OP_READ_DEV_ID 0x9F
33#define AT26_OP_ERASE_PAGE_4K 0x20
34#define AT26_OP_READ_ARRAY_FAST 0x0B
35#define AT26_OP_SEQUENTIAL_WRITE 0xAF
36#define AT26_OP_WRITE_ENABLE 0x06
37#define AT26_OP_WRITE_DISABLE 0x04
38#define AT26_OP_SECTOR_PROTECT 0x36
39#define AT26_OP_SECTOR_UNPROTECT 0x39
40
41/* status register bits */
42
43#define AT26_STATUS_BUSY 0x01
44#define AT26_STATUS_WRITE_ENABLE 0x02
45
46struct dataflash_local
47{
48 int spi; /* SPI chip-select number */
49 unsigned int page_size; /* number of bytes per page */
50};
51
52
53/* Detected DataFlash devices */
54static struct mtd_info* mtd_devices[DATAFLASH_MAX_DEVICES];
55static int nr_devices = 0;
56
57/* Allocate a single SPI transfer descriptor. We're assuming that if multiple
58 SPI transfers occur at the same time, spi_access_bus() will serialize them.
59 If this is not valid, then either (i) each dataflash 'priv' structure
60 needs it's own transfer descriptor, (ii) we lock this one, or (iii) use
61 another mechanism. */
62static struct spi_transfer_list* spi_transfer_desc;
63
64/*
65 * Perform a SPI transfer to access the DataFlash device.
66 */
67static int do_spi_transfer(int nr, char* tx, int tx_len, char* rx, int rx_len,
68 char* txnext, int txnext_len, char* rxnext, int rxnext_len)
69{
70 struct spi_transfer_list* list = spi_transfer_desc;
71
72 list->tx[0] = tx; list->txlen[0] = tx_len;
73 list->rx[0] = rx; list->rxlen[0] = rx_len;
74
75 list->tx[1] = txnext; list->txlen[1] = txnext_len;
76 list->rx[1] = rxnext; list->rxlen[1] = rxnext_len;
77
78 list->nr_transfers = nr;
79 /* Note: spi_transfer() always returns 0, there are no error checks */
80 return spi_transfer(list);
81}
82
83/*
84 * Return the status of the DataFlash device.
85 */
86static unsigned char at91_dataflash26_status(void)
87{
88 unsigned char command[2];
89
90 command[0] = AT26_OP_READ_STATUS;
91 command[1] = 0;
92
93 do_spi_transfer(1, command, 2, command, 2, NULL, 0, NULL, 0);
94
95 return command[1];
96}
97
98/*
99 * Poll the DataFlash device until it is READY.
100 */
101static unsigned char at91_dataflash26_waitready(void)
102{
103 unsigned char status;
104
105 while (1) {
106 status = at91_dataflash26_status();
107 if (!(status & AT26_STATUS_BUSY))
108 return status;
109 }
110}
111
112/*
113 * Enable/disable write access
114 */
115 static void at91_dataflash26_write_enable(int enable)
116{
117 unsigned char cmd[2];
118
119 DEBUG(MTD_DEBUG_LEVEL3, "write_enable: enable=%i\n", enable);
120
121 if (enable)
122 cmd[0] = AT26_OP_WRITE_ENABLE;
123 else
124 cmd[0] = AT26_OP_WRITE_DISABLE;
125 cmd[1] = 0;
126
127 do_spi_transfer(1, cmd, 2, cmd, 2, NULL, 0, NULL, 0);
128}
129
130/*
131 * Protect/unprotect sector
132 */
133 static void at91_dataflash26_sector_protect(loff_t addr, int protect)
134{
135 unsigned char cmd[4];
136
137 DEBUG(MTD_DEBUG_LEVEL3, "sector_protect: addr=0x%06x prot=%d\n",
138 addr, protect);
139
140 if (protect)
141 cmd[0] = AT26_OP_SECTOR_PROTECT;
142 else
143 cmd[0] = AT26_OP_SECTOR_UNPROTECT;
144 cmd[1] = (addr & 0x00FF0000) >> 16;
145 cmd[2] = (addr & 0x0000FF00) >> 8;
146 cmd[3] = (addr & 0x000000FF);
147
148 do_spi_transfer(1, cmd, 4, cmd, 4, NULL, 0, NULL, 0);
149}
150
151/*
152 * Erase blocks of flash.
153 */
154static int at91_dataflash26_erase(struct mtd_info *mtd,
155 struct erase_info *instr)
156{
157 struct dataflash_local *priv = (struct dataflash_local *) mtd->priv;
158 unsigned char cmd[4];
159
160 DEBUG(MTD_DEBUG_LEVEL1, "dataflash_erase: addr=0x%06x len=%i\n",
161 instr->addr, instr->len);
162
163 /* Sanity checks */
164 if (priv->page_size != 4096)
165 return -EINVAL; /* Can't handle other sizes at the moment */
166
167 if ( ((instr->len % mtd->erasesize) != 0)
168 || ((instr->len % priv->page_size) != 0)
169 || ((instr->addr % priv->page_size) != 0)
170 || ((instr->addr + instr->len) > mtd->size))
171 return -EINVAL;
172
173 spi_access_bus(priv->spi);
174
175 while (instr->len > 0) {
176 at91_dataflash26_write_enable(1);
177 at91_dataflash26_sector_protect(instr->addr, 0);
178 at91_dataflash26_write_enable(1);
179 cmd[0] = AT26_OP_ERASE_PAGE_4K;
180 cmd[1] = (instr->addr & 0x00FF0000) >> 16;
181 cmd[2] = (instr->addr & 0x0000FF00) >> 8;
182 cmd[3] = (instr->addr & 0x000000FF);
183
184 DEBUG(MTD_DEBUG_LEVEL3, "ERASE: (0x%02x) 0x%02x 0x%02x"
185 "0x%02x\n",
186 cmd[0], cmd[1], cmd[2], cmd[3]);
187
188 do_spi_transfer(1, cmd, 4, cmd, 4, NULL, 0, NULL, 0);
189 at91_dataflash26_waitready();
190
191 instr->addr += priv->page_size; /* next page */
192 instr->len -= priv->page_size;
193 }
194
195 at91_dataflash26_write_enable(0);
196 spi_release_bus(priv->spi);
197
198 /* Inform MTD subsystem that erase is complete */
199 instr->state = MTD_ERASE_DONE;
200 if (instr->callback)
201 instr->callback(instr);
202
203 return 0;
204}
205
206/*
207 * Read from the DataFlash device.
208 * from : Start offset in flash device
209 * len : Number of bytes to read
210 * retlen : Number of bytes actually read
211 * buf : Buffer that will receive data
212 */
213static int at91_dataflash26_read(struct mtd_info *mtd, loff_t from, size_t len,
214 size_t *retlen, u_char *buf)
215{
216 struct dataflash_local *priv = (struct dataflash_local *) mtd->priv;
217 unsigned char cmd[5];
218
219 DEBUG(MTD_DEBUG_LEVEL1, "dataflash_read: %lli .. %lli\n",
220 from, from+len);
221
222 *retlen = 0;
223
224 /* Sanity checks */
225 if (!len)
226 return 0;
227 if (from + len > mtd->size)
228 return -EINVAL;
229
230 cmd[0] = AT26_OP_READ_ARRAY_FAST;
231 cmd[1] = (from & 0x00FF0000) >> 16;
232 cmd[2] = (from & 0x0000FF00) >> 8;
233 cmd[3] = (from & 0x000000FF);
234 /* cmd[4] is a "Don't care" byte */
235
236 DEBUG(MTD_DEBUG_LEVEL3, "READ: (0x%02x) 0x%02x 0x%02x 0x%02x\n",
237 cmd[0], cmd[1], cmd[2], cmd[3]);
238
239 spi_access_bus(priv->spi);
240 do_spi_transfer(2, cmd, 5, cmd, 5, buf, len, buf, len);
241 spi_release_bus(priv->spi);
242
243 *retlen = len;
244 return 0;
245}
246
247/*
248 * Write to the DataFlash device.
249 * to : Start offset in flash device
250 * len : Number of bytes to write
251 * retlen : Number of bytes actually written
252 * buf : Buffer containing the data
253 */
254static int at91_dataflash26_write(struct mtd_info *mtd, loff_t to, size_t len,
255 size_t *retlen, const u_char *buf)
256{
257 struct dataflash_local *priv = (struct dataflash_local *) mtd->priv;
258 unsigned int addr, buf_index = 0;
259 int ret = -EIO, sector, last_sector;
260 unsigned char status, cmd[5];
261
262 DEBUG(MTD_DEBUG_LEVEL1, "dataflash_write: %lli .. %lli\n", to, to+len);
263
264 *retlen = 0;
265
266 /* Sanity checks */
267 if (!len)
268 return 0;
269 if (to + len > mtd->size)
270 return -EINVAL;
271
272 spi_access_bus(priv->spi);
273
274 addr = to;
275 last_sector = -1;
276
277 while (buf_index < len) {
278 sector = addr / priv->page_size;
279 /* Write first byte if a new sector begins */
280 if (sector != last_sector) {
281 at91_dataflash26_write_enable(1);
282 at91_dataflash26_sector_protect(addr, 0);
283 at91_dataflash26_write_enable(1);
284
285 /* Program first byte of a new sector */
286 cmd[0] = AT26_OP_SEQUENTIAL_WRITE;
287 cmd[1] = (addr & 0x00FF0000) >> 16;
288 cmd[2] = (addr & 0x0000FF00) >> 8;
289 cmd[3] = (addr & 0x000000FF);
290 cmd[4] = buf[buf_index++];
291 do_spi_transfer(1, cmd, 5, cmd, 5, NULL, 0, NULL, 0);
292 status = at91_dataflash26_waitready();
293 addr++;
294 /* On write errors, the chip resets the write enable
295 flag. This also happens after the last byte of a
296 sector is successfully programmed. */
297 if ( ( !(status & AT26_STATUS_WRITE_ENABLE))
298 && ((addr % priv->page_size) != 0) ) {
299 DEBUG(MTD_DEBUG_LEVEL1,
300 "write error1: addr=0x%06x, "
301 "status=0x%02x\n", addr, status);
302 goto write_err;
303 }
304 (*retlen)++;
305 last_sector = sector;
306 }
307
308 /* Write subsequent bytes in the same sector */
309 cmd[0] = AT26_OP_SEQUENTIAL_WRITE;
310 cmd[1] = buf[buf_index++];
311 do_spi_transfer(1, cmd, 2, cmd, 2, NULL, 0, NULL, 0);
312 status = at91_dataflash26_waitready();
313 addr++;
314
315 if ( ( !(status & AT26_STATUS_WRITE_ENABLE))
316 && ((addr % priv->page_size) != 0) ) {
317 DEBUG(MTD_DEBUG_LEVEL1, "write error2: addr=0x%06x, "
318 "status=0x%02x\n", addr, status);
319 goto write_err;
320 }
321
322 (*retlen)++;
323 }
324
325 ret = 0;
326 at91_dataflash26_write_enable(0);
327write_err:
328 spi_release_bus(priv->spi);
329 return ret;
330}
331
332/*
333 * Initialize and register DataFlash device with MTD subsystem.
334 */
335static int __init add_dataflash(int channel, char *name, int nr_pages,
336 int pagesize)
337{
338 struct mtd_info *device;
339 struct dataflash_local *priv;
340
341 if (nr_devices >= DATAFLASH_MAX_DEVICES) {
342 printk(KERN_ERR "at91_dataflash26: Too many devices "
343 "detected\n");
344 return 0;
345 }
346
347 device = kzalloc(sizeof(struct mtd_info) + strlen(name) + 8,
348 GFP_KERNEL);
349 if (!device)
350 return -ENOMEM;
351
352 device->name = (char *)&device[1];
353 sprintf(device->name, "%s.spi%d", name, channel);
354 device->size = nr_pages * pagesize;
355 device->erasesize = pagesize;
356 device->owner = THIS_MODULE;
357 device->type = MTD_DATAFLASH;
358 device->flags = MTD_CAP_NORFLASH;
359 device->erase = at91_dataflash26_erase;
360 device->read = at91_dataflash26_read;
361 device->write = at91_dataflash26_write;
362
363 priv = (struct dataflash_local *)kzalloc(sizeof(struct dataflash_local),
364 GFP_KERNEL);
365 if (!priv) {
366 kfree(device);
367 return -ENOMEM;
368 }
369
370 priv->spi = channel;
371 priv->page_size = pagesize;
372 device->priv = priv;
373
374 mtd_devices[nr_devices] = device;
375 nr_devices++;
376 printk(KERN_INFO "at91_dataflash26: %s detected [spi%i] (%i bytes)\n",
377 name, channel, device->size);
378
379 return add_mtd_device(device);
380}
381
382/*
383 * Detect and initialize DataFlash device connected to specified SPI channel.
384 *
385 */
386
387struct dataflash26_types {
388 unsigned char id0;
389 unsigned char id1;
390 char *name;
391 int pagesize;
392 int nr_pages;
393};
394
395struct dataflash26_types df26_types[] = {
396 {
397 .id0 = 0x04,
398 .id1 = 0x00,
399 .name = "AT26F004",
400 .pagesize = 4096,
401 .nr_pages = 128,
402 },
403 {
404 .id0 = 0x45,
405 .id1 = 0x01,
406 .name = "AT26DF081A", /* Not tested ! */
407 .pagesize = 4096,
408 .nr_pages = 256,
409 },
410};
411
412static int __init at91_dataflash26_detect(int channel)
413{
414 unsigned char status, cmd[5];
415 int i;
416
417 spi_access_bus(channel);
418 status = at91_dataflash26_status();
419
420 if (status == 0 || status == 0xff) {
421 printk(KERN_ERR "at91_dataflash26_detect: status error %d\n",
422 status);
423 spi_release_bus(channel);
424 return -ENODEV;
425 }
426
427 cmd[0] = AT26_OP_READ_DEV_ID;
428 do_spi_transfer(1, cmd, 5, cmd, 5, NULL, 0, NULL, 0);
429 spi_release_bus(channel);
430
431 if (cmd[1] != MANUFACTURER_ID_ATMEL)
432 return -ENODEV;
433
434 for (i = 0; i < ARRAY_SIZE(df26_types); i++) {
435 if ( cmd[2] == df26_types[i].id0
436 && cmd[3] == df26_types[i].id1)
437 return add_dataflash(channel,
438 df26_types[i].name,
439 df26_types[i].nr_pages,
440 df26_types[i].pagesize);
441 }
442
443 printk(KERN_ERR "at91_dataflash26_detect: Unsupported device "
444 "(0x%02x/0x%02x)\n", cmd[2], cmd[3]);
445 return -ENODEV;
446}
447
448static int __init at91_dataflash26_init(void)
449{
450 spi_transfer_desc = kmalloc(sizeof(struct spi_transfer_list),
451 GFP_KERNEL);
452 if (!spi_transfer_desc)
453 return -ENOMEM;
454
455 /* DataFlash (SPI chip select 0) */
456 at91_dataflash26_detect(0);
457
458#ifdef CONFIG_MTD_AT91_DATAFLASH_CARD
459 /* DataFlash card (SPI chip select 3) */
460 at91_dataflash26_detect(3);
461#endif
462 return 0;
463}
464
465static void __exit at91_dataflash26_exit(void)
466{
467 int i;
468
469 for (i = 0; i < DATAFLASH_MAX_DEVICES; i++) {
470 if (mtd_devices[i]) {
471 del_mtd_device(mtd_devices[i]);
472 kfree(mtd_devices[i]->priv);
473 kfree(mtd_devices[i]);
474 }
475 }
476 nr_devices = 0;
477 kfree(spi_transfer_desc);
478}
479
480module_init(at91_dataflash26_init);
481module_exit(at91_dataflash26_exit);
482
483MODULE_LICENSE("GPL");
484MODULE_AUTHOR("Hans J. Koch");
485MODULE_DESCRIPTION("DataFlash AT26xxx driver for Atmel AT91RM9200");
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
index 54aa759076..d8cc94ec4e 100644
--- a/drivers/mtd/devices/docprobe.c
+++ b/drivers/mtd/devices/docprobe.c
@@ -81,9 +81,7 @@ static unsigned long __initdata doc_locations[] = {
81#endif /* CONFIG_MTD_DOCPROBE_HIGH */ 81#endif /* CONFIG_MTD_DOCPROBE_HIGH */
82#elif defined(__PPC__) 82#elif defined(__PPC__)
83 0xe4000000, 83 0xe4000000,
84#elif defined(CONFIG_MOMENCO_OCELOT_G) 84#else
85 0xff000000,
86##else
87#warning Unknown architecture for DiskOnChip. No default probe locations defined 85#warning Unknown architecture for DiskOnChip. No default probe locations defined
88#endif 86#endif
89 0xffffffff }; 87 0xffffffff };
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 78c2511ae9..98df5bcc02 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * MTD SPI driver for ST M25Pxx flash chips 2 * MTD SPI driver for ST M25Pxx (and similar) serial flash chips
3 * 3 *
4 * Author: Mike Lavender, mike@steroidmicros.com 4 * Author: Mike Lavender, mike@steroidmicros.com
5 * 5 *
@@ -19,33 +19,32 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/interrupt.h> 22#include <linux/mutex.h>
23
23#include <linux/mtd/mtd.h> 24#include <linux/mtd/mtd.h>
24#include <linux/mtd/partitions.h> 25#include <linux/mtd/partitions.h>
26
25#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
26#include <linux/spi/flash.h> 28#include <linux/spi/flash.h>
27 29
28#include <asm/semaphore.h>
29
30
31/* NOTE: AT 25F and SST 25LF series are very similar,
32 * but commands for sector erase and chip id differ...
33 */
34 30
35#define FLASH_PAGESIZE 256 31#define FLASH_PAGESIZE 256
36 32
37/* Flash opcodes. */ 33/* Flash opcodes. */
38#define OPCODE_WREN 6 /* Write enable */ 34#define OPCODE_WREN 0x06 /* Write enable */
39#define OPCODE_RDSR 5 /* Read status register */ 35#define OPCODE_RDSR 0x05 /* Read status register */
40#define OPCODE_READ 3 /* Read data bytes */ 36#define OPCODE_READ 0x03 /* Read data bytes (low frequency) */
41#define OPCODE_PP 2 /* Page program */ 37#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */
42#define OPCODE_SE 0xd8 /* Sector erase */ 38#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */
43#define OPCODE_RES 0xab /* Read Electronic Signature */ 39#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
40#define OPCODE_BE_32K 0x52 /* Erase 32KiB block */
41#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
44#define OPCODE_RDID 0x9f /* Read JEDEC ID */ 42#define OPCODE_RDID 0x9f /* Read JEDEC ID */
45 43
46/* Status Register bits. */ 44/* Status Register bits. */
47#define SR_WIP 1 /* Write in progress */ 45#define SR_WIP 1 /* Write in progress */
48#define SR_WEL 2 /* Write enable latch */ 46#define SR_WEL 2 /* Write enable latch */
47/* meaning of other SR_* bits may differ between vendors */
49#define SR_BP0 4 /* Block protect 0 */ 48#define SR_BP0 4 /* Block protect 0 */
50#define SR_BP1 8 /* Block protect 1 */ 49#define SR_BP1 8 /* Block protect 1 */
51#define SR_BP2 0x10 /* Block protect 2 */ 50#define SR_BP2 0x10 /* Block protect 2 */
@@ -65,9 +64,10 @@
65 64
66struct m25p { 65struct m25p {
67 struct spi_device *spi; 66 struct spi_device *spi;
68 struct semaphore lock; 67 struct mutex lock;
69 struct mtd_info mtd; 68 struct mtd_info mtd;
70 unsigned partitioned; 69 unsigned partitioned:1;
70 u8 erase_opcode;
71 u8 command[4]; 71 u8 command[4];
72}; 72};
73 73
@@ -150,8 +150,9 @@ static int wait_till_ready(struct m25p *flash)
150 */ 150 */
151static int erase_sector(struct m25p *flash, u32 offset) 151static int erase_sector(struct m25p *flash, u32 offset)
152{ 152{
153 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s at 0x%08x\n", flash->spi->dev.bus_id, 153 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n",
154 __FUNCTION__, offset); 154 flash->spi->dev.bus_id, __FUNCTION__,
155 flash->mtd.erasesize / 1024, offset);
155 156
156 /* Wait until finished previous write command. */ 157 /* Wait until finished previous write command. */
157 if (wait_till_ready(flash)) 158 if (wait_till_ready(flash))
@@ -161,7 +162,7 @@ static int erase_sector(struct m25p *flash, u32 offset)
161 write_enable(flash); 162 write_enable(flash);
162 163
163 /* Set up command buffer. */ 164 /* Set up command buffer. */
164 flash->command[0] = OPCODE_SE; 165 flash->command[0] = flash->erase_opcode;
165 flash->command[1] = offset >> 16; 166 flash->command[1] = offset >> 16;
166 flash->command[2] = offset >> 8; 167 flash->command[2] = offset >> 8;
167 flash->command[3] = offset; 168 flash->command[3] = offset;
@@ -201,13 +202,17 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
201 addr = instr->addr; 202 addr = instr->addr;
202 len = instr->len; 203 len = instr->len;
203 204
204 down(&flash->lock); 205 mutex_lock(&flash->lock);
206
207 /* REVISIT in some cases we could speed up erasing large regions
208 * by using OPCODE_SE instead of OPCODE_BE_4K
209 */
205 210
206 /* now erase those sectors */ 211 /* now erase those sectors */
207 while (len) { 212 while (len) {
208 if (erase_sector(flash, addr)) { 213 if (erase_sector(flash, addr)) {
209 instr->state = MTD_ERASE_FAILED; 214 instr->state = MTD_ERASE_FAILED;
210 up(&flash->lock); 215 mutex_unlock(&flash->lock);
211 return -EIO; 216 return -EIO;
212 } 217 }
213 218
@@ -215,7 +220,7 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
215 len -= mtd->erasesize; 220 len -= mtd->erasesize;
216 } 221 }
217 222
218 up(&flash->lock); 223 mutex_unlock(&flash->lock);
219 224
220 instr->state = MTD_ERASE_DONE; 225 instr->state = MTD_ERASE_DONE;
221 mtd_erase_callback(instr); 226 mtd_erase_callback(instr);
@@ -260,16 +265,19 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
260 if (retlen) 265 if (retlen)
261 *retlen = 0; 266 *retlen = 0;
262 267
263 down(&flash->lock); 268 mutex_lock(&flash->lock);
264 269
265 /* Wait till previous write/erase is done. */ 270 /* Wait till previous write/erase is done. */
266 if (wait_till_ready(flash)) { 271 if (wait_till_ready(flash)) {
267 /* REVISIT status return?? */ 272 /* REVISIT status return?? */
268 up(&flash->lock); 273 mutex_unlock(&flash->lock);
269 return 1; 274 return 1;
270 } 275 }
271 276
272 /* NOTE: OPCODE_FAST_READ (if available) is faster... */ 277 /* FIXME switch to OPCODE_FAST_READ. It's required for higher
278 * clocks; and at this writing, every chip this driver handles
279 * supports that opcode.
280 */
273 281
274 /* Set up the write data buffer. */ 282 /* Set up the write data buffer. */
275 flash->command[0] = OPCODE_READ; 283 flash->command[0] = OPCODE_READ;
@@ -281,7 +289,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
281 289
282 *retlen = m.actual_length - sizeof(flash->command); 290 *retlen = m.actual_length - sizeof(flash->command);
283 291
284 up(&flash->lock); 292 mutex_unlock(&flash->lock);
285 293
286 return 0; 294 return 0;
287} 295}
@@ -323,7 +331,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
323 t[1].tx_buf = buf; 331 t[1].tx_buf = buf;
324 spi_message_add_tail(&t[1], &m); 332 spi_message_add_tail(&t[1], &m);
325 333
326 down(&flash->lock); 334 mutex_lock(&flash->lock);
327 335
328 /* Wait until finished previous write command. */ 336 /* Wait until finished previous write command. */
329 if (wait_till_ready(flash)) 337 if (wait_till_ready(flash))
@@ -381,10 +389,10 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
381 if (retlen) 389 if (retlen)
382 *retlen += m.actual_length 390 *retlen += m.actual_length
383 - sizeof(flash->command); 391 - sizeof(flash->command);
384 } 392 }
385 } 393 }
386 394
387 up(&flash->lock); 395 mutex_unlock(&flash->lock);
388 396
389 return 0; 397 return 0;
390} 398}
@@ -398,24 +406,118 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
398 406
399struct flash_info { 407struct flash_info {
400 char *name; 408 char *name;
401 u8 id; 409
402 u16 jedec_id; 410 /* JEDEC id zero means "no ID" (most older chips); otherwise it has
411 * a high byte of zero plus three data bytes: the manufacturer id,
412 * then a two byte device id.
413 */
414 u32 jedec_id;
415
416 /* The size listed here is what works with OPCODE_SE, which isn't
417 * necessarily called a "sector" by the vendor.
418 */
403 unsigned sector_size; 419 unsigned sector_size;
404 unsigned n_sectors; 420 u16 n_sectors;
421
422 u16 flags;
423#define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */
405}; 424};
406 425
426
427/* NOTE: double check command sets and memory organization when you add
428 * more flash chips. This current list focusses on newer chips, which
429 * have been converging on command sets which including JEDEC ID.
430 */
407static struct flash_info __devinitdata m25p_data [] = { 431static struct flash_info __devinitdata m25p_data [] = {
408 /* REVISIT: fill in JEDEC ids, for parts that have them */ 432
409 { "m25p05", 0x05, 0x2010, 32 * 1024, 2 }, 433 /* Atmel -- some are (confusingly) marketed as "DataFlash" */
410 { "m25p10", 0x10, 0x2011, 32 * 1024, 4 }, 434 { "at25fs010", 0x1f6601, 32 * 1024, 4, SECT_4K, },
411 { "m25p20", 0x11, 0x2012, 64 * 1024, 4 }, 435 { "at25fs040", 0x1f6604, 64 * 1024, 8, SECT_4K, },
412 { "m25p40", 0x12, 0x2013, 64 * 1024, 8 }, 436
413 { "m25p80", 0x13, 0x0000, 64 * 1024, 16 }, 437 { "at25df041a", 0x1f4401, 64 * 1024, 8, SECT_4K, },
414 { "m25p16", 0x14, 0x2015, 64 * 1024, 32 }, 438
415 { "m25p32", 0x15, 0x2016, 64 * 1024, 64 }, 439 { "at26f004", 0x1f0400, 64 * 1024, 8, SECT_4K, },
416 { "m25p64", 0x16, 0x2017, 64 * 1024, 128 }, 440 { "at26df081a", 0x1f4501, 64 * 1024, 16, SECT_4K, },
441 { "at26df161a", 0x1f4601, 64 * 1024, 32, SECT_4K, },
442 { "at26df321", 0x1f4701, 64 * 1024, 64, SECT_4K, },
443
444 /* Spansion -- single (large) sector size only, at least
445 * for the chips listed here (without boot sectors).
446 */
447 { "s25sl004a", 0x010212, 64 * 1024, 8, },
448 { "s25sl008a", 0x010213, 64 * 1024, 16, },
449 { "s25sl016a", 0x010214, 64 * 1024, 32, },
450 { "s25sl032a", 0x010215, 64 * 1024, 64, },
451 { "s25sl064a", 0x010216, 64 * 1024, 128, },
452
453 /* SST -- large erase sizes are "overlays", "sectors" are 4K */
454 { "sst25vf040b", 0xbf258d, 64 * 1024, 8, SECT_4K, },
455 { "sst25vf080b", 0xbf258e, 64 * 1024, 16, SECT_4K, },
456 { "sst25vf016b", 0xbf2541, 64 * 1024, 32, SECT_4K, },
457 { "sst25vf032b", 0xbf254a, 64 * 1024, 64, SECT_4K, },
458
459 /* ST Microelectronics -- newer production may have feature updates */
460 { "m25p05", 0x202010, 32 * 1024, 2, },
461 { "m25p10", 0x202011, 32 * 1024, 4, },
462 { "m25p20", 0x202012, 64 * 1024, 4, },
463 { "m25p40", 0x202013, 64 * 1024, 8, },
464 { "m25p80", 0, 64 * 1024, 16, },
465 { "m25p16", 0x202015, 64 * 1024, 32, },
466 { "m25p32", 0x202016, 64 * 1024, 64, },
467 { "m25p64", 0x202017, 64 * 1024, 128, },
468 { "m25p128", 0x202018, 256 * 1024, 64, },
469
470 { "m45pe80", 0x204014, 64 * 1024, 16, },
471 { "m45pe16", 0x204015, 64 * 1024, 32, },
472
473 { "m25pe80", 0x208014, 64 * 1024, 16, },
474 { "m25pe16", 0x208015, 64 * 1024, 32, SECT_4K, },
475
476 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
477 { "w25x10", 0xef3011, 64 * 1024, 2, SECT_4K, },
478 { "w25x20", 0xef3012, 64 * 1024, 4, SECT_4K, },
479 { "w25x40", 0xef3013, 64 * 1024, 8, SECT_4K, },
480 { "w25x80", 0xef3014, 64 * 1024, 16, SECT_4K, },
481 { "w25x16", 0xef3015, 64 * 1024, 32, SECT_4K, },
482 { "w25x32", 0xef3016, 64 * 1024, 64, SECT_4K, },
483 { "w25x64", 0xef3017, 64 * 1024, 128, SECT_4K, },
417}; 484};
418 485
486static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
487{
488 int tmp;
489 u8 code = OPCODE_RDID;
490 u8 id[3];
491 u32 jedec;
492 struct flash_info *info;
493
494 /* JEDEC also defines an optional "extended device information"
495 * string for after vendor-specific data, after the three bytes
496 * we use here. Supporting some chips might require using it.
497 */
498 tmp = spi_write_then_read(spi, &code, 1, id, 3);
499 if (tmp < 0) {
500 DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
501 spi->dev.bus_id, tmp);
502 return NULL;
503 }
504 jedec = id[0];
505 jedec = jedec << 8;
506 jedec |= id[1];
507 jedec = jedec << 8;
508 jedec |= id[2];
509
510 for (tmp = 0, info = m25p_data;
511 tmp < ARRAY_SIZE(m25p_data);
512 tmp++, info++) {
513 if (info->jedec_id == jedec)
514 return info;
515 }
516 dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
517 return NULL;
518}
519
520
419/* 521/*
420 * board specific setup should have ensured the SPI clock used here 522 * board specific setup should have ensured the SPI clock used here
421 * matches what the READ command supports, at least until this driver 523 * matches what the READ command supports, at least until this driver
@@ -429,37 +531,51 @@ static int __devinit m25p_probe(struct spi_device *spi)
429 unsigned i; 531 unsigned i;
430 532
431 /* Platform data helps sort out which chip type we have, as 533 /* Platform data helps sort out which chip type we have, as
432 * well as how this board partitions it. 534 * well as how this board partitions it. If we don't have
535 * a chip ID, try the JEDEC id commands; they'll work for most
536 * newer chips, even if we don't recognize the particular chip.
433 */ 537 */
434 data = spi->dev.platform_data; 538 data = spi->dev.platform_data;
435 if (!data || !data->type) { 539 if (data && data->type) {
436 /* FIXME some chips can identify themselves with RES 540 for (i = 0, info = m25p_data;
437 * or JEDEC get-id commands. Try them ... 541 i < ARRAY_SIZE(m25p_data);
438 */ 542 i++, info++) {
439 DEBUG(MTD_DEBUG_LEVEL1, "%s: no chip id\n", 543 if (strcmp(data->type, info->name) == 0)
440 spi->dev.bus_id); 544 break;
441 return -ENODEV; 545 }
442 }
443 546
444 for (i = 0, info = m25p_data; i < ARRAY_SIZE(m25p_data); i++, info++) { 547 /* unrecognized chip? */
445 if (strcmp(data->type, info->name) == 0) 548 if (i == ARRAY_SIZE(m25p_data)) {
446 break; 549 DEBUG(MTD_DEBUG_LEVEL0, "%s: unrecognized id %s\n",
447 } 550 spi->dev.bus_id, data->type);
448 if (i == ARRAY_SIZE(m25p_data)) { 551 info = NULL;
449 DEBUG(MTD_DEBUG_LEVEL1, "%s: unrecognized id %s\n", 552
450 spi->dev.bus_id, data->type); 553 /* recognized; is that chip really what's there? */
554 } else if (info->jedec_id) {
555 struct flash_info *chip = jedec_probe(spi);
556
557 if (!chip || chip != info) {
558 dev_warn(&spi->dev, "found %s, expected %s\n",
559 chip ? chip->name : "UNKNOWN",
560 info->name);
561 info = NULL;
562 }
563 }
564 } else
565 info = jedec_probe(spi);
566
567 if (!info)
451 return -ENODEV; 568 return -ENODEV;
452 }
453 569
454 flash = kzalloc(sizeof *flash, GFP_KERNEL); 570 flash = kzalloc(sizeof *flash, GFP_KERNEL);
455 if (!flash) 571 if (!flash)
456 return -ENOMEM; 572 return -ENOMEM;
457 573
458 flash->spi = spi; 574 flash->spi = spi;
459 init_MUTEX(&flash->lock); 575 mutex_init(&flash->lock);
460 dev_set_drvdata(&spi->dev, flash); 576 dev_set_drvdata(&spi->dev, flash);
461 577
462 if (data->name) 578 if (data && data->name)
463 flash->mtd.name = data->name; 579 flash->mtd.name = data->name;
464 else 580 else
465 flash->mtd.name = spi->dev.bus_id; 581 flash->mtd.name = spi->dev.bus_id;
@@ -468,17 +584,25 @@ static int __devinit m25p_probe(struct spi_device *spi)
468 flash->mtd.writesize = 1; 584 flash->mtd.writesize = 1;
469 flash->mtd.flags = MTD_CAP_NORFLASH; 585 flash->mtd.flags = MTD_CAP_NORFLASH;
470 flash->mtd.size = info->sector_size * info->n_sectors; 586 flash->mtd.size = info->sector_size * info->n_sectors;
471 flash->mtd.erasesize = info->sector_size;
472 flash->mtd.erase = m25p80_erase; 587 flash->mtd.erase = m25p80_erase;
473 flash->mtd.read = m25p80_read; 588 flash->mtd.read = m25p80_read;
474 flash->mtd.write = m25p80_write; 589 flash->mtd.write = m25p80_write;
475 590
591 /* prefer "small sector" erase if possible */
592 if (info->flags & SECT_4K) {
593 flash->erase_opcode = OPCODE_BE_4K;
594 flash->mtd.erasesize = 4096;
595 } else {
596 flash->erase_opcode = OPCODE_SE;
597 flash->mtd.erasesize = info->sector_size;
598 }
599
476 dev_info(&spi->dev, "%s (%d Kbytes)\n", info->name, 600 dev_info(&spi->dev, "%s (%d Kbytes)\n", info->name,
477 flash->mtd.size / 1024); 601 flash->mtd.size / 1024);
478 602
479 DEBUG(MTD_DEBUG_LEVEL2, 603 DEBUG(MTD_DEBUG_LEVEL2,
480 "mtd .name = %s, .size = 0x%.8x (%uM) " 604 "mtd .name = %s, .size = 0x%.8x (%uMiB) "
481 ".erasesize = 0x%.8x (%uK) .numeraseregions = %d\n", 605 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
482 flash->mtd.name, 606 flash->mtd.name,
483 flash->mtd.size, flash->mtd.size / (1024*1024), 607 flash->mtd.size, flash->mtd.size / (1024*1024),
484 flash->mtd.erasesize, flash->mtd.erasesize / 1024, 608 flash->mtd.erasesize, flash->mtd.erasesize / 1024,
@@ -488,7 +612,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
488 for (i = 0; i < flash->mtd.numeraseregions; i++) 612 for (i = 0; i < flash->mtd.numeraseregions; i++)
489 DEBUG(MTD_DEBUG_LEVEL2, 613 DEBUG(MTD_DEBUG_LEVEL2,
490 "mtd.eraseregions[%d] = { .offset = 0x%.8x, " 614 "mtd.eraseregions[%d] = { .offset = 0x%.8x, "
491 ".erasesize = 0x%.8x (%uK), " 615 ".erasesize = 0x%.8x (%uKiB), "
492 ".numblocks = %d }\n", 616 ".numblocks = %d }\n",
493 i, flash->mtd.eraseregions[i].offset, 617 i, flash->mtd.eraseregions[i].offset,
494 flash->mtd.eraseregions[i].erasesize, 618 flash->mtd.eraseregions[i].erasesize,
@@ -516,14 +640,14 @@ static int __devinit m25p_probe(struct spi_device *spi)
516 } 640 }
517 641
518 if (nr_parts > 0) { 642 if (nr_parts > 0) {
519 for (i = 0; i < data->nr_parts; i++) { 643 for (i = 0; i < nr_parts; i++) {
520 DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = " 644 DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
521 "{.name = %s, .offset = 0x%.8x, " 645 "{.name = %s, .offset = 0x%.8x, "
522 ".size = 0x%.8x (%uK) }\n", 646 ".size = 0x%.8x (%uKiB) }\n",
523 i, data->parts[i].name, 647 i, parts[i].name,
524 data->parts[i].offset, 648 parts[i].offset,
525 data->parts[i].size, 649 parts[i].size,
526 data->parts[i].size / 1024); 650 parts[i].size / 1024);
527 } 651 }
528 flash->partitioned = 1; 652 flash->partitioned = 1;
529 return add_mtd_partitions(&flash->mtd, parts, nr_parts); 653 return add_mtd_partitions(&flash->mtd, parts, nr_parts);
@@ -560,6 +684,11 @@ static struct spi_driver m25p80_driver = {
560 }, 684 },
561 .probe = m25p_probe, 685 .probe = m25p_probe,
562 .remove = __devexit_p(m25p_remove), 686 .remove = __devexit_p(m25p_remove),
687
688 /* REVISIT: many of these chips have deep power-down modes, which
689 * should clearly be entered on suspend() to minimize power use.
690 * And also when they're otherwise idle...
691 */
563}; 692};
564 693
565 694
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index a987e917f4..a5ed6d232c 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/mutex.h>
17#include <linux/spi/spi.h> 18#include <linux/spi/spi.h>
18#include <linux/spi/flash.h> 19#include <linux/spi/flash.h>
19 20
@@ -89,7 +90,7 @@ struct dataflash {
89 unsigned short page_offset; /* offset in flash address */ 90 unsigned short page_offset; /* offset in flash address */
90 unsigned int page_size; /* of bytes per page */ 91 unsigned int page_size; /* of bytes per page */
91 92
92 struct semaphore lock; 93 struct mutex lock;
93 struct spi_device *spi; 94 struct spi_device *spi;
94 95
95 struct mtd_info mtd; 96 struct mtd_info mtd;
@@ -167,7 +168,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
167 x.len = 4; 168 x.len = 4;
168 spi_message_add_tail(&x, &msg); 169 spi_message_add_tail(&x, &msg);
169 170
170 down(&priv->lock); 171 mutex_lock(&priv->lock);
171 while (instr->len > 0) { 172 while (instr->len > 0) {
172 unsigned int pageaddr; 173 unsigned int pageaddr;
173 int status; 174 int status;
@@ -210,7 +211,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
210 instr->len -= priv->page_size; 211 instr->len -= priv->page_size;
211 } 212 }
212 } 213 }
213 up(&priv->lock); 214 mutex_unlock(&priv->lock);
214 215
215 /* Inform MTD subsystem that erase is complete */ 216 /* Inform MTD subsystem that erase is complete */
216 instr->state = MTD_ERASE_DONE; 217 instr->state = MTD_ERASE_DONE;
@@ -266,7 +267,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
266 x[1].len = len; 267 x[1].len = len;
267 spi_message_add_tail(&x[1], &msg); 268 spi_message_add_tail(&x[1], &msg);
268 269
269 down(&priv->lock); 270 mutex_lock(&priv->lock);
270 271
271 /* Continuous read, max clock = f(car) which may be less than 272 /* Continuous read, max clock = f(car) which may be less than
272 * the peak rate available. Some chips support commands with 273 * the peak rate available. Some chips support commands with
@@ -279,7 +280,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
279 /* plus 4 "don't care" bytes */ 280 /* plus 4 "don't care" bytes */
280 281
281 status = spi_sync(priv->spi, &msg); 282 status = spi_sync(priv->spi, &msg);
282 up(&priv->lock); 283 mutex_unlock(&priv->lock);
283 284
284 if (status >= 0) { 285 if (status >= 0) {
285 *retlen = msg.actual_length - 8; 286 *retlen = msg.actual_length - 8;
@@ -336,7 +337,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
336 else 337 else
337 writelen = len; 338 writelen = len;
338 339
339 down(&priv->lock); 340 mutex_lock(&priv->lock);
340 while (remaining > 0) { 341 while (remaining > 0) {
341 DEBUG(MTD_DEBUG_LEVEL3, "write @ %i:%i len=%i\n", 342 DEBUG(MTD_DEBUG_LEVEL3, "write @ %i:%i len=%i\n",
342 pageaddr, offset, writelen); 343 pageaddr, offset, writelen);
@@ -441,7 +442,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
441 else 442 else
442 writelen = remaining; 443 writelen = remaining;
443 } 444 }
444 up(&priv->lock); 445 mutex_unlock(&priv->lock);
445 446
446 return status; 447 return status;
447} 448}
@@ -463,7 +464,7 @@ add_dataflash(struct spi_device *spi, char *name,
463 if (!priv) 464 if (!priv)
464 return -ENOMEM; 465 return -ENOMEM;
465 466
466 init_MUTEX(&priv->lock); 467 mutex_init(&priv->lock);
467 priv->spi = spi; 468 priv->spi = spi;
468 priv->page_size = pagesize; 469 priv->page_size = pagesize;
469 priv->page_offset = pageoffset; 470 priv->page_offset = pageoffset;
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index e8f686f7a3..7060a0895c 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -30,8 +30,8 @@
30 * 30 *
31 * Notes: 31 * Notes:
32 * Due to what I assume is more buggy SROM, the 64M PMC551 I 32 * Due to what I assume is more buggy SROM, the 64M PMC551 I
33 * have available claims that all 4 of it's DRAM banks have 64M 33 * have available claims that all 4 of its DRAM banks have 64MiB
34 * of ram configured (making a grand total of 256M onboard). 34 * of ram configured (making a grand total of 256MiB onboard).
35 * This is slightly annoying since the BAR0 size reflects the 35 * This is slightly annoying since the BAR0 size reflects the
36 * aperture size, not the dram size, and the V370PDC supplies no 36 * aperture size, not the dram size, and the V370PDC supplies no
37 * other method for memory size discovery. This problem is 37 * other method for memory size discovery. This problem is
@@ -70,7 +70,7 @@
70 * made the memory unusable, added a fix to code to touch up 70 * made the memory unusable, added a fix to code to touch up
71 * the DRAM some. 71 * the DRAM some.
72 * 72 *
73 * Bugs/FIXME's: 73 * Bugs/FIXMEs:
74 * * MUST fix the init function to not spin on a register 74 * * MUST fix the init function to not spin on a register
75 * waiting for it to set .. this does not safely handle busted 75 * waiting for it to set .. this does not safely handle busted
76 * devices that never reset the register correctly which will 76 * devices that never reset the register correctly which will
@@ -562,10 +562,10 @@ static u32 fixup_pmc551(struct pci_dev *dev)
562 /* 562 /*
563 * Some screen fun 563 * Some screen fun
564 */ 564 */
565 printk(KERN_DEBUG "pmc551: %d%c (0x%x) of %sprefetchable memory at " 565 printk(KERN_DEBUG "pmc551: %d%sB (0x%x) of %sprefetchable memory at "
566 "0x%llx\n", (size < 1024) ? size : (size < 1048576) ? 566 "0x%llx\n", (size < 1024) ? size : (size < 1048576) ?
567 size >> 10 : size >> 20, 567 size >> 10 : size >> 20,
568 (size < 1024) ? 'B' : (size < 1048576) ? 'K' : 'M', size, 568 (size < 1024) ? "" : (size < 1048576) ? "Ki" : "Mi", size,
569 ((dcmd & (0x1 << 3)) == 0) ? "non-" : "", 569 ((dcmd & (0x1 << 3)) == 0) ? "non-" : "",
570 (unsigned long long)pci_resource_start(dev, 0)); 570 (unsigned long long)pci_resource_start(dev, 0));
571 571
@@ -649,14 +649,10 @@ MODULE_DESCRIPTION(PMC551_VERSION);
649 * Stuff these outside the ifdef so as to not bust compiled in driver support 649 * Stuff these outside the ifdef so as to not bust compiled in driver support
650 */ 650 */
651static int msize = 0; 651static int msize = 0;
652#if defined(CONFIG_MTD_PMC551_APERTURE_SIZE)
653static int asize = CONFIG_MTD_PMC551_APERTURE_SIZE;
654#else
655static int asize = 0; 652static int asize = 0;
656#endif
657 653
658module_param(msize, int, 0); 654module_param(msize, int, 0);
659MODULE_PARM_DESC(msize, "memory size in Megabytes [1 - 1024]"); 655MODULE_PARM_DESC(msize, "memory size in MiB [1 - 1024]");
660module_param(asize, int, 0); 656module_param(asize, int, 0);
661MODULE_PARM_DESC(asize, "aperture size, must be <= memsize [1-1024]"); 657MODULE_PARM_DESC(asize, "aperture size, must be <= memsize [1-1024]");
662 658
@@ -799,8 +795,7 @@ static int __init init_pmc551(void)
799 mtd->owner = THIS_MODULE; 795 mtd->owner = THIS_MODULE;
800 796
801 if (add_mtd_device(mtd)) { 797 if (add_mtd_device(mtd)) {
802 printk(KERN_NOTICE "pmc551: Failed to register new " 798 printk(KERN_NOTICE "pmc551: Failed to register new device\n");
803 "device\n");
804 pci_iounmap(PCI_Device, priv->start); 799 pci_iounmap(PCI_Device, priv->start);
805 kfree(mtd->priv); 800 kfree(mtd->priv);
806 kfree(mtd); 801 kfree(mtd);
@@ -811,13 +806,13 @@ static int __init init_pmc551(void)
811 pci_dev_get(PCI_Device); 806 pci_dev_get(PCI_Device);
812 807
813 printk(KERN_NOTICE "Registered pmc551 memory device.\n"); 808 printk(KERN_NOTICE "Registered pmc551 memory device.\n");
814 printk(KERN_NOTICE "Mapped %dM of memory from 0x%p to 0x%p\n", 809 printk(KERN_NOTICE "Mapped %dMiB of memory from 0x%p to 0x%p\n",
815 priv->asize >> 20, 810 priv->asize >> 20,
816 priv->start, priv->start + priv->asize); 811 priv->start, priv->start + priv->asize);
817 printk(KERN_NOTICE "Total memory is %d%c\n", 812 printk(KERN_NOTICE "Total memory is %d%sB\n",
818 (length < 1024) ? length : 813 (length < 1024) ? length :
819 (length < 1048576) ? length >> 10 : length >> 20, 814 (length < 1048576) ? length >> 10 : length >> 20,
820 (length < 1024) ? 'B' : (length < 1048576) ? 'K' : 'M'); 815 (length < 1024) ? "" : (length < 1048576) ? "Ki" : "Mi");
821 priv->nextpmc551 = pmc551list; 816 priv->nextpmc551 = pmc551list;
822 pmc551list = mtd; 817 pmc551list = mtd;
823 found++; 818 found++;
@@ -850,7 +845,7 @@ static void __exit cleanup_pmc551(void)
850 pmc551list = priv->nextpmc551; 845 pmc551list = priv->nextpmc551;
851 846
852 if (priv->start) { 847 if (priv->start) {
853 printk(KERN_DEBUG "pmc551: unmapping %dM starting at " 848 printk(KERN_DEBUG "pmc551: unmapping %dMiB starting at "
854 "0x%p\n", priv->asize >> 20, priv->start); 849 "0x%p\n", priv->asize >> 20, priv->start);
855 pci_iounmap(priv->dev, priv->start); 850 pci_iounmap(priv->dev, priv->start);
856 } 851 }
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index ecac0e438f..b8917beeb6 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -580,14 +580,13 @@ int INFTL_mount(struct INFTLrecord *s)
580 logical_block = block = BLOCK_NIL; 580 logical_block = block = BLOCK_NIL;
581 581
582 /* Temporary buffer to store ANAC numbers. */ 582 /* Temporary buffer to store ANAC numbers. */
583 ANACtable = kmalloc(s->nb_blocks * sizeof(u8), GFP_KERNEL); 583 ANACtable = kcalloc(s->nb_blocks, sizeof(u8), GFP_KERNEL);
584 if (!ANACtable) { 584 if (!ANACtable) {
585 printk(KERN_WARNING "INFTL: allocation of ANACtable " 585 printk(KERN_WARNING "INFTL: allocation of ANACtable "
586 "failed (%zd bytes)\n", 586 "failed (%zd bytes)\n",
587 s->nb_blocks * sizeof(u8)); 587 s->nb_blocks * sizeof(u8));
588 return -ENOMEM; 588 return -ENOMEM;
589 } 589 }
590 memset(ANACtable, 0, s->nb_blocks);
591 590
592 /* 591 /*
593 * First pass is to explore each physical unit, and construct the 592 * First pass is to explore each physical unit, and construct the
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 6cd132c751..2a2a125b0c 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -163,20 +163,12 @@ config MTD_SBC_GXX
163 More info at 163 More info at
164 <http://www.arcomcontrols.com/products/icp/pc104/processors/SBC_GX1.htm>. 164 <http://www.arcomcontrols.com/products/icp/pc104/processors/SBC_GX1.htm>.
165 165
166config MTD_LUBBOCK 166config MTD_PXA2XX
167 tristate "CFI Flash device mapped on Intel Lubbock XScale eval board" 167 tristate "CFI Flash device mapped on Intel XScale PXA2xx based boards"
168 depends on ARCH_LUBBOCK && MTD_CFI_INTELEXT && MTD_PARTITIONS 168 depends on (PXA25x || PXA27x) && MTD_CFI_INTELEXT
169 help
170 This provides a driver for the on-board flash of the Intel
171 'Lubbock' XScale evaluation board.
172
173config MTD_MAINSTONE
174 tristate "CFI Flash device mapped on Intel Mainstone XScale eval board"
175 depends on MACH_MAINSTONE && MTD_CFI_INTELEXT
176 select MTD_PARTITIONS 169 select MTD_PARTITIONS
177 help 170 help
178 This provides a driver for the on-board flash of the Intel 171 This provides a driver for the NOR flash attached to a PXA2xx chip.
179 'Mainstone PXA27x evaluation board.
180 172
181config MTD_OCTAGON 173config MTD_OCTAGON
182 tristate "JEDEC Flash device mapped on Octagon 5066 SBC" 174 tristate "JEDEC Flash device mapped on Octagon 5066 SBC"
@@ -354,7 +346,7 @@ config MTD_CFI_FLAGADM
354 346
355config MTD_WALNUT 347config MTD_WALNUT
356 tristate "Flash device mapped on IBM 405GP Walnut" 348 tristate "Flash device mapped on IBM 405GP Walnut"
357 depends on MTD_JEDECPROBE && WALNUT 349 depends on MTD_JEDECPROBE && WALNUT && !PPC_MERGE
358 help 350 help
359 This enables access routines for the flash chips on the IBM 405GP 351 This enables access routines for the flash chips on the IBM 405GP
360 Walnut board. If you have one of these boards and would like to 352 Walnut board. If you have one of these boards and would like to
@@ -370,7 +362,7 @@ config MTD_EBONY
370 362
371config MTD_OCOTEA 363config MTD_OCOTEA
372 tristate "Flash devices mapped on IBM 440GX Ocotea" 364 tristate "Flash devices mapped on IBM 440GX Ocotea"
373 depends on MTD_CFI && OCOTEA 365 depends on MTD_CFI && OCOTEA && !PPC_MERGE
374 help 366 help
375 This enables access routines for the flash chips on the IBM 440GX 367 This enables access routines for the flash chips on the IBM 440GX
376 Ocotea board. If you have one of these boards and would like to 368 Ocotea board. If you have one of these boards and would like to
@@ -384,22 +376,6 @@ config MTD_REDWOOD
384 Redwood board. If you have one of these boards and would like to 376 Redwood board. If you have one of these boards and would like to
385 use the flash chips on it, say 'Y'. 377 use the flash chips on it, say 'Y'.
386 378
387config MTD_TQM834x
388 tristate "Flash device mapped on TQ Components TQM834x Boards"
389 depends on MTD_CFI && TQM834x
390 help
391 This enables access routines for the flash chips on the
392 TQ Components TQM834x boards. If you have one of these boards
393 and would like to use the flash chips on it, say 'Y'.
394
395config MTD_OCELOT
396 tristate "Momenco Ocelot boot flash device"
397 depends on MOMENCO_OCELOT
398 help
399 This enables access routines for the boot flash device and for the
400 NVRAM on the Momenco Ocelot board. If you have one of these boards
401 and would like access to either of these, say 'Y'.
402
403config MTD_SOLUTIONENGINE 379config MTD_SOLUTIONENGINE
404 tristate "CFI Flash device mapped on Hitachi SolutionEngine" 380 tristate "CFI Flash device mapped on Hitachi SolutionEngine"
405 depends on SUPERH && MTD_CFI && MTD_REDBOOT_PARTS 381 depends on SUPERH && MTD_CFI && MTD_REDBOOT_PARTS
@@ -605,6 +581,13 @@ config MTD_SHARP_SL
605 help 581 help
606 This enables access to the flash chip on the Sharp SL Series of PDAs. 582 This enables access to the flash chip on the Sharp SL Series of PDAs.
607 583
584config MTD_INTEL_VR_NOR
585 tristate "NOR flash on Intel Vermilion Range Expansion Bus CS0"
586 depends on PCI
587 help
588 Map driver for a NOR flash bank located on the Expansion Bus of the
589 Intel Vermilion Range chipset.
590
608config MTD_PLATRAM 591config MTD_PLATRAM
609 tristate "Map driver for platform device RAM (mtd-ram)" 592 tristate "Map driver for platform device RAM (mtd-ram)"
610 select MTD_RAM 593 select MTD_RAM
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 970b189271..316382a140 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -20,8 +20,7 @@ obj-$(CONFIG_MTD_ESB2ROM) += esb2rom.o
20obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o 20obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o
21obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o 21obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
22obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o 22obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
23obj-$(CONFIG_MTD_LUBBOCK) += lubbock-flash.o 23obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
24obj-$(CONFIG_MTD_MAINSTONE) += mainstone-flash.o
25obj-$(CONFIG_MTD_MBX860) += mbx860.o 24obj-$(CONFIG_MTD_MBX860) += mbx860.o
26obj-$(CONFIG_MTD_CEIVA) += ceiva.o 25obj-$(CONFIG_MTD_CEIVA) += ceiva.o
27obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o 26obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
@@ -43,7 +42,6 @@ obj-$(CONFIG_MTD_SUN_UFLASH) += sun_uflash.o
43obj-$(CONFIG_MTD_VMAX) += vmax301.o 42obj-$(CONFIG_MTD_VMAX) += vmax301.o
44obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o 43obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o
45obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o 44obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o
46obj-$(CONFIG_MTD_OCELOT) += ocelot.o
47obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o 45obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
48obj-$(CONFIG_MTD_PCI) += pci.o 46obj-$(CONFIG_MTD_PCI) += pci.o
49obj-$(CONFIG_MTD_ALCHEMY) += alchemy-flash.o 47obj-$(CONFIG_MTD_ALCHEMY) += alchemy-flash.o
@@ -70,4 +68,4 @@ obj-$(CONFIG_MTD_SHARP_SL) += sharpsl-flash.o
70obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o 68obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
71obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o 69obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o
72obj-$(CONFIG_MTD_MTX1) += mtx-1_flash.o 70obj-$(CONFIG_MTD_MTX1) += mtx-1_flash.o
73obj-$(CONFIG_MTD_TQM834x) += tqm834x.o 71obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
diff --git a/drivers/mtd/maps/alchemy-flash.c b/drivers/mtd/maps/alchemy-flash.c
index 84fbe0e8c4..82811bcb04 100644
--- a/drivers/mtd/maps/alchemy-flash.c
+++ b/drivers/mtd/maps/alchemy-flash.c
@@ -75,13 +75,6 @@
75#define BOARD_FLASH_WIDTH 2 /* 16-bits */ 75#define BOARD_FLASH_WIDTH 2 /* 16-bits */
76#endif 76#endif
77 77
78#ifdef CONFIG_MIPS_HYDROGEN3
79#define BOARD_MAP_NAME "Hydrogen3 Flash"
80#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
81#define BOARD_FLASH_WIDTH 4 /* 32-bits */
82#define USE_LOCAL_ACCESSORS /* why? */
83#endif
84
85#ifdef CONFIG_MIPS_BOSPORUS 78#ifdef CONFIG_MIPS_BOSPORUS
86#define BOARD_MAP_NAME "Bosporus Flash" 79#define BOARD_MAP_NAME "Bosporus Flash"
87#define BOARD_FLASH_SIZE 0x01000000 /* 16MB */ 80#define BOARD_FLASH_SIZE 0x01000000 /* 16MB */
@@ -130,13 +123,6 @@ int __init alchemy_mtd_init(void)
130 123
131 window_addr = 0x20000000 - BOARD_FLASH_SIZE; 124 window_addr = 0x20000000 - BOARD_FLASH_SIZE;
132 window_size = BOARD_FLASH_SIZE; 125 window_size = BOARD_FLASH_SIZE;
133#ifdef CONFIG_MIPS_MIRAGE_WHY
134 /* Boot ROM flash bank only; no user bank */
135 window_addr = 0x1C000000;
136 window_size = 0x04000000;
137 /* USERFS from 0x1C00 0000 to 0x1FC00000 */
138 alchemy_partitions[0].size = 0x03C00000;
139#endif
140 126
141 /* 127 /*
142 * Static partition definition selection 128 * Static partition definition selection
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
new file mode 100644
index 0000000000..1e7814ae21
--- /dev/null
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -0,0 +1,298 @@
1/*
2 * drivers/mtd/maps/intel_vr_nor.c
3 *
4 * An MTD map driver for a NOR flash bank on the Expansion Bus of the Intel
5 * Vermilion Range chipset.
6 *
7 * The Vermilion Range Expansion Bus supports four chip selects, each of which
8 * has 64MiB of address space. The 2nd BAR of the Expansion Bus PCI Device
9 * is a 256MiB memory region containing the address spaces for all four of the
10 * chip selects, with start addresses hardcoded on 64MiB boundaries.
11 *
12 * This map driver only supports NOR flash on chip select 0. The buswidth
13 * (either 8 bits or 16 bits) is determined by reading the Expansion Bus Timing
14 * and Control Register for Chip Select 0 (EXP_TIMING_CS0). This driver does
15 * not modify the value in the EXP_TIMING_CS0 register except to enable writing
16 * and disable boot acceleration. The timing parameters in the register are
17 * assumed to have been properly initialized by the BIOS. The reset default
18 * timing parameters are maximally conservative (slow), so access to the flash
19 * will be slower than it should be if the BIOS has not initialized the timing
20 * parameters.
21 *
22 * Author: Andy Lowe <alowe@mvista.com>
23 *
24 * 2006 (c) MontaVista Software, Inc. This file is licensed under
25 * the terms of the GNU General Public License version 2. This program
26 * is licensed "as is" without any warranty of any kind, whether express
27 * or implied.
28 */
29
30#include <linux/module.h>
31#include <linux/kernel.h>
32#include <linux/pci.h>
33#include <linux/init.h>
34#include <linux/mtd/mtd.h>
35#include <linux/mtd/map.h>
36#include <linux/mtd/partitions.h>
37#include <linux/mtd/cfi.h>
38#include <linux/mtd/flashchip.h>
39
40#define DRV_NAME "vr_nor"
41
/* Per-device state for one Vermilion Range NOR bank (Chip Select 0). */
struct vr_nor_mtd {
	void __iomem *csr_base;	/* Expansion Bus CSR window (BAR 0) */
	struct map_info map;	/* map over Chip Select 0 in the memory window */
	struct mtd_info *info;	/* chip found by do_map_probe() */
	int nr_parts;		/* >0 when cmdline partitions were registered */
	struct pci_dev *dev;	/* the Expansion Bus PCI function */
};
49
50/* Expansion Bus Configuration and Status Registers are in BAR 0 */
51#define EXP_CSR_MBAR 0
52/* Expansion Bus Memory Window is BAR 1 */
53#define EXP_WIN_MBAR 1
54/* Maximum address space for Chip Select 0 is 64MiB */
55#define CS0_SIZE 0x04000000
56/* Chip Select 0 is at offset 0 in the Memory Window */
57#define CS0_START 0x0
58/* Chip Select 0 Timing Register is at offset 0 in CSR */
59#define EXP_TIMING_CS0 0x00
60#define TIMING_CS_EN (1 << 31) /* Chip Select Enable */
61#define TIMING_BOOT_ACCEL_DIS (1 << 8) /* Boot Acceleration Disable */
62#define TIMING_WR_EN (1 << 1) /* Write Enable */
63#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
64#define TIMING_MASK 0x3FFF0000
65
66static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p)
67{
68 if (p->nr_parts > 0) {
69#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
70 del_mtd_partitions(p->info);
71#endif
72 } else
73 del_mtd_device(p->info);
74}
75
76static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
77{
78 int err = 0;
79#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
80 struct mtd_partition *parts;
81 static const char *part_probes[] = { "cmdlinepart", NULL };
82#endif
83
84 /* register the flash bank */
85#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
86 /* partition the flash bank */
87 p->nr_parts = parse_mtd_partitions(p->info, part_probes, &parts, 0);
88 if (p->nr_parts > 0)
89 err = add_mtd_partitions(p->info, parts, p->nr_parts);
90#endif
91 if (p->nr_parts <= 0)
92 err = add_mtd_device(p->info);
93
94 return err;
95}
96
97static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
98{
99 map_destroy(p->info);
100}
101
102static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p)
103{
104 static const char *probe_types[] =
105 { "cfi_probe", "jedec_probe", NULL };
106 const char **type;
107
108 for (type = probe_types; !p->info && *type; type++)
109 p->info = do_map_probe(*type, &p->map);
110 if (!p->info)
111 return -ENODEV;
112
113 p->info->owner = THIS_MODULE;
114
115 return 0;
116}
117
118static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p)
119{
120 unsigned int exp_timing_cs0;
121
122 /* write-protect the flash bank */
123 exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
124 exp_timing_cs0 &= ~TIMING_WR_EN;
125 writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
126
127 /* unmap the flash window */
128 iounmap(p->map.virt);
129
130 /* unmap the csr window */
131 iounmap(p->csr_base);
132}
133
134/*
135 * Initialize the map_info structure and map the flash.
136 * Returns 0 on success, nonzero otherwise.
137 */
138static int __devinit vr_nor_init_maps(struct vr_nor_mtd *p)
139{
140 unsigned long csr_phys, csr_len;
141 unsigned long win_phys, win_len;
142 unsigned int exp_timing_cs0;
143 int err;
144
145 csr_phys = pci_resource_start(p->dev, EXP_CSR_MBAR);
146 csr_len = pci_resource_len(p->dev, EXP_CSR_MBAR);
147 win_phys = pci_resource_start(p->dev, EXP_WIN_MBAR);
148 win_len = pci_resource_len(p->dev, EXP_WIN_MBAR);
149
150 if (!csr_phys || !csr_len || !win_phys || !win_len)
151 return -ENODEV;
152
153 if (win_len < (CS0_START + CS0_SIZE))
154 return -ENXIO;
155
156 p->csr_base = ioremap_nocache(csr_phys, csr_len);
157 if (!p->csr_base)
158 return -ENOMEM;
159
160 exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
161 if (!(exp_timing_cs0 & TIMING_CS_EN)) {
162 dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
163 "is disabled.\n");
164 err = -ENODEV;
165 goto release;
166 }
167 if ((exp_timing_cs0 & TIMING_MASK) == TIMING_MASK) {
168 dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
169 "is configured for maximally slow access times.\n");
170 }
171 p->map.name = DRV_NAME;
172 p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
173 p->map.phys = win_phys + CS0_START;
174 p->map.size = CS0_SIZE;
175 p->map.virt = ioremap_nocache(p->map.phys, p->map.size);
176 if (!p->map.virt) {
177 err = -ENOMEM;
178 goto release;
179 }
180 simple_map_init(&p->map);
181
182 /* Enable writes to flash bank */
183 exp_timing_cs0 |= TIMING_BOOT_ACCEL_DIS | TIMING_WR_EN;
184 writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
185
186 return 0;
187
188 release:
189 iounmap(p->csr_base);
190 return err;
191}
192
/* Intel Vermilion Range Expansion Bus function (see MODULE_DEVICE_TABLE) */
static struct pci_device_id vr_nor_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x500D)},
	{0,}
};
197
/* Tear down everything vr_nor_pci_probe() set up, in reverse order. */
static void __devexit vr_nor_pci_remove(struct pci_dev *dev)
{
	struct vr_nor_mtd *p = pci_get_drvdata(dev);

	pci_set_drvdata(dev, NULL);
	vr_nor_destroy_partitions(p);	/* unregister from the MTD core */
	vr_nor_destroy_mtd_setup(p);	/* free the probed mtd_info */
	vr_nor_destroy_maps(p);		/* write-protect and unmap windows */
	kfree(p);
	pci_release_regions(dev);
	pci_disable_device(dev);
}
210
/*
 * Bind to the Expansion Bus PCI function: enable the device, map the CSR
 * and flash windows, probe for a NOR chip on CS0, and register it with
 * the MTD core.  Returns 0 on success or a negative errno, unwinding all
 * earlier steps via the goto ladder below.
 */
static int __devinit
vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vr_nor_mtd *p = NULL;
	unsigned int exp_timing_cs0;
	int err;

	err = pci_enable_device(dev);
	if (err)
		goto out;

	err = pci_request_regions(dev, DRV_NAME);
	if (err)
		goto disable_dev;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	/* preset err so the !p test can share the 'release' path */
	err = -ENOMEM;
	if (!p)
		goto release;

	p->dev = dev;

	err = vr_nor_init_maps(p);
	if (err)
		goto release;

	err = vr_nor_mtd_setup(p);
	if (err)
		goto destroy_maps;

	err = vr_nor_init_partitions(p);
	if (err)
		goto destroy_mtd_setup;

	pci_set_drvdata(dev, p);

	return 0;

	/* NOTE(review): the cleanup below open-codes the bodies of the
	 * __devexit vr_nor_destroy_mtd_setup()/vr_nor_destroy_maps()
	 * helpers — presumably to avoid calling __devexit code from a
	 * __devinit path; confirm before refactoring to call them.
	 */
      destroy_mtd_setup:
	map_destroy(p->info);

      destroy_maps:
	/* write-protect the flash bank */
	exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
	exp_timing_cs0 &= ~TIMING_WR_EN;
	writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);

	/* unmap the flash window */
	iounmap(p->map.virt);

	/* unmap the csr window */
	iounmap(p->csr_base);

      release:
	kfree(p);
	pci_release_regions(dev);

      disable_dev:
	pci_disable_device(dev);

      out:
	return err;
}
274
/* PCI glue: matches vr_nor_pci_ids and dispatches probe/remove. */
static struct pci_driver vr_nor_pci_driver = {
	.name = DRV_NAME,
	.probe = vr_nor_pci_probe,
	.remove = __devexit_p(vr_nor_pci_remove),
	.id_table = vr_nor_pci_ids,
};
281
282static int __init vr_nor_mtd_init(void)
283{
284 return pci_register_driver(&vr_nor_pci_driver);
285}
286
287static void __exit vr_nor_mtd_exit(void)
288{
289 pci_unregister_driver(&vr_nor_pci_driver);
290}
291
292module_init(vr_nor_mtd_init);
293module_exit(vr_nor_mtd_exit);
294
295MODULE_AUTHOR("Andy Lowe");
296MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
297MODULE_LICENSE("GPL");
298MODULE_DEVICE_TABLE(pci, vr_nor_pci_ids);
diff --git a/drivers/mtd/maps/lubbock-flash.c b/drivers/mtd/maps/lubbock-flash.c
deleted file mode 100644
index e8560683b9..0000000000
--- a/drivers/mtd/maps/lubbock-flash.c
+++ /dev/null
@@ -1,168 +0,0 @@
1/*
2 * $Id: lubbock-flash.c,v 1.21 2005/11/07 11:14:27 gleixner Exp $
3 *
4 * Map driver for the Lubbock developer platform.
5 *
6 * Author: Nicolas Pitre
7 * Copyright: (C) 2001 MontaVista Software Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18
19#include <linux/mtd/mtd.h>
20#include <linux/mtd/map.h>
21#include <linux/mtd/partitions.h>
22
23#include <asm/io.h>
24#include <asm/hardware.h>
25#include <asm/arch/pxa-regs.h>
26#include <asm/arch/lubbock.h>
27#include <asm/cacheflush.h>
28
29#define ROM_ADDR 0x00000000
30#define FLASH_ADDR 0x04000000
31
32#define WINDOW_SIZE 64*1024*1024
33
34static void lubbock_map_inval_cache(struct map_info *map, unsigned long from, ssize_t len)
35{
36 flush_ioremap_region(map->phys, map->cached, from, len);
37}
38
39static struct map_info lubbock_maps[2] = { {
40 .size = WINDOW_SIZE,
41 .phys = 0x00000000,
42 .inval_cache = lubbock_map_inval_cache,
43}, {
44 .size = WINDOW_SIZE,
45 .phys = 0x04000000,
46 .inval_cache = lubbock_map_inval_cache,
47} };
48
49static struct mtd_partition lubbock_partitions[] = {
50 {
51 .name = "Bootloader",
52 .size = 0x00040000,
53 .offset = 0,
54 .mask_flags = MTD_WRITEABLE /* force read-only */
55 },{
56 .name = "Kernel",
57 .size = 0x00100000,
58 .offset = 0x00040000,
59 },{
60 .name = "Filesystem",
61 .size = MTDPART_SIZ_FULL,
62 .offset = 0x00140000
63 }
64};
65
66static struct mtd_info *mymtds[2];
67static struct mtd_partition *parsed_parts[2];
68static int nr_parsed_parts[2];
69
70static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
71
72static int __init init_lubbock(void)
73{
74 int flashboot = (LUB_CONF_SWITCHES & 1);
75 int ret = 0, i;
76
77 lubbock_maps[0].bankwidth = lubbock_maps[1].bankwidth =
78 (BOOT_DEF & 1) ? 2 : 4;
79
80 /* Compensate for the nROMBT switch which swaps the flash banks */
81 printk(KERN_NOTICE "Lubbock configured to boot from %s (bank %d)\n",
82 flashboot?"Flash":"ROM", flashboot);
83
84 lubbock_maps[flashboot^1].name = "Lubbock Application Flash";
85 lubbock_maps[flashboot].name = "Lubbock Boot ROM";
86
87 for (i = 0; i < 2; i++) {
88 lubbock_maps[i].virt = ioremap(lubbock_maps[i].phys, WINDOW_SIZE);
89 if (!lubbock_maps[i].virt) {
90 printk(KERN_WARNING "Failed to ioremap %s\n", lubbock_maps[i].name);
91 if (!ret)
92 ret = -ENOMEM;
93 continue;
94 }
95 lubbock_maps[i].cached = ioremap_cached(lubbock_maps[i].phys, WINDOW_SIZE);
96 if (!lubbock_maps[i].cached)
97 printk(KERN_WARNING "Failed to ioremap cached %s\n", lubbock_maps[i].name);
98 simple_map_init(&lubbock_maps[i]);
99
100 printk(KERN_NOTICE "Probing %s at physical address 0x%08lx (%d-bit bankwidth)\n",
101 lubbock_maps[i].name, lubbock_maps[i].phys,
102 lubbock_maps[i].bankwidth * 8);
103
104 mymtds[i] = do_map_probe("cfi_probe", &lubbock_maps[i]);
105
106 if (!mymtds[i]) {
107 iounmap((void *)lubbock_maps[i].virt);
108 if (lubbock_maps[i].cached)
109 iounmap(lubbock_maps[i].cached);
110 if (!ret)
111 ret = -EIO;
112 continue;
113 }
114 mymtds[i]->owner = THIS_MODULE;
115
116 ret = parse_mtd_partitions(mymtds[i], probes,
117 &parsed_parts[i], 0);
118
119 if (ret > 0)
120 nr_parsed_parts[i] = ret;
121 }
122
123 if (!mymtds[0] && !mymtds[1])
124 return ret;
125
126 for (i = 0; i < 2; i++) {
127 if (!mymtds[i]) {
128 printk(KERN_WARNING "%s is absent. Skipping\n", lubbock_maps[i].name);
129 } else if (nr_parsed_parts[i]) {
130 add_mtd_partitions(mymtds[i], parsed_parts[i], nr_parsed_parts[i]);
131 } else if (!i) {
132 printk("Using static partitions on %s\n", lubbock_maps[i].name);
133 add_mtd_partitions(mymtds[i], lubbock_partitions, ARRAY_SIZE(lubbock_partitions));
134 } else {
135 printk("Registering %s as whole device\n", lubbock_maps[i].name);
136 add_mtd_device(mymtds[i]);
137 }
138 }
139 return 0;
140}
141
142static void __exit cleanup_lubbock(void)
143{
144 int i;
145 for (i = 0; i < 2; i++) {
146 if (!mymtds[i])
147 continue;
148
149 if (nr_parsed_parts[i] || !i)
150 del_mtd_partitions(mymtds[i]);
151 else
152 del_mtd_device(mymtds[i]);
153
154 map_destroy(mymtds[i]);
155 iounmap((void *)lubbock_maps[i].virt);
156 if (lubbock_maps[i].cached)
157 iounmap(lubbock_maps[i].cached);
158
159 kfree(parsed_parts[i]);
160 }
161}
162
163module_init(init_lubbock);
164module_exit(cleanup_lubbock);
165
166MODULE_LICENSE("GPL");
167MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
168MODULE_DESCRIPTION("MTD map driver for Intel Lubbock");
diff --git a/drivers/mtd/maps/mainstone-flash.c b/drivers/mtd/maps/mainstone-flash.c
deleted file mode 100644
index d76487d82d..0000000000
--- a/drivers/mtd/maps/mainstone-flash.c
+++ /dev/null
@@ -1,180 +0,0 @@
1/*
2 * $Id: $
3 *
4 * Map driver for the Mainstone developer platform.
5 *
6 * Author: Nicolas Pitre
7 * Copyright: (C) 2001 MontaVista Software Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18
19#include <linux/mtd/mtd.h>
20#include <linux/mtd/map.h>
21#include <linux/mtd/partitions.h>
22
23#include <asm/io.h>
24#include <asm/hardware.h>
25#include <asm/arch/pxa-regs.h>
26#include <asm/arch/mainstone.h>
27#include <asm/cacheflush.h>
28
29
30#define ROM_ADDR 0x00000000
31#define FLASH_ADDR 0x04000000
32
33#define WINDOW_SIZE 0x04000000
34
35static void mainstone_map_inval_cache(struct map_info *map, unsigned long from,
36 ssize_t len)
37{
38 flush_ioremap_region(map->phys, map->cached, from, len);
39}
40
41static struct map_info mainstone_maps[2] = { {
42 .size = WINDOW_SIZE,
43 .phys = PXA_CS0_PHYS,
44 .inval_cache = mainstone_map_inval_cache,
45}, {
46 .size = WINDOW_SIZE,
47 .phys = PXA_CS1_PHYS,
48 .inval_cache = mainstone_map_inval_cache,
49} };
50
51static struct mtd_partition mainstone_partitions[] = {
52 {
53 .name = "Bootloader",
54 .size = 0x00040000,
55 .offset = 0,
56 .mask_flags = MTD_WRITEABLE /* force read-only */
57 },{
58 .name = "Kernel",
59 .size = 0x00400000,
60 .offset = 0x00040000,
61 },{
62 .name = "Filesystem",
63 .size = MTDPART_SIZ_FULL,
64 .offset = 0x00440000
65 }
66};
67
68static struct mtd_info *mymtds[2];
69static struct mtd_partition *parsed_parts[2];
70static int nr_parsed_parts[2];
71
72static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
73
74static int __init init_mainstone(void)
75{
76 int SW7 = 0; /* FIXME: get from SCR (Mst doc section 3.2.1.1) */
77 int ret = 0, i;
78
79 mainstone_maps[0].bankwidth = (BOOT_DEF & 1) ? 2 : 4;
80 mainstone_maps[1].bankwidth = 4;
81
82 /* Compensate for SW7 which swaps the flash banks */
83 mainstone_maps[SW7].name = "processor flash";
84 mainstone_maps[SW7 ^ 1].name = "main board flash";
85
86 printk(KERN_NOTICE "Mainstone configured to boot from %s\n",
87 mainstone_maps[0].name);
88
89 for (i = 0; i < 2; i++) {
90 mainstone_maps[i].virt = ioremap(mainstone_maps[i].phys,
91 WINDOW_SIZE);
92 if (!mainstone_maps[i].virt) {
93 printk(KERN_WARNING "Failed to ioremap %s\n",
94 mainstone_maps[i].name);
95 if (!ret)
96 ret = -ENOMEM;
97 continue;
98 }
99 mainstone_maps[i].cached =
100 ioremap_cached(mainstone_maps[i].phys, WINDOW_SIZE);
101 if (!mainstone_maps[i].cached)
102 printk(KERN_WARNING "Failed to ioremap cached %s\n",
103 mainstone_maps[i].name);
104 simple_map_init(&mainstone_maps[i]);
105
106 printk(KERN_NOTICE
107 "Probing %s at physical address 0x%08lx"
108 " (%d-bit bankwidth)\n",
109 mainstone_maps[i].name, mainstone_maps[i].phys,
110 mainstone_maps[i].bankwidth * 8);
111
112 mymtds[i] = do_map_probe("cfi_probe", &mainstone_maps[i]);
113
114 if (!mymtds[i]) {
115 iounmap((void *)mainstone_maps[i].virt);
116 if (mainstone_maps[i].cached)
117 iounmap(mainstone_maps[i].cached);
118 if (!ret)
119 ret = -EIO;
120 continue;
121 }
122 mymtds[i]->owner = THIS_MODULE;
123
124 ret = parse_mtd_partitions(mymtds[i], probes,
125 &parsed_parts[i], 0);
126
127 if (ret > 0)
128 nr_parsed_parts[i] = ret;
129 }
130
131 if (!mymtds[0] && !mymtds[1])
132 return ret;
133
134 for (i = 0; i < 2; i++) {
135 if (!mymtds[i]) {
136 printk(KERN_WARNING "%s is absent. Skipping\n",
137 mainstone_maps[i].name);
138 } else if (nr_parsed_parts[i]) {
139 add_mtd_partitions(mymtds[i], parsed_parts[i],
140 nr_parsed_parts[i]);
141 } else if (!i) {
142 printk("Using static partitions on %s\n",
143 mainstone_maps[i].name);
144 add_mtd_partitions(mymtds[i], mainstone_partitions,
145 ARRAY_SIZE(mainstone_partitions));
146 } else {
147 printk("Registering %s as whole device\n",
148 mainstone_maps[i].name);
149 add_mtd_device(mymtds[i]);
150 }
151 }
152 return 0;
153}
154
155static void __exit cleanup_mainstone(void)
156{
157 int i;
158 for (i = 0; i < 2; i++) {
159 if (!mymtds[i])
160 continue;
161
162 if (nr_parsed_parts[i] || !i)
163 del_mtd_partitions(mymtds[i]);
164 else
165 del_mtd_device(mymtds[i]);
166
167 map_destroy(mymtds[i]);
168 iounmap((void *)mainstone_maps[i].virt);
169 if (mainstone_maps[i].cached)
170 iounmap(mainstone_maps[i].cached);
171 kfree(parsed_parts[i]);
172 }
173}
174
175module_init(init_mainstone);
176module_exit(cleanup_mainstone);
177
178MODULE_LICENSE("GPL");
179MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
180MODULE_DESCRIPTION("MTD map driver for Intel Mainstone");
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 7b96cd02f8..0c9b305a72 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -158,68 +158,11 @@ static struct notifier_block nettel_notifier_block = {
158 nettel_reboot_notifier, NULL, 0 158 nettel_reboot_notifier, NULL, 0
159}; 159};
160 160
161/*
162 * Erase the configuration file system.
163 * Used to support the software reset button.
164 */
165static void nettel_erasecallback(struct erase_info *done)
166{
167 wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
168 wake_up(wait_q);
169}
170
171static struct erase_info nettel_erase;
172
173int nettel_eraseconfig(void)
174{
175 struct mtd_info *mtd;
176 DECLARE_WAITQUEUE(wait, current);
177 wait_queue_head_t wait_q;
178 int ret;
179
180 init_waitqueue_head(&wait_q);
181 mtd = get_mtd_device(NULL, 2);
182 if (!IS_ERR(mtd)) {
183 nettel_erase.mtd = mtd;
184 nettel_erase.callback = nettel_erasecallback;
185 nettel_erase.callback = NULL;
186 nettel_erase.addr = 0;
187 nettel_erase.len = mtd->size;
188 nettel_erase.priv = (u_long) &wait_q;
189 nettel_erase.priv = 0;
190
191 set_current_state(TASK_INTERRUPTIBLE);
192 add_wait_queue(&wait_q, &wait);
193
194 ret = mtd->erase(mtd, &nettel_erase);
195 if (ret) {
196 set_current_state(TASK_RUNNING);
197 remove_wait_queue(&wait_q, &wait);
198 put_mtd_device(mtd);
199 return(ret);
200 }
201
202 schedule(); /* Wait for erase to finish. */
203 remove_wait_queue(&wait_q, &wait);
204
205 put_mtd_device(mtd);
206 }
207
208 return(0);
209}
210
211#else
212
213int nettel_eraseconfig(void)
214{
215 return(0);
216}
217
218#endif 161#endif
219 162
220/****************************************************************************/ 163/****************************************************************************/
221 164
222int __init nettel_init(void) 165static int __init nettel_init(void)
223{ 166{
224 volatile unsigned long *amdpar; 167 volatile unsigned long *amdpar;
225 unsigned long amdaddr, maxsize; 168 unsigned long amdaddr, maxsize;
@@ -421,10 +364,6 @@ int __init nettel_init(void)
421 364
422 intel_mtd->owner = THIS_MODULE; 365 intel_mtd->owner = THIS_MODULE;
423 366
424#ifndef CONFIG_BLK_DEV_INITRD
425 ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, 1);
426#endif
427
428 num_intel_partitions = sizeof(nettel_intel_partitions) / 367 num_intel_partitions = sizeof(nettel_intel_partitions) /
429 sizeof(nettel_intel_partitions[0]); 368 sizeof(nettel_intel_partitions[0]);
430 369
@@ -477,7 +416,7 @@ out_unmap2:
477 416
478/****************************************************************************/ 417/****************************************************************************/
479 418
480void __exit nettel_cleanup(void) 419static void __exit nettel_cleanup(void)
481{ 420{
482#ifdef CONFIG_MTD_CFI_INTELEXT 421#ifdef CONFIG_MTD_CFI_INTELEXT
483 unregister_reboot_notifier(&nettel_notifier_block); 422 unregister_reboot_notifier(&nettel_notifier_block);
diff --git a/drivers/mtd/maps/ocelot.c b/drivers/mtd/maps/ocelot.c
deleted file mode 100644
index 6977963d78..0000000000
--- a/drivers/mtd/maps/ocelot.c
+++ /dev/null
@@ -1,175 +0,0 @@
1/*
2 * $Id: ocelot.c,v 1.17 2005/11/07 11:14:27 gleixner Exp $
3 *
4 * Flash on Momenco Ocelot
5 */
6
7#include <linux/module.h>
8#include <linux/types.h>
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <asm/io.h>
12#include <linux/mtd/mtd.h>
13#include <linux/mtd/map.h>
14#include <linux/mtd/partitions.h>
15
16#define OCELOT_PLD 0x2c000000
17#define FLASH_WINDOW_ADDR 0x2fc00000
18#define FLASH_WINDOW_SIZE 0x00080000
19#define FLASH_BUSWIDTH 1
20#define NVRAM_WINDOW_ADDR 0x2c800000
21#define NVRAM_WINDOW_SIZE 0x00007FF0
22#define NVRAM_BUSWIDTH 1
23
24static unsigned int cacheflush = 0;
25
26static struct mtd_info *flash_mtd;
27static struct mtd_info *nvram_mtd;
28
29static void ocelot_ram_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
30{
31 struct map_info *map = mtd->priv;
32 size_t done = 0;
33
34 /* If we use memcpy, it does word-wide writes. Even though we told the
35 GT64120A that it's an 8-bit wide region, word-wide writes don't work.
36 We end up just writing the first byte of the four to all four bytes.
37 So we have this loop instead */
38 *retlen = len;
39 while(len) {
40 __raw_writeb(*(unsigned char *) from, map->virt + to);
41 from++;
42 to++;
43 len--;
44 }
45}
46
47static struct mtd_partition *parsed_parts;
48
49struct map_info ocelot_flash_map = {
50 .name = "Ocelot boot flash",
51 .size = FLASH_WINDOW_SIZE,
52 .bankwidth = FLASH_BUSWIDTH,
53 .phys = FLASH_WINDOW_ADDR,
54};
55
56struct map_info ocelot_nvram_map = {
57 .name = "Ocelot NVRAM",
58 .size = NVRAM_WINDOW_SIZE,
59 .bankwidth = NVRAM_BUSWIDTH,
60 .phys = NVRAM_WINDOW_ADDR,
61};
62
63static const char *probes[] = { "RedBoot", NULL };
64
65static int __init init_ocelot_maps(void)
66{
67 void *pld;
68 int nr_parts;
69 unsigned char brd_status;
70
71 printk(KERN_INFO "Momenco Ocelot MTD mappings: Flash 0x%x at 0x%x, NVRAM 0x%x at 0x%x\n",
72 FLASH_WINDOW_SIZE, FLASH_WINDOW_ADDR, NVRAM_WINDOW_SIZE, NVRAM_WINDOW_ADDR);
73
74 /* First check whether the flash jumper is present */
75 pld = ioremap(OCELOT_PLD, 0x10);
76 if (!pld) {
77 printk(KERN_NOTICE "Failed to ioremap Ocelot PLD\n");
78 return -EIO;
79 }
80 brd_status = readb(pld+4);
81 iounmap(pld);
82
83 /* Now ioremap the NVRAM space */
84 ocelot_nvram_map.virt = ioremap_nocache(NVRAM_WINDOW_ADDR, NVRAM_WINDOW_SIZE);
85 if (!ocelot_nvram_map.virt) {
86 printk(KERN_NOTICE "Failed to ioremap Ocelot NVRAM space\n");
87 return -EIO;
88 }
89
90 simple_map_init(&ocelot_nvram_map);
91
92 /* And do the RAM probe on it to get an MTD device */
93 nvram_mtd = do_map_probe("map_ram", &ocelot_nvram_map);
94 if (!nvram_mtd) {
95 printk("NVRAM probe failed\n");
96 goto fail_1;
97 }
98 nvram_mtd->owner = THIS_MODULE;
99 nvram_mtd->erasesize = 16;
100 /* Override the write() method */
101 nvram_mtd->write = ocelot_ram_write;
102
103 /* Now map the flash space */
104 ocelot_flash_map.virt = ioremap_nocache(FLASH_WINDOW_ADDR, FLASH_WINDOW_SIZE);
105 if (!ocelot_flash_map.virt) {
106 printk(KERN_NOTICE "Failed to ioremap Ocelot flash space\n");
107 goto fail_2;
108 }
109 /* Now the cached version */
110 ocelot_flash_map.cached = (unsigned long)__ioremap(FLASH_WINDOW_ADDR, FLASH_WINDOW_SIZE, 0);
111
112 simple_map_init(&ocelot_flash_map);
113
114 /* Only probe for flash if the write jumper is present */
115 if (brd_status & 0x40) {
116 flash_mtd = do_map_probe("jedec", &ocelot_flash_map);
117 } else {
118 printk(KERN_NOTICE "Ocelot flash write jumper not present. Treating as ROM\n");
119 }
120 /* If that failed or the jumper's absent, pretend it's ROM */
121 if (!flash_mtd) {
122 flash_mtd = do_map_probe("map_rom", &ocelot_flash_map);
123 /* If we're treating it as ROM, set the erase size */
124 if (flash_mtd)
125 flash_mtd->erasesize = 0x10000;
126 }
127 if (!flash_mtd)
128 goto fail3;
129
130 add_mtd_device(nvram_mtd);
131
132 flash_mtd->owner = THIS_MODULE;
133 nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0);
134
135 if (nr_parts > 0)
136 add_mtd_partitions(flash_mtd, parsed_parts, nr_parts);
137 else
138 add_mtd_device(flash_mtd);
139
140 return 0;
141
142 fail3:
143 iounmap((void *)ocelot_flash_map.virt);
144 if (ocelot_flash_map.cached)
145 iounmap((void *)ocelot_flash_map.cached);
146 fail_2:
147 map_destroy(nvram_mtd);
148 fail_1:
149 iounmap((void *)ocelot_nvram_map.virt);
150
151 return -ENXIO;
152}
153
154static void __exit cleanup_ocelot_maps(void)
155{
156 del_mtd_device(nvram_mtd);
157 map_destroy(nvram_mtd);
158 iounmap((void *)ocelot_nvram_map.virt);
159
160 if (parsed_parts)
161 del_mtd_partitions(flash_mtd);
162 else
163 del_mtd_device(flash_mtd);
164 map_destroy(flash_mtd);
165 iounmap((void *)ocelot_flash_map.virt);
166 if (ocelot_flash_map.cached)
167 iounmap((void *)ocelot_flash_map.cached);
168}
169
170module_init(init_ocelot_maps);
171module_exit(cleanup_ocelot_maps);
172
173MODULE_LICENSE("GPL");
174MODULE_AUTHOR("Red Hat, Inc. - David Woodhouse <dwmw2@cambridge.redhat.com>");
175MODULE_DESCRIPTION("MTD map driver for Momenco Ocelot board");
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index cf75a56644..aeed9ea797 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -232,7 +232,6 @@ static int __devinit of_flash_probe(struct of_device *dev,
232 info = kzalloc(sizeof(*info), GFP_KERNEL); 232 info = kzalloc(sizeof(*info), GFP_KERNEL);
233 if (!info) 233 if (!info)
234 goto err_out; 234 goto err_out;
235 memset(info, 0, sizeof(*info));
236 235
237 dev_set_drvdata(&dev->dev, info); 236 dev_set_drvdata(&dev->dev, info);
238 237
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index 7e0377ec1c..02bde8c982 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -73,13 +73,16 @@ int __init init_msp_flash(void)
73 return -ENXIO; 73 return -ENXIO;
74 74
75 printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt); 75 printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);
76 msp_flash = (struct mtd_info **)kmalloc( 76
77 fcnt * sizeof(struct map_info *), GFP_KERNEL); 77 msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL);
78 msp_parts = (struct mtd_partition **)kmalloc( 78 msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL);
79 fcnt * sizeof(struct mtd_partition *), GFP_KERNEL); 79 msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL);
80 msp_maps = (struct map_info *)kmalloc( 80 if (!msp_flash || !msp_parts || !msp_maps) {
81 fcnt * sizeof(struct mtd_info), GFP_KERNEL); 81 kfree(msp_maps);
82 memset(msp_maps, 0, fcnt * sizeof(struct mtd_info)); 82 kfree(msp_parts);
83 kfree(msp_flash);
84 return -ENOMEM;
85 }
83 86
84 /* loop over the flash devices, initializing each */ 87 /* loop over the flash devices, initializing each */
85 for (i = 0; i < fcnt; i++) { 88 for (i = 0; i < fcnt; i++) {
@@ -95,9 +98,8 @@ int __init init_msp_flash(void)
95 continue; 98 continue;
96 } 99 }
97 100
98 msp_parts[i] = (struct mtd_partition *)kmalloc( 101 msp_parts[i] = kcalloc(pcnt, sizeof(struct mtd_partition),
99 pcnt * sizeof(struct mtd_partition), GFP_KERNEL); 102 GFP_KERNEL);
100 memset(msp_parts[i], 0, pcnt * sizeof(struct mtd_partition));
101 103
102 /* now initialize the devices proper */ 104 /* now initialize the devices proper */
103 flash_name[5] = '0' + i; 105 flash_name[5] = '0' + i;
diff --git a/drivers/mtd/maps/pmcmsp-ramroot.c b/drivers/mtd/maps/pmcmsp-ramroot.c
index 18049bceba..30de5c0c09 100644
--- a/drivers/mtd/maps/pmcmsp-ramroot.c
+++ b/drivers/mtd/maps/pmcmsp-ramroot.c
@@ -79,7 +79,6 @@ static int __init init_rrmap(void)
79 rr_mtd->owner = THIS_MODULE; 79 rr_mtd->owner = THIS_MODULE;
80 80
81 add_mtd_device(rr_mtd); 81 add_mtd_device(rr_mtd);
82 ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, rr_mtd->index);
83 82
84 return 0; 83 return 0;
85 } 84 }
diff --git a/drivers/mtd/maps/pq2fads.c b/drivers/mtd/maps/pq2fads.c
deleted file mode 100644
index fb78d87cc1..0000000000
--- a/drivers/mtd/maps/pq2fads.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * drivers/mtd/maps/pq2fads.c
3 *
4 * Mapping for the flash SIMM on 8272ADS and PQ2FADS board
5 *
6 * Author: Vitaly Bordug <vbordug@ru.mvista.com>
7 *
8 * 2005 (c) MontaVista Software, Inc. This file is licensed under
9 * the terms of the GNU General Public License version 2. This program
10 * is licensed "as is" without any warranty of any kind, whether express
11 * or implied.
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <asm/io.h>
19#include <asm/ppcboot.h>
20#include <linux/mtd/mtd.h>
21#include <linux/mtd/map.h>
22#include <linux/mtd/partitions.h>
23#include <linux/mtd/physmap.h>
24
25/*
26 NOTE: bank width and interleave relative to the installed flash
27 should have been chosen within MTD_CFI_GEOMETRY options.
28 */
29#define PQ2FADS_BANK_WIDTH 4
30
31static struct mtd_partition pq2fads_partitions[] = {
32 {
33#ifdef CONFIG_ADS8272
34 .name = "HRCW",
35 .size = 0x40000,
36 .offset = 0,
37 .mask_flags = MTD_WRITEABLE, /* force read-only */
38 }, {
39 .name = "User FS",
40 .size = 0x5c0000,
41 .offset = 0x40000,
42#else
43 .name = "User FS",
44 .size = 0x600000,
45 .offset = 0,
46#endif
47 }, {
48 .name = "uImage",
49 .size = 0x100000,
50 .offset = 0x600000,
51 .mask_flags = MTD_WRITEABLE, /* force read-only */
52 }, {
53 .name = "bootloader",
54 .size = 0x40000,
55 .offset = 0x700000,
56 .mask_flags = MTD_WRITEABLE, /* force read-only */
57 }, {
58 .name = "bootloader env",
59 .size = 0x40000,
60 .offset = 0x740000,
61 .mask_flags = MTD_WRITEABLE, /* force read-only */
62 }
63};
64
65
66/* pointer to MPC885ADS board info data */
67extern unsigned char __res[];
68
69static int __init init_pq2fads_mtd(void)
70{
71 bd_t *bd = (bd_t *)__res;
72 physmap_configure(bd->bi_flashstart, bd->bi_flashsize, PQ2FADS_BANK_WIDTH, NULL);
73
74 physmap_set_partitions(pq2fads_partitions,
75 sizeof (pq2fads_partitions) /
76 sizeof (pq2fads_partitions[0]));
77 return 0;
78}
79
80static void __exit cleanup_pq2fads_mtd(void)
81{
82}
83
84module_init(init_pq2fads_mtd);
85module_exit(cleanup_pq2fads_mtd);
86
87MODULE_LICENSE("GPL");
88MODULE_DESCRIPTION("MTD map and partitions for MPC8272ADS boards");
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
new file mode 100644
index 0000000000..cb933ac475
--- /dev/null
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -0,0 +1,200 @@
1/*
2 * Map driver for Intel XScale PXA2xx platforms.
3 *
4 * Author: Nicolas Pitre
5 * Copyright: (C) 2001 MontaVista Software Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/platform_device.h>
17#include <linux/dma-mapping.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/map.h>
20#include <linux/mtd/partitions.h>
21
22#include <asm/io.h>
23#include <asm/hardware.h>
24
25#include <asm/mach/flash.h>
26
27static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
28 ssize_t len)
29{
30 consistent_sync((char *)map->cached + from, len, DMA_FROM_DEVICE);
31}
32
33struct pxa2xx_flash_info {
34 struct mtd_partition *parts;
35 int nr_parts;
36 struct mtd_info *mtd;
37 struct map_info map;
38};
39
40
41static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
42
43
44static int __init pxa2xx_flash_probe(struct device *dev)
45{
46 struct platform_device *pdev = to_platform_device(dev);
47 struct flash_platform_data *flash = pdev->dev.platform_data;
48 struct pxa2xx_flash_info *info;
49 struct mtd_partition *parts;
50 struct resource *res;
51 int ret = 0;
52
53 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
54 if (!res)
55 return -ENODEV;
56
57 info = kmalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL);
58 if (!info)
59 return -ENOMEM;
60
61 memset(info, 0, sizeof(struct pxa2xx_flash_info));
62 info->map.name = (char *) flash->name;
63 info->map.bankwidth = flash->width;
64 info->map.phys = res->start;
65 info->map.size = res->end - res->start + 1;
66 info->parts = flash->parts;
67 info->nr_parts = flash->nr_parts;
68
69 info->map.virt = ioremap(info->map.phys, info->map.size);
70 if (!info->map.virt) {
71 printk(KERN_WARNING "Failed to ioremap %s\n",
72 info->map.name);
73 return -ENOMEM;
74 }
75 info->map.cached =
76 ioremap_cached(info->map.phys, info->map.size);
77 if (!info->map.cached)
78 printk(KERN_WARNING "Failed to ioremap cached %s\n",
79 info->map.name);
80 info->map.inval_cache = pxa2xx_map_inval_cache;
81 simple_map_init(&info->map);
82
83 printk(KERN_NOTICE
84 "Probing %s at physical address 0x%08lx"
85 " (%d-bit bankwidth)\n",
86 info->map.name, (unsigned long)info->map.phys,
87 info->map.bankwidth * 8);
88
89 info->mtd = do_map_probe(flash->map_name, &info->map);
90
91 if (!info->mtd) {
92 iounmap((void *)info->map.virt);
93 if (info->map.cached)
94 iounmap(info->map.cached);
95 return -EIO;
96 }
97 info->mtd->owner = THIS_MODULE;
98
99#ifdef CONFIG_MTD_PARTITIONS
100 ret = parse_mtd_partitions(info->mtd, probes, &parts, 0);
101
102 if (ret > 0) {
103 info->nr_parts = ret;
104 info->parts = parts;
105 }
106#endif
107
108 if (info->nr_parts) {
109 add_mtd_partitions(info->mtd, info->parts,
110 info->nr_parts);
111 } else {
112 printk("Registering %s as whole device\n",
113 info->map.name);
114 add_mtd_device(info->mtd);
115 }
116
117 dev_set_drvdata(dev, info);
118 return 0;
119}
120
121static int __exit pxa2xx_flash_remove(struct device *dev)
122{
123 struct pxa2xx_flash_info *info = dev_get_drvdata(dev);
124
125 dev_set_drvdata(dev, NULL);
126
127#ifdef CONFIG_MTD_PARTITIONS
128 if (info->nr_parts)
129 del_mtd_partitions(info->mtd);
130 else
131#endif
132 del_mtd_device(info->mtd);
133
134 map_destroy(info->mtd);
135 iounmap(info->map.virt);
136 if (info->map.cached)
137 iounmap(info->map.cached);
138 kfree(info->parts);
139 kfree(info);
140 return 0;
141}
142
143#ifdef CONFIG_PM
144static int pxa2xx_flash_suspend(struct device *dev, pm_message_t state)
145{
146 struct pxa2xx_flash_info *info = dev_get_drvdata(dev);
147 int ret = 0;
148
149 if (info->mtd && info->mtd->suspend)
150 ret = info->mtd->suspend(info->mtd);
151 return ret;
152}
153
154static int pxa2xx_flash_resume(struct device *dev)
155{
156 struct pxa2xx_flash_info *info = dev_get_drvdata(dev);
157
158 if (info->mtd && info->mtd->resume)
159 info->mtd->resume(info->mtd);
160 return 0;
161}
162static void pxa2xx_flash_shutdown(struct device *dev)
163{
164 struct pxa2xx_flash_info *info = dev_get_drvdata(dev);
165
166 if (info && info->mtd->suspend(info->mtd) == 0)
167 info->mtd->resume(info->mtd);
168}
169#else
170#define pxa2xx_flash_suspend NULL
171#define pxa2xx_flash_resume NULL
172#define pxa2xx_flash_shutdown NULL
173#endif
174
175static struct device_driver pxa2xx_flash_driver = {
176 .name = "pxa2xx-flash",
177 .bus = &platform_bus_type,
178 .probe = pxa2xx_flash_probe,
179 .remove = __exit_p(pxa2xx_flash_remove),
180 .suspend = pxa2xx_flash_suspend,
181 .resume = pxa2xx_flash_resume,
182 .shutdown = pxa2xx_flash_shutdown,
183};
184
185static int __init init_pxa2xx_flash(void)
186{
187 return driver_register(&pxa2xx_flash_driver);
188}
189
190static void __exit cleanup_pxa2xx_flash(void)
191{
192 driver_unregister(&pxa2xx_flash_driver);
193}
194
195module_init(init_pxa2xx_flash);
196module_exit(cleanup_pxa2xx_flash);
197
198MODULE_LICENSE("GPL");
199MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
200MODULE_DESCRIPTION("MTD map driver for Intel XScale PXA2xx");
diff --git a/drivers/mtd/maps/tqm834x.c b/drivers/mtd/maps/tqm834x.c
deleted file mode 100644
index 9adc970e55..0000000000
--- a/drivers/mtd/maps/tqm834x.c
+++ /dev/null
@@ -1,286 +0,0 @@
1/*
2 * drivers/mtd/maps/tqm834x.c
3 *
4 * MTD mapping driver for TQM834x boards
5 *
6 * Copyright 2005 Wolfgang Denk, DENX Software Engineering, <wd@denx.de>.
7 *
8 * This file is licensed under the terms of the GNU General Public License
9 * version 2. This program is licensed "as is" without any warranty of any
10 * kind, whether express or implied.
11 *
12 */
13
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/kernel.h>
18#include <linux/slab.h>
19#include <asm/io.h>
20#include <asm/ppcboot.h>
21
22#include <linux/mtd/mtd.h>
23#include <linux/mtd/map.h>
24#include <linux/mtd/partitions.h>
25
26#define FLASH_BANK_MAX 2
27
28extern unsigned char __res[];
29
30/* trivial struct to describe partition information */
31struct mtd_part_def
32{
33 int nums;
34 unsigned char *type;
35 struct mtd_partition* mtd_part;
36};
37
38static struct mtd_info* mtd_banks[FLASH_BANK_MAX];
39static struct map_info* map_banks[FLASH_BANK_MAX];
40static struct mtd_part_def part_banks[FLASH_BANK_MAX];
41
42static unsigned long num_banks;
43static unsigned long start_scan_addr;
44
45#ifdef CONFIG_MTD_PARTITIONS
46/*
47 * The following defines the partition layout of TQM834x boards.
48 *
49 * See include/linux/mtd/partitions.h for definition of the
50 * mtd_partition structure.
51 *
52 * Assume minimal initial size of 4 MiB per bank, will be updated
53 * later in init_tqm834x_mtd() routine.
54 */
55
56/* Partition definition for the first flash bank which is always present. */
57static struct mtd_partition tqm834x_partitions_bank1[] = {
58 {
59 .name = "u-boot", /* u-boot firmware */
60 .offset = 0x00000000,
61 .size = 0x00040000, /* 256 KiB */
62 /*mask_flags: MTD_WRITEABLE, * force read-only */
63 },
64 {
65 .name = "env", /* u-boot environment */
66 .offset = 0x00040000,
67 .size = 0x00020000, /* 128 KiB */
68 /*mask_flags: MTD_WRITEABLE, * force read-only */
69 },
70 {
71 .name = "kernel", /* linux kernel image */
72 .offset = 0x00060000,
73 .size = 0x00100000, /* 1 MiB */
74 /*mask_flags: MTD_WRITEABLE, * force read-only */
75 },
76 {
77 .name = "initrd", /* ramdisk image */
78 .offset = 0x00160000,
79 .size = 0x00200000, /* 2 MiB */
80 },
81 {
82 .name = "user", /* user data */
83 .offset = 0x00360000,
84 .size = 0x000a0000, /* remaining space */
85 /* NOTE: this parttion size is re-calcated in */
86 /* init_tqm834x_mtd() to cover actual remaining space. */
87 },
88};
89
90/* Partition definition for the second flash bank which may be present on some
91 * TQM834x boards.
92 */
93static struct mtd_partition tqm834x_partitions_bank2[] = {
94 {
95 .name = "jffs2", /* jffs2 filesystem */
96 .offset = 0x00000000,
97 .size = 0x00400000, /* whole device */
98 /* NOTE: this parttion size is re-calcated in */
99 /* init_tqm834x_mtd() to cover actual device size. */
100 },
101};
102
103#endif /* CONFIG_MTD_PARTITIONS */
104
105static int __init init_tqm834x_mtd(void)
106{
107 int idx = 0, ret = 0;
108 unsigned long flash_addr, flash_size, mtd_size = 0;
109
110 /* pointer to TQM834x board info data */
111 bd_t *bd = (bd_t *)__res;
112#ifdef CONFIG_MTD_CMDLINE_PARTS
113 int n;
114 char mtdid[4];
115 const char *part_probes[] = { "cmdlinepart", NULL };
116#endif
117
118 flash_addr = bd->bi_flashstart;
119 flash_size = bd->bi_flashsize;
120
121 /* request maximum flash size address space */
122 start_scan_addr = (unsigned long)ioremap(flash_addr, flash_size);
123 if (!start_scan_addr) {
124 printk("%s: Failed to ioremap address: 0x%lx\n",
125 __FUNCTION__, flash_addr);
126 return -EIO;
127 }
128
129 for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
130 if (mtd_size >= flash_size)
131 break;
132
133 pr_debug("%s: chip probing count %d\n", __FUNCTION__, idx);
134
135 map_banks[idx] = kzalloc(sizeof(struct map_info), GFP_KERNEL);
136 if (map_banks[idx] == NULL) {
137 ret = -ENOMEM;
138 goto error_mem;
139 }
140 map_banks[idx]->name = kzalloc(16, GFP_KERNEL);
141 if (map_banks[idx]->name == NULL) {
142 ret = -ENOMEM;
143 goto error_mem;
144 }
145
146 sprintf(map_banks[idx]->name, "TQM834x-%d", idx);
147 map_banks[idx]->size = flash_size;
148 map_banks[idx]->bankwidth = 4;
149
150 simple_map_init(map_banks[idx]);
151
152 map_banks[idx]->virt = (void __iomem *)
153 (start_scan_addr + ((idx > 0) ?
154 (mtd_banks[idx-1] ? mtd_banks[idx-1]->size : 0) : 0));
155 map_banks[idx]->phys =
156 flash_addr + ((idx > 0) ?
157 (mtd_banks[idx-1] ? mtd_banks[idx-1]->size : 0) : 0);
158
159 /* start to probe flash chips */
160 mtd_banks[idx] = do_map_probe("cfi_probe", map_banks[idx]);
161 if (mtd_banks[idx]) {
162 mtd_banks[idx]->owner = THIS_MODULE;
163 mtd_size += mtd_banks[idx]->size;
164 num_banks++;
165 pr_debug("%s: bank %ld, name: %s, size: %d bytes \n",
166 __FUNCTION__, num_banks,
167 mtd_banks[idx]->name, mtd_banks[idx]->size);
168 }
169 }
170
171 /* no supported flash chips found */
172 if (!num_banks) {
173 printk("TQM834x: No supported flash chips found!\n");
174 ret = -ENXIO;
175 goto error_mem;
176 }
177
178#ifdef CONFIG_MTD_PARTITIONS
179 /*
180 * Select static partition definitions
181 */
182 n = ARRAY_SIZE(tqm834x_partitions_bank1);
183 part_banks[0].mtd_part = tqm834x_partitions_bank1;
184 part_banks[0].type = "static image bank1";
185 part_banks[0].nums = n;
186
187 /* update last partition size to cover actual remaining space */
188 tqm834x_partitions_bank1[n - 1].size =
189 mtd_banks[0]->size -
190 tqm834x_partitions_bank1[n - 1].offset;
191
192 /* check if we have second bank? */
193 if (num_banks == 2) {
194 n = ARRAY_SIZE(tqm834x_partitions_bank2);
195 part_banks[1].mtd_part = tqm834x_partitions_bank2;
196 part_banks[1].type = "static image bank2";
197 part_banks[1].nums = n;
198
199 /* update last partition size to cover actual remaining space */
200 tqm834x_partitions_bank2[n - 1].size =
201 mtd_banks[1]->size -
202 tqm834x_partitions_bank2[n - 1].offset;
203 }
204
205 for(idx = 0; idx < num_banks ; idx++) {
206#ifdef CONFIG_MTD_CMDLINE_PARTS
207 sprintf(mtdid, "%d", idx);
208 n = parse_mtd_partitions(mtd_banks[idx],
209 part_probes,
210 &part_banks[idx].mtd_part,
211 0);
212 pr_debug("%s: %d command line partitions on bank %s\n",
213 __FUNCTION__, n, mtdid);
214 if (n > 0) {
215 part_banks[idx].type = "command line";
216 part_banks[idx].nums = n;
217 }
218#endif /* CONFIG_MTD_CMDLINE_PARTS */
219 if (part_banks[idx].nums == 0) {
220 printk(KERN_NOTICE
221 "TQM834x flash bank %d: no partition info "
222 "available, registering whole device\n", idx);
223 add_mtd_device(mtd_banks[idx]);
224 } else {
225 printk(KERN_NOTICE
226 "TQM834x flash bank %d: Using %s partition "
227 "definition\n", idx, part_banks[idx].type);
228 add_mtd_partitions(mtd_banks[idx],
229 part_banks[idx].mtd_part,
230 part_banks[idx].nums);
231 }
232 }
233#else /* ! CONFIG_MTD_PARTITIONS */
234 printk(KERN_NOTICE "TQM834x flash: registering %d flash banks "
235 "at once\n", num_banks);
236
237 for(idx = 0 ; idx < num_banks ; idx++)
238 add_mtd_device(mtd_banks[idx]);
239
240#endif /* CONFIG_MTD_PARTITIONS */
241
242 return 0;
243error_mem:
244 for (idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
245 if (map_banks[idx] != NULL) {
246 if (map_banks[idx]->name != NULL) {
247 kfree(map_banks[idx]->name);
248 map_banks[idx]->name = NULL;
249 }
250 kfree(map_banks[idx]);
251 map_banks[idx] = NULL;
252 }
253 }
254
255 iounmap((void *)start_scan_addr);
256
257 return ret;
258}
259
260static void __exit cleanup_tqm834x_mtd(void)
261{
262 unsigned int idx = 0;
263 for(idx = 0 ; idx < num_banks ; idx++) {
264 /* destroy mtd_info previously allocated */
265 if (mtd_banks[idx]) {
266 del_mtd_partitions(mtd_banks[idx]);
267 map_destroy(mtd_banks[idx]);
268 }
269
270 /* release map_info not used anymore */
271 kfree(map_banks[idx]->name);
272 kfree(map_banks[idx]);
273 }
274
275 if (start_scan_addr) {
276 iounmap((void *)start_scan_addr);
277 start_scan_addr = 0;
278 }
279}
280
281module_init(init_tqm834x_mtd);
282module_exit(cleanup_tqm834x_mtd);
283
284MODULE_LICENSE("GPL");
285MODULE_AUTHOR("Wolfgang Denk <wd@denx.de>");
286MODULE_DESCRIPTION("MTD map driver for TQM834x boards");
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index ef89780eb9..74d9d30eda 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -24,10 +24,9 @@
24#include <linux/kthread.h> 24#include <linux/kthread.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26 26
27static LIST_HEAD(blktrans_majors); 27#include "mtdcore.h"
28 28
29extern struct mutex mtd_table_mutex; 29static LIST_HEAD(blktrans_majors);
30extern struct mtd_info *mtd_table[];
31 30
32struct mtd_blkcore_priv { 31struct mtd_blkcore_priv {
33 struct task_struct *thread; 32 struct task_struct *thread;
@@ -202,7 +201,7 @@ static int blktrans_ioctl(struct inode *inode, struct file *file,
202 } 201 }
203} 202}
204 203
205struct block_device_operations mtd_blktrans_ops = { 204static struct block_device_operations mtd_blktrans_ops = {
206 .owner = THIS_MODULE, 205 .owner = THIS_MODULE,
207 .open = blktrans_open, 206 .open = blktrans_open,
208 .release = blktrans_release, 207 .release = blktrans_release,
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index d091b2430b..22ed96c4b7 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -136,7 +136,8 @@ static int mtd_close(struct inode *inode, struct file *file)
136 136
137 DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n"); 137 DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
138 138
139 if (mtd->sync) 139 /* Only sync if opened RW */
140 if ((file->f_mode & 2) && mtd->sync)
140 mtd->sync(mtd); 141 mtd->sync(mtd);
141 142
142 put_mtd_device(mtd); 143 put_mtd_device(mtd);
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 41844ea024..96be7ef62f 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -178,7 +178,7 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
178 178
179 /* Check alignment */ 179 /* Check alignment */
180 if (mtd->writesize > 1) { 180 if (mtd->writesize > 1) {
181 loff_t __to = to; 181 uint64_t __to = to;
182 if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize)) 182 if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
183 return -EINVAL; 183 return -EINVAL;
184 } 184 }
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index c153b64a83..6c2645e283 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -22,6 +22,8 @@
22 22
23#include <linux/mtd/mtd.h> 23#include <linux/mtd/mtd.h>
24 24
25#include "mtdcore.h"
26
25/* These are exported solely for the purpose of mtd_blkdevs.c. You 27/* These are exported solely for the purpose of mtd_blkdevs.c. You
26 should not use them for _anything_ else */ 28 should not use them for _anything_ else */
27DEFINE_MUTEX(mtd_table_mutex); 29DEFINE_MUTEX(mtd_table_mutex);
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
new file mode 100644
index 0000000000..a33251f4b8
--- /dev/null
+++ b/drivers/mtd/mtdcore.h
@@ -0,0 +1,11 @@
1/* linux/drivers/mtd/mtdcore.h
2 *
3 * Header file for driver private mtdcore exports
4 *
5 */
6
7/* These are exported solely for the purpose of mtd_blkdevs.c. You
8 should not use them for _anything_ else */
9
10extern struct mutex mtd_table_mutex;
11extern struct mtd_info *mtd_table[MAX_MTD_DEVICES];
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
new file mode 100644
index 0000000000..f8af627f0b
--- /dev/null
+++ b/drivers/mtd/mtdoops.c
@@ -0,0 +1,376 @@
1/*
2 * MTD Oops/Panic logger
3 *
4 * Copyright (C) 2007 Nokia Corporation. All rights reserved.
5 *
6 * Author: Richard Purdie <rpurdie@openedhand.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/console.h>
27#include <linux/vmalloc.h>
28#include <linux/workqueue.h>
29#include <linux/sched.h>
30#include <linux/wait.h>
31#include <linux/mtd/mtd.h>
32
33#define OOPS_PAGE_SIZE 4096
34
35static struct mtdoops_context {
36 int mtd_index;
37 struct work_struct work;
38 struct mtd_info *mtd;
39 int oops_pages;
40 int nextpage;
41 int nextcount;
42
43 void *oops_buf;
44 int ready;
45 int writecount;
46} oops_cxt;
47
48static void mtdoops_erase_callback(struct erase_info *done)
49{
50 wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
51 wake_up(wait_q);
52}
53
54static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
55{
56 struct erase_info erase;
57 DECLARE_WAITQUEUE(wait, current);
58 wait_queue_head_t wait_q;
59 int ret;
60
61 init_waitqueue_head(&wait_q);
62 erase.mtd = mtd;
63 erase.callback = mtdoops_erase_callback;
64 erase.addr = offset;
65 if (mtd->erasesize < OOPS_PAGE_SIZE)
66 erase.len = OOPS_PAGE_SIZE;
67 else
68 erase.len = mtd->erasesize;
69 erase.priv = (u_long)&wait_q;
70
71 set_current_state(TASK_INTERRUPTIBLE);
72 add_wait_queue(&wait_q, &wait);
73
74 ret = mtd->erase(mtd, &erase);
75 if (ret) {
76 set_current_state(TASK_RUNNING);
77 remove_wait_queue(&wait_q, &wait);
78 printk (KERN_WARNING "mtdoops: erase of region [0x%x, 0x%x] "
79 "on \"%s\" failed\n",
80 erase.addr, erase.len, mtd->name);
81 return ret;
82 }
83
84 schedule(); /* Wait for erase to finish. */
85 remove_wait_queue(&wait_q, &wait);
86
87 return 0;
88}
89
90static int mtdoops_inc_counter(struct mtdoops_context *cxt)
91{
92 struct mtd_info *mtd = cxt->mtd;
93 size_t retlen;
94 u32 count;
95 int ret;
96
97 cxt->nextpage++;
98 if (cxt->nextpage > cxt->oops_pages)
99 cxt->nextpage = 0;
100 cxt->nextcount++;
101 if (cxt->nextcount == 0xffffffff)
102 cxt->nextcount = 0;
103
104 ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
105 &retlen, (u_char *) &count);
106 if ((retlen != 4) || (ret < 0)) {
107 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
108 ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
109 retlen, ret);
110 return 1;
111 }
112
113 /* See if we need to erase the next block */
114 if (count != 0xffffffff)
115 return 1;
116
117 printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
118 cxt->nextpage, cxt->nextcount);
119 cxt->ready = 1;
120 return 0;
121}
122
123static void mtdoops_prepare(struct mtdoops_context *cxt)
124{
125 struct mtd_info *mtd = cxt->mtd;
126 int i = 0, j, ret, mod;
127
128 /* We were unregistered */
129 if (!mtd)
130 return;
131
132 mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize;
133 if (mod != 0) {
134 cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE);
135 if (cxt->nextpage > cxt->oops_pages)
136 cxt->nextpage = 0;
137 }
138
139 while (mtd->block_isbad &&
140 mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE)) {
141badblock:
142 printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
143 cxt->nextpage * OOPS_PAGE_SIZE);
144 i++;
145 cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE);
146 if (cxt->nextpage > cxt->oops_pages)
147 cxt->nextpage = 0;
148 if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) {
149 printk(KERN_ERR "mtdoops: All blocks bad!\n");
150 return;
151 }
152 }
153
154 for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
155 ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
156
157 if (ret < 0) {
158 if (mtd->block_markbad)
159 mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
160 goto badblock;
161 }
162
163 printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
164
165 cxt->ready = 1;
166}
167
168static void mtdoops_workfunc(struct work_struct *work)
169{
170 struct mtdoops_context *cxt =
171 container_of(work, struct mtdoops_context, work);
172
173 mtdoops_prepare(cxt);
174}
175
176static int find_next_position(struct mtdoops_context *cxt)
177{
178 struct mtd_info *mtd = cxt->mtd;
179 int page, maxpos = 0;
180 u32 count, maxcount = 0xffffffff;
181 size_t retlen;
182
183 for (page = 0; page < cxt->oops_pages; page++) {
184 mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
185 if (count == 0xffffffff)
186 continue;
187 if (maxcount == 0xffffffff) {
188 maxcount = count;
189 maxpos = page;
190 } else if ((count < 0x40000000) && (maxcount > 0xc0000000)) {
191 maxcount = count;
192 maxpos = page;
193 } else if ((count > maxcount) && (count < 0xc0000000)) {
194 maxcount = count;
195 maxpos = page;
196 } else if ((count > maxcount) && (count > 0xc0000000)
197 && (maxcount > 0x80000000)) {
198 maxcount = count;
199 maxpos = page;
200 }
201 }
202 if (maxcount == 0xffffffff) {
203 cxt->nextpage = 0;
204 cxt->nextcount = 1;
205 cxt->ready = 1;
206 printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n",
207 cxt->nextpage, cxt->nextcount);
208 return 0;
209 }
210
211 cxt->nextpage = maxpos;
212 cxt->nextcount = maxcount;
213
214 return mtdoops_inc_counter(cxt);
215}
216
217
218static void mtdoops_notify_add(struct mtd_info *mtd)
219{
220 struct mtdoops_context *cxt = &oops_cxt;
221 int ret;
222
223 if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
224 return;
225
226 if (mtd->size < (mtd->erasesize * 2)) {
227 printk(KERN_ERR "MTD partition %d not big enough for mtdoops\n",
228 mtd->index);
229 return;
230 }
231
232 cxt->mtd = mtd;
233 cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE;
234
235 ret = find_next_position(cxt);
236 if (ret == 1)
237 mtdoops_prepare(cxt);
238
239 printk(KERN_DEBUG "mtdoops: Attached to MTD device %d\n", mtd->index);
240}
241
242static void mtdoops_notify_remove(struct mtd_info *mtd)
243{
244 struct mtdoops_context *cxt = &oops_cxt;
245
246 if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
247 return;
248
249 cxt->mtd = NULL;
250 flush_scheduled_work();
251}
252
253static void mtdoops_console_sync(void)
254{
255 struct mtdoops_context *cxt = &oops_cxt;
256 struct mtd_info *mtd = cxt->mtd;
257 size_t retlen;
258 int ret;
259
260 if (!cxt->ready || !mtd)
261 return;
262
263 if (cxt->writecount == 0)
264 return;
265
266 if (cxt->writecount < OOPS_PAGE_SIZE)
267 memset(cxt->oops_buf + cxt->writecount, 0xff,
268 OOPS_PAGE_SIZE - cxt->writecount);
269
270 ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
271 OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
272 cxt->ready = 0;
273 cxt->writecount = 0;
274
275 if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
276 printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
277 cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
278
279 ret = mtdoops_inc_counter(cxt);
280 if (ret == 1)
281 schedule_work(&cxt->work);
282}
283
284static void
285mtdoops_console_write(struct console *co, const char *s, unsigned int count)
286{
287 struct mtdoops_context *cxt = co->data;
288 struct mtd_info *mtd = cxt->mtd;
289 int i;
290
291 if (!oops_in_progress) {
292 mtdoops_console_sync();
293 return;
294 }
295
296 if (!cxt->ready || !mtd)
297 return;
298
299 if (cxt->writecount == 0) {
300 u32 *stamp = cxt->oops_buf;
301 *stamp = cxt->nextcount;
302 cxt->writecount = 4;
303 }
304
305 if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
306 count = OOPS_PAGE_SIZE - cxt->writecount;
307
308 for (i = 0; i < count; i++, s++)
309 *((char *)(cxt->oops_buf) + cxt->writecount + i) = *s;
310
311 cxt->writecount = cxt->writecount + count;
312}
313
314static int __init mtdoops_console_setup(struct console *co, char *options)
315{
316 struct mtdoops_context *cxt = co->data;
317
318 if (cxt->mtd_index != -1)
319 return -EBUSY;
320 if (co->index == -1)
321 return -EINVAL;
322
323 cxt->mtd_index = co->index;
324 return 0;
325}
326
327static struct mtd_notifier mtdoops_notifier = {
328 .add = mtdoops_notify_add,
329 .remove = mtdoops_notify_remove,
330};
331
332static struct console mtdoops_console = {
333 .name = "ttyMTD",
334 .write = mtdoops_console_write,
335 .setup = mtdoops_console_setup,
336 .unblank = mtdoops_console_sync,
337 .flags = CON_PRINTBUFFER,
338 .index = -1,
339 .data = &oops_cxt,
340};
341
342static int __init mtdoops_console_init(void)
343{
344 struct mtdoops_context *cxt = &oops_cxt;
345
346 cxt->mtd_index = -1;
347 cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
348
349 if (!cxt->oops_buf) {
350 printk(KERN_ERR "Failed to allocate oops buffer workspace\n");
351 return -ENOMEM;
352 }
353
354 INIT_WORK(&cxt->work, mtdoops_workfunc);
355
356 register_console(&mtdoops_console);
357 register_mtd_user(&mtdoops_notifier);
358 return 0;
359}
360
361static void __exit mtdoops_console_exit(void)
362{
363 struct mtdoops_context *cxt = &oops_cxt;
364
365 unregister_mtd_user(&mtdoops_notifier);
366 unregister_console(&mtdoops_console);
367 vfree(cxt->oops_buf);
368}
369
370
371subsys_initcall(mtdoops_console_init);
372module_exit(mtdoops_console_exit);
373
374MODULE_LICENSE("GPL");
375MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
376MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index f1d60b6f04..8f9c3baeb3 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -91,6 +91,25 @@ config MTD_NAND_AU1550
91 This enables the driver for the NAND flash controller on the 91 This enables the driver for the NAND flash controller on the
92 AMD/Alchemy 1550 SOC. 92 AMD/Alchemy 1550 SOC.
93 93
94config MTD_NAND_BF5XX
95 tristate "Blackfin on-chip NAND Flash Controller driver"
96 depends on BF54x && MTD_NAND
97 help
98 This enables the Blackfin on-chip NAND flash controller
99
100 No board specific support is done by this driver, each board
101 must advertise a platform_device for the driver to attach.
102
103 This driver can also be built as a module. If so, the module
104 will be called bf5xx-nand.
105
106config MTD_NAND_BF5XX_HWECC
107 bool "BF5XX NAND Hardware ECC"
108 depends on MTD_NAND_BF5XX
109 help
110 Enable the use of the BF5XX's internal ECC generator when
111 using NAND.
112
94config MTD_NAND_RTC_FROM4 113config MTD_NAND_RTC_FROM4
95 tristate "Renesas Flash ROM 4-slot interface board (FROM_BOARD4)" 114 tristate "Renesas Flash ROM 4-slot interface board (FROM_BOARD4)"
96 depends on SH_SOLUTION_ENGINE 115 depends on SH_SOLUTION_ENGINE
@@ -134,10 +153,10 @@ config MTD_NAND_S3C2410_HWECC
134 153
135config MTD_NAND_NDFC 154config MTD_NAND_NDFC
136 tristate "NDFC NanD Flash Controller" 155 tristate "NDFC NanD Flash Controller"
137 depends on 44x 156 depends on 4xx && !PPC_MERGE
138 select MTD_NAND_ECC_SMC 157 select MTD_NAND_ECC_SMC
139 help 158 help
140 NDFC Nand Flash Controllers are integrated in EP44x SoCs 159 NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
141 160
142config MTD_NAND_S3C2410_CLKSTOP 161config MTD_NAND_S3C2410_CLKSTOP
143 bool "S3C2410 NAND IDLE clock stop" 162 bool "S3C2410 NAND IDLE clock stop"
@@ -237,7 +256,7 @@ config MTD_NAND_CAFE
237 select REED_SOLOMON 256 select REED_SOLOMON
238 select REED_SOLOMON_DEC16 257 select REED_SOLOMON_DEC16
239 help 258 help
240 Use NAND flash attached to the CAFÉ chip designed for the $100 259 Use NAND flash attached to the CAFÉ chip designed for the OLPC
241 laptop. 260 laptop.
242 261
243config MTD_NAND_CS553X 262config MTD_NAND_CS553X
@@ -280,5 +299,11 @@ config MTD_NAND_PLATFORM
280 devices. You will need to provide platform-specific functions 299 devices. You will need to provide platform-specific functions
281 via platform_data. 300 via platform_data.
282 301
302config MTD_ALAUDA
303 tristate "MTD driver for Olympus MAUSB-10 and Fijufilm DPC-R1"
304 depends on MTD_NAND && USB
305 help
306 These two (and possibly other) Alauda-based cardreaders for
307 SmartMedia and xD allow raw flash access.
283 308
284endif # MTD_NAND 309endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index edba1db14b..3ad6c0165d 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_MTD_NAND_TOTO) += toto.o
13obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o 13obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
14obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o 14obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
15obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o 15obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
16obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
16obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o 17obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
17obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o 18obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
18obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o 19obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
@@ -27,5 +28,6 @@ obj-$(CONFIG_MTD_NAND_AT91) += at91_nand.o
27obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o 28obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
28obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o 29obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
29obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o 30obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
31obj-$(CONFIG_MTD_ALAUDA) += alauda.o
30 32
31nand-objs := nand_base.o nand_bbt.o 33nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
new file mode 100644
index 0000000000..257937cd99
--- /dev/null
+++ b/drivers/mtd/nand/alauda.c
@@ -0,0 +1,742 @@
1/*
2 * MTD driver for Alauda chips
3 *
4 * Copyright (C) 2007 Joern Engel <joern@logfs.org>
5 *
6 * Based on drivers/usb/usb-skeleton.c which is:
7 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
8 * and on drivers/usb/storage/alauda.c, which is:
9 * (c) 2005 Daniel Drake <dsd@gentoo.org>
10 *
11 * Idea and initial work by Arnd Bergmann <arnd@arndb.de>
12 */
13#include <linux/kernel.h>
14#include <linux/errno.h>
15#include <linux/init.h>
16#include <linux/slab.h>
17#include <linux/module.h>
18#include <linux/kref.h>
19#include <linux/usb.h>
20#include <linux/mutex.h>
21#include <linux/mtd/mtd.h>
22#include <linux/mtd/nand_ecc.h>
23
24/* Control commands */
25#define ALAUDA_GET_XD_MEDIA_STATUS 0x08
26#define ALAUDA_ACK_XD_MEDIA_CHANGE 0x0a
27#define ALAUDA_GET_XD_MEDIA_SIG 0x86
28
29/* Common prefix */
30#define ALAUDA_BULK_CMD 0x40
31
32/* The two ports */
33#define ALAUDA_PORT_XD 0x00
34#define ALAUDA_PORT_SM 0x01
35
36/* Bulk commands */
37#define ALAUDA_BULK_READ_PAGE 0x84
38#define ALAUDA_BULK_READ_OOB 0x85 /* don't use, there's a chip bug */
39#define ALAUDA_BULK_READ_BLOCK 0x94
40#define ALAUDA_BULK_ERASE_BLOCK 0xa3
41#define ALAUDA_BULK_WRITE_PAGE 0xa4
42#define ALAUDA_BULK_WRITE_BLOCK 0xb4
43#define ALAUDA_BULK_RESET_MEDIA 0xe0
44
45/* Address shifting */
46#define PBA_LO(pba) ((pba & 0xF) << 5)
47#define PBA_HI(pba) (pba >> 3)
48#define PBA_ZONE(pba) (pba >> 11)
49
50#define TIMEOUT HZ
51
52static struct usb_device_id alauda_table [] = {
53 { USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */
54 { USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */
55 { }
56};
57MODULE_DEVICE_TABLE(usb, alauda_table);
58
59struct alauda_card {
60 u8 id; /* id byte */
61 u8 chipshift; /* 1<<chipshift total size */
62 u8 pageshift; /* 1<<pageshift page size */
63 u8 blockshift; /* 1<<blockshift block size */
64};
65
66struct alauda {
67 struct usb_device *dev;
68 struct usb_interface *interface;
69 struct mtd_info *mtd;
70 struct alauda_card *card;
71 struct mutex card_mutex;
72 u32 pagemask;
73 u32 bytemask;
74 u32 blockmask;
75 unsigned int write_out;
76 unsigned int bulk_in;
77 unsigned int bulk_out;
78 u8 port;
79 struct kref kref;
80};
81
82static struct alauda_card alauda_card_ids[] = {
83 /* NAND flash */
84 { 0x6e, 20, 8, 12}, /* 1 MB */
85 { 0xe8, 20, 8, 12}, /* 1 MB */
86 { 0xec, 20, 8, 12}, /* 1 MB */
87 { 0x64, 21, 8, 12}, /* 2 MB */
88 { 0xea, 21, 8, 12}, /* 2 MB */
89 { 0x6b, 22, 9, 13}, /* 4 MB */
90 { 0xe3, 22, 9, 13}, /* 4 MB */
91 { 0xe5, 22, 9, 13}, /* 4 MB */
92 { 0xe6, 23, 9, 13}, /* 8 MB */
93 { 0x73, 24, 9, 14}, /* 16 MB */
94 { 0x75, 25, 9, 14}, /* 32 MB */
95 { 0x76, 26, 9, 14}, /* 64 MB */
96 { 0x79, 27, 9, 14}, /* 128 MB */
97 { 0x71, 28, 9, 14}, /* 256 MB */
98
99 /* MASK ROM */
100 { 0x5d, 21, 9, 13}, /* 2 MB */
101 { 0xd5, 22, 9, 13}, /* 4 MB */
102 { 0xd6, 23, 9, 13}, /* 8 MB */
103 { 0x57, 24, 9, 13}, /* 16 MB */
104 { 0x58, 25, 9, 13}, /* 32 MB */
105 { }
106};
107
108static struct alauda_card *get_card(u8 id)
109{
110 struct alauda_card *card;
111
112 for (card = alauda_card_ids; card->id; card++)
113 if (card->id == id)
114 return card;
115 return NULL;
116}
117
118static void alauda_delete(struct kref *kref)
119{
120 struct alauda *al = container_of(kref, struct alauda, kref);
121
122 if (al->mtd) {
123 del_mtd_device(al->mtd);
124 kfree(al->mtd);
125 }
126 usb_put_dev(al->dev);
127 kfree(al);
128}
129
130static int alauda_get_media_status(struct alauda *al, void *buf)
131{
132 int ret;
133
134 mutex_lock(&al->card_mutex);
135 ret = usb_control_msg(al->dev, usb_rcvctrlpipe(al->dev, 0),
136 ALAUDA_GET_XD_MEDIA_STATUS, 0xc0, 0, 1, buf, 2, HZ);
137 mutex_unlock(&al->card_mutex);
138 return ret;
139}
140
141static int alauda_ack_media(struct alauda *al)
142{
143 int ret;
144
145 mutex_lock(&al->card_mutex);
146 ret = usb_control_msg(al->dev, usb_sndctrlpipe(al->dev, 0),
147 ALAUDA_ACK_XD_MEDIA_CHANGE, 0x40, 0, 1, NULL, 0, HZ);
148 mutex_unlock(&al->card_mutex);
149 return ret;
150}
151
152static int alauda_get_media_signatures(struct alauda *al, void *buf)
153{
154 int ret;
155
156 mutex_lock(&al->card_mutex);
157 ret = usb_control_msg(al->dev, usb_rcvctrlpipe(al->dev, 0),
158 ALAUDA_GET_XD_MEDIA_SIG, 0xc0, 0, 0, buf, 4, HZ);
159 mutex_unlock(&al->card_mutex);
160 return ret;
161}
162
163static void alauda_reset(struct alauda *al)
164{
165 u8 command[] = {
166 ALAUDA_BULK_CMD, ALAUDA_BULK_RESET_MEDIA, 0, 0,
167 0, 0, 0, 0, al->port
168 };
169 mutex_lock(&al->card_mutex);
170 usb_bulk_msg(al->dev, al->bulk_out, command, 9, NULL, HZ);
171 mutex_unlock(&al->card_mutex);
172}
173
174static void correct_data(void *buf, void *read_ecc,
175 int *corrected, int *uncorrected)
176{
177 u8 calc_ecc[3];
178 int err;
179
180 nand_calculate_ecc(NULL, buf, calc_ecc);
181 err = nand_correct_data(NULL, buf, read_ecc, calc_ecc);
182 if (err) {
183 if (err > 0)
184 (*corrected)++;
185 else
186 (*uncorrected)++;
187 }
188}
189
190struct alauda_sg_request {
191 struct urb *urb[3];
192 struct completion comp;
193};
194
195static void alauda_complete(struct urb *urb)
196{
197 struct completion *comp = urb->context;
198
199 if (comp)
200 complete(comp);
201}
202
203static int __alauda_read_page(struct mtd_info *mtd, loff_t from, void *buf,
204 void *oob)
205{
206 struct alauda_sg_request sg;
207 struct alauda *al = mtd->priv;
208 u32 pba = from >> al->card->blockshift;
209 u32 page = (from >> al->card->pageshift) & al->pagemask;
210 u8 command[] = {
211 ALAUDA_BULK_CMD, ALAUDA_BULK_READ_PAGE, PBA_HI(pba),
212 PBA_ZONE(pba), 0, PBA_LO(pba) + page, 1, 0, al->port
213 };
214 int i, err;
215
216 for (i=0; i<3; i++)
217 sg.urb[i] = NULL;
218
219 err = -ENOMEM;
220 for (i=0; i<3; i++) {
221 sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
222 if (!sg.urb[i])
223 goto out;
224 }
225 init_completion(&sg.comp);
226 usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
227 alauda_complete, NULL);
228 usb_fill_bulk_urb(sg.urb[1], al->dev, al->bulk_in, buf, mtd->writesize,
229 alauda_complete, NULL);
230 usb_fill_bulk_urb(sg.urb[2], al->dev, al->bulk_in, oob, 16,
231 alauda_complete, &sg.comp);
232
233 mutex_lock(&al->card_mutex);
234 for (i=0; i<3; i++) {
235 err = usb_submit_urb(sg.urb[i], GFP_NOIO);
236 if (err)
237 goto cancel;
238 }
239 if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
240 err = -ETIMEDOUT;
241cancel:
242 for (i=0; i<3; i++) {
243 usb_kill_urb(sg.urb[i]);
244 }
245 }
246 mutex_unlock(&al->card_mutex);
247
248out:
249 usb_free_urb(sg.urb[0]);
250 usb_free_urb(sg.urb[1]);
251 usb_free_urb(sg.urb[2]);
252 return err;
253}
254
255static int alauda_read_page(struct mtd_info *mtd, loff_t from,
256 void *buf, u8 *oob, int *corrected, int *uncorrected)
257{
258 int err;
259
260 err = __alauda_read_page(mtd, from, buf, oob);
261 if (err)
262 return err;
263 correct_data(buf, oob+13, corrected, uncorrected);
264 correct_data(buf+256, oob+8, corrected, uncorrected);
265 return 0;
266}
267
268static int alauda_write_page(struct mtd_info *mtd, loff_t to, void *buf,
269 void *oob)
270{
271 struct alauda_sg_request sg;
272 struct alauda *al = mtd->priv;
273 u32 pba = to >> al->card->blockshift;
274 u32 page = (to >> al->card->pageshift) & al->pagemask;
275 u8 command[] = {
276 ALAUDA_BULK_CMD, ALAUDA_BULK_WRITE_PAGE, PBA_HI(pba),
277 PBA_ZONE(pba), 0, PBA_LO(pba) + page, 32, 0, al->port
278 };
279 int i, err;
280
281 for (i=0; i<3; i++)
282 sg.urb[i] = NULL;
283
284 err = -ENOMEM;
285 for (i=0; i<3; i++) {
286 sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
287 if (!sg.urb[i])
288 goto out;
289 }
290 init_completion(&sg.comp);
291 usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
292 alauda_complete, NULL);
293 usb_fill_bulk_urb(sg.urb[1], al->dev, al->write_out, buf,mtd->writesize,
294 alauda_complete, NULL);
295 usb_fill_bulk_urb(sg.urb[2], al->dev, al->write_out, oob, 16,
296 alauda_complete, &sg.comp);
297
298 mutex_lock(&al->card_mutex);
299 for (i=0; i<3; i++) {
300 err = usb_submit_urb(sg.urb[i], GFP_NOIO);
301 if (err)
302 goto cancel;
303 }
304 if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
305 err = -ETIMEDOUT;
306cancel:
307 for (i=0; i<3; i++) {
308 usb_kill_urb(sg.urb[i]);
309 }
310 }
311 mutex_unlock(&al->card_mutex);
312
313out:
314 usb_free_urb(sg.urb[0]);
315 usb_free_urb(sg.urb[1]);
316 usb_free_urb(sg.urb[2]);
317 return err;
318}
319
320static int alauda_erase_block(struct mtd_info *mtd, loff_t ofs)
321{
322 struct alauda_sg_request sg;
323 struct alauda *al = mtd->priv;
324 u32 pba = ofs >> al->card->blockshift;
325 u8 command[] = {
326 ALAUDA_BULK_CMD, ALAUDA_BULK_ERASE_BLOCK, PBA_HI(pba),
327 PBA_ZONE(pba), 0, PBA_LO(pba), 0x02, 0, al->port
328 };
329 u8 buf[2];
330 int i, err;
331
332 for (i=0; i<2; i++)
333 sg.urb[i] = NULL;
334
335 err = -ENOMEM;
336 for (i=0; i<2; i++) {
337 sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
338 if (!sg.urb[i])
339 goto out;
340 }
341 init_completion(&sg.comp);
342 usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
343 alauda_complete, NULL);
344 usb_fill_bulk_urb(sg.urb[1], al->dev, al->bulk_in, buf, 2,
345 alauda_complete, &sg.comp);
346
347 mutex_lock(&al->card_mutex);
348 for (i=0; i<2; i++) {
349 err = usb_submit_urb(sg.urb[i], GFP_NOIO);
350 if (err)
351 goto cancel;
352 }
353 if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
354 err = -ETIMEDOUT;
355cancel:
356 for (i=0; i<2; i++) {
357 usb_kill_urb(sg.urb[i]);
358 }
359 }
360 mutex_unlock(&al->card_mutex);
361
362out:
363 usb_free_urb(sg.urb[0]);
364 usb_free_urb(sg.urb[1]);
365 return err;
366}
367
/*
 * Read only the 16 OOB bytes of the page at @from.  The page data is
 * transferred too (the USB command is all-or-nothing) but dumped into a
 * static scratch buffer that is only ever written, never read.
 * NOTE(review): ignore_buf is shared by all concurrent callers without
 * locking — harmless only because its contents are never consumed.
 */
static int alauda_read_oob(struct mtd_info *mtd, loff_t from, void *oob)
{
	static u8 ignore_buf[512]; /* write only */

	return __alauda_read_page(mtd, from, ignore_buf, oob);
}
374
375static int popcount8(u8 c)
376{
377 int ret = 0;
378
379 for ( ; c; c>>=1)
380 ret += c & 1;
381 return ret;
382}
383
384static int alauda_isbad(struct mtd_info *mtd, loff_t ofs)
385{
386 u8 oob[16];
387 int err;
388
389 err = alauda_read_oob(mtd, ofs, oob);
390 if (err)
391 return err;
392
393 /* A block is marked bad if two or more bits are zero */
394 return popcount8(oob[5]) >= 7 ? 0 : 1;
395}
396
397static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,
398 size_t *retlen, u_char *buf)
399{
400 struct alauda *al = mtd->priv;
401 void *bounce_buf;
402 int err, corrected=0, uncorrected=0;
403
404 bounce_buf = kmalloc(mtd->writesize, GFP_KERNEL);
405 if (!bounce_buf)
406 return -ENOMEM;
407
408 *retlen = len;
409 while (len) {
410 u8 oob[16];
411 size_t byte = from & al->bytemask;
412 size_t cplen = min(len, mtd->writesize - byte);
413
414 err = alauda_read_page(mtd, from, bounce_buf, oob,
415 &corrected, &uncorrected);
416 if (err)
417 goto out;
418
419 memcpy(buf, bounce_buf + byte, cplen);
420 buf += cplen;
421 from += cplen;
422 len -= cplen;
423 }
424 err = 0;
425 if (corrected)
426 err = -EUCLEAN;
427 if (uncorrected)
428 err = -EBADMSG;
429out:
430 kfree(bounce_buf);
431 return err;
432}
433
434static int alauda_read(struct mtd_info *mtd, loff_t from, size_t len,
435 size_t *retlen, u_char *buf)
436{
437 struct alauda *al = mtd->priv;
438 int err, corrected=0, uncorrected=0;
439
440 if ((from & al->bytemask) || (len & al->bytemask))
441 return alauda_bounce_read(mtd, from, len, retlen, buf);
442
443 *retlen = len;
444 while (len) {
445 u8 oob[16];
446
447 err = alauda_read_page(mtd, from, buf, oob,
448 &corrected, &uncorrected);
449 if (err)
450 return err;
451
452 buf += mtd->writesize;
453 from += mtd->writesize;
454 len -= mtd->writesize;
455 }
456 err = 0;
457 if (corrected)
458 err = -EUCLEAN;
459 if (uncorrected)
460 err = -EBADMSG;
461 return err;
462}
463
464static int alauda_write(struct mtd_info *mtd, loff_t to, size_t len,
465 size_t *retlen, const u_char *buf)
466{
467 struct alauda *al = mtd->priv;
468 int err;
469
470 if ((to & al->bytemask) || (len & al->bytemask))
471 return -EINVAL;
472
473 *retlen = len;
474 while (len) {
475 u32 page = (to >> al->card->pageshift) & al->pagemask;
476 u8 oob[16] = { 'h', 'e', 'l', 'l', 'o', 0xff, 0xff, 0xff,
477 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
478
479 /* don't write to bad blocks */
480 if (page == 0) {
481 err = alauda_isbad(mtd, to);
482 if (err) {
483 return -EIO;
484 }
485 }
486 nand_calculate_ecc(mtd, buf, &oob[13]);
487 nand_calculate_ecc(mtd, buf+256, &oob[8]);
488
489 err = alauda_write_page(mtd, to, (void*)buf, oob);
490 if (err)
491 return err;
492
493 buf += mtd->writesize;
494 to += mtd->writesize;
495 len -= mtd->writesize;
496 }
497 return 0;
498}
499
500static int __alauda_erase(struct mtd_info *mtd, struct erase_info *instr)
501{
502 struct alauda *al = mtd->priv;
503 u32 ofs = instr->addr;
504 u32 len = instr->len;
505 int err;
506
507 if ((ofs & al->blockmask) || (len & al->blockmask))
508 return -EINVAL;
509
510 while (len) {
511 /* don't erase bad blocks */
512 err = alauda_isbad(mtd, ofs);
513 if (err > 0)
514 err = -EIO;
515 if (err < 0)
516 return err;
517
518 err = alauda_erase_block(mtd, ofs);
519 if (err < 0)
520 return err;
521
522 ofs += mtd->erasesize;
523 len -= mtd->erasesize;
524 }
525 return 0;
526}
527
528static int alauda_erase(struct mtd_info *mtd, struct erase_info *instr)
529{
530 int err;
531
532 err = __alauda_erase(mtd, instr);
533 instr->state = err ? MTD_ERASE_FAILED : MTD_ERASE_DONE;
534 mtd_erase_callback(instr);
535 return err;
536}
537
/*
 * Bring newly-inserted media online: wait for the "present" status bit,
 * ack the insertion, identify the card by its signature byte, compute
 * the address masks and register a fresh mtd_info for it.
 *
 * Returns 0 on success or a negative error; on failure the mtd_info
 * allocation is freed again.
 *
 * NOTE(review): the status poll below has no upper bound — if the
 * reader never reports bit 0x10 this loops forever.
 */
static int alauda_init_media(struct alauda *al)
{
	u8 buf[4], *b0=buf, *b1=buf+1;
	struct alauda_card *card;
	struct mtd_info *mtd;
	int err;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return -ENOMEM;

	/* poll until the media-present bit is set */
	for (;;) {
		err = alauda_get_media_status(al, buf);
		if (err < 0)
			goto error;
		if (*b0 & 0x10)
			break;
		msleep(20);
	}

	err = alauda_ack_media(al);
	if (err)
		goto error;

	msleep(10);

	err = alauda_get_media_status(al, buf);
	if (err < 0)
		goto error;

	if (*b0 != 0x14) {
		/* media not ready */
		err = -EIO;
		goto error;
	}
	err = alauda_get_media_signatures(al, buf);
	if (err < 0)
		goto error;

	/* the second signature byte selects the card geometry */
	card = get_card(*b1);
	if (!card) {
		printk(KERN_ERR"Alauda: unknown card id %02x\n", *b1);
		err = -EIO;
		goto error;
	}
	printk(KERN_INFO"pagesize=%x\nerasesize=%x\nsize=%xMiB\n",
			1<<card->pageshift, 1<<card->blockshift,
			1<<(card->chipshift-20));
	al->card = card;
	/* precomputed masks for page/byte/block address arithmetic */
	al->pagemask = (1 << (card->blockshift - card->pageshift)) - 1;
	al->bytemask = (1 << card->pageshift) - 1;
	al->blockmask = (1 << card->blockshift) - 1;

	mtd->name = "alauda";
	mtd->size = 1<<card->chipshift;
	mtd->erasesize = 1<<card->blockshift;
	mtd->writesize = 1<<card->pageshift;
	mtd->type = MTD_NANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	mtd->read = alauda_read;
	mtd->write = alauda_write;
	mtd->erase = alauda_erase;
	mtd->block_isbad = alauda_isbad;
	mtd->priv = al;
	mtd->owner = THIS_MODULE;

	err = add_mtd_device(mtd);
	if (err) {
		err = -ENFILE;
		goto error;
	}

	al->mtd = mtd;
	alauda_reset(al); /* no clue whether this is necessary */
	return 0;
error:
	kfree(mtd);
	return err;
}
617
618static int alauda_check_media(struct alauda *al)
619{
620 u8 buf[2], *b0 = buf, *b1 = buf+1;
621 int err;
622
623 err = alauda_get_media_status(al, buf);
624 if (err < 0)
625 return err;
626
627 if ((*b1 & 0x01) == 0) {
628 /* door open */
629 return -EIO;
630 }
631 if ((*b0 & 0x80) || ((*b0 & 0x1F) == 0x10)) {
632 /* no media ? */
633 return -EIO;
634 }
635 if (*b0 & 0x08) {
636 /* media change ? */
637 return alauda_init_media(al);
638 }
639 return 0;
640}
641
/*
 * USB probe.  One physical reader exposes two card slots (xD and
 * SmartMedia): two struct alauda are allocated back-to-back and the
 * second is initialised as a byte copy of the first before the ports
 * diverge.  NOTE(review): the memcpy duplicates the kref into al[1] —
 * confirm that teardown only ever puts the reference through al[0].
 *
 * Bulk endpoints are classified by position: the first bulk-out
 * endpoint becomes the dedicated write pipe, any later one the command
 * pipe.  NOTE(review): this relies on the device's endpoint ordering.
 */
static int alauda_probe(struct usb_interface *interface,
		const struct usb_device_id *id)
{
	struct alauda *al;
	struct usb_host_interface *iface;
	struct usb_endpoint_descriptor *ep,
			*ep_in=NULL, *ep_out=NULL, *ep_wr=NULL;
	int i, err = -ENOMEM;

	/* one allocation covers both ports */
	al = kzalloc(2*sizeof(*al), GFP_KERNEL);
	if (!al)
		goto error;

	kref_init(&al->kref);
	usb_set_intfdata(interface, al);

	al->dev = usb_get_dev(interface_to_usbdev(interface));
	al->interface = interface;

	iface = interface->cur_altsetting;
	for (i = 0; i < iface->desc.bNumEndpoints; ++i) {
		ep = &iface->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep)) {
			ep_in = ep;
		} else if (usb_endpoint_is_bulk_out(ep)) {
			if (i==0)
				ep_wr = ep;
			else
				ep_out = ep;
		}
	}
	err = -EIO;
	if (!ep_wr || !ep_in || !ep_out)
		goto error;

	al->write_out = usb_sndbulkpipe(al->dev,
			ep_wr->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	al->bulk_in = usb_rcvbulkpipe(al->dev,
			ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	al->bulk_out = usb_sndbulkpipe(al->dev,
			ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	/* second device is identical up to now */
	memcpy(al+1, al, sizeof(*al));

	mutex_init(&al[0].card_mutex);
	mutex_init(&al[1].card_mutex);

	al[0].port = ALAUDA_PORT_XD;
	al[1].port = ALAUDA_PORT_SM;

	info("alauda probed");
	/* best effort: an error here just means no usable media yet */
	alauda_check_media(al);
	alauda_check_media(al+1);

	return 0;

error:
	if (al)
		kref_put(&al->kref, alauda_delete);
	return err;
}
705
706static void alauda_disconnect(struct usb_interface *interface)
707{
708 struct alauda *al;
709
710 al = usb_get_intfdata(interface);
711 usb_set_intfdata(interface, NULL);
712
713 /* FIXME: prevent more I/O from starting */
714
715 /* decrement our usage count */
716 if (al)
717 kref_put(&al->kref, alauda_delete);
718
719 info("alauda gone");
720}
721
/* USB driver glue: device matching plus connect/disconnect entry points. */
static struct usb_driver alauda_driver = {
	.name = "alauda",
	.probe = alauda_probe,
	.disconnect = alauda_disconnect,
	.id_table = alauda_table,
};
728
/* Module entry point: register the USB driver with the core. */
static int __init alauda_init(void)
{
	return usb_register(&alauda_driver);
}
733
/* Module exit point: unregister the USB driver. */
static void __exit alauda_exit(void)
{
	usb_deregister(&alauda_driver);
}
738
739module_init(alauda_init);
740module_exit(alauda_exit);
741
742MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
new file mode 100644
index 0000000000..1657ecd748
--- /dev/null
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -0,0 +1,788 @@
1/* linux/drivers/mtd/nand/bf5xx_nand.c
2 *
3 * Copyright 2006-2007 Analog Devices Inc.
4 * http://blackfin.uclinux.org/
5 * Bryan Wu <bryan.wu@analog.com>
6 *
 * Blackfin BF5xx on-chip NAND flash controller driver
8 *
9 * Derived from drivers/mtd/nand/s3c2410.c
10 * Copyright (c) 2007 Ben Dooks <ben@simtec.co.uk>
11 *
12 * Derived from drivers/mtd/nand/cafe.c
13 * Copyright © 2006 Red Hat, Inc.
14 * Copyright © 2006 David Woodhouse <dwmw2@infradead.org>
15 *
16 * Changelog:
17 * 12-Jun-2007 Bryan Wu: Initial version
18 * 18-Jul-2007 Bryan Wu:
19 * - ECC_HW and ECC_SW supported
20 * - DMA supported in ECC_HW
21 * - YAFFS tested as rootfs in both ECC_HW and ECC_SW
22 *
23 * TODO:
24 * Enable JFFS2 over NAND as rootfs
25 *
26 * This program is free software; you can redistribute it and/or modify
27 * it under the terms of the GNU General Public License as published by
28 * the Free Software Foundation; either version 2 of the License, or
29 * (at your option) any later version.
30 *
31 * This program is distributed in the hope that it will be useful,
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
34 * GNU General Public License for more details.
35 *
36 * You should have received a copy of the GNU General Public License
37 * along with this program; if not, write to the Free Software
38 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
39*/
40
41#include <linux/module.h>
42#include <linux/types.h>
43#include <linux/init.h>
44#include <linux/kernel.h>
45#include <linux/string.h>
46#include <linux/ioport.h>
47#include <linux/platform_device.h>
48#include <linux/delay.h>
49#include <linux/dma-mapping.h>
50#include <linux/err.h>
51#include <linux/slab.h>
52#include <linux/io.h>
53#include <linux/bitops.h>
54
55#include <linux/mtd/mtd.h>
56#include <linux/mtd/nand.h>
57#include <linux/mtd/nand_ecc.h>
58#include <linux/mtd/partitions.h>
59
60#include <asm/blackfin.h>
61#include <asm/dma.h>
62#include <asm/cacheflush.h>
63#include <asm/nand.h>
64#include <asm/portmux.h>
65
66#define DRV_NAME "bf5xx-nand"
67#define DRV_VERSION "1.2"
68#define DRV_AUTHOR "Bryan Wu <bryan.wu@analog.com>"
69#define DRV_DESC "BF5xx on-chip NAND FLash Controller Driver"
70
71#ifdef CONFIG_MTD_NAND_BF5XX_HWECC
72static int hardware_ecc = 1;
73#else
74static int hardware_ecc;
75#endif
76
/* Zero-terminated pin list (chip-enable, ready/busy) claimed from the
 * peripheral mux by bf5xx_nand_hw_init(). */
static unsigned short bfin_nfc_pin_req[] = {P_NAND_CE, P_NAND_RB, 0};
78
79/*
80 * Data structures for bf5xx nand flash controller driver
81 */
82
/* bf5xx nand info: per-device driver state, one allocation. */
struct bf5xx_nand_info {
	/* mtd info */
	struct nand_hw_control controller;	/* lock/waitqueue shared with nand_chip */
	struct mtd_info mtd;			/* embedded — not a separate allocation */
	struct nand_chip chip;

	/* platform info */
	struct bf5xx_nand_platform *platform;	/* board-supplied configuration */

	/* device info */
	struct device *device;			/* for dev_dbg/dev_err messages */

	/* DMA stuff */
	struct completion dma_completion;	/* signalled from the DMA irq handler */
};
99
100/*
101 * Conversion functions
102 */
/* Map an embedded mtd_info back to its enclosing bf5xx_nand_info. */
static struct bf5xx_nand_info *mtd_to_nand_info(struct mtd_info *mtd)
{
	return container_of(mtd, struct bf5xx_nand_info, mtd);
}
107
/* Fetch the driver state stored with platform_set_drvdata() at probe. */
static struct bf5xx_nand_info *to_nand_info(struct platform_device *pdev)
{
	return platform_get_drvdata(pdev);
}
112
/* Board-supplied configuration attached to the platform device. */
static struct bf5xx_nand_platform *to_nand_plat(struct platform_device *pdev)
{
	return pdev->dev.platform_data;
}
117
118/*
119 * struct nand_chip interface function pointers
120 */
121
/*
 * bf5xx_nand_hwcontrol
 *
 * Issue command and address cycles to the chip: wait for room in the
 * controller's write buffer, then route @cmd to the command register
 * when NAND_CLE is set in @ctrl, or to the address register otherwise.
 */
static void bf5xx_nand_hwcontrol(struct mtd_info *mtd, int cmd,
				 unsigned int ctrl)
{
	if (cmd == NAND_CMD_NONE)
		return;

	/* don't overrun the controller's write buffer */
	while (bfin_read_NFC_STAT() & WB_FULL)
		cpu_relax();

	if (ctrl & NAND_CLE)
		bfin_write_NFC_CMD(cmd);
	else
		bfin_write_NFC_ADDR(cmd);
	SSYNC();
}
142
143/*
144 * bf5xx_nand_devready()
145 *
146 * returns 0 if the nand is busy, 1 if it is ready
147 */
148static int bf5xx_nand_devready(struct mtd_info *mtd)
149{
150 unsigned short val = bfin_read_NFC_IRQSTAT();
151
152 if ((val & NBUSYIRQ) == NBUSYIRQ)
153 return 1;
154 else
155 return 0;
156}
157
158/*
159 * ECC functions
160 * These allow the bf5xx to use the controller's ECC
161 * generator block to ECC the data as it passes through
162 */
163
/*
 * bf5xx_nand_correct_data_256 - correct one 256-byte chunk using the
 * controller-generated Hamming ECC.
 *
 * Compares the stored and calculated 22-bit ECC words (two 11-bit
 * halves each) and classifies the resulting syndrome: clean data, an
 * error in the ECC bytes themselves, a single-bit data error (repaired
 * in place), or an uncorrectable multi-bit error.
 *
 * Returns 0 when the data is good or was corrected, 1 on failure.
 */
static int bf5xx_nand_correct_data_256(struct mtd_info *mtd, u_char *dat,
					u_char *read_ecc, u_char *calc_ecc)
{
	struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
	u32 syndrome[5];
	u32 calced, stored;
	int i;
	unsigned short failing_bit, failing_byte;
	u_char data;

	/* each ECC word is stored as three little-endian bytes */
	calced = calc_ecc[0] | (calc_ecc[1] << 8) | (calc_ecc[2] << 16);
	stored = read_ecc[0] | (read_ecc[1] << 8) | (read_ecc[2] << 16);

	syndrome[0] = (calced ^ stored);

	/*
	 * syndrome 0: all zero
	 * No error in data
	 * No action
	 */
	if (!syndrome[0] || !calced || !stored)
		return 0;

	/*
	 * syndrome 0: only one bit is one
	 * ECC data was incorrect
	 * No action
	 */
	if (hweight32(syndrome[0]) == 1) {
		dev_err(info->device, "ECC data was incorrect!\n");
		return 1;
	}

	syndrome[1] = (calced & 0x7FF) ^ (stored & 0x7FF);
	syndrome[2] = (calced & 0x7FF) ^ ((calced >> 11) & 0x7FF);
	syndrome[3] = (stored & 0x7FF) ^ ((stored >> 11) & 0x7FF);
	syndrome[4] = syndrome[2] ^ syndrome[3];

	for (i = 0; i < 5; i++)
		dev_info(info->device, "syndrome[%d] 0x%08x\n", i, syndrome[i]);

	dev_info(info->device,
		"calced[0x%08x], stored[0x%08x]\n",
		calced, stored);

	/*
	 * syndrome 0: exactly 11 bits are one, each parity
	 * and parity' pair is 1 & 0 or 0 & 1.
	 * 1-bit correctable error
	 * Correct the error
	 */
	if (hweight32(syndrome[0]) == 11 && syndrome[4] == 0x7FF) {
		dev_info(info->device,
			"1-bit correctable error, correct it.\n");
		dev_info(info->device,
			"syndrome[1] 0x%08x\n", syndrome[1]);

		/* syndrome[1] encodes the failing bit position: low 3
		 * bits select the bit, the rest select the byte */
		failing_bit = syndrome[1] & 0x7;
		failing_byte = syndrome[1] >> 0x3;
		data = *(dat + failing_byte);
		data = data ^ (0x1 << failing_bit);
		*(dat + failing_byte) = data;

		return 0;
	}

	/*
	 * syndrome 0: random data
	 * More than 1-bit error, non-correctable error
	 * Discard data, mark bad block
	 */
	dev_err(info->device,
		"More than 1-bit error, non-correctable error.\n");
	dev_err(info->device,
		"Please discard data, mark bad block\n");

	return 1;
}
245
246static int bf5xx_nand_correct_data(struct mtd_info *mtd, u_char *dat,
247 u_char *read_ecc, u_char *calc_ecc)
248{
249 struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
250 struct bf5xx_nand_platform *plat = info->platform;
251 unsigned short page_size = (plat->page_size ? 512 : 256);
252 int ret;
253
254 ret = bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc);
255
256 /* If page size is 512, correct second 256 bytes */
257 if (page_size == 512) {
258 dat += 256;
259 read_ecc += 8;
260 calc_ecc += 8;
261 ret = bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc);
262 }
263
264 return ret;
265}
266
/* Required nand_ecc_ctrl hook; the BF5xx ECC hardware needs no
 * per-operation enable, so this is intentionally a no-op. */
static void bf5xx_nand_enable_hwecc(struct mtd_info *mtd, int mode)
{
	return;
}
271
272static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
273 const u_char *dat, u_char *ecc_code)
274{
275 struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
276 struct bf5xx_nand_platform *plat = info->platform;
277 u16 page_size = (plat->page_size ? 512 : 256);
278 u16 ecc0, ecc1;
279 u32 code[2];
280 u8 *p;
281 int bytes = 3, i;
282
283 /* first 4 bytes ECC code for 256 page size */
284 ecc0 = bfin_read_NFC_ECC0();
285 ecc1 = bfin_read_NFC_ECC1();
286
287 code[0] = (ecc0 & 0x3FF) | ((ecc1 & 0x3FF) << 11);
288
289 dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]);
290
291 /* second 4 bytes ECC code for 512 page size */
292 if (page_size == 512) {
293 ecc0 = bfin_read_NFC_ECC2();
294 ecc1 = bfin_read_NFC_ECC3();
295 code[1] = (ecc0 & 0x3FF) | ((ecc1 & 0x3FF) << 11);
296 bytes = 6;
297 dev_dbg(info->device, "returning ecc 0x%08x\n", code[1]);
298 }
299
300 p = (u8 *)code;
301 for (i = 0; i < bytes; i++)
302 ecc_code[i] = p[i];
303
304 return 0;
305}
306
307/*
308 * PIO mode for buffer writing and reading
309 */
/*
 * bf5xx_nand_read_buf - PIO read of @len bytes into @buf
 *
 * One full request/ready/read/ack handshake per byte.
 */
static void bf5xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	int i;
	unsigned short val;

	/*
	 * Data reads are requested by first writing to NFC_DATA_RD
	 * and then reading back from NFC_READ.
	 */
	for (i = 0; i < len; i++) {
		/* wait for space in the controller's write buffer */
		while (bfin_read_NFC_STAT() & WB_FULL)
			cpu_relax();

		/* Contents do not matter */
		bfin_write_NFC_DATA_RD(0x0000);
		SSYNC();

		/* wait until the requested byte has arrived */
		while ((bfin_read_NFC_IRQSTAT() & RD_RDY) != RD_RDY)
			cpu_relax();

		buf[i] = bfin_read_NFC_READ();

		/* ack RD_RDY (NOTE(review): presumably write-1-to-clear
		 * semantics — confirm against the hardware reference) */
		val = bfin_read_NFC_IRQSTAT();
		val |= RD_RDY;
		bfin_write_NFC_IRQSTAT(val);
		SSYNC();
	}
}
338
339static uint8_t bf5xx_nand_read_byte(struct mtd_info *mtd)
340{
341 uint8_t val;
342
343 bf5xx_nand_read_buf(mtd, &val, 1);
344
345 return val;
346}
347
348static void bf5xx_nand_write_buf(struct mtd_info *mtd,
349 const uint8_t *buf, int len)
350{
351 int i;
352
353 for (i = 0; i < len; i++) {
354 while (bfin_read_NFC_STAT() & WB_FULL)
355 cpu_relax();
356
357 bfin_write_NFC_DATA_WR(buf[i]);
358 SSYNC();
359 }
360}
361
/*
 * 16-bit PIO read of @len bytes (len/2 bus words) into @buf.
 *
 * NOTE(review): unlike the 8-bit path, only one NFC_DATA_RD request is
 * issued and RD_RDY is never polled between words — presumably the
 * 16-bit interface streams data; confirm against the NFC documentation.
 * An odd @len silently drops the trailing byte.
 */
static void bf5xx_nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
	int i;
	u16 *p = (u16 *) buf;
	len >>= 1;

	/*
	 * Data reads are requested by first writing to NFC_DATA_RD
	 * and then reading back from NFC_READ.
	 */
	bfin_write_NFC_DATA_RD(0x5555);

	SSYNC();

	for (i = 0; i < len; i++)
		p[i] = bfin_read_NFC_READ();
}
379
380static void bf5xx_nand_write_buf16(struct mtd_info *mtd,
381 const uint8_t *buf, int len)
382{
383 int i;
384 u16 *p = (u16 *) buf;
385 len >>= 1;
386
387 for (i = 0; i < len; i++)
388 bfin_write_NFC_DATA_WR(p[i]);
389
390 SSYNC();
391}
392
393/*
394 * DMA functions for buffer writing and reading
395 */
/*
 * DMA completion interrupt handler: acknowledge and stop the NFC DMA
 * channel, then wake whoever is blocked in bf5xx_nand_dma_rw().
 */
static irqreturn_t bf5xx_nand_dma_irq(int irq, void *dev_id)
{
	struct bf5xx_nand_info *info = dev_id;

	clear_dma_irqstat(CH_NFC);
	disable_dma(CH_NFC);
	complete(&info->dma_completion);

	return IRQ_HANDLED;
}
406
/*
 * bf5xx_nand_dma_rw - transfer one full page via DMA
 *
 * @is_read selects direction.  The caller's buffer is invalidated or
 * flushed first (the DMA engine bypasses the data cache), the ECC logic
 * is reset, a 32-bit-wide DMA of page_size bytes is armed, and finally
 * the page engine is kicked.  Blocks until the DMA irq completes.
 *
 * NOTE(review): wait_for_completion() is unbounded — a wedged transfer
 * hangs the caller.
 */
static int bf5xx_nand_dma_rw(struct mtd_info *mtd,
			uint8_t *buf, int is_read)
{
	struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
	struct bf5xx_nand_platform *plat = info->platform;
	unsigned short page_size = (plat->page_size ? 512 : 256);
	unsigned short val;

	dev_dbg(info->device, " mtd->%p, buf->%p, is_read %d\n",
			mtd, buf, is_read);

	/*
	 * Before starting a dma transfer, be sure to invalidate/flush
	 * the cache over the address range of your DMA buffer to
	 * prevent cache coherency problems. Otherwise very subtle bugs
	 * can be introduced to your driver.
	 */
	if (is_read)
		invalidate_dcache_range((unsigned int)buf,
				(unsigned int)(buf + page_size));
	else
		flush_dcache_range((unsigned int)buf,
				(unsigned int)(buf + page_size));

	/*
	 * This register must be written before each page is
	 * transferred to generate the correct ECC register
	 * values.
	 */
	bfin_write_NFC_RST(0x1);
	SSYNC();

	disable_dma(CH_NFC);
	clear_dma_irqstat(CH_NFC);

	/* setup DMA register with Blackfin DMA API */
	set_dma_config(CH_NFC, 0x0);
	set_dma_start_addr(CH_NFC, (unsigned long) buf);
	set_dma_x_count(CH_NFC, (page_size >> 2));	/* count in 32-bit units */
	set_dma_x_modify(CH_NFC, 4);

	/* setup write or read operation */
	val = DI_EN | WDSIZE_32;
	if (is_read)
		val |= WNR;
	set_dma_config(CH_NFC, val);
	enable_dma(CH_NFC);

	/* Start PAGE read/write operation */
	if (is_read)
		bfin_write_NFC_PGCTL(0x1);
	else
		bfin_write_NFC_PGCTL(0x2);
	wait_for_completion(&info->dma_completion);

	return 0;
}
464
465static void bf5xx_nand_dma_read_buf(struct mtd_info *mtd,
466 uint8_t *buf, int len)
467{
468 struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
469 struct bf5xx_nand_platform *plat = info->platform;
470 unsigned short page_size = (plat->page_size ? 512 : 256);
471
472 dev_dbg(info->device, "mtd->%p, buf->%p, int %d\n", mtd, buf, len);
473
474 if (len == page_size)
475 bf5xx_nand_dma_rw(mtd, buf, 1);
476 else
477 bf5xx_nand_read_buf(mtd, buf, len);
478}
479
480static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
481 const uint8_t *buf, int len)
482{
483 struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
484 struct bf5xx_nand_platform *plat = info->platform;
485 unsigned short page_size = (plat->page_size ? 512 : 256);
486
487 dev_dbg(info->device, "mtd->%p, buf->%p, len %d\n", mtd, buf, len);
488
489 if (len == page_size)
490 bf5xx_nand_dma_rw(mtd, (uint8_t *)buf, 0);
491 else
492 bf5xx_nand_write_buf(mtd, buf, len);
493}
494
495/*
496 * System initialization functions
497 */
498
499static int bf5xx_nand_dma_init(struct bf5xx_nand_info *info)
500{
501 int ret;
502 unsigned short val;
503
504 /* Do not use dma */
505 if (!hardware_ecc)
506 return 0;
507
508 init_completion(&info->dma_completion);
509
510 /* Setup DMAC1 channel mux for NFC which shared with SDH */
511 val = bfin_read_DMAC1_PERIMUX();
512 val &= 0xFFFE;
513 bfin_write_DMAC1_PERIMUX(val);
514 SSYNC();
515
516 /* Request NFC DMA channel */
517 ret = request_dma(CH_NFC, "BF5XX NFC driver");
518 if (ret < 0) {
519 dev_err(info->device, " unable to get DMA channel\n");
520 return ret;
521 }
522
523 set_dma_callback(CH_NFC, (void *) bf5xx_nand_dma_irq, (void *) info);
524
525 /* Turn off the DMA channel first */
526 disable_dma(CH_NFC);
527 return 0;
528}
529
530/*
531 * BF5XX NFC hardware initialization
532 * - pin mux setup
533 * - clear interrupt status
534 */
535static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
536{
537 int err = 0;
538 unsigned short val;
539 struct bf5xx_nand_platform *plat = info->platform;
540
541 /* setup NFC_CTL register */
542 dev_info(info->device,
543 "page_size=%d, data_width=%d, wr_dly=%d, rd_dly=%d\n",
544 (plat->page_size ? 512 : 256),
545 (plat->data_width ? 16 : 8),
546 plat->wr_dly, plat->rd_dly);
547
548 val = (plat->page_size << NFC_PG_SIZE_OFFSET) |
549 (plat->data_width << NFC_NWIDTH_OFFSET) |
550 (plat->rd_dly << NFC_RDDLY_OFFSET) |
551 (plat->rd_dly << NFC_WRDLY_OFFSET);
552 dev_dbg(info->device, "NFC_CTL is 0x%04x\n", val);
553
554 bfin_write_NFC_CTL(val);
555 SSYNC();
556
557 /* clear interrupt status */
558 bfin_write_NFC_IRQMASK(0x0);
559 SSYNC();
560 val = bfin_read_NFC_IRQSTAT();
561 bfin_write_NFC_IRQSTAT(val);
562 SSYNC();
563
564 if (peripheral_request_list(bfin_nfc_pin_req, DRV_NAME)) {
565 printk(KERN_ERR DRV_NAME
566 ": Requesting Peripherals failed\n");
567 return -EFAULT;
568 }
569
570 /* DMA initialization */
571 if (bf5xx_nand_dma_init(info))
572 err = -ENXIO;
573
574 return err;
575}
576
577/*
578 * Device management interface
579 */
/*
 * Register the scanned chip with the MTD core: as the board-supplied
 * partition set when partition support is configured in, otherwise as
 * a single whole-device MTD.
 */
static int bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
{
	struct mtd_info *mtd = &info->mtd;

#ifdef CONFIG_MTD_PARTITIONS
	struct mtd_partition *parts = info->platform->partitions;
	int nr = info->platform->nr_partitions;

	return add_mtd_partitions(mtd, parts, nr);
#else
	return add_mtd_device(mtd);
#endif
}
593
594static int bf5xx_nand_remove(struct platform_device *pdev)
595{
596 struct bf5xx_nand_info *info = to_nand_info(pdev);
597 struct mtd_info *mtd = NULL;
598
599 platform_set_drvdata(pdev, NULL);
600
601 /* first thing we need to do is release all our mtds
602 * and their partitions, then go through freeing the
603 * resources used
604 */
605 mtd = &info->mtd;
606 if (mtd) {
607 nand_release(mtd);
608 kfree(mtd);
609 }
610
611 peripheral_free_list(bfin_nfc_pin_req);
612
613 /* free the common resources */
614 kfree(info);
615
616 return 0;
617}
618
/*
 * bf5xx_nand_probe
 *
 * Called by the device layer when it finds a device matching one this
 * driver can handle.  Allocates the driver state, wires the nand_chip
 * callbacks (PIO accessors, replaced by the DMA/hardware-ECC variants
 * when hardware ECC is configured), initialises the controller, scans
 * for the chip and registers the resulting MTD partitions.
 *
 * On any failure bf5xx_nand_remove() is called to unwind.
 */
static int bf5xx_nand_probe(struct platform_device *pdev)
{
	struct bf5xx_nand_platform *plat = to_nand_plat(pdev);
	struct bf5xx_nand_info *info = NULL;
	struct nand_chip *chip = NULL;
	struct mtd_info *mtd = NULL;
	int err = 0;

	dev_dbg(&pdev->dev, "(%p)\n", pdev);

	if (!plat) {
		dev_err(&pdev->dev, "no platform specific information\n");
		goto exit_error;
	}

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL) {
		dev_err(&pdev->dev, "no memory for flash info\n");
		err = -ENOMEM;
		goto exit_error;
	}

	platform_set_drvdata(pdev, info);

	spin_lock_init(&info->controller.lock);
	init_waitqueue_head(&info->controller.wq);

	info->device = &pdev->dev;
	info->platform = plat;

	/* initialise chip data struct */
	chip = &info->chip;

	if (plat->data_width)
		chip->options |= NAND_BUSWIDTH_16;

	chip->options |= NAND_CACHEPRG | NAND_SKIP_BBTSCAN;

	/* PIO accessors; overridden by the DMA ones below in HW-ECC mode */
	chip->read_buf = (plat->data_width) ?
		bf5xx_nand_read_buf16 : bf5xx_nand_read_buf;
	chip->write_buf = (plat->data_width) ?
		bf5xx_nand_write_buf16 : bf5xx_nand_write_buf;

	chip->read_byte = bf5xx_nand_read_byte;

	chip->cmd_ctrl = bf5xx_nand_hwcontrol;
	chip->dev_ready = bf5xx_nand_devready;

	chip->priv = &info->mtd;
	chip->controller = &info->controller;

	chip->IO_ADDR_R = (void __iomem *) NFC_READ;
	chip->IO_ADDR_W = (void __iomem *) NFC_DATA_WR;

	/* no fixed delay needed: dev_ready() polls the ready/busy bit */
	chip->chip_delay = 0;

	/* initialise mtd info data struct */
	mtd = &info->mtd;
	mtd->priv = chip;
	mtd->owner = THIS_MODULE;

	/* initialise the hardware */
	err = bf5xx_nand_hw_init(info);
	if (err != 0)
		goto exit_error;

	/* setup hardware ECC data struct */
	if (hardware_ecc) {
		if (plat->page_size == NFC_PG_SIZE_256) {
			chip->ecc.bytes = 3;
			chip->ecc.size = 256;
		} else if (plat->page_size == NFC_PG_SIZE_512) {
			chip->ecc.bytes = 6;
			chip->ecc.size = 512;
		}

		/* full-page transfers go through DMA in hardware-ECC mode */
		chip->read_buf = bf5xx_nand_dma_read_buf;
		chip->write_buf = bf5xx_nand_dma_write_buf;
		chip->ecc.calculate = bf5xx_nand_calculate_ecc;
		chip->ecc.correct = bf5xx_nand_correct_data;
		chip->ecc.mode = NAND_ECC_HW;
		chip->ecc.hwctl = bf5xx_nand_enable_hwecc;
	} else {
		chip->ecc.mode = NAND_ECC_SOFT;
	}

	/* scan hardware nand chip and setup mtd info data struct */
	if (nand_scan(mtd, 1)) {
		err = -ENXIO;
		goto exit_error;
	}

	/* add NAND partition */
	bf5xx_nand_add_partition(info);

	dev_dbg(&pdev->dev, "initialised ok\n");
	return 0;

exit_error:
	bf5xx_nand_remove(pdev);

	/* make sure a failure is always reported as an error */
	if (err == 0)
		err = -EINVAL;
	return err;
}
732
733/* PM Support */
734#ifdef CONFIG_PM
735
736static int bf5xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
737{
738 struct bf5xx_nand_info *info = platform_get_drvdata(dev);
739
740 return 0;
741}
742
/* PM resume: reprogram the controller registers lost over suspend. */
static int bf5xx_nand_resume(struct platform_device *dev)
{
	struct bf5xx_nand_info *info = platform_get_drvdata(dev);

	if (!info)
		return 0;

	bf5xx_nand_hw_init(info);
	return 0;
}
752
753#else
754#define bf5xx_nand_suspend NULL
755#define bf5xx_nand_resume NULL
756#endif
757
/* Platform driver glue: probe/remove plus power-management hooks. */
static struct platform_driver bf5xx_nand_driver = {
	.probe = bf5xx_nand_probe,
	.remove = bf5xx_nand_remove,
	.suspend = bf5xx_nand_suspend,
	.resume = bf5xx_nand_resume,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};
769
/* Module entry point: print the banner and register the platform driver. */
static int __init bf5xx_nand_init(void)
{
	printk(KERN_INFO "%s, Version %s (c) 2007 Analog Devices, Inc.\n",
		DRV_DESC, DRV_VERSION);

	return platform_driver_register(&bf5xx_nand_driver);
}
777
/* Module exit point: unregister the platform driver. */
static void __exit bf5xx_nand_exit(void)
{
	platform_driver_unregister(&bf5xx_nand_driver);
}
782
783module_init(bf5xx_nand_init);
784module_exit(bf5xx_nand_exit);
785
786MODULE_LICENSE("GPL");
787MODULE_AUTHOR(DRV_AUTHOR);
788MODULE_DESCRIPTION(DRV_DESC);
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 6f32a35eb1..e2832d0b98 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -623,6 +623,11 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
623 uint32_t ctrl; 623 uint32_t ctrl;
624 int err = 0; 624 int err = 0;
625 625
626 /* Very old versions shared the same PCI ident for all three
627 functions on the chip. Verify the class too... */
628 if ((pdev->class >> 8) != PCI_CLASS_MEMORY_FLASH)
629 return -ENODEV;
630
626 err = pci_enable_device(pdev); 631 err = pci_enable_device(pdev);
627 if (err) 632 if (err)
628 return err; 633 return err;
@@ -816,21 +821,57 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
816} 821}
817 822
818static struct pci_device_id cafe_nand_tbl[] = { 823static struct pci_device_id cafe_nand_tbl[] = {
819 { 0x11ab, 0x4100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_MEMORY_FLASH << 8, 0xFFFF0 }, 824 { 0x11ab, 0x4100, PCI_ANY_ID, PCI_ANY_ID },
820 { 0, } 825 { }
821}; 826};
822 827
823MODULE_DEVICE_TABLE(pci, cafe_nand_tbl); 828MODULE_DEVICE_TABLE(pci, cafe_nand_tbl);
824 829
830static int cafe_nand_resume(struct pci_dev *pdev)
831{
832 uint32_t ctrl;
833 struct mtd_info *mtd = pci_get_drvdata(pdev);
834 struct cafe_priv *cafe = mtd->priv;
835
836 /* Start off by resetting the NAND controller completely */
837 cafe_writel(cafe, 1, NAND_RESET);
838 cafe_writel(cafe, 0, NAND_RESET);
839 cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
840
841 /* Restore timing configuration */
842 cafe_writel(cafe, timing[0], NAND_TIMING1);
843 cafe_writel(cafe, timing[1], NAND_TIMING2);
844 cafe_writel(cafe, timing[2], NAND_TIMING3);
845
846 /* Disable master reset, enable NAND clock */
847 ctrl = cafe_readl(cafe, GLOBAL_CTRL);
848 ctrl &= 0xffffeff0;
849 ctrl |= 0x00007000;
850 cafe_writel(cafe, ctrl | 0x05, GLOBAL_CTRL);
851 cafe_writel(cafe, ctrl | 0x0a, GLOBAL_CTRL);
852 cafe_writel(cafe, 0, NAND_DMA_CTRL);
853 cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
854 cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
855
856 /* Set up DMA address */
857 cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
858 if (sizeof(cafe->dmaaddr) > 4)
859 /* Shift in two parts to shut the compiler up */
860 cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
861 else
862 cafe_writel(cafe, 0, NAND_DMA_ADDR1);
863
864 /* Enable NAND IRQ in global IRQ mask register */
865 cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
866 return 0;
867}
868
825static struct pci_driver cafe_nand_pci_driver = { 869static struct pci_driver cafe_nand_pci_driver = {
826 .name = "CAFÉ NAND", 870 .name = "CAFÉ NAND",
827 .id_table = cafe_nand_tbl, 871 .id_table = cafe_nand_tbl,
828 .probe = cafe_nand_probe, 872 .probe = cafe_nand_probe,
829 .remove = __devexit_p(cafe_nand_remove), 873 .remove = __devexit_p(cafe_nand_remove),
830#ifdef CONFIG_PMx
831 .suspend = cafe_nand_suspend,
832 .resume = cafe_nand_resume, 874 .resume = cafe_nand_resume,
833#endif
834}; 875};
835 876
836static int cafe_nand_init(void) 877static int cafe_nand_init(void)
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index e96259f22c..ab9f5c5db3 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -56,8 +56,6 @@ static unsigned long __initdata doc_locations[] = {
56#endif /* CONFIG_MTD_DOCPROBE_HIGH */ 56#endif /* CONFIG_MTD_DOCPROBE_HIGH */
57#elif defined(__PPC__) 57#elif defined(__PPC__)
58 0xe4000000, 58 0xe4000000,
59#elif defined(CONFIG_MOMENCO_OCELOT_G)
60 0xff000000,
61#else 59#else
62#warning Unknown architecture for DiskOnChip. No default probe locations defined 60#warning Unknown architecture for DiskOnChip. No default probe locations defined
63#endif 61#endif
diff --git a/drivers/mtd/nand/excite_nandflash.c b/drivers/mtd/nand/excite_nandflash.c
index 7e9afc4c77..bed87290de 100644
--- a/drivers/mtd/nand/excite_nandflash.c
+++ b/drivers/mtd/nand/excite_nandflash.c
@@ -27,7 +27,6 @@
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/err.h> 29#include <linux/err.h>
30#include <linux/kernel.h>
31 30
32#include <linux/mtd/mtd.h> 31#include <linux/mtd/mtd.h>
33#include <linux/mtd/nand.h> 32#include <linux/mtd/nand.h>
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 24ac6778b1..b4e0e77238 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -7,7 +7,7 @@
7 * Basic support for AG-AND chips is provided. 7 * Basic support for AG-AND chips is provided.
8 * 8 *
9 * Additional technical information is available on 9 * Additional technical information is available on
10 * http://www.linux-mtd.infradead.org/tech/nand.html 10 * http://www.linux-mtd.infradead.org/doc/nand.html
11 * 11 *
12 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) 12 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
13 * 2002-2006 Thomas Gleixner (tglx@linutronix.de) 13 * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
@@ -2069,13 +2069,14 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2069 erase_exit: 2069 erase_exit:
2070 2070
2071 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO; 2071 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
2072 /* Do call back function */
2073 if (!ret)
2074 mtd_erase_callback(instr);
2075 2072
2076 /* Deselect and wake up anyone waiting on the device */ 2073 /* Deselect and wake up anyone waiting on the device */
2077 nand_release_device(mtd); 2074 nand_release_device(mtd);
2078 2075
2076 /* Do call back function */
2077 if (!ret)
2078 mtd_erase_callback(instr);
2079
2079 /* 2080 /*
2080 * If BBT requires refresh and erase was successful, rewrite any 2081 * If BBT requires refresh and erase was successful, rewrite any
2081 * selected bad block tables 2082 * selected bad block tables
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 2fc674a190..a3e3ab0185 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -141,6 +141,7 @@ struct nand_manufacturers nand_manuf_ids[] = {
141 {NAND_MFR_STMICRO, "ST Micro"}, 141 {NAND_MFR_STMICRO, "ST Micro"},
142 {NAND_MFR_HYNIX, "Hynix"}, 142 {NAND_MFR_HYNIX, "Hynix"},
143 {NAND_MFR_MICRON, "Micron"}, 143 {NAND_MFR_MICRON, "Micron"},
144 {NAND_MFR_AMD, "AMD"},
144 {0x0, "Unknown"} 145 {0x0, "Unknown"}
145}; 146};
146 147
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 205df0f771..a7574807dc 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -1272,7 +1272,13 @@ static int prog_page(struct nandsim *ns, int num)
1272 mypage = NS_GET_PAGE(ns); 1272 mypage = NS_GET_PAGE(ns);
1273 if (mypage->byte == NULL) { 1273 if (mypage->byte == NULL) {
1274 NS_DBG("prog_page: allocating page %d\n", ns->regs.row); 1274 NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
1275 mypage->byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL); 1275 /*
1276 * We allocate memory with GFP_NOFS because a flash FS may
1277 * utilize this. If it is holding an FS lock, then gets here,
1278 * then kmalloc runs writeback which goes to the FS again
1279 * and deadlocks. This was seen in practice.
1280 */
1281 mypage->byte = kmalloc(ns->geom.pgszoob, GFP_NOFS);
1276 if (mypage->byte == NULL) { 1282 if (mypage->byte == NULL) {
1277 NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row); 1283 NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
1278 return -1; 1284 return -1;
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index fd7a8d5ba2..1c0e89f00e 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -24,7 +24,11 @@
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25 25
26#include <asm/io.h> 26#include <asm/io.h>
27#ifdef CONFIG_40x
28#include <asm/ibm405.h>
29#else
27#include <asm/ibm44x.h> 30#include <asm/ibm44x.h>
31#endif
28 32
29struct ndfc_nand_mtd { 33struct ndfc_nand_mtd {
30 struct mtd_info mtd; 34 struct mtd_info mtd;
@@ -230,7 +234,11 @@ static int ndfc_nand_probe(struct platform_device *pdev)
230 struct ndfc_controller *ndfc = &ndfc_ctrl; 234 struct ndfc_controller *ndfc = &ndfc_ctrl;
231 unsigned long long phys = settings->ndfc_erpn | res->start; 235 unsigned long long phys = settings->ndfc_erpn | res->start;
232 236
237#ifndef CONFIG_PHYS_64BIT
238 ndfc->ndfcbase = ioremap((phys_addr_t)phys, res->end - res->start + 1);
239#else
233 ndfc->ndfcbase = ioremap64(phys, res->end - res->start + 1); 240 ndfc->ndfcbase = ioremap64(phys, res->end - res->start + 1);
241#endif
234 if (!ndfc->ndfcbase) { 242 if (!ndfc->ndfcbase) {
235 printk(KERN_ERR "NDFC: ioremap failed\n"); 243 printk(KERN_ERR "NDFC: ioremap failed\n");
236 return -EIO; 244 return -EIO;
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 5fac4c421a..b79a9cf2d1 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -60,8 +60,8 @@
60 60
61#include <asm/io.h> 61#include <asm/io.h>
62 62
63#include <asm/arch/regs-nand.h> 63#include <asm/plat-s3c/regs-nand.h>
64#include <asm/arch/nand.h> 64#include <asm/plat-s3c/nand.h>
65 65
66#ifdef CONFIG_MTD_NAND_S3C2410_HWECC 66#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
67static int hardware_ecc = 1; 67static int hardware_ecc = 1;
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index c257d397d0..cb41cbca64 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -40,4 +40,27 @@ config MTD_ONENAND_OTP
40 40
41 OTP block is fully-guaranteed to be a valid block. 41 OTP block is fully-guaranteed to be a valid block.
42 42
43config MTD_ONENAND_2X_PROGRAM
44 bool "OneNAND 2X program support"
45 help
46 The 2X Program is an extension of Program Operation.
47 Since the device is equipped with two DataRAMs, and two-plane NAND
48 Flash memory array, these two component enables simultaneous program
49 of 4KiB. Plane1 has only even blocks such as block0, block2, block4
50 while Plane2 has only odd blocks such as block1, block3, block5.
51 So MTD regards it as 4KiB page size and 256KiB block size
52
53 Now the following chips support it. (KFXXX16Q2M)
54 Demux: KFG2G16Q2M, KFH4G16Q2M, KFW8G16Q2M,
55 Mux: KFM2G16Q2M, KFN4G16Q2M,
56
57 And more recent chips
58
59config MTD_ONENAND_SIM
60 tristate "OneNAND simulator support"
61 depends on MTD_PARTITIONS
62 help
63 The simulator may simulate various OneNAND flash chips for the
64 OneNAND MTD layer.
65
43endif # MTD_ONENAND 66endif # MTD_ONENAND
diff --git a/drivers/mtd/onenand/Makefile b/drivers/mtd/onenand/Makefile
index 269cfe4673..4d2eacfd7e 100644
--- a/drivers/mtd/onenand/Makefile
+++ b/drivers/mtd/onenand/Makefile
@@ -8,4 +8,7 @@ obj-$(CONFIG_MTD_ONENAND) += onenand.o
8# Board specific. 8# Board specific.
9obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o 9obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o
10 10
11# Simulator
12obj-$(CONFIG_MTD_ONENAND_SIM) += onenand_sim.o
13
11onenand-objs = onenand_base.o onenand_bbt.o 14onenand-objs = onenand_base.o onenand_bbt.o
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 0537fac8de..b2c40f67db 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -206,6 +206,15 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
206 default: 206 default:
207 block = (int) (addr >> this->erase_shift); 207 block = (int) (addr >> this->erase_shift);
208 page = (int) (addr >> this->page_shift); 208 page = (int) (addr >> this->page_shift);
209
210 if (ONENAND_IS_2PLANE(this)) {
211 /* Make the even block number */
212 block &= ~1;
213 /* Is it the odd plane? */
214 if (addr & this->writesize)
215 block++;
216 page >>= 1;
217 }
209 page &= this->page_mask; 218 page &= this->page_mask;
210 break; 219 break;
211 } 220 }
@@ -216,8 +225,12 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
216 value = onenand_bufferram_address(this, block); 225 value = onenand_bufferram_address(this, block);
217 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2); 226 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
218 227
219 /* Switch to the next data buffer */ 228 if (ONENAND_IS_2PLANE(this))
220 ONENAND_SET_NEXT_BUFFERRAM(this); 229 /* It is always BufferRAM0 */
230 ONENAND_SET_BUFFERRAM0(this);
231 else
232 /* Switch to the next data buffer */
233 ONENAND_SET_NEXT_BUFFERRAM(this);
221 234
222 return 0; 235 return 0;
223 } 236 }
@@ -247,6 +260,8 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
247 break; 260 break;
248 261
249 default: 262 default:
263 if (ONENAND_IS_2PLANE(this) && cmd == ONENAND_CMD_PROG)
264 cmd = ONENAND_CMD_2X_PROG;
250 dataram = ONENAND_CURRENT_BUFFERRAM(this); 265 dataram = ONENAND_CURRENT_BUFFERRAM(this);
251 break; 266 break;
252 } 267 }
@@ -318,12 +333,14 @@ static int onenand_wait(struct mtd_info *mtd, int state)
318 if (interrupt & ONENAND_INT_READ) { 333 if (interrupt & ONENAND_INT_READ) {
319 int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS); 334 int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
320 if (ecc) { 335 if (ecc) {
321 printk(KERN_ERR "onenand_wait: ECC error = 0x%04x\n", ecc);
322 if (ecc & ONENAND_ECC_2BIT_ALL) { 336 if (ecc & ONENAND_ECC_2BIT_ALL) {
337 printk(KERN_ERR "onenand_wait: ECC error = 0x%04x\n", ecc);
323 mtd->ecc_stats.failed++; 338 mtd->ecc_stats.failed++;
324 return ecc; 339 return ecc;
325 } else if (ecc & ONENAND_ECC_1BIT_ALL) 340 } else if (ecc & ONENAND_ECC_1BIT_ALL) {
341 printk(KERN_INFO "onenand_wait: correctable ECC error = 0x%04x\n", ecc);
326 mtd->ecc_stats.corrected++; 342 mtd->ecc_stats.corrected++;
343 }
327 } 344 }
328 } else if (state == FL_READING) { 345 } else if (state == FL_READING) {
329 printk(KERN_ERR "onenand_wait: read timeout! ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt); 346 printk(KERN_ERR "onenand_wait: read timeout! ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt);
@@ -445,8 +462,9 @@ static inline int onenand_bufferram_offset(struct mtd_info *mtd, int area)
445 struct onenand_chip *this = mtd->priv; 462 struct onenand_chip *this = mtd->priv;
446 463
447 if (ONENAND_CURRENT_BUFFERRAM(this)) { 464 if (ONENAND_CURRENT_BUFFERRAM(this)) {
465 /* Note: the 'this->writesize' is a real page size */
448 if (area == ONENAND_DATARAM) 466 if (area == ONENAND_DATARAM)
449 return mtd->writesize; 467 return this->writesize;
450 if (area == ONENAND_SPARERAM) 468 if (area == ONENAND_SPARERAM)
451 return mtd->oobsize; 469 return mtd->oobsize;
452 } 470 }
@@ -572,6 +590,30 @@ static int onenand_write_bufferram(struct mtd_info *mtd, int area,
572} 590}
573 591
574/** 592/**
593 * onenand_get_2x_blockpage - [GENERIC] Get blockpage at 2x program mode
594 * @param mtd MTD data structure
595 * @param addr address to check
596 * @return blockpage address
597 *
598 * Get blockpage address at 2x program mode
599 */
600static int onenand_get_2x_blockpage(struct mtd_info *mtd, loff_t addr)
601{
602 struct onenand_chip *this = mtd->priv;
603 int blockpage, block, page;
604
605 /* Calculate the even block number */
606 block = (int) (addr >> this->erase_shift) & ~1;
607 /* Is it the odd plane? */
608 if (addr & this->writesize)
609 block++;
610 page = (int) (addr >> (this->page_shift + 1)) & this->page_mask;
611 blockpage = (block << 7) | page;
612
613 return blockpage;
614}
615
616/**
575 * onenand_check_bufferram - [GENERIC] Check BufferRAM information 617 * onenand_check_bufferram - [GENERIC] Check BufferRAM information
576 * @param mtd MTD data structure 618 * @param mtd MTD data structure
577 * @param addr address to check 619 * @param addr address to check
@@ -585,7 +627,10 @@ static int onenand_check_bufferram(struct mtd_info *mtd, loff_t addr)
585 int blockpage, found = 0; 627 int blockpage, found = 0;
586 unsigned int i; 628 unsigned int i;
587 629
588 blockpage = (int) (addr >> this->page_shift); 630 if (ONENAND_IS_2PLANE(this))
631 blockpage = onenand_get_2x_blockpage(mtd, addr);
632 else
633 blockpage = (int) (addr >> this->page_shift);
589 634
590 /* Is there valid data? */ 635 /* Is there valid data? */
591 i = ONENAND_CURRENT_BUFFERRAM(this); 636 i = ONENAND_CURRENT_BUFFERRAM(this);
@@ -625,7 +670,10 @@ static void onenand_update_bufferram(struct mtd_info *mtd, loff_t addr,
625 int blockpage; 670 int blockpage;
626 unsigned int i; 671 unsigned int i;
627 672
628 blockpage = (int) (addr >> this->page_shift); 673 if (ONENAND_IS_2PLANE(this))
674 blockpage = onenand_get_2x_blockpage(mtd, addr);
675 else
676 blockpage = (int) (addr >> this->page_shift);
629 677
630 /* Invalidate another BufferRAM */ 678 /* Invalidate another BufferRAM */
631 i = ONENAND_NEXT_BUFFERRAM(this); 679 i = ONENAND_NEXT_BUFFERRAM(this);
@@ -717,36 +765,86 @@ static void onenand_release_device(struct mtd_info *mtd)
717} 765}
718 766
719/** 767/**
720 * onenand_read - [MTD Interface] Read data from flash 768 * onenand_transfer_auto_oob - [Internal] oob auto-placement transfer
769 * @param mtd MTD device structure
770 * @param buf destination address
771 * @param column oob offset to read from
772 * @param thislen oob length to read
773 */
774static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int column,
775 int thislen)
776{
777 struct onenand_chip *this = mtd->priv;
778 struct nand_oobfree *free;
779 int readcol = column;
780 int readend = column + thislen;
781 int lastgap = 0;
782 unsigned int i;
783 uint8_t *oob_buf = this->oob_buf;
784
785 free = this->ecclayout->oobfree;
786 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
787 if (readcol >= lastgap)
788 readcol += free->offset - lastgap;
789 if (readend >= lastgap)
790 readend += free->offset - lastgap;
791 lastgap = free->offset + free->length;
792 }
793 this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
794 free = this->ecclayout->oobfree;
795 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
796 int free_end = free->offset + free->length;
797 if (free->offset < readend && free_end > readcol) {
798 int st = max_t(int,free->offset,readcol);
799 int ed = min_t(int,free_end,readend);
800 int n = ed - st;
801 memcpy(buf, oob_buf + st, n);
802 buf += n;
803 } else if (column == 0)
804 break;
805 }
806 return 0;
807}
808
809/**
810 * onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band
721 * @param mtd MTD device structure 811 * @param mtd MTD device structure
722 * @param from offset to read from 812 * @param from offset to read from
723 * @param len number of bytes to read 813 * @param ops: oob operation description structure
724 * @param retlen pointer to variable to store the number of read bytes
725 * @param buf the databuffer to put data
726 * 814 *
727 * Read with ecc 815 * OneNAND read main and/or out-of-band data
728*/ 816 */
729static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len, 817static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
730 size_t *retlen, u_char *buf) 818 struct mtd_oob_ops *ops)
731{ 819{
732 struct onenand_chip *this = mtd->priv; 820 struct onenand_chip *this = mtd->priv;
733 struct mtd_ecc_stats stats; 821 struct mtd_ecc_stats stats;
734 int read = 0, column; 822 size_t len = ops->len;
735 int thislen; 823 size_t ooblen = ops->ooblen;
824 u_char *buf = ops->datbuf;
825 u_char *oobbuf = ops->oobbuf;
826 int read = 0, column, thislen;
827 int oobread = 0, oobcolumn, thisooblen, oobsize;
736 int ret = 0, boundary = 0; 828 int ret = 0, boundary = 0;
829 int writesize = this->writesize;
737 830
738 DEBUG(MTD_DEBUG_LEVEL3, "onenand_read: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len); 831 DEBUG(MTD_DEBUG_LEVEL3, "onenand_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
832
833 if (ops->mode == MTD_OOB_AUTO)
834 oobsize = this->ecclayout->oobavail;
835 else
836 oobsize = mtd->oobsize;
837
838 oobcolumn = from & (mtd->oobsize - 1);
739 839
740 /* Do not allow reads past end of device */ 840 /* Do not allow reads past end of device */
741 if ((from + len) > mtd->size) { 841 if ((from + len) > mtd->size) {
742 printk(KERN_ERR "onenand_read: Attempt read beyond end of device\n"); 842 printk(KERN_ERR "onenand_read_ops_nolock: Attempt read beyond end of device\n");
743 *retlen = 0; 843 ops->retlen = 0;
844 ops->oobretlen = 0;
744 return -EINVAL; 845 return -EINVAL;
745 } 846 }
746 847
747 /* Grab the lock and see if the device is available */
748 onenand_get_device(mtd, FL_READING);
749
750 stats = mtd->ecc_stats; 848 stats = mtd->ecc_stats;
751 849
752 /* Read-while-load method */ 850 /* Read-while-load method */
@@ -754,22 +852,22 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
754 /* Do first load to bufferRAM */ 852 /* Do first load to bufferRAM */
755 if (read < len) { 853 if (read < len) {
756 if (!onenand_check_bufferram(mtd, from)) { 854 if (!onenand_check_bufferram(mtd, from)) {
757 this->command(mtd, ONENAND_CMD_READ, from, mtd->writesize); 855 this->command(mtd, ONENAND_CMD_READ, from, writesize);
758 ret = this->wait(mtd, FL_READING); 856 ret = this->wait(mtd, FL_READING);
759 onenand_update_bufferram(mtd, from, !ret); 857 onenand_update_bufferram(mtd, from, !ret);
760 } 858 }
761 } 859 }
762 860
763 thislen = min_t(int, mtd->writesize, len - read); 861 thislen = min_t(int, writesize, len - read);
764 column = from & (mtd->writesize - 1); 862 column = from & (writesize - 1);
765 if (column + thislen > mtd->writesize) 863 if (column + thislen > writesize)
766 thislen = mtd->writesize - column; 864 thislen = writesize - column;
767 865
768 while (!ret) { 866 while (!ret) {
769 /* If there is more to load then start next load */ 867 /* If there is more to load then start next load */
770 from += thislen; 868 from += thislen;
771 if (read + thislen < len) { 869 if (read + thislen < len) {
772 this->command(mtd, ONENAND_CMD_READ, from, mtd->writesize); 870 this->command(mtd, ONENAND_CMD_READ, from, writesize);
773 /* 871 /*
774 * Chip boundary handling in DDP 872 * Chip boundary handling in DDP
775 * Now we issued chip 1 read and pointed chip 1 873 * Now we issued chip 1 read and pointed chip 1
@@ -785,6 +883,21 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
785 } 883 }
786 /* While load is going, read from last bufferRAM */ 884 /* While load is going, read from last bufferRAM */
787 this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen); 885 this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
886
887 /* Read oob area if needed */
888 if (oobbuf) {
889 thisooblen = oobsize - oobcolumn;
890 thisooblen = min_t(int, thisooblen, ooblen - oobread);
891
892 if (ops->mode == MTD_OOB_AUTO)
893 onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
894 else
895 this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
896 oobread += thisooblen;
897 oobbuf += thisooblen;
898 oobcolumn = 0;
899 }
900
788 /* See if we are done */ 901 /* See if we are done */
789 read += thislen; 902 read += thislen;
790 if (read == len) 903 if (read == len)
@@ -794,7 +907,7 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
794 this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2); 907 this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
795 ONENAND_SET_NEXT_BUFFERRAM(this); 908 ONENAND_SET_NEXT_BUFFERRAM(this);
796 buf += thislen; 909 buf += thislen;
797 thislen = min_t(int, mtd->writesize, len - read); 910 thislen = min_t(int, writesize, len - read);
798 column = 0; 911 column = 0;
799 cond_resched(); 912 cond_resched();
800 /* Now wait for load */ 913 /* Now wait for load */
@@ -802,15 +915,13 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
802 onenand_update_bufferram(mtd, from, !ret); 915 onenand_update_bufferram(mtd, from, !ret);
803 } 916 }
804 917
805 /* Deselect and wake up anyone waiting on the device */
806 onenand_release_device(mtd);
807
808 /* 918 /*
809 * Return success, if no ECC failures, else -EBADMSG 919 * Return success, if no ECC failures, else -EBADMSG
810 * fs driver will take care of that, because 920 * fs driver will take care of that, because
811 * retlen == desired len and result == -EBADMSG 921 * retlen == desired len and result == -EBADMSG
812 */ 922 */
813 *retlen = read; 923 ops->retlen = read;
924 ops->oobretlen = oobread;
814 925
815 if (mtd->ecc_stats.failed - stats.failed) 926 if (mtd->ecc_stats.failed - stats.failed)
816 return -EBADMSG; 927 return -EBADMSG;
@@ -822,69 +933,29 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
822} 933}
823 934
824/** 935/**
825 * onenand_transfer_auto_oob - [Internal] oob auto-placement transfer 936 * onenand_read_oob_nolock - [MTD Interface] OneNAND read out-of-band
826 * @param mtd MTD device structure
827 * @param buf destination address
828 * @param column oob offset to read from
829 * @param thislen oob length to read
830 */
831static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int column,
832 int thislen)
833{
834 struct onenand_chip *this = mtd->priv;
835 struct nand_oobfree *free;
836 int readcol = column;
837 int readend = column + thislen;
838 int lastgap = 0;
839 unsigned int i;
840 uint8_t *oob_buf = this->oob_buf;
841
842 free = this->ecclayout->oobfree;
843 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
844 if (readcol >= lastgap)
845 readcol += free->offset - lastgap;
846 if (readend >= lastgap)
847 readend += free->offset - lastgap;
848 lastgap = free->offset + free->length;
849 }
850 this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
851 free = this->ecclayout->oobfree;
852 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
853 int free_end = free->offset + free->length;
854 if (free->offset < readend && free_end > readcol) {
855 int st = max_t(int,free->offset,readcol);
856 int ed = min_t(int,free_end,readend);
857 int n = ed - st;
858 memcpy(buf, oob_buf + st, n);
859 buf += n;
860 } else if (column == 0)
861 break;
862 }
863 return 0;
864}
865
866/**
867 * onenand_do_read_oob - [MTD Interface] OneNAND read out-of-band
868 * @param mtd MTD device structure 937 * @param mtd MTD device structure
869 * @param from offset to read from 938 * @param from offset to read from
870 * @param len number of bytes to read 939 * @param ops: oob operation description structure
871 * @param retlen pointer to variable to store the number of read bytes
872 * @param buf the databuffer to put data
873 * @param mode operation mode
874 * 940 *
875 * OneNAND read out-of-band data from the spare area 941 * OneNAND read out-of-band data from the spare area
876 */ 942 */
877static int onenand_do_read_oob(struct mtd_info *mtd, loff_t from, size_t len, 943static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
878 size_t *retlen, u_char *buf, mtd_oob_mode_t mode) 944 struct mtd_oob_ops *ops)
879{ 945{
880 struct onenand_chip *this = mtd->priv; 946 struct onenand_chip *this = mtd->priv;
881 int read = 0, thislen, column, oobsize; 947 int read = 0, thislen, column, oobsize;
948 size_t len = ops->ooblen;
949 mtd_oob_mode_t mode = ops->mode;
950 u_char *buf = ops->oobbuf;
882 int ret = 0; 951 int ret = 0;
883 952
884 DEBUG(MTD_DEBUG_LEVEL3, "onenand_read_oob: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len); 953 from += ops->ooboffs;
954
955 DEBUG(MTD_DEBUG_LEVEL3, "onenand_read_oob_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
885 956
886 /* Initialize return length value */ 957 /* Initialize return length value */
887 *retlen = 0; 958 ops->oobretlen = 0;
888 959
889 if (mode == MTD_OOB_AUTO) 960 if (mode == MTD_OOB_AUTO)
890 oobsize = this->ecclayout->oobavail; 961 oobsize = this->ecclayout->oobavail;
@@ -894,7 +965,7 @@ static int onenand_do_read_oob(struct mtd_info *mtd, loff_t from, size_t len,
894 column = from & (mtd->oobsize - 1); 965 column = from & (mtd->oobsize - 1);
895 966
896 if (unlikely(column >= oobsize)) { 967 if (unlikely(column >= oobsize)) {
897 printk(KERN_ERR "onenand_read_oob: Attempted to start read outside oob\n"); 968 printk(KERN_ERR "onenand_read_oob_nolock: Attempted to start read outside oob\n");
898 return -EINVAL; 969 return -EINVAL;
899 } 970 }
900 971
@@ -902,13 +973,10 @@ static int onenand_do_read_oob(struct mtd_info *mtd, loff_t from, size_t len,
902 if (unlikely(from >= mtd->size || 973 if (unlikely(from >= mtd->size ||
903 column + len > ((mtd->size >> this->page_shift) - 974 column + len > ((mtd->size >> this->page_shift) -
904 (from >> this->page_shift)) * oobsize)) { 975 (from >> this->page_shift)) * oobsize)) {
905 printk(KERN_ERR "onenand_read_oob: Attempted to read beyond end of device\n"); 976 printk(KERN_ERR "onenand_read_oob_nolock: Attempted to read beyond end of device\n");
906 return -EINVAL; 977 return -EINVAL;
907 } 978 }
908 979
909 /* Grab the lock and see if the device is available */
910 onenand_get_device(mtd, FL_READING);
911
912 while (read < len) { 980 while (read < len) {
913 cond_resched(); 981 cond_resched();
914 982
@@ -928,7 +996,7 @@ static int onenand_do_read_oob(struct mtd_info *mtd, loff_t from, size_t len,
928 this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen); 996 this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
929 997
930 if (ret) { 998 if (ret) {
931 printk(KERN_ERR "onenand_read_oob: read failed = 0x%x\n", ret); 999 printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
932 break; 1000 break;
933 } 1001 }
934 1002
@@ -947,22 +1015,52 @@ static int onenand_do_read_oob(struct mtd_info *mtd, loff_t from, size_t len,
947 } 1015 }
948 } 1016 }
949 1017
950 /* Deselect and wake up anyone waiting on the device */ 1018 ops->oobretlen = read;
1019 return ret;
1020}
1021
1022/**
1023 * onenand_read - [MTD Interface] Read data from flash
1024 * @param mtd MTD device structure
1025 * @param from offset to read from
1026 * @param len number of bytes to read
1027 * @param retlen pointer to variable to store the number of read bytes
1028 * @param buf the databuffer to put data
1029 *
1030 * Read with ecc
1031*/
1032static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
1033 size_t *retlen, u_char *buf)
1034{
1035 struct mtd_oob_ops ops = {
1036 .len = len,
1037 .ooblen = 0,
1038 .datbuf = buf,
1039 .oobbuf = NULL,
1040 };
1041 int ret;
1042
1043 onenand_get_device(mtd, FL_READING);
1044 ret = onenand_read_ops_nolock(mtd, from, &ops);
951 onenand_release_device(mtd); 1045 onenand_release_device(mtd);
952 1046
953 *retlen = read; 1047 *retlen = ops.retlen;
954 return ret; 1048 return ret;
955} 1049}
956 1050
957/** 1051/**
958 * onenand_read_oob - [MTD Interface] NAND write data and/or out-of-band 1052 * onenand_read_oob - [MTD Interface] Read main and/or out-of-band
959 * @param mtd: MTD device structure 1053 * @param mtd: MTD device structure
960 * @param from: offset to read from 1054 * @param from: offset to read from
961 * @param ops: oob operation description structure 1055 * @param ops: oob operation description structure
1056
1057 * Read main and/or out-of-band
962 */ 1058 */
963static int onenand_read_oob(struct mtd_info *mtd, loff_t from, 1059static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
964 struct mtd_oob_ops *ops) 1060 struct mtd_oob_ops *ops)
965{ 1061{
1062 int ret;
1063
966 switch (ops->mode) { 1064 switch (ops->mode) {
967 case MTD_OOB_PLACE: 1065 case MTD_OOB_PLACE:
968 case MTD_OOB_AUTO: 1066 case MTD_OOB_AUTO:
@@ -972,8 +1070,15 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
972 default: 1070 default:
973 return -EINVAL; 1071 return -EINVAL;
974 } 1072 }
975 return onenand_do_read_oob(mtd, from + ops->ooboffs, ops->ooblen, 1073
976 &ops->oobretlen, ops->oobbuf, ops->mode); 1074 onenand_get_device(mtd, FL_READING);
1075 if (ops->datbuf)
1076 ret = onenand_read_ops_nolock(mtd, from, ops);
1077 else
1078 ret = onenand_read_oob_nolock(mtd, from, ops);
1079 onenand_release_device(mtd);
1080
1081 return ret;
977} 1082}
978 1083
979/** 1084/**
@@ -1079,7 +1184,7 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
1079 /* Read more? */ 1184 /* Read more? */
1080 if (read < len) { 1185 if (read < len) {
1081 /* Update Page size */ 1186 /* Update Page size */
1082 from += mtd->writesize; 1187 from += this->writesize;
1083 column = 0; 1188 column = 0;
1084 } 1189 }
1085 } 1190 }
@@ -1097,7 +1202,6 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
1097 * @param mtd MTD device structure 1202 * @param mtd MTD device structure
1098 * @param buf the databuffer to verify 1203 * @param buf the databuffer to verify
1099 * @param to offset to read from 1204 * @param to offset to read from
1100 *
1101 */ 1205 */
1102static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to) 1206static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to)
1103{ 1207{
@@ -1125,7 +1229,6 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
1125 * @param buf the databuffer to verify 1229 * @param buf the databuffer to verify
1126 * @param addr offset to read from 1230 * @param addr offset to read from
1127 * @param len number of bytes to read and compare 1231 * @param len number of bytes to read and compare
1128 *
1129 */ 1232 */
1130static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len) 1233static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len)
1131{ 1234{
@@ -1135,12 +1238,12 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr,
1135 int thislen, column; 1238 int thislen, column;
1136 1239
1137 while (len != 0) { 1240 while (len != 0) {
1138 thislen = min_t(int, mtd->writesize, len); 1241 thislen = min_t(int, this->writesize, len);
1139 column = addr & (mtd->writesize - 1); 1242 column = addr & (this->writesize - 1);
1140 if (column + thislen > mtd->writesize) 1243 if (column + thislen > this->writesize)
1141 thislen = mtd->writesize - column; 1244 thislen = this->writesize - column;
1142 1245
1143 this->command(mtd, ONENAND_CMD_READ, addr, mtd->writesize); 1246 this->command(mtd, ONENAND_CMD_READ, addr, this->writesize);
1144 1247
1145 onenand_update_bufferram(mtd, addr, 0); 1248 onenand_update_bufferram(mtd, addr, 0);
1146 1249
@@ -1171,50 +1274,101 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr,
1171#define NOTALIGNED(x) ((x & (this->subpagesize - 1)) != 0) 1274#define NOTALIGNED(x) ((x & (this->subpagesize - 1)) != 0)
1172 1275
1173/** 1276/**
1174 * onenand_write - [MTD Interface] write buffer to FLASH 1277 * onenand_fill_auto_oob - [Internal] oob auto-placement transfer
1278 * @param mtd MTD device structure
1279 * @param oob_buf oob buffer
1280 * @param buf source address
1281 * @param column oob offset to write to
1282 * @param thislen oob length to write
1283 */
1284static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf,
1285 const u_char *buf, int column, int thislen)
1286{
1287 struct onenand_chip *this = mtd->priv;
1288 struct nand_oobfree *free;
1289 int writecol = column;
1290 int writeend = column + thislen;
1291 int lastgap = 0;
1292 unsigned int i;
1293
1294 free = this->ecclayout->oobfree;
1295 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
1296 if (writecol >= lastgap)
1297 writecol += free->offset - lastgap;
1298 if (writeend >= lastgap)
1299 writeend += free->offset - lastgap;
1300 lastgap = free->offset + free->length;
1301 }
1302 free = this->ecclayout->oobfree;
1303 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
1304 int free_end = free->offset + free->length;
1305 if (free->offset < writeend && free_end > writecol) {
1306 int st = max_t(int,free->offset,writecol);
1307 int ed = min_t(int,free_end,writeend);
1308 int n = ed - st;
1309 memcpy(oob_buf + st, buf, n);
1310 buf += n;
1311 } else if (column == 0)
1312 break;
1313 }
1314 return 0;
1315}
1316
1317/**
1318 * onenand_write_ops_nolock - [OneNAND Interface] write main and/or out-of-band
1175 * @param mtd MTD device structure 1319 * @param mtd MTD device structure
1176 * @param to offset to write to 1320 * @param to offset to write to
1177 * @param len number of bytes to write 1321 * @param ops oob operation description structure
1178 * @param retlen pointer to variable to store the number of written bytes
1179 * @param buf the data to write
1180 * 1322 *
1181 * Write with ECC 1323 * Write main and/or oob with ECC
1182 */ 1324 */
1183static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len, 1325static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1184 size_t *retlen, const u_char *buf) 1326 struct mtd_oob_ops *ops)
1185{ 1327{
1186 struct onenand_chip *this = mtd->priv; 1328 struct onenand_chip *this = mtd->priv;
1187 int written = 0; 1329 int written = 0, column, thislen, subpage;
1330 int oobwritten = 0, oobcolumn, thisooblen, oobsize;
1331 size_t len = ops->len;
1332 size_t ooblen = ops->ooblen;
1333 const u_char *buf = ops->datbuf;
1334 const u_char *oob = ops->oobbuf;
1335 u_char *oobbuf;
1188 int ret = 0; 1336 int ret = 0;
1189 int column, subpage;
1190 1337
1191 DEBUG(MTD_DEBUG_LEVEL3, "onenand_write: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len); 1338 DEBUG(MTD_DEBUG_LEVEL3, "onenand_write_ops_nolock: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
1192 1339
1193 /* Initialize retlen, in case of early exit */ 1340 /* Initialize retlen, in case of early exit */
1194 *retlen = 0; 1341 ops->retlen = 0;
1342 ops->oobretlen = 0;
1195 1343
1196 /* Do not allow writes past end of device */ 1344 /* Do not allow writes past end of device */
1197 if (unlikely((to + len) > mtd->size)) { 1345 if (unlikely((to + len) > mtd->size)) {
1198 printk(KERN_ERR "onenand_write: Attempt write to past end of device\n"); 1346 printk(KERN_ERR "onenand_write_ops_nolock: Attempt write to past end of device\n");
1199 return -EINVAL; 1347 return -EINVAL;
1200 } 1348 }
1201 1349
1202 /* Reject writes, which are not page aligned */ 1350 /* Reject writes, which are not page aligned */
1203 if (unlikely(NOTALIGNED(to)) || unlikely(NOTALIGNED(len))) { 1351 if (unlikely(NOTALIGNED(to)) || unlikely(NOTALIGNED(len))) {
1204 printk(KERN_ERR "onenand_write: Attempt to write not page aligned data\n"); 1352 printk(KERN_ERR "onenand_write_ops_nolock: Attempt to write not page aligned data\n");
1205 return -EINVAL; 1353 return -EINVAL;
1206 } 1354 }
1207 1355
1208 column = to & (mtd->writesize - 1); 1356 if (ops->mode == MTD_OOB_AUTO)
1357 oobsize = this->ecclayout->oobavail;
1358 else
1359 oobsize = mtd->oobsize;
1209 1360
1210 /* Grab the lock and see if the device is available */ 1361 oobcolumn = to & (mtd->oobsize - 1);
1211 onenand_get_device(mtd, FL_WRITING); 1362
1363 column = to & (mtd->writesize - 1);
1212 1364
1213 /* Loop until all data write */ 1365 /* Loop until all data write */
1214 while (written < len) { 1366 while (written < len) {
1215 int thislen = min_t(int, mtd->writesize - column, len - written);
1216 u_char *wbuf = (u_char *) buf; 1367 u_char *wbuf = (u_char *) buf;
1217 1368
1369 thislen = min_t(int, mtd->writesize - column, len - written);
1370 thisooblen = min_t(int, oobsize - oobcolumn, ooblen - oobwritten);
1371
1218 cond_resched(); 1372 cond_resched();
1219 1373
1220 this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen); 1374 this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
@@ -1228,7 +1382,25 @@ static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
1228 } 1382 }
1229 1383
1230 this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize); 1384 this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize);
1231 this->write_bufferram(mtd, ONENAND_SPARERAM, ffchars, 0, mtd->oobsize); 1385
1386 if (oob) {
1387 oobbuf = this->oob_buf;
1388
1389 /* We send data to spare ram with oobsize
1390 * to prevent byte access */
1391 memset(oobbuf, 0xff, mtd->oobsize);
1392 if (ops->mode == MTD_OOB_AUTO)
1393 onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen);
1394 else
1395 memcpy(oobbuf + oobcolumn, oob, thisooblen);
1396
1397 oobwritten += thisooblen;
1398 oob += thisooblen;
1399 oobcolumn = 0;
1400 } else
1401 oobbuf = (u_char *) ffchars;
1402
1403 this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
1232 1404
1233 this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize); 1405 this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
1234 1406
@@ -1236,16 +1408,20 @@ static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
1236 1408
1237 /* In partial page write we don't update bufferram */ 1409 /* In partial page write we don't update bufferram */
1238 onenand_update_bufferram(mtd, to, !ret && !subpage); 1410 onenand_update_bufferram(mtd, to, !ret && !subpage);
1411 if (ONENAND_IS_2PLANE(this)) {
1412 ONENAND_SET_BUFFERRAM1(this);
1413 onenand_update_bufferram(mtd, to + this->writesize, !ret && !subpage);
1414 }
1239 1415
1240 if (ret) { 1416 if (ret) {
1241 printk(KERN_ERR "onenand_write: write filaed %d\n", ret); 1417 printk(KERN_ERR "onenand_write_ops_nolock: write filaed %d\n", ret);
1242 break; 1418 break;
1243 } 1419 }
1244 1420
1245 /* Only check verify write turn on */ 1421 /* Only check verify write turn on */
1246 ret = onenand_verify(mtd, (u_char *) wbuf, to, thislen); 1422 ret = onenand_verify(mtd, (u_char *) wbuf, to, thislen);
1247 if (ret) { 1423 if (ret) {
1248 printk(KERN_ERR "onenand_write: verify failed %d\n", ret); 1424 printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret);
1249 break; 1425 break;
1250 } 1426 }
1251 1427
@@ -1262,54 +1438,14 @@ static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
1262 /* Deselect and wake up anyone waiting on the device */ 1438 /* Deselect and wake up anyone waiting on the device */
1263 onenand_release_device(mtd); 1439 onenand_release_device(mtd);
1264 1440
1265 *retlen = written; 1441 ops->retlen = written;
1266 1442
1267 return ret; 1443 return ret;
1268} 1444}
1269 1445
1270/**
1271 * onenand_fill_auto_oob - [Internal] oob auto-placement transfer
1272 * @param mtd MTD device structure
1273 * @param oob_buf oob buffer
1274 * @param buf source address
1275 * @param column oob offset to write to
1276 * @param thislen oob length to write
1277 */
1278static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf,
1279 const u_char *buf, int column, int thislen)
1280{
1281 struct onenand_chip *this = mtd->priv;
1282 struct nand_oobfree *free;
1283 int writecol = column;
1284 int writeend = column + thislen;
1285 int lastgap = 0;
1286 unsigned int i;
1287
1288 free = this->ecclayout->oobfree;
1289 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
1290 if (writecol >= lastgap)
1291 writecol += free->offset - lastgap;
1292 if (writeend >= lastgap)
1293 writeend += free->offset - lastgap;
1294 lastgap = free->offset + free->length;
1295 }
1296 free = this->ecclayout->oobfree;
1297 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
1298 int free_end = free->offset + free->length;
1299 if (free->offset < writeend && free_end > writecol) {
1300 int st = max_t(int,free->offset,writecol);
1301 int ed = min_t(int,free_end,writeend);
1302 int n = ed - st;
1303 memcpy(oob_buf + st, buf, n);
1304 buf += n;
1305 } else if (column == 0)
1306 break;
1307 }
1308 return 0;
1309}
1310 1446
1311/** 1447/**
1312 * onenand_do_write_oob - [Internal] OneNAND write out-of-band 1448 * onenand_write_oob_nolock - [Internal] OneNAND write out-of-band
1313 * @param mtd MTD device structure 1449 * @param mtd MTD device structure
1314 * @param to offset to write to 1450 * @param to offset to write to
1315 * @param len number of bytes to write 1451 * @param len number of bytes to write
@@ -1319,18 +1455,23 @@ static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf,
1319 * 1455 *
1320 * OneNAND write out-of-band 1456 * OneNAND write out-of-band
1321 */ 1457 */
1322static int onenand_do_write_oob(struct mtd_info *mtd, loff_t to, size_t len, 1458static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
1323 size_t *retlen, const u_char *buf, mtd_oob_mode_t mode) 1459 struct mtd_oob_ops *ops)
1324{ 1460{
1325 struct onenand_chip *this = mtd->priv; 1461 struct onenand_chip *this = mtd->priv;
1326 int column, ret = 0, oobsize; 1462 int column, ret = 0, oobsize;
1327 int written = 0; 1463 int written = 0;
1328 u_char *oobbuf; 1464 u_char *oobbuf;
1465 size_t len = ops->ooblen;
1466 const u_char *buf = ops->oobbuf;
1467 mtd_oob_mode_t mode = ops->mode;
1329 1468
1330 DEBUG(MTD_DEBUG_LEVEL3, "onenand_write_oob: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len); 1469 to += ops->ooboffs;
1470
1471 DEBUG(MTD_DEBUG_LEVEL3, "onenand_write_oob_nolock: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
1331 1472
1332 /* Initialize retlen, in case of early exit */ 1473 /* Initialize retlen, in case of early exit */
1333 *retlen = 0; 1474 ops->oobretlen = 0;
1334 1475
1335 if (mode == MTD_OOB_AUTO) 1476 if (mode == MTD_OOB_AUTO)
1336 oobsize = this->ecclayout->oobavail; 1477 oobsize = this->ecclayout->oobavail;
@@ -1340,13 +1481,13 @@ static int onenand_do_write_oob(struct mtd_info *mtd, loff_t to, size_t len,
1340 column = to & (mtd->oobsize - 1); 1481 column = to & (mtd->oobsize - 1);
1341 1482
1342 if (unlikely(column >= oobsize)) { 1483 if (unlikely(column >= oobsize)) {
1343 printk(KERN_ERR "onenand_write_oob: Attempted to start write outside oob\n"); 1484 printk(KERN_ERR "onenand_write_oob_nolock: Attempted to start write outside oob\n");
1344 return -EINVAL; 1485 return -EINVAL;
1345 } 1486 }
1346 1487
1347 /* For compatibility with NAND: Do not allow write past end of page */ 1488 /* For compatibility with NAND: Do not allow write past end of page */
1348 if (unlikely(column + len > oobsize)) { 1489 if (unlikely(column + len > oobsize)) {
1349 printk(KERN_ERR "onenand_write_oob: " 1490 printk(KERN_ERR "onenand_write_oob_nolock: "
1350 "Attempt to write past end of page\n"); 1491 "Attempt to write past end of page\n");
1351 return -EINVAL; 1492 return -EINVAL;
1352 } 1493 }
@@ -1355,13 +1496,10 @@ static int onenand_do_write_oob(struct mtd_info *mtd, loff_t to, size_t len,
1355 if (unlikely(to >= mtd->size || 1496 if (unlikely(to >= mtd->size ||
1356 column + len > ((mtd->size >> this->page_shift) - 1497 column + len > ((mtd->size >> this->page_shift) -
1357 (to >> this->page_shift)) * oobsize)) { 1498 (to >> this->page_shift)) * oobsize)) {
1358 printk(KERN_ERR "onenand_write_oob: Attempted to write past end of device\n"); 1499 printk(KERN_ERR "onenand_write_oob_nolock: Attempted to write past end of device\n");
1359 return -EINVAL; 1500 return -EINVAL;
1360 } 1501 }
1361 1502
1362 /* Grab the lock and see if the device is available */
1363 onenand_get_device(mtd, FL_WRITING);
1364
1365 oobbuf = this->oob_buf; 1503 oobbuf = this->oob_buf;
1366 1504
1367 /* Loop until all data write */ 1505 /* Loop until all data write */
@@ -1384,16 +1522,20 @@ static int onenand_do_write_oob(struct mtd_info *mtd, loff_t to, size_t len,
1384 this->command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize); 1522 this->command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize);
1385 1523
1386 onenand_update_bufferram(mtd, to, 0); 1524 onenand_update_bufferram(mtd, to, 0);
1525 if (ONENAND_IS_2PLANE(this)) {
1526 ONENAND_SET_BUFFERRAM1(this);
1527 onenand_update_bufferram(mtd, to + this->writesize, 0);
1528 }
1387 1529
1388 ret = this->wait(mtd, FL_WRITING); 1530 ret = this->wait(mtd, FL_WRITING);
1389 if (ret) { 1531 if (ret) {
1390 printk(KERN_ERR "onenand_write_oob: write failed %d\n", ret); 1532 printk(KERN_ERR "onenand_write_oob_nolock: write failed %d\n", ret);
1391 break; 1533 break;
1392 } 1534 }
1393 1535
1394 ret = onenand_verify_oob(mtd, oobbuf, to); 1536 ret = onenand_verify_oob(mtd, oobbuf, to);
1395 if (ret) { 1537 if (ret) {
1396 printk(KERN_ERR "onenand_write_oob: verify failed %d\n", ret); 1538 printk(KERN_ERR "onenand_write_oob_nolock: verify failed %d\n", ret);
1397 break; 1539 break;
1398 } 1540 }
1399 1541
@@ -1406,11 +1548,37 @@ static int onenand_do_write_oob(struct mtd_info *mtd, loff_t to, size_t len,
1406 column = 0; 1548 column = 0;
1407 } 1549 }
1408 1550
1409 /* Deselect and wake up anyone waiting on the device */ 1551 ops->oobretlen = written;
1410 onenand_release_device(mtd); 1552
1553 return ret;
1554}
1555
1556/**
1557 * onenand_write - [MTD Interface] write buffer to FLASH
1558 * @param mtd MTD device structure
1559 * @param to offset to write to
1560 * @param len number of bytes to write
1561 * @param retlen pointer to variable to store the number of written bytes
1562 * @param buf the data to write
1563 *
1564 * Write with ECC
1565 */
1566static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
1567 size_t *retlen, const u_char *buf)
1568{
1569 struct mtd_oob_ops ops = {
1570 .len = len,
1571 .ooblen = 0,
1572 .datbuf = (u_char *) buf,
1573 .oobbuf = NULL,
1574 };
1575 int ret;
1411 1576
1412 *retlen = written; 1577 onenand_get_device(mtd, FL_WRITING);
1578 ret = onenand_write_ops_nolock(mtd, to, &ops);
1579 onenand_release_device(mtd);
1413 1580
1581 *retlen = ops.retlen;
1414 return ret; 1582 return ret;
1415} 1583}
1416 1584
@@ -1423,6 +1591,8 @@ static int onenand_do_write_oob(struct mtd_info *mtd, loff_t to, size_t len,
1423static int onenand_write_oob(struct mtd_info *mtd, loff_t to, 1591static int onenand_write_oob(struct mtd_info *mtd, loff_t to,
1424 struct mtd_oob_ops *ops) 1592 struct mtd_oob_ops *ops)
1425{ 1593{
1594 int ret;
1595
1426 switch (ops->mode) { 1596 switch (ops->mode) {
1427 case MTD_OOB_PLACE: 1597 case MTD_OOB_PLACE:
1428 case MTD_OOB_AUTO: 1598 case MTD_OOB_AUTO:
@@ -1432,21 +1602,27 @@ static int onenand_write_oob(struct mtd_info *mtd, loff_t to,
1432 default: 1602 default:
1433 return -EINVAL; 1603 return -EINVAL;
1434 } 1604 }
1435 return onenand_do_write_oob(mtd, to + ops->ooboffs, ops->ooblen, 1605
1436 &ops->oobretlen, ops->oobbuf, ops->mode); 1606 onenand_get_device(mtd, FL_WRITING);
1607 if (ops->datbuf)
1608 ret = onenand_write_ops_nolock(mtd, to, ops);
1609 else
1610 ret = onenand_write_oob_nolock(mtd, to, ops);
1611 onenand_release_device(mtd);
1612
1613 return ret;
1437} 1614}
1438 1615
1439/** 1616/**
1440 * onenand_block_checkbad - [GENERIC] Check if a block is marked bad 1617 * onenand_block_isbad_nolock - [GENERIC] Check if a block is marked bad
1441 * @param mtd MTD device structure 1618 * @param mtd MTD device structure
1442 * @param ofs offset from device start 1619 * @param ofs offset from device start
1443 * @param getchip 0, if the chip is already selected
1444 * @param allowbbt 1, if its allowed to access the bbt area 1620 * @param allowbbt 1, if its allowed to access the bbt area
1445 * 1621 *
1446 * Check, if the block is bad. Either by reading the bad block table or 1622 * Check, if the block is bad. Either by reading the bad block table or
1447 * calling of the scan function. 1623 * calling of the scan function.
1448 */ 1624 */
1449static int onenand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip, int allowbbt) 1625static int onenand_block_isbad_nolock(struct mtd_info *mtd, loff_t ofs, int allowbbt)
1450{ 1626{
1451 struct onenand_chip *this = mtd->priv; 1627 struct onenand_chip *this = mtd->priv;
1452 struct bbm_info *bbm = this->bbm; 1628 struct bbm_info *bbm = this->bbm;
@@ -1507,7 +1683,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
1507 cond_resched(); 1683 cond_resched();
1508 1684
1509 /* Check if we have a bad block, we do not erase bad blocks */ 1685 /* Check if we have a bad block, we do not erase bad blocks */
1510 if (onenand_block_checkbad(mtd, addr, 0, 0)) { 1686 if (onenand_block_isbad_nolock(mtd, addr, 0)) {
1511 printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%08x\n", (unsigned int) addr); 1687 printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%08x\n", (unsigned int) addr);
1512 instr->state = MTD_ERASE_FAILED; 1688 instr->state = MTD_ERASE_FAILED;
1513 goto erase_exit; 1689 goto erase_exit;
@@ -1571,11 +1747,16 @@ static void onenand_sync(struct mtd_info *mtd)
1571 */ 1747 */
1572static int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs) 1748static int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
1573{ 1749{
1750 int ret;
1751
1574 /* Check for invalid offset */ 1752 /* Check for invalid offset */
1575 if (ofs > mtd->size) 1753 if (ofs > mtd->size)
1576 return -EINVAL; 1754 return -EINVAL;
1577 1755
1578 return onenand_block_checkbad(mtd, ofs, 1, 0); 1756 onenand_get_device(mtd, FL_READING);
1757 ret = onenand_block_isbad_nolock(mtd, ofs, 0);
1758 onenand_release_device(mtd);
1759 return ret;
1579} 1760}
1580 1761
1581/** 1762/**
@@ -1591,7 +1772,12 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
1591 struct onenand_chip *this = mtd->priv; 1772 struct onenand_chip *this = mtd->priv;
1592 struct bbm_info *bbm = this->bbm; 1773 struct bbm_info *bbm = this->bbm;
1593 u_char buf[2] = {0, 0}; 1774 u_char buf[2] = {0, 0};
1594 size_t retlen; 1775 struct mtd_oob_ops ops = {
1776 .mode = MTD_OOB_PLACE,
1777 .ooblen = 2,
1778 .oobbuf = buf,
1779 .ooboffs = 0,
1780 };
1595 int block; 1781 int block;
1596 1782
1597 /* Get block number */ 1783 /* Get block number */
@@ -1601,7 +1787,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
1601 1787
1602 /* We write two bytes, so we dont have to mess with 16 bit access */ 1788 /* We write two bytes, so we dont have to mess with 16 bit access */
1603 ofs += mtd->oobsize + (bbm->badblockpos & ~0x01); 1789 ofs += mtd->oobsize + (bbm->badblockpos & ~0x01);
1604 return onenand_do_write_oob(mtd, ofs , 2, &retlen, buf, MTD_OOB_PLACE); 1790 return onenand_write_oob_nolock(mtd, ofs, &ops);
1605} 1791}
1606 1792
1607/** 1793/**
@@ -1624,7 +1810,10 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
1624 return ret; 1810 return ret;
1625 } 1811 }
1626 1812
1627 return this->block_markbad(mtd, ofs); 1813 onenand_get_device(mtd, FL_WRITING);
1814 ret = this->block_markbad(mtd, ofs);
1815 onenand_release_device(mtd);
1816 return ret;
1628} 1817}
1629 1818
1630/** 1819/**
@@ -1823,13 +2012,19 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
1823 size_t *retlen, u_char *buf) 2012 size_t *retlen, u_char *buf)
1824{ 2013{
1825 struct onenand_chip *this = mtd->priv; 2014 struct onenand_chip *this = mtd->priv;
2015 struct mtd_oob_ops ops = {
2016 .len = len,
2017 .ooblen = 0,
2018 .datbuf = buf,
2019 .oobbuf = NULL,
2020 };
1826 int ret; 2021 int ret;
1827 2022
1828 /* Enter OTP access mode */ 2023 /* Enter OTP access mode */
1829 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0); 2024 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
1830 this->wait(mtd, FL_OTPING); 2025 this->wait(mtd, FL_OTPING);
1831 2026
1832 ret = mtd->read(mtd, from, len, retlen, buf); 2027 ret = onenand_read_ops_nolock(mtd, from, &ops);
1833 2028
1834 /* Exit OTP access mode */ 2029 /* Exit OTP access mode */
1835 this->command(mtd, ONENAND_CMD_RESET, 0, 0); 2030 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
@@ -1841,19 +2036,20 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
1841/** 2036/**
1842 * do_otp_write - [DEFAULT] Write OTP block area 2037 * do_otp_write - [DEFAULT] Write OTP block area
1843 * @param mtd MTD device structure 2038 * @param mtd MTD device structure
1844 * @param from The offset to write 2039 * @param to The offset to write
1845 * @param len number of bytes to write 2040 * @param len number of bytes to write
1846 * @param retlen pointer to variable to store the number of write bytes 2041 * @param retlen pointer to variable to store the number of write bytes
1847 * @param buf the databuffer to put/get data 2042 * @param buf the databuffer to put/get data
1848 * 2043 *
1849 * Write OTP block area. 2044 * Write OTP block area.
1850 */ 2045 */
1851static int do_otp_write(struct mtd_info *mtd, loff_t from, size_t len, 2046static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len,
1852 size_t *retlen, u_char *buf) 2047 size_t *retlen, u_char *buf)
1853{ 2048{
1854 struct onenand_chip *this = mtd->priv; 2049 struct onenand_chip *this = mtd->priv;
1855 unsigned char *pbuf = buf; 2050 unsigned char *pbuf = buf;
1856 int ret; 2051 int ret;
2052 struct mtd_oob_ops ops;
1857 2053
1858 /* Force buffer page aligned */ 2054 /* Force buffer page aligned */
1859 if (len < mtd->writesize) { 2055 if (len < mtd->writesize) {
@@ -1867,7 +2063,12 @@ static int do_otp_write(struct mtd_info *mtd, loff_t from, size_t len,
1867 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0); 2063 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
1868 this->wait(mtd, FL_OTPING); 2064 this->wait(mtd, FL_OTPING);
1869 2065
1870 ret = mtd->write(mtd, from, len, retlen, pbuf); 2066 ops.len = len;
2067 ops.ooblen = 0;
2068 ops.datbuf = pbuf;
2069 ops.oobbuf = NULL;
2070 ret = onenand_write_ops_nolock(mtd, to, &ops);
2071 *retlen = ops.retlen;
1871 2072
1872 /* Exit OTP access mode */ 2073 /* Exit OTP access mode */
1873 this->command(mtd, ONENAND_CMD_RESET, 0, 0); 2074 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
@@ -1890,13 +2091,21 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
1890 size_t *retlen, u_char *buf) 2091 size_t *retlen, u_char *buf)
1891{ 2092{
1892 struct onenand_chip *this = mtd->priv; 2093 struct onenand_chip *this = mtd->priv;
2094 struct mtd_oob_ops ops = {
2095 .mode = MTD_OOB_PLACE,
2096 .ooblen = len,
2097 .oobbuf = buf,
2098 .ooboffs = 0,
2099 };
1893 int ret; 2100 int ret;
1894 2101
1895 /* Enter OTP access mode */ 2102 /* Enter OTP access mode */
1896 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0); 2103 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
1897 this->wait(mtd, FL_OTPING); 2104 this->wait(mtd, FL_OTPING);
1898 2105
1899 ret = onenand_do_write_oob(mtd, from, len, retlen, buf, MTD_OOB_PLACE); 2106 ret = onenand_write_oob_nolock(mtd, from, &ops);
2107
2108 *retlen = ops.oobretlen;
1900 2109
1901 /* Exit OTP access mode */ 2110 /* Exit OTP access mode */
1902 this->command(mtd, ONENAND_CMD_RESET, 0, 0); 2111 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
@@ -1943,13 +2152,16 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
1943 if (((mtd->writesize * otp_pages) - (from + len)) < 0) 2152 if (((mtd->writesize * otp_pages) - (from + len)) < 0)
1944 return 0; 2153 return 0;
1945 2154
2155 onenand_get_device(mtd, FL_OTPING);
1946 while (len > 0 && otp_pages > 0) { 2156 while (len > 0 && otp_pages > 0) {
1947 if (!action) { /* OTP Info functions */ 2157 if (!action) { /* OTP Info functions */
1948 struct otp_info *otpinfo; 2158 struct otp_info *otpinfo;
1949 2159
1950 len -= sizeof(struct otp_info); 2160 len -= sizeof(struct otp_info);
1951 if (len <= 0) 2161 if (len <= 0) {
1952 return -ENOSPC; 2162 ret = -ENOSPC;
2163 break;
2164 }
1953 2165
1954 otpinfo = (struct otp_info *) buf; 2166 otpinfo = (struct otp_info *) buf;
1955 otpinfo->start = from; 2167 otpinfo->start = from;
@@ -1969,13 +2181,14 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
1969 len -= size; 2181 len -= size;
1970 *retlen += size; 2182 *retlen += size;
1971 2183
1972 if (ret < 0) 2184 if (ret)
1973 return ret; 2185 break;
1974 } 2186 }
1975 otp_pages--; 2187 otp_pages--;
1976 } 2188 }
2189 onenand_release_device(mtd);
1977 2190
1978 return 0; 2191 return ret;
1979} 2192}
1980 2193
1981/** 2194/**
@@ -2107,6 +2320,7 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
2107 * 2320 *
2108 * Check and set OneNAND features 2321 * Check and set OneNAND features
2109 * - lock scheme 2322 * - lock scheme
2323 * - two plane
2110 */ 2324 */
2111static void onenand_check_features(struct mtd_info *mtd) 2325static void onenand_check_features(struct mtd_info *mtd)
2112{ 2326{
@@ -2118,19 +2332,35 @@ static void onenand_check_features(struct mtd_info *mtd)
2118 process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT; 2332 process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT;
2119 2333
2120 /* Lock scheme */ 2334 /* Lock scheme */
2121 if (density >= ONENAND_DEVICE_DENSITY_1Gb) { 2335 switch (density) {
2336 case ONENAND_DEVICE_DENSITY_4Gb:
2337 this->options |= ONENAND_HAS_2PLANE;
2338
2339 case ONENAND_DEVICE_DENSITY_2Gb:
2340 /* 2Gb DDP don't have 2 plane */
2341 if (!ONENAND_IS_DDP(this))
2342 this->options |= ONENAND_HAS_2PLANE;
2343 this->options |= ONENAND_HAS_UNLOCK_ALL;
2344
2345 case ONENAND_DEVICE_DENSITY_1Gb:
2122 /* A-Die has all block unlock */ 2346 /* A-Die has all block unlock */
2123 if (process) { 2347 if (process)
2124 printk(KERN_DEBUG "Chip support all block unlock\n");
2125 this->options |= ONENAND_HAS_UNLOCK_ALL; 2348 this->options |= ONENAND_HAS_UNLOCK_ALL;
2126 } 2349 break;
2127 } else { 2350
2128 /* Some OneNAND has continues lock scheme */ 2351 default:
2129 if (!process) { 2352 /* Some OneNAND has continuous lock scheme */
2130 printk(KERN_DEBUG "Lock scheme is Continues Lock\n"); 2353 if (!process)
2131 this->options |= ONENAND_HAS_CONT_LOCK; 2354 this->options |= ONENAND_HAS_CONT_LOCK;
2132 } 2355 break;
2133 } 2356 }
2357
2358 if (this->options & ONENAND_HAS_CONT_LOCK)
2359 printk(KERN_DEBUG "Lock scheme is Continuous Lock\n");
2360 if (this->options & ONENAND_HAS_UNLOCK_ALL)
2361 printk(KERN_DEBUG "Chip support all block unlock\n");
2362 if (this->options & ONENAND_HAS_2PLANE)
2363 printk(KERN_DEBUG "Chip has 2 plane\n");
2134} 2364}
2135 2365
2136/** 2366/**
@@ -2154,7 +2384,7 @@ static void onenand_print_device_info(int device, int version)
2154 (16 << density), 2384 (16 << density),
2155 vcc ? "2.65/3.3" : "1.8", 2385 vcc ? "2.65/3.3" : "1.8",
2156 device); 2386 device);
2157 printk(KERN_DEBUG "OneNAND version = 0x%04x\n", version); 2387 printk(KERN_INFO "OneNAND version = 0x%04x\n", version);
2158} 2388}
2159 2389
2160static const struct onenand_manufacturers onenand_manuf_ids[] = { 2390static const struct onenand_manufacturers onenand_manuf_ids[] = {
@@ -2257,6 +2487,8 @@ static int onenand_probe(struct mtd_info *mtd)
2257 this->erase_shift = ffs(mtd->erasesize) - 1; 2487 this->erase_shift = ffs(mtd->erasesize) - 1;
2258 this->page_shift = ffs(mtd->writesize) - 1; 2488 this->page_shift = ffs(mtd->writesize) - 1;
2259 this->page_mask = (1 << (this->erase_shift - this->page_shift)) - 1; 2489 this->page_mask = (1 << (this->erase_shift - this->page_shift)) - 1;
2490 /* It's real page size */
2491 this->writesize = mtd->writesize;
2260 2492
2261 /* REVIST: Multichip handling */ 2493 /* REVIST: Multichip handling */
2262 2494
@@ -2265,6 +2497,17 @@ static int onenand_probe(struct mtd_info *mtd)
2265 /* Check OneNAND features */ 2497 /* Check OneNAND features */
2266 onenand_check_features(mtd); 2498 onenand_check_features(mtd);
2267 2499
2500 /*
2501 * We emulate the 4KiB page and 256KiB erase block size
2502 * But oobsize is still 64 bytes.
2503 * It is only valid if you turn on 2X program support,
2504 * Otherwise it will be ignored by compiler.
2505 */
2506 if (ONENAND_IS_2PLANE(this)) {
2507 mtd->writesize <<= 1;
2508 mtd->erasesize <<= 1;
2509 }
2510
2268 return 0; 2511 return 0;
2269} 2512}
2270 2513
diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c
new file mode 100644
index 0000000000..0d89ad5776
--- /dev/null
+++ b/drivers/mtd/onenand/onenand_sim.c
@@ -0,0 +1,495 @@
1/*
2 * linux/drivers/mtd/onenand/onenand_sim.c
3 *
4 * The OneNAND simulator
5 *
6 * Copyright © 2005-2007 Samsung Electronics
7 * Kyungmin Park <kyungmin.park@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/vmalloc.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/partitions.h>
20#include <linux/mtd/onenand.h>
21
22#include <linux/io.h>
23
24#ifndef CONFIG_ONENAND_SIM_MANUFACTURER
25#define CONFIG_ONENAND_SIM_MANUFACTURER 0xec
26#endif
27#ifndef CONFIG_ONENAND_SIM_DEVICE_ID
28#define CONFIG_ONENAND_SIM_DEVICE_ID 0x04
29#endif
30#ifndef CONFIG_ONENAND_SIM_VERSION_ID
31#define CONFIG_ONENAND_SIM_VERSION_ID 0x1e
32#endif
33
34static int manuf_id = CONFIG_ONENAND_SIM_MANUFACTURER;
35static int device_id = CONFIG_ONENAND_SIM_DEVICE_ID;
36static int version_id = CONFIG_ONENAND_SIM_VERSION_ID;
37
38struct onenand_flash {
39 void __iomem *base;
40 void __iomem *data;
41};
42
43#define ONENAND_CORE(flash) (flash->data)
44#define ONENAND_CORE_SPARE(flash, this, offset) \
45 ((flash->data) + (this->chipsize) + (offset >> 5))
46
47#define ONENAND_MAIN_AREA(this, offset) \
48 (this->base + ONENAND_DATARAM + offset)
49
50#define ONENAND_SPARE_AREA(this, offset) \
51 (this->base + ONENAND_SPARERAM + offset)
52
53#define ONENAND_GET_WP_STATUS(this) \
54 (readw(this->base + ONENAND_REG_WP_STATUS))
55
56#define ONENAND_SET_WP_STATUS(v, this) \
57 (writew(v, this->base + ONENAND_REG_WP_STATUS))
58
59/* It has all 0xff chars */
60#define MAX_ONENAND_PAGESIZE (2048 + 64)
61static unsigned char *ffchars;
62
63static struct mtd_partition os_partitions[] = {
64 {
65 .name = "OneNAND simulator partition",
66 .offset = 0,
67 .size = MTDPART_SIZ_FULL,
68 },
69};
70
71/*
72 * OneNAND simulator mtd
73 */
74struct onenand_info {
75 struct mtd_info mtd;
76 struct mtd_partition *parts;
77 struct onenand_chip onenand;
78 struct onenand_flash flash;
79};
80
81static struct onenand_info *info;
82
83#define DPRINTK(format, args...) \
84do { \
85 printk(KERN_DEBUG "%s[%d]: " format "\n", __func__, \
86 __LINE__, ##args); \
87} while (0)
88
89/**
90 * onenand_lock_handle - Handle Lock scheme
91 * @param this OneNAND device structure
92 * @param cmd The command to be sent
93 *
94 * Send lock command to OneNAND device.
95 * The lock scheme is depends on chip type.
96 */
97static void onenand_lock_handle(struct onenand_chip *this, int cmd)
98{
99 int block_lock_scheme;
100 int status;
101
102 status = ONENAND_GET_WP_STATUS(this);
103 block_lock_scheme = !(this->options & ONENAND_HAS_CONT_LOCK);
104
105 switch (cmd) {
106 case ONENAND_CMD_UNLOCK:
107 if (block_lock_scheme)
108 ONENAND_SET_WP_STATUS(ONENAND_WP_US, this);
109 else
110 ONENAND_SET_WP_STATUS(status | ONENAND_WP_US, this);
111 break;
112
113 case ONENAND_CMD_LOCK:
114 if (block_lock_scheme)
115 ONENAND_SET_WP_STATUS(ONENAND_WP_LS, this);
116 else
117 ONENAND_SET_WP_STATUS(status | ONENAND_WP_LS, this);
118 break;
119
120 case ONENAND_CMD_LOCK_TIGHT:
121 if (block_lock_scheme)
122 ONENAND_SET_WP_STATUS(ONENAND_WP_LTS, this);
123 else
124 ONENAND_SET_WP_STATUS(status | ONENAND_WP_LTS, this);
125 break;
126
127 default:
128 break;
129 }
130}
131
132/**
133 * onenand_bootram_handle - Handle BootRAM area
134 * @param this OneNAND device structure
135 * @param cmd The command to be sent
136 *
137 * Emulate BootRAM area. It is possible to do basic operation using BootRAM.
138 */
139static void onenand_bootram_handle(struct onenand_chip *this, int cmd)
140{
141 switch (cmd) {
142 case ONENAND_CMD_READID:
143 writew(manuf_id, this->base);
144 writew(device_id, this->base + 2);
145 writew(version_id, this->base + 4);
146 break;
147
148 default:
149 /* REVIST: Handle other commands */
150 break;
151 }
152}
153
154/**
155 * onenand_update_interrupt - Set interrupt register
156 * @param this OneNAND device structure
157 * @param cmd The command to be sent
158 *
159 * Update interrupt register. The status is depends on command.
160 */
161static void onenand_update_interrupt(struct onenand_chip *this, int cmd)
162{
163 int interrupt = ONENAND_INT_MASTER;
164
165 switch (cmd) {
166 case ONENAND_CMD_READ:
167 case ONENAND_CMD_READOOB:
168 interrupt |= ONENAND_INT_READ;
169 break;
170
171 case ONENAND_CMD_PROG:
172 case ONENAND_CMD_PROGOOB:
173 interrupt |= ONENAND_INT_WRITE;
174 break;
175
176 case ONENAND_CMD_ERASE:
177 interrupt |= ONENAND_INT_ERASE;
178 break;
179
180 case ONENAND_CMD_RESET:
181 interrupt |= ONENAND_INT_RESET;
182 break;
183
184 default:
185 break;
186 }
187
188 writew(interrupt, this->base + ONENAND_REG_INTERRUPT);
189}
190
191/**
192 * onenand_check_overwrite - Check over-write if happend
193 * @param dest The destination pointer
194 * @param src The source pointer
195 * @param count The length to be check
196 * @return 0 on same, otherwise 1
197 *
198 * Compare the source with destination
199 */
200static int onenand_check_overwrite(void *dest, void *src, size_t count)
201{
202 unsigned int *s = (unsigned int *) src;
203 unsigned int *d = (unsigned int *) dest;
204 int i;
205
206 count >>= 2;
207 for (i = 0; i < count; i++)
208 if ((*s++ ^ *d++) != 0)
209 return 1;
210
211 return 0;
212}
213
214/**
215 * onenand_data_handle - Handle OneNAND Core and DataRAM
216 * @param this OneNAND device structure
217 * @param cmd The command to be sent
218 * @param dataram Which dataram used
219 * @param offset The offset to OneNAND Core
220 *
221 * Copy data from OneNAND Core to DataRAM (read)
222 * Copy data from DataRAM to OneNAND Core (write)
223 * Erase the OneNAND Core (erase)
224 */
225static void onenand_data_handle(struct onenand_chip *this, int cmd,
226 int dataram, unsigned int offset)
227{
228 struct mtd_info *mtd = &info->mtd;
229 struct onenand_flash *flash = this->priv;
230 int main_offset, spare_offset;
231 void __iomem *src;
232 void __iomem *dest;
233 unsigned int i;
234
235 if (dataram) {
236 main_offset = mtd->writesize;
237 spare_offset = mtd->oobsize;
238 } else {
239 main_offset = 0;
240 spare_offset = 0;
241 }
242
243 switch (cmd) {
244 case ONENAND_CMD_READ:
245 src = ONENAND_CORE(flash) + offset;
246 dest = ONENAND_MAIN_AREA(this, main_offset);
247 memcpy(dest, src, mtd->writesize);
248 /* Fall through */
249
250 case ONENAND_CMD_READOOB:
251 src = ONENAND_CORE_SPARE(flash, this, offset);
252 dest = ONENAND_SPARE_AREA(this, spare_offset);
253 memcpy(dest, src, mtd->oobsize);
254 break;
255
256 case ONENAND_CMD_PROG:
257 src = ONENAND_MAIN_AREA(this, main_offset);
258 dest = ONENAND_CORE(flash) + offset;
259 /* To handle partial write */
260 for (i = 0; i < (1 << mtd->subpage_sft); i++) {
261 int off = i * this->subpagesize;
262 if (!memcmp(src + off, ffchars, this->subpagesize))
263 continue;
264 if (memcmp(dest + off, ffchars, this->subpagesize) &&
265 onenand_check_overwrite(dest + off, src + off, this->subpagesize))
266 printk(KERN_ERR "over-write happend at 0x%08x\n", offset);
267 memcpy(dest + off, src + off, this->subpagesize);
268 }
269 /* Fall through */
270
271 case ONENAND_CMD_PROGOOB:
272 src = ONENAND_SPARE_AREA(this, spare_offset);
273 /* Check all data is 0xff chars */
274 if (!memcmp(src, ffchars, mtd->oobsize))
275 break;
276
277 dest = ONENAND_CORE_SPARE(flash, this, offset);
278 if (memcmp(dest, ffchars, mtd->oobsize) &&
279 onenand_check_overwrite(dest, src, mtd->oobsize))
280 printk(KERN_ERR "OOB: over-write happend at 0x%08x\n",
281 offset);
282 memcpy(dest, src, mtd->oobsize);
283 break;
284
285 case ONENAND_CMD_ERASE:
286 memset(ONENAND_CORE(flash) + offset, 0xff, mtd->erasesize);
287 memset(ONENAND_CORE_SPARE(flash, this, offset), 0xff,
288 (mtd->erasesize >> 5));
289 break;
290
291 default:
292 break;
293 }
294}
295
296/**
297 * onenand_command_handle - Handle command
298 * @param this OneNAND device structure
299 * @param cmd The command to be sent
300 *
301 * Emulate OneNAND command.
302 */
303static void onenand_command_handle(struct onenand_chip *this, int cmd)
304{
305 unsigned long offset = 0;
306 int block = -1, page = -1, bufferram = -1;
307 int dataram = 0;
308
309 switch (cmd) {
310 case ONENAND_CMD_UNLOCK:
311 case ONENAND_CMD_LOCK:
312 case ONENAND_CMD_LOCK_TIGHT:
313 case ONENAND_CMD_UNLOCK_ALL:
314 onenand_lock_handle(this, cmd);
315 break;
316
317 case ONENAND_CMD_BUFFERRAM:
318 /* Do nothing */
319 return;
320
321 default:
322 block = (int) readw(this->base + ONENAND_REG_START_ADDRESS1);
323 if (block & (1 << ONENAND_DDP_SHIFT)) {
324 block &= ~(1 << ONENAND_DDP_SHIFT);
325 /* The half of chip block */
326 block += this->chipsize >> (this->erase_shift + 1);
327 }
328 if (cmd == ONENAND_CMD_ERASE)
329 break;
330
331 page = (int) readw(this->base + ONENAND_REG_START_ADDRESS8);
332 page = (page >> ONENAND_FPA_SHIFT);
333 bufferram = (int) readw(this->base + ONENAND_REG_START_BUFFER);
334 bufferram >>= ONENAND_BSA_SHIFT;
335 bufferram &= ONENAND_BSA_DATARAM1;
336 dataram = (bufferram == ONENAND_BSA_DATARAM1) ? 1 : 0;
337 break;
338 }
339
340 if (block != -1)
341 offset += block << this->erase_shift;
342
343 if (page != -1)
344 offset += page << this->page_shift;
345
346 onenand_data_handle(this, cmd, dataram, offset);
347
348 onenand_update_interrupt(this, cmd);
349}
350
351/**
352 * onenand_writew - [OneNAND Interface] Emulate write operation
353 * @param value value to write
354 * @param addr address to write
355 *
356 * Write OneNAND register with value
357 */
358static void onenand_writew(unsigned short value, void __iomem * addr)
359{
360 struct onenand_chip *this = info->mtd.priv;
361
362 /* BootRAM handling */
363 if (addr < this->base + ONENAND_DATARAM) {
364 onenand_bootram_handle(this, value);
365 return;
366 }
367 /* Command handling */
368 if (addr == this->base + ONENAND_REG_COMMAND)
369 onenand_command_handle(this, value);
370
371 writew(value, addr);
372}
373
374/**
375 * flash_init - Initialize OneNAND simulator
376 * @param flash OneNAND simulaotr data strucutres
377 *
378 * Initialize OneNAND simulator.
379 */
380static int __init flash_init(struct onenand_flash *flash)
381{
382 int density, size;
383 int buffer_size;
384
385 flash->base = kzalloc(131072, GFP_KERNEL);
386 if (!flash->base) {
387 printk(KERN_ERR "Unable to allocate base address.\n");
388 return -ENOMEM;
389 }
390
391 density = device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
392 size = ((16 << 20) << density);
393
394 ONENAND_CORE(flash) = vmalloc(size + (size >> 5));
395 if (!ONENAND_CORE(flash)) {
396 printk(KERN_ERR "Unable to allocate nand core address.\n");
397 kfree(flash->base);
398 return -ENOMEM;
399 }
400
401 memset(ONENAND_CORE(flash), 0xff, size + (size >> 5));
402
403 /* Setup registers */
404 writew(manuf_id, flash->base + ONENAND_REG_MANUFACTURER_ID);
405 writew(device_id, flash->base + ONENAND_REG_DEVICE_ID);
406 writew(version_id, flash->base + ONENAND_REG_VERSION_ID);
407
408 if (density < 2)
409 buffer_size = 0x0400; /* 1KiB page */
410 else
411 buffer_size = 0x0800; /* 2KiB page */
412 writew(buffer_size, flash->base + ONENAND_REG_DATA_BUFFER_SIZE);
413
414 return 0;
415}
416
417/**
418 * flash_exit - Clean up OneNAND simulator
419 * @param flash OneNAND simulaotr data strucutres
420 *
421 * Clean up OneNAND simulator.
422 */
423static void flash_exit(struct onenand_flash *flash)
424{
425 vfree(ONENAND_CORE(flash));
426 kfree(flash->base);
427 kfree(flash);
428}
429
430static int __init onenand_sim_init(void)
431{
432 /* Allocate all 0xff chars pointer */
433 ffchars = kmalloc(MAX_ONENAND_PAGESIZE, GFP_KERNEL);
434 if (!ffchars) {
435 printk(KERN_ERR "Unable to allocate ff chars.\n");
436 return -ENOMEM;
437 }
438 memset(ffchars, 0xff, MAX_ONENAND_PAGESIZE);
439
440 /* Allocate OneNAND simulator mtd pointer */
441 info = kzalloc(sizeof(struct onenand_info), GFP_KERNEL);
442 if (!info) {
443 printk(KERN_ERR "Unable to allocate core structures.\n");
444 kfree(ffchars);
445 return -ENOMEM;
446 }
447
448 /* Override write_word function */
449 info->onenand.write_word = onenand_writew;
450
451 if (flash_init(&info->flash)) {
452 printk(KERN_ERR "Unable to allocat flash.\n");
453 kfree(ffchars);
454 kfree(info);
455 return -ENOMEM;
456 }
457
458 info->parts = os_partitions;
459
460 info->onenand.base = info->flash.base;
461 info->onenand.priv = &info->flash;
462
463 info->mtd.name = "OneNAND simulator";
464 info->mtd.priv = &info->onenand;
465 info->mtd.owner = THIS_MODULE;
466
467 if (onenand_scan(&info->mtd, 1)) {
468 flash_exit(&info->flash);
469 kfree(ffchars);
470 kfree(info);
471 return -ENXIO;
472 }
473
474 add_mtd_partitions(&info->mtd, info->parts, ARRAY_SIZE(os_partitions));
475
476 return 0;
477}
478
479static void __exit onenand_sim_exit(void)
480{
481 struct onenand_chip *this = info->mtd.priv;
482 struct onenand_flash *flash = this->priv;
483
484 onenand_release(&info->mtd);
485 flash_exit(flash);
486 kfree(ffchars);
487 kfree(info);
488}
489
490module_init(onenand_sim_init);
491module_exit(onenand_sim_exit);
492
493MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
494MODULE_DESCRIPTION("The OneNAND flash simulator");
495MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index 006c03aacb..823fba4e6d 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -779,10 +779,8 @@ static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
779 else { 779 else {
780 if (!mtd->erasesize) { 780 if (!mtd->erasesize) {
781 printk(KERN_WARNING PREFIX "please provide block_size"); 781 printk(KERN_WARNING PREFIX "please provide block_size");
782 kfree(part); 782 goto out;
783 return; 783 } else
784 }
785 else
786 part->block_size = mtd->erasesize; 784 part->block_size = mtd->erasesize;
787 } 785 }
788 786
@@ -804,7 +802,7 @@ static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
804 if (!add_mtd_blktrans_dev((void*)part)) 802 if (!add_mtd_blktrans_dev((void*)part))
805 return; 803 return;
806 } 804 }
807 805out:
808 kfree(part); 806 kfree(part);
809} 807}
810 808
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 94ee549344..29c41eeb09 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -1314,11 +1314,10 @@ static int paranoid_check_si(const struct ubi_device *ubi,
1314 * Make sure that all the physical eraseblocks are in one of the lists 1314 * Make sure that all the physical eraseblocks are in one of the lists
1315 * or trees. 1315 * or trees.
1316 */ 1316 */
1317 buf = kmalloc(ubi->peb_count, GFP_KERNEL); 1317 buf = kzalloc(ubi->peb_count, GFP_KERNEL);
1318 if (!buf) 1318 if (!buf)
1319 return -ENOMEM; 1319 return -ENOMEM;
1320 1320
1321 memset(buf, 1, ubi->peb_count);
1322 for (pnum = 0; pnum < ubi->peb_count; pnum++) { 1321 for (pnum = 0; pnum < ubi->peb_count; pnum++) {
1323 err = ubi_io_is_bad(ubi, pnum); 1322 err = ubi_io_is_bad(ubi, pnum);
1324 if (err < 0) { 1323 if (err < 0) {
@@ -1326,28 +1325,28 @@ static int paranoid_check_si(const struct ubi_device *ubi,
1326 return err; 1325 return err;
1327 } 1326 }
1328 else if (err) 1327 else if (err)
1329 buf[pnum] = 0; 1328 buf[pnum] = 1;
1330 } 1329 }
1331 1330
1332 ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) 1331 ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb)
1333 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) 1332 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
1334 buf[seb->pnum] = 0; 1333 buf[seb->pnum] = 1;
1335 1334
1336 list_for_each_entry(seb, &si->free, u.list) 1335 list_for_each_entry(seb, &si->free, u.list)
1337 buf[seb->pnum] = 0; 1336 buf[seb->pnum] = 1;
1338 1337
1339 list_for_each_entry(seb, &si->corr, u.list) 1338 list_for_each_entry(seb, &si->corr, u.list)
1340 buf[seb->pnum] = 0; 1339 buf[seb->pnum] = 1;
1341 1340
1342 list_for_each_entry(seb, &si->erase, u.list) 1341 list_for_each_entry(seb, &si->erase, u.list)
1343 buf[seb->pnum] = 0; 1342 buf[seb->pnum] = 1;
1344 1343
1345 list_for_each_entry(seb, &si->alien, u.list) 1344 list_for_each_entry(seb, &si->alien, u.list)
1346 buf[seb->pnum] = 0; 1345 buf[seb->pnum] = 1;
1347 1346
1348 err = 0; 1347 err = 0;
1349 for (pnum = 0; pnum < ubi->peb_count; pnum++) 1348 for (pnum = 0; pnum < ubi->peb_count; pnum++)
1350 if (buf[pnum]) { 1349 if (!buf[pnum]) {
1351 ubi_err("PEB %d is not referred", pnum); 1350 ubi_err("PEB %d is not referred", pnum);
1352 err = 1; 1351 err = 1;
1353 } 1352 }
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index ebf1a3a88e..b74dbeef80 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -1023,7 +1023,7 @@ static int lance_rx( struct net_device *dev )
1023 DECLARE_MAC_BUF(mac); 1023 DECLARE_MAC_BUF(mac);
1024 DECLARE_MAC_BUF(mac2); 1024 DECLARE_MAC_BUF(mac2);
1025 1025
1026 printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %s to %s ", 1026 printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %s to %s "
1027 "data %02x %02x %02x %02x %02x %02x %02x %02x " 1027 "data %02x %02x %02x %02x %02x %02x %02x %02x "
1028 "len %d\n", 1028 "len %d\n",
1029 dev->name, ((u_short *)data)[6], 1029 dev->name, ((u_short *)data)[6],
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index d68accea38..78ed633ceb 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2652,10 +2652,10 @@ static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
2652 REG_RD(bp, BNX2_HC_COMMAND); 2652 REG_RD(bp, BNX2_HC_COMMAND);
2653 } 2653 }
2654 2654
2655 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons) 2655 if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2656 bnx2_tx_int(bp); 2656 bnx2_tx_int(bp);
2657 2657
2658 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) 2658 if (sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons)
2659 work_done += bnx2_rx_int(bp, budget - work_done); 2659 work_done += bnx2_rx_int(bp, budget - work_done);
2660 2660
2661 return work_done; 2661 return work_done;
@@ -2665,6 +2665,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
2665{ 2665{
2666 struct bnx2 *bp = container_of(napi, struct bnx2, napi); 2666 struct bnx2 *bp = container_of(napi, struct bnx2, napi);
2667 int work_done = 0; 2667 int work_done = 0;
2668 struct status_block *sblk = bp->status_blk;
2668 2669
2669 while (1) { 2670 while (1) {
2670 work_done = bnx2_poll_work(bp, work_done, budget); 2671 work_done = bnx2_poll_work(bp, work_done, budget);
@@ -2672,16 +2673,19 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
2672 if (unlikely(work_done >= budget)) 2673 if (unlikely(work_done >= budget))
2673 break; 2674 break;
2674 2675
2676 /* bp->last_status_idx is used below to tell the hw how
2677 * much work has been processed, so we must read it before
2678 * checking for more work.
2679 */
2680 bp->last_status_idx = sblk->status_idx;
2681 rmb();
2675 if (likely(!bnx2_has_work(bp))) { 2682 if (likely(!bnx2_has_work(bp))) {
2676 bp->last_status_idx = bp->status_blk->status_idx;
2677 rmb();
2678
2679 netif_rx_complete(bp->dev, napi); 2683 netif_rx_complete(bp->dev, napi);
2680 if (likely(bp->flags & USING_MSI_FLAG)) { 2684 if (likely(bp->flags & USING_MSI_FLAG)) {
2681 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, 2685 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2682 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | 2686 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2683 bp->last_status_idx); 2687 bp->last_status_idx);
2684 return 0; 2688 break;
2685 } 2689 }
2686 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, 2690 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2687 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | 2691 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 4ac161e1ca..7d7758f3ad 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1183,7 +1183,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
1183 pool_count[i], pool_size[i], 1183 pool_count[i], pool_size[i],
1184 pool_active[i]); 1184 pool_active[i]);
1185 kobj->parent = &dev->dev.kobj; 1185 kobj->parent = &dev->dev.kobj;
1186 sprintf(kobj->name, "pool%d", i); 1186 kobject_set_name(kobj, "pool%d", i);
1187 kobj->ktype = &ktype_veth_pool; 1187 kobj->ktype = &ktype_veth_pool;
1188 kobject_register(kobj); 1188 kobject_register(kobj);
1189 } 1189 }
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 6589239b79..18770527df 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -538,8 +538,9 @@ static void mace_set_multicast(struct net_device *dev)
538 local_irq_restore(flags); 538 local_irq_restore(flags);
539} 539}
540 540
541static void mace_handle_misc_intrs(struct mace_data *mp, int intr) 541static void mace_handle_misc_intrs(struct net_device *dev, int intr)
542{ 542{
543 struct mace_data *mp = netdev_priv(dev);
543 volatile struct mace *mb = mp->mace; 544 volatile struct mace *mb = mp->mace;
544 static int mace_babbles, mace_jabbers; 545 static int mace_babbles, mace_jabbers;
545 546
@@ -571,7 +572,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
571 local_irq_save(flags); 572 local_irq_save(flags);
572 573
573 intr = mb->ir; /* read interrupt register */ 574 intr = mb->ir; /* read interrupt register */
574 mace_handle_misc_intrs(mp, intr); 575 mace_handle_misc_intrs(dev, intr);
575 576
576 if (intr & XMTINT) { 577 if (intr & XMTINT) {
577 fs = mb->xmtfs; 578 fs = mb->xmtfs;
@@ -645,7 +646,6 @@ static void mace_tx_timeout(struct net_device *dev)
645 646
646static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf) 647static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
647{ 648{
648 struct mace_data *mp = netdev_priv(dev);
649 struct sk_buff *skb; 649 struct sk_buff *skb;
650 unsigned int frame_status = mf->rcvsts; 650 unsigned int frame_status = mf->rcvsts;
651 651
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index b33d21f4ef..84f2d6382f 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -784,7 +784,6 @@ static int mv643xx_eth_open(struct net_device *dev)
784 unsigned int port_num = mp->port_num; 784 unsigned int port_num = mp->port_num;
785 unsigned int size; 785 unsigned int size;
786 int err; 786 int err;
787 DECLARE_MAC_BUF(mac);
788 787
789 /* Clear any pending ethernet port interrupts */ 788 /* Clear any pending ethernet port interrupts */
790 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); 789 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
@@ -1296,6 +1295,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1296 struct ethtool_cmd cmd; 1295 struct ethtool_cmd cmd;
1297 int duplex = DUPLEX_HALF; 1296 int duplex = DUPLEX_HALF;
1298 int speed = 0; /* default to auto-negotiation */ 1297 int speed = 0; /* default to auto-negotiation */
1298 DECLARE_MAC_BUF(mac);
1299 1299
1300 pd = pdev->dev.platform_data; 1300 pd = pdev->dev.platform_data;
1301 if (pd == NULL) { 1301 if (pd == NULL) {
diff --git a/drivers/net/mvme147.c b/drivers/net/mvme147.c
index 86c9c06433..06ca425215 100644
--- a/drivers/net/mvme147.c
+++ b/drivers/net/mvme147.c
@@ -85,7 +85,6 @@ struct net_device * __init mvme147lance_probe(int unit)
85 dev->open = &m147lance_open; 85 dev->open = &m147lance_open;
86 dev->stop = &m147lance_close; 86 dev->stop = &m147lance_close;
87 dev->hard_start_xmit = &lance_start_xmit; 87 dev->hard_start_xmit = &lance_start_xmit;
88 dev->get_stats = &lance_get_stats;
89 dev->set_multicast_list = &lance_set_multicast; 88 dev->set_multicast_list = &lance_set_multicast;
90 dev->tx_timeout = &lance_tx_timeout; 89 dev->tx_timeout = &lance_tx_timeout;
91 dev->dma = 0; 90 dev->dma = 0;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 0a32034654..68f728f0b6 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -52,7 +52,7 @@
52#include "sky2.h" 52#include "sky2.h"
53 53
54#define DRV_NAME "sky2" 54#define DRV_NAME "sky2"
55#define DRV_VERSION "1.18" 55#define DRV_VERSION "1.19"
56#define PFX DRV_NAME " " 56#define PFX DRV_NAME " "
57 57
58/* 58/*
@@ -296,10 +296,10 @@ static const u16 copper_fc_adv[] = {
296 296
297/* flow control to advertise bits when using 1000BaseX */ 297/* flow control to advertise bits when using 1000BaseX */
298static const u16 fiber_fc_adv[] = { 298static const u16 fiber_fc_adv[] = {
299 [FC_BOTH] = PHY_M_P_BOTH_MD_X, 299 [FC_NONE] = PHY_M_P_NO_PAUSE_X,
300 [FC_TX] = PHY_M_P_ASYM_MD_X, 300 [FC_TX] = PHY_M_P_ASYM_MD_X,
301 [FC_RX] = PHY_M_P_SYM_MD_X, 301 [FC_RX] = PHY_M_P_SYM_MD_X,
302 [FC_NONE] = PHY_M_P_NO_PAUSE_X, 302 [FC_BOTH] = PHY_M_P_BOTH_MD_X,
303}; 303};
304 304
305/* flow control to GMA disable bits */ 305/* flow control to GMA disable bits */
@@ -606,20 +606,19 @@ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff)
606{ 606{
607 struct pci_dev *pdev = hw->pdev; 607 struct pci_dev *pdev = hw->pdev;
608 u32 reg1; 608 u32 reg1;
609 static const u32 phy_power[] 609 static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
610 = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD }; 610 static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
611
612 /* looks like this XL is back asswards .. */
613 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
614 onoff = !onoff;
615 611
616 pci_read_config_dword(pdev, PCI_DEV_REG1, &reg1); 612 pci_read_config_dword(pdev, PCI_DEV_REG1, &reg1);
613 /* Turn on/off phy power saving */
617 if (onoff) 614 if (onoff)
618 /* Turn off phy power saving */
619 reg1 &= ~phy_power[port]; 615 reg1 &= ~phy_power[port];
620 else 616 else
621 reg1 |= phy_power[port]; 617 reg1 |= phy_power[port];
622 618
619 if (onoff && hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
620 reg1 |= coma_mode[port];
621
623 pci_write_config_dword(pdev, PCI_DEV_REG1, reg1); 622 pci_write_config_dword(pdev, PCI_DEV_REG1, reg1);
624 pci_read_config_dword(pdev, PCI_DEV_REG1, &reg1); 623 pci_read_config_dword(pdev, PCI_DEV_REG1, &reg1);
625 624
@@ -1636,8 +1635,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1636 printk(KERN_DEBUG "%s: tx done %u\n", 1635 printk(KERN_DEBUG "%s: tx done %u\n",
1637 dev->name, idx); 1636 dev->name, idx);
1638 1637
1639 sky2->net_stats.tx_packets++; 1638 dev->stats.tx_packets++;
1640 sky2->net_stats.tx_bytes += re->skb->len; 1639 dev->stats.tx_bytes += re->skb->len;
1641 1640
1642 dev_kfree_skb_any(re->skb); 1641 dev_kfree_skb_any(re->skb);
1643 sky2->tx_next = RING_NEXT(idx, TX_RING_SIZE); 1642 sky2->tx_next = RING_NEXT(idx, TX_RING_SIZE);
@@ -2205,16 +2204,16 @@ resubmit:
2205len_error: 2204len_error:
2206 /* Truncation of overlength packets 2205 /* Truncation of overlength packets
2207 causes PHY length to not match MAC length */ 2206 causes PHY length to not match MAC length */
2208 ++sky2->net_stats.rx_length_errors; 2207 ++dev->stats.rx_length_errors;
2209 if (netif_msg_rx_err(sky2) && net_ratelimit()) 2208 if (netif_msg_rx_err(sky2) && net_ratelimit())
2210 pr_info(PFX "%s: rx length error: status %#x length %d\n", 2209 pr_info(PFX "%s: rx length error: status %#x length %d\n",
2211 dev->name, status, length); 2210 dev->name, status, length);
2212 goto resubmit; 2211 goto resubmit;
2213 2212
2214error: 2213error:
2215 ++sky2->net_stats.rx_errors; 2214 ++dev->stats.rx_errors;
2216 if (status & GMR_FS_RX_FF_OV) { 2215 if (status & GMR_FS_RX_FF_OV) {
2217 sky2->net_stats.rx_over_errors++; 2216 dev->stats.rx_over_errors++;
2218 goto resubmit; 2217 goto resubmit;
2219 } 2218 }
2220 2219
@@ -2223,11 +2222,11 @@ error:
2223 dev->name, status, length); 2222 dev->name, status, length);
2224 2223
2225 if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE)) 2224 if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
2226 sky2->net_stats.rx_length_errors++; 2225 dev->stats.rx_length_errors++;
2227 if (status & GMR_FS_FRAGMENT) 2226 if (status & GMR_FS_FRAGMENT)
2228 sky2->net_stats.rx_frame_errors++; 2227 dev->stats.rx_frame_errors++;
2229 if (status & GMR_FS_CRC_ERR) 2228 if (status & GMR_FS_CRC_ERR)
2230 sky2->net_stats.rx_crc_errors++; 2229 dev->stats.rx_crc_errors++;
2231 2230
2232 goto resubmit; 2231 goto resubmit;
2233} 2232}
@@ -2272,7 +2271,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2272 ++rx[port]; 2271 ++rx[port];
2273 skb = sky2_receive(dev, length, status); 2272 skb = sky2_receive(dev, length, status);
2274 if (unlikely(!skb)) { 2273 if (unlikely(!skb)) {
2275 sky2->net_stats.rx_dropped++; 2274 dev->stats.rx_dropped++;
2276 break; 2275 break;
2277 } 2276 }
2278 2277
@@ -2287,8 +2286,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2287 } 2286 }
2288 2287
2289 skb->protocol = eth_type_trans(skb, dev); 2288 skb->protocol = eth_type_trans(skb, dev);
2290 sky2->net_stats.rx_packets++; 2289 dev->stats.rx_packets++;
2291 sky2->net_stats.rx_bytes += skb->len; 2290 dev->stats.rx_bytes += skb->len;
2292 dev->last_rx = jiffies; 2291 dev->last_rx = jiffies;
2293 2292
2294#ifdef SKY2_VLAN_TAG_USED 2293#ifdef SKY2_VLAN_TAG_USED
@@ -2479,12 +2478,12 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
2479 gma_read16(hw, port, GM_TX_IRQ_SRC); 2478 gma_read16(hw, port, GM_TX_IRQ_SRC);
2480 2479
2481 if (status & GM_IS_RX_FF_OR) { 2480 if (status & GM_IS_RX_FF_OR) {
2482 ++sky2->net_stats.rx_fifo_errors; 2481 ++dev->stats.rx_fifo_errors;
2483 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); 2482 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
2484 } 2483 }
2485 2484
2486 if (status & GM_IS_TX_FF_UR) { 2485 if (status & GM_IS_TX_FF_UR) {
2487 ++sky2->net_stats.tx_fifo_errors; 2486 ++dev->stats.tx_fifo_errors;
2488 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); 2487 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
2489 } 2488 }
2490} 2489}
@@ -3223,12 +3222,6 @@ static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
3223 } 3222 }
3224} 3223}
3225 3224
3226static struct net_device_stats *sky2_get_stats(struct net_device *dev)
3227{
3228 struct sky2_port *sky2 = netdev_priv(dev);
3229 return &sky2->net_stats;
3230}
3231
3232static int sky2_set_mac_address(struct net_device *dev, void *p) 3225static int sky2_set_mac_address(struct net_device *dev, void *p)
3233{ 3226{
3234 struct sky2_port *sky2 = netdev_priv(dev); 3227 struct sky2_port *sky2 = netdev_priv(dev);
@@ -3569,20 +3562,64 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3569{ 3562{
3570 const struct sky2_port *sky2 = netdev_priv(dev); 3563 const struct sky2_port *sky2 = netdev_priv(dev);
3571 const void __iomem *io = sky2->hw->regs; 3564 const void __iomem *io = sky2->hw->regs;
3565 unsigned int b;
3572 3566
3573 regs->version = 1; 3567 regs->version = 1;
3574 memset(p, 0, regs->len);
3575
3576 memcpy_fromio(p, io, B3_RAM_ADDR);
3577 3568
3578 /* skip diagnostic ram region */ 3569 for (b = 0; b < 128; b++) {
3579 memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, 0x2000 - B3_RI_WTO_R1); 3570 /* This complicated switch statement is to make sure and
3571 * only access regions that are unreserved.
3572 * Some blocks are only valid on dual port cards.
3573 * and block 3 has some special diagnostic registers that
3574 * are poison.
3575 */
3576 switch (b) {
3577 case 3:
3578 /* skip diagnostic ram region */
3579 memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
3580 break;
3580 3581
3581 /* copy GMAC registers */ 3582 /* dual port cards only */
3582 memcpy_fromio(p + BASE_GMAC_1, io + BASE_GMAC_1, 0x1000); 3583 case 5: /* Tx Arbiter 2 */
3583 if (sky2->hw->ports > 1) 3584 case 9: /* RX2 */
3584 memcpy_fromio(p + BASE_GMAC_2, io + BASE_GMAC_2, 0x1000); 3585 case 14 ... 15: /* TX2 */
3586 case 17: case 19: /* Ram Buffer 2 */
3587 case 22 ... 23: /* Tx Ram Buffer 2 */
3588 case 25: /* Rx MAC Fifo 1 */
3589 case 27: /* Tx MAC Fifo 2 */
3590 case 31: /* GPHY 2 */
3591 case 40 ... 47: /* Pattern Ram 2 */
3592 case 52: case 54: /* TCP Segmentation 2 */
3593 case 112 ... 116: /* GMAC 2 */
3594 if (sky2->hw->ports == 1)
3595 goto reserved;
3596 /* fall through */
3597 case 0: /* Control */
3598 case 2: /* Mac address */
3599 case 4: /* Tx Arbiter 1 */
3600 case 7: /* PCI express reg */
3601 case 8: /* RX1 */
3602 case 12 ... 13: /* TX1 */
3603 case 16: case 18:/* Rx Ram Buffer 1 */
3604 case 20 ... 21: /* Tx Ram Buffer 1 */
3605 case 24: /* Rx MAC Fifo 1 */
3606 case 26: /* Tx MAC Fifo 1 */
3607 case 28 ... 29: /* Descriptor and status unit */
3608 case 30: /* GPHY 1*/
3609 case 32 ... 39: /* Pattern Ram 1 */
3610 case 48: case 50: /* TCP Segmentation 1 */
3611 case 56 ... 60: /* PCI space */
3612 case 80 ... 84: /* GMAC 1 */
3613 memcpy_fromio(p, io, 128);
3614 break;
3615 default:
3616reserved:
3617 memset(p, 0, 128);
3618 }
3585 3619
3620 p += 128;
3621 io += 128;
3622 }
3586} 3623}
3587 3624
3588/* In order to do Jumbo packets on these chips, need to turn off the 3625/* In order to do Jumbo packets on these chips, need to turn off the
@@ -3934,7 +3971,6 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3934 dev->stop = sky2_down; 3971 dev->stop = sky2_down;
3935 dev->do_ioctl = sky2_ioctl; 3972 dev->do_ioctl = sky2_ioctl;
3936 dev->hard_start_xmit = sky2_xmit_frame; 3973 dev->hard_start_xmit = sky2_xmit_frame;
3937 dev->get_stats = sky2_get_stats;
3938 dev->set_multicast_list = sky2_set_multicast; 3974 dev->set_multicast_list = sky2_set_multicast;
3939 dev->set_mac_address = sky2_set_mac_address; 3975 dev->set_mac_address = sky2_set_mac_address;
3940 dev->change_mtu = sky2_change_mtu; 3976 dev->change_mtu = sky2_change_mtu;
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index f4a3c2f403..49ee264064 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2031,8 +2031,6 @@ struct sky2_port {
2031#ifdef CONFIG_SKY2_DEBUG 2031#ifdef CONFIG_SKY2_DEBUG
2032 struct dentry *debugfs; 2032 struct dentry *debugfs;
2033#endif 2033#endif
2034 struct net_device_stats net_stats;
2035
2036}; 2034};
2037 2035
2038struct sky2_hw { 2036struct sky2_hw {
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index a402b5c018..30b1cca814 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3576,7 +3576,7 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
3576 if (sblk->idx[0].tx_consumer != tp->tx_cons) { 3576 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3577 tg3_tx(tp); 3577 tg3_tx(tp);
3578 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) 3578 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3579 return 0; 3579 return work_done;
3580 } 3580 }
3581 3581
3582 /* run RX thread, within the bounds set by NAPI. 3582 /* run RX thread, within the bounds set by NAPI.
@@ -3593,6 +3593,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
3593{ 3593{
3594 struct tg3 *tp = container_of(napi, struct tg3, napi); 3594 struct tg3 *tp = container_of(napi, struct tg3, napi);
3595 int work_done = 0; 3595 int work_done = 0;
3596 struct tg3_hw_status *sblk = tp->hw_status;
3596 3597
3597 while (1) { 3598 while (1) {
3598 work_done = tg3_poll_work(tp, work_done, budget); 3599 work_done = tg3_poll_work(tp, work_done, budget);
@@ -3603,15 +3604,17 @@ static int tg3_poll(struct napi_struct *napi, int budget)
3603 if (unlikely(work_done >= budget)) 3604 if (unlikely(work_done >= budget))
3604 break; 3605 break;
3605 3606
3606 if (likely(!tg3_has_work(tp))) { 3607 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3607 struct tg3_hw_status *sblk = tp->hw_status; 3608 /* tp->last_tag is used in tg3_restart_ints() below
3608 3609 * to tell the hw how much work has been processed,
3609 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { 3610 * so we must read it before checking for more work.
3610 tp->last_tag = sblk->status_tag; 3611 */
3611 rmb(); 3612 tp->last_tag = sblk->status_tag;
3612 } else 3613 rmb();
3613 sblk->status &= ~SD_STATUS_UPDATED; 3614 } else
3615 sblk->status &= ~SD_STATUS_UPDATED;
3614 3616
3617 if (likely(!tg3_has_work(tp))) {
3615 netif_rx_complete(tp->dev, napi); 3618 netif_rx_complete(tp->dev, napi);
3616 tg3_restart_ints(tp); 3619 tg3_restart_ints(tp);
3617 break; 3620 break;
@@ -3621,9 +3624,10 @@ static int tg3_poll(struct napi_struct *napi, int budget)
3621 return work_done; 3624 return work_done;
3622 3625
3623tx_recovery: 3626tx_recovery:
3627 /* work_done is guaranteed to be less than budget. */
3624 netif_rx_complete(tp->dev, napi); 3628 netif_rx_complete(tp->dev, napi);
3625 schedule_work(&tp->reset_task); 3629 schedule_work(&tp->reset_task);
3626 return 0; 3630 return work_done;
3627} 3631}
3628 3632
3629static void tg3_irq_quiesce(struct tg3 *tp) 3633static void tg3_irq_quiesce(struct tg3 *tp)
@@ -6985,9 +6989,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6985 break; 6989 break;
6986 }; 6990 };
6987 6991
6988 /* Write our heartbeat update interval to APE. */ 6992 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6989 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, 6993 /* Write our heartbeat update interval to APE. */
6990 APE_HOST_HEARTBEAT_INT_DISABLE); 6994 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
6995 APE_HOST_HEARTBEAT_INT_DISABLE);
6991 6996
6992 tg3_write_sig_post_reset(tp, RESET_KIND_INIT); 6997 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6993 6998
diff --git a/drivers/net/wireless/b43/phy.c b/drivers/net/wireless/b43/phy.c
index 5f7ffa0a76..3d4ed647c3 100644
--- a/drivers/net/wireless/b43/phy.c
+++ b/drivers/net/wireless/b43/phy.c
@@ -26,6 +26,7 @@
26*/ 26*/
27 27
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/io.h>
29#include <linux/types.h> 30#include <linux/types.h>
30 31
31#include "b43.h" 32#include "b43.h"
diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
index 34a44c1b63..3488f2447b 100644
--- a/drivers/net/wireless/b43/pio.h
+++ b/drivers/net/wireless/b43/pio.h
@@ -4,6 +4,7 @@
4#include "b43.h" 4#include "b43.h"
5 5
6#include <linux/interrupt.h> 6#include <linux/interrupt.h>
7#include <linux/io.h>
7#include <linux/list.h> 8#include <linux/list.h>
8#include <linux/skbuff.h> 9#include <linux/skbuff.h>
9 10
diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/b43/sysfs.c
index fcb777383e..f4faff6a7d 100644
--- a/drivers/net/wireless/b43/sysfs.c
+++ b/drivers/net/wireless/b43/sysfs.c
@@ -23,13 +23,14 @@
23 23
24*/ 24*/
25 25
26#include <linux/capability.h>
27#include <linux/io.h>
28
26#include "b43.h" 29#include "b43.h"
27#include "sysfs.h" 30#include "sysfs.h"
28#include "main.h" 31#include "main.h"
29#include "phy.h" 32#include "phy.h"
30 33
31#include <linux/capability.h>
32
33#define GENERIC_FILESIZE 64 34#define GENERIC_FILESIZE 64
34 35
35static int get_integer(const char *buf, size_t count) 36static int get_integer(const char *buf, size_t count)
diff --git a/drivers/pci/hotplug.c b/drivers/pci/hotplug.c
index 1c97e7dd13..2b5352a7df 100644
--- a/drivers/pci/hotplug.c
+++ b/drivers/pci/hotplug.c
@@ -3,12 +3,9 @@
3#include <linux/module.h> 3#include <linux/module.h>
4#include "pci.h" 4#include "pci.h"
5 5
6int pci_uevent(struct device *dev, char **envp, int num_envp, 6int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
7 char *buffer, int buffer_size)
8{ 7{
9 struct pci_dev *pdev; 8 struct pci_dev *pdev;
10 int i = 0;
11 int length = 0;
12 9
13 if (!dev) 10 if (!dev)
14 return -ENODEV; 11 return -ENODEV;
@@ -17,37 +14,24 @@ int pci_uevent(struct device *dev, char **envp, int num_envp,
17 if (!pdev) 14 if (!pdev)
18 return -ENODEV; 15 return -ENODEV;
19 16
20 if (add_uevent_var(envp, num_envp, &i, 17 if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class))
21 buffer, buffer_size, &length,
22 "PCI_CLASS=%04X", pdev->class))
23 return -ENOMEM; 18 return -ENOMEM;
24 19
25 if (add_uevent_var(envp, num_envp, &i, 20 if (add_uevent_var(env, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device))
26 buffer, buffer_size, &length,
27 "PCI_ID=%04X:%04X", pdev->vendor, pdev->device))
28 return -ENOMEM; 21 return -ENOMEM;
29 22
30 if (add_uevent_var(envp, num_envp, &i, 23 if (add_uevent_var(env, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor,
31 buffer, buffer_size, &length,
32 "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor,
33 pdev->subsystem_device)) 24 pdev->subsystem_device))
34 return -ENOMEM; 25 return -ENOMEM;
35 26
36 if (add_uevent_var(envp, num_envp, &i, 27 if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
37 buffer, buffer_size, &length,
38 "PCI_SLOT_NAME=%s", pci_name(pdev)))
39 return -ENOMEM; 28 return -ENOMEM;
40 29
41 if (add_uevent_var(envp, num_envp, &i, 30 if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x",
42 buffer, buffer_size, &length,
43 "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x",
44 pdev->vendor, pdev->device, 31 pdev->vendor, pdev->device,
45 pdev->subsystem_vendor, pdev->subsystem_device, 32 pdev->subsystem_vendor, pdev->subsystem_device,
46 (u8)(pdev->class >> 16), (u8)(pdev->class >> 8), 33 (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
47 (u8)(pdev->class))) 34 (u8)(pdev->class)))
48 return -ENOMEM; 35 return -ENOMEM;
49
50 envp[i] = NULL;
51
52 return 0; 36 return 0;
53} 37}
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 2305cc450a..a96b739b2d 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -549,7 +549,7 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
549 * slot. */ 549 * slot. */
550 bus->number = tbus; 550 bus->number = tbus;
551 pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0), 551 pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0),
552 PCI_REVISION_ID, &work); 552 PCI_CLASS_REVISION, &work);
553 553
554 if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) { 554 if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
555 pci_bus_read_config_dword(bus, 555 pci_bus_read_config_dword(bus,
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 37d72f123a..3ef0a4875a 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -37,6 +37,7 @@
37#include <linux/smp_lock.h> 37#include <linux/smp_lock.h>
38#include <linux/pci.h> 38#include <linux/pci.h>
39#include <linux/pci_hotplug.h> 39#include <linux/pci_hotplug.h>
40#include <linux/kthread.h>
40#include "cpqphp.h" 41#include "cpqphp.h"
41 42
42static u32 configure_new_device(struct controller* ctrl, struct pci_func *func, 43static u32 configure_new_device(struct controller* ctrl, struct pci_func *func,
@@ -45,34 +46,20 @@ static int configure_new_function(struct controller* ctrl, struct pci_func *func
45 u8 behind_bridge, struct resource_lists *resources); 46 u8 behind_bridge, struct resource_lists *resources);
46static void interrupt_event_handler(struct controller *ctrl); 47static void interrupt_event_handler(struct controller *ctrl);
47 48
48static struct semaphore event_semaphore; /* mutex for process loop (up if something to process) */
49static struct semaphore event_exit; /* guard ensure thread has exited before calling it quits */
50static int event_finished;
51static unsigned long pushbutton_pending; /* = 0 */
52 49
53/* things needed for the long_delay function */ 50static struct task_struct *cpqhp_event_thread;
54static struct semaphore delay_sem; 51static unsigned long pushbutton_pending; /* = 0 */
55static wait_queue_head_t delay_wait;
56 52
57/* delay is in jiffies to wait for */ 53/* delay is in jiffies to wait for */
58static void long_delay(int delay) 54static void long_delay(int delay)
59{ 55{
60 DECLARE_WAITQUEUE(wait, current); 56 /*
61 57 * XXX(hch): if someone is bored please convert all callers
62 /* only allow 1 customer into the delay queue at once 58 * to call msleep_interruptible directly. They really want
63 * yes this makes some people wait even longer, but who really cares? 59 * to specify timeouts in natural units and spend a lot of
64 * this is for _huge_ delays to make the hardware happy as the 60 * effort converting them to jiffies..
65 * signals bounce around
66 */ 61 */
67 down (&delay_sem);
68
69 init_waitqueue_head(&delay_wait);
70
71 add_wait_queue(&delay_wait, &wait);
72 msleep_interruptible(jiffies_to_msecs(delay)); 62 msleep_interruptible(jiffies_to_msecs(delay));
73 remove_wait_queue(&delay_wait, &wait);
74
75 up(&delay_sem);
76} 63}
77 64
78 65
@@ -955,8 +942,8 @@ irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data)
955 } 942 }
956 943
957 if (schedule_flag) { 944 if (schedule_flag) {
958 up(&event_semaphore); 945 wake_up_process(cpqhp_event_thread);
959 dbg("Signal event_semaphore\n"); 946 dbg("Waking even thread");
960 } 947 }
961 return IRQ_HANDLED; 948 return IRQ_HANDLED;
962} 949}
@@ -973,16 +960,13 @@ struct pci_func *cpqhp_slot_create(u8 busnumber)
973 struct pci_func *new_slot; 960 struct pci_func *new_slot;
974 struct pci_func *next; 961 struct pci_func *next;
975 962
976 new_slot = kmalloc(sizeof(*new_slot), GFP_KERNEL); 963 new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL);
977
978 if (new_slot == NULL) { 964 if (new_slot == NULL) {
979 /* I'm not dead yet! 965 /* I'm not dead yet!
980 * You will be. */ 966 * You will be. */
981 return new_slot; 967 return new_slot;
982 } 968 }
983 969
984 memset(new_slot, 0, sizeof(struct pci_func));
985
986 new_slot->next = NULL; 970 new_slot->next = NULL;
987 new_slot->configured = 1; 971 new_slot->configured = 1;
988 972
@@ -1738,7 +1722,7 @@ static u32 remove_board(struct pci_func * func, u32 replace_flag, struct control
1738static void pushbutton_helper_thread(unsigned long data) 1722static void pushbutton_helper_thread(unsigned long data)
1739{ 1723{
1740 pushbutton_pending = data; 1724 pushbutton_pending = data;
1741 up(&event_semaphore); 1725 wake_up_process(cpqhp_event_thread);
1742} 1726}
1743 1727
1744 1728
@@ -1747,13 +1731,13 @@ static int event_thread(void* data)
1747{ 1731{
1748 struct controller *ctrl; 1732 struct controller *ctrl;
1749 1733
1750 daemonize("phpd_event");
1751
1752 while (1) { 1734 while (1) {
1753 dbg("!!!!event_thread sleeping\n"); 1735 dbg("!!!!event_thread sleeping\n");
1754 down_interruptible (&event_semaphore); 1736 set_current_state(TASK_INTERRUPTIBLE);
1755 dbg("event_thread woken finished = %d\n", event_finished); 1737 schedule();
1756 if (event_finished) break; 1738
1739 if (kthread_should_stop())
1740 break;
1757 /* Do stuff here */ 1741 /* Do stuff here */
1758 if (pushbutton_pending) 1742 if (pushbutton_pending)
1759 cpqhp_pushbutton_thread(pushbutton_pending); 1743 cpqhp_pushbutton_thread(pushbutton_pending);
@@ -1762,38 +1746,24 @@ static int event_thread(void* data)
1762 interrupt_event_handler(ctrl); 1746 interrupt_event_handler(ctrl);
1763 } 1747 }
1764 dbg("event_thread signals exit\n"); 1748 dbg("event_thread signals exit\n");
1765 up(&event_exit);
1766 return 0; 1749 return 0;
1767} 1750}
1768 1751
1769
1770int cpqhp_event_start_thread(void) 1752int cpqhp_event_start_thread(void)
1771{ 1753{
1772 int pid; 1754 cpqhp_event_thread = kthread_run(event_thread, NULL, "phpd_event");
1773 1755 if (IS_ERR(cpqhp_event_thread)) {
1774 /* initialize our semaphores */
1775 init_MUTEX(&delay_sem);
1776 init_MUTEX_LOCKED(&event_semaphore);
1777 init_MUTEX_LOCKED(&event_exit);
1778 event_finished=0;
1779
1780 pid = kernel_thread(event_thread, NULL, 0);
1781 if (pid < 0) {
1782 err ("Can't start up our event thread\n"); 1756 err ("Can't start up our event thread\n");
1783 return -1; 1757 return PTR_ERR(cpqhp_event_thread);
1784 } 1758 }
1785 dbg("Our event thread pid = %d\n", pid); 1759
1786 return 0; 1760 return 0;
1787} 1761}
1788 1762
1789 1763
1790void cpqhp_event_stop_thread(void) 1764void cpqhp_event_stop_thread(void)
1791{ 1765{
1792 event_finished = 1; 1766 kthread_stop(cpqhp_event_thread);
1793 dbg("event_thread finish command given\n");
1794 up(&event_semaphore);
1795 dbg("wait for event_thread to exit\n");
1796 down(&event_exit);
1797} 1767}
1798 1768
1799 1769
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index d06ccb69e4..c31e7bf345 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -35,7 +35,7 @@
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/sched.h> 37#include <linux/sched.h>
38 38#include <linux/kthread.h>
39#include "ibmphp.h" 39#include "ibmphp.h"
40 40
41static int to_debug = 0; 41static int to_debug = 0;
@@ -101,12 +101,11 @@ static int to_debug = 0;
101//---------------------------------------------------------------------------- 101//----------------------------------------------------------------------------
102// global variables 102// global variables
103//---------------------------------------------------------------------------- 103//----------------------------------------------------------------------------
104static int ibmphp_shutdown;
105static int tid_poll;
106static struct mutex sem_hpcaccess; // lock access to HPC 104static struct mutex sem_hpcaccess; // lock access to HPC
107static struct semaphore semOperations; // lock all operations and 105static struct semaphore semOperations; // lock all operations and
108 // access to data structures 106 // access to data structures
109static struct semaphore sem_exit; // make sure polling thread goes away 107static struct semaphore sem_exit; // make sure polling thread goes away
108static struct task_struct *ibmphp_poll_thread;
110//---------------------------------------------------------------------------- 109//----------------------------------------------------------------------------
111// local function prototypes 110// local function prototypes
112//---------------------------------------------------------------------------- 111//----------------------------------------------------------------------------
@@ -116,10 +115,9 @@ static u8 hpc_writecmdtoindex (u8, u8);
116static u8 hpc_readcmdtoindex (u8, u8); 115static u8 hpc_readcmdtoindex (u8, u8);
117static void get_hpc_access (void); 116static void get_hpc_access (void);
118static void free_hpc_access (void); 117static void free_hpc_access (void);
119static void poll_hpc (void); 118static int poll_hpc(void *data);
120static int process_changeinstatus (struct slot *, struct slot *); 119static int process_changeinstatus (struct slot *, struct slot *);
121static int process_changeinlatch (u8, u8, struct controller *); 120static int process_changeinlatch (u8, u8, struct controller *);
122static int hpc_poll_thread (void *);
123static int hpc_wait_ctlr_notworking (int, struct controller *, void __iomem *, u8 *); 121static int hpc_wait_ctlr_notworking (int, struct controller *, void __iomem *, u8 *);
124//---------------------------------------------------------------------------- 122//----------------------------------------------------------------------------
125 123
@@ -137,8 +135,6 @@ void __init ibmphp_hpc_initvars (void)
137 init_MUTEX (&semOperations); 135 init_MUTEX (&semOperations);
138 init_MUTEX_LOCKED (&sem_exit); 136 init_MUTEX_LOCKED (&sem_exit);
139 to_debug = 0; 137 to_debug = 0;
140 ibmphp_shutdown = 0;
141 tid_poll = 0;
142 138
143 debug ("%s - Exit\n", __FUNCTION__); 139 debug ("%s - Exit\n", __FUNCTION__);
144} 140}
@@ -819,7 +815,7 @@ void ibmphp_unlock_operations (void)
819#define POLL_LATCH_REGISTER 0 815#define POLL_LATCH_REGISTER 0
820#define POLL_SLOTS 1 816#define POLL_SLOTS 1
821#define POLL_SLEEP 2 817#define POLL_SLEEP 2
822static void poll_hpc (void) 818static int poll_hpc(void *data)
823{ 819{
824 struct slot myslot; 820 struct slot myslot;
825 struct slot *pslot = NULL; 821 struct slot *pslot = NULL;
@@ -833,10 +829,7 @@ static void poll_hpc (void)
833 829
834 debug ("%s - Entry\n", __FUNCTION__); 830 debug ("%s - Entry\n", __FUNCTION__);
835 831
836 while (!ibmphp_shutdown) { 832 while (!kthread_should_stop()) {
837 if (ibmphp_shutdown)
838 break;
839
840 /* try to get the lock to do some kind of hardware access */ 833 /* try to get the lock to do some kind of hardware access */
841 down (&semOperations); 834 down (&semOperations);
842 835
@@ -896,7 +889,7 @@ static void poll_hpc (void)
896 up (&semOperations); 889 up (&semOperations);
897 msleep(POLL_INTERVAL_SEC * 1000); 890 msleep(POLL_INTERVAL_SEC * 1000);
898 891
899 if (ibmphp_shutdown) 892 if (kthread_should_stop())
900 break; 893 break;
901 894
902 down (&semOperations); 895 down (&semOperations);
@@ -915,6 +908,7 @@ static void poll_hpc (void)
915 } 908 }
916 up (&sem_exit); 909 up (&sem_exit);
917 debug ("%s - Exit\n", __FUNCTION__); 910 debug ("%s - Exit\n", __FUNCTION__);
911 return 0;
918} 912}
919 913
920 914
@@ -1050,47 +1044,20 @@ static int process_changeinlatch (u8 old, u8 new, struct controller *ctrl)
1050} 1044}
1051 1045
1052/*---------------------------------------------------------------------- 1046/*----------------------------------------------------------------------
1053* Name: hpc_poll_thread
1054*
1055* Action: polling
1056*
1057* Return 0
1058* Value:
1059*---------------------------------------------------------------------*/
1060static int hpc_poll_thread (void *data)
1061{
1062 debug ("%s - Entry\n", __FUNCTION__);
1063
1064 daemonize("hpc_poll");
1065 allow_signal(SIGKILL);
1066
1067 poll_hpc ();
1068
1069 tid_poll = 0;
1070 debug ("%s - Exit\n", __FUNCTION__);
1071 return 0;
1072}
1073
1074
1075/*----------------------------------------------------------------------
1076* Name: ibmphp_hpc_start_poll_thread 1047* Name: ibmphp_hpc_start_poll_thread
1077* 1048*
1078* Action: start polling thread 1049* Action: start polling thread
1079*---------------------------------------------------------------------*/ 1050*---------------------------------------------------------------------*/
1080int __init ibmphp_hpc_start_poll_thread (void) 1051int __init ibmphp_hpc_start_poll_thread (void)
1081{ 1052{
1082 int rc = 0;
1083
1084 debug ("%s - Entry\n", __FUNCTION__); 1053 debug ("%s - Entry\n", __FUNCTION__);
1085 1054
1086 tid_poll = kernel_thread (hpc_poll_thread, NULL, 0); 1055 ibmphp_poll_thread = kthread_run(poll_hpc, NULL, "hpc_poll");
1087 if (tid_poll < 0) { 1056 if (IS_ERR(ibmphp_poll_thread)) {
1088 err ("%s - Error, thread not started\n", __FUNCTION__); 1057 err ("%s - Error, thread not started\n", __FUNCTION__);
1089 rc = -1; 1058 return PTR_ERR(ibmphp_poll_thread);
1090 } 1059 }
1091 1060 return 0;
1092 debug ("%s - Exit tid_poll[%d] rc[%d]\n", __FUNCTION__, tid_poll, rc);
1093 return rc;
1094} 1061}
1095 1062
1096/*---------------------------------------------------------------------- 1063/*----------------------------------------------------------------------
@@ -1102,7 +1069,7 @@ void __exit ibmphp_hpc_stop_poll_thread (void)
1102{ 1069{
1103 debug ("%s - Entry\n", __FUNCTION__); 1070 debug ("%s - Entry\n", __FUNCTION__);
1104 1071
1105 ibmphp_shutdown = 1; 1072 kthread_stop(ibmphp_poll_thread);
1106 debug ("before locking operations \n"); 1073 debug ("before locking operations \n");
1107 ibmphp_lock_operations (); 1074 ibmphp_lock_operations ();
1108 debug ("after locking operations \n"); 1075 debug ("after locking operations \n");
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index bd433ef6bf..f0eba534f8 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -694,66 +694,6 @@ int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot,
694 if ((slot == NULL) || (info == NULL)) 694 if ((slot == NULL) || (info == NULL))
695 return -ENODEV; 695 return -ENODEV;
696 696
697 /*
698 * check all fields in the info structure, and update timestamps
699 * for the files referring to the fields that have now changed.
700 */
701 if ((has_power_file(slot) == 0) &&
702 (slot->info->power_status != info->power_status)) {
703 retval = sysfs_update_file(&slot->kobj,
704 &hotplug_slot_attr_power.attr);
705 if (retval)
706 return retval;
707 }
708
709 if ((has_attention_file(slot) == 0) &&
710 (slot->info->attention_status != info->attention_status)) {
711 retval = sysfs_update_file(&slot->kobj,
712 &hotplug_slot_attr_attention.attr);
713 if (retval)
714 return retval;
715 }
716
717 if ((has_latch_file(slot) == 0) &&
718 (slot->info->latch_status != info->latch_status)) {
719 retval = sysfs_update_file(&slot->kobj,
720 &hotplug_slot_attr_latch.attr);
721 if (retval)
722 return retval;
723 }
724
725 if ((has_adapter_file(slot) == 0) &&
726 (slot->info->adapter_status != info->adapter_status)) {
727 retval = sysfs_update_file(&slot->kobj,
728 &hotplug_slot_attr_presence.attr);
729 if (retval)
730 return retval;
731 }
732
733 if ((has_address_file(slot) == 0) &&
734 (slot->info->address != info->address)) {
735 retval = sysfs_update_file(&slot->kobj,
736 &hotplug_slot_attr_address.attr);
737 if (retval)
738 return retval;
739 }
740
741 if ((has_max_bus_speed_file(slot) == 0) &&
742 (slot->info->max_bus_speed != info->max_bus_speed)) {
743 retval = sysfs_update_file(&slot->kobj,
744 &hotplug_slot_attr_max_bus_speed.attr);
745 if (retval)
746 return retval;
747 }
748
749 if ((has_cur_bus_speed_file(slot) == 0) &&
750 (slot->info->cur_bus_speed != info->cur_bus_speed)) {
751 retval = sysfs_update_file(&slot->kobj,
752 &hotplug_slot_attr_cur_bus_speed.attr);
753 if (retval)
754 return retval;
755 }
756
757 memcpy (slot->info, info, sizeof (struct hotplug_slot_info)); 697 memcpy (slot->info, info, sizeof (struct hotplug_slot_info));
758 698
759 return 0; 699 return 0;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index e5d3f0b4f4..6462ac3b40 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -304,8 +304,8 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
304 dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); 304 dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name);
305 305
306 hotplug_slot->info->attention_status = status; 306 hotplug_slot->info->attention_status = status;
307 307
308 if (ATTN_LED(slot->ctrl->ctrlcap)) 308 if (ATTN_LED(slot->ctrl->ctrlcap))
309 slot->hpc_ops->set_attention_status(slot, status); 309 slot->hpc_ops->set_attention_status(slot, status);
310 310
311 return 0; 311 return 0;
@@ -405,7 +405,7 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
405 int retval; 405 int retval;
406 406
407 dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); 407 dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name);
408 408
409 retval = slot->hpc_ops->get_max_bus_speed(slot, value); 409 retval = slot->hpc_ops->get_max_bus_speed(slot, value);
410 if (retval < 0) 410 if (retval < 0)
411 *value = PCI_SPEED_UNKNOWN; 411 *value = PCI_SPEED_UNKNOWN;
@@ -419,7 +419,7 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
419 int retval; 419 int retval;
420 420
421 dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); 421 dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name);
422 422
423 retval = slot->hpc_ops->get_cur_bus_speed(slot, value); 423 retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
424 if (retval < 0) 424 if (retval < 0)
425 *value = PCI_SPEED_UNKNOWN; 425 *value = PCI_SPEED_UNKNOWN;
@@ -434,7 +434,7 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
434 struct slot *t_slot; 434 struct slot *t_slot;
435 u8 value; 435 u8 value;
436 struct pci_dev *pdev; 436 struct pci_dev *pdev;
437 437
438 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 438 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
439 if (!ctrl) { 439 if (!ctrl) {
440 err("%s : out of memory\n", __FUNCTION__); 440 err("%s : out of memory\n", __FUNCTION__);
@@ -502,23 +502,23 @@ static void pciehp_remove (struct pcie_device *dev)
502#ifdef CONFIG_PM 502#ifdef CONFIG_PM
503static int pciehp_suspend (struct pcie_device *dev, pm_message_t state) 503static int pciehp_suspend (struct pcie_device *dev, pm_message_t state)
504{ 504{
505 printk("%s ENTRY\n", __FUNCTION__); 505 printk("%s ENTRY\n", __FUNCTION__);
506 return 0; 506 return 0;
507} 507}
508 508
509static int pciehp_resume (struct pcie_device *dev) 509static int pciehp_resume (struct pcie_device *dev)
510{ 510{
511 printk("%s ENTRY\n", __FUNCTION__); 511 printk("%s ENTRY\n", __FUNCTION__);
512 return 0; 512 return 0;
513} 513}
514#endif 514#endif
515 515
516static struct pcie_port_service_id port_pci_ids[] = { { 516static struct pcie_port_service_id port_pci_ids[] = { {
517 .vendor = PCI_ANY_ID, 517 .vendor = PCI_ANY_ID,
518 .device = PCI_ANY_ID, 518 .device = PCI_ANY_ID,
519 .port_type = PCIE_ANY_PORT, 519 .port_type = PCIE_ANY_PORT,
520 .service_type = PCIE_PORT_SERVICE_HP, 520 .service_type = PCIE_PORT_SERVICE_HP,
521 .driver_data = 0, 521 .driver_data = 0,
522 }, { /* end: all zeroes */ } 522 }, { /* end: all zeroes */ }
523}; 523};
524static const char device_name[] = "hpdriver"; 524static const char device_name[] = "hpdriver";
@@ -540,10 +540,6 @@ static int __init pcied_init(void)
540{ 540{
541 int retval = 0; 541 int retval = 0;
542 542
543#ifdef CONFIG_HOTPLUG_PCI_PCIE_POLL_EVENT_MODE
544 pciehp_poll_mode = 1;
545#endif
546
547 retval = pcie_port_service_register(&hpdriver_portdrv); 543 retval = pcie_port_service_register(&hpdriver_portdrv);
548 dbg("pcie_port_service_register = %d\n", retval); 544 dbg("pcie_port_service_register = %d\n", retval);
549 info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); 545 info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 98e541ffef..c8cb49c5a7 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -173,7 +173,7 @@ u8 pciehp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
173 return 1; 173 return 1;
174} 174}
175 175
176/* The following routines constitute the bulk of the 176/* The following routines constitute the bulk of the
177 hotplug controller logic 177 hotplug controller logic
178 */ 178 */
179 179
@@ -181,7 +181,7 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
181{ 181{
182 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ 182 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/
183 if (POWER_CTRL(ctrl->ctrlcap)) { 183 if (POWER_CTRL(ctrl->ctrlcap)) {
184 if (pslot->hpc_ops->power_off_slot(pslot)) { 184 if (pslot->hpc_ops->power_off_slot(pslot)) {
185 err("%s: Issue of Slot Power Off command failed\n", 185 err("%s: Issue of Slot Power Off command failed\n",
186 __FUNCTION__); 186 __FUNCTION__);
187 return; 187 return;
@@ -189,7 +189,7 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
189 } 189 }
190 190
191 if (PWR_LED(ctrl->ctrlcap)) 191 if (PWR_LED(ctrl->ctrlcap))
192 pslot->hpc_ops->green_led_off(pslot); 192 pslot->hpc_ops->green_led_off(pslot);
193 193
194 if (ATTN_LED(ctrl->ctrlcap)) { 194 if (ATTN_LED(ctrl->ctrlcap)) {
195 if (pslot->hpc_ops->set_attention_status(pslot, 1)) { 195 if (pslot->hpc_ops->set_attention_status(pslot, 1)) {
@@ -231,7 +231,7 @@ static int board_added(struct slot *p_slot)
231 if (retval) 231 if (retval)
232 return retval; 232 return retval;
233 } 233 }
234 234
235 if (PWR_LED(ctrl->ctrlcap)) 235 if (PWR_LED(ctrl->ctrlcap))
236 p_slot->hpc_ops->green_led_blink(p_slot); 236 p_slot->hpc_ops->green_led_blink(p_slot);
237 237
@@ -548,7 +548,7 @@ int pciehp_enable_slot(struct slot *p_slot)
548 mutex_unlock(&p_slot->ctrl->crit_sect); 548 mutex_unlock(&p_slot->ctrl->crit_sect);
549 return -ENODEV; 549 return -ENODEV;
550 } 550 }
551 if (MRL_SENS(p_slot->ctrl->ctrlcap)) { 551 if (MRL_SENS(p_slot->ctrl->ctrlcap)) {
552 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 552 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
553 if (rc || getstatus) { 553 if (rc || getstatus) {
554 info("%s: latch open on slot(%s)\n", __FUNCTION__, 554 info("%s: latch open on slot(%s)\n", __FUNCTION__,
@@ -557,8 +557,8 @@ int pciehp_enable_slot(struct slot *p_slot)
557 return -ENODEV; 557 return -ENODEV;
558 } 558 }
559 } 559 }
560 560
561 if (POWER_CTRL(p_slot->ctrl->ctrlcap)) { 561 if (POWER_CTRL(p_slot->ctrl->ctrlcap)) {
562 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 562 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
563 if (rc || getstatus) { 563 if (rc || getstatus) {
564 info("%s: already enabled on slot(%s)\n", __FUNCTION__, 564 info("%s: already enabled on slot(%s)\n", __FUNCTION__,
@@ -593,7 +593,7 @@ int pciehp_disable_slot(struct slot *p_slot)
593 /* Check to see if (latch closed, card present, power on) */ 593 /* Check to see if (latch closed, card present, power on) */
594 mutex_lock(&p_slot->ctrl->crit_sect); 594 mutex_lock(&p_slot->ctrl->crit_sect);
595 595
596 if (!HP_SUPR_RM(p_slot->ctrl->ctrlcap)) { 596 if (!HP_SUPR_RM(p_slot->ctrl->ctrlcap)) {
597 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 597 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
598 if (ret || !getstatus) { 598 if (ret || !getstatus) {
599 info("%s: no adapter on slot(%s)\n", __FUNCTION__, 599 info("%s: no adapter on slot(%s)\n", __FUNCTION__,
@@ -603,7 +603,7 @@ int pciehp_disable_slot(struct slot *p_slot)
603 } 603 }
604 } 604 }
605 605
606 if (MRL_SENS(p_slot->ctrl->ctrlcap)) { 606 if (MRL_SENS(p_slot->ctrl->ctrlcap)) {
607 ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 607 ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
608 if (ret || getstatus) { 608 if (ret || getstatus) {
609 info("%s: latch open on slot(%s)\n", __FUNCTION__, 609 info("%s: latch open on slot(%s)\n", __FUNCTION__,
@@ -613,7 +613,7 @@ int pciehp_disable_slot(struct slot *p_slot)
613 } 613 }
614 } 614 }
615 615
616 if (POWER_CTRL(p_slot->ctrl->ctrlcap)) { 616 if (POWER_CTRL(p_slot->ctrl->ctrlcap)) {
617 ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 617 ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
618 if (ret || !getstatus) { 618 if (ret || !getstatus) {
619 info("%s: already disabled slot(%s)\n", __FUNCTION__, 619 info("%s: already disabled slot(%s)\n", __FUNCTION__,
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 016eea94a8..06d025b8b1 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -39,37 +39,6 @@
39 39
40#include "../pci.h" 40#include "../pci.h"
41#include "pciehp.h" 41#include "pciehp.h"
42#ifdef DEBUG
43#define DBG_K_TRACE_ENTRY ((unsigned int)0x00000001) /* On function entry */
44#define DBG_K_TRACE_EXIT ((unsigned int)0x00000002) /* On function exit */
45#define DBG_K_INFO ((unsigned int)0x00000004) /* Info messages */
46#define DBG_K_ERROR ((unsigned int)0x00000008) /* Error messages */
47#define DBG_K_TRACE (DBG_K_TRACE_ENTRY|DBG_K_TRACE_EXIT)
48#define DBG_K_STANDARD (DBG_K_INFO|DBG_K_ERROR|DBG_K_TRACE)
49/* Redefine this flagword to set debug level */
50#define DEBUG_LEVEL DBG_K_STANDARD
51
52#define DEFINE_DBG_BUFFER char __dbg_str_buf[256];
53
54#define DBG_PRINT( dbg_flags, args... ) \
55 do { \
56 if ( DEBUG_LEVEL & ( dbg_flags ) ) \
57 { \
58 int len; \
59 len = sprintf( __dbg_str_buf, "%s:%d: %s: ", \
60 __FILE__, __LINE__, __FUNCTION__ ); \
61 sprintf( __dbg_str_buf + len, args ); \
62 printk( KERN_NOTICE "%s\n", __dbg_str_buf ); \
63 } \
64 } while (0)
65
66#define DBG_ENTER_ROUTINE DBG_PRINT (DBG_K_TRACE_ENTRY, "%s", "[Entry]");
67#define DBG_LEAVE_ROUTINE DBG_PRINT (DBG_K_TRACE_EXIT, "%s", "[Exit]");
68#else
69#define DEFINE_DBG_BUFFER
70#define DBG_ENTER_ROUTINE
71#define DBG_LEAVE_ROUTINE
72#endif /* DEBUG */
73 42
74static atomic_t pciehp_num_controllers = ATOMIC_INIT(0); 43static atomic_t pciehp_num_controllers = ATOMIC_INIT(0);
75 44
@@ -160,10 +129,10 @@ static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
160/* Link Width Encoding */ 129/* Link Width Encoding */
161#define LNK_X1 0x01 130#define LNK_X1 0x01
162#define LNK_X2 0x02 131#define LNK_X2 0x02
163#define LNK_X4 0x04 132#define LNK_X4 0x04
164#define LNK_X8 0x08 133#define LNK_X8 0x08
165#define LNK_X12 0x0C 134#define LNK_X12 0x0C
166#define LNK_X16 0x10 135#define LNK_X16 0x10
167#define LNK_X32 0x20 136#define LNK_X32 0x20
168 137
169/*Field definitions of Link Status Register */ 138/*Field definitions of Link Status Register */
@@ -221,8 +190,6 @@ static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
221#define EMI_STATE 0x0080 190#define EMI_STATE 0x0080
222#define EMI_STATUS_BIT 7 191#define EMI_STATUS_BIT 7
223 192
224DEFINE_DBG_BUFFER /* Debug string buffer for entire HPC defined here */
225
226static irqreturn_t pcie_isr(int irq, void *dev_id); 193static irqreturn_t pcie_isr(int irq, void *dev_id);
227static void start_int_poll_timer(struct controller *ctrl, int sec); 194static void start_int_poll_timer(struct controller *ctrl, int sec);
228 195
@@ -231,14 +198,12 @@ static void int_poll_timeout(unsigned long data)
231{ 198{
232 struct controller *ctrl = (struct controller *)data; 199 struct controller *ctrl = (struct controller *)data;
233 200
234 DBG_ENTER_ROUTINE
235
236 /* Poll for interrupt events. regs == NULL => polling */ 201 /* Poll for interrupt events. regs == NULL => polling */
237 pcie_isr(0, ctrl); 202 pcie_isr(0, ctrl);
238 203
239 init_timer(&ctrl->poll_timer); 204 init_timer(&ctrl->poll_timer);
240 if (!pciehp_poll_time) 205 if (!pciehp_poll_time)
241 pciehp_poll_time = 2; /* reset timer to poll in 2 secs if user doesn't specify at module installation*/ 206 pciehp_poll_time = 2; /* default polling interval is 2 sec */
242 207
243 start_int_poll_timer(ctrl, pciehp_poll_time); 208 start_int_poll_timer(ctrl, pciehp_poll_time);
244} 209}
@@ -289,8 +254,6 @@ static int pcie_write_cmd(struct slot *slot, u16 cmd, u16 mask)
289 u16 slot_ctrl; 254 u16 slot_ctrl;
290 unsigned long flags; 255 unsigned long flags;
291 256
292 DBG_ENTER_ROUTINE
293
294 mutex_lock(&ctrl->ctrl_lock); 257 mutex_lock(&ctrl->ctrl_lock);
295 258
296 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 259 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
@@ -299,7 +262,7 @@ static int pcie_write_cmd(struct slot *slot, u16 cmd, u16 mask)
299 goto out; 262 goto out;
300 } 263 }
301 264
302 if ((slot_status & CMD_COMPLETED) == CMD_COMPLETED ) { 265 if ((slot_status & CMD_COMPLETED) == CMD_COMPLETED ) {
303 /* After 1 sec and CMD_COMPLETED still not set, just 266 /* After 1 sec and CMD_COMPLETED still not set, just
304 proceed forward to issue the next command according 267 proceed forward to issue the next command according
305 to spec. Just print out the error message */ 268 to spec. Just print out the error message */
@@ -332,7 +295,6 @@ static int pcie_write_cmd(struct slot *slot, u16 cmd, u16 mask)
332 retval = pcie_wait_cmd(ctrl); 295 retval = pcie_wait_cmd(ctrl);
333 out: 296 out:
334 mutex_unlock(&ctrl->ctrl_lock); 297 mutex_unlock(&ctrl->ctrl_lock);
335 DBG_LEAVE_ROUTINE
336 return retval; 298 return retval;
337} 299}
338 300
@@ -341,8 +303,6 @@ static int hpc_check_lnk_status(struct controller *ctrl)
341 u16 lnk_status; 303 u16 lnk_status;
342 int retval = 0; 304 int retval = 0;
343 305
344 DBG_ENTER_ROUTINE
345
346 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); 306 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
347 if (retval) { 307 if (retval) {
348 err("%s: Cannot read LNKSTATUS register\n", __FUNCTION__); 308 err("%s: Cannot read LNKSTATUS register\n", __FUNCTION__);
@@ -350,26 +310,22 @@ static int hpc_check_lnk_status(struct controller *ctrl)
350 } 310 }
351 311
352 dbg("%s: lnk_status = %x\n", __FUNCTION__, lnk_status); 312 dbg("%s: lnk_status = %x\n", __FUNCTION__, lnk_status);
353 if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) || 313 if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) ||
354 !(lnk_status & NEG_LINK_WD)) { 314 !(lnk_status & NEG_LINK_WD)) {
355 err("%s : Link Training Error occurs \n", __FUNCTION__); 315 err("%s : Link Training Error occurs \n", __FUNCTION__);
356 retval = -1; 316 retval = -1;
357 return retval; 317 return retval;
358 } 318 }
359 319
360 DBG_LEAVE_ROUTINE
361 return retval; 320 return retval;
362} 321}
363 322
364
365static int hpc_get_attention_status(struct slot *slot, u8 *status) 323static int hpc_get_attention_status(struct slot *slot, u8 *status)
366{ 324{
367 struct controller *ctrl = slot->ctrl; 325 struct controller *ctrl = slot->ctrl;
368 u16 slot_ctrl; 326 u16 slot_ctrl;
369 u8 atten_led_state; 327 u8 atten_led_state;
370 int retval = 0; 328 int retval = 0;
371
372 DBG_ENTER_ROUTINE
373 329
374 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); 330 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
375 if (retval) { 331 if (retval) {
@@ -400,7 +356,6 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
400 break; 356 break;
401 } 357 }
402 358
403 DBG_LEAVE_ROUTINE
404 return 0; 359 return 0;
405} 360}
406 361
@@ -410,8 +365,6 @@ static int hpc_get_power_status(struct slot *slot, u8 *status)
410 u16 slot_ctrl; 365 u16 slot_ctrl;
411 u8 pwr_state; 366 u8 pwr_state;
412 int retval = 0; 367 int retval = 0;
413
414 DBG_ENTER_ROUTINE
415 368
416 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); 369 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
417 if (retval) { 370 if (retval) {
@@ -428,35 +381,30 @@ static int hpc_get_power_status(struct slot *slot, u8 *status)
428 *status = 1; 381 *status = 1;
429 break; 382 break;
430 case 1: 383 case 1:
431 *status = 0; 384 *status = 0;
432 break; 385 break;
433 default: 386 default:
434 *status = 0xFF; 387 *status = 0xFF;
435 break; 388 break;
436 } 389 }
437 390
438 DBG_LEAVE_ROUTINE
439 return retval; 391 return retval;
440} 392}
441 393
442
443static int hpc_get_latch_status(struct slot *slot, u8 *status) 394static int hpc_get_latch_status(struct slot *slot, u8 *status)
444{ 395{
445 struct controller *ctrl = slot->ctrl; 396 struct controller *ctrl = slot->ctrl;
446 u16 slot_status; 397 u16 slot_status;
447 int retval = 0; 398 int retval = 0;
448 399
449 DBG_ENTER_ROUTINE
450
451 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 400 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
452 if (retval) { 401 if (retval) {
453 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__); 402 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
454 return retval; 403 return retval;
455 } 404 }
456 405
457 *status = (((slot_status & MRL_STATE) >> 5) == 0) ? 0 : 1; 406 *status = (((slot_status & MRL_STATE) >> 5) == 0) ? 0 : 1;
458 407
459 DBG_LEAVE_ROUTINE
460 return 0; 408 return 0;
461} 409}
462 410
@@ -467,8 +415,6 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status)
467 u8 card_state; 415 u8 card_state;
468 int retval = 0; 416 int retval = 0;
469 417
470 DBG_ENTER_ROUTINE
471
472 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 418 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
473 if (retval) { 419 if (retval) {
474 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__); 420 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
@@ -477,7 +423,6 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status)
477 card_state = (u8)((slot_status & PRSN_STATE) >> 6); 423 card_state = (u8)((slot_status & PRSN_STATE) >> 6);
478 *status = (card_state == 1) ? 1 : 0; 424 *status = (card_state == 1) ? 1 : 0;
479 425
480 DBG_LEAVE_ROUTINE
481 return 0; 426 return 0;
482} 427}
483 428
@@ -488,16 +433,13 @@ static int hpc_query_power_fault(struct slot *slot)
488 u8 pwr_fault; 433 u8 pwr_fault;
489 int retval = 0; 434 int retval = 0;
490 435
491 DBG_ENTER_ROUTINE
492
493 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 436 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
494 if (retval) { 437 if (retval) {
495 err("%s: Cannot check for power fault\n", __FUNCTION__); 438 err("%s: Cannot check for power fault\n", __FUNCTION__);
496 return retval; 439 return retval;
497 } 440 }
498 pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1); 441 pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1);
499 442
500 DBG_LEAVE_ROUTINE
501 return pwr_fault; 443 return pwr_fault;
502} 444}
503 445
@@ -507,8 +449,6 @@ static int hpc_get_emi_status(struct slot *slot, u8 *status)
507 u16 slot_status; 449 u16 slot_status;
508 int retval = 0; 450 int retval = 0;
509 451
510 DBG_ENTER_ROUTINE
511
512 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 452 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
513 if (retval) { 453 if (retval) {
514 err("%s : Cannot check EMI status\n", __FUNCTION__); 454 err("%s : Cannot check EMI status\n", __FUNCTION__);
@@ -516,7 +456,6 @@ static int hpc_get_emi_status(struct slot *slot, u8 *status)
516 } 456 }
517 *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT; 457 *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT;
518 458
519 DBG_LEAVE_ROUTINE
520 return retval; 459 return retval;
521} 460}
522 461
@@ -526,8 +465,6 @@ static int hpc_toggle_emi(struct slot *slot)
526 u16 cmd_mask; 465 u16 cmd_mask;
527 int rc; 466 int rc;
528 467
529 DBG_ENTER_ROUTINE
530
531 slot_cmd = EMI_CTRL; 468 slot_cmd = EMI_CTRL;
532 cmd_mask = EMI_CTRL; 469 cmd_mask = EMI_CTRL;
533 if (!pciehp_poll_mode) { 470 if (!pciehp_poll_mode) {
@@ -537,7 +474,7 @@ static int hpc_toggle_emi(struct slot *slot)
537 474
538 rc = pcie_write_cmd(slot, slot_cmd, cmd_mask); 475 rc = pcie_write_cmd(slot, slot_cmd, cmd_mask);
539 slot->last_emi_toggle = get_seconds(); 476 slot->last_emi_toggle = get_seconds();
540 DBG_LEAVE_ROUTINE 477
541 return rc; 478 return rc;
542} 479}
543 480
@@ -548,8 +485,6 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
548 u16 cmd_mask; 485 u16 cmd_mask;
549 int rc; 486 int rc;
550 487
551 DBG_ENTER_ROUTINE
552
553 cmd_mask = ATTN_LED_CTRL; 488 cmd_mask = ATTN_LED_CTRL;
554 switch (value) { 489 switch (value) {
555 case 0 : /* turn off */ 490 case 0 : /* turn off */
@@ -572,19 +507,15 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
572 rc = pcie_write_cmd(slot, slot_cmd, cmd_mask); 507 rc = pcie_write_cmd(slot, slot_cmd, cmd_mask);
573 dbg("%s: SLOTCTRL %x write cmd %x\n", 508 dbg("%s: SLOTCTRL %x write cmd %x\n",
574 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd); 509 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
575 510
576 DBG_LEAVE_ROUTINE
577 return rc; 511 return rc;
578} 512}
579 513
580
581static void hpc_set_green_led_on(struct slot *slot) 514static void hpc_set_green_led_on(struct slot *slot)
582{ 515{
583 struct controller *ctrl = slot->ctrl; 516 struct controller *ctrl = slot->ctrl;
584 u16 slot_cmd; 517 u16 slot_cmd;
585 u16 cmd_mask; 518 u16 cmd_mask;
586
587 DBG_ENTER_ROUTINE
588 519
589 slot_cmd = 0x0100; 520 slot_cmd = 0x0100;
590 cmd_mask = PWR_LED_CTRL; 521 cmd_mask = PWR_LED_CTRL;
@@ -597,8 +528,6 @@ static void hpc_set_green_led_on(struct slot *slot)
597 528
598 dbg("%s: SLOTCTRL %x write cmd %x\n", 529 dbg("%s: SLOTCTRL %x write cmd %x\n",
599 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd); 530 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
600 DBG_LEAVE_ROUTINE
601 return;
602} 531}
603 532
604static void hpc_set_green_led_off(struct slot *slot) 533static void hpc_set_green_led_off(struct slot *slot)
@@ -607,8 +536,6 @@ static void hpc_set_green_led_off(struct slot *slot)
607 u16 slot_cmd; 536 u16 slot_cmd;
608 u16 cmd_mask; 537 u16 cmd_mask;
609 538
610 DBG_ENTER_ROUTINE
611
612 slot_cmd = 0x0300; 539 slot_cmd = 0x0300;
613 cmd_mask = PWR_LED_CTRL; 540 cmd_mask = PWR_LED_CTRL;
614 if (!pciehp_poll_mode) { 541 if (!pciehp_poll_mode) {
@@ -619,9 +546,6 @@ static void hpc_set_green_led_off(struct slot *slot)
619 pcie_write_cmd(slot, slot_cmd, cmd_mask); 546 pcie_write_cmd(slot, slot_cmd, cmd_mask);
620 dbg("%s: SLOTCTRL %x write cmd %x\n", 547 dbg("%s: SLOTCTRL %x write cmd %x\n",
621 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd); 548 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
622
623 DBG_LEAVE_ROUTINE
624 return;
625} 549}
626 550
627static void hpc_set_green_led_blink(struct slot *slot) 551static void hpc_set_green_led_blink(struct slot *slot)
@@ -629,8 +553,6 @@ static void hpc_set_green_led_blink(struct slot *slot)
629 struct controller *ctrl = slot->ctrl; 553 struct controller *ctrl = slot->ctrl;
630 u16 slot_cmd; 554 u16 slot_cmd;
631 u16 cmd_mask; 555 u16 cmd_mask;
632
633 DBG_ENTER_ROUTINE
634 556
635 slot_cmd = 0x0200; 557 slot_cmd = 0x0200;
636 cmd_mask = PWR_LED_CTRL; 558 cmd_mask = PWR_LED_CTRL;
@@ -643,14 +565,10 @@ static void hpc_set_green_led_blink(struct slot *slot)
643 565
644 dbg("%s: SLOTCTRL %x write cmd %x\n", 566 dbg("%s: SLOTCTRL %x write cmd %x\n",
645 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd); 567 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
646 DBG_LEAVE_ROUTINE
647 return;
648} 568}
649 569
650static void hpc_release_ctlr(struct controller *ctrl) 570static void hpc_release_ctlr(struct controller *ctrl)
651{ 571{
652 DBG_ENTER_ROUTINE
653
654 if (pciehp_poll_mode) 572 if (pciehp_poll_mode)
655 del_timer(&ctrl->poll_timer); 573 del_timer(&ctrl->poll_timer);
656 else 574 else
@@ -662,8 +580,6 @@ static void hpc_release_ctlr(struct controller *ctrl)
662 */ 580 */
663 if (atomic_dec_and_test(&pciehp_num_controllers)) 581 if (atomic_dec_and_test(&pciehp_num_controllers))
664 destroy_workqueue(pciehp_wq); 582 destroy_workqueue(pciehp_wq);
665
666 DBG_LEAVE_ROUTINE
667} 583}
668 584
669static int hpc_power_on_slot(struct slot * slot) 585static int hpc_power_on_slot(struct slot * slot)
@@ -674,8 +590,6 @@ static int hpc_power_on_slot(struct slot * slot)
674 u16 slot_status; 590 u16 slot_status;
675 int retval = 0; 591 int retval = 0;
676 592
677 DBG_ENTER_ROUTINE
678
679 dbg("%s: slot->hp_slot %x\n", __FUNCTION__, slot->hp_slot); 593 dbg("%s: slot->hp_slot %x\n", __FUNCTION__, slot->hp_slot);
680 594
681 /* Clear sticky power-fault bit from previous power failures */ 595 /* Clear sticky power-fault bit from previous power failures */
@@ -719,8 +633,6 @@ static int hpc_power_on_slot(struct slot * slot)
719 dbg("%s: SLOTCTRL %x write cmd %x\n", 633 dbg("%s: SLOTCTRL %x write cmd %x\n",
720 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd); 634 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
721 635
722 DBG_LEAVE_ROUTINE
723
724 return retval; 636 return retval;
725} 637}
726 638
@@ -731,8 +643,6 @@ static int hpc_power_off_slot(struct slot * slot)
731 u16 cmd_mask; 643 u16 cmd_mask;
732 int retval = 0; 644 int retval = 0;
733 645
734 DBG_ENTER_ROUTINE
735
736 dbg("%s: slot->hp_slot %x\n", __FUNCTION__, slot->hp_slot); 646 dbg("%s: slot->hp_slot %x\n", __FUNCTION__, slot->hp_slot);
737 647
738 slot_cmd = POWER_OFF; 648 slot_cmd = POWER_OFF;
@@ -764,8 +674,6 @@ static int hpc_power_off_slot(struct slot * slot)
764 dbg("%s: SLOTCTRL %x write cmd %x\n", 674 dbg("%s: SLOTCTRL %x write cmd %x\n",
765 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd); 675 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
766 676
767 DBG_LEAVE_ROUTINE
768
769 return retval; 677 return retval;
770} 678}
771 679
@@ -784,8 +692,8 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
784 return IRQ_NONE; 692 return IRQ_NONE;
785 } 693 }
786 694
787 intr_detect = ( ATTN_BUTTN_PRESSED | PWR_FAULT_DETECTED | MRL_SENS_CHANGED | 695 intr_detect = (ATTN_BUTTN_PRESSED | PWR_FAULT_DETECTED |
788 PRSN_DETECT_CHANGED | CMD_COMPLETED ); 696 MRL_SENS_CHANGED | PRSN_DETECT_CHANGED | CMD_COMPLETED);
789 697
790 intr_loc = slot_status & intr_detect; 698 intr_loc = slot_status & intr_detect;
791 699
@@ -807,7 +715,8 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
807 715
808 dbg("%s: pciehp_readw(SLOTCTRL) with value %x\n", 716 dbg("%s: pciehp_readw(SLOTCTRL) with value %x\n",
809 __FUNCTION__, temp_word); 717 __FUNCTION__, temp_word);
810 temp_word = (temp_word & ~HP_INTR_ENABLE & ~CMD_CMPL_INTR_ENABLE) | 0x00; 718 temp_word = (temp_word & ~HP_INTR_ENABLE &
719 ~CMD_CMPL_INTR_ENABLE) | 0x00;
811 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word); 720 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
812 if (rc) { 721 if (rc) {
813 err("%s: Cannot write to SLOTCTRL register\n", 722 err("%s: Cannot write to SLOTCTRL register\n",
@@ -825,7 +734,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
825 } 734 }
826 dbg("%s: pciehp_readw(SLOTSTATUS) with value %x\n", 735 dbg("%s: pciehp_readw(SLOTSTATUS) with value %x\n",
827 __FUNCTION__, slot_status); 736 __FUNCTION__, slot_status);
828 737
829 /* Clear command complete interrupt caused by this write */ 738 /* Clear command complete interrupt caused by this write */
830 temp_word = 0x1f; 739 temp_word = 0x1f;
831 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word); 740 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
@@ -835,10 +744,10 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
835 return IRQ_NONE; 744 return IRQ_NONE;
836 } 745 }
837 } 746 }
838 747
839 if (intr_loc & CMD_COMPLETED) { 748 if (intr_loc & CMD_COMPLETED) {
840 /* 749 /*
841 * Command Complete Interrupt Pending 750 * Command Complete Interrupt Pending
842 */ 751 */
843 ctrl->cmd_busy = 0; 752 ctrl->cmd_busy = 0;
844 wake_up_interruptible(&ctrl->queue); 753 wake_up_interruptible(&ctrl->queue);
@@ -892,7 +801,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
892 __FUNCTION__); 801 __FUNCTION__);
893 return IRQ_NONE; 802 return IRQ_NONE;
894 } 803 }
895 804
896 /* Clear command complete interrupt caused by this write */ 805 /* Clear command complete interrupt caused by this write */
897 temp_word = 0x1F; 806 temp_word = 0x1F;
898 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word); 807 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
@@ -904,19 +813,17 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
904 dbg("%s: pciehp_writew(SLOTSTATUS) with value %x\n", 813 dbg("%s: pciehp_writew(SLOTSTATUS) with value %x\n",
905 __FUNCTION__, temp_word); 814 __FUNCTION__, temp_word);
906 } 815 }
907 816
908 return IRQ_HANDLED; 817 return IRQ_HANDLED;
909} 818}
910 819
911static int hpc_get_max_lnk_speed (struct slot *slot, enum pci_bus_speed *value) 820static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
912{ 821{
913 struct controller *ctrl = slot->ctrl; 822 struct controller *ctrl = slot->ctrl;
914 enum pcie_link_speed lnk_speed; 823 enum pcie_link_speed lnk_speed;
915 u32 lnk_cap; 824 u32 lnk_cap;
916 int retval = 0; 825 int retval = 0;
917 826
918 DBG_ENTER_ROUTINE
919
920 retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); 827 retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap);
921 if (retval) { 828 if (retval) {
922 err("%s: Cannot read LNKCAP register\n", __FUNCTION__); 829 err("%s: Cannot read LNKCAP register\n", __FUNCTION__);
@@ -934,19 +841,18 @@ static int hpc_get_max_lnk_speed (struct slot *slot, enum pci_bus_speed *value)
934 841
935 *value = lnk_speed; 842 *value = lnk_speed;
936 dbg("Max link speed = %d\n", lnk_speed); 843 dbg("Max link speed = %d\n", lnk_speed);
937 DBG_LEAVE_ROUTINE 844
938 return retval; 845 return retval;
939} 846}
940 847
941static int hpc_get_max_lnk_width (struct slot *slot, enum pcie_link_width *value) 848static int hpc_get_max_lnk_width(struct slot *slot,
849 enum pcie_link_width *value)
942{ 850{
943 struct controller *ctrl = slot->ctrl; 851 struct controller *ctrl = slot->ctrl;
944 enum pcie_link_width lnk_wdth; 852 enum pcie_link_width lnk_wdth;
945 u32 lnk_cap; 853 u32 lnk_cap;
946 int retval = 0; 854 int retval = 0;
947 855
948 DBG_ENTER_ROUTINE
949
950 retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); 856 retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap);
951 if (retval) { 857 if (retval) {
952 err("%s: Cannot read LNKCAP register\n", __FUNCTION__); 858 err("%s: Cannot read LNKCAP register\n", __FUNCTION__);
@@ -985,19 +891,17 @@ static int hpc_get_max_lnk_width (struct slot *slot, enum pcie_link_width *value
985 891
986 *value = lnk_wdth; 892 *value = lnk_wdth;
987 dbg("Max link width = %d\n", lnk_wdth); 893 dbg("Max link width = %d\n", lnk_wdth);
988 DBG_LEAVE_ROUTINE 894
989 return retval; 895 return retval;
990} 896}
991 897
992static int hpc_get_cur_lnk_speed (struct slot *slot, enum pci_bus_speed *value) 898static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
993{ 899{
994 struct controller *ctrl = slot->ctrl; 900 struct controller *ctrl = slot->ctrl;
995 enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN; 901 enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN;
996 int retval = 0; 902 int retval = 0;
997 u16 lnk_status; 903 u16 lnk_status;
998 904
999 DBG_ENTER_ROUTINE
1000
1001 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); 905 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
1002 if (retval) { 906 if (retval) {
1003 err("%s: Cannot read LNKSTATUS register\n", __FUNCTION__); 907 err("%s: Cannot read LNKSTATUS register\n", __FUNCTION__);
@@ -1015,25 +919,24 @@ static int hpc_get_cur_lnk_speed (struct slot *slot, enum pci_bus_speed *value)
1015 919
1016 *value = lnk_speed; 920 *value = lnk_speed;
1017 dbg("Current link speed = %d\n", lnk_speed); 921 dbg("Current link speed = %d\n", lnk_speed);
1018 DBG_LEAVE_ROUTINE 922
1019 return retval; 923 return retval;
1020} 924}
1021 925
1022static int hpc_get_cur_lnk_width (struct slot *slot, enum pcie_link_width *value) 926static int hpc_get_cur_lnk_width(struct slot *slot,
927 enum pcie_link_width *value)
1023{ 928{
1024 struct controller *ctrl = slot->ctrl; 929 struct controller *ctrl = slot->ctrl;
1025 enum pcie_link_width lnk_wdth = PCIE_LNK_WIDTH_UNKNOWN; 930 enum pcie_link_width lnk_wdth = PCIE_LNK_WIDTH_UNKNOWN;
1026 int retval = 0; 931 int retval = 0;
1027 u16 lnk_status; 932 u16 lnk_status;
1028 933
1029 DBG_ENTER_ROUTINE
1030
1031 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); 934 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
1032 if (retval) { 935 if (retval) {
1033 err("%s: Cannot read LNKSTATUS register\n", __FUNCTION__); 936 err("%s: Cannot read LNKSTATUS register\n", __FUNCTION__);
1034 return retval; 937 return retval;
1035 } 938 }
1036 939
1037 switch ((lnk_status & 0x03F0) >> 4){ 940 switch ((lnk_status & 0x03F0) >> 4){
1038 case 0: 941 case 0:
1039 lnk_wdth = PCIE_LNK_WIDTH_RESRV; 942 lnk_wdth = PCIE_LNK_WIDTH_RESRV;
@@ -1066,7 +969,7 @@ static int hpc_get_cur_lnk_width (struct slot *slot, enum pcie_link_width *value
1066 969
1067 *value = lnk_wdth; 970 *value = lnk_wdth;
1068 dbg("Current link width = %d\n", lnk_wdth); 971 dbg("Current link width = %d\n", lnk_wdth);
1069 DBG_LEAVE_ROUTINE 972
1070 return retval; 973 return retval;
1071} 974}
1072 975
@@ -1085,12 +988,12 @@ static struct hpc_ops pciehp_hpc_ops = {
1085 .get_cur_bus_speed = hpc_get_cur_lnk_speed, 988 .get_cur_bus_speed = hpc_get_cur_lnk_speed,
1086 .get_max_lnk_width = hpc_get_max_lnk_width, 989 .get_max_lnk_width = hpc_get_max_lnk_width,
1087 .get_cur_lnk_width = hpc_get_cur_lnk_width, 990 .get_cur_lnk_width = hpc_get_cur_lnk_width,
1088 991
1089 .query_power_fault = hpc_query_power_fault, 992 .query_power_fault = hpc_query_power_fault,
1090 .green_led_on = hpc_set_green_led_on, 993 .green_led_on = hpc_set_green_led_on,
1091 .green_led_off = hpc_set_green_led_off, 994 .green_led_off = hpc_set_green_led_off,
1092 .green_led_blink = hpc_set_green_led_blink, 995 .green_led_blink = hpc_set_green_led_blink,
1093 996
1094 .release_ctlr = hpc_release_ctlr, 997 .release_ctlr = hpc_release_ctlr,
1095 .check_lnk_status = hpc_check_lnk_status, 998 .check_lnk_status = hpc_check_lnk_status,
1096}; 999};
@@ -1138,6 +1041,7 @@ int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev)
1138 dbg("Trying to get hotplug control for %s \n", 1041 dbg("Trying to get hotplug control for %s \n",
1139 (char *)string.pointer); 1042 (char *)string.pointer);
1140 status = pci_osc_control_set(handle, 1043 status = pci_osc_control_set(handle,
1044 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL |
1141 OSC_PCI_EXPRESS_NATIVE_HP_CONTROL); 1045 OSC_PCI_EXPRESS_NATIVE_HP_CONTROL);
1142 if (status == AE_NOT_FOUND) 1046 if (status == AE_NOT_FOUND)
1143 status = acpi_run_oshp(handle); 1047 status = acpi_run_oshp(handle);
@@ -1163,8 +1067,6 @@ int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev)
1163} 1067}
1164#endif 1068#endif
1165 1069
1166
1167
1168int pcie_init(struct controller * ctrl, struct pcie_device *dev) 1070int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1169{ 1071{
1170 int rc; 1072 int rc;
@@ -1176,8 +1078,6 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1176 u16 slot_status, slot_ctrl; 1078 u16 slot_status, slot_ctrl;
1177 struct pci_dev *pdev; 1079 struct pci_dev *pdev;
1178 1080
1179 DBG_ENTER_ROUTINE
1180
1181 pdev = dev->port; 1081 pdev = dev->port;
1182 ctrl->pci_dev = pdev; /* save pci_dev in context */ 1082 ctrl->pci_dev = pdev; /* save pci_dev in context */
1183 1083
@@ -1201,9 +1101,11 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1201 dbg("%s: CAPREG offset %x cap_reg %x\n", 1101 dbg("%s: CAPREG offset %x cap_reg %x\n",
1202 __FUNCTION__, ctrl->cap_base + CAPREG, cap_reg); 1102 __FUNCTION__, ctrl->cap_base + CAPREG, cap_reg);
1203 1103
1204 if (((cap_reg & SLOT_IMPL) == 0) || (((cap_reg & DEV_PORT_TYPE) != 0x0040) 1104 if (((cap_reg & SLOT_IMPL) == 0) ||
1105 (((cap_reg & DEV_PORT_TYPE) != 0x0040)
1205 && ((cap_reg & DEV_PORT_TYPE) != 0x0060))) { 1106 && ((cap_reg & DEV_PORT_TYPE) != 0x0060))) {
1206 dbg("%s : This is not a root port or the port is not connected to a slot\n", __FUNCTION__); 1107 dbg("%s : This is not a root port or the port is not "
1108 "connected to a slot\n", __FUNCTION__);
1207 goto abort_free_ctlr; 1109 goto abort_free_ctlr;
1208 } 1110 }
1209 1111
@@ -1236,14 +1138,15 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1236 dbg("%s: SLOTCTRL offset %x slot_ctrl %x\n", 1138 dbg("%s: SLOTCTRL offset %x slot_ctrl %x\n",
1237 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_ctrl); 1139 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
1238 1140
1239 for ( rc = 0; rc < DEVICE_COUNT_RESOURCE; rc++) 1141 for (rc = 0; rc < DEVICE_COUNT_RESOURCE; rc++)
1240 if (pci_resource_len(pdev, rc) > 0) 1142 if (pci_resource_len(pdev, rc) > 0)
1241 dbg("pci resource[%d] start=0x%llx(len=0x%llx)\n", rc, 1143 dbg("pci resource[%d] start=0x%llx(len=0x%llx)\n", rc,
1242 (unsigned long long)pci_resource_start(pdev, rc), 1144 (unsigned long long)pci_resource_start(pdev, rc),
1243 (unsigned long long)pci_resource_len(pdev, rc)); 1145 (unsigned long long)pci_resource_len(pdev, rc));
1244 1146
1245 info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", pdev->vendor, pdev->device, 1147 info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
1246 pdev->subsystem_vendor, pdev->subsystem_device); 1148 pdev->vendor, pdev->device,
1149 pdev->subsystem_vendor, pdev->subsystem_device);
1247 1150
1248 mutex_init(&ctrl->crit_sect); 1151 mutex_init(&ctrl->crit_sect);
1249 mutex_init(&ctrl->ctrl_lock); 1152 mutex_init(&ctrl->ctrl_lock);
@@ -1267,7 +1170,8 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1267 1170
1268 dbg("%s: SLOTCTRL %x value read %x\n", 1171 dbg("%s: SLOTCTRL %x value read %x\n",
1269 __FUNCTION__, ctrl->cap_base + SLOTCTRL, temp_word); 1172 __FUNCTION__, ctrl->cap_base + SLOTCTRL, temp_word);
1270 temp_word = (temp_word & ~HP_INTR_ENABLE & ~CMD_CMPL_INTR_ENABLE) | 0x00; 1173 temp_word = (temp_word & ~HP_INTR_ENABLE & ~CMD_CMPL_INTR_ENABLE) |
1174 0x00;
1271 1175
1272 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word); 1176 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
1273 if (rc) { 1177 if (rc) {
@@ -1330,14 +1234,14 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1330 1234
1331 if (ATTN_BUTTN(slot_cap)) 1235 if (ATTN_BUTTN(slot_cap))
1332 intr_enable = intr_enable | ATTN_BUTTN_ENABLE; 1236 intr_enable = intr_enable | ATTN_BUTTN_ENABLE;
1333 1237
1334 if (POWER_CTRL(slot_cap)) 1238 if (POWER_CTRL(slot_cap))
1335 intr_enable = intr_enable | PWR_FAULT_DETECT_ENABLE; 1239 intr_enable = intr_enable | PWR_FAULT_DETECT_ENABLE;
1336 1240
1337 if (MRL_SENS(slot_cap)) 1241 if (MRL_SENS(slot_cap))
1338 intr_enable = intr_enable | MRL_DETECT_ENABLE; 1242 intr_enable = intr_enable | MRL_DETECT_ENABLE;
1339 1243
1340 temp_word = (temp_word & ~intr_enable) | intr_enable; 1244 temp_word = (temp_word & ~intr_enable) | intr_enable;
1341 1245
1342 if (pciehp_poll_mode) { 1246 if (pciehp_poll_mode) {
1343 temp_word = (temp_word & ~HP_INTR_ENABLE) | 0x0; 1247 temp_word = (temp_word & ~HP_INTR_ENABLE) | 0x0;
@@ -1345,7 +1249,10 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1345 temp_word = (temp_word & ~HP_INTR_ENABLE) | HP_INTR_ENABLE; 1249 temp_word = (temp_word & ~HP_INTR_ENABLE) | HP_INTR_ENABLE;
1346 } 1250 }
1347 1251
1348 /* Unmask Hot-plug Interrupt Enable for the interrupt notification mechanism case */ 1252 /*
1253 * Unmask Hot-plug Interrupt Enable for the interrupt
1254 * notification mechanism case.
1255 */
1349 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word); 1256 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
1350 if (rc) { 1257 if (rc) {
1351 err("%s: Cannot write to SLOTCTRL register\n", __FUNCTION__); 1258 err("%s: Cannot write to SLOTCTRL register\n", __FUNCTION__);
@@ -1356,14 +1263,14 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1356 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__); 1263 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
1357 goto abort_disable_intr; 1264 goto abort_disable_intr;
1358 } 1265 }
1359 1266
1360 temp_word = 0x1F; /* Clear all events */ 1267 temp_word = 0x1F; /* Clear all events */
1361 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word); 1268 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
1362 if (rc) { 1269 if (rc) {
1363 err("%s: Cannot write to SLOTSTATUS register\n", __FUNCTION__); 1270 err("%s: Cannot write to SLOTSTATUS register\n", __FUNCTION__);
1364 goto abort_disable_intr; 1271 goto abort_disable_intr;
1365 } 1272 }
1366 1273
1367 if (pciehp_force) { 1274 if (pciehp_force) {
1368 dbg("Bypassing BIOS check for pciehp use on %s\n", 1275 dbg("Bypassing BIOS check for pciehp use on %s\n",
1369 pci_name(ctrl->pci_dev)); 1276 pci_name(ctrl->pci_dev));
@@ -1375,10 +1282,9 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1375 1282
1376 ctrl->hpc_ops = &pciehp_hpc_ops; 1283 ctrl->hpc_ops = &pciehp_hpc_ops;
1377 1284
1378 DBG_LEAVE_ROUTINE
1379 return 0; 1285 return 0;
1380 1286
1381 /* We end up here for the many possible ways to fail this API. */ 1287 /* We end up here for the many possible ways to fail this API. */
1382abort_disable_intr: 1288abort_disable_intr:
1383 rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word); 1289 rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
1384 if (!rc) { 1290 if (!rc) {
@@ -1395,6 +1301,5 @@ abort_free_irq:
1395 free_irq(ctrl->pci_dev->irq, ctrl); 1301 free_irq(ctrl->pci_dev->irq, ctrl);
1396 1302
1397abort_free_ctlr: 1303abort_free_ctlr:
1398 DBG_LEAVE_ROUTINE
1399 return -1; 1304 return -1;
1400} 1305}
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 854aaea09e..c424aded13 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -243,9 +243,10 @@ int pciehp_configure_device(struct slot *p_slot)
243 243
244int pciehp_unconfigure_device(struct slot *p_slot) 244int pciehp_unconfigure_device(struct slot *p_slot)
245{ 245{
246 int rc = 0; 246 int ret, rc = 0;
247 int j; 247 int j;
248 u8 bctl = 0; 248 u8 bctl = 0;
249 u8 presence = 0;
249 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; 250 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
250 251
251 dbg("%s: bus/dev = %x/%x\n", __FUNCTION__, p_slot->bus, 252 dbg("%s: bus/dev = %x/%x\n", __FUNCTION__, p_slot->bus,
@@ -263,23 +264,28 @@ int pciehp_unconfigure_device(struct slot *p_slot)
263 continue; 264 continue;
264 } 265 }
265 if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE) { 266 if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
266 pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl); 267 ret = p_slot->hpc_ops->get_adapter_status(p_slot,
267 if (bctl & PCI_BRIDGE_CTL_VGA) { 268 &presence);
268 err("Cannot remove display device %s\n", 269 if (!ret && presence) {
270 pci_read_config_byte(temp, PCI_BRIDGE_CONTROL,
271 &bctl);
272 if (bctl & PCI_BRIDGE_CTL_VGA) {
273 err("Cannot remove display device %s\n",
269 pci_name(temp)); 274 pci_name(temp));
270 pci_dev_put(temp); 275 pci_dev_put(temp);
271 continue; 276 continue;
277 }
272 } 278 }
273 } 279 }
274 pci_remove_bus_device(temp); 280 pci_remove_bus_device(temp);
275 pci_dev_put(temp); 281 pci_dev_put(temp);
276 } 282 }
277 /* 283 /*
278 * Some PCI Express root ports require fixup after hot-plug operation. 284 * Some PCI Express root ports require fixup after hot-plug operation.
279 */ 285 */
280 if (pcie_mch_quirk) 286 if (pcie_mch_quirk)
281 pci_fixup_device(pci_fixup_final, p_slot->ctrl->pci_dev); 287 pci_fixup_device(pci_fixup_final, p_slot->ctrl->pci_dev);
282 288
283 return rc; 289 return rc;
284} 290}
285 291
diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c
index df076064a3..a080fedf03 100644
--- a/drivers/pci/hotplug/rpadlpar_sysfs.c
+++ b/drivers/pci/hotplug/rpadlpar_sysfs.c
@@ -129,17 +129,17 @@ struct kobj_type ktype_dlpar_io = {
129}; 129};
130 130
131struct kset dlpar_io_kset = { 131struct kset dlpar_io_kset = {
132 .kobj = {.name = DLPAR_KOBJ_NAME, 132 .kobj = {.ktype = &ktype_dlpar_io,
133 .ktype = &ktype_dlpar_io,
134 .parent = &pci_hotplug_slots_subsys.kobj}, 133 .parent = &pci_hotplug_slots_subsys.kobj},
135 .ktype = &ktype_dlpar_io, 134 .ktype = &ktype_dlpar_io,
136}; 135};
137 136
138int dlpar_sysfs_init(void) 137int dlpar_sysfs_init(void)
139{ 138{
139 kobject_set_name(&dlpar_io_kset.kobj, DLPAR_KOBJ_NAME);
140 if (kset_register(&dlpar_io_kset)) { 140 if (kset_register(&dlpar_io_kset)) {
141 printk(KERN_ERR "rpadlpar_io: cannot register kset for %s\n", 141 printk(KERN_ERR "rpadlpar_io: cannot register kset for %s\n",
142 dlpar_io_kset.kobj.name); 142 kobject_name(&dlpar_io_kset.kobj));
143 return -EINVAL; 143 return -EINVAL;
144 } 144 }
145 145
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index be1df85e5e..87e0161505 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -132,7 +132,7 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg)
132 pci_read_config_word(dev, msi_data_reg(pos, 1), &data); 132 pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
133 } else { 133 } else {
134 msg->address_hi = 0; 134 msg->address_hi = 0;
135 pci_read_config_word(dev, msi_data_reg(pos, 1), &data); 135 pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
136 } 136 }
137 msg->data = data; 137 msg->data = data;
138 break; 138 break;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 004bc24872..6e2760b6c2 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -54,7 +54,6 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
54 if (!dynid) 54 if (!dynid)
55 return -ENOMEM; 55 return -ENOMEM;
56 56
57 INIT_LIST_HEAD(&dynid->node);
58 dynid->id.vendor = vendor; 57 dynid->id.vendor = vendor;
59 dynid->id.device = device; 58 dynid->id.device = device;
60 dynid->id.subvendor = subvendor; 59 dynid->id.subvendor = subvendor;
@@ -65,7 +64,7 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
65 driver_data : 0UL; 64 driver_data : 0UL;
66 65
67 spin_lock(&pdrv->dynids.lock); 66 spin_lock(&pdrv->dynids.lock);
68 list_add_tail(&pdrv->dynids.list, &dynid->node); 67 list_add_tail(&dynid->node, &pdrv->dynids.list);
69 spin_unlock(&pdrv->dynids.lock); 68 spin_unlock(&pdrv->dynids.lock);
70 69
71 if (get_driver(&pdrv->driver)) { 70 if (get_driver(&pdrv->driver)) {
@@ -532,8 +531,7 @@ void pci_dev_put(struct pci_dev *dev)
532} 531}
533 532
534#ifndef CONFIG_HOTPLUG 533#ifndef CONFIG_HOTPLUG
535int pci_uevent(struct device *dev, char **envp, int num_envp, 534int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
536 char *buffer, int buffer_size)
537{ 535{
538 return -ENODEV; 536 return -ENODEV;
539} 537}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 37c00f6fd8..728b3c863d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -17,11 +17,16 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/string.h> 19#include <linux/string.h>
20#include <linux/log2.h>
20#include <asm/dma.h> /* isa_dma_bridge_buggy */ 21#include <asm/dma.h> /* isa_dma_bridge_buggy */
21#include "pci.h" 22#include "pci.h"
22 23
23unsigned int pci_pm_d3_delay = 10; 24unsigned int pci_pm_d3_delay = 10;
24 25
26#ifdef CONFIG_PCI_DOMAINS
27int pci_domains_supported = 1;
28#endif
29
25#define DEFAULT_CARDBUS_IO_SIZE (256) 30#define DEFAULT_CARDBUS_IO_SIZE (256)
26#define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024) 31#define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
27/* pci=cbmemsize=nnM,cbiosize=nn can override this */ 32/* pci=cbmemsize=nnM,cbiosize=nn can override this */
@@ -1454,7 +1459,7 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
1454 int cap, err = -EINVAL; 1459 int cap, err = -EINVAL;
1455 u32 stat, cmd, v, o; 1460 u32 stat, cmd, v, o;
1456 1461
1457 if (mmrbc < 512 || mmrbc > 4096 || (mmrbc & (mmrbc-1))) 1462 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
1458 goto out; 1463 goto out;
1459 1464
1460 v = ffs(mmrbc) - 10; 1465 v = ffs(mmrbc) - 10;
@@ -1526,7 +1531,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
1526 int cap, err = -EINVAL; 1531 int cap, err = -EINVAL;
1527 u16 ctl, v; 1532 u16 ctl, v;
1528 1533
1529 if (rq < 128 || rq > 4096 || (rq & (rq-1))) 1534 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
1530 goto out; 1535 goto out;
1531 1536
1532 v = (ffs(rq) - 8) << 12; 1537 v = (ffs(rq) - 8) << 12;
@@ -1566,6 +1571,13 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
1566 return bars; 1571 return bars;
1567} 1572}
1568 1573
1574static void __devinit pci_no_domains(void)
1575{
1576#ifdef CONFIG_PCI_DOMAINS
1577 pci_domains_supported = 0;
1578#endif
1579}
1580
1569static int __devinit pci_init(void) 1581static int __devinit pci_init(void)
1570{ 1582{
1571 struct pci_dev *dev = NULL; 1583 struct pci_dev *dev = NULL;
@@ -1585,6 +1597,10 @@ static int __devinit pci_setup(char *str)
1585 if (*str && (str = pcibios_setup(str)) && *str) { 1597 if (*str && (str = pcibios_setup(str)) && *str) {
1586 if (!strcmp(str, "nomsi")) { 1598 if (!strcmp(str, "nomsi")) {
1587 pci_no_msi(); 1599 pci_no_msi();
1600 } else if (!strcmp(str, "noaer")) {
1601 pci_no_aer();
1602 } else if (!strcmp(str, "nodomains")) {
1603 pci_no_domains();
1588 } else if (!strncmp(str, "cbiosize=", 9)) { 1604 } else if (!strncmp(str, "cbiosize=", 9)) {
1589 pci_cardbus_io_size = memparse(str + 9, &str); 1605 pci_cardbus_io_size = memparse(str + 9, &str);
1590 } else if (!strncmp(str, "cbmemsize=", 10)) { 1606 } else if (!strncmp(str, "cbmemsize=", 10)) {
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4c36e80f6d..6fda33de84 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,7 +1,6 @@
1/* Functions internal to the PCI core code */ 1/* Functions internal to the PCI core code */
2 2
3extern int pci_uevent(struct device *dev, char **envp, int num_envp, 3extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env);
4 char *buffer, int buffer_size);
5extern int pci_create_sysfs_dev_files(struct pci_dev *pdev); 4extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
6extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev); 5extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
7extern void pci_cleanup_rom(struct pci_dev *dev); 6extern void pci_cleanup_rom(struct pci_dev *dev);
@@ -52,6 +51,12 @@ void pci_restore_msi_state(struct pci_dev *dev);
52static inline void pci_restore_msi_state(struct pci_dev *dev) {} 51static inline void pci_restore_msi_state(struct pci_dev *dev) {}
53#endif 52#endif
54 53
54#ifdef CONFIG_PCIEAER
55void pci_no_aer(void);
56#else
57static inline void pci_no_aer(void) { }
58#endif
59
55static inline int pci_no_d1d2(struct pci_dev *dev) 60static inline int pci_no_d1d2(struct pci_dev *dev)
56{ 61{
57 unsigned int parent_dstates = 0; 62 unsigned int parent_dstates = 0;
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 0ad92a8ad8..287a931171 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -25,13 +25,4 @@ config HOTPLUG_PCI_PCIE
25 25
26 When in doubt, say N. 26 When in doubt, say N.
27 27
28config HOTPLUG_PCI_PCIE_POLL_EVENT_MODE
29 bool "Use polling mechanism for hot-plug events (for testing purpose)"
30 depends on HOTPLUG_PCI_PCIE
31 help
32 Say Y here if you want to use the polling mechanism for hot-plug
33 events for early platform testing.
34
35 When in doubt, say N.
36
37source "drivers/pci/pcie/aer/Kconfig" 28source "drivers/pci/pcie/aer/Kconfig"
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index ad90a01b0d..7a62f7dd90 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -81,6 +81,13 @@ static struct pcie_port_service_driver aerdriver = {
81 .reset_link = aer_root_reset, 81 .reset_link = aer_root_reset,
82}; 82};
83 83
84static int pcie_aer_disable;
85
86void pci_no_aer(void)
87{
88 pcie_aer_disable = 1; /* has priority over 'forceload' */
89}
90
84/** 91/**
85 * aer_irq - Root Port's ISR 92 * aer_irq - Root Port's ISR
86 * @irq: IRQ assigned to Root Port 93 * @irq: IRQ assigned to Root Port
@@ -327,6 +334,8 @@ static void aer_error_resume(struct pci_dev *dev)
327 **/ 334 **/
328static int __init aer_service_init(void) 335static int __init aer_service_init(void)
329{ 336{
337 if (pcie_aer_disable)
338 return -ENXIO;
330 return pcie_port_service_register(&aerdriver); 339 return pcie_port_service_register(&aerdriver);
331} 340}
332 341
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 171ca712e5..5db6b6690b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -276,8 +276,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
276 sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK); 276 sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK);
277 if (sz) { 277 if (sz) {
278 res->flags = (l & IORESOURCE_ROM_ENABLE) | 278 res->flags = (l & IORESOURCE_ROM_ENABLE) |
279 IORESOURCE_MEM | IORESOURCE_PREFETCH | 279 IORESOURCE_MEM | IORESOURCE_READONLY;
280 IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
281 res->start = l & PCI_ROM_ADDRESS_MASK; 280 res->start = l & PCI_ROM_ADDRESS_MASK;
282 res->end = res->start + (unsigned long) sz; 281 res->end = res->start + (unsigned long) sz;
283 } 282 }
@@ -597,7 +596,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass
597 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses); 596 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
598 597
599 if (!is_cardbus) { 598 if (!is_cardbus) {
600 child->bridge_ctl = bctl | PCI_BRIDGE_CTL_NO_ISA; 599 child->bridge_ctl = bctl;
601 /* 600 /*
602 * Adjust subordinate busnr in parent buses. 601 * Adjust subordinate busnr in parent buses.
603 * We do this before scanning for children because 602 * We do this before scanning for children because
@@ -744,22 +743,46 @@ static int pci_setup_device(struct pci_dev * dev)
744 */ 743 */
745 if (class == PCI_CLASS_STORAGE_IDE) { 744 if (class == PCI_CLASS_STORAGE_IDE) {
746 u8 progif; 745 u8 progif;
746 struct pci_bus_region region;
747
747 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); 748 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
748 if ((progif & 1) == 0) { 749 if ((progif & 1) == 0) {
749 dev->resource[0].start = 0x1F0; 750 struct resource resource = {
750 dev->resource[0].end = 0x1F7; 751 .start = 0x1F0,
751 dev->resource[0].flags = LEGACY_IO_RESOURCE; 752 .end = 0x1F7,
752 dev->resource[1].start = 0x3F6; 753 .flags = LEGACY_IO_RESOURCE,
753 dev->resource[1].end = 0x3F6; 754 };
754 dev->resource[1].flags = LEGACY_IO_RESOURCE; 755
756 pcibios_resource_to_bus(dev, &region, &resource);
757 dev->resource[0].start = region.start;
758 dev->resource[0].end = region.end;
759 dev->resource[0].flags = resource.flags;
760 resource.start = 0x3F6;
761 resource.end = 0x3F6;
762 resource.flags = LEGACY_IO_RESOURCE;
763 pcibios_resource_to_bus(dev, &region, &resource);
764 dev->resource[1].start = region.start;
765 dev->resource[1].end = region.end;
766 dev->resource[1].flags = resource.flags;
755 } 767 }
756 if ((progif & 4) == 0) { 768 if ((progif & 4) == 0) {
757 dev->resource[2].start = 0x170; 769 struct resource resource = {
758 dev->resource[2].end = 0x177; 770 .start = 0x170,
759 dev->resource[2].flags = LEGACY_IO_RESOURCE; 771 .end = 0x177,
760 dev->resource[3].start = 0x376; 772 .flags = LEGACY_IO_RESOURCE,
761 dev->resource[3].end = 0x376; 773 };
762 dev->resource[3].flags = LEGACY_IO_RESOURCE; 774
775 pcibios_resource_to_bus(dev, &region, &resource);
776 dev->resource[2].start = region.start;
777 dev->resource[2].end = region.end;
778 dev->resource[2].flags = resource.flags;
779 resource.start = 0x376;
780 resource.end = 0x376;
781 resource.flags = LEGACY_IO_RESOURCE;
782 pcibios_resource_to_bus(dev, &region, &resource);
783 dev->resource[3].start = region.start;
784 dev->resource[3].end = region.end;
785 dev->resource[3].flags = resource.flags;
763 } 786 }
764 } 787 }
765 break; 788 break;
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 90adc62d07..716439e25d 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -60,7 +60,7 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
60 */ 60 */
61 61
62 if (capable(CAP_SYS_ADMIN)) 62 if (capable(CAP_SYS_ADMIN))
63 size = dev->cfg_size; 63 size = dp->size;
64 else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) 64 else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
65 size = 128; 65 size = 128;
66 else 66 else
@@ -129,11 +129,11 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
129static ssize_t 129static ssize_t
130proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) 130proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos)
131{ 131{
132 const struct inode *ino = file->f_path.dentry->d_inode; 132 struct inode *ino = file->f_path.dentry->d_inode;
133 const struct proc_dir_entry *dp = PDE(ino); 133 const struct proc_dir_entry *dp = PDE(ino);
134 struct pci_dev *dev = dp->data; 134 struct pci_dev *dev = dp->data;
135 int pos = *ppos; 135 int pos = *ppos;
136 int size = dev->cfg_size; 136 int size = dp->size;
137 int cnt; 137 int cnt;
138 138
139 if (pos >= size) 139 if (pos >= size)
@@ -193,6 +193,7 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
193 } 193 }
194 194
195 *ppos = pos; 195 *ppos = pos;
196 i_size_write(ino, dp->size);
196 return nbytes; 197 return nbytes;
197} 198}
198 199
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 50f2dd9e1b..59d4da2734 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -472,11 +472,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_
472 */ 472 */
473static void __devinit quirk_vt82c586_acpi(struct pci_dev *dev) 473static void __devinit quirk_vt82c586_acpi(struct pci_dev *dev)
474{ 474{
475 u8 rev;
476 u32 region; 475 u32 region;
477 476
478 pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); 477 if (dev->revision & 0x10) {
479 if (rev & 0x10) {
480 pci_read_config_dword(dev, 0x48, &region); 478 pci_read_config_dword(dev, 0x48, &region);
481 region &= PCI_BASE_ADDRESS_IO_MASK; 479 region &= PCI_BASE_ADDRESS_IO_MASK;
482 quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES, "vt82c586 ACPI"); 480 quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES, "vt82c586 ACPI");
@@ -629,12 +627,9 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk
629 */ 627 */
630static void __init quirk_amd_8131_mmrbc(struct pci_dev *dev) 628static void __init quirk_amd_8131_mmrbc(struct pci_dev *dev)
631{ 629{
632 unsigned char revid; 630 if (dev->subordinate && dev->revision <= 0x12) {
633
634 pci_read_config_byte(dev, PCI_REVISION_ID, &revid);
635 if (dev->subordinate && revid <= 0x12) {
636 printk(KERN_INFO "AMD8131 rev %x detected, disabling PCI-X " 631 printk(KERN_INFO "AMD8131 rev %x detected, disabling PCI-X "
637 "MMRBC\n", revid); 632 "MMRBC\n", dev->revision);
638 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC; 633 dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
639 } 634 }
640} 635}
@@ -930,38 +925,6 @@ static void __init quirk_eisa_bridge(struct pci_dev *dev)
930} 925}
931DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge ); 926DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge );
932 927
933/*
934 * On the MSI-K8T-Neo2Fir Board, the internal Soundcard is disabled
935 * when a PCI-Soundcard is added. The BIOS only gives Options
936 * "Disabled" and "AUTO". This Quirk Sets the corresponding
937 * Register-Value to enable the Soundcard.
938 *
939 * FIXME: Presently this quirk will run on anything that has an 8237
940 * which isn't correct, we need to check DMI tables or something in
941 * order to make sure it only runs on the MSI-K8T-Neo2Fir. Because it
942 * runs everywhere at present we suppress the printk output in most
943 * irrelevant cases.
944 */
945static void k8t_sound_hostbridge(struct pci_dev *dev)
946{
947 unsigned char val;
948
949 pci_read_config_byte(dev, 0x50, &val);
950 if (val == 0xc8) {
951 /* Assume it's probably a MSI-K8T-Neo2Fir */
952 printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, attempting to turn soundcard ON\n");
953 pci_write_config_byte(dev, 0x50, val & (~0x40));
954
955 /* Verify the Change for Status output */
956 pci_read_config_byte(dev, 0x50, &val);
957 if (val & 0x40)
958 printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, soundcard still off\n");
959 else
960 printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, soundcard on\n");
961 }
962}
963DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge);
964DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge);
965 928
966/* 929/*
967 * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge 930 * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 5e5191ec8d..401e03c920 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -472,7 +472,12 @@ void pci_bus_size_bridges(struct pci_bus *bus)
472 break; 472 break;
473 473
474 case PCI_CLASS_BRIDGE_PCI: 474 case PCI_CLASS_BRIDGE_PCI:
475 /* don't size subtractive decoding (transparent)
476 * PCI-to-PCI bridges */
477 if (bus->self->transparent)
478 break;
475 pci_bridge_check_ranges(bus); 479 pci_bridge_check_ranges(bus);
480 /* fall through */
476 default: 481 default:
477 pbus_size_io(bus); 482 pbus_size_io(bus);
478 /* If the bridge supports prefetchable range, size it 483 /* If the bridge supports prefetchable range, size it
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
index 568f187731..05ca2ed9eb 100644
--- a/drivers/pci/setup-irq.c
+++ b/drivers/pci/setup-irq.c
@@ -48,7 +48,7 @@ pdev_fixup_irq(struct pci_dev *dev,
48 dev->irq = irq; 48 dev->irq = irq;
49 49
50 pr_debug("PCI: fixup irq: (%s) got %d\n", 50 pr_debug("PCI: fixup irq: (%s) got %d\n",
51 dev->dev.kobj.name, dev->irq); 51 kobject_name(&dev->dev.kobj), dev->irq);
52 52
53 /* Always tell the device, so the driver knows what is 53 /* Always tell the device, so the driver knows what is
54 the real IRQ to use; the device does not use it. */ 54 the real IRQ to use; the device does not use it. */
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index f8b13f0270..a0aca46ce8 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -907,18 +907,14 @@ int pcmcia_insert_card(struct pcmcia_socket *skt)
907EXPORT_SYMBOL(pcmcia_insert_card); 907EXPORT_SYMBOL(pcmcia_insert_card);
908 908
909 909
910static int pcmcia_socket_uevent(struct device *dev, char **envp, 910static int pcmcia_socket_uevent(struct device *dev,
911 int num_envp, char *buffer, int buffer_size) 911 struct kobj_uevent_env *env)
912{ 912{
913 struct pcmcia_socket *s = container_of(dev, struct pcmcia_socket, dev); 913 struct pcmcia_socket *s = container_of(dev, struct pcmcia_socket, dev);
914 int i = 0, length = 0;
915 914
916 if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, 915 if (add_uevent_var(env, "SOCKET_NO=%u", s->sock))
917 &length, "SOCKET_NO=%u", s->sock))
918 return -ENOMEM; 916 return -ENOMEM;
919 917
920 envp[i] = NULL;
921
922 return 0; 918 return 0;
923} 919}
924 920
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index a99607142f..55baa1f0fc 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -1064,11 +1064,10 @@ static int pcmcia_bus_match(struct device * dev, struct device_driver * drv) {
1064 1064
1065#ifdef CONFIG_HOTPLUG 1065#ifdef CONFIG_HOTPLUG
1066 1066
1067static int pcmcia_bus_uevent(struct device *dev, char **envp, int num_envp, 1067static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
1068 char *buffer, int buffer_size)
1069{ 1068{
1070 struct pcmcia_device *p_dev; 1069 struct pcmcia_device *p_dev;
1071 int i, length = 0; 1070 int i;
1072 u32 hash[4] = { 0, 0, 0, 0}; 1071 u32 hash[4] = { 0, 0, 0, 0};
1073 1072
1074 if (!dev) 1073 if (!dev)
@@ -1083,23 +1082,13 @@ static int pcmcia_bus_uevent(struct device *dev, char **envp, int num_envp,
1083 hash[i] = crc32(0, p_dev->prod_id[i], strlen(p_dev->prod_id[i])); 1082 hash[i] = crc32(0, p_dev->prod_id[i], strlen(p_dev->prod_id[i]));
1084 } 1083 }
1085 1084
1086 i = 0; 1085 if (add_uevent_var(env, "SOCKET_NO=%u", p_dev->socket->sock))
1087
1088 if (add_uevent_var(envp, num_envp, &i,
1089 buffer, buffer_size, &length,
1090 "SOCKET_NO=%u",
1091 p_dev->socket->sock))
1092 return -ENOMEM; 1086 return -ENOMEM;
1093 1087
1094 if (add_uevent_var(envp, num_envp, &i, 1088 if (add_uevent_var(env, "DEVICE_NO=%02X", p_dev->device_no))
1095 buffer, buffer_size, &length,
1096 "DEVICE_NO=%02X",
1097 p_dev->device_no))
1098 return -ENOMEM; 1089 return -ENOMEM;
1099 1090
1100 if (add_uevent_var(envp, num_envp, &i, 1091 if (add_uevent_var(env, "MODALIAS=pcmcia:m%04Xc%04Xf%02Xfn%02Xpfn%02X"
1101 buffer, buffer_size, &length,
1102 "MODALIAS=pcmcia:m%04Xc%04Xf%02Xfn%02Xpfn%02X"
1103 "pa%08Xpb%08Xpc%08Xpd%08X", 1092 "pa%08Xpb%08Xpc%08Xpd%08X",
1104 p_dev->has_manf_id ? p_dev->manf_id : 0, 1093 p_dev->has_manf_id ? p_dev->manf_id : 0,
1105 p_dev->has_card_id ? p_dev->card_id : 0, 1094 p_dev->has_card_id ? p_dev->card_id : 0,
@@ -1112,15 +1101,12 @@ static int pcmcia_bus_uevent(struct device *dev, char **envp, int num_envp,
1112 hash[3])) 1101 hash[3]))
1113 return -ENOMEM; 1102 return -ENOMEM;
1114 1103
1115 envp[i] = NULL;
1116
1117 return 0; 1104 return 0;
1118} 1105}
1119 1106
1120#else 1107#else
1121 1108
1122static int pcmcia_bus_uevent(struct device *dev, char **envp, int num_envp, 1109static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
1123 char *buffer, int buffer_size)
1124{ 1110{
1125 return -ENODEV; 1111 return -ENODEV;
1126} 1112}
diff --git a/drivers/pcmcia/pxa2xx_mainstone.c b/drivers/pcmcia/pxa2xx_mainstone.c
index 383107ba4b..f6722ba0dd 100644
--- a/drivers/pcmcia/pxa2xx_mainstone.c
+++ b/drivers/pcmcia/pxa2xx_mainstone.c
@@ -175,7 +175,6 @@ static int __init mst_pcmcia_init(void)
175 if (!mst_pcmcia_device) 175 if (!mst_pcmcia_device)
176 return -ENOMEM; 176 return -ENOMEM;
177 177
178 mst_pcmcia_device->dev.uevent_suppress = 0;
179 mst_pcmcia_device->dev.platform_data = &mst_pcmcia_ops; 178 mst_pcmcia_device->dev.platform_data = &mst_pcmcia_ops;
180 179
181 ret = platform_device_add(mst_pcmcia_device); 180 ret = platform_device_add(mst_pcmcia_device);
@@ -195,3 +194,4 @@ fs_initcall(mst_pcmcia_init);
195module_exit(mst_pcmcia_exit); 194module_exit(mst_pcmcia_exit);
196 195
197MODULE_LICENSE("GPL"); 196MODULE_LICENSE("GPL");
197MODULE_ALIAS("platform:pxa2xx-pcmcia");
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
index a2daa3f531..d5c33bd78d 100644
--- a/drivers/pcmcia/pxa2xx_sharpsl.c
+++ b/drivers/pcmcia/pxa2xx_sharpsl.c
@@ -261,7 +261,6 @@ static int __init sharpsl_pcmcia_init(void)
261 if (!sharpsl_pcmcia_device) 261 if (!sharpsl_pcmcia_device)
262 return -ENOMEM; 262 return -ENOMEM;
263 263
264 sharpsl_pcmcia_device->dev.uevent_suppress = 0;
265 sharpsl_pcmcia_device->dev.platform_data = &sharpsl_pcmcia_ops; 264 sharpsl_pcmcia_device->dev.platform_data = &sharpsl_pcmcia_ops;
266 sharpsl_pcmcia_device->dev.parent = platform_scoop_config->devs[0].dev; 265 sharpsl_pcmcia_device->dev.parent = platform_scoop_config->devs[0].dev;
267 266
@@ -284,3 +283,4 @@ module_exit(sharpsl_pcmcia_exit);
284 283
285MODULE_DESCRIPTION("Sharp SL Series PCMCIA Support"); 284MODULE_DESCRIPTION("Sharp SL Series PCMCIA Support");
286MODULE_LICENSE("GPL"); 285MODULE_LICENSE("GPL");
286MODULE_ALIAS("platform:pxa2xx-pcmcia");
diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
index a9880d468e..f38ba482be 100644
--- a/drivers/power/power_supply.h
+++ b/drivers/power/power_supply.h
@@ -14,8 +14,7 @@
14 14
15extern int power_supply_create_attrs(struct power_supply *psy); 15extern int power_supply_create_attrs(struct power_supply *psy);
16extern void power_supply_remove_attrs(struct power_supply *psy); 16extern void power_supply_remove_attrs(struct power_supply *psy);
17extern int power_supply_uevent(struct device *dev, char **envp, int num_envp, 17extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
18 char *buffer, int buffer_size);
19 18
20#else 19#else
21 20
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index de3155b212..249f61bae6 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -195,11 +195,10 @@ static char *kstruprdup(const char *str, gfp_t gfp)
195 return ret; 195 return ret;
196} 196}
197 197
198int power_supply_uevent(struct device *dev, char **envp, int num_envp, 198int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
199 char *buffer, int buffer_size)
200{ 199{
201 struct power_supply *psy = dev_get_drvdata(dev); 200 struct power_supply *psy = dev_get_drvdata(dev);
202 int i = 0, length = 0, ret = 0, j; 201 int ret = 0, j;
203 char *prop_buf; 202 char *prop_buf;
204 char *attrname; 203 char *attrname;
205 204
@@ -212,8 +211,7 @@ int power_supply_uevent(struct device *dev, char **envp, int num_envp,
212 211
213 dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->name); 212 dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->name);
214 213
215 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, 214 ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->name);
216 &length, "POWER_SUPPLY_NAME=%s", psy->name);
217 if (ret) 215 if (ret)
218 return ret; 216 return ret;
219 217
@@ -243,9 +241,7 @@ int power_supply_uevent(struct device *dev, char **envp, int num_envp,
243 241
244 dev_dbg(dev, "Static prop %s=%s\n", attrname, prop_buf); 242 dev_dbg(dev, "Static prop %s=%s\n", attrname, prop_buf);
245 243
246 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, 244 ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf);
247 &length, "POWER_SUPPLY_%s=%s",
248 attrname, prop_buf);
249 kfree(attrname); 245 kfree(attrname);
250 if (ret) 246 if (ret)
251 goto out; 247 goto out;
@@ -282,14 +278,11 @@ int power_supply_uevent(struct device *dev, char **envp, int num_envp,
282 278
283 dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf); 279 dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf);
284 280
285 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, 281 ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf);
286 &length, "POWER_SUPPLY_%s=%s",
287 attrname, prop_buf);
288 kfree(attrname); 282 kfree(attrname);
289 if (ret) 283 if (ret)
290 goto out; 284 goto out;
291 } 285 }
292 envp[i] = NULL;
293 286
294out: 287out:
295 free_page((unsigned long)prop_buf); 288 free_page((unsigned long)prop_buf);
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 93ee05eeae..78277a118b 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * SuperH On-Chip RTC Support 2 * SuperH On-Chip RTC Support
3 * 3 *
4 * Copyright (C) 2006 Paul Mundt 4 * Copyright (C) 2006, 2007 Paul Mundt
5 * Copyright (C) 2006 Jamie Lenehan 5 * Copyright (C) 2006 Jamie Lenehan
6 * 6 *
7 * Based on the old arch/sh/kernel/cpu/rtc.c by: 7 * Based on the old arch/sh/kernel/cpu/rtc.c by:
@@ -23,16 +23,19 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <asm/rtc.h>
26 27
27#define DRV_NAME "sh-rtc" 28#define DRV_NAME "sh-rtc"
28#define DRV_VERSION "0.1.2" 29#define DRV_VERSION "0.1.3"
29 30
30#ifdef CONFIG_CPU_SH3 31#ifdef CONFIG_CPU_SH3
31#define rtc_reg_size sizeof(u16) 32#define rtc_reg_size sizeof(u16)
32#define RTC_BIT_INVERTED 0 /* No bug on SH7708, SH7709A */ 33#define RTC_BIT_INVERTED 0 /* No bug on SH7708, SH7709A */
34#define RTC_DEF_CAPABILITIES 0UL
33#elif defined(CONFIG_CPU_SH4) 35#elif defined(CONFIG_CPU_SH4)
34#define rtc_reg_size sizeof(u32) 36#define rtc_reg_size sizeof(u32)
35#define RTC_BIT_INVERTED 0x40 /* bug on SH7750, SH7750S */ 37#define RTC_BIT_INVERTED 0x40 /* bug on SH7750, SH7750S */
38#define RTC_DEF_CAPABILITIES RTC_CAP_4_DIGIT_YEAR
36#endif 39#endif
37 40
38#define RTC_REG(r) ((r) * rtc_reg_size) 41#define RTC_REG(r) ((r) * rtc_reg_size)
@@ -80,6 +83,7 @@ struct sh_rtc {
80 struct rtc_device *rtc_dev; 83 struct rtc_device *rtc_dev;
81 spinlock_t lock; 84 spinlock_t lock;
82 int rearm_aie; 85 int rearm_aie;
86 unsigned long capabilities; /* See asm-sh/rtc.h for cap bits */
83}; 87};
84 88
85static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id) 89static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id)
@@ -319,14 +323,14 @@ static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
319 tm->tm_mday = BCD2BIN(readb(rtc->regbase + RDAYCNT)); 323 tm->tm_mday = BCD2BIN(readb(rtc->regbase + RDAYCNT));
320 tm->tm_mon = BCD2BIN(readb(rtc->regbase + RMONCNT)) - 1; 324 tm->tm_mon = BCD2BIN(readb(rtc->regbase + RMONCNT)) - 1;
321 325
322#if defined(CONFIG_CPU_SH4) 326 if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) {
323 yr = readw(rtc->regbase + RYRCNT); 327 yr = readw(rtc->regbase + RYRCNT);
324 yr100 = BCD2BIN(yr >> 8); 328 yr100 = BCD2BIN(yr >> 8);
325 yr &= 0xff; 329 yr &= 0xff;
326#else 330 } else {
327 yr = readb(rtc->regbase + RYRCNT); 331 yr = readb(rtc->regbase + RYRCNT);
328 yr100 = BCD2BIN((yr == 0x99) ? 0x19 : 0x20); 332 yr100 = BCD2BIN((yr == 0x99) ? 0x19 : 0x20);
329#endif 333 }
330 334
331 tm->tm_year = (yr100 * 100 + BCD2BIN(yr)) - 1900; 335 tm->tm_year = (yr100 * 100 + BCD2BIN(yr)) - 1900;
332 336
@@ -375,14 +379,14 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
375 writeb(BIN2BCD(tm->tm_mday), rtc->regbase + RDAYCNT); 379 writeb(BIN2BCD(tm->tm_mday), rtc->regbase + RDAYCNT);
376 writeb(BIN2BCD(tm->tm_mon + 1), rtc->regbase + RMONCNT); 380 writeb(BIN2BCD(tm->tm_mon + 1), rtc->regbase + RMONCNT);
377 381
378#ifdef CONFIG_CPU_SH3 382 if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) {
379 year = tm->tm_year % 100; 383 year = (BIN2BCD((tm->tm_year + 1900) / 100) << 8) |
380 writeb(BIN2BCD(year), rtc->regbase + RYRCNT); 384 BIN2BCD(tm->tm_year % 100);
381#else 385 writew(year, rtc->regbase + RYRCNT);
382 year = (BIN2BCD((tm->tm_year + 1900) / 100) << 8) | 386 } else {
383 BIN2BCD(tm->tm_year % 100); 387 year = tm->tm_year % 100;
384 writew(year, rtc->regbase + RYRCNT); 388 writeb(BIN2BCD(year), rtc->regbase + RYRCNT);
385#endif 389 }
386 390
387 /* Start RTC */ 391 /* Start RTC */
388 tmp = readb(rtc->regbase + RCR2); 392 tmp = readb(rtc->regbase + RCR2);
@@ -589,6 +593,17 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev)
589 goto err_badmap; 593 goto err_badmap;
590 } 594 }
591 595
596 rtc->capabilities = RTC_DEF_CAPABILITIES;
597 if (pdev->dev.platform_data) {
598 struct sh_rtc_platform_info *pinfo = pdev->dev.platform_data;
599
600 /*
601 * Some CPUs have special capabilities in addition to the
602 * default set. Add those in here.
603 */
604 rtc->capabilities |= pinfo->capabilities;
605 }
606
592 platform_set_drvdata(pdev, rtc); 607 platform_set_drvdata(pdev, rtc);
593 608
594 return 0; 609 return 0;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index aeda526824..d427daeef5 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -53,6 +53,7 @@
53#include <linux/genhd.h> 53#include <linux/genhd.h>
54#include <linux/hdreg.h> 54#include <linux/hdreg.h>
55#include <linux/interrupt.h> 55#include <linux/interrupt.h>
56#include <linux/log2.h>
56#include <asm/ccwdev.h> 57#include <asm/ccwdev.h>
57#include <linux/workqueue.h> 58#include <linux/workqueue.h>
58#include <asm/debug.h> 59#include <asm/debug.h>
@@ -456,7 +457,7 @@ dasd_free_chunk(struct list_head *chunk_list, void *mem)
456static inline int 457static inline int
457dasd_check_blocksize(int bsize) 458dasd_check_blocksize(int bsize)
458{ 459{
459 if (bsize < 512 || bsize > 4096 || (bsize & (bsize - 1)) != 0) 460 if (bsize < 512 || bsize > 4096 || !is_power_of_2(bsize))
460 return -EMEDIUMTYPE; 461 return -EMEDIUMTYPE;
461 return 0; 462 return 0;
462} 463}
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 0fbacc8b10..f231bc21b1 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -230,7 +230,7 @@ static int xpram_make_request(struct request_queue *q, struct bio *bio)
230 } 230 }
231 } 231 }
232 set_bit(BIO_UPTODATE, &bio->bi_flags); 232 set_bit(BIO_UPTODATE, &bio->bi_flags);
233 bio_end_io(bio, 0); 233 bio_endio(bio, 0);
234 return 0; 234 return 0;
235fail: 235fail:
236 bio_io_error(bio); 236 bio_io_error(bio);
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 6000bdee40..0e1f35c9ed 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -667,6 +667,9 @@ raw3215_probe (struct ccw_device *cdev)
667 struct raw3215_info *raw; 667 struct raw3215_info *raw;
668 int line; 668 int line;
669 669
670 /* Console is special. */
671 if (raw3215[0] && (cdev->dev.driver_data == raw3215[0]))
672 return 0;
670 raw = kmalloc(sizeof(struct raw3215_info) + 673 raw = kmalloc(sizeof(struct raw3215_info) +
671 RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA); 674 RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA);
672 if (raw == NULL) 675 if (raw == NULL)
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index fd3479119e..0b040557db 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -22,6 +22,7 @@
22#include <asm/ebcdic.h> 22#include <asm/ebcdic.h>
23 23
24#include "raw3270.h" 24#include "raw3270.h"
25#include "tty3270.h"
25#include "ctrlchar.h" 26#include "ctrlchar.h"
26 27
27#define CON3270_OUTPUT_BUFFER_SIZE 1024 28#define CON3270_OUTPUT_BUFFER_SIZE 1024
@@ -507,8 +508,6 @@ con3270_write(struct console *co, const char *str, unsigned int count)
507 spin_unlock_irqrestore(&cp->view.lock,flags); 508 spin_unlock_irqrestore(&cp->view.lock,flags);
508} 509}
509 510
510extern struct tty_driver *tty3270_driver;
511
512static struct tty_driver * 511static struct tty_driver *
513con3270_device(struct console *c, int *index) 512con3270_device(struct console *c, int *index)
514{ 513{
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index fa62e69440..25629b92de 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -93,6 +93,7 @@ static volatile enum sclp_mask_state_t {
93#define SCLP_RETRY_INTERVAL 30 93#define SCLP_RETRY_INTERVAL 30
94 94
95static void sclp_process_queue(void); 95static void sclp_process_queue(void);
96static void __sclp_make_read_req(void);
96static int sclp_init_mask(int calculate); 97static int sclp_init_mask(int calculate);
97static int sclp_init(void); 98static int sclp_init(void);
98 99
@@ -115,7 +116,6 @@ sclp_service_call(sclp_cmdw_t command, void *sccb)
115 return 0; 116 return 0;
116} 117}
117 118
118static inline void __sclp_make_read_req(void);
119 119
120static void 120static void
121__sclp_queue_read_req(void) 121__sclp_queue_read_req(void)
@@ -318,8 +318,7 @@ sclp_read_cb(struct sclp_req *req, void *data)
318} 318}
319 319
320/* Prepare read event data request. Called while sclp_lock is locked. */ 320/* Prepare read event data request. Called while sclp_lock is locked. */
321static inline void 321static void __sclp_make_read_req(void)
322__sclp_make_read_req(void)
323{ 322{
324 struct sccb_header *sccb; 323 struct sccb_header *sccb;
325 324
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 9f244c591e..da25f8e241 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -708,16 +708,22 @@ static void tape_3590_med_state_set(struct tape_device *device,
708 708
709 c_info = &TAPE_3590_CRYPT_INFO(device); 709 c_info = &TAPE_3590_CRYPT_INFO(device);
710 710
711 if (sense->masst == MSENSE_UNASSOCIATED) { 711 DBF_EVENT(6, "medium state: %x:%x\n", sense->macst, sense->masst);
712 switch (sense->macst) {
713 case 0x04:
714 case 0x05:
715 case 0x06:
712 tape_med_state_set(device, MS_UNLOADED); 716 tape_med_state_set(device, MS_UNLOADED);
713 TAPE_3590_CRYPT_INFO(device).medium_status = 0; 717 TAPE_3590_CRYPT_INFO(device).medium_status = 0;
714 return; 718 return;
715 } 719 case 0x08:
716 if (sense->masst != MSENSE_ASSOCIATED_MOUNT) { 720 case 0x09:
717 PRINT_ERR("Unknown medium state: %x\n", sense->masst); 721 tape_med_state_set(device, MS_LOADED);
722 break;
723 default:
724 tape_med_state_set(device, MS_UNKNOWN);
718 return; 725 return;
719 } 726 }
720 tape_med_state_set(device, MS_LOADED);
721 c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK; 727 c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK;
722 if (sense->flags & MSENSE_CRYPT_MASK) { 728 if (sense->flags & MSENSE_CRYPT_MASK) {
723 PRINT_INFO("Medium is encrypted (%04x)\n", sense->flags); 729 PRINT_INFO("Medium is encrypted (%04x)\n", sense->flags);
@@ -835,15 +841,17 @@ tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb)
835 /* Probably result of halt ssch */ 841 /* Probably result of halt ssch */
836 return TAPE_IO_PENDING; 842 return TAPE_IO_PENDING;
837 else if (irb->scsw.dstat == 0x85) 843 else if (irb->scsw.dstat == 0x85)
838 /* Device Ready -> check medium state */ 844 /* Device Ready */
839 tape_3590_schedule_work(device, TO_MSEN); 845 DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id);
840 else if (irb->scsw.dstat & DEV_STAT_ATTENTION) 846 else if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
841 tape_3590_schedule_work(device, TO_READ_ATTMSG); 847 tape_3590_schedule_work(device, TO_READ_ATTMSG);
842 else { 848 } else {
843 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id); 849 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
844 PRINT_WARN("Unsolicited IRQ (Device End) caught.\n"); 850 PRINT_WARN("Unsolicited IRQ (Device End) caught.\n");
845 tape_dump_sense(device, NULL, irb); 851 tape_dump_sense(device, NULL, irb);
846 } 852 }
853 /* check medium state */
854 tape_3590_schedule_work(device, TO_MSEN);
847 return TAPE_IO_SUCCESS; 855 return TAPE_IO_SUCCESS;
848} 856}
849 857
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index bc33068b9c..70b1980a08 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -25,8 +25,8 @@
25#include <asm/ebcdic.h> 25#include <asm/ebcdic.h>
26#include <asm/uaccess.h> 26#include <asm/uaccess.h>
27 27
28
29#include "raw3270.h" 28#include "raw3270.h"
29#include "tty3270.h"
30#include "keyboard.h" 30#include "keyboard.h"
31 31
32#define TTY3270_CHAR_BUF_SIZE 256 32#define TTY3270_CHAR_BUF_SIZE 256
@@ -1338,8 +1338,11 @@ tty3270_getpar(struct tty3270 *tp, int ix)
1338static void 1338static void
1339tty3270_goto_xy(struct tty3270 *tp, int cx, int cy) 1339tty3270_goto_xy(struct tty3270 *tp, int cx, int cy)
1340{ 1340{
1341 tp->cx = min_t(int, tp->view.cols - 1, max_t(int, 0, cx)); 1341 int max_cx = max(0, cx);
1342 cy = min_t(int, tp->view.rows - 3, max_t(int, 0, cy)); 1342 int max_cy = max(0, cy);
1343
1344 tp->cx = min_t(int, tp->view.cols - 1, max_cx);
1345 cy = min_t(int, tp->view.rows - 3, max_cy);
1343 if (cy != tp->cy) { 1346 if (cy != tp->cy) {
1344 tty3270_convert_line(tp, tp->cy); 1347 tty3270_convert_line(tp, tp->cy);
1345 tp->cy = cy; 1348 tp->cy = cy;
diff --git a/drivers/s390/char/tty3270.h b/drivers/s390/char/tty3270.h
new file mode 100644
index 0000000000..799da57f03
--- /dev/null
+++ b/drivers/s390/char/tty3270.h
@@ -0,0 +1,16 @@
1/*
2 * drivers/s390/char/tty3270.h
3 *
4 * Copyright IBM Corp. 2007
5 *
6 */
7
8#ifndef __DRIVERS_S390_CHAR_TTY3270_H
9#define __DRIVERS_S390_CHAR_TTY3270_H
10
11#include <linux/tty.h>
12#include <linux/tty_driver.h>
13
14extern struct tty_driver *tty3270_driver;
15
16#endif /* __DRIVERS_S390_CHAR_TTY3270_H */
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 680b9b58b8..6f40facb1c 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -66,8 +66,8 @@ static int __diag288(enum vmwdt_func func, unsigned int timeout,
66 "0: la %0,0\n" 66 "0: la %0,0\n"
67 "1:\n" 67 "1:\n"
68 EX_TABLE(0b,1b) 68 EX_TABLE(0b,1b)
69 : "=d" (err) : "d"(__func), "d"(__timeout), 69 : "+d" (err) : "d"(__func), "d"(__timeout),
70 "d"(__cmdp), "d"(__cmdl), "0" (-EINVAL) : "1", "cc"); 70 "d"(__cmdp), "d"(__cmdl) : "1", "cc");
71 return err; 71 return err;
72} 72}
73 73
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 3712ede167..7073daf779 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -141,15 +141,16 @@ static int memcpy_real(void *dest, unsigned long src, size_t count)
141 141
142 if (count == 0) 142 if (count == 0)
143 return 0; 143 return 0;
144 flags = __raw_local_irq_stnsm(0xf8); /* switch to real mode */ 144 flags = __raw_local_irq_stnsm(0xf8UL); /* switch to real mode */
145 asm volatile ( 145 asm volatile (
146 "0: mvcle %1,%2,0x0\n" 146 "0: mvcle %1,%2,0x0\n"
147 "1: jo 0b\n" 147 "1: jo 0b\n"
148 " lhi %0,0x0\n" 148 " lhi %0,0x0\n"
149 "2:\n" 149 "2:\n"
150 EX_TABLE(1b,2b) 150 EX_TABLE(1b,2b)
151 : "+d" (rc) 151 : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
152 : "d" (_dest), "d" (_src), "d" (_len1), "d" (_len2) 152 "+d" (_len2), "=m" (*((long*)dest))
153 : "m" (*((long*)src))
153 : "cc", "memory"); 154 : "cc", "memory");
154 __raw_local_irq_ssm(flags); 155 __raw_local_irq_ssm(flags);
155 156
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index b0a18f5176..5baa517c3b 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -44,8 +44,7 @@ ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
44 return 0; 44 return 0;
45} 45}
46static int 46static int
47ccwgroup_uevent (struct device *dev, char **envp, int num_envp, char *buffer, 47ccwgroup_uevent (struct device *dev, struct kobj_uevent_env *env)
48 int buffer_size)
49{ 48{
50 /* TODO */ 49 /* TODO */
51 return 0; 50 return 0;
@@ -152,16 +151,24 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
152 return 0; 151 return 0;
153} 152}
154 153
155/* 154/**
156 * try to add a new ccwgroup device for one driver 155 * ccwgroup_create() - create and register a ccw group device
157 * argc and argv[] are a list of bus_id's of devices 156 * @root: parent device for the new device
158 * belonging to the driver. 157 * @creator_id: identifier of creating driver
158 * @cdrv: ccw driver of slave devices
159 * @argc: number of slave devices
160 * @argv: bus ids of slave devices
161 *
162 * Create and register a new ccw group device as a child of @root. Slave
163 * devices are obtained from the list of bus ids given in @argv[] and must all
164 * belong to @cdrv.
165 * Returns:
166 * %0 on success and an error code on failure.
167 * Context:
168 * non-atomic
159 */ 169 */
160int 170int ccwgroup_create(struct device *root, unsigned int creator_id,
161ccwgroup_create(struct device *root, 171 struct ccw_driver *cdrv, int argc, char *argv[])
162 unsigned int creator_id,
163 struct ccw_driver *cdrv,
164 int argc, char *argv[])
165{ 172{
166 struct ccwgroup_device *gdev; 173 struct ccwgroup_device *gdev;
167 int i; 174 int i;
@@ -390,8 +397,13 @@ static struct bus_type ccwgroup_bus_type = {
390 .remove = ccwgroup_remove, 397 .remove = ccwgroup_remove,
391}; 398};
392 399
393int 400/**
394ccwgroup_driver_register (struct ccwgroup_driver *cdriver) 401 * ccwgroup_driver_register() - register a ccw group driver
402 * @cdriver: driver to be registered
403 *
404 * This function is mainly a wrapper around driver_register().
405 */
406int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
395{ 407{
396 /* register our new driver with the core */ 408 /* register our new driver with the core */
397 cdriver->driver.bus = &ccwgroup_bus_type; 409 cdriver->driver.bus = &ccwgroup_bus_type;
@@ -406,8 +418,13 @@ __ccwgroup_match_all(struct device *dev, void *data)
406 return 1; 418 return 1;
407} 419}
408 420
409void 421/**
410ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver) 422 * ccwgroup_driver_unregister() - deregister a ccw group driver
423 * @cdriver: driver to be deregistered
424 *
425 * This function is mainly a wrapper around driver_unregister().
426 */
427void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
411{ 428{
412 struct device *dev; 429 struct device *dev;
413 430
@@ -427,8 +444,16 @@ ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
427 driver_unregister(&cdriver->driver); 444 driver_unregister(&cdriver->driver);
428} 445}
429 446
430int 447/**
431ccwgroup_probe_ccwdev(struct ccw_device *cdev) 448 * ccwgroup_probe_ccwdev() - probe function for slave devices
449 * @cdev: ccw device to be probed
450 *
451 * This is a dummy probe function for ccw devices that are slave devices in
452 * a ccw group device.
453 * Returns:
454 * always %0
455 */
456int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
432{ 457{
433 return 0; 458 return 0;
434} 459}
@@ -452,8 +477,15 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
452 return NULL; 477 return NULL;
453} 478}
454 479
455void 480/**
456ccwgroup_remove_ccwdev(struct ccw_device *cdev) 481 * ccwgroup_remove_ccwdev() - remove function for slave devices
482 * @cdev: ccw device to be removed
483 *
484 * This is a remove function for ccw devices that are slave devices in a ccw
485 * group device. It sets the ccw device offline and also deregisters the
486 * embedding ccw group device.
487 */
488void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
457{ 489{
458 struct ccwgroup_device *gdev; 490 struct ccwgroup_device *gdev;
459 491
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 920dd71e64..42c1f4659a 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -14,7 +14,7 @@
14#include <linux/jiffies.h> 14#include <linux/jiffies.h>
15#include <linux/wait.h> 15#include <linux/wait.h>
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <asm/errno.h> 17#include <linux/errno.h>
18#include <asm/chpid.h> 18#include <asm/chpid.h>
19#include <asm/sclp.h> 19#include <asm/sclp.h>
20 20
@@ -55,7 +55,7 @@ static wait_queue_head_t cfg_wait_queue;
55/* Return channel_path struct for given chpid. */ 55/* Return channel_path struct for given chpid. */
56static inline struct channel_path *chpid_to_chp(struct chp_id chpid) 56static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
57{ 57{
58 return css[chpid.cssid]->chps[chpid.id]; 58 return channel_subsystems[chpid.cssid]->chps[chpid.id];
59} 59}
60 60
61/* Set vary state for given chpid. */ 61/* Set vary state for given chpid. */
@@ -86,7 +86,7 @@ u8 chp_get_sch_opm(struct subchannel *sch)
86 86
87 opm = 0; 87 opm = 0;
88 chp_id_init(&chpid); 88 chp_id_init(&chpid);
89 for (i=0; i < 8; i++) { 89 for (i = 0; i < 8; i++) {
90 opm <<= 1; 90 opm <<= 1;
91 chpid.id = sch->schib.pmcw.chpid[i]; 91 chpid.id = sch->schib.pmcw.chpid[i];
92 if (chp_get_status(chpid) != 0) 92 if (chp_get_status(chpid) != 0)
@@ -118,7 +118,7 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
118 118
119 sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid, 119 sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
120 chpid.id); 120 chpid.id);
121 CIO_TRACE_EVENT( 2, dbf_text); 121 CIO_TRACE_EVENT(2, dbf_text);
122 122
123 status = chp_get_status(chpid); 123 status = chp_get_status(chpid);
124 if (!on && !status) { 124 if (!on && !status) {
@@ -140,9 +140,11 @@ static ssize_t chp_measurement_chars_read(struct kobject *kobj,
140 char *buf, loff_t off, size_t count) 140 char *buf, loff_t off, size_t count)
141{ 141{
142 struct channel_path *chp; 142 struct channel_path *chp;
143 struct device *device;
143 unsigned int size; 144 unsigned int size;
144 145
145 chp = to_channelpath(container_of(kobj, struct device, kobj)); 146 device = container_of(kobj, struct device, kobj);
147 chp = to_channelpath(device);
146 if (!chp->cmg_chars) 148 if (!chp->cmg_chars)
147 return 0; 149 return 0;
148 150
@@ -193,9 +195,11 @@ static ssize_t chp_measurement_read(struct kobject *kobj,
193{ 195{
194 struct channel_path *chp; 196 struct channel_path *chp;
195 struct channel_subsystem *css; 197 struct channel_subsystem *css;
198 struct device *device;
196 unsigned int size; 199 unsigned int size;
197 200
198 chp = to_channelpath(container_of(kobj, struct device, kobj)); 201 device = container_of(kobj, struct device, kobj);
202 chp = to_channelpath(device);
199 css = to_css(chp->dev.parent); 203 css = to_css(chp->dev.parent);
200 204
201 size = sizeof(struct cmg_entry); 205 size = sizeof(struct cmg_entry);
@@ -353,7 +357,7 @@ static ssize_t chp_shared_show(struct device *dev,
353 357
354static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); 358static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
355 359
356static struct attribute * chp_attrs[] = { 360static struct attribute *chp_attrs[] = {
357 &dev_attr_status.attr, 361 &dev_attr_status.attr,
358 &dev_attr_configure.attr, 362 &dev_attr_configure.attr,
359 &dev_attr_type.attr, 363 &dev_attr_type.attr,
@@ -395,7 +399,7 @@ int chp_new(struct chp_id chpid)
395 /* fill in status, etc. */ 399 /* fill in status, etc. */
396 chp->chpid = chpid; 400 chp->chpid = chpid;
397 chp->state = 1; 401 chp->state = 1;
398 chp->dev.parent = &css[chpid.cssid]->device; 402 chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
399 chp->dev.release = chp_release; 403 chp->dev.release = chp_release;
400 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid, 404 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid,
401 chpid.id); 405 chpid.id);
@@ -430,18 +434,18 @@ int chp_new(struct chp_id chpid)
430 device_unregister(&chp->dev); 434 device_unregister(&chp->dev);
431 goto out_free; 435 goto out_free;
432 } 436 }
433 mutex_lock(&css[chpid.cssid]->mutex); 437 mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
434 if (css[chpid.cssid]->cm_enabled) { 438 if (channel_subsystems[chpid.cssid]->cm_enabled) {
435 ret = chp_add_cmg_attr(chp); 439 ret = chp_add_cmg_attr(chp);
436 if (ret) { 440 if (ret) {
437 sysfs_remove_group(&chp->dev.kobj, &chp_attr_group); 441 sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
438 device_unregister(&chp->dev); 442 device_unregister(&chp->dev);
439 mutex_unlock(&css[chpid.cssid]->mutex); 443 mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
440 goto out_free; 444 goto out_free;
441 } 445 }
442 } 446 }
443 css[chpid.cssid]->chps[chpid.id] = chp; 447 channel_subsystems[chpid.cssid]->chps[chpid.id] = chp;
444 mutex_unlock(&css[chpid.cssid]->mutex); 448 mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
445 return ret; 449 return ret;
446out_free: 450out_free:
447 kfree(chp); 451 kfree(chp);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index f2708d65be..4690534515 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -619,6 +619,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
619 sch->schib.pmcw.ena = 0; 619 sch->schib.pmcw.ena = 0;
620 if ((sch->lpm & (sch->lpm - 1)) != 0) 620 if ((sch->lpm & (sch->lpm - 1)) != 0)
621 sch->schib.pmcw.mp = 1; /* multipath mode */ 621 sch->schib.pmcw.mp = 1; /* multipath mode */
622 /* clean up possible residual cmf stuff */
623 sch->schib.pmcw.mme = 0;
624 sch->schib.pmcw.mbfc = 0;
625 sch->schib.pmcw.mbi = 0;
626 sch->schib.mba = 0;
622 return 0; 627 return 0;
623out: 628out:
624 if (!cio_is_console(schid)) 629 if (!cio_is_console(schid))
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 34a796913b..b960f66843 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -45,7 +45,8 @@
45#include "ioasm.h" 45#include "ioasm.h"
46#include "chsc.h" 46#include "chsc.h"
47 47
48/* parameter to enable cmf during boot, possible uses are: 48/*
49 * parameter to enable cmf during boot, possible uses are:
49 * "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be 50 * "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
50 * used on any subchannel 51 * used on any subchannel
51 * "s390cmf=<num>" -- enable cmf and allocate enough memory to measure 52 * "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
@@ -73,18 +74,20 @@ enum cmb_index {
73 * enum cmb_format - types of supported measurement block formats 74 * enum cmb_format - types of supported measurement block formats
74 * 75 *
75 * @CMF_BASIC: traditional channel measurement blocks supported 76 * @CMF_BASIC: traditional channel measurement blocks supported
76 * by all machines that we run on 77 * by all machines that we run on
77 * @CMF_EXTENDED: improved format that was introduced with the z990 78 * @CMF_EXTENDED: improved format that was introduced with the z990
78 * machine 79 * machine
79 * @CMF_AUTODETECT: default: use extended format when running on a z990 80 * @CMF_AUTODETECT: default: use extended format when running on a machine
80 * or later machine, otherwise fall back to basic format 81 * supporting extended format, otherwise fall back to
81 **/ 82 * basic format
83 */
82enum cmb_format { 84enum cmb_format {
83 CMF_BASIC, 85 CMF_BASIC,
84 CMF_EXTENDED, 86 CMF_EXTENDED,
85 CMF_AUTODETECT = -1, 87 CMF_AUTODETECT = -1,
86}; 88};
87/** 89
90/*
88 * format - actual format for all measurement blocks 91 * format - actual format for all measurement blocks
89 * 92 *
90 * The format module parameter can be set to a value of 0 (zero) 93 * The format module parameter can be set to a value of 0 (zero)
@@ -105,20 +108,21 @@ module_param(format, bool, 0444);
105 * either with the help of a special pool or with kmalloc 108 * either with the help of a special pool or with kmalloc
106 * @free: free memory allocated with @alloc 109 * @free: free memory allocated with @alloc
107 * @set: enable or disable measurement 110 * @set: enable or disable measurement
111 * @read: read a measurement entry at an index
108 * @readall: read a measurement block in a common format 112 * @readall: read a measurement block in a common format
109 * @reset: clear the data in the associated measurement block and 113 * @reset: clear the data in the associated measurement block and
110 * reset its time stamp 114 * reset its time stamp
111 * @align: align an allocated block so that the hardware can use it 115 * @align: align an allocated block so that the hardware can use it
112 */ 116 */
113struct cmb_operations { 117struct cmb_operations {
114 int (*alloc) (struct ccw_device*); 118 int (*alloc) (struct ccw_device *);
115 void(*free) (struct ccw_device*); 119 void (*free) (struct ccw_device *);
116 int (*set) (struct ccw_device*, u32); 120 int (*set) (struct ccw_device *, u32);
117 u64 (*read) (struct ccw_device*, int); 121 u64 (*read) (struct ccw_device *, int);
118 int (*readall)(struct ccw_device*, struct cmbdata *); 122 int (*readall)(struct ccw_device *, struct cmbdata *);
119 void (*reset) (struct ccw_device*); 123 void (*reset) (struct ccw_device *);
120 void * (*align) (void *); 124 void *(*align) (void *);
121 125/* private: */
122 struct attribute_group *attr_group; 126 struct attribute_group *attr_group;
123}; 127};
124static struct cmb_operations *cmbops; 128static struct cmb_operations *cmbops;
@@ -130,9 +134,11 @@ struct cmb_data {
130 unsigned long long last_update; /* when last_block was updated */ 134 unsigned long long last_update; /* when last_block was updated */
131}; 135};
132 136
133/* our user interface is designed in terms of nanoseconds, 137/*
138 * Our user interface is designed in terms of nanoseconds,
134 * while the hardware measures total times in its own 139 * while the hardware measures total times in its own
135 * unit.*/ 140 * unit.
141 */
136static inline u64 time_to_nsec(u32 value) 142static inline u64 time_to_nsec(u32 value)
137{ 143{
138 return ((u64)value) * 128000ull; 144 return ((u64)value) * 128000ull;
@@ -159,12 +165,13 @@ static inline u64 time_to_avg_nsec(u32 value, u32 count)
159 return ret; 165 return ret;
160} 166}
161 167
162/* activate or deactivate the channel monitor. When area is NULL, 168/*
169 * Activate or deactivate the channel monitor. When area is NULL,
163 * the monitor is deactivated. The channel monitor needs to 170 * the monitor is deactivated. The channel monitor needs to
164 * be active in order to measure subchannels, which also need 171 * be active in order to measure subchannels, which also need
165 * to be enabled. */ 172 * to be enabled.
166static inline void 173 */
167cmf_activate(void *area, unsigned int onoff) 174static inline void cmf_activate(void *area, unsigned int onoff)
168{ 175{
169 register void * __gpr2 asm("2"); 176 register void * __gpr2 asm("2");
170 register long __gpr1 asm("1"); 177 register long __gpr1 asm("1");
@@ -175,8 +182,8 @@ cmf_activate(void *area, unsigned int onoff)
175 asm("schm" : : "d" (__gpr2), "d" (__gpr1) ); 182 asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
176} 183}
177 184
178static int 185static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
179set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address) 186 unsigned long address)
180{ 187{
181 int ret; 188 int ret;
182 int retry; 189 int retry;
@@ -466,6 +473,7 @@ static void cmf_generic_reset(struct ccw_device *cdev)
466 * 473 *
467 * @mem: pointer to CMBs (only in basic measurement mode) 474 * @mem: pointer to CMBs (only in basic measurement mode)
468 * @list: contains a linked list of all subchannels 475 * @list: contains a linked list of all subchannels
476 * @num_channels: number of channels to be measured
469 * @lock: protect concurrent access to @mem and @list 477 * @lock: protect concurrent access to @mem and @list
470 */ 478 */
471struct cmb_area { 479struct cmb_area {
@@ -481,28 +489,36 @@ static struct cmb_area cmb_area = {
481 .num_channels = 1024, 489 .num_channels = 1024,
482}; 490};
483 491
484
485/* ****** old style CMB handling ********/ 492/* ****** old style CMB handling ********/
486 493
487/** int maxchannels 494/*
488 *
489 * Basic channel measurement blocks are allocated in one contiguous 495 * Basic channel measurement blocks are allocated in one contiguous
490 * block of memory, which can not be moved as long as any channel 496 * block of memory, which can not be moved as long as any channel
491 * is active. Therefore, a maximum number of subchannels needs to 497 * is active. Therefore, a maximum number of subchannels needs to
492 * be defined somewhere. This is a module parameter, defaulting to 498 * be defined somewhere. This is a module parameter, defaulting to
493 * a resonable value of 1024, or 32 kb of memory. 499 * a resonable value of 1024, or 32 kb of memory.
494 * Current kernels don't allow kmalloc with more than 128kb, so the 500 * Current kernels don't allow kmalloc with more than 128kb, so the
495 * maximum is 4096 501 * maximum is 4096.
496 */ 502 */
497 503
498module_param_named(maxchannels, cmb_area.num_channels, uint, 0444); 504module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
499 505
500/** 506/**
501 * struct cmb - basic channel measurement block 507 * struct cmb - basic channel measurement block
508 * @ssch_rsch_count: number of ssch and rsch
509 * @sample_count: number of samples
510 * @device_connect_time: time of device connect
511 * @function_pending_time: time of function pending
512 * @device_disconnect_time: time of device disconnect
513 * @control_unit_queuing_time: time of control unit queuing
514 * @device_active_only_time: time of device active only
515 * @reserved: unused in basic measurement mode
516 *
517 * The measurement block as used by the hardware. The fields are described
518 * further in z/Architecture Principles of Operation, chapter 17.
502 * 519 *
503 * cmb as used by the hardware the fields are described in z/Architecture 520 * The cmb area made up from these blocks must be a contiguous array and may
504 * Principles of Operation, chapter 17. 521 * not be reallocated or freed.
505 * The area to be a contiguous array and may not be reallocated or freed.
506 * Only one cmb area can be present in the system. 522 * Only one cmb area can be present in the system.
507 */ 523 */
508struct cmb { 524struct cmb {
@@ -516,8 +532,9 @@ struct cmb {
516 u32 reserved[2]; 532 u32 reserved[2];
517}; 533};
518 534
519/* insert a single device into the cmb_area list 535/*
520 * called with cmb_area.lock held from alloc_cmb 536 * Insert a single device into the cmb_area list.
537 * Called with cmb_area.lock held from alloc_cmb.
521 */ 538 */
522static int alloc_cmb_single(struct ccw_device *cdev, 539static int alloc_cmb_single(struct ccw_device *cdev,
523 struct cmb_data *cmb_data) 540 struct cmb_data *cmb_data)
@@ -532,9 +549,11 @@ static int alloc_cmb_single(struct ccw_device *cdev,
532 goto out; 549 goto out;
533 } 550 }
534 551
535 /* find first unused cmb in cmb_area.mem. 552 /*
536 * this is a little tricky: cmb_area.list 553 * Find first unused cmb in cmb_area.mem.
537 * remains sorted by ->cmb->hw_data pointers */ 554 * This is a little tricky: cmb_area.list
555 * remains sorted by ->cmb->hw_data pointers.
556 */
538 cmb = cmb_area.mem; 557 cmb = cmb_area.mem;
539 list_for_each_entry(node, &cmb_area.list, cmb_list) { 558 list_for_each_entry(node, &cmb_area.list, cmb_list) {
540 struct cmb_data *data; 559 struct cmb_data *data;
@@ -558,8 +577,7 @@ out:
558 return ret; 577 return ret;
559} 578}
560 579
561static int 580static int alloc_cmb(struct ccw_device *cdev)
562alloc_cmb (struct ccw_device *cdev)
563{ 581{
564 int ret; 582 int ret;
565 struct cmb *mem; 583 struct cmb *mem;
@@ -670,7 +688,7 @@ static int set_cmb(struct ccw_device *cdev, u32 mme)
670 return set_schib_wait(cdev, mme, 0, offset); 688 return set_schib_wait(cdev, mme, 0, offset);
671} 689}
672 690
673static u64 read_cmb (struct ccw_device *cdev, int index) 691static u64 read_cmb(struct ccw_device *cdev, int index)
674{ 692{
675 struct cmb *cmb; 693 struct cmb *cmb;
676 u32 val; 694 u32 val;
@@ -720,7 +738,7 @@ out:
720 return ret; 738 return ret;
721} 739}
722 740
723static int readall_cmb (struct ccw_device *cdev, struct cmbdata *data) 741static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
724{ 742{
725 struct cmb *cmb; 743 struct cmb *cmb;
726 struct cmb_data *cmb_data; 744 struct cmb_data *cmb_data;
@@ -793,14 +811,25 @@ static struct cmb_operations cmbops_basic = {
793 .align = align_cmb, 811 .align = align_cmb,
794 .attr_group = &cmf_attr_group, 812 .attr_group = &cmf_attr_group,
795}; 813};
796 814
797/* ******** extended cmb handling ********/ 815/* ******** extended cmb handling ********/
798 816
799/** 817/**
800 * struct cmbe - extended channel measurement block 818 * struct cmbe - extended channel measurement block
819 * @ssch_rsch_count: number of ssch and rsch
820 * @sample_count: number of samples
821 * @device_connect_time: time of device connect
822 * @function_pending_time: time of function pending
823 * @device_disconnect_time: time of device disconnect
824 * @control_unit_queuing_time: time of control unit queuing
825 * @device_active_only_time: time of device active only
826 * @device_busy_time: time of device busy
827 * @initial_command_response_time: initial command response time
828 * @reserved: unused
801 * 829 *
802 * cmb as used by the hardware, may be in any 64 bit physical location, 830 * The measurement block as used by the hardware. May be in any 64 bit physical
803 * the fields are described in z/Architecture Principles of Operation, 831 * location.
832 * The fields are described further in z/Architecture Principles of Operation,
804 * third edition, chapter 17. 833 * third edition, chapter 17.
805 */ 834 */
806struct cmbe { 835struct cmbe {
@@ -816,10 +845,12 @@ struct cmbe {
816 u32 reserved[7]; 845 u32 reserved[7];
817}; 846};
818 847
819/* kmalloc only guarantees 8 byte alignment, but we need cmbe 848/*
849 * kmalloc only guarantees 8 byte alignment, but we need cmbe
820 * pointers to be naturally aligned. Make sure to allocate 850 * pointers to be naturally aligned. Make sure to allocate
821 * enough space for two cmbes */ 851 * enough space for two cmbes.
822static inline struct cmbe* cmbe_align(struct cmbe *c) 852 */
853static inline struct cmbe *cmbe_align(struct cmbe *c)
823{ 854{
824 unsigned long addr; 855 unsigned long addr;
825 addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) & 856 addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) &
@@ -827,7 +858,7 @@ static inline struct cmbe* cmbe_align(struct cmbe *c)
827 return (struct cmbe*)addr; 858 return (struct cmbe*)addr;
828} 859}
829 860
830static int alloc_cmbe (struct ccw_device *cdev) 861static int alloc_cmbe(struct ccw_device *cdev)
831{ 862{
832 struct cmbe *cmbe; 863 struct cmbe *cmbe;
833 struct cmb_data *cmb_data; 864 struct cmb_data *cmb_data;
@@ -873,7 +904,7 @@ out_free:
873 return ret; 904 return ret;
874} 905}
875 906
876static void free_cmbe (struct ccw_device *cdev) 907static void free_cmbe(struct ccw_device *cdev)
877{ 908{
878 struct cmb_data *cmb_data; 909 struct cmb_data *cmb_data;
879 910
@@ -912,7 +943,7 @@ static int set_cmbe(struct ccw_device *cdev, u32 mme)
912} 943}
913 944
914 945
915static u64 read_cmbe (struct ccw_device *cdev, int index) 946static u64 read_cmbe(struct ccw_device *cdev, int index)
916{ 947{
917 struct cmbe *cmb; 948 struct cmbe *cmb;
918 struct cmb_data *cmb_data; 949 struct cmb_data *cmb_data;
@@ -970,7 +1001,7 @@ out:
970 return ret; 1001 return ret;
971} 1002}
972 1003
973static int readall_cmbe (struct ccw_device *cdev, struct cmbdata *data) 1004static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
974{ 1005{
975 struct cmbe *cmb; 1006 struct cmbe *cmb;
976 struct cmb_data *cmb_data; 1007 struct cmb_data *cmb_data;
@@ -1047,17 +1078,16 @@ static struct cmb_operations cmbops_extended = {
1047 .align = align_cmbe, 1078 .align = align_cmbe,
1048 .attr_group = &cmf_attr_group_ext, 1079 .attr_group = &cmf_attr_group_ext,
1049}; 1080};
1050
1051 1081
1052static ssize_t 1082static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
1053cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
1054{ 1083{
1055 return sprintf(buf, "%lld\n", 1084 return sprintf(buf, "%lld\n",
1056 (unsigned long long) cmf_read(to_ccwdev(dev), idx)); 1085 (unsigned long long) cmf_read(to_ccwdev(dev), idx));
1057} 1086}
1058 1087
1059static ssize_t 1088static ssize_t cmb_show_avg_sample_interval(struct device *dev,
1060cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr, char *buf) 1089 struct device_attribute *attr,
1090 char *buf)
1061{ 1091{
1062 struct ccw_device *cdev; 1092 struct ccw_device *cdev;
1063 long interval; 1093 long interval;
@@ -1079,8 +1109,9 @@ cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr,
1079 return sprintf(buf, "%ld\n", interval); 1109 return sprintf(buf, "%ld\n", interval);
1080} 1110}
1081 1111
1082static ssize_t 1112static ssize_t cmb_show_avg_utilization(struct device *dev,
1083cmb_show_avg_utilization(struct device *dev, struct device_attribute *attr, char *buf) 1113 struct device_attribute *attr,
1114 char *buf)
1084{ 1115{
1085 struct cmbdata data; 1116 struct cmbdata data;
1086 u64 utilization; 1117 u64 utilization;
@@ -1112,14 +1143,16 @@ cmb_show_avg_utilization(struct device *dev, struct device_attribute *attr, char
1112} 1143}
1113 1144
1114#define cmf_attr(name) \ 1145#define cmf_attr(name) \
1115static ssize_t show_ ## name (struct device * dev, struct device_attribute *attr, char * buf) \ 1146static ssize_t show_##name(struct device *dev, \
1116{ return cmb_show_attr((dev), buf, cmb_ ## name); } \ 1147 struct device_attribute *attr, char *buf) \
1117static DEVICE_ATTR(name, 0444, show_ ## name, NULL); 1148{ return cmb_show_attr((dev), buf, cmb_##name); } \
1149static DEVICE_ATTR(name, 0444, show_##name, NULL);
1118 1150
1119#define cmf_attr_avg(name) \ 1151#define cmf_attr_avg(name) \
1120static ssize_t show_avg_ ## name (struct device * dev, struct device_attribute *attr, char * buf) \ 1152static ssize_t show_avg_##name(struct device *dev, \
1121{ return cmb_show_attr((dev), buf, cmb_ ## name); } \ 1153 struct device_attribute *attr, char *buf) \
1122static DEVICE_ATTR(avg_ ## name, 0444, show_avg_ ## name, NULL); 1154{ return cmb_show_attr((dev), buf, cmb_##name); } \
1155static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);
1123 1156
1124cmf_attr(ssch_rsch_count); 1157cmf_attr(ssch_rsch_count);
1125cmf_attr(sample_count); 1158cmf_attr(sample_count);
@@ -1131,7 +1164,8 @@ cmf_attr_avg(device_active_only_time);
1131cmf_attr_avg(device_busy_time); 1164cmf_attr_avg(device_busy_time);
1132cmf_attr_avg(initial_command_response_time); 1165cmf_attr_avg(initial_command_response_time);
1133 1166
1134static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval, NULL); 1167static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
1168 NULL);
1135static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL); 1169static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
1136 1170
1137static struct attribute *cmf_attributes[] = { 1171static struct attribute *cmf_attributes[] = {
@@ -1172,12 +1206,16 @@ static struct attribute_group cmf_attr_group_ext = {
1172 .attrs = cmf_attributes_ext, 1206 .attrs = cmf_attributes_ext,
1173}; 1207};
1174 1208
1175static ssize_t cmb_enable_show(struct device *dev, struct device_attribute *attr, char *buf) 1209static ssize_t cmb_enable_show(struct device *dev,
1210 struct device_attribute *attr,
1211 char *buf)
1176{ 1212{
1177 return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 1 : 0); 1213 return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 1 : 0);
1178} 1214}
1179 1215
1180static ssize_t cmb_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t c) 1216static ssize_t cmb_enable_store(struct device *dev,
1217 struct device_attribute *attr, const char *buf,
1218 size_t c)
1181{ 1219{
1182 struct ccw_device *cdev; 1220 struct ccw_device *cdev;
1183 int ret; 1221 int ret;
@@ -1202,9 +1240,16 @@ static ssize_t cmb_enable_store(struct device *dev, struct device_attribute *att
1202 1240
1203DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store); 1241DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
1204 1242
1205/* enable_cmf/disable_cmf: module interface for cmf (de)activation */ 1243/**
1206int 1244 * enable_cmf() - switch on the channel measurement for a specific device
1207enable_cmf(struct ccw_device *cdev) 1245 * @cdev: The ccw device to be enabled
1246 *
1247 * Returns %0 for success or a negative error value.
1248 *
1249 * Context:
1250 * non-atomic
1251 */
1252int enable_cmf(struct ccw_device *cdev)
1208{ 1253{
1209 int ret; 1254 int ret;
1210 1255
@@ -1225,8 +1270,16 @@ enable_cmf(struct ccw_device *cdev)
1225 return ret; 1270 return ret;
1226} 1271}
1227 1272
1228int 1273/**
1229disable_cmf(struct ccw_device *cdev) 1274 * disable_cmf() - switch off the channel measurement for a specific device
1275 * @cdev: The ccw device to be disabled
1276 *
1277 * Returns %0 for success or a negative error value.
1278 *
1279 * Context:
1280 * non-atomic
1281 */
1282int disable_cmf(struct ccw_device *cdev)
1230{ 1283{
1231 int ret; 1284 int ret;
1232 1285
@@ -1238,14 +1291,32 @@ disable_cmf(struct ccw_device *cdev)
1238 return ret; 1291 return ret;
1239} 1292}
1240 1293
1241u64 1294/**
1242cmf_read(struct ccw_device *cdev, int index) 1295 * cmf_read() - read one value from the current channel measurement block
1296 * @cdev: the channel to be read
1297 * @index: the index of the value to be read
1298 *
1299 * Returns the value read or %0 if the value cannot be read.
1300 *
1301 * Context:
1302 * any
1303 */
1304u64 cmf_read(struct ccw_device *cdev, int index)
1243{ 1305{
1244 return cmbops->read(cdev, index); 1306 return cmbops->read(cdev, index);
1245} 1307}
1246 1308
1247int 1309/**
1248cmf_readall(struct ccw_device *cdev, struct cmbdata *data) 1310 * cmf_readall() - read the current channel measurement block
1311 * @cdev: the channel to be read
1312 * @data: a pointer to a data block that will be filled
1313 *
1314 * Returns %0 on success, a negative error value otherwise.
1315 *
1316 * Context:
1317 * any
1318 */
1319int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
1249{ 1320{
1250 return cmbops->readall(cdev, data); 1321 return cmbops->readall(cdev, data);
1251} 1322}
@@ -1257,15 +1328,16 @@ int cmf_reenable(struct ccw_device *cdev)
1257 return cmbops->set(cdev, 2); 1328 return cmbops->set(cdev, 2);
1258} 1329}
1259 1330
1260static int __init 1331static int __init init_cmf(void)
1261init_cmf(void)
1262{ 1332{
1263 char *format_string; 1333 char *format_string;
1264 char *detect_string = "parameter"; 1334 char *detect_string = "parameter";
1265 1335
1266 /* We cannot really autoprobe this. If the user did not give a parameter, 1336 /*
1267 see if we are running on z990 or up, otherwise fall back to basic mode. */ 1337 * If the user did not give a parameter, see if we are running on a
1268 1338 * machine supporting extended measurement blocks, otherwise fall back
1339 * to basic mode.
1340 */
1269 if (format == CMF_AUTODETECT) { 1341 if (format == CMF_AUTODETECT) {
1270 if (!css_characteristics_avail || 1342 if (!css_characteristics_avail ||
1271 !css_general_characteristics.ext_mb) { 1343 !css_general_characteristics.ext_mb) {
@@ -1284,7 +1356,7 @@ init_cmf(void)
1284 cmbops = &cmbops_basic; 1356 cmbops = &cmbops_basic;
1285 break; 1357 break;
1286 case CMF_EXTENDED: 1358 case CMF_EXTENDED:
1287 format_string = "extended"; 1359 format_string = "extended";
1288 cmbops = &cmbops_extended; 1360 cmbops = &cmbops_extended;
1289 break; 1361 break;
1290 default: 1362 default:
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 5635e656c1..5d83dd4714 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -13,6 +13,7 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/reboot.h>
16 17
17#include "css.h" 18#include "css.h"
18#include "cio.h" 19#include "cio.h"
@@ -27,7 +28,7 @@ int css_init_done = 0;
27static int need_reprobe = 0; 28static int need_reprobe = 0;
28static int max_ssid = 0; 29static int max_ssid = 0;
29 30
30struct channel_subsystem *css[__MAX_CSSID + 1]; 31struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
31 32
32int css_characteristics_avail = 0; 33int css_characteristics_avail = 0;
33 34
@@ -177,7 +178,7 @@ static int css_register_subchannel(struct subchannel *sch)
177 int ret; 178 int ret;
178 179
179 /* Initialize the subchannel structure */ 180 /* Initialize the subchannel structure */
180 sch->dev.parent = &css[0]->device; 181 sch->dev.parent = &channel_subsystems[0]->device;
181 sch->dev.bus = &css_bus_type; 182 sch->dev.bus = &css_bus_type;
182 sch->dev.release = &css_subchannel_release; 183 sch->dev.release = &css_subchannel_release;
183 sch->dev.groups = subch_attr_groups; 184 sch->dev.groups = subch_attr_groups;
@@ -606,30 +607,55 @@ static int __init setup_css(int nr)
606{ 607{
607 u32 tod_high; 608 u32 tod_high;
608 int ret; 609 int ret;
610 struct channel_subsystem *css;
609 611
610 memset(css[nr], 0, sizeof(struct channel_subsystem)); 612 css = channel_subsystems[nr];
611 css[nr]->pseudo_subchannel = 613 memset(css, 0, sizeof(struct channel_subsystem));
612 kzalloc(sizeof(*css[nr]->pseudo_subchannel), GFP_KERNEL); 614 css->pseudo_subchannel =
613 if (!css[nr]->pseudo_subchannel) 615 kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
616 if (!css->pseudo_subchannel)
614 return -ENOMEM; 617 return -ENOMEM;
615 css[nr]->pseudo_subchannel->dev.parent = &css[nr]->device; 618 css->pseudo_subchannel->dev.parent = &css->device;
616 css[nr]->pseudo_subchannel->dev.release = css_subchannel_release; 619 css->pseudo_subchannel->dev.release = css_subchannel_release;
617 sprintf(css[nr]->pseudo_subchannel->dev.bus_id, "defunct"); 620 sprintf(css->pseudo_subchannel->dev.bus_id, "defunct");
618 ret = cio_create_sch_lock(css[nr]->pseudo_subchannel); 621 ret = cio_create_sch_lock(css->pseudo_subchannel);
619 if (ret) { 622 if (ret) {
620 kfree(css[nr]->pseudo_subchannel); 623 kfree(css->pseudo_subchannel);
621 return ret; 624 return ret;
622 } 625 }
623 mutex_init(&css[nr]->mutex); 626 mutex_init(&css->mutex);
624 css[nr]->valid = 1; 627 css->valid = 1;
625 css[nr]->cssid = nr; 628 css->cssid = nr;
626 sprintf(css[nr]->device.bus_id, "css%x", nr); 629 sprintf(css->device.bus_id, "css%x", nr);
627 css[nr]->device.release = channel_subsystem_release; 630 css->device.release = channel_subsystem_release;
628 tod_high = (u32) (get_clock() >> 32); 631 tod_high = (u32) (get_clock() >> 32);
629 css_generate_pgid(css[nr], tod_high); 632 css_generate_pgid(css, tod_high);
630 return 0; 633 return 0;
631} 634}
632 635
636static int css_reboot_event(struct notifier_block *this,
637 unsigned long event,
638 void *ptr)
639{
640 int ret, i;
641
642 ret = NOTIFY_DONE;
643 for (i = 0; i <= __MAX_CSSID; i++) {
644 struct channel_subsystem *css;
645
646 css = channel_subsystems[i];
647 if (css->cm_enabled)
648 if (chsc_secm(css, 0))
649 ret = NOTIFY_BAD;
650 }
651
652 return ret;
653}
654
655static struct notifier_block css_reboot_notifier = {
656 .notifier_call = css_reboot_event,
657};
658
633/* 659/*
634 * Now that the driver core is running, we can setup our channel subsystem. 660 * Now that the driver core is running, we can setup our channel subsystem.
635 * The struct subchannel's are created during probing (except for the 661 * The struct subchannel's are created during probing (except for the
@@ -670,51 +696,63 @@ init_channel_subsystem (void)
670 } 696 }
671 /* Setup css structure. */ 697 /* Setup css structure. */
672 for (i = 0; i <= __MAX_CSSID; i++) { 698 for (i = 0; i <= __MAX_CSSID; i++) {
673 css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL); 699 struct channel_subsystem *css;
674 if (!css[i]) { 700
701 css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
702 if (!css) {
675 ret = -ENOMEM; 703 ret = -ENOMEM;
676 goto out_unregister; 704 goto out_unregister;
677 } 705 }
706 channel_subsystems[i] = css;
678 ret = setup_css(i); 707 ret = setup_css(i);
679 if (ret) 708 if (ret)
680 goto out_free; 709 goto out_free;
681 ret = device_register(&css[i]->device); 710 ret = device_register(&css->device);
682 if (ret) 711 if (ret)
683 goto out_free_all; 712 goto out_free_all;
684 if (css_characteristics_avail && 713 if (css_characteristics_avail &&
685 css_chsc_characteristics.secm) { 714 css_chsc_characteristics.secm) {
686 ret = device_create_file(&css[i]->device, 715 ret = device_create_file(&css->device,
687 &dev_attr_cm_enable); 716 &dev_attr_cm_enable);
688 if (ret) 717 if (ret)
689 goto out_device; 718 goto out_device;
690 } 719 }
691 ret = device_register(&css[i]->pseudo_subchannel->dev); 720 ret = device_register(&css->pseudo_subchannel->dev);
692 if (ret) 721 if (ret)
693 goto out_file; 722 goto out_file;
694 } 723 }
724 ret = register_reboot_notifier(&css_reboot_notifier);
725 if (ret)
726 goto out_pseudo;
695 css_init_done = 1; 727 css_init_done = 1;
696 728
697 ctl_set_bit(6, 28); 729 ctl_set_bit(6, 28);
698 730
699 for_each_subchannel(__init_channel_subsystem, NULL); 731 for_each_subchannel(__init_channel_subsystem, NULL);
700 return 0; 732 return 0;
733out_pseudo:
734 device_unregister(&channel_subsystems[i]->pseudo_subchannel->dev);
701out_file: 735out_file:
702 device_remove_file(&css[i]->device, &dev_attr_cm_enable); 736 device_remove_file(&channel_subsystems[i]->device,
737 &dev_attr_cm_enable);
703out_device: 738out_device:
704 device_unregister(&css[i]->device); 739 device_unregister(&channel_subsystems[i]->device);
705out_free_all: 740out_free_all:
706 kfree(css[i]->pseudo_subchannel->lock); 741 kfree(channel_subsystems[i]->pseudo_subchannel->lock);
707 kfree(css[i]->pseudo_subchannel); 742 kfree(channel_subsystems[i]->pseudo_subchannel);
708out_free: 743out_free:
709 kfree(css[i]); 744 kfree(channel_subsystems[i]);
710out_unregister: 745out_unregister:
711 while (i > 0) { 746 while (i > 0) {
747 struct channel_subsystem *css;
748
712 i--; 749 i--;
713 device_unregister(&css[i]->pseudo_subchannel->dev); 750 css = channel_subsystems[i];
751 device_unregister(&css->pseudo_subchannel->dev);
714 if (css_characteristics_avail && css_chsc_characteristics.secm) 752 if (css_characteristics_avail && css_chsc_characteristics.secm)
715 device_remove_file(&css[i]->device, 753 device_remove_file(&css->device,
716 &dev_attr_cm_enable); 754 &dev_attr_cm_enable);
717 device_unregister(&css[i]->device); 755 device_unregister(&css->device);
718 } 756 }
719out_bus: 757out_bus:
720 bus_unregister(&css_bus_type); 758 bus_unregister(&css_bus_type);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 5d65e83ca6..81215ef324 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -167,7 +167,7 @@ struct channel_subsystem {
167#define to_css(dev) container_of(dev, struct channel_subsystem, device) 167#define to_css(dev) container_of(dev, struct channel_subsystem, device)
168 168
169extern struct bus_type css_bus_type; 169extern struct bus_type css_bus_type;
170extern struct channel_subsystem *css[]; 170extern struct channel_subsystem *channel_subsystems[];
171 171
172/* Some helper functions for disconnected state. */ 172/* Some helper functions for disconnected state. */
173int device_is_disconnected(struct subchannel *); 173int device_is_disconnected(struct subchannel *);
@@ -191,6 +191,5 @@ int sch_is_pseudo_sch(struct subchannel *);
191 191
192extern struct workqueue_struct *slow_path_wq; 192extern struct workqueue_struct *slow_path_wq;
193 193
194int subchannel_add_files (struct device *);
195extern struct attribute_group *subch_attr_groups[]; 194extern struct attribute_group *subch_attr_groups[];
196#endif 195#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e44d92eac8..7ee57f084a 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -21,6 +21,7 @@
21#include <asm/ccwdev.h> 21#include <asm/ccwdev.h>
22#include <asm/cio.h> 22#include <asm/cio.h>
23#include <asm/param.h> /* HZ */ 23#include <asm/param.h> /* HZ */
24#include <asm/cmb.h>
24 25
25#include "cio.h" 26#include "cio.h"
26#include "cio_debug.h" 27#include "cio_debug.h"
@@ -78,49 +79,38 @@ static int snprint_alias(char *buf, size_t size,
78 79
79/* Set up environment variables for ccw device uevent. Return 0 on success, 80/* Set up environment variables for ccw device uevent. Return 0 on success,
80 * non-zero otherwise. */ 81 * non-zero otherwise. */
81static int ccw_uevent(struct device *dev, char **envp, int num_envp, 82static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
82 char *buffer, int buffer_size)
83{ 83{
84 struct ccw_device *cdev = to_ccwdev(dev); 84 struct ccw_device *cdev = to_ccwdev(dev);
85 struct ccw_device_id *id = &(cdev->id); 85 struct ccw_device_id *id = &(cdev->id);
86 int i = 0;
87 int len = 0;
88 int ret; 86 int ret;
89 char modalias_buf[30]; 87 char modalias_buf[30];
90 88
91 /* CU_TYPE= */ 89 /* CU_TYPE= */
92 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, 90 ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
93 "CU_TYPE=%04X", id->cu_type);
94 if (ret) 91 if (ret)
95 return ret; 92 return ret;
96 93
97 /* CU_MODEL= */ 94 /* CU_MODEL= */
98 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, 95 ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
99 "CU_MODEL=%02X", id->cu_model);
100 if (ret) 96 if (ret)
101 return ret; 97 return ret;
102 98
103 /* The next two can be zero, that's ok for us */ 99 /* The next two can be zero, that's ok for us */
104 /* DEV_TYPE= */ 100 /* DEV_TYPE= */
105 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, 101 ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
106 "DEV_TYPE=%04X", id->dev_type);
107 if (ret) 102 if (ret)
108 return ret; 103 return ret;
109 104
110 /* DEV_MODEL= */ 105 /* DEV_MODEL= */
111 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, 106 ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
112 "DEV_MODEL=%02X", id->dev_model);
113 if (ret) 107 if (ret)
114 return ret; 108 return ret;
115 109
116 /* MODALIAS= */ 110 /* MODALIAS= */
117 snprint_alias(modalias_buf, sizeof(modalias_buf), id, ""); 111 snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
118 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, 112 ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
119 "MODALIAS=%s", modalias_buf); 113 return ret;
120 if (ret)
121 return ret;
122 envp[i] = NULL;
123 return 0;
124} 114}
125 115
126struct bus_type ccw_bus_type; 116struct bus_type ccw_bus_type;
@@ -357,8 +347,18 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
357 cdev->private->dev_id.devno); 347 cdev->private->dev_id.devno);
358} 348}
359 349
360int 350/**
361ccw_device_set_offline(struct ccw_device *cdev) 351 * ccw_device_set_offline() - disable a ccw device for I/O
352 * @cdev: target ccw device
353 *
354 * This function calls the driver's set_offline() function for @cdev, if
355 * given, and then disables @cdev.
356 * Returns:
357 * %0 on success and a negative error value on failure.
358 * Context:
359 * enabled, ccw device lock not held
360 */
361int ccw_device_set_offline(struct ccw_device *cdev)
362{ 362{
363 int ret; 363 int ret;
364 364
@@ -396,8 +396,19 @@ ccw_device_set_offline(struct ccw_device *cdev)
396 return ret; 396 return ret;
397} 397}
398 398
399int 399/**
400ccw_device_set_online(struct ccw_device *cdev) 400 * ccw_device_set_online() - enable a ccw device for I/O
401 * @cdev: target ccw device
402 *
403 * This function first enables @cdev and then calls the driver's set_online()
404 * function for @cdev, if given. If set_online() returns an error, @cdev is
405 * disabled again.
406 * Returns:
407 * %0 on success and a negative error value on failure.
408 * Context:
409 * enabled, ccw device lock not held
410 */
411int ccw_device_set_online(struct ccw_device *cdev)
401{ 412{
402 int ret; 413 int ret;
403 414
@@ -947,8 +958,7 @@ out:
947 wake_up(&ccw_device_init_wq); 958 wake_up(&ccw_device_init_wq);
948} 959}
949 960
950void 961static void ccw_device_call_sch_unregister(struct work_struct *work)
951ccw_device_call_sch_unregister(struct work_struct *work)
952{ 962{
953 struct ccw_device_private *priv; 963 struct ccw_device_private *priv;
954 struct ccw_device *cdev; 964 struct ccw_device *cdev;
@@ -1101,6 +1111,7 @@ io_subchannel_probe (struct subchannel *sch)
1101 * device, e.g. the console. 1111 * device, e.g. the console.
1102 */ 1112 */
1103 cdev = sch->dev.driver_data; 1113 cdev = sch->dev.driver_data;
1114 cdev->dev.groups = ccwdev_attr_groups;
1104 device_initialize(&cdev->dev); 1115 device_initialize(&cdev->dev);
1105 ccw_device_register(cdev); 1116 ccw_device_register(cdev);
1106 /* 1117 /*
@@ -1326,8 +1337,19 @@ __ccwdev_check_busid(struct device *dev, void *id)
1326} 1337}
1327 1338
1328 1339
1329struct ccw_device * 1340/**
1330get_ccwdev_by_busid(struct ccw_driver *cdrv, const char *bus_id) 1341 * get_ccwdev_by_busid() - obtain device from a bus id
1342 * @cdrv: driver the device is owned by
1343 * @bus_id: bus id of the device to be searched
1344 *
1345 * This function searches all devices owned by @cdrv for a device with a bus
1346 * id matching @bus_id.
1347 * Returns:
1348 * If a match is found, its reference count of the found device is increased
1349 * and it is returned; else %NULL is returned.
1350 */
1351struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
1352 const char *bus_id)
1331{ 1353{
1332 struct device *dev; 1354 struct device *dev;
1333 struct device_driver *drv; 1355 struct device_driver *drv;
@@ -1401,16 +1423,34 @@ ccw_device_remove (struct device *dev)
1401 return 0; 1423 return 0;
1402} 1424}
1403 1425
1426static void ccw_device_shutdown(struct device *dev)
1427{
1428 struct ccw_device *cdev;
1429
1430 cdev = to_ccwdev(dev);
1431 if (cdev->drv && cdev->drv->shutdown)
1432 cdev->drv->shutdown(cdev);
1433 disable_cmf(cdev);
1434}
1435
1404struct bus_type ccw_bus_type = { 1436struct bus_type ccw_bus_type = {
1405 .name = "ccw", 1437 .name = "ccw",
1406 .match = ccw_bus_match, 1438 .match = ccw_bus_match,
1407 .uevent = ccw_uevent, 1439 .uevent = ccw_uevent,
1408 .probe = ccw_device_probe, 1440 .probe = ccw_device_probe,
1409 .remove = ccw_device_remove, 1441 .remove = ccw_device_remove,
1442 .shutdown = ccw_device_shutdown,
1410}; 1443};
1411 1444
1412int 1445/**
1413ccw_driver_register (struct ccw_driver *cdriver) 1446 * ccw_driver_register() - register a ccw driver
1447 * @cdriver: driver to be registered
1448 *
1449 * This function is mainly a wrapper around driver_register().
1450 * Returns:
1451 * %0 on success and a negative error value on failure.
1452 */
1453int ccw_driver_register(struct ccw_driver *cdriver)
1414{ 1454{
1415 struct device_driver *drv = &cdriver->driver; 1455 struct device_driver *drv = &cdriver->driver;
1416 1456
@@ -1420,8 +1460,13 @@ ccw_driver_register (struct ccw_driver *cdriver)
1420 return driver_register(drv); 1460 return driver_register(drv);
1421} 1461}
1422 1462
1423void 1463/**
1424ccw_driver_unregister (struct ccw_driver *cdriver) 1464 * ccw_driver_unregister() - deregister a ccw driver
1465 * @cdriver: driver to be deregistered
1466 *
1467 * This function is mainly a wrapper around driver_unregister().
1468 */
1469void ccw_driver_unregister(struct ccw_driver *cdriver)
1425{ 1470{
1426 driver_unregister(&cdriver->driver); 1471 driver_unregister(&cdriver->driver);
1427} 1472}
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index b66338b765..0d40896004 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -80,7 +80,6 @@ void io_subchannel_recog_done(struct ccw_device *cdev);
80int ccw_device_cancel_halt_clear(struct ccw_device *); 80int ccw_device_cancel_halt_clear(struct ccw_device *);
81 81
82void ccw_device_do_unreg_rereg(struct work_struct *); 82void ccw_device_do_unreg_rereg(struct work_struct *);
83void ccw_device_call_sch_unregister(struct work_struct *);
84void ccw_device_move_to_orphanage(struct work_struct *); 83void ccw_device_move_to_orphanage(struct work_struct *);
85int ccw_device_is_orphan(struct ccw_device *); 84int ccw_device_is_orphan(struct ccw_device *);
86 85
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 8633dc5376..8867443b80 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -446,7 +446,8 @@ static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
446 if (cdev->private->pgid[last].inf.ps.state1 == 446 if (cdev->private->pgid[last].inf.ps.state1 ==
447 SNID_STATE1_RESET) 447 SNID_STATE1_RESET)
448 /* No previous pgid found */ 448 /* No previous pgid found */
449 memcpy(&cdev->private->pgid[0], &css[0]->global_pgid, 449 memcpy(&cdev->private->pgid[0],
450 &channel_subsystems[0]->global_pgid,
450 sizeof(struct pgid)); 451 sizeof(struct pgid));
451 else 452 else
452 /* Use existing pgid */ 453 /* Use existing pgid */
@@ -543,51 +544,6 @@ ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
543} 544}
544 545
545 546
546static void
547ccw_device_nopath_notify(struct work_struct *work)
548{
549 struct ccw_device_private *priv;
550 struct ccw_device *cdev;
551 struct subchannel *sch;
552 int ret;
553 unsigned long flags;
554
555 priv = container_of(work, struct ccw_device_private, kick_work);
556 cdev = priv->cdev;
557 spin_lock_irqsave(cdev->ccwlock, flags);
558 sch = to_subchannel(cdev->dev.parent);
559 /* Extra sanity. */
560 if (sch->lpm)
561 goto out_unlock;
562 if (sch->driver && sch->driver->notify) {
563 spin_unlock_irqrestore(cdev->ccwlock, flags);
564 ret = sch->driver->notify(&sch->dev, CIO_NO_PATH);
565 spin_lock_irqsave(cdev->ccwlock, flags);
566 } else
567 ret = 0;
568 if (!ret) {
569 if (get_device(&sch->dev)) {
570 /* Driver doesn't want to keep device. */
571 cio_disable_subchannel(sch);
572 if (get_device(&cdev->dev)) {
573 PREPARE_WORK(&cdev->private->kick_work,
574 ccw_device_call_sch_unregister);
575 queue_work(ccw_device_work,
576 &cdev->private->kick_work);
577 } else
578 put_device(&sch->dev);
579 }
580 } else {
581 cio_disable_subchannel(sch);
582 ccw_device_set_timeout(cdev, 0);
583 cdev->private->flags.fake_irb = 0;
584 cdev->private->state = DEV_STATE_DISCONNECTED;
585 wake_up(&cdev->private->wait_q);
586 }
587out_unlock:
588 spin_unlock_irqrestore(cdev->ccwlock, flags);
589}
590
591void 547void
592ccw_device_verify_done(struct ccw_device *cdev, int err) 548ccw_device_verify_done(struct ccw_device *cdev, int err)
593{ 549{
@@ -631,12 +587,9 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
631 default: 587 default:
632 /* Reset oper notify indication after verify error. */ 588 /* Reset oper notify indication after verify error. */
633 cdev->private->flags.donotify = 0; 589 cdev->private->flags.donotify = 0;
634 if (cdev->online) { 590 if (cdev->online)
635 PREPARE_WORK(&cdev->private->kick_work, 591 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
636 ccw_device_nopath_notify); 592 else
637 queue_work(ccw_device_notify_work,
638 &cdev->private->kick_work);
639 } else
640 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 593 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
641 break; 594 break;
642 } 595 }
@@ -690,11 +643,7 @@ ccw_device_disband_done(struct ccw_device *cdev, int err)
690 break; 643 break;
691 default: 644 default:
692 cdev->private->flags.donotify = 0; 645 cdev->private->flags.donotify = 0;
693 if (get_device(&cdev->dev)) { 646 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
694 PREPARE_WORK(&cdev->private->kick_work,
695 ccw_device_call_sch_unregister);
696 queue_work(ccw_device_work, &cdev->private->kick_work);
697 }
698 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 647 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
699 break; 648 break;
700 } 649 }
@@ -765,59 +714,16 @@ ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
765} 714}
766 715
767/* 716/*
768 * Handle not operational event while offline. 717 * Handle not operational event in non-special state.
769 */ 718 */
770static void 719static void ccw_device_generic_notoper(struct ccw_device *cdev,
771ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event) 720 enum dev_event dev_event)
772{ 721{
773 struct subchannel *sch; 722 struct subchannel *sch;
774 723
775 cdev->private->state = DEV_STATE_NOT_OPER; 724 cdev->private->state = DEV_STATE_NOT_OPER;
776 sch = to_subchannel(cdev->dev.parent); 725 sch = to_subchannel(cdev->dev.parent);
777 if (get_device(&cdev->dev)) { 726 css_schedule_eval(sch->schid);
778 PREPARE_WORK(&cdev->private->kick_work,
779 ccw_device_call_sch_unregister);
780 queue_work(ccw_device_work, &cdev->private->kick_work);
781 }
782 wake_up(&cdev->private->wait_q);
783}
784
785/*
786 * Handle not operational event while online.
787 */
788static void
789ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
790{
791 struct subchannel *sch;
792 int ret;
793
794 sch = to_subchannel(cdev->dev.parent);
795 if (sch->driver->notify) {
796 spin_unlock_irq(cdev->ccwlock);
797 ret = sch->driver->notify(&sch->dev,
798 sch->lpm ? CIO_GONE : CIO_NO_PATH);
799 spin_lock_irq(cdev->ccwlock);
800 } else
801 ret = 0;
802 if (ret) {
803 ccw_device_set_timeout(cdev, 0);
804 cdev->private->flags.fake_irb = 0;
805 cdev->private->state = DEV_STATE_DISCONNECTED;
806 wake_up(&cdev->private->wait_q);
807 return;
808 }
809 cdev->private->state = DEV_STATE_NOT_OPER;
810 cio_disable_subchannel(sch);
811 if (sch->schib.scsw.actl != 0) {
812 // FIXME: not-oper indication to device driver ?
813 ccw_device_call_handler(cdev);
814 }
815 if (get_device(&cdev->dev)) {
816 PREPARE_WORK(&cdev->private->kick_work,
817 ccw_device_call_sch_unregister);
818 queue_work(ccw_device_work, &cdev->private->kick_work);
819 }
820 wake_up(&cdev->private->wait_q);
821} 727}
822 728
823/* 729/*
@@ -915,18 +821,9 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
915 cdev->private->state = DEV_STATE_TIMEOUT_KILL; 821 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
916 return; 822 return;
917 } 823 }
918 if (ret == -ENODEV) { 824 if (ret == -ENODEV)
919 struct subchannel *sch; 825 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
920 826 else if (cdev->handler)
921 sch = to_subchannel(cdev->dev.parent);
922 if (!sch->lpm) {
923 PREPARE_WORK(&cdev->private->kick_work,
924 ccw_device_nopath_notify);
925 queue_work(ccw_device_notify_work,
926 &cdev->private->kick_work);
927 } else
928 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
929 } else if (cdev->handler)
930 cdev->handler(cdev, cdev->private->intparm, 827 cdev->handler(cdev, cdev->private->intparm,
931 ERR_PTR(-ETIMEDOUT)); 828 ERR_PTR(-ETIMEDOUT));
932} 829}
@@ -1233,7 +1130,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1233 [DEV_EVENT_VERIFY] = ccw_device_nop, 1130 [DEV_EVENT_VERIFY] = ccw_device_nop,
1234 }, 1131 },
1235 [DEV_STATE_SENSE_PGID] = { 1132 [DEV_STATE_SENSE_PGID] = {
1236 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, 1133 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1237 [DEV_EVENT_INTERRUPT] = ccw_device_sense_pgid_irq, 1134 [DEV_EVENT_INTERRUPT] = ccw_device_sense_pgid_irq,
1238 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 1135 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
1239 [DEV_EVENT_VERIFY] = ccw_device_nop, 1136 [DEV_EVENT_VERIFY] = ccw_device_nop,
@@ -1245,50 +1142,50 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1245 [DEV_EVENT_VERIFY] = ccw_device_nop, 1142 [DEV_EVENT_VERIFY] = ccw_device_nop,
1246 }, 1143 },
1247 [DEV_STATE_OFFLINE] = { 1144 [DEV_STATE_OFFLINE] = {
1248 [DEV_EVENT_NOTOPER] = ccw_device_offline_notoper, 1145 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1249 [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq, 1146 [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
1250 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 1147 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1251 [DEV_EVENT_VERIFY] = ccw_device_nop, 1148 [DEV_EVENT_VERIFY] = ccw_device_nop,
1252 }, 1149 },
1253 [DEV_STATE_VERIFY] = { 1150 [DEV_STATE_VERIFY] = {
1254 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, 1151 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1255 [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq, 1152 [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq,
1256 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 1153 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
1257 [DEV_EVENT_VERIFY] = ccw_device_delay_verify, 1154 [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
1258 }, 1155 },
1259 [DEV_STATE_ONLINE] = { 1156 [DEV_STATE_ONLINE] = {
1260 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, 1157 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1261 [DEV_EVENT_INTERRUPT] = ccw_device_irq, 1158 [DEV_EVENT_INTERRUPT] = ccw_device_irq,
1262 [DEV_EVENT_TIMEOUT] = ccw_device_online_timeout, 1159 [DEV_EVENT_TIMEOUT] = ccw_device_online_timeout,
1263 [DEV_EVENT_VERIFY] = ccw_device_online_verify, 1160 [DEV_EVENT_VERIFY] = ccw_device_online_verify,
1264 }, 1161 },
1265 [DEV_STATE_W4SENSE] = { 1162 [DEV_STATE_W4SENSE] = {
1266 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, 1163 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1267 [DEV_EVENT_INTERRUPT] = ccw_device_w4sense, 1164 [DEV_EVENT_INTERRUPT] = ccw_device_w4sense,
1268 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 1165 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1269 [DEV_EVENT_VERIFY] = ccw_device_online_verify, 1166 [DEV_EVENT_VERIFY] = ccw_device_online_verify,
1270 }, 1167 },
1271 [DEV_STATE_DISBAND_PGID] = { 1168 [DEV_STATE_DISBAND_PGID] = {
1272 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, 1169 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1273 [DEV_EVENT_INTERRUPT] = ccw_device_disband_irq, 1170 [DEV_EVENT_INTERRUPT] = ccw_device_disband_irq,
1274 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 1171 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
1275 [DEV_EVENT_VERIFY] = ccw_device_nop, 1172 [DEV_EVENT_VERIFY] = ccw_device_nop,
1276 }, 1173 },
1277 [DEV_STATE_BOXED] = { 1174 [DEV_STATE_BOXED] = {
1278 [DEV_EVENT_NOTOPER] = ccw_device_offline_notoper, 1175 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1279 [DEV_EVENT_INTERRUPT] = ccw_device_stlck_done, 1176 [DEV_EVENT_INTERRUPT] = ccw_device_stlck_done,
1280 [DEV_EVENT_TIMEOUT] = ccw_device_stlck_done, 1177 [DEV_EVENT_TIMEOUT] = ccw_device_stlck_done,
1281 [DEV_EVENT_VERIFY] = ccw_device_nop, 1178 [DEV_EVENT_VERIFY] = ccw_device_nop,
1282 }, 1179 },
1283 /* states to wait for i/o completion before doing something */ 1180 /* states to wait for i/o completion before doing something */
1284 [DEV_STATE_CLEAR_VERIFY] = { 1181 [DEV_STATE_CLEAR_VERIFY] = {
1285 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, 1182 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1286 [DEV_EVENT_INTERRUPT] = ccw_device_clear_verify, 1183 [DEV_EVENT_INTERRUPT] = ccw_device_clear_verify,
1287 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 1184 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1288 [DEV_EVENT_VERIFY] = ccw_device_nop, 1185 [DEV_EVENT_VERIFY] = ccw_device_nop,
1289 }, 1186 },
1290 [DEV_STATE_TIMEOUT_KILL] = { 1187 [DEV_STATE_TIMEOUT_KILL] = {
1291 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, 1188 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1292 [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq, 1189 [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
1293 [DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout, 1190 [DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout,
1294 [DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME 1191 [DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 14eba854b1..7fd2dadc32 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -25,6 +25,16 @@
25#include "device.h" 25#include "device.h"
26#include "chp.h" 26#include "chp.h"
27 27
28/**
29 * ccw_device_set_options_mask() - set some options and unset the rest
30 * @cdev: device for which the options are to be set
31 * @flags: options to be set
32 *
33 * All flags specified in @flags are set, all flags not specified in @flags
34 * are cleared.
35 * Returns:
36 * %0 on success, -%EINVAL on an invalid flag combination.
37 */
28int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags) 38int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
29{ 39{
30 /* 40 /*
@@ -40,6 +50,15 @@ int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
40 return 0; 50 return 0;
41} 51}
42 52
53/**
54 * ccw_device_set_options() - set some options
55 * @cdev: device for which the options are to be set
56 * @flags: options to be set
57 *
58 * All flags specified in @flags are set, the remainder is left untouched.
59 * Returns:
60 * %0 on success, -%EINVAL if an invalid flag combination would ensue.
61 */
43int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags) 62int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
44{ 63{
45 /* 64 /*
@@ -59,6 +78,13 @@ int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
59 return 0; 78 return 0;
60} 79}
61 80
81/**
82 * ccw_device_clear_options() - clear some options
83 * @cdev: device for which the options are to be cleared
84 * @flags: options to be cleared
85 *
86 * All flags specified in @flags are cleared, the remainder is left untouched.
87 */
62void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags) 88void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
63{ 89{
64 cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0; 90 cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
@@ -67,8 +93,22 @@ void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
67 cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0; 93 cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
68} 94}
69 95
70int 96/**
71ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) 97 * ccw_device_clear() - terminate I/O request processing
98 * @cdev: target ccw device
99 * @intparm: interruption parameter; value is only used if no I/O is
100 * outstanding, otherwise the intparm associated with the I/O request
101 * is returned
102 *
103 * ccw_device_clear() calls csch on @cdev's subchannel.
104 * Returns:
105 * %0 on success,
106 * -%ENODEV on device not operational,
107 * -%EINVAL on invalid device state.
108 * Context:
109 * Interrupts disabled, ccw device lock held
110 */
111int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
72{ 112{
73 struct subchannel *sch; 113 struct subchannel *sch;
74 int ret; 114 int ret;
@@ -89,10 +129,33 @@ ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
89 return ret; 129 return ret;
90} 130}
91 131
92int 132/**
93ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, 133 * ccw_device_start_key() - start a s390 channel program with key
94 unsigned long intparm, __u8 lpm, __u8 key, 134 * @cdev: target ccw device
95 unsigned long flags) 135 * @cpa: logical start address of channel program
136 * @intparm: user specific interruption parameter; will be presented back to
137 * @cdev's interrupt handler. Allows a device driver to associate
138 * the interrupt with a particular I/O request.
139 * @lpm: defines the channel path to be used for a specific I/O request. A
140 * value of 0 will make cio use the opm.
141 * @key: storage key to be used for the I/O
142 * @flags: additional flags; defines the action to be performed for I/O
143 * processing.
144 *
145 * Start a S/390 channel program. When the interrupt arrives, the
146 * IRQ handler is called, either immediately, delayed (dev-end missing,
147 * or sense required) or never (no IRQ handler registered).
148 * Returns:
149 * %0, if the operation was successful;
150 * -%EBUSY, if the device is busy, or status pending;
151 * -%EACCES, if no path specified in @lpm is operational;
152 * -%ENODEV, if the device is not operational.
153 * Context:
154 * Interrupts disabled, ccw device lock held
155 */
156int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
157 unsigned long intparm, __u8 lpm, __u8 key,
158 unsigned long flags)
96{ 159{
97 struct subchannel *sch; 160 struct subchannel *sch;
98 int ret; 161 int ret;
@@ -135,11 +198,38 @@ ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
135 return ret; 198 return ret;
136} 199}
137 200
138 201/**
139int 202 * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
140ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, 203 * @cdev: target ccw device
141 unsigned long intparm, __u8 lpm, __u8 key, 204 * @cpa: logical start address of channel program
142 unsigned long flags, int expires) 205 * @intparm: user specific interruption parameter; will be presented back to
206 * @cdev's interrupt handler. Allows a device driver to associate
207 * the interrupt with a particular I/O request.
208 * @lpm: defines the channel path to be used for a specific I/O request. A
209 * value of 0 will make cio use the opm.
210 * @key: storage key to be used for the I/O
211 * @flags: additional flags; defines the action to be performed for I/O
212 * processing.
213 * @expires: timeout value in jiffies
214 *
215 * Start a S/390 channel program. When the interrupt arrives, the
216 * IRQ handler is called, either immediately, delayed (dev-end missing,
217 * or sense required) or never (no IRQ handler registered).
218 * This function notifies the device driver if the channel program has not
219 * completed during the time specified by @expires. If a timeout occurs, the
220 * channel program is terminated via xsch, hsch or csch, and the device's
221 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
222 * Returns:
223 * %0, if the operation was successful;
224 * -%EBUSY, if the device is busy, or status pending;
225 * -%EACCES, if no path specified in @lpm is operational;
226 * -%ENODEV, if the device is not operational.
227 * Context:
228 * Interrupts disabled, ccw device lock held
229 */
230int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
231 unsigned long intparm, __u8 lpm, __u8 key,
232 unsigned long flags, int expires)
143{ 233{
144 int ret; 234 int ret;
145 235
@@ -152,18 +242,67 @@ ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
152 return ret; 242 return ret;
153} 243}
154 244
155int 245/**
156ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa, 246 * ccw_device_start() - start a s390 channel program
157 unsigned long intparm, __u8 lpm, unsigned long flags) 247 * @cdev: target ccw device
248 * @cpa: logical start address of channel program
249 * @intparm: user specific interruption parameter; will be presented back to
250 * @cdev's interrupt handler. Allows a device driver to associate
251 * the interrupt with a particular I/O request.
252 * @lpm: defines the channel path to be used for a specific I/O request. A
253 * value of 0 will make cio use the opm.
254 * @flags: additional flags; defines the action to be performed for I/O
255 * processing.
256 *
257 * Start a S/390 channel program. When the interrupt arrives, the
258 * IRQ handler is called, either immediately, delayed (dev-end missing,
259 * or sense required) or never (no IRQ handler registered).
260 * Returns:
261 * %0, if the operation was successful;
262 * -%EBUSY, if the device is busy, or status pending;
263 * -%EACCES, if no path specified in @lpm is operational;
264 * -%ENODEV, if the device is not operational.
265 * Context:
266 * Interrupts disabled, ccw device lock held
267 */
268int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
269 unsigned long intparm, __u8 lpm, unsigned long flags)
158{ 270{
159 return ccw_device_start_key(cdev, cpa, intparm, lpm, 271 return ccw_device_start_key(cdev, cpa, intparm, lpm,
160 PAGE_DEFAULT_KEY, flags); 272 PAGE_DEFAULT_KEY, flags);
161} 273}
162 274
163int 275/**
164ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa, 276 * ccw_device_start_timeout() - start a s390 channel program with timeout
165 unsigned long intparm, __u8 lpm, unsigned long flags, 277 * @cdev: target ccw device
166 int expires) 278 * @cpa: logical start address of channel program
279 * @intparm: user specific interruption parameter; will be presented back to
280 * @cdev's interrupt handler. Allows a device driver to associate
281 * the interrupt with a particular I/O request.
282 * @lpm: defines the channel path to be used for a specific I/O request. A
283 * value of 0 will make cio use the opm.
284 * @flags: additional flags; defines the action to be performed for I/O
285 * processing.
286 * @expires: timeout value in jiffies
287 *
288 * Start a S/390 channel program. When the interrupt arrives, the
289 * IRQ handler is called, either immediately, delayed (dev-end missing,
290 * or sense required) or never (no IRQ handler registered).
291 * This function notifies the device driver if the channel program has not
292 * completed during the time specified by @expires. If a timeout occurs, the
293 * channel program is terminated via xsch, hsch or csch, and the device's
294 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
295 * Returns:
296 * %0, if the operation was successful;
297 * -%EBUSY, if the device is busy, or status pending;
298 * -%EACCES, if no path specified in @lpm is operational;
299 * -%ENODEV, if the device is not operational.
300 * Context:
301 * Interrupts disabled, ccw device lock held
302 */
303int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
304 unsigned long intparm, __u8 lpm,
305 unsigned long flags, int expires)
167{ 306{
168 return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, 307 return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
169 PAGE_DEFAULT_KEY, flags, 308 PAGE_DEFAULT_KEY, flags,
@@ -171,8 +310,23 @@ ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
171} 310}
172 311
173 312
174int 313/**
175ccw_device_halt(struct ccw_device *cdev, unsigned long intparm) 314 * ccw_device_halt() - halt I/O request processing
315 * @cdev: target ccw device
316 * @intparm: interruption parameter; value is only used if no I/O is
317 * outstanding, otherwise the intparm associated with the I/O request
318 * is returned
319 *
320 * ccw_device_halt() calls hsch on @cdev's subchannel.
321 * Returns:
322 * %0 on success,
323 * -%ENODEV on device not operational,
324 * -%EINVAL on invalid device state,
325 * -%EBUSY on device busy or interrupt pending.
326 * Context:
327 * Interrupts disabled, ccw device lock held
328 */
329int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
176{ 330{
177 struct subchannel *sch; 331 struct subchannel *sch;
178 int ret; 332 int ret;
@@ -193,8 +347,20 @@ ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
193 return ret; 347 return ret;
194} 348}
195 349
196int 350/**
197ccw_device_resume(struct ccw_device *cdev) 351 * ccw_device_resume() - resume channel program execution
352 * @cdev: target ccw device
353 *
354 * ccw_device_resume() calls rsch on @cdev's subchannel.
355 * Returns:
356 * %0 on success,
357 * -%ENODEV on device not operational,
358 * -%EINVAL on invalid device state,
359 * -%EBUSY on device busy or interrupt pending.
360 * Context:
361 * Interrupts disabled, ccw device lock held
362 */
363int ccw_device_resume(struct ccw_device *cdev)
198{ 364{
199 struct subchannel *sch; 365 struct subchannel *sch;
200 366
@@ -260,11 +426,21 @@ ccw_device_call_handler(struct ccw_device *cdev)
260 return 1; 426 return 1;
261} 427}
262 428
263/* 429/**
264 * Search for CIW command in extended sense data. 430 * ccw_device_get_ciw() - Search for CIW command in extended sense data.
431 * @cdev: ccw device to inspect
432 * @ct: command type to look for
433 *
434 * During SenseID, command information words (CIWs) describing special
435 * commands available to the device may have been stored in the extended
436 * sense data. This function searches for CIWs of a specified command
437 * type in the extended sense data.
438 * Returns:
439 * %NULL if no extended sense data has been stored or if no CIW of the
440 * specified command type could be found,
441 * else a pointer to the CIW of the specified command type.
265 */ 442 */
266struct ciw * 443struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
267ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
268{ 444{
269 int ciw_cnt; 445 int ciw_cnt;
270 446
@@ -276,8 +452,14 @@ ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
276 return NULL; 452 return NULL;
277} 453}
278 454
279__u8 455/**
280ccw_device_get_path_mask(struct ccw_device *cdev) 456 * ccw_device_get_path_mask() - get currently available paths
457 * @cdev: ccw device to be queried
458 * Returns:
459 * %0 if no subchannel for the device is available,
460 * else the mask of currently available paths for the ccw device's subchannel.
461 */
462__u8 ccw_device_get_path_mask(struct ccw_device *cdev)
281{ 463{
282 struct subchannel *sch; 464 struct subchannel *sch;
283 465
@@ -357,8 +539,7 @@ out_unlock:
357 return ret; 539 return ret;
358} 540}
359 541
360void * 542void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
361ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
362{ 543{
363 struct subchannel *sch; 544 struct subchannel *sch;
364 struct chp_id chpid; 545 struct chp_id chpid;
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index d8d479876e..40a3208c7c 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -1024,9 +1024,9 @@ __qdio_outbound_processing(struct qdio_q *q)
1024} 1024}
1025 1025
1026static void 1026static void
1027qdio_outbound_processing(struct qdio_q *q) 1027qdio_outbound_processing(unsigned long q)
1028{ 1028{
1029 __qdio_outbound_processing(q); 1029 __qdio_outbound_processing((struct qdio_q *) q);
1030} 1030}
1031 1031
1032/************************* INBOUND ROUTINES *******************************/ 1032/************************* INBOUND ROUTINES *******************************/
@@ -1449,9 +1449,10 @@ out:
1449} 1449}
1450 1450
1451static void 1451static void
1452tiqdio_inbound_processing(struct qdio_q *q) 1452tiqdio_inbound_processing(unsigned long q)
1453{ 1453{
1454 __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount)); 1454 __tiqdio_inbound_processing((struct qdio_q *) q,
1455 atomic_read(&spare_indicator_usecount));
1455} 1456}
1456 1457
1457static void 1458static void
@@ -1494,9 +1495,9 @@ again:
1494} 1495}
1495 1496
1496static void 1497static void
1497qdio_inbound_processing(struct qdio_q *q) 1498qdio_inbound_processing(unsigned long q)
1498{ 1499{
1499 __qdio_inbound_processing(q); 1500 __qdio_inbound_processing((struct qdio_q *) q);
1500} 1501}
1501 1502
1502/************************* MAIN ROUTINES *******************************/ 1503/************************* MAIN ROUTINES *******************************/
@@ -1760,12 +1761,15 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1760 q->handler=input_handler; 1761 q->handler=input_handler;
1761 q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind; 1762 q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind;
1762 1763
1763 q->tasklet.data=(unsigned long)q;
1764 /* q->is_thinint_q isn't valid at this time, but 1764 /* q->is_thinint_q isn't valid at this time, but
1765 * irq_ptr->is_thinint_irq is */ 1765 * irq_ptr->is_thinint_irq is
1766 q->tasklet.func=(void(*)(unsigned long)) 1766 */
1767 ((irq_ptr->is_thinint_irq)?&tiqdio_inbound_processing: 1767 if (irq_ptr->is_thinint_irq)
1768 &qdio_inbound_processing); 1768 tasklet_init(&q->tasklet, tiqdio_inbound_processing,
1769 (unsigned long) q);
1770 else
1771 tasklet_init(&q->tasklet, qdio_inbound_processing,
1772 (unsigned long) q);
1769 1773
1770 /* actually this is not used for inbound queues. yet. */ 1774 /* actually this is not used for inbound queues. yet. */
1771 atomic_set(&q->busy_siga_counter,0); 1775 atomic_set(&q->busy_siga_counter,0);
@@ -1836,13 +1840,10 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1836 q->last_move_ftc=0; 1840 q->last_move_ftc=0;
1837 q->handler=output_handler; 1841 q->handler=output_handler;
1838 1842
1839 q->tasklet.data=(unsigned long)q; 1843 tasklet_init(&q->tasklet, qdio_outbound_processing,
1840 q->tasklet.func=(void(*)(unsigned long)) 1844 (unsigned long) q);
1841 &qdio_outbound_processing; 1845 setup_timer(&q->timer, qdio_outbound_processing,
1842 q->timer.function=(void(*)(unsigned long)) 1846 (unsigned long) q);
1843 &qdio_outbound_processing;
1844 q->timer.data = (long)q;
1845 init_timer(&q->timer);
1846 1847
1847 atomic_set(&q->busy_siga_counter,0); 1848 atomic_set(&q->busy_siga_counter,0);
1848 q->timing.busy_start=0; 1849 q->timing.busy_start=0;
@@ -3726,7 +3727,7 @@ qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count
3726#endif /* CONFIG_64BIT */ 3727#endif /* CONFIG_64BIT */
3727 } 3728 }
3728 } else { 3729 } else {
3729 QDIO_PRINT_WARN("QDIO performance_stats: write 0 or 1 to this file!\n"); 3730 QDIO_PRINT_ERR("QDIO performance_stats: write 0 or 1 to this file!\n");
3730 return -EINVAL; 3731 return -EINVAL;
3731 } 3732 }
3732 return count; 3733 return count;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 90bd220145..67aaff3e66 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -458,28 +458,22 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
458 * uevent function for AP devices. It sets up a single environment 458 * uevent function for AP devices. It sets up a single environment
459 * variable DEV_TYPE which contains the hardware device type. 459 * variable DEV_TYPE which contains the hardware device type.
460 */ 460 */
461static int ap_uevent (struct device *dev, char **envp, int num_envp, 461static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
462 char *buffer, int buffer_size)
463{ 462{
464 struct ap_device *ap_dev = to_ap_dev(dev); 463 struct ap_device *ap_dev = to_ap_dev(dev);
465 int retval = 0, length = 0, i = 0; 464 int retval = 0;
466 465
467 if (!ap_dev) 466 if (!ap_dev)
468 return -ENODEV; 467 return -ENODEV;
469 468
470 /* Set up DEV_TYPE environment variable. */ 469 /* Set up DEV_TYPE environment variable. */
471 retval = add_uevent_var(envp, num_envp, &i, 470 retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
472 buffer, buffer_size, &length,
473 "DEV_TYPE=%04X", ap_dev->device_type);
474 if (retval) 471 if (retval)
475 return retval; 472 return retval;
476 473
477 /* Add MODALIAS= */ 474 /* Add MODALIAS= */
478 retval = add_uevent_var(envp, num_envp, &i, 475 retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
479 buffer, buffer_size, &length,
480 "MODALIAS=ap:t%02X", ap_dev->device_type);
481 476
482 envp[i] = NULL;
483 return retval; 477 return retval;
484} 478}
485 479
@@ -1231,8 +1225,9 @@ static void ap_reset_domain(void)
1231{ 1225{
1232 int i; 1226 int i;
1233 1227
1234 for (i = 0; i < AP_DEVICES; i++) 1228 if (ap_domain_index != -1)
1235 ap_reset_queue(AP_MKQID(i, ap_domain_index)); 1229 for (i = 0; i < AP_DEVICES; i++)
1230 ap_reset_queue(AP_MKQID(i, ap_domain_index));
1236} 1231}
1237 1232
1238static void ap_reset_all(void) 1233static void ap_reset_all(void)
diff --git a/drivers/s390/crypto/zcrypt_mono.c b/drivers/s390/crypto/zcrypt_mono.c
index 2a9349ad68..44253fdd41 100644
--- a/drivers/s390/crypto/zcrypt_mono.c
+++ b/drivers/s390/crypto/zcrypt_mono.c
@@ -45,7 +45,7 @@
45/** 45/**
46 * The module initialization code. 46 * The module initialization code.
47 */ 47 */
48int __init zcrypt_init(void) 48static int __init zcrypt_init(void)
49{ 49{
50 int rc; 50 int rc;
51 51
@@ -86,7 +86,7 @@ out:
86/** 86/**
87 * The module termination code. 87 * The module termination code.
88 */ 88 */
89void __exit zcrypt_exit(void) 89static void __exit zcrypt_exit(void)
90{ 90{
91 zcrypt_cex2a_exit(); 91 zcrypt_cex2a_exit();
92 zcrypt_pcixcc_exit(); 92 zcrypt_pcixcc_exit();
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 64948788d3..70b9ddc8cf 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -277,7 +277,7 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
277 }; 277 };
278 struct { 278 struct {
279 struct type6_hdr hdr; 279 struct type6_hdr hdr;
280 struct ica_CPRBX cprbx; 280 struct CPRBX cprbx;
281 } __attribute__((packed)) *msg = ap_msg->message; 281 } __attribute__((packed)) *msg = ap_msg->message;
282 282
283 int rcblen = CEIL4(xcRB->request_control_blk_length); 283 int rcblen = CEIL4(xcRB->request_control_blk_length);
@@ -432,14 +432,17 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
432 } 432 }
433 if (service_rc == 8 && service_rs == 770) { 433 if (service_rc == 8 && service_rs == 770) {
434 PDEBUG("Invalid key length on PCIXCC/CEX2C\n"); 434 PDEBUG("Invalid key length on PCIXCC/CEX2C\n");
435 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; 435 return -EINVAL;
436 return -EAGAIN;
437 } 436 }
438 if (service_rc == 8 && service_rs == 783) { 437 if (service_rc == 8 && service_rs == 783) {
439 PDEBUG("Extended bitlengths not enabled on PCIXCC/CEX2C\n"); 438 PDEBUG("Extended bitlengths not enabled on PCIXCC/CEX2C\n");
440 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; 439 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
441 return -EAGAIN; 440 return -EAGAIN;
442 } 441 }
442 if (service_rc == 12 && service_rs == 769) {
443 PDEBUG("Invalid key on PCIXCC/CEX2C\n");
444 return -EINVAL;
445 }
443 PRINTK("Unknown service rc/rs (PCIXCC/CEX2C): %d/%d\n", 446 PRINTK("Unknown service rc/rs (PCIXCC/CEX2C): %d/%d\n",
444 service_rc, service_rs); 447 service_rc, service_rs);
445 zdev->online = 0; 448 zdev->online = 0;
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h
index a78ff307fd..8cb7d7a697 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.h
+++ b/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -28,51 +28,6 @@
28#ifndef _ZCRYPT_PCIXCC_H_ 28#ifndef _ZCRYPT_PCIXCC_H_
29#define _ZCRYPT_PCIXCC_H_ 29#define _ZCRYPT_PCIXCC_H_
30 30
31/**
32 * CPRBX
33 * Note that all shorts and ints are big-endian.
34 * All pointer fields are 16 bytes long, and mean nothing.
35 *
36 * A request CPRB is followed by a request_parameter_block.
37 *
38 * The request (or reply) parameter block is organized thus:
39 * function code
40 * VUD block
41 * key block
42 */
43struct CPRBX {
44 unsigned short cprb_len; /* CPRB length 220 */
45 unsigned char cprb_ver_id; /* CPRB version id. 0x02 */
46 unsigned char pad_000[3]; /* Alignment pad bytes */
47 unsigned char func_id[2]; /* function id 0x5432 */
48 unsigned char cprb_flags[4]; /* Flags */
49 unsigned int req_parml; /* request parameter buffer len */
50 unsigned int req_datal; /* request data buffer */
51 unsigned int rpl_msgbl; /* reply message block length */
52 unsigned int rpld_parml; /* replied parameter block len */
53 unsigned int rpl_datal; /* reply data block len */
54 unsigned int rpld_datal; /* replied data block len */
55 unsigned int req_extbl; /* request extension block len */
56 unsigned char pad_001[4]; /* reserved */
57 unsigned int rpld_extbl; /* replied extension block len */
58 unsigned char req_parmb[16]; /* request parm block 'address' */
59 unsigned char req_datab[16]; /* request data block 'address' */
60 unsigned char rpl_parmb[16]; /* reply parm block 'address' */
61 unsigned char rpl_datab[16]; /* reply data block 'address' */
62 unsigned char req_extb[16]; /* request extension block 'addr'*/
63 unsigned char rpl_extb[16]; /* reply extension block 'addres'*/
64 unsigned short ccp_rtcode; /* server return code */
65 unsigned short ccp_rscode; /* server reason code */
66 unsigned int mac_data_len; /* Mac Data Length */
67 unsigned char logon_id[8]; /* Logon Identifier */
68 unsigned char mac_value[8]; /* Mac Value */
69 unsigned char mac_content_flgs;/* Mac content flag byte */
70 unsigned char pad_002; /* Alignment */
71 unsigned short domain; /* Domain */
72 unsigned char pad_003[12]; /* Domain masks */
73 unsigned char pad_004[36]; /* reserved */
74} __attribute__((packed));
75
76int zcrypt_pcixcc_init(void); 31int zcrypt_pcixcc_init(void);
77void zcrypt_pcixcc_exit(void); 32void zcrypt_pcixcc_exit(void);
78 33
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 1c8f71a598..c0d1c0eb32 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -28,7 +28,7 @@ static void zfcp_ccw_remove(struct ccw_device *);
28static int zfcp_ccw_set_online(struct ccw_device *); 28static int zfcp_ccw_set_online(struct ccw_device *);
29static int zfcp_ccw_set_offline(struct ccw_device *); 29static int zfcp_ccw_set_offline(struct ccw_device *);
30static int zfcp_ccw_notify(struct ccw_device *, int); 30static int zfcp_ccw_notify(struct ccw_device *, int);
31static void zfcp_ccw_shutdown(struct device *); 31static void zfcp_ccw_shutdown(struct ccw_device *);
32 32
33static struct ccw_device_id zfcp_ccw_device_id[] = { 33static struct ccw_device_id zfcp_ccw_device_id[] = {
34 {CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE, 34 {CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
@@ -51,9 +51,7 @@ static struct ccw_driver zfcp_ccw_driver = {
51 .set_online = zfcp_ccw_set_online, 51 .set_online = zfcp_ccw_set_online,
52 .set_offline = zfcp_ccw_set_offline, 52 .set_offline = zfcp_ccw_set_offline,
53 .notify = zfcp_ccw_notify, 53 .notify = zfcp_ccw_notify,
54 .driver = { 54 .shutdown = zfcp_ccw_shutdown,
55 .shutdown = zfcp_ccw_shutdown,
56 },
57}; 55};
58 56
59MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id); 57MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
@@ -277,12 +275,12 @@ zfcp_ccw_register(void)
277 * Makes sure that QDIO queues are down when the system gets stopped. 275 * Makes sure that QDIO queues are down when the system gets stopped.
278 */ 276 */
279static void 277static void
280zfcp_ccw_shutdown(struct device *dev) 278zfcp_ccw_shutdown(struct ccw_device *cdev)
281{ 279{
282 struct zfcp_adapter *adapter; 280 struct zfcp_adapter *adapter;
283 281
284 down(&zfcp_data.config_sema); 282 down(&zfcp_data.config_sema);
285 adapter = dev_get_drvdata(dev); 283 adapter = dev_get_drvdata(&cdev->dev);
286 zfcp_erp_adapter_shutdown(adapter, 0); 284 zfcp_erp_adapter_shutdown(adapter, 0);
287 zfcp_erp_wait(adapter); 285 zfcp_erp_wait(adapter);
288 up(&zfcp_data.config_sema); 286 up(&zfcp_data.config_sema);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 5f3212440f..ffa3bf7569 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -19,8 +19,8 @@
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 20 */
21 21
22#include <asm/debug.h>
23#include <linux/ctype.h> 22#include <linux/ctype.h>
23#include <asm/debug.h>
24#include "zfcp_ext.h" 24#include "zfcp_ext.h"
25 25
26static u32 dbfsize = 4; 26static u32 dbfsize = 4;
@@ -35,17 +35,17 @@ static int
35zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck) 35zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck)
36{ 36{
37 unsigned long long sec; 37 unsigned long long sec;
38 struct timespec xtime; 38 struct timespec dbftime;
39 int len = 0; 39 int len = 0;
40 40
41 stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096); 41 stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
42 sec = stck >> 12; 42 sec = stck >> 12;
43 do_div(sec, 1000000); 43 do_div(sec, 1000000);
44 xtime.tv_sec = sec; 44 dbftime.tv_sec = sec;
45 stck -= (sec * 1000000) << 12; 45 stck -= (sec * 1000000) << 12;
46 xtime.tv_nsec = ((stck * 1000) >> 12); 46 dbftime.tv_nsec = ((stck * 1000) >> 12);
47 len += sprintf(out_buf + len, "%-24s%011lu:%06lu\n", 47 len += sprintf(out_buf + len, "%-24s%011lu:%06lu\n",
48 label, xtime.tv_sec, xtime.tv_nsec); 48 label, dbftime.tv_sec, dbftime.tv_nsec);
49 49
50 return len; 50 return len;
51} 51}
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index d8cd75ce2d..16b4418ab2 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -54,7 +54,7 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *, int);
54static int zfcp_erp_strategy_statechange(int, u32, struct zfcp_adapter *, 54static int zfcp_erp_strategy_statechange(int, u32, struct zfcp_adapter *,
55 struct zfcp_port *, 55 struct zfcp_port *,
56 struct zfcp_unit *, int); 56 struct zfcp_unit *, int);
57static inline int zfcp_erp_strategy_statechange_detected(atomic_t *, u32); 57static int zfcp_erp_strategy_statechange_detected(atomic_t *, u32);
58static int zfcp_erp_strategy_followup_actions(int, struct zfcp_adapter *, 58static int zfcp_erp_strategy_followup_actions(int, struct zfcp_adapter *,
59 struct zfcp_port *, 59 struct zfcp_port *,
60 struct zfcp_unit *, int); 60 struct zfcp_unit *, int);
@@ -106,8 +106,8 @@ static void zfcp_erp_action_cleanup(int, struct zfcp_adapter *,
106static void zfcp_erp_action_ready(struct zfcp_erp_action *); 106static void zfcp_erp_action_ready(struct zfcp_erp_action *);
107static int zfcp_erp_action_exists(struct zfcp_erp_action *); 107static int zfcp_erp_action_exists(struct zfcp_erp_action *);
108 108
109static inline void zfcp_erp_action_to_ready(struct zfcp_erp_action *); 109static void zfcp_erp_action_to_ready(struct zfcp_erp_action *);
110static inline void zfcp_erp_action_to_running(struct zfcp_erp_action *); 110static void zfcp_erp_action_to_running(struct zfcp_erp_action *);
111 111
112static void zfcp_erp_memwait_handler(unsigned long); 112static void zfcp_erp_memwait_handler(unsigned long);
113 113
@@ -952,7 +952,7 @@ zfcp_erp_memwait_handler(unsigned long data)
952 * action gets an appropriate flag and will be processed 952 * action gets an appropriate flag and will be processed
953 * accordingly 953 * accordingly
954 */ 954 */
955void zfcp_erp_timeout_handler(unsigned long data) 955static void zfcp_erp_timeout_handler(unsigned long data)
956{ 956{
957 struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data; 957 struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
958 struct zfcp_adapter *adapter = erp_action->adapter; 958 struct zfcp_adapter *adapter = erp_action->adapter;
@@ -1491,7 +1491,7 @@ zfcp_erp_strategy_statechange(int action,
1491 return retval; 1491 return retval;
1492} 1492}
1493 1493
1494static inline int 1494static int
1495zfcp_erp_strategy_statechange_detected(atomic_t * target_status, u32 erp_status) 1495zfcp_erp_strategy_statechange_detected(atomic_t * target_status, u32 erp_status)
1496{ 1496{
1497 return 1497 return
@@ -2001,7 +2001,7 @@ zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close)
2001 * returns: 0 - successful setup 2001 * returns: 0 - successful setup
2002 * !0 - failed setup 2002 * !0 - failed setup
2003 */ 2003 */
2004int 2004static int
2005zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action) 2005zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
2006{ 2006{
2007 int retval; 2007 int retval;
@@ -3248,8 +3248,7 @@ static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
3248 zfcp_erp_action_dismiss(&unit->erp_action); 3248 zfcp_erp_action_dismiss(&unit->erp_action);
3249} 3249}
3250 3250
3251static inline void 3251static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
3252zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
3253{ 3252{
3254 struct zfcp_adapter *adapter = erp_action->adapter; 3253 struct zfcp_adapter *adapter = erp_action->adapter;
3255 3254
@@ -3258,8 +3257,7 @@ zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
3258 list_move(&erp_action->list, &erp_action->adapter->erp_running_head); 3257 list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
3259} 3258}
3260 3259
3261static inline void 3260static void zfcp_erp_action_to_ready(struct zfcp_erp_action *erp_action)
3262zfcp_erp_action_to_ready(struct zfcp_erp_action *erp_action)
3263{ 3261{
3264 struct zfcp_adapter *adapter = erp_action->adapter; 3262 struct zfcp_adapter *adapter = erp_action->adapter;
3265 3263
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index f142eafb6f..b41dfb5390 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3829,18 +3829,18 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3829 3829
3830/** 3830/**
3831 * ipr_sata_reset - Reset the SATA port 3831 * ipr_sata_reset - Reset the SATA port
3832 * @ap: SATA port to reset 3832 * @link: SATA link to reset
3833 * @classes: class of the attached device 3833 * @classes: class of the attached device
3834 * 3834 *
3835 * This function issues a SATA phy reset to the affected ATA port. 3835 * This function issues a SATA phy reset to the affected ATA link.
3836 * 3836 *
3837 * Return value: 3837 * Return value:
3838 * 0 on success / non-zero on failure 3838 * 0 on success / non-zero on failure
3839 **/ 3839 **/
3840static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes, 3840static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
3841 unsigned long deadline) 3841 unsigned long deadline)
3842{ 3842{
3843 struct ipr_sata_port *sata_port = ap->private_data; 3843 struct ipr_sata_port *sata_port = link->ap->private_data;
3844 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 3844 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
3845 struct ipr_resource_entry *res; 3845 struct ipr_resource_entry *res;
3846 unsigned long lock_flags = 0; 3846 unsigned long lock_flags = 0;
@@ -4981,22 +4981,22 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
4981 rc = ipr_device_reset(ioa_cfg, res); 4981 rc = ipr_device_reset(ioa_cfg, res);
4982 4982
4983 if (rc) { 4983 if (rc) {
4984 ap->ops->port_disable(ap); 4984 ata_port_disable(ap);
4985 goto out_unlock; 4985 goto out_unlock;
4986 } 4986 }
4987 4987
4988 switch(res->cfgte.proto) { 4988 switch(res->cfgte.proto) {
4989 case IPR_PROTO_SATA: 4989 case IPR_PROTO_SATA:
4990 case IPR_PROTO_SAS_STP: 4990 case IPR_PROTO_SAS_STP:
4991 ap->device[0].class = ATA_DEV_ATA; 4991 ap->link.device[0].class = ATA_DEV_ATA;
4992 break; 4992 break;
4993 case IPR_PROTO_SATA_ATAPI: 4993 case IPR_PROTO_SATA_ATAPI:
4994 case IPR_PROTO_SAS_STP_ATAPI: 4994 case IPR_PROTO_SAS_STP_ATAPI:
4995 ap->device[0].class = ATA_DEV_ATAPI; 4995 ap->link.device[0].class = ATA_DEV_ATAPI;
4996 break; 4996 break;
4997 default: 4997 default:
4998 ap->device[0].class = ATA_DEV_UNKNOWN; 4998 ap->link.device[0].class = ATA_DEV_UNKNOWN;
4999 ap->ops->port_disable(ap); 4999 ata_port_disable(ap);
5000 break; 5000 break;
5001 }; 5001 };
5002 5002
@@ -5262,7 +5262,6 @@ static u8 ipr_ata_check_altstatus(struct ata_port *ap)
5262} 5262}
5263 5263
5264static struct ata_port_operations ipr_sata_ops = { 5264static struct ata_port_operations ipr_sata_ops = {
5265 .port_disable = ata_port_disable,
5266 .check_status = ipr_ata_check_status, 5265 .check_status = ipr_ata_check_status,
5267 .check_altstatus = ipr_ata_check_altstatus, 5266 .check_altstatus = ipr_ata_check_altstatus,
5268 .dev_select = ata_noop_dev_select, 5267 .dev_select = ata_noop_dev_select,
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 5e573efcf0..0829b55c64 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -249,17 +249,17 @@ static void sas_ata_phy_reset(struct ata_port *ap)
249 switch (dev->sata_dev.command_set) { 249 switch (dev->sata_dev.command_set) {
250 case ATA_COMMAND_SET: 250 case ATA_COMMAND_SET:
251 SAS_DPRINTK("%s: Found ATA device.\n", __FUNCTION__); 251 SAS_DPRINTK("%s: Found ATA device.\n", __FUNCTION__);
252 ap->device[0].class = ATA_DEV_ATA; 252 ap->link.device[0].class = ATA_DEV_ATA;
253 break; 253 break;
254 case ATAPI_COMMAND_SET: 254 case ATAPI_COMMAND_SET:
255 SAS_DPRINTK("%s: Found ATAPI device.\n", __FUNCTION__); 255 SAS_DPRINTK("%s: Found ATAPI device.\n", __FUNCTION__);
256 ap->device[0].class = ATA_DEV_ATAPI; 256 ap->link.device[0].class = ATA_DEV_ATAPI;
257 break; 257 break;
258 default: 258 default:
259 SAS_DPRINTK("%s: Unknown SATA command set: %d.\n", 259 SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
260 __FUNCTION__, 260 __FUNCTION__,
261 dev->sata_dev.command_set); 261 dev->sata_dev.command_set);
262 ap->device[0].class = ATA_DEV_UNKNOWN; 262 ap->link.device[0].class = ATA_DEV_UNKNOWN;
263 break; 263 break;
264 } 264 }
265 265
@@ -317,7 +317,7 @@ static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
317 dev->sata_dev.serror = val; 317 dev->sata_dev.serror = val;
318 break; 318 break;
319 case SCR_ACTIVE: 319 case SCR_ACTIVE:
320 dev->sata_dev.ap->sactive = val; 320 dev->sata_dev.ap->link.sactive = val;
321 break; 321 break;
322 default: 322 default:
323 return -EINVAL; 323 return -EINVAL;
@@ -342,7 +342,7 @@ static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in,
342 *val = dev->sata_dev.serror; 342 *val = dev->sata_dev.serror;
343 return 0; 343 return 0;
344 case SCR_ACTIVE: 344 case SCR_ACTIVE:
345 *val = dev->sata_dev.ap->sactive; 345 *val = dev->sata_dev.ap->link.sactive;
346 return 0; 346 return 0;
347 default: 347 default:
348 return -EINVAL; 348 return -EINVAL;
@@ -350,7 +350,6 @@ static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in,
350} 350}
351 351
352static struct ata_port_operations sas_sata_ops = { 352static struct ata_port_operations sas_sata_ops = {
353 .port_disable = ata_port_disable,
354 .check_status = sas_ata_check_status, 353 .check_status = sas_ata_check_status,
355 .check_altstatus = sas_ata_check_status, 354 .check_altstatus = sas_ata_check_status,
356 .dev_select = ata_noop_dev_select, 355 .dev_select = ata_noop_dev_select,
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 34cdce6738..ede9986d34 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -277,16 +277,11 @@ static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
277 return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0; 277 return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
278} 278}
279 279
280static int scsi_bus_uevent(struct device *dev, char **envp, int num_envp, 280static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
281 char *buffer, int buffer_size)
282{ 281{
283 struct scsi_device *sdev = to_scsi_device(dev); 282 struct scsi_device *sdev = to_scsi_device(dev);
284 int i = 0;
285 int length = 0;
286 283
287 add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, 284 add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type);
288 "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type);
289 envp[i] = NULL;
290 return 0; 285 return 0;
291} 286}
292 287
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 053fca41b0..73440e2683 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -4,6 +4,7 @@
4 * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO) 4 * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO)
5 * 5 *
6 * Copyright (C) 2002 - 2006 Paul Mundt 6 * Copyright (C) 2002 - 2006 Paul Mundt
7 * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
7 * 8 *
8 * based off of the old drivers/char/sh-sci.c by: 9 * based off of the old drivers/char/sh-sci.c by:
9 * 10 *
@@ -301,6 +302,38 @@ static void sci_init_pins_scif(struct uart_port* port, unsigned int cflag)
301 } 302 }
302 sci_out(port, SCFCR, fcr_val); 303 sci_out(port, SCFCR, fcr_val);
303} 304}
305#elif defined(CONFIG_CPU_SUBTYPE_SH7720)
306static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag)
307{
308 unsigned int fcr_val = 0;
309 unsigned short data;
310
311 if (cflag & CRTSCTS) {
312 /* enable RTS/CTS */
313 if (port->mapbase == 0xa4430000) { /* SCIF0 */
314 /* Clear PTCR bit 9-2; enable all scif pins but sck */
315 data = ctrl_inw(PORT_PTCR);
316 ctrl_outw((data & 0xfc03), PORT_PTCR);
317 } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
318 /* Clear PVCR bit 9-2 */
319 data = ctrl_inw(PORT_PVCR);
320 ctrl_outw((data & 0xfc03), PORT_PVCR);
321 }
322 fcr_val |= SCFCR_MCE;
323 } else {
324 if (port->mapbase == 0xa4430000) { /* SCIF0 */
325 /* Clear PTCR bit 5-2; enable only tx and rx */
326 data = ctrl_inw(PORT_PTCR);
327 ctrl_outw((data & 0xffc3), PORT_PTCR);
328 } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
329 /* Clear PVCR bit 5-2 */
330 data = ctrl_inw(PORT_PVCR);
331 ctrl_outw((data & 0xffc3), PORT_PVCR);
332 }
333 }
334 sci_out(port, SCFCR, fcr_val);
335}
336
304#elif defined(CONFIG_CPU_SH3) 337#elif defined(CONFIG_CPU_SH3)
305/* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */ 338/* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */
306static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) 339static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag)
@@ -1276,7 +1309,7 @@ static int __init sci_console_init(void)
1276console_initcall(sci_console_init); 1309console_initcall(sci_console_init);
1277#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ 1310#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
1278 1311
1279#ifdef CONFIG_SH_KGDB 1312#ifdef CONFIG_SH_KGDB_CONSOLE
1280/* 1313/*
1281 * FIXME: Most of this can go away.. at the moment, we rely on 1314 * FIXME: Most of this can go away.. at the moment, we rely on
1282 * arch/sh/kernel/setup.c to do the command line parsing for kgdb, though 1315 * arch/sh/kernel/setup.c to do the command line parsing for kgdb, though
@@ -1334,9 +1367,7 @@ int __init kgdb_console_setup(struct console *co, char *options)
1334 1367
1335 return uart_set_options(port, co, baud, parity, bits, flow); 1368 return uart_set_options(port, co, baud, parity, bits, flow);
1336} 1369}
1337#endif /* CONFIG_SH_KGDB */
1338 1370
1339#ifdef CONFIG_SH_KGDB_CONSOLE
1340static struct console kgdb_console = { 1371static struct console kgdb_console = {
1341 .name = "ttySC", 1372 .name = "ttySC",
1342 .device = uart_console_device, 1373 .device = uart_console_device,
@@ -1432,7 +1463,7 @@ static int __devinit sci_probe(struct platform_device *dev)
1432 1463
1433#ifdef CONFIG_CPU_FREQ 1464#ifdef CONFIG_CPU_FREQ
1434 cpufreq_register_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER); 1465 cpufreq_register_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER);
1435 dev_info(&dev->dev, "sci: CPU frequency notifier registered\n"); 1466 dev_info(&dev->dev, "CPU frequency notifier registered\n");
1436#endif 1467#endif
1437 1468
1438#ifdef CONFIG_SH_STANDARD_BIOS 1469#ifdef CONFIG_SH_STANDARD_BIOS
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index cf75466ebf..e89ae29645 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -10,19 +10,19 @@
10 * Modified to support SH7300(SH-Mobile) SCIF. Takashi Kusuda (Jun 2003). 10 * Modified to support SH7300(SH-Mobile) SCIF. Takashi Kusuda (Jun 2003).
11 * Modified to support H8/300 Series Yoshinori Sato (Feb 2004). 11 * Modified to support H8/300 Series Yoshinori Sato (Feb 2004).
12 * Removed SH7300 support (Jul 2007). 12 * Removed SH7300 support (Jul 2007).
13 * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Aug 2007).
13 */ 14 */
14#include <linux/serial_core.h> 15#include <linux/serial_core.h>
15#include <asm/io.h> 16#include <asm/io.h>
16 17
17#if defined(__H8300H__) || defined(__H8300S__)
18#include <asm/gpio.h> 18#include <asm/gpio.h>
19
19#if defined(CONFIG_H83007) || defined(CONFIG_H83068) 20#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
20#include <asm/regs306x.h> 21#include <asm/regs306x.h>
21#endif 22#endif
22#if defined(CONFIG_H8S2678) 23#if defined(CONFIG_H8S2678)
23#include <asm/regs267x.h> 24#include <asm/regs267x.h>
24#endif 25#endif
25#endif
26 26
27#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ 27#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
28 defined(CONFIG_CPU_SUBTYPE_SH7707) || \ 28 defined(CONFIG_CPU_SUBTYPE_SH7707) || \
@@ -46,6 +46,10 @@
46 */ 46 */
47# define SCSCR_INIT(port) (port->mapbase == SCIF2) ? 0xF3 : 0xF0 47# define SCSCR_INIT(port) (port->mapbase == SCIF2) ? 0xF3 : 0xF0
48# define SCIF_ONLY 48# define SCIF_ONLY
49#elif defined(CONFIG_CPU_SUBTYPE_SH7720)
50# define SCSCR_INIT(port) 0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */
51# define SCIF_ONLY
52#define SCIF_ORER 0x0200 /* overrun error bit */
49#elif defined(CONFIG_SH_RTS7751R2D) 53#elif defined(CONFIG_SH_RTS7751R2D)
50# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */ 54# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
51# define SCIF_ORER 0x0001 /* overrun error bit */ 55# define SCIF_ORER 0x0001 /* overrun error bit */
@@ -217,7 +221,8 @@
217#define SCIF_RDF 0x0002 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ 221#define SCIF_RDF 0x0002 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
218#define SCIF_DR 0x0001 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ 222#define SCIF_DR 0x0001 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
219 223
220#if defined(CONFIG_CPU_SUBTYPE_SH7705) 224#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
225 defined(CONFIG_CPU_SUBTYPE_SH7720)
221#define SCIF_ORER 0x0200 226#define SCIF_ORER 0x0200
222#define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK | SCIF_ORER) 227#define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK | SCIF_ORER)
223#define SCIF_RFDC_MASK 0x007f 228#define SCIF_RFDC_MASK 0x007f
@@ -254,7 +259,8 @@
254# define SCxSR_FER(port) SCIF_FER 259# define SCxSR_FER(port) SCIF_FER
255# define SCxSR_PER(port) SCIF_PER 260# define SCxSR_PER(port) SCIF_PER
256# define SCxSR_BRK(port) SCIF_BRK 261# define SCxSR_BRK(port) SCIF_BRK
257#if defined(CONFIG_CPU_SUBTYPE_SH7705) 262#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
263 defined(CONFIG_CPU_SUBTYPE_SH7720)
258# define SCxSR_RDxF_CLEAR(port) (sci_in(port,SCxSR)&0xfffc) 264# define SCxSR_RDxF_CLEAR(port) (sci_in(port,SCxSR)&0xfffc)
259# define SCxSR_ERROR_CLEAR(port) (sci_in(port,SCxSR)&0xfd73) 265# define SCxSR_ERROR_CLEAR(port) (sci_in(port,SCxSR)&0xfd73)
260# define SCxSR_TDxE_CLEAR(port) (sci_in(port,SCxSR)&0xffdf) 266# define SCxSR_TDxE_CLEAR(port) (sci_in(port,SCxSR)&0xffdf)
@@ -362,7 +368,8 @@
362 CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size) 368 CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size)
363#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ 369#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
364 CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) 370 CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
365#elif defined(CONFIG_CPU_SUBTYPE_SH7705) 371#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
372 defined(CONFIG_CPU_SUBTYPE_SH7720)
366#define SCIF_FNS(name, scif_offset, scif_size) \ 373#define SCIF_FNS(name, scif_offset, scif_size) \
367 CPU_SCIF_FNS(name, scif_offset, scif_size) 374 CPU_SCIF_FNS(name, scif_offset, scif_size)
368#else 375#else
@@ -388,7 +395,8 @@
388 CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) 395 CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
389#endif 396#endif
390 397
391#if defined(CONFIG_CPU_SUBTYPE_SH7705) 398#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
399 defined(CONFIG_CPU_SUBTYPE_SH7720)
392 400
393SCIF_FNS(SCSMR, 0x00, 16) 401SCIF_FNS(SCSMR, 0x00, 16)
394SCIF_FNS(SCBRR, 0x04, 8) 402SCIF_FNS(SCBRR, 0x04, 8)
@@ -510,7 +518,15 @@ static inline void set_sh771x_scif_pfc(struct uart_port *port)
510 return; 518 return;
511 } 519 }
512} 520}
513 521#elif defined(CONFIG_CPU_SUBTYPE_SH7720)
522static inline int sci_rxd_in(struct uart_port *port)
523{
524 if (port->mapbase == 0xa4430000)
525 return sci_in(port, SCxSR) & 0x0003 ? 1 : 0;
526 else if (port->mapbase == 0xa4438000)
527 return sci_in(port, SCxSR) & 0x0003 ? 1 : 0;
528 return 1;
529}
514#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \ 530#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
515 defined(CONFIG_CPU_SUBTYPE_SH7751) || \ 531 defined(CONFIG_CPU_SUBTYPE_SH7751) || \
516 defined(CONFIG_CPU_SUBTYPE_SH7751R) || \ 532 defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
@@ -653,6 +669,7 @@ static inline int sci_rxd_in(struct uart_port *port)
653 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ 669 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
654 if (port->mapbase == 0xffc60000) 670 if (port->mapbase == 0xffc60000)
655 return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ 671 return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
672 return 1;
656} 673}
657#endif 674#endif
658 675
@@ -691,7 +708,8 @@ static inline int sci_rxd_in(struct uart_port *port)
691#if defined(CONFIG_CPU_SUBTYPE_SH7780) || \ 708#if defined(CONFIG_CPU_SUBTYPE_SH7780) || \
692 defined(CONFIG_CPU_SUBTYPE_SH7785) 709 defined(CONFIG_CPU_SUBTYPE_SH7785)
693#define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(16*bps)-1) 710#define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(16*bps)-1)
694#elif defined(CONFIG_CPU_SUBTYPE_SH7705) 711#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
712 defined(CONFIG_CPU_SUBTYPE_SH7720)
695#define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1) 713#define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1)
696#elif defined(__H8300H__) || defined(__H8300S__) 714#elif defined(__H8300H__) || defined(__H8300S__)
697#define SCBRR_VALUE(bps) (((CONFIG_CPU_CLOCK*1000/32)/bps)-1) 715#define SCBRR_VALUE(bps) (((CONFIG_CPU_CLOCK*1000/32)/bps)-1)
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 8a143894e3..a96f4a8cfe 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -2,5 +2,5 @@
2# Makefile for the SuperH specific drivers. 2# Makefile for the SuperH specific drivers.
3# 3#
4 4
5obj-$(CONFIG_SUPERHYWAY) += superhyway/ 5obj-$(CONFIG_SUPERHYWAY) += superhyway/
6 6obj-$(CONFIG_MAPLE) += maple/
diff --git a/drivers/sh/maple/Makefile b/drivers/sh/maple/Makefile
new file mode 100644
index 0000000000..65dfeeb610
--- /dev/null
+++ b/drivers/sh/maple/Makefile
@@ -0,0 +1,3 @@
1# Makefile for Maple Bus
2
3obj-$(CONFIG_MAPLE) := maple.o
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
new file mode 100644
index 0000000000..161d1021b7
--- /dev/null
+++ b/drivers/sh/maple/maple.c
@@ -0,0 +1,735 @@
1/*
2 * Core maple bus functionality
3 *
4 * Copyright (C) 2007 Adrian McMenamin
5 *
6 * Based on 2.4 code by:
7 *
8 * Copyright (C) 2000-2001 YAEGASHI Takeshi
9 * Copyright (C) 2001 M. R. Brown
10 * Copyright (C) 2001 Paul Mundt
11 *
12 * and others.
13 *
14 * This file is subject to the terms and conditions of the GNU General Public
15 * License. See the file "COPYING" in the main directory of this archive
16 * for more details.
17 */
18#include <linux/init.h>
19#include <linux/kernel.h>
20#include <linux/device.h>
21#include <linux/module.h>
22#include <linux/interrupt.h>
23#include <linux/list.h>
24#include <linux/io.h>
25#include <linux/slab.h>
26#include <linux/maple.h>
27#include <linux/dma-mapping.h>
28#include <asm/cacheflush.h>
29#include <asm/dma.h>
30#include <asm/io.h>
31#include <asm/mach/dma.h>
32#include <asm/mach/sysasic.h>
33#include <asm/mach/maple.h>
34
35MODULE_AUTHOR("Yaegshi Takeshi, Paul Mundt, M.R. Brown, Adrian McMenamin");
36MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
37MODULE_LICENSE("GPL v2");
38MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");
39
static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

/* bottom halves for the DMA-complete and VBLANK interrupts */
static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

/* packets waiting to be sent / packets currently on the wire */
static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

/* serializes additions to maple_waitq (see maple_add_packet) */
static DEFINE_MUTEX(maple_list_lock);

static struct maple_driver maple_dummy_driver;
static struct device maple_bus;
/* per-port bitmask of subunits seen on the last DEVINFO scan */
static int subdevice_map[MAPLE_PORTS];
/* DMA command buffer: base, write cursor, last descriptor written */
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
/* jiffies stamp gating the next plug-and-play rescan */
static unsigned long maple_pnp_time;
/* bus state flags: initial scan done / hotplug scan phase / commands queued */
static int started, scanning, liststatus;
/* slab cache backing per-packet receive buffers */
static struct kmem_cache *maple_queue_cache;

/* key used to look a registered device up by (port, unit) */
struct maple_device_specify {
	int port;
	int unit;
};
63
64/**
65 * maple_driver_register - register a device driver
66 * automatically makes the driver bus a maple bus
67 * @drv: the driver to be registered
68 */
69int maple_driver_register(struct device_driver *drv)
70{
71 if (!drv)
72 return -EINVAL;
73 drv->bus = &maple_bus_type;
74 return driver_register(drv);
75}
76EXPORT_SYMBOL_GPL(maple_driver_register);
77
/* set hardware registers to enable next round of dma */
static void maplebus_dma_reset(void)
{
	/* reset the controller before reprogramming it */
	ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
	ctrl_outl(1, MAPLE_TRIGTYPE);
	/* bus speed plus per-transfer timeout */
	ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED);
	/* physical base of the command/DMA buffer built by maple_send() */
	ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
	/* re-enable the bus */
	ctrl_outl(1, MAPLE_ENABLE);
}
88
89/**
90 * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND
91 * @dev: device responding
92 * @callback: handler callback
93 * @interval: interval in jiffies between callbacks
94 * @function: the function code for the device
95 */
96void maple_getcond_callback(struct maple_device *dev,
97 void (*callback) (struct mapleq * mq),
98 unsigned long interval, unsigned long function)
99{
100 dev->callback = callback;
101 dev->interval = interval;
102 dev->function = cpu_to_be32(function);
103 dev->when = jiffies;
104}
105EXPORT_SYMBOL_GPL(maple_getcond_callback);
106
107static int maple_dma_done(void)
108{
109 return (ctrl_inl(MAPLE_STATE) & 1) == 0;
110}
111
112static void maple_release_device(struct device *dev)
113{
114 if (dev->type) {
115 kfree(dev->type->name);
116 kfree(dev->type);
117 }
118}
119
/**
 * maple_add_packet - add a single instruction to the queue
 * @mq: instruction to add to waiting queue
 *
 * The packet sits on maple_waitq until the next maple_send() round
 * copies it into the DMA buffer and moves it onto maple_sentq.
 */
void maple_add_packet(struct mapleq *mq)
{
	mutex_lock(&maple_list_lock);
	list_add(&mq->list, &maple_waitq);
	mutex_unlock(&maple_list_lock);
}
EXPORT_SYMBOL_GPL(maple_add_packet);
131
132static struct mapleq *maple_allocq(struct maple_device *dev)
133{
134 struct mapleq *mq;
135
136 mq = kmalloc(sizeof(*mq), GFP_KERNEL);
137 if (!mq)
138 return NULL;
139
140 mq->dev = dev;
141 mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
142 mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);
143 if (!mq->recvbuf) {
144 kfree(mq);
145 return NULL;
146 }
147
148 return mq;
149}
150
151static struct maple_device *maple_alloc_dev(int port, int unit)
152{
153 struct maple_device *dev;
154
155 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
156 if (!dev)
157 return NULL;
158
159 dev->port = port;
160 dev->unit = unit;
161 dev->mq = maple_allocq(dev);
162
163 if (!dev->mq) {
164 kfree(dev);
165 return NULL;
166 }
167
168 return dev;
169}
170
171static void maple_free_dev(struct maple_device *mdev)
172{
173 if (!mdev)
174 return;
175 if (mdev->mq) {
176 kmem_cache_free(maple_queue_cache, mdev->mq->recvbufdcsp);
177 kfree(mdev->mq);
178 }
179 kfree(mdev);
180}
181
/* process the command queue into a maple command block
 * terminating command has bit 32 of first long set to 0
 */
static void maple_build_block(struct mapleq *mq)
{
	int port, unit, from, to, len;
	unsigned long *lsendbuf = mq->sendbuf;

	port = mq->dev->port & 3;
	unit = mq->dev->unit;
	len = mq->length;
	/* address bytes: source is the port, destination is the subunit
	 * bit on that port (unit 0 = main peripheral, flagged by 0x20) */
	from = port << 6;
	to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

	/* clear the end-of-list flag on the previous descriptor; the
	 * entry written now becomes the terminating one */
	*maple_lastptr &= 0x7fffffff;
	maple_lastptr = maple_sendptr;

	/* descriptor word: port, payload length, end-of-list marker set */
	*maple_sendptr++ = (port << 16) | len | 0x80000000;
	/* physical address the hardware DMAs the response into */
	*maple_sendptr++ = PHYSADDR(mq->recvbuf);
	/* frame header: command code, destination, source, payload length */
	*maple_sendptr++ =
	    mq->command | (to << 8) | (from << 16) | (len << 24);

	/* copy the payload words behind the header */
	while (len-- > 0)
		*maple_sendptr++ = *lsendbuf++;
}
207
/* build up command queue */
static void maple_send(void)
{
	int i;
	int maple_packets;
	struct mapleq *mq, *nmq;

	/* never start a new round while replies are still outstanding */
	if (!list_empty(&maple_sentq))
		return;
	if (list_empty(&maple_waitq) || !maple_dma_done())
		return;
	maple_packets = 0;
	/* rewind the DMA buffer cursors for this round */
	maple_sendptr = maple_lastptr = maple_sendbuf;
	/* NOTE(review): maple_waitq is walked here without taking
	 * maple_list_lock - confirm callers serialize against
	 * maple_add_packet() */
	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
		maple_build_block(mq);
		list_move(&mq->list, &maple_sentq);
		if (maple_packets++ > MAPLE_MAXPACKETS)
			break;
	}
	if (maple_packets > 0) {
		/* flush CPU caches so the DMA engine sees what was written */
		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
			dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
	}
}
233
234static int attach_matching_maple_driver(struct device_driver *driver,
235 void *devptr)
236{
237 struct maple_driver *maple_drv;
238 struct maple_device *mdev;
239
240 mdev = devptr;
241 maple_drv = to_maple_driver(driver);
242 if (mdev->devinfo.function & be32_to_cpu(maple_drv->function)) {
243 if (maple_drv->connect(mdev) == 0) {
244 mdev->driver = maple_drv;
245 return 1;
246 }
247 }
248 return 0;
249}
250
251static void maple_detach_driver(struct maple_device *mdev)
252{
253 if (!mdev)
254 return;
255 if (mdev->driver) {
256 if (mdev->driver->disconnect)
257 mdev->driver->disconnect(mdev);
258 }
259 mdev->driver = NULL;
260 if (mdev->registered) {
261 maple_release_device(&mdev->dev);
262 device_unregister(&mdev->dev);
263 }
264 mdev->registered = 0;
265 maple_free_dev(mdev);
266}
267
268/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
269static void maple_attach_driver(struct maple_device *dev)
270{
271 char *p;
272
273 char *recvbuf;
274 unsigned long function;
275 int matched, retval;
276
277 recvbuf = dev->mq->recvbuf;
278 memcpy(&dev->devinfo, recvbuf + 4, sizeof(dev->devinfo));
279 memcpy(dev->product_name, dev->devinfo.product_name, 30);
280 memcpy(dev->product_licence, dev->devinfo.product_licence, 60);
281 dev->product_name[30] = '\0';
282 dev->product_licence[60] = '\0';
283
284 for (p = dev->product_name + 29; dev->product_name <= p; p--)
285 if (*p == ' ')
286 *p = '\0';
287 else
288 break;
289
290 for (p = dev->product_licence + 59; dev->product_licence <= p; p--)
291 if (*p == ' ')
292 *p = '\0';
293 else
294 break;
295
296 function = be32_to_cpu(dev->devinfo.function);
297
298 if (function > 0x200) {
299 /* Do this silently - as not a real device */
300 function = 0;
301 dev->driver = &maple_dummy_driver;
302 sprintf(dev->dev.bus_id, "%d:0.port", dev->port);
303 } else {
304 printk(KERN_INFO
305 "Maple bus at (%d, %d): Connected function 0x%lX\n",
306 dev->port, dev->unit, function);
307
308 matched =
309 bus_for_each_drv(&maple_bus_type, NULL, dev,
310 attach_matching_maple_driver);
311
312 if (matched == 0) {
313 /* Driver does not exist yet */
314 printk(KERN_INFO
315 "No maple driver found for this device\n");
316 dev->driver = &maple_dummy_driver;
317 }
318
319 sprintf(dev->dev.bus_id, "%d:0%d.%lX", dev->port,
320 dev->unit, function);
321 }
322 dev->function = function;
323 dev->dev.bus = &maple_bus_type;
324 dev->dev.parent = &maple_bus;
325 dev->dev.release = &maple_release_device;
326 retval = device_register(&dev->dev);
327 if (retval) {
328 printk(KERN_INFO
329 "Maple bus: Attempt to register device (%x, %x) failed.\n",
330 dev->port, dev->unit);
331 maple_free_dev(dev);
332 }
333 dev->registered = 1;
334}
335
336/*
337 * if device has been registered for the given
338 * port and unit then return 1 - allows identification
339 * of which devices need to be attached or detached
340 */
341static int detach_maple_device(struct device *device, void *portptr)
342{
343 struct maple_device_specify *ds;
344 struct maple_device *mdev;
345
346 ds = portptr;
347 mdev = to_maple_dev(device);
348 if (mdev->port == ds->port && mdev->unit == ds->unit)
349 return 1;
350 return 0;
351}
352
353static int setup_maple_commands(struct device *device, void *ignored)
354{
355 struct maple_device *maple_dev = to_maple_dev(device);
356
357 if ((maple_dev->interval > 0)
358 && time_after(jiffies, maple_dev->when)) {
359 maple_dev->when = jiffies + maple_dev->interval;
360 maple_dev->mq->command = MAPLE_COMMAND_GETCOND;
361 maple_dev->mq->sendbuf = &maple_dev->function;
362 maple_dev->mq->length = 1;
363 maple_add_packet(maple_dev->mq);
364 liststatus++;
365 } else {
366 if (time_after(jiffies, maple_pnp_time)) {
367 maple_dev->mq->command = MAPLE_COMMAND_DEVINFO;
368 maple_dev->mq->length = 0;
369 maple_add_packet(maple_dev->mq);
370 liststatus++;
371 }
372 }
373
374 return 0;
375}
376
/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
	/* nothing to do while DMA is active or replies are outstanding */
	if (!maple_dma_done())
		return;
	if (!list_empty(&maple_sentq))
		return;
	/* quiesce the hardware while the next round is prepared */
	ctrl_outl(0, MAPLE_ENABLE);
	liststatus = 0;
	/* let every registered device queue its periodic commands */
	bus_for_each_dev(&maple_bus_type, NULL, NULL,
			 setup_maple_commands);
	if (time_after(jiffies, maple_pnp_time))
		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
	/* kick off the DMA round if anything was queued */
	if (liststatus && list_empty(&maple_sentq)) {
		INIT_LIST_HEAD(&maple_sentq);
		maple_send();
	}
	maplebus_dma_reset();
}
396
397/* handle devices added via hotplugs - placing them on queue for DEVINFO*/
398static void maple_map_subunits(struct maple_device *mdev, int submask)
399{
400 int retval, k, devcheck;
401 struct maple_device *mdev_add;
402 struct maple_device_specify ds;
403
404 for (k = 0; k < 5; k++) {
405 ds.port = mdev->port;
406 ds.unit = k + 1;
407 retval =
408 bus_for_each_dev(&maple_bus_type, NULL, &ds,
409 detach_maple_device);
410 if (retval) {
411 submask = submask >> 1;
412 continue;
413 }
414 devcheck = submask & 0x01;
415 if (devcheck) {
416 mdev_add = maple_alloc_dev(mdev->port, k + 1);
417 if (!mdev_add)
418 return;
419 mdev_add->mq->command = MAPLE_COMMAND_DEVINFO;
420 mdev_add->mq->length = 0;
421 maple_add_packet(mdev_add->mq);
422 scanning = 1;
423 }
424 submask = submask >> 1;
425 }
426}
427
428/* mark a device as removed */
429static void maple_clean_submap(struct maple_device *mdev)
430{
431 int killbit;
432
433 killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
434 killbit = ~killbit;
435 killbit &= 0xFF;
436 subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
437}
438
439/* handle empty port or hotplug removal */
440static void maple_response_none(struct maple_device *mdev,
441 struct mapleq *mq)
442{
443 if (mdev->unit != 0) {
444 list_del(&mq->list);
445 maple_clean_submap(mdev);
446 printk(KERN_INFO
447 "Maple bus device detaching at (%d, %d)\n",
448 mdev->port, mdev->unit);
449 maple_detach_driver(mdev);
450 return;
451 }
452 if (!started) {
453 printk(KERN_INFO "No maple devices attached to port %d\n",
454 mdev->port);
455 return;
456 }
457 maple_clean_submap(mdev);
458}
459
460/* preprocess hotplugs or scans */
461static void maple_response_devinfo(struct maple_device *mdev,
462 char *recvbuf)
463{
464 char submask;
465 if ((!started) || (scanning == 2)) {
466 maple_attach_driver(mdev);
467 return;
468 }
469 if (mdev->unit == 0) {
470 submask = recvbuf[2] & 0x1F;
471 if (submask ^ subdevice_map[mdev->port]) {
472 maple_map_subunits(mdev, submask);
473 subdevice_map[mdev->port] = submask;
474 }
475 }
476}
477
/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
	struct mapleq *mq, *nmq;
	struct maple_device *dev;
	char *recvbuf;
	enum maple_code code;

	if (!maple_dma_done())
		return;
	/* stop the hardware while the reply queue is processed */
	ctrl_outl(0, MAPLE_ENABLE);
	if (!list_empty(&maple_sentq)) {
		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
			/* first byte of the response frame is the code */
			recvbuf = mq->recvbuf;
			code = recvbuf[0];
			dev = mq->dev;
			switch (code) {
			case MAPLE_RESPONSE_NONE:
				/* empty port or hotplug removal */
				maple_response_none(dev, mq);
				break;

			case MAPLE_RESPONSE_DEVINFO:
				/* identification data: attach/hotplug */
				maple_response_devinfo(dev, recvbuf);
				break;

			case MAPLE_RESPONSE_DATATRF:
				/* condition data: hand off to the driver */
				if (dev->callback)
					dev->callback(mq);
				break;

			case MAPLE_RESPONSE_FILEERR:
			case MAPLE_RESPONSE_AGAIN:
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
				printk(KERN_DEBUG
				       "Maple non-fatal error 0x%X\n",
				       code);
				break;

			case MAPLE_RESPONSE_ALLINFO:
				printk(KERN_DEBUG
				       "Maple - extended device information not supported\n");
				break;

			case MAPLE_RESPONSE_OK:
				break;

			default:
				break;
			}
		}
		/* all replies handled: empty the sent list for next round */
		INIT_LIST_HEAD(&maple_sentq);
		if (scanning == 1) {
			/* hotplug queued DEVINFO packets: send them now and
			 * flag phase 2 so the replies attach drivers */
			maple_send();
			scanning = 2;
		} else
			scanning = 0;

		if (started == 0)
			started = 1;
	}
	maplebus_dma_reset();
}
541
/* top half for maple DMA completion */
static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id)
{
	/* Load everything into the bottom half */
	schedule_work(&maple_dma_process);
	return IRQ_HANDLED;
}

/* top half for VBLANK: defer scan work to process context */
static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
{
	schedule_work(&maple_vblank_process);
	return IRQ_HANDLED;
}
554
/* static irqaction descriptors handed to setup_irq() at init time */
static struct irqaction maple_dma_irq = {
	.name = "maple bus DMA handler",
	.handler = maplebus_dma_interrupt,
	.flags = IRQF_SHARED,
};

static struct irqaction maple_vblank_irq = {
	.name = "maple bus VBLANK handler",
	.handler = maplebus_vblank_interrupt,
	.flags = IRQF_SHARED,
};
566
/* attach the DMA-complete top half to its sysasic event line */
static int maple_set_dma_interrupt_handler(void)
{
	return setup_irq(HW_EVENT_MAPLE_DMA, &maple_dma_irq);
}

/* attach the VBLANK top half to its sysasic event line */
static int maple_set_vblank_interrupt_handler(void)
{
	return setup_irq(HW_EVENT_VSYNC, &maple_vblank_irq);
}
576
577static int maple_get_dma_buffer(void)
578{
579 maple_sendbuf =
580 (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
581 MAPLE_DMA_PAGES);
582 if (!maple_sendbuf)
583 return -ENOMEM;
584 return 0;
585}
586
587static int match_maple_bus_driver(struct device *devptr,
588 struct device_driver *drvptr)
589{
590 struct maple_driver *maple_drv;
591 struct maple_device *maple_dev;
592
593 maple_drv = container_of(drvptr, struct maple_driver, drv);
594 maple_dev = container_of(devptr, struct maple_device, dev);
595 /* Trap empty port case */
596 if (maple_dev->devinfo.function == 0xFFFFFFFF)
597 return 0;
598 else if (maple_dev->devinfo.function &
599 be32_to_cpu(maple_drv->function))
600 return 1;
601 return 0;
602}
603
/* uevent hook: no extra environment variables are exported */
static int maple_bus_uevent(struct device *dev, char **envp,
			    int num_envp, char *buffer, int buffer_size)
{
	return 0;
}
609
/* release hook for the statically allocated bus device; nothing to free */
static void maple_bus_release(struct device *dev)
{
}
613
/* fallback driver bound to devices no real driver claims */
static struct maple_driver maple_dummy_driver = {
	.drv = {
		.name = "maple_dummy_driver",
		.bus = &maple_bus_type,
	},
};

/* the maple bus type itself */
struct bus_type maple_bus_type = {
	.name = "maple",
	.match = match_maple_bus_driver,
	.uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);

/* parent device for every device on the bus */
static struct device maple_bus = {
	.bus_id = "maple",
	.release = maple_bus_release,
};
632
633static int __init maple_bus_init(void)
634{
635 int retval, i;
636 struct maple_device *mdev[MAPLE_PORTS];
637 ctrl_outl(0, MAPLE_STATE);
638
639 retval = device_register(&maple_bus);
640 if (retval)
641 goto cleanup;
642
643 retval = bus_register(&maple_bus_type);
644 if (retval)
645 goto cleanup_device;
646
647 retval = driver_register(&maple_dummy_driver.drv);
648
649 if (retval)
650 goto cleanup_bus;
651
652 /* allocate memory for maple bus dma */
653 retval = maple_get_dma_buffer();
654 if (retval) {
655 printk(KERN_INFO
656 "Maple bus: Failed to allocate Maple DMA buffers\n");
657 goto cleanup_basic;
658 }
659
660 /* set up DMA interrupt handler */
661 retval = maple_set_dma_interrupt_handler();
662 if (retval) {
663 printk(KERN_INFO
664 "Maple bus: Failed to grab maple DMA IRQ\n");
665 goto cleanup_dma;
666 }
667
668 /* set up VBLANK interrupt handler */
669 retval = maple_set_vblank_interrupt_handler();
670 if (retval) {
671 printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n");
672 goto cleanup_irq;
673 }
674
675 maple_queue_cache =
676 kmem_cache_create("maple_queue_cache", 0x400, 0,
677 SLAB_HWCACHE_ALIGN, NULL);
678
679 if (!maple_queue_cache)
680 goto cleanup_bothirqs;
681
682 /* setup maple ports */
683 for (i = 0; i < MAPLE_PORTS; i++) {
684 mdev[i] = maple_alloc_dev(i, 0);
685 if (!mdev[i]) {
686 while (i-- > 0)
687 maple_free_dev(mdev[i]);
688 goto cleanup_cache;
689 }
690 mdev[i]->registered = 0;
691 mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO;
692 mdev[i]->mq->length = 0;
693 maple_attach_driver(mdev[i]);
694 maple_add_packet(mdev[i]->mq);
695 subdevice_map[i] = 0;
696 }
697
698 /* setup maplebus hardware */
699 maplebus_dma_reset();
700
701 /* initial detection */
702 maple_send();
703
704 maple_pnp_time = jiffies;
705
706 printk(KERN_INFO "Maple bus core now registered.\n");
707
708 return 0;
709
710cleanup_cache:
711 kmem_cache_destroy(maple_queue_cache);
712
713cleanup_bothirqs:
714 free_irq(HW_EVENT_VSYNC, 0);
715
716cleanup_irq:
717 free_irq(HW_EVENT_MAPLE_DMA, 0);
718
719cleanup_dma:
720 free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);
721
722cleanup_basic:
723 driver_unregister(&maple_dummy_driver.drv);
724
725cleanup_bus:
726 bus_unregister(&maple_bus_type);
727
728cleanup_device:
729 device_unregister(&maple_bus);
730
731cleanup:
732 printk(KERN_INFO "Maple bus registration failed\n");
733 return retval;
734}
735subsys_initcall(maple_bus_init);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index e84d215979..bcb8dd5fb0 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -67,14 +67,11 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
67 return strncmp(spi->modalias, drv->name, BUS_ID_SIZE) == 0; 67 return strncmp(spi->modalias, drv->name, BUS_ID_SIZE) == 0;
68} 68}
69 69
70static int spi_uevent(struct device *dev, char **envp, int num_envp, 70static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
71 char *buffer, int buffer_size)
72{ 71{
73 const struct spi_device *spi = to_spi_device(dev); 72 const struct spi_device *spi = to_spi_device(dev);
74 73
75 envp[0] = buffer; 74 add_uevent_var(env, "MODALIAS=%s", spi->modalias);
76 snprintf(buffer, buffer_size, "MODALIAS=%s", spi->modalias);
77 envp[1] = NULL;
78 return 0; 75 return 0;
79} 76}
80 77
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 74d5182db4..cfd13eb866 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -11,6 +11,7 @@
11#include "ssb_private.h" 11#include "ssb_private.h"
12 12
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/io.h>
14#include <linux/ssb/ssb.h> 15#include <linux/ssb/ssb.h>
15#include <linux/ssb/ssb_regs.h> 16#include <linux/ssb/ssb_regs.h>
16#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index ac49b15fa7..516a6400db 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -28,27 +28,7 @@ obj-$(CONFIG_USB_MICROTEK) += image/
28 28
29obj-$(CONFIG_USB_SERIAL) += serial/ 29obj-$(CONFIG_USB_SERIAL) += serial/
30 30
31obj-$(CONFIG_USB_ADUTUX) += misc/ 31obj-$(CONFIG_USB) += misc/
32obj-$(CONFIG_USB_APPLEDISPLAY) += misc/
33obj-$(CONFIG_USB_AUERSWALD) += misc/
34obj-$(CONFIG_USB_BERRY_CHARGE) += misc/
35obj-$(CONFIG_USB_CYPRESS_CY7C63)+= misc/
36obj-$(CONFIG_USB_CYTHERM) += misc/
37obj-$(CONFIG_USB_EMI26) += misc/
38obj-$(CONFIG_USB_EMI62) += misc/
39obj-$(CONFIG_USB_FTDI_ELAN) += misc/
40obj-$(CONFIG_USB_IDMOUSE) += misc/
41obj-$(CONFIG_USB_LCD) += misc/
42obj-$(CONFIG_USB_LD) += misc/
43obj-$(CONFIG_USB_LED) += misc/
44obj-$(CONFIG_USB_LEGOTOWER) += misc/
45obj-$(CONFIG_USB_PHIDGETSERVO) += misc/
46obj-$(CONFIG_USB_RIO500) += misc/
47obj-$(CONFIG_USB_SISUSBVGA) += misc/
48obj-$(CONFIG_USB_TEST) += misc/
49obj-$(CONFIG_USB_TRANCEVIBRATOR)+= misc/
50obj-$(CONFIG_USB_USS720) += misc/
51obj-$(CONFIG_USB_IOWARRIOR) += misc/
52 32
53obj-$(CONFIG_USB_ATM) += atm/ 33obj-$(CONFIG_USB_ATM) += atm/
54obj-$(CONFIG_USB_SPEEDTOUCH) += atm/ 34obj-$(CONFIG_USB_SPEEDTOUCH) += atm/
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index a73e714288..a51eeedc18 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -482,7 +482,9 @@ static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm,
482 int rbuflen = ((rsize - 1) / stride + 1) * CMD_PACKET_SIZE; 482 int rbuflen = ((rsize - 1) / stride + 1) * CMD_PACKET_SIZE;
483 483
484 if (wbuflen > PAGE_SIZE || rbuflen > PAGE_SIZE) { 484 if (wbuflen > PAGE_SIZE || rbuflen > PAGE_SIZE) {
485 dbg("too big transfer requested"); 485 if (printk_ratelimit())
486 usb_err(instance->usbatm, "requested transfer size too large (%d, %d)\n",
487 wbuflen, rbuflen);
486 ret = -ENOMEM; 488 ret = -ENOMEM;
487 goto fail; 489 goto fail;
488 } 490 }
@@ -493,8 +495,9 @@ static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm,
493 init_completion(&instance->rcv_done); 495 init_completion(&instance->rcv_done);
494 ret = usb_submit_urb(instance->rcv_urb, GFP_KERNEL); 496 ret = usb_submit_urb(instance->rcv_urb, GFP_KERNEL);
495 if (ret < 0) { 497 if (ret < 0) {
496 dbg("submitting read urb for cm %#x failed", cm); 498 if (printk_ratelimit())
497 ret = ret; 499 usb_err(instance->usbatm, "submit of read urb for cm %#x failed (%d)\n",
500 cm, ret);
498 goto fail; 501 goto fail;
499 } 502 }
500 503
@@ -510,27 +513,29 @@ static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm,
510 init_completion(&instance->snd_done); 513 init_completion(&instance->snd_done);
511 ret = usb_submit_urb(instance->snd_urb, GFP_KERNEL); 514 ret = usb_submit_urb(instance->snd_urb, GFP_KERNEL);
512 if (ret < 0) { 515 if (ret < 0) {
513 dbg("submitting write urb for cm %#x failed", cm); 516 if (printk_ratelimit())
514 ret = ret; 517 usb_err(instance->usbatm, "submit of write urb for cm %#x failed (%d)\n",
518 cm, ret);
515 goto fail; 519 goto fail;
516 } 520 }
517 521
518 ret = cxacru_start_wait_urb(instance->snd_urb, &instance->snd_done, NULL); 522 ret = cxacru_start_wait_urb(instance->snd_urb, &instance->snd_done, NULL);
519 if (ret < 0) { 523 if (ret < 0) {
520 dbg("sending cm %#x failed", cm); 524 if (printk_ratelimit())
521 ret = ret; 525 usb_err(instance->usbatm, "send of cm %#x failed (%d)\n", cm, ret);
522 goto fail; 526 goto fail;
523 } 527 }
524 528
525 ret = cxacru_start_wait_urb(instance->rcv_urb, &instance->rcv_done, &actlen); 529 ret = cxacru_start_wait_urb(instance->rcv_urb, &instance->rcv_done, &actlen);
526 if (ret < 0) { 530 if (ret < 0) {
527 dbg("receiving cm %#x failed", cm); 531 if (printk_ratelimit())
528 ret = ret; 532 usb_err(instance->usbatm, "receive of cm %#x failed (%d)\n", cm, ret);
529 goto fail; 533 goto fail;
530 } 534 }
531 if (actlen % CMD_PACKET_SIZE || !actlen) { 535 if (actlen % CMD_PACKET_SIZE || !actlen) {
532 dbg("response is not a positive multiple of %d: %#x", 536 if (printk_ratelimit())
533 CMD_PACKET_SIZE, actlen); 537 usb_err(instance->usbatm, "invalid response length to cm %#x: %d\n",
538 cm, actlen);
534 ret = -EIO; 539 ret = -EIO;
535 goto fail; 540 goto fail;
536 } 541 }
@@ -538,12 +543,16 @@ static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm,
538 /* check the return status and copy the data to the output buffer, if needed */ 543 /* check the return status and copy the data to the output buffer, if needed */
539 for (offb = offd = 0; offd < rsize && offb < actlen; offb += CMD_PACKET_SIZE) { 544 for (offb = offd = 0; offd < rsize && offb < actlen; offb += CMD_PACKET_SIZE) {
540 if (rbuf[offb] != cm) { 545 if (rbuf[offb] != cm) {
541 dbg("wrong cm %#x in response", rbuf[offb]); 546 if (printk_ratelimit())
547 usb_err(instance->usbatm, "wrong cm %#x in response to cm %#x\n",
548 rbuf[offb], cm);
542 ret = -EIO; 549 ret = -EIO;
543 goto fail; 550 goto fail;
544 } 551 }
545 if (rbuf[offb + 1] != CM_STATUS_SUCCESS) { 552 if (rbuf[offb + 1] != CM_STATUS_SUCCESS) {
546 dbg("response failed: %#x", rbuf[offb + 1]); 553 if (printk_ratelimit())
554 usb_err(instance->usbatm, "response to cm %#x failed: %#x\n",
555 cm, rbuf[offb + 1]);
547 ret = -EIO; 556 ret = -EIO;
548 goto fail; 557 goto fail;
549 } 558 }
@@ -582,14 +591,18 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
582 for (offb = 0; offb < len; ) { 591 for (offb = 0; offb < len; ) {
583 int l = le32_to_cpu(buf[offb++]); 592 int l = le32_to_cpu(buf[offb++]);
584 if (l > stride || l > (len - offb) / 2) { 593 if (l > stride || l > (len - offb) / 2) {
585 dbg("wrong data length %#x in response", l); 594 if (printk_ratelimit())
595 usb_err(instance->usbatm, "invalid data length from cm %#x: %d\n",
596 cm, l);
586 ret = -EIO; 597 ret = -EIO;
587 goto cleanup; 598 goto cleanup;
588 } 599 }
589 while (l--) { 600 while (l--) {
590 offd = le32_to_cpu(buf[offb++]); 601 offd = le32_to_cpu(buf[offb++]);
591 if (offd >= size) { 602 if (offd >= size) {
592 dbg("wrong index %#x in response", offd); 603 if (printk_ratelimit())
604 usb_err(instance->usbatm, "wrong index #%x in response to cm #%x\n",
605 offd, cm);
593 ret = -EIO; 606 ret = -EIO;
594 goto cleanup; 607 goto cleanup;
595 } 608 }
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index eb0615abff..8b132c4a50 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -251,7 +251,6 @@ static int speedtch_upload_firmware(struct speedtch_instance_data *instance,
251{ 251{
252 unsigned char *buffer; 252 unsigned char *buffer;
253 struct usbatm_data *usbatm = instance->usbatm; 253 struct usbatm_data *usbatm = instance->usbatm;
254 struct usb_interface *intf;
255 struct usb_device *usb_dev = usbatm->usb_dev; 254 struct usb_device *usb_dev = usbatm->usb_dev;
256 int actual_length; 255 int actual_length;
257 int ret = 0; 256 int ret = 0;
@@ -265,7 +264,7 @@ static int speedtch_upload_firmware(struct speedtch_instance_data *instance,
265 goto out; 264 goto out;
266 } 265 }
267 266
268 if (!(intf = usb_ifnum_to_if(usb_dev, 2))) { 267 if (!usb_ifnum_to_if(usb_dev, 2)) {
269 ret = -ENODEV; 268 ret = -ENODEV;
270 usb_dbg(usbatm, "%s: interface not found!\n", __func__); 269 usb_dbg(usbatm, "%s: interface not found!\n", __func__);
271 goto out_free; 270 goto out_free;
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 29807d048b..389c5b164e 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -2,7 +2,8 @@
2 * Copyright (c) 2003, 2004 2 * Copyright (c) 2003, 2004
3 * Damien Bergamini <damien.bergamini@free.fr>. All rights reserved. 3 * Damien Bergamini <damien.bergamini@free.fr>. All rights reserved.
4 * 4 *
5 * Copyright (c) 2005 Matthieu Castet <castet.matthieu@free.fr> 5 * Copyright (c) 2005-2007 Matthieu Castet <castet.matthieu@free.fr>
6 * Copyright (c) 2005-2007 Stanislaw Gruszka <stf_xl@wp.pl>
6 * 7 *
7 * This software is available to you under a choice of one of two 8 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU 9 * licenses. You may choose to be licensed under the terms of the GNU
@@ -107,18 +108,51 @@
107#define uea_info(usb_dev, format,args...) \ 108#define uea_info(usb_dev, format,args...) \
108 dev_info(&(usb_dev)->dev ,"[ueagle-atm] " format, ##args) 109 dev_info(&(usb_dev)->dev ,"[ueagle-atm] " format, ##args)
109 110
110struct uea_cmvs { 111struct intr_pkt;
112
113/* cmv's from firmware */
114struct uea_cmvs_v1 {
111 u32 address; 115 u32 address;
112 u16 offset; 116 u16 offset;
113 u32 data; 117 u32 data;
114} __attribute__ ((packed)); 118} __attribute__ ((packed));
115 119
120struct uea_cmvs_v2 {
121 u32 group;
122 u32 address;
123 u32 offset;
124 u32 data;
125} __attribute__ ((packed));
126
127/* information about currently processed cmv */
128struct cmv_dsc_e1 {
129 u8 function;
130 u16 idx;
131 u32 address;
132 u16 offset;
133};
134
135struct cmv_dsc_e4 {
136 u16 function;
137 u16 offset;
138 u16 address;
139 u16 group;
140};
141
142union cmv_dsc {
143 struct cmv_dsc_e1 e1;
144 struct cmv_dsc_e4 e4;
145};
146
116struct uea_softc { 147struct uea_softc {
117 struct usb_device *usb_dev; 148 struct usb_device *usb_dev;
118 struct usbatm_data *usbatm; 149 struct usbatm_data *usbatm;
119 150
120 int modem_index; 151 int modem_index;
121 unsigned int driver_info; 152 unsigned int driver_info;
153 int annex;
154#define ANNEXA 0
155#define ANNEXB 1
122 156
123 int booting; 157 int booting;
124 int reset; 158 int reset;
@@ -127,20 +161,23 @@ struct uea_softc {
127 161
128 struct task_struct *kthread; 162 struct task_struct *kthread;
129 u32 data; 163 u32 data;
130 wait_queue_head_t cmv_ack_wait; 164 u32 data1;
165
131 int cmv_ack; 166 int cmv_ack;
167 union cmv_dsc cmv_dsc;
132 168
133 struct work_struct task; 169 struct work_struct task;
170 struct workqueue_struct *work_q;
134 u16 pageno; 171 u16 pageno;
135 u16 ovl; 172 u16 ovl;
136 173
137 const struct firmware *dsp_firm; 174 const struct firmware *dsp_firm;
138 struct urb *urb_int; 175 struct urb *urb_int;
139 176
140 u8 cmv_function; 177 void (*dispatch_cmv) (struct uea_softc *, struct intr_pkt *);
141 u16 cmv_idx; 178 void (*schedule_load_page) (struct uea_softc *, struct intr_pkt *);
142 u32 cmv_address; 179 int (*stat) (struct uea_softc *);
143 u16 cmv_offset; 180 int (*send_cmvs) (struct uea_softc *);
144 181
145 /* keep in sync with eaglectl */ 182 /* keep in sync with eaglectl */
146 struct uea_stats { 183 struct uea_stats {
@@ -174,10 +211,34 @@ struct uea_softc {
174#define ELSA_PID_PSTFIRM 0x3350 211#define ELSA_PID_PSTFIRM 0x3350
175#define ELSA_PID_PREFIRM 0x3351 212#define ELSA_PID_PREFIRM 0x3351
176 213
214#define ELSA_PID_A_PREFIRM 0x3352
215#define ELSA_PID_A_PSTFIRM 0x3353
216#define ELSA_PID_B_PREFIRM 0x3362
217#define ELSA_PID_B_PSTFIRM 0x3363
218
177/* 219/*
178 * Sagem USB IDs 220 * Devolo IDs : pots if (pid & 0x10)
179 */ 221 */
180#define EAGLE_VID 0x1110 222#define DEVOLO_VID 0x1039
223#define DEVOLO_EAGLE_I_A_PID_PSTFIRM 0x2110
224#define DEVOLO_EAGLE_I_A_PID_PREFIRM 0x2111
225
226#define DEVOLO_EAGLE_I_B_PID_PSTFIRM 0x2100
227#define DEVOLO_EAGLE_I_B_PID_PREFIRM 0x2101
228
229#define DEVOLO_EAGLE_II_A_PID_PSTFIRM 0x2130
230#define DEVOLO_EAGLE_II_A_PID_PREFIRM 0x2131
231
232#define DEVOLO_EAGLE_II_B_PID_PSTFIRM 0x2120
233#define DEVOLO_EAGLE_II_B_PID_PREFIRM 0x2121
234
235/*
236 * Reference design USB IDs
237 */
238#define ANALOG_VID 0x1110
239#define ADI930_PID_PREFIRM 0x9001
240#define ADI930_PID_PSTFIRM 0x9000
241
181#define EAGLE_I_PID_PREFIRM 0x9010 /* Eagle I */ 242#define EAGLE_I_PID_PREFIRM 0x9010 /* Eagle I */
182#define EAGLE_I_PID_PSTFIRM 0x900F /* Eagle I */ 243#define EAGLE_I_PID_PSTFIRM 0x900F /* Eagle I */
183 244
@@ -187,12 +248,12 @@ struct uea_softc {
187#define EAGLE_II_PID_PREFIRM 0x9022 /* Eagle II */ 248#define EAGLE_II_PID_PREFIRM 0x9022 /* Eagle II */
188#define EAGLE_II_PID_PSTFIRM 0x9021 /* Eagle II */ 249#define EAGLE_II_PID_PSTFIRM 0x9021 /* Eagle II */
189 250
190/*
191 * Eagle III Pid
192 */
193#define EAGLE_III_PID_PREFIRM 0x9032 /* Eagle III */ 251#define EAGLE_III_PID_PREFIRM 0x9032 /* Eagle III */
194#define EAGLE_III_PID_PSTFIRM 0x9031 /* Eagle III */ 252#define EAGLE_III_PID_PSTFIRM 0x9031 /* Eagle III */
195 253
254#define EAGLE_IV_PID_PREFIRM 0x9042 /* Eagle IV */
255#define EAGLE_IV_PID_PSTFIRM 0x9041 /* Eagle IV */
256
196/* 257/*
197 * USR USB IDs 258 * USR USB IDs
198 */ 259 */
@@ -208,11 +269,15 @@ struct uea_softc {
208 269
209#define PREFIRM 0 270#define PREFIRM 0
210#define PSTFIRM (1<<7) 271#define PSTFIRM (1<<7)
272#define AUTO_ANNEX_A (1<<8)
273#define AUTO_ANNEX_B (1<<9)
274
211enum { 275enum {
212 ADI930 = 0, 276 ADI930 = 0,
213 EAGLE_I, 277 EAGLE_I,
214 EAGLE_II, 278 EAGLE_II,
215 EAGLE_III 279 EAGLE_III,
280 EAGLE_IV
216}; 281};
217 282
218/* macros for both struct usb_device_id and struct uea_softc */ 283/* macros for both struct usb_device_id and struct uea_softc */
@@ -221,15 +286,18 @@ enum {
221#define UEA_CHIP_VERSION(x) \ 286#define UEA_CHIP_VERSION(x) \
222 ((x)->driver_info & 0xf) 287 ((x)->driver_info & 0xf)
223 288
224#define IS_ISDN(usb_dev) \ 289#define IS_ISDN(x) \
225 (le16_to_cpu((usb_dev)->descriptor.bcdDevice) & 0x80) 290 ((x)->annex & ANNEXB)
226 291
227#define INS_TO_USBDEV(ins) ins->usb_dev 292#define INS_TO_USBDEV(ins) ins->usb_dev
228 293
229#define GET_STATUS(data) \ 294#define GET_STATUS(data) \
230 ((data >> 8) & 0xf) 295 ((data >> 8) & 0xf)
296
231#define IS_OPERATIONAL(sc) \ 297#define IS_OPERATIONAL(sc) \
232 (GET_STATUS(sc->stats.phy.state) == 2) 298 ((UEA_CHIP_VERSION(sc) != EAGLE_IV) ? \
299 (GET_STATUS(sc->stats.phy.state) == 2) : \
300 (sc->stats.phy.state == 7))
233 301
234/* 302/*
235 * Set of macros to handle unaligned data in the firmware blob. 303 * Set of macros to handle unaligned data in the firmware blob.
@@ -259,7 +327,8 @@ enum {
259#define UEA_INTR_PIPE 0x04 327#define UEA_INTR_PIPE 0x04
260#define UEA_ISO_DATA_PIPE 0x08 328#define UEA_ISO_DATA_PIPE 0x08
261 329
262#define UEA_SET_BLOCK 0x0001 330#define UEA_E1_SET_BLOCK 0x0001
331#define UEA_E4_SET_BLOCK 0x002c
263#define UEA_SET_MODE 0x0003 332#define UEA_SET_MODE 0x0003
264#define UEA_SET_2183_DATA 0x0004 333#define UEA_SET_2183_DATA 0x0004
265#define UEA_SET_TIMEOUT 0x0011 334#define UEA_SET_TIMEOUT 0x0011
@@ -275,71 +344,179 @@ enum {
275#define UEA_MPTX_MAILBOX (0x3fd6 | 0x4000) 344#define UEA_MPTX_MAILBOX (0x3fd6 | 0x4000)
276#define UEA_MPRX_MAILBOX (0x3fdf | 0x4000) 345#define UEA_MPRX_MAILBOX (0x3fdf | 0x4000)
277 346
278/* structure describing a block within a DSP page */ 347/* block information in eagle4 dsp firmware */
279struct block_info { 348struct block_index {
349 __le32 PageOffset;
350 __le32 NotLastBlock;
351 __le32 dummy;
352 __le32 PageSize;
353 __le32 PageAddress;
354 __le16 dummy1;
355 __le16 PageNumber;
356} __attribute__ ((packed));
357
358#define E4_IS_BOOT_PAGE(PageSize) ((le32_to_cpu(PageSize)) & 0x80000000)
359#define E4_PAGE_BYTES(PageSize) ((le32_to_cpu(PageSize) & 0x7fffffff) * 4)
360
361#define E4_L1_STRING_HEADER 0x10
362#define E4_MAX_PAGE_NUMBER 0x58
363#define E4_NO_SWAPPAGE_HEADERS 0x31
364
365/* l1_code is eagle4 dsp firmware format */
366struct l1_code {
367 u8 string_header[E4_L1_STRING_HEADER];
368 u8 page_number_to_block_index[E4_MAX_PAGE_NUMBER];
369 struct block_index page_header[E4_NO_SWAPPAGE_HEADERS];
370 u8 code [0];
371} __attribute__ ((packed));
372
373/* structures describing a block within a DSP page */
374struct block_info_e1 {
280 __le16 wHdr; 375 __le16 wHdr;
281#define UEA_BIHDR 0xabcd
282 __le16 wAddress; 376 __le16 wAddress;
283 __le16 wSize; 377 __le16 wSize;
284 __le16 wOvlOffset; 378 __le16 wOvlOffset;
285 __le16 wOvl; /* overlay */ 379 __le16 wOvl; /* overlay */
286 __le16 wLast; 380 __le16 wLast;
287} __attribute__ ((packed)); 381} __attribute__ ((packed));
288#define BLOCK_INFO_SIZE 12 382#define E1_BLOCK_INFO_SIZE 12
383
384struct block_info_e4 {
385 __be16 wHdr;
386 __u8 bBootPage;
387 __u8 bPageNumber;
388 __be32 dwSize;
389 __be32 dwAddress;
390 __be16 wReserved;
391} __attribute__ ((packed));
392#define E4_BLOCK_INFO_SIZE 14
289 393
290/* structure representing a CMV (Configuration and Management Variable) */ 394#define UEA_BIHDR 0xabcd
291struct cmv { 395#define UEA_RESERVED 0xffff
292 __le16 wPreamble; 396
293#define PREAMBLE 0x535c 397/* constants describing cmv type */
294 __u8 bDirection; 398#define E1_PREAMBLE 0x535c
295#define MODEMTOHOST 0x01 399#define E1_MODEMTOHOST 0x01
296#define HOSTTOMODEM 0x10 400#define E1_HOSTTOMODEM 0x10
297 __u8 bFunction; 401
298#define FUNCTION_TYPE(f) ((f) >> 4) 402#define E1_MEMACCESS 0x1
299#define MEMACCESS 0x1 403#define E1_ADSLDIRECTIVE 0x7
300#define ADSLDIRECTIVE 0x7 404#define E1_FUNCTION_TYPE(f) ((f) >> 4)
405#define E1_FUNCTION_SUBTYPE(f) ((f) & 0x0f)
406
407#define E4_MEMACCESS 0
408#define E4_ADSLDIRECTIVE 0xf
409#define E4_FUNCTION_TYPE(f) ((f) >> 8)
410#define E4_FUNCTION_SIZE(f) ((f) & 0x0f)
411#define E4_FUNCTION_SUBTYPE(f) (((f) >> 4) & 0x0f)
301 412
302#define FUNCTION_SUBTYPE(f) ((f) & 0x0f)
303/* for MEMACCESS */ 413/* for MEMACCESS */
304#define REQUESTREAD 0x0 414#define E1_REQUESTREAD 0x0
305#define REQUESTWRITE 0x1 415#define E1_REQUESTWRITE 0x1
306#define REPLYREAD 0x2 416#define E1_REPLYREAD 0x2
307#define REPLYWRITE 0x3 417#define E1_REPLYWRITE 0x3
418
419#define E4_REQUESTREAD 0x0
420#define E4_REQUESTWRITE 0x4
421#define E4_REPLYREAD (E4_REQUESTREAD | 1)
422#define E4_REPLYWRITE (E4_REQUESTWRITE | 1)
423
308/* for ADSLDIRECTIVE */ 424/* for ADSLDIRECTIVE */
309#define KERNELREADY 0x0 425#define E1_KERNELREADY 0x0
310#define MODEMREADY 0x1 426#define E1_MODEMREADY 0x1
311 427
312#define MAKEFUNCTION(t, s) (((t) & 0xf) << 4 | ((s) & 0xf)) 428#define E4_KERNELREADY 0x0
313 __le16 wIndex; 429#define E4_MODEMREADY 0x1
314 __le32 dwSymbolicAddress; 430
315#define MAKESA(a, b, c, d) \ 431#define E1_MAKEFUNCTION(t, s) (((t) & 0xf) << 4 | ((s) & 0xf))
432#define E4_MAKEFUNCTION(t, st, s) (((t) & 0xf) << 8 | ((st) & 0xf) << 4 | ((s) & 0xf))
433
434#define E1_MAKESA(a, b, c, d) \
316 (((c) & 0xff) << 24 | \ 435 (((c) & 0xff) << 24 | \
317 ((d) & 0xff) << 16 | \ 436 ((d) & 0xff) << 16 | \
318 ((a) & 0xff) << 8 | \ 437 ((a) & 0xff) << 8 | \
319 ((b) & 0xff)) 438 ((b) & 0xff))
320#define GETSA1(a) ((a >> 8) & 0xff) 439
321#define GETSA2(a) (a & 0xff) 440#define E1_GETSA1(a) ((a >> 8) & 0xff)
322#define GETSA3(a) ((a >> 24) & 0xff) 441#define E1_GETSA2(a) (a & 0xff)
323#define GETSA4(a) ((a >> 16) & 0xff) 442#define E1_GETSA3(a) ((a >> 24) & 0xff)
324 443#define E1_GETSA4(a) ((a >> 16) & 0xff)
325#define SA_CNTL MAKESA('C', 'N', 'T', 'L') 444
326#define SA_DIAG MAKESA('D', 'I', 'A', 'G') 445#define E1_SA_CNTL E1_MAKESA('C', 'N', 'T', 'L')
327#define SA_INFO MAKESA('I', 'N', 'F', 'O') 446#define E1_SA_DIAG E1_MAKESA('D', 'I', 'A', 'G')
328#define SA_OPTN MAKESA('O', 'P', 'T', 'N') 447#define E1_SA_INFO E1_MAKESA('I', 'N', 'F', 'O')
329#define SA_RATE MAKESA('R', 'A', 'T', 'E') 448#define E1_SA_OPTN E1_MAKESA('O', 'P', 'T', 'N')
330#define SA_STAT MAKESA('S', 'T', 'A', 'T') 449#define E1_SA_RATE E1_MAKESA('R', 'A', 'T', 'E')
450#define E1_SA_STAT E1_MAKESA('S', 'T', 'A', 'T')
451
452#define E4_SA_CNTL 1
453#define E4_SA_STAT 2
454#define E4_SA_INFO 3
455#define E4_SA_TEST 4
456#define E4_SA_OPTN 5
457#define E4_SA_RATE 6
458#define E4_SA_DIAG 7
459#define E4_SA_CNFG 8
460
461/* structures representing a CMV (Configuration and Management Variable) */
462struct cmv_e1 {
463 __le16 wPreamble;
464 __u8 bDirection;
465 __u8 bFunction;
466 __le16 wIndex;
467 __le32 dwSymbolicAddress;
331 __le16 wOffsetAddress; 468 __le16 wOffsetAddress;
332 __le32 dwData; 469 __le32 dwData;
333} __attribute__ ((packed)); 470} __attribute__ ((packed));
334#define CMV_SIZE 16
335 471
336/* structure representing swap information */ 472struct cmv_e4 {
337struct swap_info { 473 __be16 wGroup;
474 __be16 wFunction;
475 __be16 wOffset;
476 __be16 wAddress;
477 __be32 dwData [6];
478} __attribute__ ((packed));
479
480/* structures representing swap information */
481struct swap_info_e1 {
338 __u8 bSwapPageNo; 482 __u8 bSwapPageNo;
339 __u8 bOvl; /* overlay */ 483 __u8 bOvl; /* overlay */
340} __attribute__ ((packed)); 484} __attribute__ ((packed));
341 485
342/* structure representing interrupt data */ 486struct swap_info_e4 {
487 __u8 bSwapPageNo;
488} __attribute__ ((packed));
489
490/* structures representing interrupt data */
491#define e1_bSwapPageNo u.e1.s1.swapinfo.bSwapPageNo
492#define e1_bOvl u.e1.s1.swapinfo.bOvl
493#define e4_bSwapPageNo u.e4.s1.swapinfo.bSwapPageNo
494
495#define INT_LOADSWAPPAGE 0x0001
496#define INT_INCOMINGCMV 0x0002
497
498union intr_data_e1 {
499 struct {
500 struct swap_info_e1 swapinfo;
501 __le16 wDataSize;
502 } __attribute__ ((packed)) s1;
503 struct {
504 struct cmv_e1 cmv;
505 __le16 wDataSize;
506 } __attribute__ ((packed)) s2;
507} __attribute__ ((packed));
508
509union intr_data_e4 {
510 struct {
511 struct swap_info_e4 swapinfo;
512 __le16 wDataSize;
513 } __attribute__ ((packed)) s1;
514 struct {
515 struct cmv_e4 cmv;
516 __le16 wDataSize;
517 } __attribute__ ((packed)) s2;
518} __attribute__ ((packed));
519
343struct intr_pkt { 520struct intr_pkt {
344 __u8 bType; 521 __u8 bType;
345 __u8 bNotification; 522 __u8 bNotification;
@@ -347,43 +524,48 @@ struct intr_pkt {
347 __le16 wIndex; 524 __le16 wIndex;
348 __le16 wLength; 525 __le16 wLength;
349 __le16 wInterrupt; 526 __le16 wInterrupt;
350#define INT_LOADSWAPPAGE 0x0001
351#define INT_INCOMINGCMV 0x0002
352 union { 527 union {
353 struct { 528 union intr_data_e1 e1;
354 struct swap_info swapinfo; 529 union intr_data_e4 e4;
355 __le16 wDataSize; 530 } u;
356 } __attribute__ ((packed)) s1;
357
358 struct {
359 struct cmv cmv;
360 __le16 wDataSize;
361 } __attribute__ ((packed)) s2;
362 } __attribute__ ((packed)) u;
363#define bSwapPageNo u.s1.swapinfo.bSwapPageNo
364#define bOvl u.s1.swapinfo.bOvl
365} __attribute__ ((packed)); 531} __attribute__ ((packed));
366#define INTR_PKT_SIZE 28 532
533#define E1_INTR_PKT_SIZE 28
534#define E4_INTR_PKT_SIZE 64
367 535
368static struct usb_driver uea_driver; 536static struct usb_driver uea_driver;
369static DEFINE_MUTEX(uea_mutex); 537static DEFINE_MUTEX(uea_mutex);
370static const char *chip_name[] = {"ADI930", "Eagle I", "Eagle II", "Eagle III"}; 538static const char *chip_name[] = {"ADI930", "Eagle I", "Eagle II", "Eagle III", "Eagle IV"};
371 539
372static int modem_index; 540static int modem_index;
373static unsigned int debug; 541static unsigned int debug;
374static int use_iso[NB_MODEM] = {[0 ... (NB_MODEM - 1)] = 1}; 542static unsigned int altsetting[NB_MODEM] = {[0 ... (NB_MODEM - 1)] = FASTEST_ISO_INTF};
375static int sync_wait[NB_MODEM]; 543static int sync_wait[NB_MODEM];
376static char *cmv_file[NB_MODEM]; 544static char *cmv_file[NB_MODEM];
545static int annex[NB_MODEM];
377 546
378module_param(debug, uint, 0644); 547module_param(debug, uint, 0644);
379MODULE_PARM_DESC(debug, "module debug level (0=off,1=on,2=verbose)"); 548MODULE_PARM_DESC(debug, "module debug level (0=off,1=on,2=verbose)");
380module_param_array(use_iso, bool, NULL, 0644); 549module_param_array(altsetting, uint, NULL, 0644);
381MODULE_PARM_DESC(use_iso, "use isochronous usb pipe for incoming traffic"); 550MODULE_PARM_DESC(altsetting, "alternate setting for incoming traffic: 0=bulk, "
551 "1=isoc slowest, ... , 8=isoc fastest (default)");
382module_param_array(sync_wait, bool, NULL, 0644); 552module_param_array(sync_wait, bool, NULL, 0644);
383MODULE_PARM_DESC(sync_wait, "wait the synchronisation before starting ATM"); 553MODULE_PARM_DESC(sync_wait, "wait the synchronisation before starting ATM");
384module_param_array(cmv_file, charp, NULL, 0644); 554module_param_array(cmv_file, charp, NULL, 0644);
385MODULE_PARM_DESC(cmv_file, 555MODULE_PARM_DESC(cmv_file,
386 "file name with configuration and management variables"); 556 "file name with configuration and management variables");
557module_param_array(annex, uint, NULL, 0644);
558MODULE_PARM_DESC(annex,
559 "manually set annex a/b (0=auto, 1=annex a, 2=annex b)");
560
561#define uea_wait(sc, cond, timeo) \
562({ \
563 int _r = wait_event_interruptible_timeout(sc->sync_q, \
564 (cond) || kthread_should_stop(), timeo); \
565 if (kthread_should_stop()) \
566 _r = -ENODEV; \
567 _r; \
568})
387 569
388#define UPDATE_ATM_STAT(type, val) \ 570#define UPDATE_ATM_STAT(type, val) \
389 do { \ 571 do { \
@@ -519,6 +701,9 @@ static int uea_load_firmware(struct usb_device *usb, unsigned int ver)
519 case EAGLE_III: 701 case EAGLE_III:
520 fw_name = FW_DIR "eagleIII.fw"; 702 fw_name = FW_DIR "eagleIII.fw";
521 break; 703 break;
704 case EAGLE_IV:
705 fw_name = FW_DIR "eagleIV.fw";
706 break;
522 } 707 }
523 708
524 ret = request_firmware_nowait(THIS_MODULE, 1, fw_name, &usb->dev, usb, uea_upload_pre_firmware); 709 ret = request_firmware_nowait(THIS_MODULE, 1, fw_name, &usb->dev, usb, uea_upload_pre_firmware);
@@ -537,7 +722,7 @@ static int uea_load_firmware(struct usb_device *usb, unsigned int ver)
537/* 722/*
538 * Make sure that the DSP code provided is safe to use. 723 * Make sure that the DSP code provided is safe to use.
539 */ 724 */
540static int check_dsp(u8 *dsp, unsigned int len) 725static int check_dsp_e1(u8 *dsp, unsigned int len)
541{ 726{
542 u8 pagecount, blockcount; 727 u8 pagecount, blockcount;
543 u16 blocksize; 728 u16 blocksize;
@@ -588,6 +773,51 @@ static int check_dsp(u8 *dsp, unsigned int len)
588 return 0; 773 return 0;
589} 774}
590 775
776static int check_dsp_e4(u8 *dsp, int len)
777{
778 int i;
779 struct l1_code *p = (struct l1_code *) dsp;
780 unsigned int sum = p->code - dsp;
781
782 if (len < sum)
783 return 1;
784
785 if (strcmp("STRATIPHY ANEXA", p->string_header) != 0 &&
786 strcmp("STRATIPHY ANEXB", p->string_header) != 0)
787 return 1;
788
789 for (i = 0; i < E4_MAX_PAGE_NUMBER; i++) {
790 struct block_index *blockidx;
791 u8 blockno = p->page_number_to_block_index[i];
792 if (blockno >= E4_NO_SWAPPAGE_HEADERS)
793 continue;
794
795 do {
796 u64 l;
797
798 if (blockno >= E4_NO_SWAPPAGE_HEADERS)
799 return 1;
800
801 blockidx = &p->page_header[blockno++];
802 if ((u8 *)(blockidx + 1) - dsp >= len)
803 return 1;
804
805 if (le16_to_cpu(blockidx->PageNumber) != i)
806 return 1;
807
808 l = E4_PAGE_BYTES(blockidx->PageSize);
809 sum += l;
810 l += le32_to_cpu(blockidx->PageOffset);
811 if (l > len)
812 return 1;
813
814 /* zero is zero regardless endianes */
815 } while (blockidx->NotLastBlock);
816 }
817
818 return (sum == len) ? 0 : 1;
819}
820
591/* 821/*
592 * send data to the idma pipe 822 * send data to the idma pipe
593 * */ 823 * */
@@ -624,13 +854,18 @@ static int request_dsp(struct uea_softc *sc)
624 int ret; 854 int ret;
625 char *dsp_name; 855 char *dsp_name;
626 856
627 if (UEA_CHIP_VERSION(sc) == ADI930) { 857 if (UEA_CHIP_VERSION(sc) == EAGLE_IV) {
628 if (IS_ISDN(sc->usb_dev)) 858 if (IS_ISDN(sc))
859 dsp_name = FW_DIR "DSP4i.bin";
860 else
861 dsp_name = FW_DIR "DSP4p.bin";
862 } else if (UEA_CHIP_VERSION(sc) == ADI930) {
863 if (IS_ISDN(sc))
629 dsp_name = FW_DIR "DSP9i.bin"; 864 dsp_name = FW_DIR "DSP9i.bin";
630 else 865 else
631 dsp_name = FW_DIR "DSP9p.bin"; 866 dsp_name = FW_DIR "DSP9p.bin";
632 } else { 867 } else {
633 if (IS_ISDN(sc->usb_dev)) 868 if (IS_ISDN(sc))
634 dsp_name = FW_DIR "DSPei.bin"; 869 dsp_name = FW_DIR "DSPei.bin";
635 else 870 else
636 dsp_name = FW_DIR "DSPep.bin"; 871 dsp_name = FW_DIR "DSPep.bin";
@@ -640,11 +875,16 @@ static int request_dsp(struct uea_softc *sc)
640 if (ret < 0) { 875 if (ret < 0) {
641 uea_err(INS_TO_USBDEV(sc), 876 uea_err(INS_TO_USBDEV(sc),
642 "requesting firmware %s failed with error %d\n", 877 "requesting firmware %s failed with error %d\n",
643 dsp_name, ret); 878 dsp_name, ret);
644 return ret; 879 return ret;
645 } 880 }
646 881
647 if (check_dsp(sc->dsp_firm->data, sc->dsp_firm->size)) { 882 if (UEA_CHIP_VERSION(sc) == EAGLE_IV)
883 ret = check_dsp_e4(sc->dsp_firm->data, sc->dsp_firm->size);
884 else
885 ret = check_dsp_e1(sc->dsp_firm->data, sc->dsp_firm->size);
886
887 if (ret) {
648 uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", 888 uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n",
649 dsp_name); 889 dsp_name);
650 release_firmware(sc->dsp_firm); 890 release_firmware(sc->dsp_firm);
@@ -658,12 +898,12 @@ static int request_dsp(struct uea_softc *sc)
658/* 898/*
659 * The uea_load_page() function must be called within a process context 899 * The uea_load_page() function must be called within a process context
660 */ 900 */
661static void uea_load_page(struct work_struct *work) 901static void uea_load_page_e1(struct work_struct *work)
662{ 902{
663 struct uea_softc *sc = container_of(work, struct uea_softc, task); 903 struct uea_softc *sc = container_of(work, struct uea_softc, task);
664 u16 pageno = sc->pageno; 904 u16 pageno = sc->pageno;
665 u16 ovl = sc->ovl; 905 u16 ovl = sc->ovl;
666 struct block_info bi; 906 struct block_info_e1 bi;
667 907
668 u8 *p; 908 u8 *p;
669 u8 pagecount, blockcount; 909 u8 pagecount, blockcount;
@@ -716,7 +956,7 @@ static void uea_load_page(struct work_struct *work)
716 bi.wLast = cpu_to_le16((i == blockcount - 1) ? 1 : 0); 956 bi.wLast = cpu_to_le16((i == blockcount - 1) ? 1 : 0);
717 957
718 /* send block info through the IDMA pipe */ 958 /* send block info through the IDMA pipe */
719 if (uea_idma_write(sc, &bi, BLOCK_INFO_SIZE)) 959 if (uea_idma_write(sc, &bi, E1_BLOCK_INFO_SIZE))
720 goto bad2; 960 goto bad2;
721 961
722 /* send block data through the IDMA pipe */ 962 /* send block data through the IDMA pipe */
@@ -735,17 +975,114 @@ bad1:
735 uea_err(INS_TO_USBDEV(sc), "invalid DSP page %u requested\n", pageno); 975 uea_err(INS_TO_USBDEV(sc), "invalid DSP page %u requested\n", pageno);
736} 976}
737 977
978static void __uea_load_page_e4(struct uea_softc *sc, u8 pageno, int boot)
979{
980 struct block_info_e4 bi;
981 struct block_index *blockidx;
982 struct l1_code *p = (struct l1_code *) sc->dsp_firm->data;
983 u8 blockno = p->page_number_to_block_index[pageno];
984
985 bi.wHdr = cpu_to_be16(UEA_BIHDR);
986 bi.bBootPage = boot;
987 bi.bPageNumber = pageno;
988 bi.wReserved = cpu_to_be16(UEA_RESERVED);
989
990 do {
991 u8 *blockoffset;
992 unsigned int blocksize;
993
994 blockidx = &p->page_header[blockno];
995 blocksize = E4_PAGE_BYTES(blockidx->PageSize);
996 blockoffset = sc->dsp_firm->data + le32_to_cpu(blockidx->PageOffset);
997
998 bi.dwSize = cpu_to_be32(blocksize);
999 bi.dwAddress = swab32(blockidx->PageAddress);
1000
1001 uea_dbg(INS_TO_USBDEV(sc),
1002 "sending block %u for DSP page %u size %u adress %x\n",
1003 blockno, pageno, blocksize, le32_to_cpu(blockidx->PageAddress));
1004
1005 /* send block info through the IDMA pipe */
1006 if (uea_idma_write(sc, &bi, E4_BLOCK_INFO_SIZE))
1007 goto bad;
1008
1009 /* send block data through the IDMA pipe */
1010 if (uea_idma_write(sc, blockoffset, blocksize))
1011 goto bad;
1012
1013 blockno++;
1014 } while (blockidx->NotLastBlock);
1015
1016 return;
1017
1018bad:
1019 uea_err(INS_TO_USBDEV(sc), "sending DSP block %u failed\n", blockno);
1020 return;
1021}
1022
1023static void uea_load_page_e4(struct work_struct *work)
1024{
1025 struct uea_softc *sc = container_of(work, struct uea_softc, task);
1026 u8 pageno = sc->pageno;
1027 int i;
1028 struct block_info_e4 bi;
1029 struct l1_code *p;
1030
1031 uea_dbg(INS_TO_USBDEV(sc), "sending DSP page %u\n", pageno);
1032
1033 /* reload firmware when reboot start and it's loaded already */
1034 if (pageno == 0 && sc->dsp_firm) {
1035 release_firmware(sc->dsp_firm);
1036 sc->dsp_firm = NULL;
1037 }
1038
1039 if (sc->dsp_firm == NULL && request_dsp(sc) < 0)
1040 return;
1041
1042 p = (struct l1_code *) sc->dsp_firm->data;
1043 if (pageno >= p->page_header[0].PageNumber) {
1044 uea_err(INS_TO_USBDEV(sc), "invalid DSP page %u requested\n", pageno);
1045 return;
1046 }
1047
1048 if (pageno != 0) {
1049 __uea_load_page_e4(sc, pageno, 0);
1050 return;
1051 }
1052
1053 uea_dbg(INS_TO_USBDEV(sc),
1054 "sending Main DSP page %u\n", p->page_header[0].PageNumber);
1055
1056 for (i = 0; i < le16_to_cpu(p->page_header[0].PageNumber); i++) {
1057 if (E4_IS_BOOT_PAGE(p->page_header[i].PageSize))
1058 __uea_load_page_e4(sc, i, 1);
1059 }
1060
1061 uea_dbg(INS_TO_USBDEV(sc),"sending start bi\n");
1062
1063 bi.wHdr = cpu_to_be16(UEA_BIHDR);
1064 bi.bBootPage = 0;
1065 bi.bPageNumber = 0xff;
1066 bi.wReserved = cpu_to_be16(UEA_RESERVED);
1067 bi.dwSize = cpu_to_be32(E4_PAGE_BYTES(p->page_header[0].PageSize));
1068 bi.dwAddress = swab32(p->page_header[0].PageAddress);
1069
1070 /* send block info through the IDMA pipe */
1071 if (uea_idma_write(sc, &bi, E4_BLOCK_INFO_SIZE))
1072 uea_err(INS_TO_USBDEV(sc), "sending DSP start bi failed\n");
1073}
1074
738static inline void wake_up_cmv_ack(struct uea_softc *sc) 1075static inline void wake_up_cmv_ack(struct uea_softc *sc)
739{ 1076{
740 BUG_ON(sc->cmv_ack); 1077 BUG_ON(sc->cmv_ack);
741 sc->cmv_ack = 1; 1078 sc->cmv_ack = 1;
742 wake_up(&sc->cmv_ack_wait); 1079 wake_up(&sc->sync_q);
743} 1080}
744 1081
745static inline int wait_cmv_ack(struct uea_softc *sc) 1082static inline int wait_cmv_ack(struct uea_softc *sc)
746{ 1083{
747 int ret = wait_event_interruptible_timeout(sc->cmv_ack_wait, 1084 int ret = uea_wait(sc, sc->cmv_ack , ACK_TIMEOUT);
748 sc->cmv_ack, ACK_TIMEOUT); 1085
749 sc->cmv_ack = 0; 1086 sc->cmv_ack = 0;
750 1087
751 uea_dbg(INS_TO_USBDEV(sc), "wait_event_timeout : %d ms\n", 1088 uea_dbg(INS_TO_USBDEV(sc), "wait_event_timeout : %d ms\n",
@@ -792,33 +1129,68 @@ static int uea_request(struct uea_softc *sc,
792 return 0; 1129 return 0;
793} 1130}
794 1131
795static int uea_cmv(struct uea_softc *sc, 1132static int uea_cmv_e1(struct uea_softc *sc,
796 u8 function, u32 address, u16 offset, u32 data) 1133 u8 function, u32 address, u16 offset, u32 data)
797{ 1134{
798 struct cmv cmv; 1135 struct cmv_e1 cmv;
799 int ret; 1136 int ret;
800 1137
801 uea_enters(INS_TO_USBDEV(sc)); 1138 uea_enters(INS_TO_USBDEV(sc));
802 uea_vdbg(INS_TO_USBDEV(sc), "Function : %d-%d, Address : %c%c%c%c, " 1139 uea_vdbg(INS_TO_USBDEV(sc), "Function : %d-%d, Address : %c%c%c%c, "
803 "offset : 0x%04x, data : 0x%08x\n", 1140 "offset : 0x%04x, data : 0x%08x\n",
804 FUNCTION_TYPE(function), FUNCTION_SUBTYPE(function), 1141 E1_FUNCTION_TYPE(function), E1_FUNCTION_SUBTYPE(function),
805 GETSA1(address), GETSA2(address), GETSA3(address), 1142 E1_GETSA1(address), E1_GETSA2(address), E1_GETSA3(address),
806 GETSA4(address), offset, data); 1143 E1_GETSA4(address), offset, data);
1144
807 /* we send a request, but we expect a reply */ 1145 /* we send a request, but we expect a reply */
808 sc->cmv_function = function | 0x2; 1146 sc->cmv_dsc.e1.function = function | 0x2;
809 sc->cmv_idx++; 1147 sc->cmv_dsc.e1.idx++;
810 sc->cmv_address = address; 1148 sc->cmv_dsc.e1.address = address;
811 sc->cmv_offset = offset; 1149 sc->cmv_dsc.e1.offset = offset;
812 1150
813 cmv.wPreamble = cpu_to_le16(PREAMBLE); 1151 cmv.wPreamble = cpu_to_le16(E1_PREAMBLE);
814 cmv.bDirection = HOSTTOMODEM; 1152 cmv.bDirection = E1_HOSTTOMODEM;
815 cmv.bFunction = function; 1153 cmv.bFunction = function;
816 cmv.wIndex = cpu_to_le16(sc->cmv_idx); 1154 cmv.wIndex = cpu_to_le16(sc->cmv_dsc.e1.idx);
817 put_unaligned(cpu_to_le32(address), &cmv.dwSymbolicAddress); 1155 put_unaligned(cpu_to_le32(address), &cmv.dwSymbolicAddress);
818 cmv.wOffsetAddress = cpu_to_le16(offset); 1156 cmv.wOffsetAddress = cpu_to_le16(offset);
819 put_unaligned(cpu_to_le32(data >> 16 | data << 16), &cmv.dwData); 1157 put_unaligned(cpu_to_le32(data >> 16 | data << 16), &cmv.dwData);
820 1158
821 ret = uea_request(sc, UEA_SET_BLOCK, UEA_MPTX_START, CMV_SIZE, &cmv); 1159 ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv);
1160 if (ret < 0)
1161 return ret;
1162 ret = wait_cmv_ack(sc);
1163 uea_leaves(INS_TO_USBDEV(sc));
1164 return ret;
1165}
1166
1167static int uea_cmv_e4(struct uea_softc *sc,
1168 u16 function, u16 group, u16 address, u16 offset, u32 data)
1169{
1170 struct cmv_e4 cmv;
1171 int ret;
1172
1173 uea_enters(INS_TO_USBDEV(sc));
1174 memset(&cmv, 0, sizeof(cmv));
1175
1176 uea_vdbg(INS_TO_USBDEV(sc), "Function : %d-%d, Group : 0x%04x, "
1177 "Address : 0x%04x, offset : 0x%04x, data : 0x%08x\n",
1178 E4_FUNCTION_TYPE(function), E4_FUNCTION_SUBTYPE(function),
1179 group, address, offset, data);
1180
1181 /* we send a request, but we expect a reply */
1182 sc->cmv_dsc.e4.function = function | (0x1 << 4);
1183 sc->cmv_dsc.e4.offset = offset;
1184 sc->cmv_dsc.e4.address = address;
1185 sc->cmv_dsc.e4.group = group;
1186
1187 cmv.wFunction = cpu_to_be16(function);
1188 cmv.wGroup = cpu_to_be16(group);
1189 cmv.wAddress = cpu_to_be16(address);
1190 cmv.wOffset = cpu_to_be16(offset);
1191 cmv.dwData[0] = cpu_to_be32(data);
1192
1193 ret = uea_request(sc, UEA_E4_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv);
822 if (ret < 0) 1194 if (ret < 0)
823 return ret; 1195 return ret;
824 ret = wait_cmv_ack(sc); 1196 ret = wait_cmv_ack(sc);
@@ -826,10 +1198,10 @@ static int uea_cmv(struct uea_softc *sc,
826 return ret; 1198 return ret;
827} 1199}
828 1200
829static inline int uea_read_cmv(struct uea_softc *sc, 1201static inline int uea_read_cmv_e1(struct uea_softc *sc,
830 u32 address, u16 offset, u32 *data) 1202 u32 address, u16 offset, u32 *data)
831{ 1203{
832 int ret = uea_cmv(sc, MAKEFUNCTION(MEMACCESS, REQUESTREAD), 1204 int ret = uea_cmv_e1(sc, E1_MAKEFUNCTION(E1_MEMACCESS, E1_REQUESTREAD),
833 address, offset, 0); 1205 address, offset, 0);
834 if (ret < 0) 1206 if (ret < 0)
835 uea_err(INS_TO_USBDEV(sc), 1207 uea_err(INS_TO_USBDEV(sc),
@@ -840,10 +1212,27 @@ static inline int uea_read_cmv(struct uea_softc *sc,
840 return ret; 1212 return ret;
841} 1213}
842 1214
843static inline int uea_write_cmv(struct uea_softc *sc, 1215static inline int uea_read_cmv_e4(struct uea_softc *sc,
1216 u8 size, u16 group, u16 address, u16 offset, u32 *data)
1217{
1218 int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS, E4_REQUESTREAD, size),
1219 group, address, offset, 0);
1220 if (ret < 0)
1221 uea_err(INS_TO_USBDEV(sc),
1222 "reading cmv failed with error %d\n", ret);
1223 else {
1224 *data = sc->data;
1225 /* size is in 16-bit word quantities */
1226 if (size > 2)
1227 *(data + 1) = sc->data1;
1228 }
1229 return ret;
1230}
1231
1232static inline int uea_write_cmv_e1(struct uea_softc *sc,
844 u32 address, u16 offset, u32 data) 1233 u32 address, u16 offset, u32 data)
845{ 1234{
846 int ret = uea_cmv(sc, MAKEFUNCTION(MEMACCESS, REQUESTWRITE), 1235 int ret = uea_cmv_e1(sc, E1_MAKEFUNCTION(E1_MEMACCESS, E1_REQUESTWRITE),
847 address, offset, data); 1236 address, offset, data);
848 if (ret < 0) 1237 if (ret < 0)
849 uea_err(INS_TO_USBDEV(sc), 1238 uea_err(INS_TO_USBDEV(sc),
@@ -852,12 +1241,48 @@ static inline int uea_write_cmv(struct uea_softc *sc,
852 return ret; 1241 return ret;
853} 1242}
854 1243
1244static inline int uea_write_cmv_e4(struct uea_softc *sc,
1245 u8 size, u16 group, u16 address, u16 offset, u32 data)
1246{
1247 int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS, E4_REQUESTWRITE, size),
1248 group, address, offset, data);
1249 if (ret < 0)
1250 uea_err(INS_TO_USBDEV(sc),
1251 "writing cmv failed with error %d\n", ret);
1252
1253 return ret;
1254}
1255
1256static void uea_set_bulk_timeout(struct uea_softc *sc, u32 dsrate)
1257{
1258 int ret;
1259 u16 timeout;
1260
1261 /* in bulk mode the modem have problem with high rate
1262 * changing internal timing could improve things, but the
1263 * value is misterious.
1264 * ADI930 don't support it (-EPIPE error).
1265 */
1266
1267 if (UEA_CHIP_VERSION(sc) == ADI930 ||
1268 altsetting[sc->modem_index] > 0 ||
1269 sc->stats.phy.dsrate == dsrate)
1270 return;
1271
1272 /* Original timming (1Mbit/s) from ADI (used in windows driver) */
1273 timeout = (dsrate <= 1024*1024) ? 0 : 1;
1274 ret = uea_request(sc, UEA_SET_TIMEOUT, timeout, 0, NULL);
1275 uea_info(INS_TO_USBDEV(sc), "setting new timeout %d%s\n",
1276 timeout, ret < 0 ? " failed" : "");
1277
1278}
1279
855/* 1280/*
856 * Monitor the modem and update the stat 1281 * Monitor the modem and update the stat
857 * return 0 if everything is ok 1282 * return 0 if everything is ok
858 * return < 0 if an error occurs (-EAGAIN reboot needed) 1283 * return < 0 if an error occurs (-EAGAIN reboot needed)
859 */ 1284 */
860static int uea_stat(struct uea_softc *sc) 1285static int uea_stat_e1(struct uea_softc *sc)
861{ 1286{
862 u32 data; 1287 u32 data;
863 int ret; 1288 int ret;
@@ -865,7 +1290,7 @@ static int uea_stat(struct uea_softc *sc)
865 uea_enters(INS_TO_USBDEV(sc)); 1290 uea_enters(INS_TO_USBDEV(sc));
866 data = sc->stats.phy.state; 1291 data = sc->stats.phy.state;
867 1292
868 ret = uea_read_cmv(sc, SA_STAT, 0, &sc->stats.phy.state); 1293 ret = uea_read_cmv_e1(sc, E1_SA_STAT, 0, &sc->stats.phy.state);
869 if (ret < 0) 1294 if (ret < 0)
870 return ret; 1295 return ret;
871 1296
@@ -885,7 +1310,7 @@ static int uea_stat(struct uea_softc *sc)
885 1310
886 case 3: /* fail ... */ 1311 case 3: /* fail ... */
887 uea_info(INS_TO_USBDEV(sc), "modem synchronization failed" 1312 uea_info(INS_TO_USBDEV(sc), "modem synchronization failed"
888 " (may be try other cmv/dsp)\n"); 1313 " (may be try other cmv/dsp)\n");
889 return -EAGAIN; 1314 return -EAGAIN;
890 1315
891 case 4 ... 6: /* test state */ 1316 case 4 ... 6: /* test state */
@@ -923,7 +1348,7 @@ static int uea_stat(struct uea_softc *sc)
923 /* wake up processes waiting for synchronization */ 1348 /* wake up processes waiting for synchronization */
924 wake_up(&sc->sync_q); 1349 wake_up(&sc->sync_q);
925 1350
926 ret = uea_read_cmv(sc, SA_DIAG, 2, &sc->stats.phy.flags); 1351 ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 2, &sc->stats.phy.flags);
927 if (ret < 0) 1352 if (ret < 0)
928 return ret; 1353 return ret;
929 sc->stats.phy.mflags |= sc->stats.phy.flags; 1354 sc->stats.phy.mflags |= sc->stats.phy.flags;
@@ -937,105 +1362,223 @@ static int uea_stat(struct uea_softc *sc)
937 return 0; 1362 return 0;
938 } 1363 }
939 1364
940 ret = uea_read_cmv(sc, SA_RATE, 0, &data); 1365 ret = uea_read_cmv_e1(sc, E1_SA_RATE, 0, &data);
941 if (ret < 0) 1366 if (ret < 0)
942 return ret; 1367 return ret;
943 1368
944 /* in bulk mode the modem have problem with high rate 1369 uea_set_bulk_timeout(sc, (data >> 16) * 32);
945 * changing internal timing could improve things, but the
946 * value is misterious.
947 * ADI930 don't support it (-EPIPE error).
948 */
949 if (UEA_CHIP_VERSION(sc) != ADI930
950 && !use_iso[sc->modem_index]
951 && sc->stats.phy.dsrate != (data >> 16) * 32) {
952 /* Original timming from ADI(used in windows driver)
953 * 0x20ffff>>16 * 32 = 32 * 32 = 1Mbits
954 */
955 u16 timeout = (data <= 0x20ffff) ? 0 : 1;
956 ret = uea_request(sc, UEA_SET_TIMEOUT, timeout, 0, NULL);
957 uea_info(INS_TO_USBDEV(sc),
958 "setting new timeout %d%s\n", timeout,
959 ret < 0?" failed":"");
960 }
961 sc->stats.phy.dsrate = (data >> 16) * 32; 1370 sc->stats.phy.dsrate = (data >> 16) * 32;
962 sc->stats.phy.usrate = (data & 0xffff) * 32; 1371 sc->stats.phy.usrate = (data & 0xffff) * 32;
963 UPDATE_ATM_STAT(link_rate, sc->stats.phy.dsrate * 1000 / 424); 1372 UPDATE_ATM_STAT(link_rate, sc->stats.phy.dsrate * 1000 / 424);
964 1373
965 ret = uea_read_cmv(sc, SA_DIAG, 23, &data); 1374 ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 23, &data);
966 if (ret < 0) 1375 if (ret < 0)
967 return ret; 1376 return ret;
968 sc->stats.phy.dsattenuation = (data & 0xff) / 2; 1377 sc->stats.phy.dsattenuation = (data & 0xff) / 2;
969 1378
970 ret = uea_read_cmv(sc, SA_DIAG, 47, &data); 1379 ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 47, &data);
971 if (ret < 0) 1380 if (ret < 0)
972 return ret; 1381 return ret;
973 sc->stats.phy.usattenuation = (data & 0xff) / 2; 1382 sc->stats.phy.usattenuation = (data & 0xff) / 2;
974 1383
975 ret = uea_read_cmv(sc, SA_DIAG, 25, &sc->stats.phy.dsmargin); 1384 ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 25, &sc->stats.phy.dsmargin);
976 if (ret < 0) 1385 if (ret < 0)
977 return ret; 1386 return ret;
978 1387
979 ret = uea_read_cmv(sc, SA_DIAG, 49, &sc->stats.phy.usmargin); 1388 ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 49, &sc->stats.phy.usmargin);
980 if (ret < 0) 1389 if (ret < 0)
981 return ret; 1390 return ret;
982 1391
983 ret = uea_read_cmv(sc, SA_DIAG, 51, &sc->stats.phy.rxflow); 1392 ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 51, &sc->stats.phy.rxflow);
984 if (ret < 0) 1393 if (ret < 0)
985 return ret; 1394 return ret;
986 1395
987 ret = uea_read_cmv(sc, SA_DIAG, 52, &sc->stats.phy.txflow); 1396 ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 52, &sc->stats.phy.txflow);
988 if (ret < 0) 1397 if (ret < 0)
989 return ret; 1398 return ret;
990 1399
991 ret = uea_read_cmv(sc, SA_DIAG, 54, &sc->stats.phy.dsunc); 1400 ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 54, &sc->stats.phy.dsunc);
992 if (ret < 0) 1401 if (ret < 0)
993 return ret; 1402 return ret;
994 1403
995 /* only for atu-c */ 1404 /* only for atu-c */
996 ret = uea_read_cmv(sc, SA_DIAG, 58, &sc->stats.phy.usunc); 1405 ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 58, &sc->stats.phy.usunc);
997 if (ret < 0) 1406 if (ret < 0)
998 return ret; 1407 return ret;
999 1408
1000 ret = uea_read_cmv(sc, SA_DIAG, 53, &sc->stats.phy.dscorr); 1409 ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 53, &sc->stats.phy.dscorr);
1001 if (ret < 0) 1410 if (ret < 0)
1002 return ret; 1411 return ret;
1003 1412
1004 /* only for atu-c */ 1413 /* only for atu-c */
1005 ret = uea_read_cmv(sc, SA_DIAG, 57, &sc->stats.phy.uscorr); 1414 ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 57, &sc->stats.phy.uscorr);
1006 if (ret < 0) 1415 if (ret < 0)
1007 return ret; 1416 return ret;
1008 1417
1009 ret = uea_read_cmv(sc, SA_INFO, 8, &sc->stats.phy.vidco); 1418 ret = uea_read_cmv_e1(sc, E1_SA_INFO, 8, &sc->stats.phy.vidco);
1010 if (ret < 0) 1419 if (ret < 0)
1011 return ret; 1420 return ret;
1012 1421
1013 ret = uea_read_cmv(sc, SA_INFO, 13, &sc->stats.phy.vidcpe); 1422 ret = uea_read_cmv_e1(sc, E1_SA_INFO, 13, &sc->stats.phy.vidcpe);
1014 if (ret < 0) 1423 if (ret < 0)
1015 return ret; 1424 return ret;
1016 1425
1017 return 0; 1426 return 0;
1018} 1427}
1019 1428
1020static int request_cmvs(struct uea_softc *sc, 1429static int uea_stat_e4(struct uea_softc *sc)
1021 struct uea_cmvs **cmvs, const struct firmware **fw)
1022{ 1430{
1023 int ret, size; 1431 u32 data;
1024 u8 *data; 1432 u32 tmp_arr[2];
1433 int ret;
1434
1435 uea_enters(INS_TO_USBDEV(sc));
1436 data = sc->stats.phy.state;
1437
1438 /* XXX only need to be done before operationnal... */
1439 ret = uea_read_cmv_e4(sc, 1, E4_SA_STAT, 0, 0, &sc->stats.phy.state);
1440 if (ret < 0)
1441 return ret;
1442
1443 switch (sc->stats.phy.state) {
1444 case 0x0: /* not yet synchronized */
1445 case 0x1:
1446 case 0x3:
1447 case 0x4:
1448 uea_dbg(INS_TO_USBDEV(sc), "modem not yet synchronized\n");
1449 return 0;
1450 case 0x5: /* initialization */
1451 case 0x6:
1452 case 0x9:
1453 case 0xa:
1454 uea_dbg(INS_TO_USBDEV(sc), "modem initializing\n");
1455 return 0;
1456 case 0x2: /* fail ... */
1457 uea_info(INS_TO_USBDEV(sc), "modem synchronization failed"
1458 " (may be try other cmv/dsp)\n");
1459 return -EAGAIN;
1460 case 0x7: /* operational */
1461 break;
1462 default:
1463 uea_warn(INS_TO_USBDEV(sc), "unknown state: %x\n", sc->stats.phy.state);
1464 return 0;
1465 }
1466
1467 if (data != 7) {
1468 uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_OFF, 0, NULL);
1469 uea_info(INS_TO_USBDEV(sc), "modem operational\n");
1470
1471 /* release the dsp firmware as it is not needed until
1472 * the next failure
1473 */
1474 if (sc->dsp_firm) {
1475 release_firmware(sc->dsp_firm);
1476 sc->dsp_firm = NULL;
1477 }
1478 }
1479
1480 /* always update it as atm layer could not be init when we switch to
1481 * operational state
1482 */
1483 UPDATE_ATM_STAT(signal, ATM_PHY_SIG_FOUND);
1484
1485 /* wake up processes waiting for synchronization */
1486 wake_up(&sc->sync_q);
1487
1488 /* TODO improve this state machine :
1489 * we need some CMV info : what they do and their unit
1490 * we should find the equivalent of eagle3- CMV
1491 */
1492 /* check flags */
1493 ret = uea_read_cmv_e4(sc, 1, E4_SA_DIAG, 0, 0, &sc->stats.phy.flags);
1494 if (ret < 0)
1495 return ret;
1496 sc->stats.phy.mflags |= sc->stats.phy.flags;
1497
1498 /* in case of a flags ( for example delineation LOSS (& 0x10)),
1499 * we check the status again in order to detect the failure earlier
1500 */
1501 if (sc->stats.phy.flags) {
1502 uea_dbg(INS_TO_USBDEV(sc), "Stat flag = 0x%x\n",
1503 sc->stats.phy.flags);
1504 if (sc->stats.phy.flags & 1) //delineation LOSS
1505 return -EAGAIN;
1506 if (sc->stats.phy.flags & 0x4000) //Reset Flag
1507 return -EAGAIN;
1508 return 0;
1509 }
1510
1511 /* rate data may be in upper or lower half of 64 bit word, strange */
1512 ret = uea_read_cmv_e4(sc, 4, E4_SA_RATE, 0, 0, tmp_arr);
1513 if (ret < 0)
1514 return ret;
1515 data = (tmp_arr[0]) ? tmp_arr[0] : tmp_arr[1];
1516 sc->stats.phy.usrate = data / 1000;
1517
1518 ret = uea_read_cmv_e4(sc, 4, E4_SA_RATE, 1, 0, tmp_arr);
1519 if (ret < 0)
1520 return ret;
1521 data = (tmp_arr[0]) ? tmp_arr[0] : tmp_arr[1];
1522 uea_set_bulk_timeout(sc, data / 1000);
1523 sc->stats.phy.dsrate = data / 1000;
1524 UPDATE_ATM_STAT(link_rate, sc->stats.phy.dsrate * 1000 / 424);
1525
1526 ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 68, 1, &data);
1527 if (ret < 0)
1528 return ret;
1529 sc->stats.phy.dsattenuation = data / 10;
1530
1531 ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 69, 1, &data);
1532 if (ret < 0)
1533 return ret;
1534 sc->stats.phy.usattenuation = data / 10;
1535
1536 ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 68, 3, &data);
1537 if (ret < 0)
1538 return ret;
1539 sc->stats.phy.dsmargin = data / 2;
1540
1541 ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 69, 3, &data);
1542 if (ret < 0)
1543 return ret;
1544 sc->stats.phy.usmargin = data / 10;
1545
1546 return 0;
1547}
1548
1549static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver)
1550{
1551 char file_arr[] = "CMVxy.bin";
1025 char *file; 1552 char *file;
1026 char cmv_name[FIRMWARE_NAME_MAX]; /* 30 bytes stack variable */
1027 1553
1554 /* set proper name corresponding modem version and line type */
1028 if (cmv_file[sc->modem_index] == NULL) { 1555 if (cmv_file[sc->modem_index] == NULL) {
1029 if (UEA_CHIP_VERSION(sc) == ADI930) 1556 if (UEA_CHIP_VERSION(sc) == ADI930)
1030 file = (IS_ISDN(sc->usb_dev)) ? "CMV9i.bin" : "CMV9p.bin"; 1557 file_arr[3] = '9';
1558 else if (UEA_CHIP_VERSION(sc) == EAGLE_IV)
1559 file_arr[3] = '4';
1031 else 1560 else
1032 file = (IS_ISDN(sc->usb_dev)) ? "CMVei.bin" : "CMVep.bin"; 1561 file_arr[3] = 'e';
1562
1563 file_arr[4] = IS_ISDN(sc) ? 'i' : 'p';
1564 file = file_arr;
1033 } else 1565 } else
1034 file = cmv_file[sc->modem_index]; 1566 file = cmv_file[sc->modem_index];
1035 1567
1036 strcpy(cmv_name, FW_DIR); 1568 strcpy(cmv_name, FW_DIR);
1037 strlcat(cmv_name, file, sizeof(cmv_name)); 1569 strlcat(cmv_name, file, FIRMWARE_NAME_MAX);
1570 if (ver == 2)
1571 strlcat(cmv_name, ".v2", FIRMWARE_NAME_MAX);
1572}
1573
1574static int request_cmvs_old(struct uea_softc *sc,
1575 void **cmvs, const struct firmware **fw)
1576{
1577 int ret, size;
1578 u8 *data;
1579 char cmv_name[FIRMWARE_NAME_MAX]; /* 30 bytes stack variable */
1038 1580
1581 cmvs_file_name(sc, cmv_name, 1);
1039 ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev); 1582 ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev);
1040 if (ret < 0) { 1583 if (ret < 0) {
1041 uea_err(INS_TO_USBDEV(sc), 1584 uea_err(INS_TO_USBDEV(sc),
@@ -1045,16 +1588,197 @@ static int request_cmvs(struct uea_softc *sc,
1045 } 1588 }
1046 1589
1047 data = (u8 *) (*fw)->data; 1590 data = (u8 *) (*fw)->data;
1048 size = *data * sizeof(struct uea_cmvs) + 1; 1591 size = (*fw)->size;
1049 if (size != (*fw)->size) { 1592 if (size < 1)
1050 uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", 1593 goto err_fw_corrupted;
1051 cmv_name); 1594
1052 release_firmware(*fw); 1595 if (size != *data * sizeof(struct uea_cmvs_v1) + 1)
1053 return -EILSEQ; 1596 goto err_fw_corrupted;
1597
1598 *cmvs = (void *)(data + 1);
1599 return *data;
1600
1601err_fw_corrupted:
1602 uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", cmv_name);
1603 release_firmware(*fw);
1604 return -EILSEQ;
1605}
1606
1607static int request_cmvs(struct uea_softc *sc,
1608 void **cmvs, const struct firmware **fw, int *ver)
1609{
1610 int ret, size;
1611 u32 crc;
1612 u8 *data;
1613 char cmv_name[FIRMWARE_NAME_MAX]; /* 30 bytes stack variable */
1614
1615 cmvs_file_name(sc, cmv_name, 2);
1616 ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev);
1617 if (ret < 0) {
1618 /* if caller can handle old version, try to provide it */
1619 if (*ver == 1) {
1620 uea_warn(INS_TO_USBDEV(sc), "requesting firmware %s failed, "
1621 "try to get older cmvs\n", cmv_name);
1622 return request_cmvs_old(sc, cmvs, fw);
1623 }
1624 uea_err(INS_TO_USBDEV(sc),
1625 "requesting firmware %s failed with error %d\n",
1626 cmv_name, ret);
1627 return ret;
1628 }
1629
1630 size = (*fw)->size;
1631 data = (u8 *) (*fw)->data;
1632 if (size < 4 || strncmp(data, "cmv2", 4) != 0) {
1633 if (*ver == 1) {
1634 uea_warn(INS_TO_USBDEV(sc), "firmware %s is corrupted, "
1635 "try to get older cmvs\n", cmv_name);
1636 release_firmware(*fw);
1637 return request_cmvs_old(sc, cmvs, fw);
1638 }
1639 goto err_fw_corrupted;
1054 } 1640 }
1055 1641
1056 *cmvs = (struct uea_cmvs *)(data + 1); 1642 *ver = 2;
1643
1644 data += 4;
1645 size -= 4;
1646 if (size < 5)
1647 goto err_fw_corrupted;
1648
1649 crc = FW_GET_LONG(data);
1650 data += 4;
1651 size -= 4;
1652 if (crc32_be(0, data, size) != crc)
1653 goto err_fw_corrupted;
1654
1655 if (size != *data * sizeof(struct uea_cmvs_v2) + 1)
1656 goto err_fw_corrupted;
1657
1658 *cmvs = (void *) (data + 1);
1057 return *data; 1659 return *data;
1660
1661err_fw_corrupted:
1662 uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", cmv_name);
1663 release_firmware(*fw);
1664 return -EILSEQ;
1665}
1666
1667static int uea_send_cmvs_e1(struct uea_softc *sc)
1668{
1669 int i, ret, len;
1670 void *cmvs_ptr;
1671 const struct firmware *cmvs_fw;
1672 int ver = 1; // we can handle v1 cmv firmware version;
1673
1674 /* Enter in R-IDLE (cmv) until instructed otherwise */
1675 ret = uea_write_cmv_e1(sc, E1_SA_CNTL, 0, 1);
1676 if (ret < 0)
1677 return ret;
1678
1679 /* Dump firmware version */
1680 ret = uea_read_cmv_e1(sc, E1_SA_INFO, 10, &sc->stats.phy.firmid);
1681 if (ret < 0)
1682 return ret;
1683 uea_info(INS_TO_USBDEV(sc), "ATU-R firmware version : %x\n",
1684 sc->stats.phy.firmid);
1685
1686 /* get options */
1687 ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver);
1688 if (ret < 0)
1689 return ret;
1690
1691 /* send options */
1692 if (ver == 1) {
1693 struct uea_cmvs_v1 *cmvs_v1 = cmvs_ptr;
1694
1695 uea_warn(INS_TO_USBDEV(sc), "use deprecated cmvs version, "
1696 "please update your firmware\n");
1697
1698 for (i = 0; i < len; i++) {
1699 ret = uea_write_cmv_e1(sc, FW_GET_LONG(&cmvs_v1[i].address),
1700 FW_GET_WORD(&cmvs_v1[i].offset),
1701 FW_GET_LONG(&cmvs_v1[i].data));
1702 if (ret < 0)
1703 goto out;
1704 }
1705 } else if (ver == 2) {
1706 struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr;
1707
1708 for (i = 0; i < len; i++) {
1709 ret = uea_write_cmv_e1(sc, FW_GET_LONG(&cmvs_v2[i].address),
1710 (u16) FW_GET_LONG(&cmvs_v2[i].offset),
1711 FW_GET_LONG(&cmvs_v2[i].data));
1712 if (ret < 0)
1713 goto out;
1714 }
1715 } else {
1716 /* This realy should not happen */
1717 uea_err(INS_TO_USBDEV(sc), "bad cmvs version %d\n", ver);
1718 goto out;
1719 }
1720
1721 /* Enter in R-ACT-REQ */
1722 ret = uea_write_cmv_e1(sc, E1_SA_CNTL, 0, 2);
1723 uea_vdbg(INS_TO_USBDEV(sc), "Entering in R-ACT-REQ state\n");
1724 uea_info(INS_TO_USBDEV(sc), "modem started, waiting synchronization...\n");
1725out:
1726 release_firmware(cmvs_fw);
1727 return ret;
1728}
1729
1730static int uea_send_cmvs_e4(struct uea_softc *sc)
1731{
1732 int i, ret, len;
1733 void *cmvs_ptr;
1734 const struct firmware *cmvs_fw;
1735 int ver = 2; // we can only handle v2 cmv firmware version;
1736
1737 /* Enter in R-IDLE (cmv) until instructed otherwise */
1738 ret = uea_write_cmv_e4(sc, 1, E4_SA_CNTL, 0, 0, 1);
1739 if (ret < 0)
1740 return ret;
1741
1742 /* Dump firmware version */
1743 /* XXX don't read the 3th byte as it is always 6 */
1744 ret = uea_read_cmv_e4(sc, 2, E4_SA_INFO, 55, 0, &sc->stats.phy.firmid);
1745 if (ret < 0)
1746 return ret;
1747 uea_info(INS_TO_USBDEV(sc), "ATU-R firmware version : %x\n",
1748 sc->stats.phy.firmid);
1749
1750
1751 /* get options */
1752 ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver);
1753 if (ret < 0)
1754 return ret;
1755
1756 /* send options */
1757 if (ver == 2) {
1758 struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr;
1759
1760 for (i = 0; i < len; i++) {
1761 ret = uea_write_cmv_e4(sc, 1,
1762 FW_GET_LONG(&cmvs_v2[i].group),
1763 FW_GET_LONG(&cmvs_v2[i].address),
1764 FW_GET_LONG(&cmvs_v2[i].offset),
1765 FW_GET_LONG(&cmvs_v2[i].data));
1766 if (ret < 0)
1767 goto out;
1768 }
1769 } else {
1770 /* This realy should not happen */
1771 uea_err(INS_TO_USBDEV(sc), "bad cmvs version %d\n", ver);
1772 goto out;
1773 }
1774
1775 /* Enter in R-ACT-REQ */
1776 ret = uea_write_cmv_e4(sc, 1, E4_SA_CNTL, 0, 0, 2);
1777 uea_vdbg(INS_TO_USBDEV(sc), "Entering in R-ACT-REQ state\n");
1778 uea_info(INS_TO_USBDEV(sc), "modem started, waiting synchronization...\n");
1779out:
1780 release_firmware(cmvs_fw);
1781 return ret;
1058} 1782}
1059 1783
1060/* Start boot post firmware modem: 1784/* Start boot post firmware modem:
@@ -1066,9 +1790,7 @@ static int request_cmvs(struct uea_softc *sc,
1066static int uea_start_reset(struct uea_softc *sc) 1790static int uea_start_reset(struct uea_softc *sc)
1067{ 1791{
1068 u16 zero = 0; /* ;-) */ 1792 u16 zero = 0; /* ;-) */
1069 int i, len, ret; 1793 int ret;
1070 struct uea_cmvs *cmvs;
1071 const struct firmware *cmvs_fw;
1072 1794
1073 uea_enters(INS_TO_USBDEV(sc)); 1795 uea_enters(INS_TO_USBDEV(sc));
1074 uea_info(INS_TO_USBDEV(sc), "(re)booting started\n"); 1796 uea_info(INS_TO_USBDEV(sc), "(re)booting started\n");
@@ -1093,25 +1815,36 @@ static int uea_start_reset(struct uea_softc *sc)
1093 uea_request(sc, UEA_SET_MODE, UEA_START_RESET, 0, NULL); 1815 uea_request(sc, UEA_SET_MODE, UEA_START_RESET, 0, NULL);
1094 1816
1095 /* original driver use 200ms, but windows driver use 100ms */ 1817 /* original driver use 200ms, but windows driver use 100ms */
1096 msleep(100); 1818 ret = uea_wait(sc, 0, msecs_to_jiffies(100));
1819 if (ret < 0)
1820 return ret;
1097 1821
1098 /* leave reset mode */ 1822 /* leave reset mode */
1099 uea_request(sc, UEA_SET_MODE, UEA_END_RESET, 0, NULL); 1823 uea_request(sc, UEA_SET_MODE, UEA_END_RESET, 0, NULL);
1100 1824
1101 /* clear tx and rx mailboxes */ 1825 if (UEA_CHIP_VERSION(sc) != EAGLE_IV) {
1102 uea_request(sc, UEA_SET_2183_DATA, UEA_MPTX_MAILBOX, 2, &zero); 1826 /* clear tx and rx mailboxes */
1103 uea_request(sc, UEA_SET_2183_DATA, UEA_MPRX_MAILBOX, 2, &zero); 1827 uea_request(sc, UEA_SET_2183_DATA, UEA_MPTX_MAILBOX, 2, &zero);
1104 uea_request(sc, UEA_SET_2183_DATA, UEA_SWAP_MAILBOX, 2, &zero); 1828 uea_request(sc, UEA_SET_2183_DATA, UEA_MPRX_MAILBOX, 2, &zero);
1829 uea_request(sc, UEA_SET_2183_DATA, UEA_SWAP_MAILBOX, 2, &zero);
1830 }
1831
1832 ret = uea_wait(sc, 0, msecs_to_jiffies(1000));
1833 if (ret < 0)
1834 return ret;
1835
1836 if (UEA_CHIP_VERSION(sc) == EAGLE_IV)
1837 sc->cmv_dsc.e4.function = E4_MAKEFUNCTION(E4_ADSLDIRECTIVE, E4_MODEMREADY, 1);
1838 else
1839 sc->cmv_dsc.e1.function = E1_MAKEFUNCTION(E1_ADSLDIRECTIVE, E1_MODEMREADY);
1105 1840
1106 msleep(1000);
1107 sc->cmv_function = MAKEFUNCTION(ADSLDIRECTIVE, MODEMREADY);
1108 /* demask interrupt */ 1841 /* demask interrupt */
1109 sc->booting = 0; 1842 sc->booting = 0;
1110 1843
1111 /* start loading DSP */ 1844 /* start loading DSP */
1112 sc->pageno = 0; 1845 sc->pageno = 0;
1113 sc->ovl = 0; 1846 sc->ovl = 0;
1114 schedule_work(&sc->task); 1847 queue_work(sc->work_q, &sc->task);
1115 1848
1116 /* wait for modem ready CMV */ 1849 /* wait for modem ready CMV */
1117 ret = wait_cmv_ack(sc); 1850 ret = wait_cmv_ack(sc);
@@ -1120,38 +1853,10 @@ static int uea_start_reset(struct uea_softc *sc)
1120 1853
1121 uea_vdbg(INS_TO_USBDEV(sc), "Ready CMV received\n"); 1854 uea_vdbg(INS_TO_USBDEV(sc), "Ready CMV received\n");
1122 1855
1123 /* Enter in R-IDLE (cmv) until instructed otherwise */ 1856 ret = sc->send_cmvs(sc);
1124 ret = uea_write_cmv(sc, SA_CNTL, 0, 1);
1125 if (ret < 0)
1126 return ret;
1127
1128 /* Dump firmware version */
1129 ret = uea_read_cmv(sc, SA_INFO, 10, &sc->stats.phy.firmid);
1130 if (ret < 0) 1857 if (ret < 0)
1131 return ret; 1858 return ret;
1132 uea_info(INS_TO_USBDEV(sc), "ATU-R firmware version : %x\n",
1133 sc->stats.phy.firmid);
1134 1859
1135 /* get options */
1136 ret = len = request_cmvs(sc, &cmvs, &cmvs_fw);
1137 if (ret < 0)
1138 return ret;
1139
1140 /* send options */
1141 for (i = 0; i < len; i++) {
1142 ret = uea_write_cmv(sc, FW_GET_LONG(&cmvs[i].address),
1143 FW_GET_WORD(&cmvs[i].offset),
1144 FW_GET_LONG(&cmvs[i].data));
1145 if (ret < 0)
1146 goto out;
1147 }
1148 /* Enter in R-ACT-REQ */
1149 ret = uea_write_cmv(sc, SA_CNTL, 0, 2);
1150 uea_vdbg(INS_TO_USBDEV(sc), "Entering in R-ACT-REQ state\n");
1151 uea_info(INS_TO_USBDEV(sc), "Modem started, "
1152 "waiting synchronization\n");
1153out:
1154 release_firmware(cmvs_fw);
1155 sc->reset = 0; 1860 sc->reset = 0;
1156 uea_leaves(INS_TO_USBDEV(sc)); 1861 uea_leaves(INS_TO_USBDEV(sc));
1157 return ret; 1862 return ret;
@@ -1174,12 +1879,10 @@ static int uea_kthread(void *data)
1174 if (ret < 0 || sc->reset) 1879 if (ret < 0 || sc->reset)
1175 ret = uea_start_reset(sc); 1880 ret = uea_start_reset(sc);
1176 if (!ret) 1881 if (!ret)
1177 ret = uea_stat(sc); 1882 ret = sc->stat(sc);
1178 if (ret != -EAGAIN) 1883 if (ret != -EAGAIN)
1179 msleep_interruptible(1000); 1884 uea_wait(sc, 0, msecs_to_jiffies(1000));
1180 if (try_to_freeze()) 1885 try_to_freeze();
1181 uea_err(INS_TO_USBDEV(sc), "suspend/resume not supported, "
1182 "please unplug/replug your modem\n");
1183 } 1886 }
1184 uea_leaves(INS_TO_USBDEV(sc)); 1887 uea_leaves(INS_TO_USBDEV(sc));
1185 return ret; 1888 return ret;
@@ -1234,7 +1937,6 @@ static int load_XILINX_firmware(struct uea_softc *sc)
1234 if (ret < 0) 1937 if (ret < 0)
1235 uea_err(sc->usb_dev, "elsa de-assert failed with error %d\n", ret); 1938 uea_err(sc->usb_dev, "elsa de-assert failed with error %d\n", ret);
1236 1939
1237
1238err1: 1940err1:
1239 release_firmware(fw_entry); 1941 release_firmware(fw_entry);
1240err0: 1942err0:
@@ -1243,40 +1945,41 @@ err0:
1243} 1945}
1244 1946
1245/* The modem send us an ack. First with check if it right */ 1947/* The modem send us an ack. First with check if it right */
1246static void uea_dispatch_cmv(struct uea_softc *sc, struct cmv* cmv) 1948static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr)
1247{ 1949{
1950 struct cmv_dsc_e1 *dsc = &sc->cmv_dsc.e1;
1951 struct cmv_e1 *cmv = &intr->u.e1.s2.cmv;
1952
1248 uea_enters(INS_TO_USBDEV(sc)); 1953 uea_enters(INS_TO_USBDEV(sc));
1249 if (le16_to_cpu(cmv->wPreamble) != PREAMBLE) 1954 if (le16_to_cpu(cmv->wPreamble) != E1_PREAMBLE)
1250 goto bad1; 1955 goto bad1;
1251 1956
1252 if (cmv->bDirection != MODEMTOHOST) 1957 if (cmv->bDirection != E1_MODEMTOHOST)
1253 goto bad1; 1958 goto bad1;
1254 1959
1255 /* FIXME : ADI930 reply wrong preambule (func = 2, sub = 2) to 1960 /* FIXME : ADI930 reply wrong preambule (func = 2, sub = 2) to
1256 * the first MEMACESS cmv. Ignore it... 1961 * the first MEMACESS cmv. Ignore it...
1257 */ 1962 */
1258 if (cmv->bFunction != sc->cmv_function) { 1963 if (cmv->bFunction != dsc->function) {
1259 if (UEA_CHIP_VERSION(sc) == ADI930 1964 if (UEA_CHIP_VERSION(sc) == ADI930
1260 && cmv->bFunction == MAKEFUNCTION(2, 2)) { 1965 && cmv->bFunction == E1_MAKEFUNCTION(2, 2)) {
1261 cmv->wIndex = cpu_to_le16(sc->cmv_idx); 1966 cmv->wIndex = cpu_to_le16(dsc->idx);
1262 put_unaligned(cpu_to_le32(sc->cmv_address), &cmv->dwSymbolicAddress); 1967 put_unaligned(cpu_to_le32(dsc->address), &cmv->dwSymbolicAddress);
1263 cmv->wOffsetAddress = cpu_to_le16(sc->cmv_offset); 1968 cmv->wOffsetAddress = cpu_to_le16(dsc->offset);
1264 } 1969 } else
1265 else
1266 goto bad2; 1970 goto bad2;
1267 } 1971 }
1268 1972
1269 if (cmv->bFunction == MAKEFUNCTION(ADSLDIRECTIVE, MODEMREADY)) { 1973 if (cmv->bFunction == E1_MAKEFUNCTION(E1_ADSLDIRECTIVE, E1_MODEMREADY)) {
1270 wake_up_cmv_ack(sc); 1974 wake_up_cmv_ack(sc);
1271 uea_leaves(INS_TO_USBDEV(sc)); 1975 uea_leaves(INS_TO_USBDEV(sc));
1272 return; 1976 return;
1273 } 1977 }
1274 1978
1275 /* in case of MEMACCESS */ 1979 /* in case of MEMACCESS */
1276 if (le16_to_cpu(cmv->wIndex) != sc->cmv_idx || 1980 if (le16_to_cpu(cmv->wIndex) != dsc->idx ||
1277 le32_to_cpu(get_unaligned(&cmv->dwSymbolicAddress)) != 1981 le32_to_cpu(get_unaligned(&cmv->dwSymbolicAddress)) != dsc->address ||
1278 sc->cmv_address 1982 le16_to_cpu(cmv->wOffsetAddress) != dsc->offset)
1279 || le16_to_cpu(cmv->wOffsetAddress) != sc->cmv_offset)
1280 goto bad2; 1983 goto bad2;
1281 1984
1282 sc->data = le32_to_cpu(get_unaligned(&cmv->dwData)); 1985 sc->data = le32_to_cpu(get_unaligned(&cmv->dwData));
@@ -1289,8 +1992,8 @@ static void uea_dispatch_cmv(struct uea_softc *sc, struct cmv* cmv)
1289bad2: 1992bad2:
1290 uea_err(INS_TO_USBDEV(sc), "unexpected cmv received," 1993 uea_err(INS_TO_USBDEV(sc), "unexpected cmv received,"
1291 "Function : %d, Subfunction : %d\n", 1994 "Function : %d, Subfunction : %d\n",
1292 FUNCTION_TYPE(cmv->bFunction), 1995 E1_FUNCTION_TYPE(cmv->bFunction),
1293 FUNCTION_SUBTYPE(cmv->bFunction)); 1996 E1_FUNCTION_SUBTYPE(cmv->bFunction));
1294 uea_leaves(INS_TO_USBDEV(sc)); 1997 uea_leaves(INS_TO_USBDEV(sc));
1295 return; 1998 return;
1296 1999
@@ -1301,6 +2004,61 @@ bad1:
1301 uea_leaves(INS_TO_USBDEV(sc)); 2004 uea_leaves(INS_TO_USBDEV(sc));
1302} 2005}
1303 2006
2007/* The modem send us an ack. First with check if it right */
2008static void uea_dispatch_cmv_e4(struct uea_softc *sc, struct intr_pkt *intr)
2009{
2010 struct cmv_dsc_e4 *dsc = &sc->cmv_dsc.e4;
2011 struct cmv_e4 *cmv = &intr->u.e4.s2.cmv;
2012
2013 uea_enters(INS_TO_USBDEV(sc));
2014 uea_dbg(INS_TO_USBDEV(sc), "cmv %x %x %x %x %x %x\n",
2015 be16_to_cpu(cmv->wGroup), be16_to_cpu(cmv->wFunction),
2016 be16_to_cpu(cmv->wOffset), be16_to_cpu(cmv->wAddress),
2017 be32_to_cpu(cmv->dwData[0]), be32_to_cpu(cmv->dwData[1]));
2018
2019 if (be16_to_cpu(cmv->wFunction) != dsc->function)
2020 goto bad2;
2021
2022 if (be16_to_cpu(cmv->wFunction) == E4_MAKEFUNCTION(E4_ADSLDIRECTIVE, E4_MODEMREADY, 1)) {
2023 wake_up_cmv_ack(sc);
2024 uea_leaves(INS_TO_USBDEV(sc));
2025 return;
2026 }
2027
2028 /* in case of MEMACCESS */
2029 if (be16_to_cpu(cmv->wOffset) != dsc->offset ||
2030 be16_to_cpu(cmv->wGroup) != dsc->group ||
2031 be16_to_cpu(cmv->wAddress) != dsc->address)
2032 goto bad2;
2033
2034 sc->data = be32_to_cpu(cmv->dwData[0]);
2035 sc->data1 = be32_to_cpu(cmv->dwData[1]);
2036 wake_up_cmv_ack(sc);
2037 uea_leaves(INS_TO_USBDEV(sc));
2038 return;
2039
2040bad2:
2041 uea_err(INS_TO_USBDEV(sc), "unexpected cmv received,"
2042 "Function : %d, Subfunction : %d\n",
2043 E4_FUNCTION_TYPE(cmv->wFunction),
2044 E4_FUNCTION_SUBTYPE(cmv->wFunction));
2045 uea_leaves(INS_TO_USBDEV(sc));
2046 return;
2047}
2048
2049static void uea_schedule_load_page_e1(struct uea_softc *sc, struct intr_pkt *intr)
2050{
2051 sc->pageno = intr->e1_bSwapPageNo;
2052 sc->ovl = intr->e1_bOvl >> 4 | intr->e1_bOvl << 4;
2053 queue_work(sc->work_q, &sc->task);
2054}
2055
2056static void uea_schedule_load_page_e4(struct uea_softc *sc, struct intr_pkt *intr)
2057{
2058 sc->pageno = intr->e4_bSwapPageNo;
2059 queue_work(sc->work_q, &sc->task);
2060}
2061
1304/* 2062/*
1305 * interrupt handler 2063 * interrupt handler
1306 */ 2064 */
@@ -1326,13 +2084,11 @@ static void uea_intr(struct urb *urb)
1326 2084
1327 switch (le16_to_cpu(intr->wInterrupt)) { 2085 switch (le16_to_cpu(intr->wInterrupt)) {
1328 case INT_LOADSWAPPAGE: 2086 case INT_LOADSWAPPAGE:
1329 sc->pageno = intr->bSwapPageNo; 2087 sc->schedule_load_page(sc, intr);
1330 sc->ovl = intr->bOvl >> 4 | intr->bOvl << 4;
1331 schedule_work(&sc->task);
1332 break; 2088 break;
1333 2089
1334 case INT_INCOMINGCMV: 2090 case INT_INCOMINGCMV:
1335 uea_dispatch_cmv(sc, &intr->u.s2.cmv); 2091 sc->dispatch_cmv(sc, intr);
1336 break; 2092 break;
1337 2093
1338 default: 2094 default:
@@ -1349,35 +2105,55 @@ resubmit:
1349 */ 2105 */
1350static int uea_boot(struct uea_softc *sc) 2106static int uea_boot(struct uea_softc *sc)
1351{ 2107{
1352 int ret; 2108 int ret, size;
1353 struct intr_pkt *intr; 2109 struct intr_pkt *intr;
1354 2110
1355 uea_enters(INS_TO_USBDEV(sc)); 2111 uea_enters(INS_TO_USBDEV(sc));
1356 2112
1357 INIT_WORK(&sc->task, uea_load_page); 2113 if (UEA_CHIP_VERSION(sc) == EAGLE_IV) {
2114 size = E4_INTR_PKT_SIZE;
2115 sc->dispatch_cmv = uea_dispatch_cmv_e4;
2116 sc->schedule_load_page = uea_schedule_load_page_e4;
2117 sc->stat = uea_stat_e4;
2118 sc->send_cmvs = uea_send_cmvs_e4;
2119 INIT_WORK(&sc->task, uea_load_page_e4);
2120 } else {
2121 size = E1_INTR_PKT_SIZE;
2122 sc->dispatch_cmv = uea_dispatch_cmv_e1;
2123 sc->schedule_load_page = uea_schedule_load_page_e1;
2124 sc->stat = uea_stat_e1;
2125 sc->send_cmvs = uea_send_cmvs_e1;
2126 INIT_WORK(&sc->task, uea_load_page_e1);
2127 }
2128
1358 init_waitqueue_head(&sc->sync_q); 2129 init_waitqueue_head(&sc->sync_q);
1359 init_waitqueue_head(&sc->cmv_ack_wait); 2130
2131 sc->work_q = create_workqueue("ueagle-dsp");
2132 if (!sc->work_q) {
2133 uea_err(INS_TO_USBDEV(sc), "cannot allocate workqueue\n");
2134 uea_leaves(INS_TO_USBDEV(sc));
2135 return -ENOMEM;
2136 }
1360 2137
1361 if (UEA_CHIP_VERSION(sc) == ADI930) 2138 if (UEA_CHIP_VERSION(sc) == ADI930)
1362 load_XILINX_firmware(sc); 2139 load_XILINX_firmware(sc);
1363 2140
1364 intr = kmalloc(INTR_PKT_SIZE, GFP_KERNEL); 2141 intr = kmalloc(size, GFP_KERNEL);
1365 if (!intr) { 2142 if (!intr) {
1366 uea_err(INS_TO_USBDEV(sc), 2143 uea_err(INS_TO_USBDEV(sc),
1367 "cannot allocate interrupt package\n"); 2144 "cannot allocate interrupt package\n");
1368 uea_leaves(INS_TO_USBDEV(sc)); 2145 goto err0;
1369 return -ENOMEM;
1370 } 2146 }
1371 2147
1372 sc->urb_int = usb_alloc_urb(0, GFP_KERNEL); 2148 sc->urb_int = usb_alloc_urb(0, GFP_KERNEL);
1373 if (!sc->urb_int) { 2149 if (!sc->urb_int) {
1374 uea_err(INS_TO_USBDEV(sc), "cannot allocate interrupt URB\n"); 2150 uea_err(INS_TO_USBDEV(sc), "cannot allocate interrupt URB\n");
1375 goto err; 2151 goto err1;
1376 } 2152 }
1377 2153
1378 usb_fill_int_urb(sc->urb_int, sc->usb_dev, 2154 usb_fill_int_urb(sc->urb_int, sc->usb_dev,
1379 usb_rcvintpipe(sc->usb_dev, UEA_INTR_PIPE), 2155 usb_rcvintpipe(sc->usb_dev, UEA_INTR_PIPE),
1380 intr, INTR_PKT_SIZE, uea_intr, sc, 2156 intr, size, uea_intr, sc,
1381 sc->usb_dev->actconfig->interface[0]->altsetting[0]. 2157 sc->usb_dev->actconfig->interface[0]->altsetting[0].
1382 endpoint[0].desc.bInterval); 2158 endpoint[0].desc.bInterval);
1383 2159
@@ -1385,7 +2161,7 @@ static int uea_boot(struct uea_softc *sc)
1385 if (ret < 0) { 2161 if (ret < 0) {
1386 uea_err(INS_TO_USBDEV(sc), 2162 uea_err(INS_TO_USBDEV(sc),
1387 "urb submition failed with error %d\n", ret); 2163 "urb submition failed with error %d\n", ret);
1388 goto err; 2164 goto err1;
1389 } 2165 }
1390 2166
1391 sc->kthread = kthread_run(uea_kthread, sc, "ueagle-atm"); 2167 sc->kthread = kthread_run(uea_kthread, sc, "ueagle-atm");
@@ -1399,10 +2175,12 @@ static int uea_boot(struct uea_softc *sc)
1399 2175
1400err2: 2176err2:
1401 usb_kill_urb(sc->urb_int); 2177 usb_kill_urb(sc->urb_int);
1402err: 2178err1:
1403 usb_free_urb(sc->urb_int); 2179 usb_free_urb(sc->urb_int);
1404 sc->urb_int = NULL; 2180 sc->urb_int = NULL;
1405 kfree(intr); 2181 kfree(intr);
2182err0:
2183 destroy_workqueue(sc->work_q);
1406 uea_leaves(INS_TO_USBDEV(sc)); 2184 uea_leaves(INS_TO_USBDEV(sc));
1407 return -ENOMEM; 2185 return -ENOMEM;
1408} 2186}
@@ -1417,15 +2195,15 @@ static void uea_stop(struct uea_softc *sc)
1417 ret = kthread_stop(sc->kthread); 2195 ret = kthread_stop(sc->kthread);
1418 uea_dbg(INS_TO_USBDEV(sc), "kthread finish with status %d\n", ret); 2196 uea_dbg(INS_TO_USBDEV(sc), "kthread finish with status %d\n", ret);
1419 2197
1420 /* stop any pending boot process */
1421 flush_scheduled_work();
1422
1423 uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_ON, 0, NULL); 2198 uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_ON, 0, NULL);
1424 2199
1425 usb_kill_urb(sc->urb_int); 2200 usb_kill_urb(sc->urb_int);
1426 kfree(sc->urb_int->transfer_buffer); 2201 kfree(sc->urb_int->transfer_buffer);
1427 usb_free_urb(sc->urb_int); 2202 usb_free_urb(sc->urb_int);
1428 2203
2204 /* stop any pending boot process, when no one can schedule work */
2205 destroy_workqueue(sc->work_q);
2206
1429 if (sc->dsp_firm) 2207 if (sc->dsp_firm)
1430 release_firmware(sc->dsp_firm); 2208 release_firmware(sc->dsp_firm);
1431 uea_leaves(INS_TO_USBDEV(sc)); 2209 uea_leaves(INS_TO_USBDEV(sc));
@@ -1487,6 +2265,7 @@ static ssize_t read_human_status(struct device *dev, struct device_attribute *at
1487 char *buf) 2265 char *buf)
1488{ 2266{
1489 int ret = -ENODEV; 2267 int ret = -ENODEV;
2268 int modem_state;
1490 struct uea_softc *sc; 2269 struct uea_softc *sc;
1491 2270
1492 mutex_lock(&uea_mutex); 2271 mutex_lock(&uea_mutex);
@@ -1494,7 +2273,34 @@ static ssize_t read_human_status(struct device *dev, struct device_attribute *at
1494 if (!sc) 2273 if (!sc)
1495 goto out; 2274 goto out;
1496 2275
1497 switch (GET_STATUS(sc->stats.phy.state)) { 2276 if (UEA_CHIP_VERSION(sc) == EAGLE_IV) {
2277 switch (sc->stats.phy.state) {
2278 case 0x0: /* not yet synchronized */
2279 case 0x1:
2280 case 0x3:
2281 case 0x4:
2282 modem_state = 0;
2283 break;
2284 case 0x5: /* initialization */
2285 case 0x6:
2286 case 0x9:
2287 case 0xa:
2288 modem_state = 1;
2289 break;
2290 case 0x7: /* operational */
2291 modem_state = 2;
2292 break;
2293 case 0x2: /* fail ... */
2294 modem_state = 3;
2295 break;
2296 default: /* unknown */
2297 modem_state = 4;
2298 break;
2299 }
2300 } else
2301 modem_state = GET_STATUS(sc->stats.phy.state);
2302
2303 switch (modem_state) {
1498 case 0: 2304 case 0:
1499 ret = sprintf(buf, "Modem is booting\n"); 2305 ret = sprintf(buf, "Modem is booting\n");
1500 break; 2306 break;
@@ -1504,9 +2310,12 @@ static ssize_t read_human_status(struct device *dev, struct device_attribute *at
1504 case 2: 2310 case 2:
1505 ret = sprintf(buf, "Modem is operational\n"); 2311 ret = sprintf(buf, "Modem is operational\n");
1506 break; 2312 break;
1507 default: 2313 case 3:
1508 ret = sprintf(buf, "Modem synchronization failed\n"); 2314 ret = sprintf(buf, "Modem synchronization failed\n");
1509 break; 2315 break;
2316 default:
2317 ret = sprintf(buf, "Modem state is unknown\n");
2318 break;
1510 } 2319 }
1511out: 2320out:
1512 mutex_unlock(&uea_mutex); 2321 mutex_unlock(&uea_mutex);
@@ -1520,18 +2329,26 @@ static ssize_t read_delin(struct device *dev, struct device_attribute *attr,
1520{ 2329{
1521 int ret = -ENODEV; 2330 int ret = -ENODEV;
1522 struct uea_softc *sc; 2331 struct uea_softc *sc;
2332 char *delin = "GOOD";
1523 2333
1524 mutex_lock(&uea_mutex); 2334 mutex_lock(&uea_mutex);
1525 sc = dev_to_uea(dev); 2335 sc = dev_to_uea(dev);
1526 if (!sc) 2336 if (!sc)
1527 goto out; 2337 goto out;
1528 2338
1529 if (sc->stats.phy.flags & 0x0C00) 2339 if (UEA_CHIP_VERSION(sc) == EAGLE_IV) {
1530 ret = sprintf(buf, "ERROR\n"); 2340 if (sc->stats.phy.flags & 0x4000)
1531 else if (sc->stats.phy.flags & 0x0030) 2341 delin = "RESET";
1532 ret = sprintf(buf, "LOSS\n"); 2342 else if (sc->stats.phy.flags & 0x0001)
1533 else 2343 delin = "LOSS";
1534 ret = sprintf(buf, "GOOD\n"); 2344 } else {
2345 if (sc->stats.phy.flags & 0x0C00)
2346 delin = "ERROR";
2347 else if (sc->stats.phy.flags & 0x0030)
2348 delin = "LOSS";
2349 }
2350
2351 ret = sprintf(buf, "%s\n", delin);
1535out: 2352out:
1536 mutex_unlock(&uea_mutex); 2353 mutex_unlock(&uea_mutex);
1537 return ret; 2354 return ret;
@@ -1662,6 +2479,7 @@ static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf,
1662 struct usb_device *usb = interface_to_usbdev(intf); 2479 struct usb_device *usb = interface_to_usbdev(intf);
1663 struct uea_softc *sc; 2480 struct uea_softc *sc;
1664 int ret, ifnum = intf->altsetting->desc.bInterfaceNumber; 2481 int ret, ifnum = intf->altsetting->desc.bInterfaceNumber;
2482 unsigned int alt;
1665 2483
1666 uea_enters(usb); 2484 uea_enters(usb);
1667 2485
@@ -1696,22 +2514,29 @@ static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf,
1696 sc->modem_index = (modem_index < NB_MODEM) ? modem_index++ : 0; 2514 sc->modem_index = (modem_index < NB_MODEM) ? modem_index++ : 0;
1697 sc->driver_info = id->driver_info; 2515 sc->driver_info = id->driver_info;
1698 2516
1699 /* ADI930 don't support iso */ 2517 /* first try to use module parameter */
1700 if (UEA_CHIP_VERSION(id) != ADI930 && use_iso[sc->modem_index]) { 2518 if (annex[sc->modem_index] == 1)
1701 int i; 2519 sc->annex = ANNEXA;
1702 2520 else if (annex[sc->modem_index] == 2)
1703 /* try set fastest alternate for inbound traffic interface */ 2521 sc->annex = ANNEXB;
1704 for (i = FASTEST_ISO_INTF; i > 0; i--) 2522 /* try to autodetect annex */
1705 if (usb_set_interface(usb, UEA_DS_IFACE_NO, i) == 0) 2523 else if (sc->driver_info & AUTO_ANNEX_A)
1706 break; 2524 sc->annex = ANNEXA;
2525 else if (sc->driver_info & AUTO_ANNEX_B)
2526 sc->annex = ANNEXB;
2527 else
2528 sc->annex = (le16_to_cpu(sc->usb_dev->descriptor.bcdDevice) & 0x80)?ANNEXB:ANNEXA;
1707 2529
1708 if (i > 0) { 2530 alt = altsetting[sc->modem_index];
1709 uea_dbg(usb, "set alternate %d for 2 interface\n", i); 2531 /* ADI930 don't support iso */
2532 if (UEA_CHIP_VERSION(id) != ADI930 && alt > 0) {
2533 if (alt <= 8 && usb_set_interface(usb, UEA_DS_IFACE_NO, alt) == 0) {
2534 uea_dbg(usb, "set alternate %u for 2 interface\n", alt);
1710 uea_info(usb, "using iso mode\n"); 2535 uea_info(usb, "using iso mode\n");
1711 usbatm->flags |= UDSL_USE_ISOC | UDSL_IGNORE_EILSEQ; 2536 usbatm->flags |= UDSL_USE_ISOC | UDSL_IGNORE_EILSEQ;
1712 } else { 2537 } else {
1713 uea_err(usb, "setting any alternate failed for " 2538 uea_err(usb, "setting alternate %u failed for "
1714 "2 interface, using bulk mode\n"); 2539 "2 interface, using bulk mode\n", alt);
1715 } 2540 }
1716 } 2541 }
1717 2542
@@ -1757,10 +2582,11 @@ static int uea_probe(struct usb_interface *intf, const struct usb_device_id *id)
1757 struct usb_device *usb = interface_to_usbdev(intf); 2582 struct usb_device *usb = interface_to_usbdev(intf);
1758 2583
1759 uea_enters(usb); 2584 uea_enters(usb);
1760 uea_info(usb, "ADSL device founded vid (%#X) pid (%#X) : %s %s\n", 2585 uea_info(usb, "ADSL device founded vid (%#X) pid (%#X) Rev (%#X): %s\n",
1761 le16_to_cpu(usb->descriptor.idVendor), 2586 le16_to_cpu(usb->descriptor.idVendor),
1762 le16_to_cpu(usb->descriptor.idProduct), 2587 le16_to_cpu(usb->descriptor.idProduct),
1763 chip_name[UEA_CHIP_VERSION(id)], IS_ISDN(usb)?"isdn":"pots"); 2588 le16_to_cpu(usb->descriptor.bcdDevice),
2589 chip_name[UEA_CHIP_VERSION(id)]);
1764 2590
1765 usb_reset_device(usb); 2591 usb_reset_device(usb);
1766 2592
@@ -1793,24 +2619,40 @@ static void uea_disconnect(struct usb_interface *intf)
1793 * List of supported VID/PID 2619 * List of supported VID/PID
1794 */ 2620 */
1795static const struct usb_device_id uea_ids[] = { 2621static const struct usb_device_id uea_ids[] = {
2622 {USB_DEVICE(ANALOG_VID, ADI930_PID_PREFIRM), .driver_info = ADI930 | PREFIRM},
2623 {USB_DEVICE(ANALOG_VID, ADI930_PID_PSTFIRM), .driver_info = ADI930 | PSTFIRM},
2624 {USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM},
2625 {USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM},
2626 {USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM},
2627 {USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM},
2628 {USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM},
2629 {USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM},
2630 {USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PREFIRM), .driver_info = EAGLE_III | PREFIRM},
2631 {USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PSTFIRM), .driver_info = EAGLE_III | PSTFIRM},
2632 {USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PREFIRM), .driver_info = EAGLE_IV | PREFIRM},
2633 {USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PSTFIRM), .driver_info = EAGLE_IV | PSTFIRM},
2634 {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM},
2635 {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
2636 {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM},
2637 {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
2638 {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM},
2639 {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_A},
2640 {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM},
2641 {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_B},
1796 {USB_DEVICE(ELSA_VID, ELSA_PID_PREFIRM), .driver_info = ADI930 | PREFIRM}, 2642 {USB_DEVICE(ELSA_VID, ELSA_PID_PREFIRM), .driver_info = ADI930 | PREFIRM},
1797 {USB_DEVICE(ELSA_VID, ELSA_PID_PSTFIRM), .driver_info = ADI930 | PSTFIRM}, 2643 {USB_DEVICE(ELSA_VID, ELSA_PID_PSTFIRM), .driver_info = ADI930 | PSTFIRM},
1798 {USB_DEVICE(EAGLE_VID, EAGLE_I_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM}, 2644 {USB_DEVICE(ELSA_VID, ELSA_PID_A_PREFIRM), .driver_info = ADI930 | PREFIRM},
1799 {USB_DEVICE(EAGLE_VID, EAGLE_I_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM}, 2645 {USB_DEVICE(ELSA_VID, ELSA_PID_A_PSTFIRM), .driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_A},
1800 {USB_DEVICE(EAGLE_VID, EAGLE_II_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM}, 2646 {USB_DEVICE(ELSA_VID, ELSA_PID_B_PREFIRM), .driver_info = ADI930 | PREFIRM},
1801 {USB_DEVICE(EAGLE_VID, EAGLE_II_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM}, 2647 {USB_DEVICE(ELSA_VID, ELSA_PID_B_PSTFIRM), .driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_B},
1802 {USB_DEVICE(EAGLE_VID, EAGLE_IIC_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM},
1803 {USB_DEVICE(EAGLE_VID, EAGLE_IIC_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM},
1804 {USB_DEVICE(EAGLE_VID, EAGLE_III_PID_PREFIRM), .driver_info = EAGLE_III | PREFIRM},
1805 {USB_DEVICE(EAGLE_VID, EAGLE_III_PID_PSTFIRM), .driver_info = EAGLE_III | PSTFIRM},
1806 {USB_DEVICE(USR_VID, MILLER_A_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM}, 2648 {USB_DEVICE(USR_VID, MILLER_A_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM},
1807 {USB_DEVICE(USR_VID, MILLER_A_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM}, 2649 {USB_DEVICE(USR_VID, MILLER_A_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
1808 {USB_DEVICE(USR_VID, MILLER_B_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM}, 2650 {USB_DEVICE(USR_VID, MILLER_B_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM},
1809 {USB_DEVICE(USR_VID, MILLER_B_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM}, 2651 {USB_DEVICE(USR_VID, MILLER_B_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
1810 {USB_DEVICE(USR_VID, HEINEKEN_A_PID_PREFIRM),.driver_info = EAGLE_I | PREFIRM}, 2652 {USB_DEVICE(USR_VID, HEINEKEN_A_PID_PREFIRM),.driver_info = EAGLE_I | PREFIRM},
1811 {USB_DEVICE(USR_VID, HEINEKEN_A_PID_PSTFIRM),.driver_info = EAGLE_I | PSTFIRM}, 2653 {USB_DEVICE(USR_VID, HEINEKEN_A_PID_PSTFIRM),.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
1812 {USB_DEVICE(USR_VID, HEINEKEN_B_PID_PREFIRM),.driver_info = EAGLE_I | PREFIRM}, 2654 {USB_DEVICE(USR_VID, HEINEKEN_B_PID_PREFIRM),.driver_info = EAGLE_I | PREFIRM},
1813 {USB_DEVICE(USR_VID, HEINEKEN_B_PID_PSTFIRM),.driver_info = EAGLE_I | PSTFIRM}, 2655 {USB_DEVICE(USR_VID, HEINEKEN_B_PID_PSTFIRM),.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
1814 {} 2656 {}
1815}; 2657};
1816 2658
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 5192cd9356..ad632f2d6f 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -28,6 +28,7 @@
28 * v0.12 - add hpoj.sourceforge.net ioctls (David Paschal) 28 * v0.12 - add hpoj.sourceforge.net ioctls (David Paschal)
29 * v0.13 - alloc space for statusbuf (<status> not on stack); 29 * v0.13 - alloc space for statusbuf (<status> not on stack);
30 * use usb_buffer_alloc() for read buf & write buf; 30 * use usb_buffer_alloc() for read buf & write buf;
31 * none - Maintained in Linux kernel after v0.13
31 */ 32 */
32 33
33/* 34/*
@@ -69,7 +70,6 @@
69#define USBLP_DEVICE_ID_SIZE 1024 70#define USBLP_DEVICE_ID_SIZE 1024
70 71
71/* ioctls: */ 72/* ioctls: */
72#define LPGETSTATUS 0x060b /* same as in drivers/char/lp.c */
73#define IOCNR_GET_DEVICE_ID 1 73#define IOCNR_GET_DEVICE_ID 1
74#define IOCNR_GET_PROTOCOLS 2 74#define IOCNR_GET_PROTOCOLS 2
75#define IOCNR_SET_PROTOCOL 3 75#define IOCNR_SET_PROTOCOL 3
@@ -115,7 +115,7 @@ MFG:HEWLETT-PACKARD;MDL:DESKJET 970C;CMD:MLC,PCL,PML;CLASS:PRINTER;DESCRIPTION:H
115#define USBLP_MINORS 16 115#define USBLP_MINORS 16
116#define USBLP_MINOR_BASE 0 116#define USBLP_MINOR_BASE 0
117 117
118#define USBLP_WRITE_TIMEOUT (5000) /* 5 seconds */ 118#define USBLP_CTL_TIMEOUT 5000 /* 5 seconds */
119 119
120#define USBLP_FIRST_PROTOCOL 1 120#define USBLP_FIRST_PROTOCOL 1
121#define USBLP_LAST_PROTOCOL 3 121#define USBLP_LAST_PROTOCOL 3
@@ -159,10 +159,12 @@ struct usblp {
159 int wstatus; /* bytes written or error */ 159 int wstatus; /* bytes written or error */
160 int rstatus; /* bytes ready or error */ 160 int rstatus; /* bytes ready or error */
161 unsigned int quirks; /* quirks flags */ 161 unsigned int quirks; /* quirks flags */
162 unsigned int flags; /* mode flags */
162 unsigned char used; /* True if open */ 163 unsigned char used; /* True if open */
163 unsigned char present; /* True if not disconnected */ 164 unsigned char present; /* True if not disconnected */
164 unsigned char bidir; /* interface is bidirectional */ 165 unsigned char bidir; /* interface is bidirectional */
165 unsigned char sleeping; /* interface is suspended */ 166 unsigned char sleeping; /* interface is suspended */
167 unsigned char no_paper; /* Paper Out happened */
166 unsigned char *device_id_string; /* IEEE 1284 DEVICE ID string (ptr) */ 168 unsigned char *device_id_string; /* IEEE 1284 DEVICE ID string (ptr) */
167 /* first 2 bytes are (big-endian) length */ 169 /* first 2 bytes are (big-endian) length */
168}; 170};
@@ -259,7 +261,7 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i
259 261
260 retval = usb_control_msg(usblp->dev, 262 retval = usb_control_msg(usblp->dev,
261 dir ? usb_rcvctrlpipe(usblp->dev, 0) : usb_sndctrlpipe(usblp->dev, 0), 263 dir ? usb_rcvctrlpipe(usblp->dev, 0) : usb_sndctrlpipe(usblp->dev, 0),
262 request, type | dir | recip, value, index, buf, len, USBLP_WRITE_TIMEOUT); 264 request, type | dir | recip, value, index, buf, len, USBLP_CTL_TIMEOUT);
263 dbg("usblp_control_msg: rq: 0x%02x dir: %d recip: %d value: %d idx: %d len: %#x result: %d", 265 dbg("usblp_control_msg: rq: 0x%02x dir: %d recip: %d value: %d idx: %d len: %#x result: %d",
264 request, !!dir, recip, value, index, len, retval); 266 request, !!dir, recip, value, index, len, retval);
265 return retval < 0 ? retval : 0; 267 return retval < 0 ? retval : 0;
@@ -325,13 +327,11 @@ static void usblp_bulk_write(struct urb *urb)
325 usblp->wstatus = status; 327 usblp->wstatus = status;
326 else 328 else
327 usblp->wstatus = urb->actual_length; 329 usblp->wstatus = urb->actual_length;
330 usblp->no_paper = 0;
328 usblp->wcomplete = 1; 331 usblp->wcomplete = 1;
329 wake_up(&usblp->wwait); 332 wake_up(&usblp->wwait);
330 spin_unlock(&usblp->lock); 333 spin_unlock(&usblp->lock);
331 334
332 /* XXX Use usb_setup_bulk_urb when available. Talk to Marcel. */
333 kfree(urb->transfer_buffer);
334 urb->transfer_buffer = NULL; /* Not refcounted, so to be safe... */
335 usb_free_urb(urb); 335 usb_free_urb(urb);
336} 336}
337 337
@@ -346,16 +346,17 @@ static int usblp_check_status(struct usblp *usblp, int err)
346 unsigned char status, newerr = 0; 346 unsigned char status, newerr = 0;
347 int error; 347 int error;
348 348
349 error = usblp_read_status (usblp, usblp->statusbuf); 349 mutex_lock(&usblp->mut);
350 if (error < 0) { 350 if ((error = usblp_read_status(usblp, usblp->statusbuf)) < 0) {
351 mutex_unlock(&usblp->mut);
351 if (printk_ratelimit()) 352 if (printk_ratelimit())
352 printk(KERN_ERR 353 printk(KERN_ERR
353 "usblp%d: error %d reading printer status\n", 354 "usblp%d: error %d reading printer status\n",
354 usblp->minor, error); 355 usblp->minor, error);
355 return 0; 356 return 0;
356 } 357 }
357
358 status = *usblp->statusbuf; 358 status = *usblp->statusbuf;
359 mutex_unlock(&usblp->mut);
359 360
360 if (~status & LP_PERRORP) 361 if (~status & LP_PERRORP)
361 newerr = 3; 362 newerr = 3;
@@ -411,18 +412,10 @@ static int usblp_open(struct inode *inode, struct file *file)
411 goto out; 412 goto out;
412 413
413 /* 414 /*
414 * TODO: need to implement LP_ABORTOPEN + O_NONBLOCK as in drivers/char/lp.c ??? 415 * We do not implement LP_ABORTOPEN/LPABORTOPEN for two reasons:
415 * This is #if 0-ed because we *don't* want to fail an open 416 * - We do not want persistent state which close(2) does not clear
416 * just because the printer is off-line. 417 * - It is not used anyway, according to CUPS people
417 */ 418 */
418#if 0
419 if ((retval = usblp_check_status(usblp, 0))) {
420 retval = retval > 1 ? -EIO : -ENOSPC;
421 goto out;
422 }
423#else
424 retval = 0;
425#endif
426 419
427 retval = usb_autopm_get_interface(intf); 420 retval = usb_autopm_get_interface(intf);
428 if (retval < 0) 421 if (retval < 0)
@@ -463,6 +456,8 @@ static int usblp_release(struct inode *inode, struct file *file)
463{ 456{
464 struct usblp *usblp = file->private_data; 457 struct usblp *usblp = file->private_data;
465 458
459 usblp->flags &= ~LP_ABORT;
460
466 mutex_lock (&usblp_mutex); 461 mutex_lock (&usblp_mutex);
467 usblp->used = 0; 462 usblp->used = 0;
468 if (usblp->present) { 463 if (usblp->present) {
@@ -485,8 +480,8 @@ static unsigned int usblp_poll(struct file *file, struct poll_table_struct *wait
485 poll_wait(file, &usblp->rwait, wait); 480 poll_wait(file, &usblp->rwait, wait);
486 poll_wait(file, &usblp->wwait, wait); 481 poll_wait(file, &usblp->wwait, wait);
487 spin_lock_irqsave(&usblp->lock, flags); 482 spin_lock_irqsave(&usblp->lock, flags);
488 ret = ((!usblp->bidir || !usblp->rcomplete) ? 0 : POLLIN | POLLRDNORM) 483 ret = ((usblp->bidir && usblp->rcomplete) ? POLLIN | POLLRDNORM : 0) |
489 | (!usblp->wcomplete ? 0 : POLLOUT | POLLWRNORM); 484 ((usblp->no_paper || usblp->wcomplete) ? POLLOUT | POLLWRNORM : 0);
490 spin_unlock_irqrestore(&usblp->lock, flags); 485 spin_unlock_irqrestore(&usblp->lock, flags);
491 return ret; 486 return ret;
492} 487}
@@ -675,6 +670,13 @@ static long usblp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
675 retval = -EFAULT; 670 retval = -EFAULT;
676 break; 671 break;
677 672
673 case LPABORT:
674 if (arg)
675 usblp->flags |= LP_ABORT;
676 else
677 usblp->flags &= ~LP_ABORT;
678 break;
679
678 default: 680 default:
679 retval = -ENOTTY; 681 retval = -ENOTTY;
680 } 682 }
@@ -684,10 +686,30 @@ done:
684 return retval; 686 return retval;
685} 687}
686 688
689static struct urb *usblp_new_writeurb(struct usblp *usblp, int transfer_length)
690{
691 struct urb *urb;
692 char *writebuf;
693
694 if ((writebuf = kmalloc(transfer_length, GFP_KERNEL)) == NULL)
695 return NULL;
696 if ((urb = usb_alloc_urb(0, GFP_KERNEL)) == NULL) {
697 kfree(writebuf);
698 return NULL;
699 }
700
701 usb_fill_bulk_urb(urb, usblp->dev,
702 usb_sndbulkpipe(usblp->dev,
703 usblp->protocol[usblp->current_protocol].epwrite->bEndpointAddress),
704 writebuf, transfer_length, usblp_bulk_write, usblp);
705 urb->transfer_flags |= URB_FREE_BUFFER;
706
707 return urb;
708}
709
687static ssize_t usblp_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) 710static ssize_t usblp_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
688{ 711{
689 struct usblp *usblp = file->private_data; 712 struct usblp *usblp = file->private_data;
690 char *writebuf;
691 struct urb *writeurb; 713 struct urb *writeurb;
692 int rv; 714 int rv;
693 int transfer_length; 715 int transfer_length;
@@ -708,17 +730,11 @@ static ssize_t usblp_write(struct file *file, const char __user *buffer, size_t
708 transfer_length = USBLP_BUF_SIZE; 730 transfer_length = USBLP_BUF_SIZE;
709 731
710 rv = -ENOMEM; 732 rv = -ENOMEM;
711 if ((writebuf = kmalloc(USBLP_BUF_SIZE, GFP_KERNEL)) == NULL) 733 if ((writeurb = usblp_new_writeurb(usblp, transfer_length)) == NULL)
712 goto raise_buf;
713 if ((writeurb = usb_alloc_urb(0, GFP_KERNEL)) == NULL)
714 goto raise_urb; 734 goto raise_urb;
715 usb_fill_bulk_urb(writeurb, usblp->dev,
716 usb_sndbulkpipe(usblp->dev,
717 usblp->protocol[usblp->current_protocol].epwrite->bEndpointAddress),
718 writebuf, transfer_length, usblp_bulk_write, usblp);
719 usb_anchor_urb(writeurb, &usblp->urbs); 735 usb_anchor_urb(writeurb, &usblp->urbs);
720 736
721 if (copy_from_user(writebuf, 737 if (copy_from_user(writeurb->transfer_buffer,
722 buffer + writecount, transfer_length)) { 738 buffer + writecount, transfer_length)) {
723 rv = -EFAULT; 739 rv = -EFAULT;
724 goto raise_badaddr; 740 goto raise_badaddr;
@@ -730,6 +746,7 @@ static ssize_t usblp_write(struct file *file, const char __user *buffer, size_t
730 if ((rv = usb_submit_urb(writeurb, GFP_KERNEL)) < 0) { 746 if ((rv = usb_submit_urb(writeurb, GFP_KERNEL)) < 0) {
731 usblp->wstatus = 0; 747 usblp->wstatus = 0;
732 spin_lock_irq(&usblp->lock); 748 spin_lock_irq(&usblp->lock);
749 usblp->no_paper = 0;
733 usblp->wcomplete = 1; 750 usblp->wcomplete = 1;
734 wake_up(&usblp->wwait); 751 wake_up(&usblp->wwait);
735 spin_unlock_irq(&usblp->lock); 752 spin_unlock_irq(&usblp->lock);
@@ -747,12 +764,17 @@ static ssize_t usblp_write(struct file *file, const char __user *buffer, size_t
747 /* Presume that it's going to complete well. */ 764 /* Presume that it's going to complete well. */
748 writecount += transfer_length; 765 writecount += transfer_length;
749 } 766 }
767 if (rv == -ENOSPC) {
768 spin_lock_irq(&usblp->lock);
769 usblp->no_paper = 1; /* Mark for poll(2) */
770 spin_unlock_irq(&usblp->lock);
771 writecount += transfer_length;
772 }
750 /* Leave URB dangling, to be cleaned on close. */ 773 /* Leave URB dangling, to be cleaned on close. */
751 goto collect_error; 774 goto collect_error;
752 } 775 }
753 776
754 if (usblp->wstatus < 0) { 777 if (usblp->wstatus < 0) {
755 usblp_check_status(usblp, 0);
756 rv = -EIO; 778 rv = -EIO;
757 goto collect_error; 779 goto collect_error;
758 } 780 }
@@ -771,8 +793,6 @@ raise_badaddr:
771 usb_unanchor_urb(writeurb); 793 usb_unanchor_urb(writeurb);
772 usb_free_urb(writeurb); 794 usb_free_urb(writeurb);
773raise_urb: 795raise_urb:
774 kfree(writebuf);
775raise_buf:
776raise_wait: 796raise_wait:
777collect_error: /* Out of raise sequence */ 797collect_error: /* Out of raise sequence */
778 mutex_unlock(&usblp->wmut); 798 mutex_unlock(&usblp->wmut);
@@ -838,32 +858,36 @@ done:
838 * when O_NONBLOCK is set. So, applications setting O_NONBLOCK must use 858 * when O_NONBLOCK is set. So, applications setting O_NONBLOCK must use
839 * select(2) or poll(2) to wait for the buffer to drain before closing. 859 * select(2) or poll(2) to wait for the buffer to drain before closing.
840 * Alternatively, set blocking mode with fcntl and issue a zero-size write. 860 * Alternatively, set blocking mode with fcntl and issue a zero-size write.
841 *
842 * Old v0.13 code had a non-functional timeout for wait_event(). Someone forgot
843 * to check the return code for timeout expiration, so it had no effect.
844 * Apparently, it was intended to check for error conditons, such as out
845 * of paper. It is going to return when we settle things with CUPS. XXX
846 */ 861 */
847static int usblp_wwait(struct usblp *usblp, int nonblock) 862static int usblp_wwait(struct usblp *usblp, int nonblock)
848{ 863{
849 DECLARE_WAITQUEUE(waita, current); 864 DECLARE_WAITQUEUE(waita, current);
850 int rc; 865 int rc;
866 int err = 0;
851 867
852 add_wait_queue(&usblp->wwait, &waita); 868 add_wait_queue(&usblp->wwait, &waita);
853 for (;;) { 869 for (;;) {
870 set_current_state(TASK_INTERRUPTIBLE);
854 if (mutex_lock_interruptible(&usblp->mut)) { 871 if (mutex_lock_interruptible(&usblp->mut)) {
855 rc = -EINTR; 872 rc = -EINTR;
856 break; 873 break;
857 } 874 }
858 set_current_state(TASK_INTERRUPTIBLE); 875 rc = usblp_wtest(usblp, nonblock);
859 if ((rc = usblp_wtest(usblp, nonblock)) < 0) {
860 mutex_unlock(&usblp->mut);
861 break;
862 }
863 mutex_unlock(&usblp->mut); 876 mutex_unlock(&usblp->mut);
864 if (rc == 0) 877 if (rc <= 0)
865 break; 878 break;
866 schedule(); 879
880 if (usblp->flags & LP_ABORT) {
881 if (schedule_timeout(msecs_to_jiffies(5000)) == 0) {
882 err = usblp_check_status(usblp, err);
883 if (err == 1) { /* Paper out */
884 rc = -ENOSPC;
885 break;
886 }
887 }
888 } else {
889 schedule();
890 }
867 } 891 }
868 set_current_state(TASK_RUNNING); 892 set_current_state(TASK_RUNNING);
869 remove_wait_queue(&usblp->wwait, &waita); 893 remove_wait_queue(&usblp->wwait, &waita);
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index cb69aa1e02..1a8edcee7f 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -507,18 +507,30 @@ void usb_destroy_configuration(struct usb_device *dev)
507} 507}
508 508
509 509
510// hub-only!! ... and only in reset path, or usb_new_device() 510/*
511// (used by real hubs and virtual root hubs) 511 * Get the USB config descriptors, cache and parse'em
512 *
513 * hub-only!! ... and only in reset path, or usb_new_device()
514 * (used by real hubs and virtual root hubs)
515 *
516 * NOTE: if this is a WUSB device and is not authorized, we skip the
517 * whole thing. A non-authorized USB device has no
518 * configurations.
519 */
512int usb_get_configuration(struct usb_device *dev) 520int usb_get_configuration(struct usb_device *dev)
513{ 521{
514 struct device *ddev = &dev->dev; 522 struct device *ddev = &dev->dev;
515 int ncfg = dev->descriptor.bNumConfigurations; 523 int ncfg = dev->descriptor.bNumConfigurations;
516 int result = -ENOMEM; 524 int result = 0;
517 unsigned int cfgno, length; 525 unsigned int cfgno, length;
518 unsigned char *buffer; 526 unsigned char *buffer;
519 unsigned char *bigbuffer; 527 unsigned char *bigbuffer;
520 struct usb_config_descriptor *desc; 528 struct usb_config_descriptor *desc;
521 529
530 cfgno = 0;
531 if (dev->authorized == 0) /* Not really an error */
532 goto out_not_authorized;
533 result = -ENOMEM;
522 if (ncfg > USB_MAXCONFIG) { 534 if (ncfg > USB_MAXCONFIG) {
523 dev_warn(ddev, "too many configurations: %d, " 535 dev_warn(ddev, "too many configurations: %d, "
524 "using maximum allowed: %d\n", ncfg, USB_MAXCONFIG); 536 "using maximum allowed: %d\n", ncfg, USB_MAXCONFIG);
@@ -545,14 +557,15 @@ int usb_get_configuration(struct usb_device *dev)
545 goto err2; 557 goto err2;
546 desc = (struct usb_config_descriptor *)buffer; 558 desc = (struct usb_config_descriptor *)buffer;
547 559
548 for (cfgno = 0; cfgno < ncfg; cfgno++) { 560 result = 0;
561 for (; cfgno < ncfg; cfgno++) {
549 /* We grab just the first descriptor so we know how long 562 /* We grab just the first descriptor so we know how long
550 * the whole configuration is */ 563 * the whole configuration is */
551 result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, 564 result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
552 buffer, USB_DT_CONFIG_SIZE); 565 buffer, USB_DT_CONFIG_SIZE);
553 if (result < 0) { 566 if (result < 0) {
554 dev_err(ddev, "unable to read config index %d " 567 dev_err(ddev, "unable to read config index %d "
555 "descriptor/%s\n", cfgno, "start"); 568 "descriptor/%s: %d\n", cfgno, "start", result);
556 dev_err(ddev, "chopping to %d config(s)\n", cfgno); 569 dev_err(ddev, "chopping to %d config(s)\n", cfgno);
557 dev->descriptor.bNumConfigurations = cfgno; 570 dev->descriptor.bNumConfigurations = cfgno;
558 break; 571 break;
@@ -599,6 +612,7 @@ int usb_get_configuration(struct usb_device *dev)
599 612
600err: 613err:
601 kfree(buffer); 614 kfree(buffer);
615out_not_authorized:
602 dev->descriptor.bNumConfigurations = cfgno; 616 dev->descriptor.bNumConfigurations = cfgno;
603err2: 617err2:
604 if (result == -ENOMEM) 618 if (result == -ENOMEM)
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 927a181120..f013b4012c 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -71,6 +71,7 @@ struct async {
71 void __user *userbuffer; 71 void __user *userbuffer;
72 void __user *userurb; 72 void __user *userurb;
73 struct urb *urb; 73 struct urb *urb;
74 int status;
74 u32 secid; 75 u32 secid;
75}; 76};
76 77
@@ -289,10 +290,8 @@ static void snoop_urb(struct urb *urb, void __user *userurb)
289 if (!usbfs_snoop) 290 if (!usbfs_snoop)
290 return; 291 return;
291 292
292 if (urb->pipe & USB_DIR_IN) 293 dev_info(&urb->dev->dev, "direction=%s\n",
293 dev_info(&urb->dev->dev, "direction=IN\n"); 294 usb_urb_dir_in(urb) ? "IN" : "OUT");
294 else
295 dev_info(&urb->dev->dev, "direction=OUT\n");
296 dev_info(&urb->dev->dev, "userurb=%p\n", userurb); 295 dev_info(&urb->dev->dev, "userurb=%p\n", userurb);
297 dev_info(&urb->dev->dev, "transfer_buffer_length=%d\n", 296 dev_info(&urb->dev->dev, "transfer_buffer_length=%d\n",
298 urb->transfer_buffer_length); 297 urb->transfer_buffer_length);
@@ -312,9 +311,10 @@ static void async_completed(struct urb *urb)
312 spin_lock(&ps->lock); 311 spin_lock(&ps->lock);
313 list_move_tail(&as->asynclist, &ps->async_completed); 312 list_move_tail(&as->asynclist, &ps->async_completed);
314 spin_unlock(&ps->lock); 313 spin_unlock(&ps->lock);
314 as->status = urb->status;
315 if (as->signr) { 315 if (as->signr) {
316 sinfo.si_signo = as->signr; 316 sinfo.si_signo = as->signr;
317 sinfo.si_errno = as->urb->status; 317 sinfo.si_errno = as->status;
318 sinfo.si_code = SI_ASYNCIO; 318 sinfo.si_code = SI_ASYNCIO;
319 sinfo.si_addr = as->userurb; 319 sinfo.si_addr = as->userurb;
320 kill_pid_info_as_uid(as->signr, &sinfo, as->pid, as->uid, 320 kill_pid_info_as_uid(as->signr, &sinfo, as->pid, as->uid,
@@ -910,6 +910,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
910 struct usb_ctrlrequest *dr = NULL; 910 struct usb_ctrlrequest *dr = NULL;
911 unsigned int u, totlen, isofrmlen; 911 unsigned int u, totlen, isofrmlen;
912 int ret, ifnum = -1; 912 int ret, ifnum = -1;
913 int is_in;
913 914
914 if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP|USBDEVFS_URB_SHORT_NOT_OK| 915 if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP|USBDEVFS_URB_SHORT_NOT_OK|
915 URB_NO_FSBR|URB_ZERO_PACKET)) 916 URB_NO_FSBR|URB_ZERO_PACKET))
@@ -924,16 +925,18 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
924 if ((ret = checkintf(ps, ifnum))) 925 if ((ret = checkintf(ps, ifnum)))
925 return ret; 926 return ret;
926 } 927 }
927 if ((uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0) 928 if ((uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0) {
928 ep = ps->dev->ep_in [uurb->endpoint & USB_ENDPOINT_NUMBER_MASK]; 929 is_in = 1;
929 else 930 ep = ps->dev->ep_in[uurb->endpoint & USB_ENDPOINT_NUMBER_MASK];
930 ep = ps->dev->ep_out [uurb->endpoint & USB_ENDPOINT_NUMBER_MASK]; 931 } else {
932 is_in = 0;
933 ep = ps->dev->ep_out[uurb->endpoint & USB_ENDPOINT_NUMBER_MASK];
934 }
931 if (!ep) 935 if (!ep)
932 return -ENOENT; 936 return -ENOENT;
933 switch(uurb->type) { 937 switch(uurb->type) {
934 case USBDEVFS_URB_TYPE_CONTROL: 938 case USBDEVFS_URB_TYPE_CONTROL:
935 if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) 939 if (!usb_endpoint_xfer_control(&ep->desc))
936 != USB_ENDPOINT_XFER_CONTROL)
937 return -EINVAL; 940 return -EINVAL;
938 /* min 8 byte setup packet, max 8 byte setup plus an arbitrary data stage */ 941 /* min 8 byte setup packet, max 8 byte setup plus an arbitrary data stage */
939 if (uurb->buffer_length < 8 || uurb->buffer_length > (8 + MAX_USBFS_BUFFER_SIZE)) 942 if (uurb->buffer_length < 8 || uurb->buffer_length > (8 + MAX_USBFS_BUFFER_SIZE))
@@ -952,23 +955,32 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
952 kfree(dr); 955 kfree(dr);
953 return ret; 956 return ret;
954 } 957 }
955 uurb->endpoint = (uurb->endpoint & ~USB_ENDPOINT_DIR_MASK) | (dr->bRequestType & USB_ENDPOINT_DIR_MASK);
956 uurb->number_of_packets = 0; 958 uurb->number_of_packets = 0;
957 uurb->buffer_length = le16_to_cpup(&dr->wLength); 959 uurb->buffer_length = le16_to_cpup(&dr->wLength);
958 uurb->buffer += 8; 960 uurb->buffer += 8;
959 if (!access_ok((uurb->endpoint & USB_DIR_IN) ? VERIFY_WRITE : VERIFY_READ, uurb->buffer, uurb->buffer_length)) { 961 if ((dr->bRequestType & USB_DIR_IN) && uurb->buffer_length) {
962 is_in = 1;
963 uurb->endpoint |= USB_DIR_IN;
964 } else {
965 is_in = 0;
966 uurb->endpoint &= ~USB_DIR_IN;
967 }
968 if (!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
969 uurb->buffer, uurb->buffer_length)) {
960 kfree(dr); 970 kfree(dr);
961 return -EFAULT; 971 return -EFAULT;
962 } 972 }
963 snoop(&ps->dev->dev, "control urb: bRequest=%02x " 973 snoop(&ps->dev->dev, "control urb: bRequest=%02x "
964 "bRrequestType=%02x wValue=%04x " 974 "bRrequestType=%02x wValue=%04x "
965 "wIndex=%04x wLength=%04x\n", 975 "wIndex=%04x wLength=%04x\n",
966 dr->bRequest, dr->bRequestType, dr->wValue, 976 dr->bRequest, dr->bRequestType,
967 dr->wIndex, dr->wLength); 977 __le16_to_cpup(&dr->wValue),
978 __le16_to_cpup(&dr->wIndex),
979 __le16_to_cpup(&dr->wLength));
968 break; 980 break;
969 981
970 case USBDEVFS_URB_TYPE_BULK: 982 case USBDEVFS_URB_TYPE_BULK:
971 switch (ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { 983 switch (usb_endpoint_type(&ep->desc)) {
972 case USB_ENDPOINT_XFER_CONTROL: 984 case USB_ENDPOINT_XFER_CONTROL:
973 case USB_ENDPOINT_XFER_ISOC: 985 case USB_ENDPOINT_XFER_ISOC:
974 return -EINVAL; 986 return -EINVAL;
@@ -977,7 +989,8 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
977 uurb->number_of_packets = 0; 989 uurb->number_of_packets = 0;
978 if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE) 990 if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
979 return -EINVAL; 991 return -EINVAL;
980 if (!access_ok((uurb->endpoint & USB_DIR_IN) ? VERIFY_WRITE : VERIFY_READ, uurb->buffer, uurb->buffer_length)) 992 if (!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
993 uurb->buffer, uurb->buffer_length))
981 return -EFAULT; 994 return -EFAULT;
982 snoop(&ps->dev->dev, "bulk urb\n"); 995 snoop(&ps->dev->dev, "bulk urb\n");
983 break; 996 break;
@@ -986,8 +999,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
986 /* arbitrary limit */ 999 /* arbitrary limit */
987 if (uurb->number_of_packets < 1 || uurb->number_of_packets > 128) 1000 if (uurb->number_of_packets < 1 || uurb->number_of_packets > 128)
988 return -EINVAL; 1001 return -EINVAL;
989 if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) 1002 if (!usb_endpoint_xfer_isoc(&ep->desc))
990 != USB_ENDPOINT_XFER_ISOC)
991 return -EINVAL; 1003 return -EINVAL;
992 isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) * uurb->number_of_packets; 1004 isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) * uurb->number_of_packets;
993 if (!(isopkt = kmalloc(isofrmlen, GFP_KERNEL))) 1005 if (!(isopkt = kmalloc(isofrmlen, GFP_KERNEL)))
@@ -1014,12 +1026,12 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1014 1026
1015 case USBDEVFS_URB_TYPE_INTERRUPT: 1027 case USBDEVFS_URB_TYPE_INTERRUPT:
1016 uurb->number_of_packets = 0; 1028 uurb->number_of_packets = 0;
1017 if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) 1029 if (!usb_endpoint_xfer_int(&ep->desc))
1018 != USB_ENDPOINT_XFER_INT)
1019 return -EINVAL; 1030 return -EINVAL;
1020 if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE) 1031 if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
1021 return -EINVAL; 1032 return -EINVAL;
1022 if (!access_ok((uurb->endpoint & USB_DIR_IN) ? VERIFY_WRITE : VERIFY_READ, uurb->buffer, uurb->buffer_length)) 1033 if (!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
1034 uurb->buffer, uurb->buffer_length))
1023 return -EFAULT; 1035 return -EFAULT;
1024 snoop(&ps->dev->dev, "interrupt urb\n"); 1036 snoop(&ps->dev->dev, "interrupt urb\n");
1025 break; 1037 break;
@@ -1039,8 +1051,11 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1039 return -ENOMEM; 1051 return -ENOMEM;
1040 } 1052 }
1041 as->urb->dev = ps->dev; 1053 as->urb->dev = ps->dev;
1042 as->urb->pipe = (uurb->type << 30) | __create_pipe(ps->dev, uurb->endpoint & 0xf) | (uurb->endpoint & USB_DIR_IN); 1054 as->urb->pipe = (uurb->type << 30) |
1043 as->urb->transfer_flags = uurb->flags; 1055 __create_pipe(ps->dev, uurb->endpoint & 0xf) |
1056 (uurb->endpoint & USB_DIR_IN);
1057 as->urb->transfer_flags = uurb->flags |
1058 (is_in ? URB_DIR_IN : URB_DIR_OUT);
1044 as->urb->transfer_buffer_length = uurb->buffer_length; 1059 as->urb->transfer_buffer_length = uurb->buffer_length;
1045 as->urb->setup_packet = (unsigned char*)dr; 1060 as->urb->setup_packet = (unsigned char*)dr;
1046 as->urb->start_frame = uurb->start_frame; 1061 as->urb->start_frame = uurb->start_frame;
@@ -1070,13 +1085,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1070 as->uid = current->uid; 1085 as->uid = current->uid;
1071 as->euid = current->euid; 1086 as->euid = current->euid;
1072 security_task_getsecid(current, &as->secid); 1087 security_task_getsecid(current, &as->secid);
1073 if (!(uurb->endpoint & USB_DIR_IN)) { 1088 if (!is_in) {
1074 if (copy_from_user(as->urb->transfer_buffer, uurb->buffer, as->urb->transfer_buffer_length)) { 1089 if (copy_from_user(as->urb->transfer_buffer, uurb->buffer,
1090 as->urb->transfer_buffer_length)) {
1075 free_async(as); 1091 free_async(as);
1076 return -EFAULT; 1092 return -EFAULT;
1077 } 1093 }
1078 } 1094 }
1079 snoop(&as->urb->dev->dev, "submit urb\n");
1080 snoop_urb(as->urb, as->userurb); 1095 snoop_urb(as->urb, as->userurb);
1081 async_newpending(as); 1096 async_newpending(as);
1082 if ((ret = usb_submit_urb(as->urb, GFP_KERNEL))) { 1097 if ((ret = usb_submit_urb(as->urb, GFP_KERNEL))) {
@@ -1119,14 +1134,14 @@ static int processcompl(struct async *as, void __user * __user *arg)
1119 if (as->userbuffer) 1134 if (as->userbuffer)
1120 if (copy_to_user(as->userbuffer, urb->transfer_buffer, urb->transfer_buffer_length)) 1135 if (copy_to_user(as->userbuffer, urb->transfer_buffer, urb->transfer_buffer_length))
1121 return -EFAULT; 1136 return -EFAULT;
1122 if (put_user(urb->status, &userurb->status)) 1137 if (put_user(as->status, &userurb->status))
1123 return -EFAULT; 1138 return -EFAULT;
1124 if (put_user(urb->actual_length, &userurb->actual_length)) 1139 if (put_user(urb->actual_length, &userurb->actual_length))
1125 return -EFAULT; 1140 return -EFAULT;
1126 if (put_user(urb->error_count, &userurb->error_count)) 1141 if (put_user(urb->error_count, &userurb->error_count))
1127 return -EFAULT; 1142 return -EFAULT;
1128 1143
1129 if (usb_pipeisoc(urb->pipe)) { 1144 if (usb_endpoint_xfer_isoc(&urb->ep->desc)) {
1130 for (i = 0; i < urb->number_of_packets; i++) { 1145 for (i = 0; i < urb->number_of_packets; i++) {
1131 if (put_user(urb->iso_frame_desc[i].actual_length, 1146 if (put_user(urb->iso_frame_desc[i].actual_length,
1132 &userurb->iso_frame_desc[i].actual_length)) 1147 &userurb->iso_frame_desc[i].actual_length))
@@ -1233,14 +1248,14 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
1233 if (as->userbuffer) 1248 if (as->userbuffer)
1234 if (copy_to_user(as->userbuffer, urb->transfer_buffer, urb->transfer_buffer_length)) 1249 if (copy_to_user(as->userbuffer, urb->transfer_buffer, urb->transfer_buffer_length))
1235 return -EFAULT; 1250 return -EFAULT;
1236 if (put_user(urb->status, &userurb->status)) 1251 if (put_user(as->status, &userurb->status))
1237 return -EFAULT; 1252 return -EFAULT;
1238 if (put_user(urb->actual_length, &userurb->actual_length)) 1253 if (put_user(urb->actual_length, &userurb->actual_length))
1239 return -EFAULT; 1254 return -EFAULT;
1240 if (put_user(urb->error_count, &userurb->error_count)) 1255 if (put_user(urb->error_count, &userurb->error_count))
1241 return -EFAULT; 1256 return -EFAULT;
1242 1257
1243 if (usb_pipeisoc(urb->pipe)) { 1258 if (usb_endpoint_xfer_isoc(&urb->ep->desc)) {
1244 for (i = 0; i < urb->number_of_packets; i++) { 1259 for (i = 0; i < urb->number_of_packets; i++) {
1245 if (put_user(urb->iso_frame_desc[i].actual_length, 1260 if (put_user(urb->iso_frame_desc[i].actual_length,
1246 &userurb->iso_frame_desc[i].actual_length)) 1261 &userurb->iso_frame_desc[i].actual_length))
@@ -1576,6 +1591,7 @@ static unsigned int usbdev_poll(struct file *file, struct poll_table_struct *wai
1576} 1591}
1577 1592
1578const struct file_operations usbdev_file_operations = { 1593const struct file_operations usbdev_file_operations = {
1594 .owner = THIS_MODULE,
1579 .llseek = usbdev_lseek, 1595 .llseek = usbdev_lseek,
1580 .read = usbdev_read, 1596 .read = usbdev_read,
1581 .poll = usbdev_poll, 1597 .poll = usbdev_poll,
@@ -1625,10 +1641,7 @@ static struct notifier_block usbdev_nb = {
1625}; 1641};
1626#endif 1642#endif
1627 1643
1628static struct cdev usb_device_cdev = { 1644static struct cdev usb_device_cdev;
1629 .kobj = {.name = "usb_device", },
1630 .owner = THIS_MODULE,
1631};
1632 1645
1633int __init usb_devio_init(void) 1646int __init usb_devio_init(void)
1634{ 1647{
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 63b1243a91..8586817698 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -202,6 +202,11 @@ static int usb_probe_interface(struct device *dev)
202 intf = to_usb_interface(dev); 202 intf = to_usb_interface(dev);
203 udev = interface_to_usbdev(intf); 203 udev = interface_to_usbdev(intf);
204 204
205 if (udev->authorized == 0) {
206 dev_err(&intf->dev, "Device is not authorized for usage\n");
207 return -ENODEV;
208 }
209
205 id = usb_match_id(intf, driver->id_table); 210 id = usb_match_id(intf, driver->id_table);
206 if (!id) 211 if (!id)
207 id = usb_match_dynamic_id(intf, driver); 212 id = usb_match_dynamic_id(intf, driver);
@@ -576,12 +581,9 @@ static int usb_device_match(struct device *dev, struct device_driver *drv)
576} 581}
577 582
578#ifdef CONFIG_HOTPLUG 583#ifdef CONFIG_HOTPLUG
579static int usb_uevent(struct device *dev, char **envp, int num_envp, 584static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
580 char *buffer, int buffer_size)
581{ 585{
582 struct usb_device *usb_dev; 586 struct usb_device *usb_dev;
583 int i = 0;
584 int length = 0;
585 587
586 if (!dev) 588 if (!dev)
587 return -ENODEV; 589 return -ENODEV;
@@ -610,51 +612,39 @@ static int usb_uevent(struct device *dev, char **envp, int num_envp,
610 * all the device descriptors we don't tell them about. Or 612 * all the device descriptors we don't tell them about. Or
611 * act as usermode drivers. 613 * act as usermode drivers.
612 */ 614 */
613 if (add_uevent_var(envp, num_envp, &i, 615 if (add_uevent_var(env, "DEVICE=/proc/bus/usb/%03d/%03d",
614 buffer, buffer_size, &length,
615 "DEVICE=/proc/bus/usb/%03d/%03d",
616 usb_dev->bus->busnum, usb_dev->devnum)) 616 usb_dev->bus->busnum, usb_dev->devnum))
617 return -ENOMEM; 617 return -ENOMEM;
618#endif 618#endif
619 619
620 /* per-device configurations are common */ 620 /* per-device configurations are common */
621 if (add_uevent_var(envp, num_envp, &i, 621 if (add_uevent_var(env, "PRODUCT=%x/%x/%x",
622 buffer, buffer_size, &length,
623 "PRODUCT=%x/%x/%x",
624 le16_to_cpu(usb_dev->descriptor.idVendor), 622 le16_to_cpu(usb_dev->descriptor.idVendor),
625 le16_to_cpu(usb_dev->descriptor.idProduct), 623 le16_to_cpu(usb_dev->descriptor.idProduct),
626 le16_to_cpu(usb_dev->descriptor.bcdDevice))) 624 le16_to_cpu(usb_dev->descriptor.bcdDevice)))
627 return -ENOMEM; 625 return -ENOMEM;
628 626
629 /* class-based driver binding models */ 627 /* class-based driver binding models */
630 if (add_uevent_var(envp, num_envp, &i, 628 if (add_uevent_var(env, "TYPE=%d/%d/%d",
631 buffer, buffer_size, &length,
632 "TYPE=%d/%d/%d",
633 usb_dev->descriptor.bDeviceClass, 629 usb_dev->descriptor.bDeviceClass,
634 usb_dev->descriptor.bDeviceSubClass, 630 usb_dev->descriptor.bDeviceSubClass,
635 usb_dev->descriptor.bDeviceProtocol)) 631 usb_dev->descriptor.bDeviceProtocol))
636 return -ENOMEM; 632 return -ENOMEM;
637 633
638 if (add_uevent_var(envp, num_envp, &i, 634 if (add_uevent_var(env, "BUSNUM=%03d",
639 buffer, buffer_size, &length,
640 "BUSNUM=%03d",
641 usb_dev->bus->busnum)) 635 usb_dev->bus->busnum))
642 return -ENOMEM; 636 return -ENOMEM;
643 637
644 if (add_uevent_var(envp, num_envp, &i, 638 if (add_uevent_var(env, "DEVNUM=%03d",
645 buffer, buffer_size, &length,
646 "DEVNUM=%03d",
647 usb_dev->devnum)) 639 usb_dev->devnum))
648 return -ENOMEM; 640 return -ENOMEM;
649 641
650 envp[i] = NULL;
651 return 0; 642 return 0;
652} 643}
653 644
654#else 645#else
655 646
656static int usb_uevent(struct device *dev, char **envp, 647static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
657 int num_envp, char *buffer, int buffer_size)
658{ 648{
659 return -ENODEV; 649 return -ENODEV;
660} 650}
@@ -945,11 +935,11 @@ done:
945#ifdef CONFIG_USB_SUSPEND 935#ifdef CONFIG_USB_SUSPEND
946 936
947/* Internal routine to check whether we may autosuspend a device. */ 937/* Internal routine to check whether we may autosuspend a device. */
948static int autosuspend_check(struct usb_device *udev) 938static int autosuspend_check(struct usb_device *udev, int reschedule)
949{ 939{
950 int i; 940 int i;
951 struct usb_interface *intf; 941 struct usb_interface *intf;
952 unsigned long suspend_time; 942 unsigned long suspend_time, j;
953 943
954 /* For autosuspend, fail fast if anything is in use or autosuspend 944 /* For autosuspend, fail fast if anything is in use or autosuspend
955 * is disabled. Also fail if any interfaces require remote wakeup 945 * is disabled. Also fail if any interfaces require remote wakeup
@@ -991,20 +981,20 @@ static int autosuspend_check(struct usb_device *udev)
991 } 981 }
992 982
993 /* If everything is okay but the device hasn't been idle for long 983 /* If everything is okay but the device hasn't been idle for long
994 * enough, queue a delayed autosuspend request. 984 * enough, queue a delayed autosuspend request. If the device
985 * _has_ been idle for long enough and the reschedule flag is set,
986 * likewise queue a delayed (1 second) autosuspend request.
995 */ 987 */
996 if (time_after(suspend_time, jiffies)) { 988 j = jiffies;
989 if (time_before(j, suspend_time))
990 reschedule = 1;
991 else
992 suspend_time = j + HZ;
993 if (reschedule) {
997 if (!timer_pending(&udev->autosuspend.timer)) { 994 if (!timer_pending(&udev->autosuspend.timer)) {
998
999 /* The value of jiffies may change between the
1000 * time_after() comparison above and the subtraction
1001 * below. That's okay; the system behaves sanely
1002 * when a timer is registered for the present moment
1003 * or for the past.
1004 */
1005 queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend, 995 queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
1006 round_jiffies_relative(suspend_time - jiffies)); 996 round_jiffies_relative(suspend_time - j));
1007 } 997 }
1008 return -EAGAIN; 998 return -EAGAIN;
1009 } 999 }
1010 return 0; 1000 return 0;
@@ -1012,7 +1002,7 @@ static int autosuspend_check(struct usb_device *udev)
1012 1002
1013#else 1003#else
1014 1004
1015static inline int autosuspend_check(struct usb_device *udev) 1005static inline int autosuspend_check(struct usb_device *udev, int reschedule)
1016{ 1006{
1017 return 0; 1007 return 0;
1018} 1008}
@@ -1069,7 +1059,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1069 udev->do_remote_wakeup = device_may_wakeup(&udev->dev); 1059 udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
1070 1060
1071 if (udev->auto_pm) { 1061 if (udev->auto_pm) {
1072 status = autosuspend_check(udev); 1062 status = autosuspend_check(udev, 0);
1073 if (status < 0) 1063 if (status < 0)
1074 goto done; 1064 goto done;
1075 } 1065 }
@@ -1083,15 +1073,8 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1083 break; 1073 break;
1084 } 1074 }
1085 } 1075 }
1086 if (status == 0) { 1076 if (status == 0)
1087
1088 /* Non-root devices don't need to do anything for FREEZE
1089 * or PRETHAW. */
1090 if (udev->parent && (msg.event == PM_EVENT_FREEZE ||
1091 msg.event == PM_EVENT_PRETHAW))
1092 goto done;
1093 status = usb_suspend_device(udev, msg); 1077 status = usb_suspend_device(udev, msg);
1094 }
1095 1078
1096 /* If the suspend failed, resume interfaces that did get suspended */ 1079 /* If the suspend failed, resume interfaces that did get suspended */
1097 if (status != 0) { 1080 if (status != 0) {
@@ -1102,12 +1085,24 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1102 1085
1103 /* Try another autosuspend when the interfaces aren't busy */ 1086 /* Try another autosuspend when the interfaces aren't busy */
1104 if (udev->auto_pm) 1087 if (udev->auto_pm)
1105 autosuspend_check(udev); 1088 autosuspend_check(udev, status == -EBUSY);
1106 1089
1107 /* If the suspend succeeded, propagate it up the tree */ 1090 /* If the suspend succeeded then prevent any more URB submissions,
1091 * flush any outstanding URBs, and propagate the suspend up the tree.
1092 */
1108 } else { 1093 } else {
1109 cancel_delayed_work(&udev->autosuspend); 1094 cancel_delayed_work(&udev->autosuspend);
1110 if (parent) 1095 udev->can_submit = 0;
1096 for (i = 0; i < 16; ++i) {
1097 usb_hcd_flush_endpoint(udev, udev->ep_out[i]);
1098 usb_hcd_flush_endpoint(udev, udev->ep_in[i]);
1099 }
1100
1101 /* If this is just a FREEZE or a PRETHAW, udev might
1102 * not really be suspended. Only true suspends get
1103 * propagated up the device tree.
1104 */
1105 if (parent && udev->state == USB_STATE_SUSPENDED)
1111 usb_autosuspend_device(parent); 1106 usb_autosuspend_device(parent);
1112 } 1107 }
1113 1108
@@ -1156,6 +1151,7 @@ static int usb_resume_both(struct usb_device *udev)
1156 status = -ENODEV; 1151 status = -ENODEV;
1157 goto done; 1152 goto done;
1158 } 1153 }
1154 udev->can_submit = 1;
1159 1155
1160 /* Propagate the resume up the tree, if necessary */ 1156 /* Propagate the resume up the tree, if necessary */
1161 if (udev->state == USB_STATE_SUSPENDED) { 1157 if (udev->state == USB_STATE_SUSPENDED) {
@@ -1529,9 +1525,21 @@ int usb_external_resume_device(struct usb_device *udev)
1529 1525
1530static int usb_suspend(struct device *dev, pm_message_t message) 1526static int usb_suspend(struct device *dev, pm_message_t message)
1531{ 1527{
1528 struct usb_device *udev;
1529
1532 if (!is_usb_device(dev)) /* Ignore PM for interfaces */ 1530 if (!is_usb_device(dev)) /* Ignore PM for interfaces */
1533 return 0; 1531 return 0;
1534 return usb_external_suspend_device(to_usb_device(dev), message); 1532 udev = to_usb_device(dev);
1533
1534 /* If udev is already suspended, we can skip this suspend and
1535 * we should also skip the upcoming system resume. */
1536 if (udev->state == USB_STATE_SUSPENDED) {
1537 udev->skip_sys_resume = 1;
1538 return 0;
1539 }
1540
1541 udev->skip_sys_resume = 0;
1542 return usb_external_suspend_device(udev, message);
1535} 1543}
1536 1544
1537static int usb_resume(struct device *dev) 1545static int usb_resume(struct device *dev)
@@ -1542,13 +1550,14 @@ static int usb_resume(struct device *dev)
1542 return 0; 1550 return 0;
1543 udev = to_usb_device(dev); 1551 udev = to_usb_device(dev);
1544 1552
1545 /* If autoresume is disabled then we also want to prevent resume 1553 /* If udev->skip_sys_resume is set then udev was already suspended
1546 * during system wakeup. However, a "persistent-device" reset-resume 1554 * when the system suspend started, so we don't want to resume
1547 * after power loss counts as a wakeup event. So allow a 1555 * udev during this system wakeup. However a reset-resume counts
1548 * reset-resume to occur if remote wakeup is enabled. */ 1556 * as a wakeup event, so allow a reset-resume to occur if remote
1549 if (udev->autoresume_disabled) { 1557 * wakeup is enabled. */
1558 if (udev->skip_sys_resume) {
1550 if (!(udev->reset_resume && udev->do_remote_wakeup)) 1559 if (!(udev->reset_resume && udev->do_remote_wakeup))
1551 return -EPERM; 1560 return -EHOSTUNREACH;
1552 } 1561 }
1553 return usb_external_resume_device(udev); 1562 return usb_external_resume_device(udev);
1554} 1563}
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index e0ec7045e8..7dc123d6b2 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -267,7 +267,6 @@ static void ep_device_release(struct device *dev)
267{ 267{
268 struct ep_device *ep_dev = to_ep_device(dev); 268 struct ep_device *ep_dev = to_ep_device(dev);
269 269
270 dev_dbg(dev, "%s called for %s\n", __FUNCTION__, dev->bus_id);
271 endpoint_free_minor(ep_dev); 270 endpoint_free_minor(ep_dev);
272 kfree(ep_dev); 271 kfree(ep_dev);
273} 272}
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index b2fc2b1152..c1cb94e9f2 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -40,7 +40,7 @@ static int is_activesync(struct usb_interface_descriptor *desc)
40 && desc->bInterfaceProtocol == 1; 40 && desc->bInterfaceProtocol == 1;
41} 41}
42 42
43static int choose_configuration(struct usb_device *udev) 43int usb_choose_configuration(struct usb_device *udev)
44{ 44{
45 int i; 45 int i;
46 int num_configs; 46 int num_configs;
@@ -161,17 +161,20 @@ static int generic_probe(struct usb_device *udev)
161 /* Choose and set the configuration. This registers the interfaces 161 /* Choose and set the configuration. This registers the interfaces
162 * with the driver core and lets interface drivers bind to them. 162 * with the driver core and lets interface drivers bind to them.
163 */ 163 */
164 c = choose_configuration(udev); 164 if (udev->authorized == 0)
165 if (c >= 0) { 165 dev_err(&udev->dev, "Device is not authorized for usage\n");
166 err = usb_set_configuration(udev, c); 166 else {
167 if (err) { 167 c = usb_choose_configuration(udev);
168 dev_err(&udev->dev, "can't set config #%d, error %d\n", 168 if (c >= 0) {
169 err = usb_set_configuration(udev, c);
170 if (err) {
171 dev_err(&udev->dev, "can't set config #%d, error %d\n",
169 c, err); 172 c, err);
170 /* This need not be fatal. The user can try to 173 /* This need not be fatal. The user can try to
171 * set other configurations. */ 174 * set other configurations. */
175 }
172 } 176 }
173 } 177 }
174
175 /* USB device state == configured ... usable */ 178 /* USB device state == configured ... usable */
176 usb_notify_add_device(udev); 179 usb_notify_add_device(udev);
177 180
@@ -203,8 +206,13 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg)
203 */ 206 */
204 if (!udev->parent) 207 if (!udev->parent)
205 rc = hcd_bus_suspend(udev); 208 rc = hcd_bus_suspend(udev);
209
210 /* Non-root devices don't need to do anything for FREEZE or PRETHAW */
211 else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
212 rc = 0;
206 else 213 else
207 rc = usb_port_suspend(udev); 214 rc = usb_port_suspend(udev);
215
208 return rc; 216 return rc;
209} 217}
210 218
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 42ef1d5f6c..3dd997df85 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -356,10 +356,18 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
356 const u8 *bufp = tbuf; 356 const u8 *bufp = tbuf;
357 int len = 0; 357 int len = 0;
358 int patch_wakeup = 0; 358 int patch_wakeup = 0;
359 unsigned long flags; 359 int status;
360 int status = 0;
361 int n; 360 int n;
362 361
362 might_sleep();
363
364 spin_lock_irq(&hcd_root_hub_lock);
365 status = usb_hcd_link_urb_to_ep(hcd, urb);
366 spin_unlock_irq(&hcd_root_hub_lock);
367 if (status)
368 return status;
369 urb->hcpriv = hcd; /* Indicate it's queued */
370
363 cmd = (struct usb_ctrlrequest *) urb->setup_packet; 371 cmd = (struct usb_ctrlrequest *) urb->setup_packet;
364 typeReq = (cmd->bRequestType << 8) | cmd->bRequest; 372 typeReq = (cmd->bRequestType << 8) | cmd->bRequest;
365 wValue = le16_to_cpu (cmd->wValue); 373 wValue = le16_to_cpu (cmd->wValue);
@@ -523,13 +531,18 @@ error:
523 } 531 }
524 532
525 /* any errors get returned through the urb completion */ 533 /* any errors get returned through the urb completion */
526 local_irq_save (flags); 534 spin_lock_irq(&hcd_root_hub_lock);
527 spin_lock (&urb->lock); 535 usb_hcd_unlink_urb_from_ep(hcd, urb);
528 if (urb->status == -EINPROGRESS) 536
529 urb->status = status; 537 /* This peculiar use of spinlocks echoes what real HC drivers do.
530 spin_unlock (&urb->lock); 538 * Avoiding calls to local_irq_disable/enable makes the code
531 usb_hcd_giveback_urb (hcd, urb); 539 * RT-friendly.
532 local_irq_restore (flags); 540 */
541 spin_unlock(&hcd_root_hub_lock);
542 usb_hcd_giveback_urb(hcd, urb, status);
543 spin_lock(&hcd_root_hub_lock);
544
545 spin_unlock_irq(&hcd_root_hub_lock);
533 return 0; 546 return 0;
534} 547}
535 548
@@ -559,31 +572,23 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
559 if (length > 0) { 572 if (length > 0) {
560 573
561 /* try to complete the status urb */ 574 /* try to complete the status urb */
562 local_irq_save (flags); 575 spin_lock_irqsave(&hcd_root_hub_lock, flags);
563 spin_lock(&hcd_root_hub_lock);
564 urb = hcd->status_urb; 576 urb = hcd->status_urb;
565 if (urb) { 577 if (urb) {
566 spin_lock(&urb->lock); 578 hcd->poll_pending = 0;
567 if (urb->status == -EINPROGRESS) { 579 hcd->status_urb = NULL;
568 hcd->poll_pending = 0; 580 urb->actual_length = length;
569 hcd->status_urb = NULL; 581 memcpy(urb->transfer_buffer, buffer, length);
570 urb->status = 0;
571 urb->hcpriv = NULL;
572 urb->actual_length = length;
573 memcpy(urb->transfer_buffer, buffer, length);
574 } else /* urb has been unlinked */
575 length = 0;
576 spin_unlock(&urb->lock);
577 } else
578 length = 0;
579 spin_unlock(&hcd_root_hub_lock);
580 582
581 /* local irqs are always blocked in completions */ 583 usb_hcd_unlink_urb_from_ep(hcd, urb);
582 if (length > 0) 584 spin_unlock(&hcd_root_hub_lock);
583 usb_hcd_giveback_urb (hcd, urb); 585 usb_hcd_giveback_urb(hcd, urb, 0);
584 else 586 spin_lock(&hcd_root_hub_lock);
587 } else {
588 length = 0;
585 hcd->poll_pending = 1; 589 hcd->poll_pending = 1;
586 local_irq_restore (flags); 590 }
591 spin_unlock_irqrestore(&hcd_root_hub_lock, flags);
587 } 592 }
588 593
589 /* The USB 2.0 spec says 256 ms. This is close enough and won't 594 /* The USB 2.0 spec says 256 ms. This is close enough and won't
@@ -611,33 +616,35 @@ static int rh_queue_status (struct usb_hcd *hcd, struct urb *urb)
611 int len = 1 + (urb->dev->maxchild / 8); 616 int len = 1 + (urb->dev->maxchild / 8);
612 617
613 spin_lock_irqsave (&hcd_root_hub_lock, flags); 618 spin_lock_irqsave (&hcd_root_hub_lock, flags);
614 if (urb->status != -EINPROGRESS) /* already unlinked */ 619 if (hcd->status_urb || urb->transfer_buffer_length < len) {
615 retval = urb->status;
616 else if (hcd->status_urb || urb->transfer_buffer_length < len) {
617 dev_dbg (hcd->self.controller, "not queuing rh status urb\n"); 620 dev_dbg (hcd->self.controller, "not queuing rh status urb\n");
618 retval = -EINVAL; 621 retval = -EINVAL;
619 } else { 622 goto done;
620 hcd->status_urb = urb; 623 }
621 urb->hcpriv = hcd; /* indicate it's queued */
622 624
623 if (!hcd->uses_new_polling) 625 retval = usb_hcd_link_urb_to_ep(hcd, urb);
624 mod_timer (&hcd->rh_timer, 626 if (retval)
625 (jiffies/(HZ/4) + 1) * (HZ/4)); 627 goto done;
626 628
627 /* If a status change has already occurred, report it ASAP */ 629 hcd->status_urb = urb;
628 else if (hcd->poll_pending) 630 urb->hcpriv = hcd; /* indicate it's queued */
629 mod_timer (&hcd->rh_timer, jiffies); 631 if (!hcd->uses_new_polling)
630 retval = 0; 632 mod_timer(&hcd->rh_timer, (jiffies/(HZ/4) + 1) * (HZ/4));
631 } 633
634 /* If a status change has already occurred, report it ASAP */
635 else if (hcd->poll_pending)
636 mod_timer(&hcd->rh_timer, jiffies);
637 retval = 0;
638 done:
632 spin_unlock_irqrestore (&hcd_root_hub_lock, flags); 639 spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
633 return retval; 640 return retval;
634} 641}
635 642
636static int rh_urb_enqueue (struct usb_hcd *hcd, struct urb *urb) 643static int rh_urb_enqueue (struct usb_hcd *hcd, struct urb *urb)
637{ 644{
638 if (usb_pipeint (urb->pipe)) 645 if (usb_endpoint_xfer_int(&urb->ep->desc))
639 return rh_queue_status (hcd, urb); 646 return rh_queue_status (hcd, urb);
640 if (usb_pipecontrol (urb->pipe)) 647 if (usb_endpoint_xfer_control(&urb->ep->desc))
641 return rh_call_control (hcd, urb); 648 return rh_call_control (hcd, urb);
642 return -EINVAL; 649 return -EINVAL;
643} 650}
@@ -647,32 +654,96 @@ static int rh_urb_enqueue (struct usb_hcd *hcd, struct urb *urb)
647/* Unlinks of root-hub control URBs are legal, but they don't do anything 654/* Unlinks of root-hub control URBs are legal, but they don't do anything
648 * since these URBs always execute synchronously. 655 * since these URBs always execute synchronously.
649 */ 656 */
650static int usb_rh_urb_dequeue (struct usb_hcd *hcd, struct urb *urb) 657static int usb_rh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
651{ 658{
652 unsigned long flags; 659 unsigned long flags;
660 int rc;
661
662 spin_lock_irqsave(&hcd_root_hub_lock, flags);
663 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
664 if (rc)
665 goto done;
653 666
654 if (usb_pipeendpoint(urb->pipe) == 0) { /* Control URB */ 667 if (usb_endpoint_num(&urb->ep->desc) == 0) { /* Control URB */
655 ; /* Do nothing */ 668 ; /* Do nothing */
656 669
657 } else { /* Status URB */ 670 } else { /* Status URB */
658 if (!hcd->uses_new_polling) 671 if (!hcd->uses_new_polling)
659 del_timer (&hcd->rh_timer); 672 del_timer (&hcd->rh_timer);
660 local_irq_save (flags);
661 spin_lock (&hcd_root_hub_lock);
662 if (urb == hcd->status_urb) { 673 if (urb == hcd->status_urb) {
663 hcd->status_urb = NULL; 674 hcd->status_urb = NULL;
664 urb->hcpriv = NULL; 675 usb_hcd_unlink_urb_from_ep(hcd, urb);
665 } else 676
666 urb = NULL; /* wasn't fully queued */ 677 spin_unlock(&hcd_root_hub_lock);
667 spin_unlock (&hcd_root_hub_lock); 678 usb_hcd_giveback_urb(hcd, urb, status);
668 if (urb) 679 spin_lock(&hcd_root_hub_lock);
669 usb_hcd_giveback_urb (hcd, urb); 680 }
670 local_irq_restore (flags);
671 } 681 }
682 done:
683 spin_unlock_irqrestore(&hcd_root_hub_lock, flags);
684 return rc;
685}
672 686
673 return 0; 687
688
689/*
690 * Show & store the current value of authorized_default
691 */
692static ssize_t usb_host_authorized_default_show(struct device *dev,
693 struct device_attribute *attr,
694 char *buf)
695{
696 struct usb_device *rh_usb_dev = to_usb_device(dev);
697 struct usb_bus *usb_bus = rh_usb_dev->bus;
698 struct usb_hcd *usb_hcd;
699
700 if (usb_bus == NULL) /* FIXME: not sure if this case is possible */
701 return -ENODEV;
702 usb_hcd = bus_to_hcd(usb_bus);
703 return snprintf(buf, PAGE_SIZE, "%u\n", usb_hcd->authorized_default);
704}
705
706static ssize_t usb_host_authorized_default_store(struct device *dev,
707 struct device_attribute *attr,
708 const char *buf, size_t size)
709{
710 ssize_t result;
711 unsigned val;
712 struct usb_device *rh_usb_dev = to_usb_device(dev);
713 struct usb_bus *usb_bus = rh_usb_dev->bus;
714 struct usb_hcd *usb_hcd;
715
716 if (usb_bus == NULL) /* FIXME: not sure if this case is possible */
717 return -ENODEV;
718 usb_hcd = bus_to_hcd(usb_bus);
719 result = sscanf(buf, "%u\n", &val);
720 if (result == 1) {
721 usb_hcd->authorized_default = val? 1 : 0;
722 result = size;
723 }
724 else
725 result = -EINVAL;
726 return result;
674} 727}
675 728
729static DEVICE_ATTR(authorized_default, 0644,
730 usb_host_authorized_default_show,
731 usb_host_authorized_default_store);
732
733
734/* Group all the USB bus attributes */
735static struct attribute *usb_bus_attrs[] = {
736 &dev_attr_authorized_default.attr,
737 NULL,
738};
739
740static struct attribute_group usb_bus_attr_group = {
741 .name = NULL, /* we want them in the same directory */
742 .attrs = usb_bus_attrs,
743};
744
745
746
676/*-------------------------------------------------------------------------*/ 747/*-------------------------------------------------------------------------*/
677 748
678static struct class *usb_host_class; 749static struct class *usb_host_class;
@@ -726,27 +797,23 @@ static void usb_bus_init (struct usb_bus *bus)
726 */ 797 */
727static int usb_register_bus(struct usb_bus *bus) 798static int usb_register_bus(struct usb_bus *bus)
728{ 799{
800 int result = -E2BIG;
729 int busnum; 801 int busnum;
730 802
731 mutex_lock(&usb_bus_list_lock); 803 mutex_lock(&usb_bus_list_lock);
732 busnum = find_next_zero_bit (busmap.busmap, USB_MAXBUS, 1); 804 busnum = find_next_zero_bit (busmap.busmap, USB_MAXBUS, 1);
733 if (busnum < USB_MAXBUS) { 805 if (busnum >= USB_MAXBUS) {
734 set_bit (busnum, busmap.busmap);
735 bus->busnum = busnum;
736 } else {
737 printk (KERN_ERR "%s: too many buses\n", usbcore_name); 806 printk (KERN_ERR "%s: too many buses\n", usbcore_name);
738 mutex_unlock(&usb_bus_list_lock); 807 goto error_find_busnum;
739 return -E2BIG;
740 } 808 }
741 809 set_bit (busnum, busmap.busmap);
810 bus->busnum = busnum;
742 bus->class_dev = class_device_create(usb_host_class, NULL, MKDEV(0,0), 811 bus->class_dev = class_device_create(usb_host_class, NULL, MKDEV(0,0),
743 bus->controller, "usb_host%d", busnum); 812 bus->controller, "usb_host%d",
744 if (IS_ERR(bus->class_dev)) { 813 busnum);
745 clear_bit(busnum, busmap.busmap); 814 result = PTR_ERR(bus->class_dev);
746 mutex_unlock(&usb_bus_list_lock); 815 if (IS_ERR(bus->class_dev))
747 return PTR_ERR(bus->class_dev); 816 goto error_create_class_dev;
748 }
749
750 class_set_devdata(bus->class_dev, bus); 817 class_set_devdata(bus->class_dev, bus);
751 818
752 /* Add it to the local list of buses */ 819 /* Add it to the local list of buses */
@@ -755,8 +822,15 @@ static int usb_register_bus(struct usb_bus *bus)
755 822
756 usb_notify_add_bus(bus); 823 usb_notify_add_bus(bus);
757 824
758 dev_info (bus->controller, "new USB bus registered, assigned bus number %d\n", bus->busnum); 825 dev_info (bus->controller, "new USB bus registered, assigned bus "
826 "number %d\n", bus->busnum);
759 return 0; 827 return 0;
828
829error_create_class_dev:
830 clear_bit(busnum, busmap.busmap);
831error_find_busnum:
832 mutex_unlock(&usb_bus_list_lock);
833 return result;
760} 834}
761 835
762/** 836/**
@@ -908,103 +982,145 @@ EXPORT_SYMBOL (usb_calc_bus_time);
908 982
909/*-------------------------------------------------------------------------*/ 983/*-------------------------------------------------------------------------*/
910 984
911static void urb_unlink(struct usb_hcd *hcd, struct urb *urb) 985/**
986 * usb_hcd_link_urb_to_ep - add an URB to its endpoint queue
987 * @hcd: host controller to which @urb was submitted
988 * @urb: URB being submitted
989 *
990 * Host controller drivers should call this routine in their enqueue()
991 * method. The HCD's private spinlock must be held and interrupts must
992 * be disabled. The actions carried out here are required for URB
993 * submission, as well as for endpoint shutdown and for usb_kill_urb.
994 *
995 * Returns 0 for no error, otherwise a negative error code (in which case
996 * the enqueue() method must fail). If no error occurs but enqueue() fails
997 * anyway, it must call usb_hcd_unlink_urb_from_ep() before releasing
998 * the private spinlock and returning.
999 */
1000int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb)
912{ 1001{
913 unsigned long flags; 1002 int rc = 0;
914 1003
915 /* clear all state linking urb to this dev (and hcd) */ 1004 spin_lock(&hcd_urb_list_lock);
916 spin_lock_irqsave(&hcd_urb_list_lock, flags);
917 list_del_init (&urb->urb_list);
918 spin_unlock_irqrestore(&hcd_urb_list_lock, flags);
919 1005
920 if (hcd->self.uses_dma && !is_root_hub(urb->dev)) { 1006 /* Check that the URB isn't being killed */
921 if (usb_pipecontrol (urb->pipe) 1007 if (unlikely(urb->reject)) {
922 && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) 1008 rc = -EPERM;
923 dma_unmap_single (hcd->self.controller, urb->setup_dma, 1009 goto done;
924 sizeof (struct usb_ctrlrequest),
925 DMA_TO_DEVICE);
926 if (urb->transfer_buffer_length != 0
927 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP))
928 dma_unmap_single (hcd->self.controller,
929 urb->transfer_dma,
930 urb->transfer_buffer_length,
931 usb_pipein (urb->pipe)
932 ? DMA_FROM_DEVICE
933 : DMA_TO_DEVICE);
934 } 1010 }
935}
936
937/* may be called in any context with a valid urb->dev usecount
938 * caller surrenders "ownership" of urb
939 * expects usb_submit_urb() to have sanity checked and conditioned all
940 * inputs in the urb
941 */
942int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
943{
944 int status;
945 struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
946 struct usb_host_endpoint *ep;
947 unsigned long flags;
948 1011
949 if (!hcd) 1012 if (unlikely(!urb->ep->enabled)) {
950 return -ENODEV; 1013 rc = -ENOENT;
1014 goto done;
1015 }
951 1016
952 usbmon_urb_submit(&hcd->self, urb); 1017 if (unlikely(!urb->dev->can_submit)) {
1018 rc = -EHOSTUNREACH;
1019 goto done;
1020 }
953 1021
954 /* 1022 /*
955 * Atomically queue the urb, first to our records, then to the HCD. 1023 * Check the host controller's state and add the URB to the
956 * Access to urb->status is controlled by urb->lock ... changes on 1024 * endpoint's queue.
957 * i/o completion (normal or fault) or unlinking.
958 */ 1025 */
959 1026 switch (hcd->state) {
960 // FIXME: verify that quiescing hc works right (RH cleans up)
961
962 spin_lock_irqsave(&hcd_urb_list_lock, flags);
963 ep = (usb_pipein(urb->pipe) ? urb->dev->ep_in : urb->dev->ep_out)
964 [usb_pipeendpoint(urb->pipe)];
965 if (unlikely (!ep))
966 status = -ENOENT;
967 else if (unlikely (urb->reject))
968 status = -EPERM;
969 else switch (hcd->state) {
970 case HC_STATE_RUNNING: 1027 case HC_STATE_RUNNING:
971 case HC_STATE_RESUMING: 1028 case HC_STATE_RESUMING:
972 list_add_tail (&urb->urb_list, &ep->urb_list); 1029 urb->unlinked = 0;
973 status = 0; 1030 list_add_tail(&urb->urb_list, &urb->ep->urb_list);
974 break; 1031 break;
975 default: 1032 default:
976 status = -ESHUTDOWN; 1033 rc = -ESHUTDOWN;
977 break; 1034 goto done;
978 } 1035 }
979 spin_unlock_irqrestore(&hcd_urb_list_lock, flags); 1036 done:
980 if (status) { 1037 spin_unlock(&hcd_urb_list_lock);
981 INIT_LIST_HEAD (&urb->urb_list); 1038 return rc;
982 usbmon_urb_submit_error(&hcd->self, urb, status); 1039}
983 return status; 1040EXPORT_SYMBOL_GPL(usb_hcd_link_urb_to_ep);
1041
1042/**
1043 * usb_hcd_check_unlink_urb - check whether an URB may be unlinked
1044 * @hcd: host controller to which @urb was submitted
1045 * @urb: URB being checked for unlinkability
1046 * @status: error code to store in @urb if the unlink succeeds
1047 *
1048 * Host controller drivers should call this routine in their dequeue()
1049 * method. The HCD's private spinlock must be held and interrupts must
1050 * be disabled. The actions carried out here are required for making
1051 * sure than an unlink is valid.
1052 *
1053 * Returns 0 for no error, otherwise a negative error code (in which case
1054 * the dequeue() method must fail). The possible error codes are:
1055 *
1056 * -EIDRM: @urb was not submitted or has already completed.
1057 * The completion function may not have been called yet.
1058 *
1059 * -EBUSY: @urb has already been unlinked.
1060 */
1061int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
1062 int status)
1063{
1064 struct list_head *tmp;
1065
1066 /* insist the urb is still queued */
1067 list_for_each(tmp, &urb->ep->urb_list) {
1068 if (tmp == &urb->urb_list)
1069 break;
984 } 1070 }
1071 if (tmp != &urb->urb_list)
1072 return -EIDRM;
985 1073
986 /* increment urb's reference count as part of giving it to the HCD 1074 /* Any status except -EINPROGRESS means something already started to
987 * (which now controls it). HCD guarantees that it either returns 1075 * unlink this URB from the hardware. So there's no more work to do.
988 * an error or calls giveback(), but not both.
989 */ 1076 */
990 urb = usb_get_urb (urb); 1077 if (urb->unlinked)
991 atomic_inc (&urb->use_count); 1078 return -EBUSY;
992 1079 urb->unlinked = status;
993 if (is_root_hub(urb->dev)) { 1080
994 /* NOTE: requirement on hub callers (usbfs and the hub 1081 /* IRQ setup can easily be broken so that USB controllers
995 * driver, for now) that URBs' urb->transfer_buffer be 1082 * never get completion IRQs ... maybe even the ones we need to
996 * valid and usb_buffer_{sync,unmap}() not be needed, since 1083 * finish unlinking the initial failed usb_set_address()
997 * they could clobber root hub response data. 1084 * or device descriptor fetch.
998 */ 1085 */
999 status = rh_urb_enqueue (hcd, urb); 1086 if (!test_bit(HCD_FLAG_SAW_IRQ, &hcd->flags) &&
1000 goto done; 1087 !is_root_hub(urb->dev)) {
1088 dev_warn(hcd->self.controller, "Unlink after no-IRQ? "
1089 "Controller is probably using the wrong IRQ.\n");
1090 set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
1001 } 1091 }
1002 1092
1003 /* lower level hcd code should use *_dma exclusively, 1093 return 0;
1094}
1095EXPORT_SYMBOL_GPL(usb_hcd_check_unlink_urb);
1096
1097/**
1098 * usb_hcd_unlink_urb_from_ep - remove an URB from its endpoint queue
1099 * @hcd: host controller to which @urb was submitted
1100 * @urb: URB being unlinked
1101 *
1102 * Host controller drivers should call this routine before calling
1103 * usb_hcd_giveback_urb(). The HCD's private spinlock must be held and
1104 * interrupts must be disabled. The actions carried out here are required
1105 * for URB completion.
1106 */
1107void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb)
1108{
1109 /* clear all state linking urb to this dev (and hcd) */
1110 spin_lock(&hcd_urb_list_lock);
1111 list_del_init(&urb->urb_list);
1112 spin_unlock(&hcd_urb_list_lock);
1113}
1114EXPORT_SYMBOL_GPL(usb_hcd_unlink_urb_from_ep);
1115
1116static void map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
1117{
1118 /* Map the URB's buffers for DMA access.
1119 * Lower level HCD code should use *_dma exclusively,
1004 * unless it uses pio or talks to another transport. 1120 * unless it uses pio or talks to another transport.
1005 */ 1121 */
1006 if (hcd->self.uses_dma) { 1122 if (hcd->self.uses_dma && !is_root_hub(urb->dev)) {
1007 if (usb_pipecontrol (urb->pipe) 1123 if (usb_endpoint_xfer_control(&urb->ep->desc)
1008 && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) 1124 && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP))
1009 urb->setup_dma = dma_map_single ( 1125 urb->setup_dma = dma_map_single (
1010 hcd->self.controller, 1126 hcd->self.controller,
@@ -1017,20 +1133,75 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
1017 hcd->self.controller, 1133 hcd->self.controller,
1018 urb->transfer_buffer, 1134 urb->transfer_buffer,
1019 urb->transfer_buffer_length, 1135 urb->transfer_buffer_length,
1020 usb_pipein (urb->pipe) 1136 usb_urb_dir_in(urb)
1021 ? DMA_FROM_DEVICE 1137 ? DMA_FROM_DEVICE
1022 : DMA_TO_DEVICE); 1138 : DMA_TO_DEVICE);
1023 } 1139 }
1140}
1024 1141
1025 status = hcd->driver->urb_enqueue (hcd, ep, urb, mem_flags); 1142static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
1026done: 1143{
1027 if (unlikely (status)) { 1144 if (hcd->self.uses_dma && !is_root_hub(urb->dev)) {
1028 urb_unlink(hcd, urb); 1145 if (usb_endpoint_xfer_control(&urb->ep->desc)
1029 atomic_dec (&urb->use_count); 1146 && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP))
1030 if (urb->reject) 1147 dma_unmap_single(hcd->self.controller, urb->setup_dma,
1031 wake_up (&usb_kill_urb_queue); 1148 sizeof(struct usb_ctrlrequest),
1149 DMA_TO_DEVICE);
1150 if (urb->transfer_buffer_length != 0
1151 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP))
1152 dma_unmap_single(hcd->self.controller,
1153 urb->transfer_dma,
1154 urb->transfer_buffer_length,
1155 usb_urb_dir_in(urb)
1156 ? DMA_FROM_DEVICE
1157 : DMA_TO_DEVICE);
1158 }
1159}
1160
1161/*-------------------------------------------------------------------------*/
1162
1163/* may be called in any context with a valid urb->dev usecount
1164 * caller surrenders "ownership" of urb
1165 * expects usb_submit_urb() to have sanity checked and conditioned all
1166 * inputs in the urb
1167 */
1168int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
1169{
1170 int status;
1171 struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
1172
1173 /* increment urb's reference count as part of giving it to the HCD
1174 * (which will control it). HCD guarantees that it either returns
1175 * an error or calls giveback(), but not both.
1176 */
1177 usb_get_urb(urb);
1178 atomic_inc(&urb->use_count);
1179 atomic_inc(&urb->dev->urbnum);
1180 usbmon_urb_submit(&hcd->self, urb);
1181
1182 /* NOTE requirements on root-hub callers (usbfs and the hub
1183 * driver, for now): URBs' urb->transfer_buffer must be
1184 * valid and usb_buffer_{sync,unmap}() not be needed, since
1185 * they could clobber root hub response data. Also, control
1186 * URBs must be submitted in process context with interrupts
1187 * enabled.
1188 */
1189 map_urb_for_dma(hcd, urb);
1190 if (is_root_hub(urb->dev))
1191 status = rh_urb_enqueue(hcd, urb);
1192 else
1193 status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
1194
1195 if (unlikely(status)) {
1032 usbmon_urb_submit_error(&hcd->self, urb, status); 1196 usbmon_urb_submit_error(&hcd->self, urb, status);
1033 usb_put_urb (urb); 1197 unmap_urb_for_dma(hcd, urb);
1198 urb->hcpriv = NULL;
1199 INIT_LIST_HEAD(&urb->urb_list);
1200 atomic_dec(&urb->use_count);
1201 atomic_dec(&urb->dev->urbnum);
1202 if (urb->reject)
1203 wake_up(&usb_kill_urb_queue);
1204 usb_put_urb(urb);
1034 } 1205 }
1035 return status; 1206 return status;
1036} 1207}
@@ -1042,24 +1213,19 @@ done:
1042 * soon as practical. we've already set up the urb's return status, 1213 * soon as practical. we've already set up the urb's return status,
1043 * but we can't know if the callback completed already. 1214 * but we can't know if the callback completed already.
1044 */ 1215 */
1045static int 1216static int unlink1(struct usb_hcd *hcd, struct urb *urb, int status)
1046unlink1 (struct usb_hcd *hcd, struct urb *urb)
1047{ 1217{
1048 int value; 1218 int value;
1049 1219
1050 if (is_root_hub(urb->dev)) 1220 if (is_root_hub(urb->dev))
1051 value = usb_rh_urb_dequeue (hcd, urb); 1221 value = usb_rh_urb_dequeue(hcd, urb, status);
1052 else { 1222 else {
1053 1223
1054 /* The only reason an HCD might fail this call is if 1224 /* The only reason an HCD might fail this call is if
1055 * it has not yet fully queued the urb to begin with. 1225 * it has not yet fully queued the urb to begin with.
1056 * Such failures should be harmless. */ 1226 * Such failures should be harmless. */
1057 value = hcd->driver->urb_dequeue (hcd, urb); 1227 value = hcd->driver->urb_dequeue(hcd, urb, status);
1058 } 1228 }
1059
1060 if (value != 0)
1061 dev_dbg (hcd->self.controller, "dequeue %p --> %d\n",
1062 urb, value);
1063 return value; 1229 return value;
1064} 1230}
1065 1231
@@ -1071,88 +1237,17 @@ unlink1 (struct usb_hcd *hcd, struct urb *urb)
1071 */ 1237 */
1072int usb_hcd_unlink_urb (struct urb *urb, int status) 1238int usb_hcd_unlink_urb (struct urb *urb, int status)
1073{ 1239{
1074 struct usb_host_endpoint *ep; 1240 struct usb_hcd *hcd;
1075 struct usb_hcd *hcd = NULL; 1241 int retval;
1076 struct device *sys = NULL;
1077 unsigned long flags;
1078 struct list_head *tmp;
1079 int retval;
1080
1081 if (!urb)
1082 return -EINVAL;
1083 if (!urb->dev || !urb->dev->bus)
1084 return -ENODEV;
1085 ep = (usb_pipein(urb->pipe) ? urb->dev->ep_in : urb->dev->ep_out)
1086 [usb_pipeendpoint(urb->pipe)];
1087 if (!ep)
1088 return -ENODEV;
1089
1090 /*
1091 * we contend for urb->status with the hcd core,
1092 * which changes it while returning the urb.
1093 *
1094 * Caller guaranteed that the urb pointer hasn't been freed, and
1095 * that it was submitted. But as a rule it can't know whether or
1096 * not it's already been unlinked ... so we respect the reversed
1097 * lock sequence needed for the usb_hcd_giveback_urb() code paths
1098 * (urb lock, then hcd_urb_list_lock) in case some other CPU is now
1099 * unlinking it.
1100 */
1101 spin_lock_irqsave (&urb->lock, flags);
1102 spin_lock(&hcd_urb_list_lock);
1103 1242
1104 sys = &urb->dev->dev;
1105 hcd = bus_to_hcd(urb->dev->bus); 1243 hcd = bus_to_hcd(urb->dev->bus);
1106 if (hcd == NULL) { 1244 retval = unlink1(hcd, urb, status);
1107 retval = -ENODEV;
1108 goto done;
1109 }
1110 1245
1111 /* insist the urb is still queued */
1112 list_for_each(tmp, &ep->urb_list) {
1113 if (tmp == &urb->urb_list)
1114 break;
1115 }
1116 if (tmp != &urb->urb_list) {
1117 retval = -EIDRM;
1118 goto done;
1119 }
1120
1121 /* Any status except -EINPROGRESS means something already started to
1122 * unlink this URB from the hardware. So there's no more work to do.
1123 */
1124 if (urb->status != -EINPROGRESS) {
1125 retval = -EBUSY;
1126 goto done;
1127 }
1128
1129 /* IRQ setup can easily be broken so that USB controllers
1130 * never get completion IRQs ... maybe even the ones we need to
1131 * finish unlinking the initial failed usb_set_address()
1132 * or device descriptor fetch.
1133 */
1134 if (!test_bit(HCD_FLAG_SAW_IRQ, &hcd->flags) &&
1135 !is_root_hub(urb->dev)) {
1136 dev_warn (hcd->self.controller, "Unlink after no-IRQ? "
1137 "Controller is probably using the wrong IRQ.\n");
1138 set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
1139 }
1140
1141 urb->status = status;
1142
1143 spin_unlock(&hcd_urb_list_lock);
1144 spin_unlock_irqrestore (&urb->lock, flags);
1145
1146 retval = unlink1 (hcd, urb);
1147 if (retval == 0) 1246 if (retval == 0)
1148 retval = -EINPROGRESS; 1247 retval = -EINPROGRESS;
1149 return retval; 1248 else if (retval != -EIDRM && retval != -EBUSY)
1150 1249 dev_dbg(&urb->dev->dev, "hcd_unlink_urb %p fail %d\n",
1151done: 1250 urb, retval);
1152 spin_unlock(&hcd_urb_list_lock);
1153 spin_unlock_irqrestore (&urb->lock, flags);
1154 if (retval != -EIDRM && sys && sys->driver)
1155 dev_dbg (sys, "hcd_unlink_urb %p fail %d\n", urb, retval);
1156 return retval; 1251 return retval;
1157} 1252}
1158 1253
@@ -1162,6 +1257,7 @@ done:
1162 * usb_hcd_giveback_urb - return URB from HCD to device driver 1257 * usb_hcd_giveback_urb - return URB from HCD to device driver
1163 * @hcd: host controller returning the URB 1258 * @hcd: host controller returning the URB
1164 * @urb: urb being returned to the USB device driver. 1259 * @urb: urb being returned to the USB device driver.
1260 * @status: completion status code for the URB.
1165 * Context: in_interrupt() 1261 * Context: in_interrupt()
1166 * 1262 *
1167 * This hands the URB from HCD to its USB device driver, using its 1263 * This hands the URB from HCD to its USB device driver, using its
@@ -1169,14 +1265,27 @@ done:
1169 * (and is done using urb->hcpriv). It also released all HCD locks; 1265 * (and is done using urb->hcpriv). It also released all HCD locks;
1170 * the device driver won't cause problems if it frees, modifies, 1266 * the device driver won't cause problems if it frees, modifies,
1171 * or resubmits this URB. 1267 * or resubmits this URB.
1268 *
1269 * If @urb was unlinked, the value of @status will be overridden by
1270 * @urb->unlinked. Erroneous short transfers are detected in case
1271 * the HCD hasn't checked for them.
1172 */ 1272 */
1173void usb_hcd_giveback_urb (struct usb_hcd *hcd, struct urb *urb) 1273void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
1174{ 1274{
1175 urb_unlink(hcd, urb); 1275 urb->hcpriv = NULL;
1176 usbmon_urb_complete (&hcd->self, urb); 1276 if (unlikely(urb->unlinked))
1277 status = urb->unlinked;
1278 else if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
1279 urb->actual_length < urb->transfer_buffer_length &&
1280 !status))
1281 status = -EREMOTEIO;
1282
1283 unmap_urb_for_dma(hcd, urb);
1284 usbmon_urb_complete(&hcd->self, urb, status);
1177 usb_unanchor_urb(urb); 1285 usb_unanchor_urb(urb);
1178 1286
1179 /* pass ownership to the completion handler */ 1287 /* pass ownership to the completion handler */
1288 urb->status = status;
1180 urb->complete (urb); 1289 urb->complete (urb);
1181 atomic_dec (&urb->use_count); 1290 atomic_dec (&urb->use_count);
1182 if (unlikely (urb->reject)) 1291 if (unlikely (urb->reject))
@@ -1187,78 +1296,61 @@ EXPORT_SYMBOL (usb_hcd_giveback_urb);
1187 1296
1188/*-------------------------------------------------------------------------*/ 1297/*-------------------------------------------------------------------------*/
1189 1298
1190/* disables the endpoint: cancels any pending urbs, then synchronizes with 1299/* Cancel all URBs pending on this endpoint and wait for the endpoint's
1191 * the hcd to make sure all endpoint state is gone from hardware, and then 1300 * queue to drain completely. The caller must first insure that no more
1192 * waits until the endpoint's queue is completely drained. use for 1301 * URBs can be submitted for this endpoint.
1193 * set_configuration, set_interface, driver removal, physical disconnect.
1194 *
1195 * example: a qh stored in ep->hcpriv, holding state related to endpoint
1196 * type, maxpacket size, toggle, halt status, and scheduling.
1197 */ 1302 */
1198void usb_hcd_endpoint_disable (struct usb_device *udev, 1303void usb_hcd_flush_endpoint(struct usb_device *udev,
1199 struct usb_host_endpoint *ep) 1304 struct usb_host_endpoint *ep)
1200{ 1305{
1201 struct usb_hcd *hcd; 1306 struct usb_hcd *hcd;
1202 struct urb *urb; 1307 struct urb *urb;
1203 1308
1309 if (!ep)
1310 return;
1311 might_sleep();
1204 hcd = bus_to_hcd(udev->bus); 1312 hcd = bus_to_hcd(udev->bus);
1205 local_irq_disable ();
1206 1313
1207 /* ep is already gone from udev->ep_{in,out}[]; no more submits */ 1314 /* No more submits can occur */
1208rescan: 1315rescan:
1209 spin_lock(&hcd_urb_list_lock); 1316 spin_lock_irq(&hcd_urb_list_lock);
1210 list_for_each_entry (urb, &ep->urb_list, urb_list) { 1317 list_for_each_entry (urb, &ep->urb_list, urb_list) {
1211 int tmp; 1318 int is_in;
1212 1319
1213 /* the urb may already have been unlinked */ 1320 if (urb->unlinked)
1214 if (urb->status != -EINPROGRESS)
1215 continue; 1321 continue;
1216 usb_get_urb (urb); 1322 usb_get_urb (urb);
1323 is_in = usb_urb_dir_in(urb);
1217 spin_unlock(&hcd_urb_list_lock); 1324 spin_unlock(&hcd_urb_list_lock);
1218 1325
1219 spin_lock (&urb->lock); 1326 /* kick hcd */
1220 tmp = urb->status; 1327 unlink1(hcd, urb, -ESHUTDOWN);
1221 if (tmp == -EINPROGRESS) 1328 dev_dbg (hcd->self.controller,
1222 urb->status = -ESHUTDOWN; 1329 "shutdown urb %p ep%d%s%s\n",
1223 spin_unlock (&urb->lock); 1330 urb, usb_endpoint_num(&ep->desc),
1224 1331 is_in ? "in" : "out",
1225 /* kick hcd unless it's already returning this */ 1332 ({ char *s;
1226 if (tmp == -EINPROGRESS) { 1333
1227 tmp = urb->pipe; 1334 switch (usb_endpoint_type(&ep->desc)) {
1228 unlink1 (hcd, urb); 1335 case USB_ENDPOINT_XFER_CONTROL:
1229 dev_dbg (hcd->self.controller, 1336 s = ""; break;
1230 "shutdown urb %p pipe %08x ep%d%s%s\n", 1337 case USB_ENDPOINT_XFER_BULK:
1231 urb, tmp, usb_pipeendpoint (tmp), 1338 s = "-bulk"; break;
1232 (tmp & USB_DIR_IN) ? "in" : "out", 1339 case USB_ENDPOINT_XFER_INT:
1233 ({ char *s; \ 1340 s = "-intr"; break;
1234 switch (usb_pipetype (tmp)) { \ 1341 default:
1235 case PIPE_CONTROL: s = ""; break; \ 1342 s = "-iso"; break;
1236 case PIPE_BULK: s = "-bulk"; break; \ 1343 };
1237 case PIPE_INTERRUPT: s = "-intr"; break; \ 1344 s;
1238 default: s = "-iso"; break; \ 1345 }));
1239 }; s;}));
1240 }
1241 usb_put_urb (urb); 1346 usb_put_urb (urb);
1242 1347
1243 /* list contents may have changed */ 1348 /* list contents may have changed */
1244 goto rescan; 1349 goto rescan;
1245 } 1350 }
1246 spin_unlock(&hcd_urb_list_lock); 1351 spin_unlock_irq(&hcd_urb_list_lock);
1247 local_irq_enable ();
1248
1249 /* synchronize with the hardware, so old configuration state
1250 * clears out immediately (and will be freed).
1251 */
1252 might_sleep ();
1253 if (hcd->driver->endpoint_disable)
1254 hcd->driver->endpoint_disable (hcd, ep);
1255 1352
1256 /* Wait until the endpoint queue is completely empty. Most HCDs 1353 /* Wait until the endpoint queue is completely empty */
1257 * will have done this already in their endpoint_disable method,
1258 * but some might not. And there could be root-hub control URBs
1259 * still pending since they aren't affected by the HCDs'
1260 * endpoint_disable methods.
1261 */
1262 while (!list_empty (&ep->urb_list)) { 1354 while (!list_empty (&ep->urb_list)) {
1263 spin_lock_irq(&hcd_urb_list_lock); 1355 spin_lock_irq(&hcd_urb_list_lock);
1264 1356
@@ -1278,6 +1370,25 @@ rescan:
1278 } 1370 }
1279} 1371}
1280 1372
1373/* Disables the endpoint: synchronizes with the hcd to make sure all
1374 * endpoint state is gone from hardware. usb_hcd_flush_endpoint() must
1375 * have been called previously. Use for set_configuration, set_interface,
1376 * driver removal, physical disconnect.
1377 *
1378 * example: a qh stored in ep->hcpriv, holding state related to endpoint
1379 * type, maxpacket size, toggle, halt status, and scheduling.
1380 */
1381void usb_hcd_disable_endpoint(struct usb_device *udev,
1382 struct usb_host_endpoint *ep)
1383{
1384 struct usb_hcd *hcd;
1385
1386 might_sleep();
1387 hcd = bus_to_hcd(udev->bus);
1388 if (hcd->driver->endpoint_disable)
1389 hcd->driver->endpoint_disable(hcd, ep);
1390}
1391
1281/*-------------------------------------------------------------------------*/ 1392/*-------------------------------------------------------------------------*/
1282 1393
1283/* called in any context */ 1394/* called in any context */
@@ -1525,7 +1636,6 @@ struct usb_hcd *usb_create_hcd (const struct hc_driver *driver,
1525 hcd->driver = driver; 1636 hcd->driver = driver;
1526 hcd->product_desc = (driver->product_desc) ? driver->product_desc : 1637 hcd->product_desc = (driver->product_desc) ? driver->product_desc :
1527 "USB Host Controller"; 1638 "USB Host Controller";
1528
1529 return hcd; 1639 return hcd;
1530} 1640}
1531EXPORT_SYMBOL (usb_create_hcd); 1641EXPORT_SYMBOL (usb_create_hcd);
@@ -1570,6 +1680,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
1570 1680
1571 dev_info(hcd->self.controller, "%s\n", hcd->product_desc); 1681 dev_info(hcd->self.controller, "%s\n", hcd->product_desc);
1572 1682
1683 hcd->authorized_default = hcd->wireless? 0 : 1;
1573 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 1684 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
1574 1685
1575 /* HC is in reset state, but accessible. Now do the one-time init, 1686 /* HC is in reset state, but accessible. Now do the one-time init,
@@ -1646,10 +1757,20 @@ int usb_add_hcd(struct usb_hcd *hcd,
1646 if ((retval = register_root_hub(hcd)) != 0) 1757 if ((retval = register_root_hub(hcd)) != 0)
1647 goto err_register_root_hub; 1758 goto err_register_root_hub;
1648 1759
1760 retval = sysfs_create_group(&rhdev->dev.kobj, &usb_bus_attr_group);
1761 if (retval < 0) {
1762 printk(KERN_ERR "Cannot register USB bus sysfs attributes: %d\n",
1763 retval);
1764 goto error_create_attr_group;
1765 }
1649 if (hcd->uses_new_polling && hcd->poll_rh) 1766 if (hcd->uses_new_polling && hcd->poll_rh)
1650 usb_hcd_poll_rh_status(hcd); 1767 usb_hcd_poll_rh_status(hcd);
1651 return retval; 1768 return retval;
1652 1769
1770error_create_attr_group:
1771 mutex_lock(&usb_bus_list_lock);
1772 usb_disconnect(&hcd->self.root_hub);
1773 mutex_unlock(&usb_bus_list_lock);
1653err_register_root_hub: 1774err_register_root_hub:
1654 hcd->driver->stop(hcd); 1775 hcd->driver->stop(hcd);
1655err_hcd_driver_start: 1776err_hcd_driver_start:
@@ -1691,6 +1812,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
1691 cancel_work_sync(&hcd->wakeup_work); 1812 cancel_work_sync(&hcd->wakeup_work);
1692#endif 1813#endif
1693 1814
1815 sysfs_remove_group(&hcd->self.root_hub->dev.kobj, &usb_bus_attr_group);
1694 mutex_lock(&usb_bus_list_lock); 1816 mutex_lock(&usb_bus_list_lock);
1695 usb_disconnect(&hcd->self.root_hub); 1817 usb_disconnect(&hcd->self.root_hub);
1696 mutex_unlock(&usb_bus_list_lock); 1818 mutex_unlock(&usb_bus_list_lock);
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index b5ebb73c23..98e24194a4 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -19,6 +19,8 @@
19 19
20#ifdef __KERNEL__ 20#ifdef __KERNEL__
21 21
22#include <linux/rwsem.h>
23
22/* This file contains declarations of usbcore internals that are mostly 24/* This file contains declarations of usbcore internals that are mostly
23 * used or exposed by Host Controller Drivers. 25 * used or exposed by Host Controller Drivers.
24 */ 26 */
@@ -51,6 +53,12 @@
51 * 53 *
52 * Since "struct usb_bus" is so thin, you can't share much code in it. 54 * Since "struct usb_bus" is so thin, you can't share much code in it.
53 * This framework is a layer over that, and should be more sharable. 55 * This framework is a layer over that, and should be more sharable.
56 *
57 * @authorized_default: Specifies if new devices are authorized to
58 * connect by default or they require explicit
59 * user space authorization; this bit is settable
60 * through /sys/class/usb_host/X/authorized_default.
61 * For the rest is RO, so we don't lock to r/w it.
54 */ 62 */
55 63
56/*-------------------------------------------------------------------------*/ 64/*-------------------------------------------------------------------------*/
@@ -90,6 +98,7 @@ struct usb_hcd {
90 unsigned poll_rh:1; /* poll for rh status? */ 98 unsigned poll_rh:1; /* poll for rh status? */
91 unsigned poll_pending:1; /* status has changed? */ 99 unsigned poll_pending:1; /* status has changed? */
92 unsigned wireless:1; /* Wireless USB HCD */ 100 unsigned wireless:1; /* Wireless USB HCD */
101 unsigned authorized_default:1;
93 102
94 int irq; /* irq allocated */ 103 int irq; /* irq allocated */
95 void __iomem *regs; /* device memory/io */ 104 void __iomem *regs; /* device memory/io */
@@ -182,11 +191,10 @@ struct hc_driver {
182 int (*get_frame_number) (struct usb_hcd *hcd); 191 int (*get_frame_number) (struct usb_hcd *hcd);
183 192
184 /* manage i/o requests, device state */ 193 /* manage i/o requests, device state */
185 int (*urb_enqueue) (struct usb_hcd *hcd, 194 int (*urb_enqueue)(struct usb_hcd *hcd,
186 struct usb_host_endpoint *ep, 195 struct urb *urb, gfp_t mem_flags);
187 struct urb *urb, 196 int (*urb_dequeue)(struct usb_hcd *hcd,
188 gfp_t mem_flags); 197 struct urb *urb, int status);
189 int (*urb_dequeue) (struct usb_hcd *hcd, struct urb *urb);
190 198
191 /* hw synch, freeing endpoint resources that urb_dequeue can't */ 199 /* hw synch, freeing endpoint resources that urb_dequeue can't */
192 void (*endpoint_disable)(struct usb_hcd *hcd, 200 void (*endpoint_disable)(struct usb_hcd *hcd,
@@ -204,10 +212,18 @@ struct hc_driver {
204 /* Needed only if port-change IRQs are level-triggered */ 212 /* Needed only if port-change IRQs are level-triggered */
205}; 213};
206 214
215extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
216extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
217 int status);
218extern void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb);
219
207extern int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags); 220extern int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags);
208extern int usb_hcd_unlink_urb (struct urb *urb, int status); 221extern int usb_hcd_unlink_urb (struct urb *urb, int status);
209extern void usb_hcd_giveback_urb (struct usb_hcd *hcd, struct urb *urb); 222extern void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb,
210extern void usb_hcd_endpoint_disable (struct usb_device *udev, 223 int status);
224extern void usb_hcd_flush_endpoint(struct usb_device *udev,
225 struct usb_host_endpoint *ep);
226extern void usb_hcd_disable_endpoint(struct usb_device *udev,
211 struct usb_host_endpoint *ep); 227 struct usb_host_endpoint *ep);
212extern int usb_hcd_get_frame_number (struct usb_device *udev); 228extern int usb_hcd_get_frame_number (struct usb_device *udev);
213 229
@@ -402,7 +418,7 @@ static inline void usbfs_cleanup(void) { }
402struct usb_mon_operations { 418struct usb_mon_operations {
403 void (*urb_submit)(struct usb_bus *bus, struct urb *urb); 419 void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
404 void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err); 420 void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
405 void (*urb_complete)(struct usb_bus *bus, struct urb *urb); 421 void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
406 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */ 422 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
407}; 423};
408 424
@@ -421,10 +437,11 @@ static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb,
421 (*mon_ops->urb_submit_error)(bus, urb, error); 437 (*mon_ops->urb_submit_error)(bus, urb, error);
422} 438}
423 439
424static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb) 440static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
441 int status)
425{ 442{
426 if (bus->monitored) 443 if (bus->monitored)
427 (*mon_ops->urb_complete)(bus, urb); 444 (*mon_ops->urb_complete)(bus, urb, status);
428} 445}
429 446
430int usb_mon_register(struct usb_mon_operations *ops); 447int usb_mon_register(struct usb_mon_operations *ops);
@@ -435,7 +452,8 @@ void usb_mon_deregister(void);
435static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb) {} 452static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb) {}
436static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb, 453static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb,
437 int error) {} 454 int error) {}
438static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb) {} 455static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
456 int status) {}
439 457
440#endif /* CONFIG_USB_MON */ 458#endif /* CONFIG_USB_MON */
441 459
@@ -454,5 +472,9 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb) {}
454 : (in_interrupt () ? "in_interrupt" : "can sleep")) 472 : (in_interrupt () ? "in_interrupt" : "can sleep"))
455 473
456 474
457#endif /* __KERNEL__ */ 475/* This rwsem is for use only by the hub driver and ehci-hcd.
476 * Nobody else should touch it.
477 */
478extern struct rw_semaphore ehci_cf_port_reset_rwsem;
458 479
480#endif /* __KERNEL__ */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index f7b337feb3..d20cb545a6 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -125,6 +125,12 @@ MODULE_PARM_DESC(use_both_schemes,
125 "try the other device initialization scheme if the " 125 "try the other device initialization scheme if the "
126 "first one fails"); 126 "first one fails");
127 127
128/* Mutual exclusion for EHCI CF initialization. This interferes with
129 * port reset on some companion controllers.
130 */
131DECLARE_RWSEM(ehci_cf_port_reset_rwsem);
132EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
133
128 134
129static inline char *portspeed(int portstatus) 135static inline char *portspeed(int portstatus)
130{ 136{
@@ -347,11 +353,11 @@ void usb_kick_khubd(struct usb_device *hdev)
347static void hub_irq(struct urb *urb) 353static void hub_irq(struct urb *urb)
348{ 354{
349 struct usb_hub *hub = urb->context; 355 struct usb_hub *hub = urb->context;
350 int status; 356 int status = urb->status;
351 int i; 357 int i;
352 unsigned long bits; 358 unsigned long bits;
353 359
354 switch (urb->status) { 360 switch (status) {
355 case -ENOENT: /* synchronous unlink */ 361 case -ENOENT: /* synchronous unlink */
356 case -ECONNRESET: /* async unlink */ 362 case -ECONNRESET: /* async unlink */
357 case -ESHUTDOWN: /* hardware going away */ 363 case -ESHUTDOWN: /* hardware going away */
@@ -359,10 +365,10 @@ static void hub_irq(struct urb *urb)
359 365
360 default: /* presumably an error */ 366 default: /* presumably an error */
361 /* Cause a hub reset after 10 consecutive errors */ 367 /* Cause a hub reset after 10 consecutive errors */
362 dev_dbg (hub->intfdev, "transfer --> %d\n", urb->status); 368 dev_dbg (hub->intfdev, "transfer --> %d\n", status);
363 if ((++hub->nerrors < 10) || hub->error) 369 if ((++hub->nerrors < 10) || hub->error)
364 goto resubmit; 370 goto resubmit;
365 hub->error = urb->status; 371 hub->error = status;
366 /* FALL THROUGH */ 372 /* FALL THROUGH */
367 373
368 /* let khubd handle things */ 374 /* let khubd handle things */
@@ -1220,54 +1226,14 @@ static inline void show_string(struct usb_device *udev, char *id, char *string)
1220#endif 1226#endif
1221 1227
1222/** 1228/**
1223 * usb_new_device - perform initial device setup (usbcore-internal) 1229 * usb_configure_device_otg - FIXME (usbcore-internal)
1224 * @udev: newly addressed device (in ADDRESS state) 1230 * @udev: newly addressed device (in ADDRESS state)
1225 * 1231 *
1226 * This is called with devices which have been enumerated, but not yet 1232 * Do configuration for On-The-Go devices
1227 * configured. The device descriptor is available, but not descriptors
1228 * for any device configuration. The caller must have locked either
1229 * the parent hub (if udev is a normal device) or else the
1230 * usb_bus_list_lock (if udev is a root hub). The parent's pointer to
1231 * udev has already been installed, but udev is not yet visible through
1232 * sysfs or other filesystem code.
1233 *
1234 * It will return if the device is configured properly or not. Zero if
1235 * the interface was registered with the driver core; else a negative
1236 * errno value.
1237 *
1238 * This call is synchronous, and may not be used in an interrupt context.
1239 *
1240 * Only the hub driver or root-hub registrar should ever call this.
1241 */ 1233 */
1242int usb_new_device(struct usb_device *udev) 1234static int usb_configure_device_otg(struct usb_device *udev)
1243{ 1235{
1244 int err; 1236 int err = 0;
1245
1246 /* Determine quirks */
1247 usb_detect_quirks(udev);
1248
1249 err = usb_get_configuration(udev);
1250 if (err < 0) {
1251 dev_err(&udev->dev, "can't read configurations, error %d\n",
1252 err);
1253 goto fail;
1254 }
1255
1256 /* read the standard strings and cache them if present */
1257 udev->product = usb_cache_string(udev, udev->descriptor.iProduct);
1258 udev->manufacturer = usb_cache_string(udev,
1259 udev->descriptor.iManufacturer);
1260 udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
1261
1262 /* Tell the world! */
1263 dev_dbg(&udev->dev, "new device strings: Mfr=%d, Product=%d, "
1264 "SerialNumber=%d\n",
1265 udev->descriptor.iManufacturer,
1266 udev->descriptor.iProduct,
1267 udev->descriptor.iSerialNumber);
1268 show_string(udev, "Product", udev->product);
1269 show_string(udev, "Manufacturer", udev->manufacturer);
1270 show_string(udev, "SerialNumber", udev->serial);
1271 1237
1272#ifdef CONFIG_USB_OTG 1238#ifdef CONFIG_USB_OTG
1273 /* 1239 /*
@@ -1329,8 +1295,82 @@ int usb_new_device(struct usb_device *udev)
1329 err = -ENOTSUPP; 1295 err = -ENOTSUPP;
1330 goto fail; 1296 goto fail;
1331 } 1297 }
1298fail:
1332#endif 1299#endif
1300 return err;
1301}
1302
1303
1304/**
1305 * usb_configure_device - Detect and probe device intfs/otg (usbcore-internal)
1306 * @udev: newly addressed device (in ADDRESS state)
1307 *
1308 * This is only called by usb_new_device() and usb_authorize_device()
1309 * and FIXME -- all comments that apply to them apply here wrt to
1310 * environment.
1311 *
1312 * If the device is WUSB and not authorized, we don't attempt to read
1313 * the string descriptors, as they will be errored out by the device
1314 * until it has been authorized.
1315 */
1316static int usb_configure_device(struct usb_device *udev)
1317{
1318 int err;
1333 1319
1320 if (udev->config == NULL) {
1321 err = usb_get_configuration(udev);
1322 if (err < 0) {
1323 dev_err(&udev->dev, "can't read configurations, error %d\n",
1324 err);
1325 goto fail;
1326 }
1327 }
1328 if (udev->wusb == 1 && udev->authorized == 0) {
1329 udev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL);
1330 udev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL);
1331 udev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL);
1332 }
1333 else {
1334 /* read the standard strings and cache them if present */
1335 udev->product = usb_cache_string(udev, udev->descriptor.iProduct);
1336 udev->manufacturer = usb_cache_string(udev,
1337 udev->descriptor.iManufacturer);
1338 udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
1339 }
1340 err = usb_configure_device_otg(udev);
1341fail:
1342 return err;
1343}
1344
1345
1346/**
1347 * usb_new_device - perform initial device setup (usbcore-internal)
1348 * @udev: newly addressed device (in ADDRESS state)
1349 *
1350 * This is called with devices which have been enumerated, but not yet
1351 * configured. The device descriptor is available, but not descriptors
1352 * for any device configuration. The caller must have locked either
1353 * the parent hub (if udev is a normal device) or else the
1354 * usb_bus_list_lock (if udev is a root hub). The parent's pointer to
1355 * udev has already been installed, but udev is not yet visible through
1356 * sysfs or other filesystem code.
1357 *
1358 * It will return if the device is configured properly or not. Zero if
1359 * the interface was registered with the driver core; else a negative
1360 * errno value.
1361 *
1362 * This call is synchronous, and may not be used in an interrupt context.
1363 *
1364 * Only the hub driver or root-hub registrar should ever call this.
1365 */
1366int usb_new_device(struct usb_device *udev)
1367{
1368 int err;
1369
1370 usb_detect_quirks(udev); /* Determine quirks */
1371 err = usb_configure_device(udev); /* detect & probe dev/intfs */
1372 if (err < 0)
1373 goto fail;
1334 /* export the usbdev device-node for libusb */ 1374 /* export the usbdev device-node for libusb */
1335 udev->dev.devt = MKDEV(USB_DEVICE_MAJOR, 1375 udev->dev.devt = MKDEV(USB_DEVICE_MAJOR,
1336 (((udev->bus->busnum-1) * 128) + (udev->devnum-1))); 1376 (((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
@@ -1346,19 +1386,106 @@ int usb_new_device(struct usb_device *udev)
1346 err = device_add(&udev->dev); 1386 err = device_add(&udev->dev);
1347 if (err) { 1387 if (err) {
1348 dev_err(&udev->dev, "can't device_add, error %d\n", err); 1388 dev_err(&udev->dev, "can't device_add, error %d\n", err);
1349 if (udev->parent)
1350 usb_autosuspend_device(udev->parent);
1351 goto fail; 1389 goto fail;
1352 } 1390 }
1353 1391
1354exit: 1392 /* Tell the world! */
1393 dev_dbg(&udev->dev, "new device strings: Mfr=%d, Product=%d, "
1394 "SerialNumber=%d\n",
1395 udev->descriptor.iManufacturer,
1396 udev->descriptor.iProduct,
1397 udev->descriptor.iSerialNumber);
1398 show_string(udev, "Product", udev->product);
1399 show_string(udev, "Manufacturer", udev->manufacturer);
1400 show_string(udev, "SerialNumber", udev->serial);
1355 return err; 1401 return err;
1356 1402
1357fail: 1403fail:
1358 usb_set_device_state(udev, USB_STATE_NOTATTACHED); 1404 usb_set_device_state(udev, USB_STATE_NOTATTACHED);
1359 goto exit; 1405 return err;
1360} 1406}
1361 1407
1408
1409/**
1410 * Similar to usb_disconnect()
1411 *
1412 * We share a lock (that we have) with device_del(), so we need to
1413 * defer its call.
1414 */
1415int usb_deauthorize_device(struct usb_device *usb_dev)
1416{
1417 unsigned cnt;
1418 usb_lock_device(usb_dev);
1419 if (usb_dev->authorized == 0)
1420 goto out_unauthorized;
1421 usb_dev->authorized = 0;
1422 usb_set_configuration(usb_dev, -1);
1423 usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL);
1424 usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL);
1425 usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL);
1426 kfree(usb_dev->config);
1427 usb_dev->config = NULL;
1428 for (cnt = 0; cnt < usb_dev->descriptor.bNumConfigurations; cnt++)
1429 kfree(usb_dev->rawdescriptors[cnt]);
1430 usb_dev->descriptor.bNumConfigurations = 0;
1431 kfree(usb_dev->rawdescriptors);
1432out_unauthorized:
1433 usb_unlock_device(usb_dev);
1434 return 0;
1435}
1436
1437
1438int usb_authorize_device(struct usb_device *usb_dev)
1439{
1440 int result = 0, c;
1441 usb_lock_device(usb_dev);
1442 if (usb_dev->authorized == 1)
1443 goto out_authorized;
1444 kfree(usb_dev->product);
1445 usb_dev->product = NULL;
1446 kfree(usb_dev->manufacturer);
1447 usb_dev->manufacturer = NULL;
1448 kfree(usb_dev->serial);
1449 usb_dev->serial = NULL;
1450 result = usb_autoresume_device(usb_dev);
1451 if (result < 0) {
1452 dev_err(&usb_dev->dev,
1453 "can't autoresume for authorization: %d\n", result);
1454 goto error_autoresume;
1455 }
1456 result = usb_get_device_descriptor(usb_dev, sizeof(usb_dev->descriptor));
1457 if (result < 0) {
1458 dev_err(&usb_dev->dev, "can't re-read device descriptor for "
1459 "authorization: %d\n", result);
1460 goto error_device_descriptor;
1461 }
1462 usb_dev->authorized = 1;
1463 result = usb_configure_device(usb_dev);
1464 if (result < 0)
1465 goto error_configure;
1466 /* Choose and set the configuration. This registers the interfaces
1467 * with the driver core and lets interface drivers bind to them.
1468 */
1469 c = usb_choose_configuration(usb_dev);
1470 if (c >= 0) {
1471 result = usb_set_configuration(usb_dev, c);
1472 if (result) {
1473 dev_err(&usb_dev->dev,
1474 "can't set config #%d, error %d\n", c, result);
1475 /* This need not be fatal. The user can try to
1476 * set other configurations. */
1477 }
1478 }
1479 dev_info(&usb_dev->dev, "authorized to connect\n");
1480error_configure:
1481error_device_descriptor:
1482error_autoresume:
1483out_authorized:
1484 usb_unlock_device(usb_dev); // complements locktree
1485 return result;
1486}
1487
1488
1362static int hub_port_status(struct usb_hub *hub, int port1, 1489static int hub_port_status(struct usb_hub *hub, int port1,
1363 u16 *status, u16 *change) 1490 u16 *status, u16 *change)
1364{ 1491{
@@ -1460,6 +1587,11 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
1460{ 1587{
1461 int i, status; 1588 int i, status;
1462 1589
1590 /* Block EHCI CF initialization during the port reset.
1591 * Some companion controllers don't like it when they mix.
1592 */
1593 down_read(&ehci_cf_port_reset_rwsem);
1594
1463 /* Reset the port */ 1595 /* Reset the port */
1464 for (i = 0; i < PORT_RESET_TRIES; i++) { 1596 for (i = 0; i < PORT_RESET_TRIES; i++) {
1465 status = set_port_feature(hub->hdev, 1597 status = set_port_feature(hub->hdev,
@@ -1481,6 +1613,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
1481 case 0: 1613 case 0:
1482 /* TRSTRCY = 10 ms; plus some extra */ 1614 /* TRSTRCY = 10 ms; plus some extra */
1483 msleep(10 + 40); 1615 msleep(10 + 40);
1616 udev->devnum = 0; /* Device now at address 0 */
1484 /* FALL THROUGH */ 1617 /* FALL THROUGH */
1485 case -ENOTCONN: 1618 case -ENOTCONN:
1486 case -ENODEV: 1619 case -ENODEV:
@@ -1490,7 +1623,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
1490 usb_set_device_state(udev, status 1623 usb_set_device_state(udev, status
1491 ? USB_STATE_NOTATTACHED 1624 ? USB_STATE_NOTATTACHED
1492 : USB_STATE_DEFAULT); 1625 : USB_STATE_DEFAULT);
1493 return status; 1626 goto done;
1494 } 1627 }
1495 1628
1496 dev_dbg (hub->intfdev, 1629 dev_dbg (hub->intfdev,
@@ -1503,6 +1636,8 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
1503 "Cannot enable port %i. Maybe the USB cable is bad?\n", 1636 "Cannot enable port %i. Maybe the USB cable is bad?\n",
1504 port1); 1637 port1);
1505 1638
1639 done:
1640 up_read(&ehci_cf_port_reset_rwsem);
1506 return status; 1641 return status;
1507} 1642}
1508 1643
@@ -1833,14 +1968,7 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
1833 struct usb_device *udev; 1968 struct usb_device *udev;
1834 1969
1835 udev = hdev->children [port1-1]; 1970 udev = hdev->children [port1-1];
1836 if (udev && msg.event == PM_EVENT_SUSPEND && 1971 if (udev && udev->can_submit) {
1837#ifdef CONFIG_USB_SUSPEND
1838 udev->state != USB_STATE_SUSPENDED
1839#else
1840 udev->dev.power.power_state.event
1841 == PM_EVENT_ON
1842#endif
1843 ) {
1844 if (!hdev->auto_pm) 1972 if (!hdev->auto_pm)
1845 dev_dbg(&intf->dev, "port %d nyet suspended\n", 1973 dev_dbg(&intf->dev, "port %d nyet suspended\n",
1846 port1); 1974 port1);
@@ -1999,26 +2127,27 @@ static void ep0_reinit(struct usb_device *udev)
1999{ 2127{
2000 usb_disable_endpoint(udev, 0 + USB_DIR_IN); 2128 usb_disable_endpoint(udev, 0 + USB_DIR_IN);
2001 usb_disable_endpoint(udev, 0 + USB_DIR_OUT); 2129 usb_disable_endpoint(udev, 0 + USB_DIR_OUT);
2002 udev->ep_in[0] = udev->ep_out[0] = &udev->ep0; 2130 usb_enable_endpoint(udev, &udev->ep0);
2003} 2131}
2004 2132
2005#define usb_sndaddr0pipe() (PIPE_CONTROL << 30) 2133#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
2006#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN) 2134#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
2007 2135
2008static int hub_set_address(struct usb_device *udev) 2136static int hub_set_address(struct usb_device *udev, int devnum)
2009{ 2137{
2010 int retval; 2138 int retval;
2011 2139
2012 if (udev->devnum == 0) 2140 if (devnum <= 1)
2013 return -EINVAL; 2141 return -EINVAL;
2014 if (udev->state == USB_STATE_ADDRESS) 2142 if (udev->state == USB_STATE_ADDRESS)
2015 return 0; 2143 return 0;
2016 if (udev->state != USB_STATE_DEFAULT) 2144 if (udev->state != USB_STATE_DEFAULT)
2017 return -EINVAL; 2145 return -EINVAL;
2018 retval = usb_control_msg(udev, usb_sndaddr0pipe(), 2146 retval = usb_control_msg(udev, usb_sndaddr0pipe(),
2019 USB_REQ_SET_ADDRESS, 0, udev->devnum, 0, 2147 USB_REQ_SET_ADDRESS, 0, devnum, 0,
2020 NULL, 0, USB_CTRL_SET_TIMEOUT); 2148 NULL, 0, USB_CTRL_SET_TIMEOUT);
2021 if (retval == 0) { 2149 if (retval == 0) {
2150 udev->devnum = devnum; /* Device now using proper address */
2022 usb_set_device_state(udev, USB_STATE_ADDRESS); 2151 usb_set_device_state(udev, USB_STATE_ADDRESS);
2023 ep0_reinit(udev); 2152 ep0_reinit(udev);
2024 } 2153 }
@@ -2045,6 +2174,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2045 unsigned delay = HUB_SHORT_RESET_TIME; 2174 unsigned delay = HUB_SHORT_RESET_TIME;
2046 enum usb_device_speed oldspeed = udev->speed; 2175 enum usb_device_speed oldspeed = udev->speed;
2047 char *speed, *type; 2176 char *speed, *type;
2177 int devnum = udev->devnum;
2048 2178
2049 /* root hub ports have a slightly longer reset period 2179 /* root hub ports have a slightly longer reset period
2050 * (from USB 2.0 spec, section 7.1.7.5) 2180 * (from USB 2.0 spec, section 7.1.7.5)
@@ -2074,7 +2204,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2074 goto fail; 2204 goto fail;
2075 } 2205 }
2076 oldspeed = udev->speed; 2206 oldspeed = udev->speed;
2077 2207
2078 /* USB 2.0 section 5.5.3 talks about ep0 maxpacket ... 2208 /* USB 2.0 section 5.5.3 talks about ep0 maxpacket ...
2079 * it's fixed size except for full speed devices. 2209 * it's fixed size except for full speed devices.
2080 * For Wireless USB devices, ep0 max packet is always 512 (tho 2210 * For Wireless USB devices, ep0 max packet is always 512 (tho
@@ -2115,7 +2245,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2115 dev_info (&udev->dev, 2245 dev_info (&udev->dev,
2116 "%s %s speed %sUSB device using %s and address %d\n", 2246 "%s %s speed %sUSB device using %s and address %d\n",
2117 (udev->config) ? "reset" : "new", speed, type, 2247 (udev->config) ? "reset" : "new", speed, type,
2118 udev->bus->controller->driver->name, udev->devnum); 2248 udev->bus->controller->driver->name, devnum);
2119 2249
2120 /* Set up TT records, if needed */ 2250 /* Set up TT records, if needed */
2121 if (hdev->tt) { 2251 if (hdev->tt) {
@@ -2202,7 +2332,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2202 } 2332 }
2203 2333
2204 for (j = 0; j < SET_ADDRESS_TRIES; ++j) { 2334 for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
2205 retval = hub_set_address(udev); 2335 retval = hub_set_address(udev, devnum);
2206 if (retval >= 0) 2336 if (retval >= 0)
2207 break; 2337 break;
2208 msleep(200); 2338 msleep(200);
@@ -2210,7 +2340,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2210 if (retval < 0) { 2340 if (retval < 0) {
2211 dev_err(&udev->dev, 2341 dev_err(&udev->dev,
2212 "device not accepting address %d, error %d\n", 2342 "device not accepting address %d, error %d\n",
2213 udev->devnum, retval); 2343 devnum, retval);
2214 goto fail; 2344 goto fail;
2215 } 2345 }
2216 2346
@@ -2263,8 +2393,10 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2263 retval = 0; 2393 retval = 0;
2264 2394
2265fail: 2395fail:
2266 if (retval) 2396 if (retval) {
2267 hub_port_disable(hub, port1, 0); 2397 hub_port_disable(hub, port1, 0);
2398 udev->devnum = devnum; /* for disconnect processing */
2399 }
2268 mutex_unlock(&usb_address0_mutex); 2400 mutex_unlock(&usb_address0_mutex);
2269 return retval; 2401 return retval;
2270} 2402}
@@ -2699,9 +2831,9 @@ static void hub_events(void)
2699 clear_hub_feature(hdev, C_HUB_LOCAL_POWER); 2831 clear_hub_feature(hdev, C_HUB_LOCAL_POWER);
2700 if (hubstatus & HUB_STATUS_LOCAL_POWER) 2832 if (hubstatus & HUB_STATUS_LOCAL_POWER)
2701 /* FIXME: Is this always true? */ 2833 /* FIXME: Is this always true? */
2702 hub->limited_power = 0;
2703 else
2704 hub->limited_power = 1; 2834 hub->limited_power = 1;
2835 else
2836 hub->limited_power = 0;
2705 } 2837 }
2706 if (hubchange & HUB_CHANGE_OVERCURRENT) { 2838 if (hubchange & HUB_CHANGE_OVERCURRENT) {
2707 dev_dbg (hub_dev, "overcurrent change\n"); 2839 dev_dbg (hub_dev, "overcurrent change\n");
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index d8f7b089a8..c021af3903 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -59,8 +59,8 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length)
59 dev_dbg(&urb->dev->dev, 59 dev_dbg(&urb->dev->dev,
60 "%s timed out on ep%d%s len=%d/%d\n", 60 "%s timed out on ep%d%s len=%d/%d\n",
61 current->comm, 61 current->comm,
62 usb_pipeendpoint(urb->pipe), 62 usb_endpoint_num(&urb->ep->desc),
63 usb_pipein(urb->pipe) ? "in" : "out", 63 usb_urb_dir_in(urb) ? "in" : "out",
64 urb->actual_length, 64 urb->actual_length,
65 urb->transfer_buffer_length); 65 urb->transfer_buffer_length);
66 } else 66 } else
@@ -250,7 +250,8 @@ static void sg_clean (struct usb_sg_request *io)
250 io->urbs = NULL; 250 io->urbs = NULL;
251 } 251 }
252 if (io->dev->dev.dma_mask != NULL) 252 if (io->dev->dev.dma_mask != NULL)
253 usb_buffer_unmap_sg (io->dev, io->pipe, io->sg, io->nents); 253 usb_buffer_unmap_sg (io->dev, usb_pipein(io->pipe),
254 io->sg, io->nents);
254 io->dev = NULL; 255 io->dev = NULL;
255} 256}
256 257
@@ -278,8 +279,8 @@ static void sg_complete (struct urb *urb)
278 dev_err (io->dev->bus->controller, 279 dev_err (io->dev->bus->controller,
279 "dev %s ep%d%s scatterlist error %d/%d\n", 280 "dev %s ep%d%s scatterlist error %d/%d\n",
280 io->dev->devpath, 281 io->dev->devpath,
281 usb_pipeendpoint (urb->pipe), 282 usb_endpoint_num(&urb->ep->desc),
282 usb_pipein (urb->pipe) ? "in" : "out", 283 usb_urb_dir_in(urb) ? "in" : "out",
283 status, io->status); 284 status, io->status);
284 // BUG (); 285 // BUG ();
285 } 286 }
@@ -379,7 +380,8 @@ int usb_sg_init (
379 */ 380 */
380 dma = (dev->dev.dma_mask != NULL); 381 dma = (dev->dev.dma_mask != NULL);
381 if (dma) 382 if (dma)
382 io->entries = usb_buffer_map_sg (dev, pipe, sg, nents); 383 io->entries = usb_buffer_map_sg(dev, usb_pipein(pipe),
384 sg, nents);
383 else 385 else
384 io->entries = nents; 386 io->entries = nents;
385 387
@@ -1013,8 +1015,11 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr)
1013 ep = dev->ep_in[epnum]; 1015 ep = dev->ep_in[epnum];
1014 dev->ep_in[epnum] = NULL; 1016 dev->ep_in[epnum] = NULL;
1015 } 1017 }
1016 if (ep && dev->bus) 1018 if (ep) {
1017 usb_hcd_endpoint_disable(dev, ep); 1019 ep->enabled = 0;
1020 usb_hcd_flush_endpoint(dev, ep);
1021 usb_hcd_disable_endpoint(dev, ep);
1022 }
1018} 1023}
1019 1024
1020/** 1025/**
@@ -1096,23 +1101,21 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
1096 * Resets the endpoint toggle, and sets dev->ep_{in,out} pointers. 1101 * Resets the endpoint toggle, and sets dev->ep_{in,out} pointers.
1097 * For control endpoints, both the input and output sides are handled. 1102 * For control endpoints, both the input and output sides are handled.
1098 */ 1103 */
1099static void 1104void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep)
1100usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep)
1101{ 1105{
1102 unsigned int epaddr = ep->desc.bEndpointAddress; 1106 int epnum = usb_endpoint_num(&ep->desc);
1103 unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK; 1107 int is_out = usb_endpoint_dir_out(&ep->desc);
1104 int is_control; 1108 int is_control = usb_endpoint_xfer_control(&ep->desc);
1105 1109
1106 is_control = ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) 1110 if (is_out || is_control) {
1107 == USB_ENDPOINT_XFER_CONTROL);
1108 if (usb_endpoint_out(epaddr) || is_control) {
1109 usb_settoggle(dev, epnum, 1, 0); 1111 usb_settoggle(dev, epnum, 1, 0);
1110 dev->ep_out[epnum] = ep; 1112 dev->ep_out[epnum] = ep;
1111 } 1113 }
1112 if (!usb_endpoint_out(epaddr) || is_control) { 1114 if (!is_out || is_control) {
1113 usb_settoggle(dev, epnum, 0, 0); 1115 usb_settoggle(dev, epnum, 0, 0);
1114 dev->ep_in[epnum] = ep; 1116 dev->ep_in[epnum] = ep;
1115 } 1117 }
1118 ep->enabled = 1;
1116} 1119}
1117 1120
1118/* 1121/*
@@ -1171,6 +1174,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
1171 struct usb_host_interface *alt; 1174 struct usb_host_interface *alt;
1172 int ret; 1175 int ret;
1173 int manual = 0; 1176 int manual = 0;
1177 int changed;
1174 1178
1175 if (dev->state == USB_STATE_SUSPENDED) 1179 if (dev->state == USB_STATE_SUSPENDED)
1176 return -EHOSTUNREACH; 1180 return -EHOSTUNREACH;
@@ -1210,7 +1214,8 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
1210 */ 1214 */
1211 1215
1212 /* prevent submissions using previous endpoint settings */ 1216 /* prevent submissions using previous endpoint settings */
1213 if (device_is_registered(&iface->dev)) 1217 changed = (iface->cur_altsetting != alt);
1218 if (changed && device_is_registered(&iface->dev))
1214 usb_remove_sysfs_intf_files(iface); 1219 usb_remove_sysfs_intf_files(iface);
1215 usb_disable_interface(dev, iface); 1220 usb_disable_interface(dev, iface);
1216 1221
@@ -1247,7 +1252,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
1247 * (Likewise, EP0 never "halts" on well designed devices.) 1252 * (Likewise, EP0 never "halts" on well designed devices.)
1248 */ 1253 */
1249 usb_enable_interface(dev, iface); 1254 usb_enable_interface(dev, iface);
1250 if (device_is_registered(&iface->dev)) 1255 if (changed && device_is_registered(&iface->dev))
1251 usb_create_sysfs_intf_files(iface); 1256 usb_create_sysfs_intf_files(iface);
1252 1257
1253 return 0; 1258 return 0;
@@ -1328,7 +1333,7 @@ int usb_reset_configuration(struct usb_device *dev)
1328 return 0; 1333 return 0;
1329} 1334}
1330 1335
1331void usb_release_interface(struct device *dev) 1336static void usb_release_interface(struct device *dev)
1332{ 1337{
1333 struct usb_interface *intf = to_usb_interface(dev); 1338 struct usb_interface *intf = to_usb_interface(dev);
1334 struct usb_interface_cache *intfc = 1339 struct usb_interface_cache *intfc =
@@ -1339,14 +1344,11 @@ void usb_release_interface(struct device *dev)
1339} 1344}
1340 1345
1341#ifdef CONFIG_HOTPLUG 1346#ifdef CONFIG_HOTPLUG
1342static int usb_if_uevent(struct device *dev, char **envp, int num_envp, 1347static int usb_if_uevent(struct device *dev, struct kobj_uevent_env *env)
1343 char *buffer, int buffer_size)
1344{ 1348{
1345 struct usb_device *usb_dev; 1349 struct usb_device *usb_dev;
1346 struct usb_interface *intf; 1350 struct usb_interface *intf;
1347 struct usb_host_interface *alt; 1351 struct usb_host_interface *alt;
1348 int i = 0;
1349 int length = 0;
1350 1352
1351 if (!dev) 1353 if (!dev)
1352 return -ENODEV; 1354 return -ENODEV;
@@ -1359,39 +1361,30 @@ static int usb_if_uevent(struct device *dev, char **envp, int num_envp,
1359 alt = intf->cur_altsetting; 1361 alt = intf->cur_altsetting;
1360 1362
1361#ifdef CONFIG_USB_DEVICEFS 1363#ifdef CONFIG_USB_DEVICEFS
1362 if (add_uevent_var(envp, num_envp, &i, 1364 if (add_uevent_var(env, "DEVICE=/proc/bus/usb/%03d/%03d",
1363 buffer, buffer_size, &length,
1364 "DEVICE=/proc/bus/usb/%03d/%03d",
1365 usb_dev->bus->busnum, usb_dev->devnum)) 1365 usb_dev->bus->busnum, usb_dev->devnum))
1366 return -ENOMEM; 1366 return -ENOMEM;
1367#endif 1367#endif
1368 1368
1369 if (add_uevent_var(envp, num_envp, &i, 1369 if (add_uevent_var(env, "PRODUCT=%x/%x/%x",
1370 buffer, buffer_size, &length,
1371 "PRODUCT=%x/%x/%x",
1372 le16_to_cpu(usb_dev->descriptor.idVendor), 1370 le16_to_cpu(usb_dev->descriptor.idVendor),
1373 le16_to_cpu(usb_dev->descriptor.idProduct), 1371 le16_to_cpu(usb_dev->descriptor.idProduct),
1374 le16_to_cpu(usb_dev->descriptor.bcdDevice))) 1372 le16_to_cpu(usb_dev->descriptor.bcdDevice)))
1375 return -ENOMEM; 1373 return -ENOMEM;
1376 1374
1377 if (add_uevent_var(envp, num_envp, &i, 1375 if (add_uevent_var(env, "TYPE=%d/%d/%d",
1378 buffer, buffer_size, &length,
1379 "TYPE=%d/%d/%d",
1380 usb_dev->descriptor.bDeviceClass, 1376 usb_dev->descriptor.bDeviceClass,
1381 usb_dev->descriptor.bDeviceSubClass, 1377 usb_dev->descriptor.bDeviceSubClass,
1382 usb_dev->descriptor.bDeviceProtocol)) 1378 usb_dev->descriptor.bDeviceProtocol))
1383 return -ENOMEM; 1379 return -ENOMEM;
1384 1380
1385 if (add_uevent_var(envp, num_envp, &i, 1381 if (add_uevent_var(env, "INTERFACE=%d/%d/%d",
1386 buffer, buffer_size, &length,
1387 "INTERFACE=%d/%d/%d",
1388 alt->desc.bInterfaceClass, 1382 alt->desc.bInterfaceClass,
1389 alt->desc.bInterfaceSubClass, 1383 alt->desc.bInterfaceSubClass,
1390 alt->desc.bInterfaceProtocol)) 1384 alt->desc.bInterfaceProtocol))
1391 return -ENOMEM; 1385 return -ENOMEM;
1392 1386
1393 if (add_uevent_var(envp, num_envp, &i, 1387 if (add_uevent_var(env,
1394 buffer, buffer_size, &length,
1395 "MODALIAS=usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02Xic%02Xisc%02Xip%02X", 1388 "MODALIAS=usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02Xic%02Xisc%02Xip%02X",
1396 le16_to_cpu(usb_dev->descriptor.idVendor), 1389 le16_to_cpu(usb_dev->descriptor.idVendor),
1397 le16_to_cpu(usb_dev->descriptor.idProduct), 1390 le16_to_cpu(usb_dev->descriptor.idProduct),
@@ -1404,14 +1397,12 @@ static int usb_if_uevent(struct device *dev, char **envp, int num_envp,
1404 alt->desc.bInterfaceProtocol)) 1397 alt->desc.bInterfaceProtocol))
1405 return -ENOMEM; 1398 return -ENOMEM;
1406 1399
1407 envp[i] = NULL;
1408 return 0; 1400 return 0;
1409} 1401}
1410 1402
1411#else 1403#else
1412 1404
1413static int usb_if_uevent(struct device *dev, char **envp, 1405static int usb_if_uevent(struct device *dev, struct kobj_uevent_env *env)
1414 int num_envp, char *buffer, int buffer_size)
1415{ 1406{
1416 return -ENODEV; 1407 return -ENODEV;
1417} 1408}
@@ -1481,6 +1472,9 @@ static struct usb_interface_assoc_descriptor *find_iad(struct usb_device *dev,
1481 * channels are available independently; and choosing between open 1472 * channels are available independently; and choosing between open
1482 * standard device protocols (like CDC) or proprietary ones. 1473 * standard device protocols (like CDC) or proprietary ones.
1483 * 1474 *
1475 * Note that a non-authorized device (dev->authorized == 0) will only
1476 * be put in unconfigured mode.
1477 *
1484 * Note that USB has an additional level of device configurability, 1478 * Note that USB has an additional level of device configurability,
1485 * associated with interfaces. That configurability is accessed using 1479 * associated with interfaces. That configurability is accessed using
1486 * usb_set_interface(). 1480 * usb_set_interface().
@@ -1502,7 +1496,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
1502 struct usb_interface **new_interfaces = NULL; 1496 struct usb_interface **new_interfaces = NULL;
1503 int n, nintf; 1497 int n, nintf;
1504 1498
1505 if (configuration == -1) 1499 if (dev->authorized == 0 || configuration == -1)
1506 configuration = 0; 1500 configuration = 0;
1507 else { 1501 else {
1508 for (i = 0; i < dev->descriptor.bNumConfigurations; i++) { 1502 for (i = 0; i < dev->descriptor.bNumConfigurations; i++) {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index ebf3dc2011..d42c561c75 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -32,52 +32,6 @@ static const struct usb_device_id usb_quirk_list[] = {
32 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, 32 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
33 /* HP 5300/5370C scanner */ 33 /* HP 5300/5370C scanner */
34 { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 }, 34 { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 },
35 /* Hewlett-Packard PhotoSmart 720 / PhotoSmart 935 (storage) */
36 { USB_DEVICE(0x03f0, 0x4002), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
37
38 /* SGS Thomson Microelectronics 4in1 card reader */
39 { USB_DEVICE(0x0483, 0x0321), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
40
41 /* Acer Peripherals Inc. (now BenQ Corp.) Prisa 640BU */
42 { USB_DEVICE(0x04a5, 0x207e), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
43 /* Benq S2W 3300U */
44 { USB_DEVICE(0x04a5, 0x20b0), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
45 /* Canon, Inc. CanoScan N1240U/LiDE30 */
46 { USB_DEVICE(0x04a9, 0x220e), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
47 /* Canon, Inc. CanoScan N650U/N656U */
48 { USB_DEVICE(0x04a9, 0x2206), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
49 /* Canon, Inc. CanoScan 1220U */
50 { USB_DEVICE(0x04a9, 0x2207), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
51 /* Canon, Inc. CanoScan N670U/N676U/LiDE 20 */
52 { USB_DEVICE(0x04a9, 0x220d), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
53 /* old Cannon scanner */
54 { USB_DEVICE(0x04a9, 0x2220), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
55 /* Seiko Epson Corp. Perfection 1200 */
56 { USB_DEVICE(0x04b8, 0x0104), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
57 /* Seiko Epson Corp. Perfection 660 */
58 { USB_DEVICE(0x04b8, 0x0114), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
59 /* Epson Perfection 1260 Photo */
60 { USB_DEVICE(0x04b8, 0x011d), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
61 /* Seiko Epson Corp - Perfection 1670 */
62 { USB_DEVICE(0x04b8, 0x011f), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
63 /* EPSON Perfection 2480 */
64 { USB_DEVICE(0x04b8, 0x0121), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
65 /* Seiko Epson Corp.*/
66 { USB_DEVICE(0x04b8, 0x0122), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
67 /* Samsung ML-2010 printer */
68 { USB_DEVICE(0x04e8, 0x326c), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
69 /* Samsung ML-2510 Series printer */
70 { USB_DEVICE(0x04e8, 0x327e), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
71 /* Elsa MicroLink 56k (V.250) */
72 { USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
73 /* Ultima Electronics Corp.*/
74 { USB_DEVICE(0x05d8, 0x4005), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
75
76 /* Genesys USB-to-IDE */
77 { USB_DEVICE(0x0503, 0x0702), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
78
79 /* USB Graphical LCD - EEH Datalink GmbH */
80 { USB_DEVICE(0x060c, 0x04eb), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
81 35
82 /* INTEL VALUE SSD */ 36 /* INTEL VALUE SSD */
83 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, 37 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -85,44 +39,15 @@ static const struct usb_device_id usb_quirk_list[] = {
85 /* M-Systems Flash Disk Pioneers */ 39 /* M-Systems Flash Disk Pioneers */
86 { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, 40 { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
87 41
88 /* Agfa Snapscan1212u */
89 { USB_DEVICE(0x06bd, 0x2061), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
90 /* Seagate RSS LLC */
91 { USB_DEVICE(0x0bc2, 0x3000), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
92 /* Umax [hex] Astra 3400U */
93 { USB_DEVICE(0x1606, 0x0060), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
94
95 /* Philips PSC805 audio device */ 42 /* Philips PSC805 audio device */
96 { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, 43 { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
97 44
98 /* Alcor multi-card reader */
99 { USB_DEVICE(0x058f, 0x6366), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
100
101 /* Canon EOS 5D in PC Connection mode */
102 { USB_DEVICE(0x04a9, 0x3101), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
103
104 /* RIM Blackberry */
105 { USB_DEVICE(0x0fca, 0x0001), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
106 { USB_DEVICE(0x0fca, 0x0004), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
107 { USB_DEVICE(0x0fca, 0x0006), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
108
109 /* Apple iPhone */
110 { USB_DEVICE(0x05ac, 0x1290), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
111
112 /* SKYMEDI USB_DRIVE */ 45 /* SKYMEDI USB_DRIVE */
113 { USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME }, 46 { USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME },
114 47
115 { } /* terminating entry must be last */ 48 { } /* terminating entry must be last */
116}; 49};
117 50
118static void usb_autosuspend_quirk(struct usb_device *udev)
119{
120#ifdef CONFIG_USB_SUSPEND
121 /* disable autosuspend, but allow the user to re-enable it via sysfs */
122 udev->autosuspend_disabled = 1;
123#endif
124}
125
126static const struct usb_device_id *find_id(struct usb_device *udev) 51static const struct usb_device_id *find_id(struct usb_device *udev)
127{ 52{
128 const struct usb_device_id *id = usb_quirk_list; 53 const struct usb_device_id *id = usb_quirk_list;
@@ -149,13 +74,9 @@ void usb_detect_quirks(struct usb_device *udev)
149 dev_dbg(&udev->dev, "USB quirks for this device: %x\n", 74 dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
150 udev->quirks); 75 udev->quirks);
151 76
152 /* do any special quirk handling here if needed */
153 if (udev->quirks & USB_QUIRK_NO_AUTOSUSPEND)
154 usb_autosuspend_quirk(udev);
155
156 /* By default, disable autosuspend for all non-hubs */ 77 /* By default, disable autosuspend for all non-hubs */
157#ifdef CONFIG_USB_SUSPEND 78#ifdef CONFIG_USB_SUSPEND
158 if (udev->descriptor.bDeviceClass != USB_CLASS_HUB) 79 if (udev->descriptor.bDeviceClass != USB_CLASS_HUB)
159 udev->autosuspend_delay = -1; 80 udev->autosuspend_disabled = 1;
160#endif 81#endif
161} 82}
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 2ab222be8f..b04afd06e5 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -169,6 +169,16 @@ show_quirks(struct device *dev, struct device_attribute *attr, char *buf)
169} 169}
170static DEVICE_ATTR(quirks, S_IRUGO, show_quirks, NULL); 170static DEVICE_ATTR(quirks, S_IRUGO, show_quirks, NULL);
171 171
172static ssize_t
173show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
174{
175 struct usb_device *udev;
176
177 udev = to_usb_device(dev);
178 return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
179}
180static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
181
172 182
173#if defined(CONFIG_USB_PERSIST) || defined(CONFIG_USB_SUSPEND) 183#if defined(CONFIG_USB_PERSIST) || defined(CONFIG_USB_SUSPEND)
174static const char power_group[] = "power"; 184static const char power_group[] = "power";
@@ -413,6 +423,44 @@ usb_descriptor_attr(bDeviceProtocol, "%02x\n")
413usb_descriptor_attr(bNumConfigurations, "%d\n") 423usb_descriptor_attr(bNumConfigurations, "%d\n")
414usb_descriptor_attr(bMaxPacketSize0, "%d\n") 424usb_descriptor_attr(bMaxPacketSize0, "%d\n")
415 425
426
427
428/* show if the device is authorized (1) or not (0) */
429static ssize_t usb_dev_authorized_show(struct device *dev,
430 struct device_attribute *attr,
431 char *buf)
432{
433 struct usb_device *usb_dev = to_usb_device(dev);
434 return snprintf(buf, PAGE_SIZE, "%u\n", usb_dev->authorized);
435}
436
437
438/*
439 * Authorize a device to be used in the system
440 *
441 * Writing a 0 deauthorizes the device, writing a 1 authorizes it.
442 */
443static ssize_t usb_dev_authorized_store(struct device *dev,
444 struct device_attribute *attr,
445 const char *buf, size_t size)
446{
447 ssize_t result;
448 struct usb_device *usb_dev = to_usb_device(dev);
449 unsigned val;
450 result = sscanf(buf, "%u\n", &val);
451 if (result != 1)
452 result = -EINVAL;
453 else if (val == 0)
454 result = usb_deauthorize_device(usb_dev);
455 else
456 result = usb_authorize_device(usb_dev);
457 return result < 0? result : size;
458}
459
460static DEVICE_ATTR(authorized, 0644,
461 usb_dev_authorized_show, usb_dev_authorized_store);
462
463
416static struct attribute *dev_attrs[] = { 464static struct attribute *dev_attrs[] = {
417 /* current configuration's attributes */ 465 /* current configuration's attributes */
418 &dev_attr_configuration.attr, 466 &dev_attr_configuration.attr,
@@ -420,6 +468,7 @@ static struct attribute *dev_attrs[] = {
420 &dev_attr_bConfigurationValue.attr, 468 &dev_attr_bConfigurationValue.attr,
421 &dev_attr_bmAttributes.attr, 469 &dev_attr_bmAttributes.attr,
422 &dev_attr_bMaxPower.attr, 470 &dev_attr_bMaxPower.attr,
471 &dev_attr_urbnum.attr,
423 /* device attributes */ 472 /* device attributes */
424 &dev_attr_idVendor.attr, 473 &dev_attr_idVendor.attr,
425 &dev_attr_idProduct.attr, 474 &dev_attr_idProduct.attr,
@@ -435,6 +484,7 @@ static struct attribute *dev_attrs[] = {
435 &dev_attr_version.attr, 484 &dev_attr_version.attr,
436 &dev_attr_maxchild.attr, 485 &dev_attr_maxchild.attr,
437 &dev_attr_quirks.attr, 486 &dev_attr_quirks.attr,
487 &dev_attr_authorized.attr,
438 NULL, 488 NULL,
439}; 489};
440static struct attribute_group dev_attr_grp = { 490static struct attribute_group dev_attr_grp = {
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index be63022846..c20c03aaf0 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -3,6 +3,7 @@
3#include <linux/bitops.h> 3#include <linux/bitops.h>
4#include <linux/slab.h> 4#include <linux/slab.h>
5#include <linux/init.h> 5#include <linux/init.h>
6#include <linux/log2.h>
6#include <linux/usb.h> 7#include <linux/usb.h>
7#include <linux/wait.h> 8#include <linux/wait.h>
8#include "hcd.h" 9#include "hcd.h"
@@ -38,7 +39,6 @@ void usb_init_urb(struct urb *urb)
38 if (urb) { 39 if (urb) {
39 memset(urb, 0, sizeof(*urb)); 40 memset(urb, 0, sizeof(*urb));
40 kref_init(&urb->kref); 41 kref_init(&urb->kref);
41 spin_lock_init(&urb->lock);
42 INIT_LIST_HEAD(&urb->anchor_list); 42 INIT_LIST_HEAD(&urb->anchor_list);
43 } 43 }
44} 44}
@@ -277,44 +277,58 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
277 */ 277 */
278int usb_submit_urb(struct urb *urb, gfp_t mem_flags) 278int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
279{ 279{
280 int pipe, temp, max; 280 int xfertype, max;
281 struct usb_device *dev; 281 struct usb_device *dev;
282 int is_out; 282 struct usb_host_endpoint *ep;
283 int is_out;
283 284
284 if (!urb || urb->hcpriv || !urb->complete) 285 if (!urb || urb->hcpriv || !urb->complete)
285 return -EINVAL; 286 return -EINVAL;
286 if (!(dev = urb->dev) || 287 if (!(dev = urb->dev) || dev->state < USB_STATE_DEFAULT)
287 (dev->state < USB_STATE_DEFAULT) ||
288 (!dev->bus) || (dev->devnum <= 0))
289 return -ENODEV; 288 return -ENODEV;
290 if (dev->bus->controller->power.power_state.event != PM_EVENT_ON
291 || dev->state == USB_STATE_SUSPENDED)
292 return -EHOSTUNREACH;
293 289
290 /* For now, get the endpoint from the pipe. Eventually drivers
291 * will be required to set urb->ep directly and we will eliminate
292 * urb->pipe.
293 */
294 ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
295 [usb_pipeendpoint(urb->pipe)];
296 if (!ep)
297 return -ENOENT;
298
299 urb->ep = ep;
294 urb->status = -EINPROGRESS; 300 urb->status = -EINPROGRESS;
295 urb->actual_length = 0; 301 urb->actual_length = 0;
296 302
297 /* Lots of sanity checks, so HCDs can rely on clean data 303 /* Lots of sanity checks, so HCDs can rely on clean data
298 * and don't need to duplicate tests 304 * and don't need to duplicate tests
299 */ 305 */
300 pipe = urb->pipe; 306 xfertype = usb_endpoint_type(&ep->desc);
301 temp = usb_pipetype(pipe); 307 if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
302 is_out = usb_pipeout(pipe); 308 struct usb_ctrlrequest *setup =
309 (struct usb_ctrlrequest *) urb->setup_packet;
310
311 if (!setup)
312 return -ENOEXEC;
313 is_out = !(setup->bRequestType & USB_DIR_IN) ||
314 !setup->wLength;
315 } else {
316 is_out = usb_endpoint_dir_out(&ep->desc);
317 }
303 318
304 if (!usb_pipecontrol(pipe) && dev->state < USB_STATE_CONFIGURED) 319 /* Cache the direction for later use */
305 return -ENODEV; 320 urb->transfer_flags = (urb->transfer_flags & ~URB_DIR_MASK) |
321 (is_out ? URB_DIR_OUT : URB_DIR_IN);
306 322
307 /* FIXME there should be a sharable lock protecting us against 323 if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
308 * config/altsetting changes and disconnects, kicking in here. 324 dev->state < USB_STATE_CONFIGURED)
309 * (here == before maxpacket, and eventually endpoint type, 325 return -ENODEV;
310 * checks get made.)
311 */
312 326
313 max = usb_maxpacket(dev, pipe, is_out); 327 max = le16_to_cpu(ep->desc.wMaxPacketSize);
314 if (max <= 0) { 328 if (max <= 0) {
315 dev_dbg(&dev->dev, 329 dev_dbg(&dev->dev,
316 "bogus endpoint ep%d%s in %s (bad maxpacket %d)\n", 330 "bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
317 usb_pipeendpoint(pipe), is_out ? "out" : "in", 331 usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
318 __FUNCTION__, max); 332 __FUNCTION__, max);
319 return -EMSGSIZE; 333 return -EMSGSIZE;
320 } 334 }
@@ -323,7 +337,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
323 * but drivers only control those sizes for ISO. 337 * but drivers only control those sizes for ISO.
324 * while we're checking, initialize return status. 338 * while we're checking, initialize return status.
325 */ 339 */
326 if (temp == PIPE_ISOCHRONOUS) { 340 if (xfertype == USB_ENDPOINT_XFER_ISOC) {
327 int n, len; 341 int n, len;
328 342
329 /* "high bandwidth" mode, 1-3 packets/uframe? */ 343 /* "high bandwidth" mode, 1-3 packets/uframe? */
@@ -358,20 +372,20 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
358 372
359 /* enforce simple/standard policy */ 373 /* enforce simple/standard policy */
360 allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP | 374 allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP |
361 URB_NO_INTERRUPT); 375 URB_NO_INTERRUPT | URB_DIR_MASK);
362 switch (temp) { 376 switch (xfertype) {
363 case PIPE_BULK: 377 case USB_ENDPOINT_XFER_BULK:
364 if (is_out) 378 if (is_out)
365 allowed |= URB_ZERO_PACKET; 379 allowed |= URB_ZERO_PACKET;
366 /* FALLTHROUGH */ 380 /* FALLTHROUGH */
367 case PIPE_CONTROL: 381 case USB_ENDPOINT_XFER_CONTROL:
368 allowed |= URB_NO_FSBR; /* only affects UHCI */ 382 allowed |= URB_NO_FSBR; /* only affects UHCI */
369 /* FALLTHROUGH */ 383 /* FALLTHROUGH */
370 default: /* all non-iso endpoints */ 384 default: /* all non-iso endpoints */
371 if (!is_out) 385 if (!is_out)
372 allowed |= URB_SHORT_NOT_OK; 386 allowed |= URB_SHORT_NOT_OK;
373 break; 387 break;
374 case PIPE_ISOCHRONOUS: 388 case USB_ENDPOINT_XFER_ISOC:
375 allowed |= URB_ISO_ASAP; 389 allowed |= URB_ISO_ASAP;
376 break; 390 break;
377 } 391 }
@@ -393,9 +407,9 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
393 * supports different values... this uses EHCI/UHCI defaults (and 407 * supports different values... this uses EHCI/UHCI defaults (and
394 * EHCI can use smaller non-default values). 408 * EHCI can use smaller non-default values).
395 */ 409 */
396 switch (temp) { 410 switch (xfertype) {
397 case PIPE_ISOCHRONOUS: 411 case USB_ENDPOINT_XFER_ISOC:
398 case PIPE_INTERRUPT: 412 case USB_ENDPOINT_XFER_INT:
399 /* too small? */ 413 /* too small? */
400 if (urb->interval <= 0) 414 if (urb->interval <= 0)
401 return -EINVAL; 415 return -EINVAL;
@@ -405,29 +419,27 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
405 // NOTE usb handles 2^15 419 // NOTE usb handles 2^15
406 if (urb->interval > (1024 * 8)) 420 if (urb->interval > (1024 * 8))
407 urb->interval = 1024 * 8; 421 urb->interval = 1024 * 8;
408 temp = 1024 * 8; 422 max = 1024 * 8;
409 break; 423 break;
410 case USB_SPEED_FULL: /* units are frames/msec */ 424 case USB_SPEED_FULL: /* units are frames/msec */
411 case USB_SPEED_LOW: 425 case USB_SPEED_LOW:
412 if (temp == PIPE_INTERRUPT) { 426 if (xfertype == USB_ENDPOINT_XFER_INT) {
413 if (urb->interval > 255) 427 if (urb->interval > 255)
414 return -EINVAL; 428 return -EINVAL;
415 // NOTE ohci only handles up to 32 429 // NOTE ohci only handles up to 32
416 temp = 128; 430 max = 128;
417 } else { 431 } else {
418 if (urb->interval > 1024) 432 if (urb->interval > 1024)
419 urb->interval = 1024; 433 urb->interval = 1024;
420 // NOTE usb and ohci handle up to 2^15 434 // NOTE usb and ohci handle up to 2^15
421 temp = 1024; 435 max = 1024;
422 } 436 }
423 break; 437 break;
424 default: 438 default:
425 return -EINVAL; 439 return -EINVAL;
426 } 440 }
427 /* power of two? */ 441 /* Round down to a power of 2, no more than max */
428 while (temp > urb->interval) 442 urb->interval = min(max, 1 << ilog2(urb->interval));
429 temp >>= 1;
430 urb->interval = temp;
431 } 443 }
432 444
433 return usb_hcd_submit_urb(urb, mem_flags); 445 return usb_hcd_submit_urb(urb, mem_flags);
@@ -496,8 +508,10 @@ int usb_unlink_urb(struct urb *urb)
496{ 508{
497 if (!urb) 509 if (!urb)
498 return -EINVAL; 510 return -EINVAL;
499 if (!(urb->dev && urb->dev->bus)) 511 if (!urb->dev)
500 return -ENODEV; 512 return -ENODEV;
513 if (!urb->ep)
514 return -EIDRM;
501 return usb_hcd_unlink_urb(urb, -ECONNRESET); 515 return usb_hcd_unlink_urb(urb, -ECONNRESET);
502} 516}
503 517
@@ -523,19 +537,21 @@ int usb_unlink_urb(struct urb *urb)
523 */ 537 */
524void usb_kill_urb(struct urb *urb) 538void usb_kill_urb(struct urb *urb)
525{ 539{
540 static DEFINE_MUTEX(reject_mutex);
541
526 might_sleep(); 542 might_sleep();
527 if (!(urb && urb->dev && urb->dev->bus)) 543 if (!(urb && urb->dev && urb->ep))
528 return; 544 return;
529 spin_lock_irq(&urb->lock); 545 mutex_lock(&reject_mutex);
530 ++urb->reject; 546 ++urb->reject;
531 spin_unlock_irq(&urb->lock); 547 mutex_unlock(&reject_mutex);
532 548
533 usb_hcd_unlink_urb(urb, -ENOENT); 549 usb_hcd_unlink_urb(urb, -ENOENT);
534 wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0); 550 wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
535 551
536 spin_lock_irq(&urb->lock); 552 mutex_lock(&reject_mutex);
537 --urb->reject; 553 --urb->reject;
538 spin_unlock_irq(&urb->lock); 554 mutex_unlock(&reject_mutex);
539} 555}
540 556
541/** 557/**
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 0fee5c66fd..c99938d5f7 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -223,6 +223,15 @@ static void ksuspend_usb_cleanup(void)
223 223
224#endif /* CONFIG_PM */ 224#endif /* CONFIG_PM */
225 225
226
227/* Returns 1 if @usb_bus is WUSB, 0 otherwise */
228static unsigned usb_bus_is_wusb(struct usb_bus *bus)
229{
230 struct usb_hcd *hcd = container_of(bus, struct usb_hcd, self);
231 return hcd->wireless;
232}
233
234
226/** 235/**
227 * usb_alloc_dev - usb device constructor (usbcore-internal) 236 * usb_alloc_dev - usb device constructor (usbcore-internal)
228 * @parent: hub to which device is connected; null to allocate a root hub 237 * @parent: hub to which device is connected; null to allocate a root hub
@@ -239,6 +248,8 @@ struct usb_device *
239usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1) 248usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1)
240{ 249{
241 struct usb_device *dev; 250 struct usb_device *dev;
251 struct usb_hcd *usb_hcd = container_of(bus, struct usb_hcd, self);
252 unsigned root_hub = 0;
242 253
243 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 254 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
244 if (!dev) 255 if (!dev)
@@ -255,12 +266,14 @@ usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1)
255 dev->dev.dma_mask = bus->controller->dma_mask; 266 dev->dev.dma_mask = bus->controller->dma_mask;
256 set_dev_node(&dev->dev, dev_to_node(bus->controller)); 267 set_dev_node(&dev->dev, dev_to_node(bus->controller));
257 dev->state = USB_STATE_ATTACHED; 268 dev->state = USB_STATE_ATTACHED;
269 atomic_set(&dev->urbnum, 0);
258 270
259 INIT_LIST_HEAD(&dev->ep0.urb_list); 271 INIT_LIST_HEAD(&dev->ep0.urb_list);
260 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE; 272 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
261 dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT; 273 dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT;
262 /* ep0 maxpacket comes later, from device descriptor */ 274 /* ep0 maxpacket comes later, from device descriptor */
263 dev->ep_in[0] = dev->ep_out[0] = &dev->ep0; 275 usb_enable_endpoint(dev, &dev->ep0);
276 dev->can_submit = 1;
264 277
265 /* Save readable and stable topology id, distinguishing devices 278 /* Save readable and stable topology id, distinguishing devices
266 * by location for diagnostics, tools, driver model, etc. The 279 * by location for diagnostics, tools, driver model, etc. The
@@ -275,6 +288,7 @@ usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1)
275 288
276 dev->dev.parent = bus->controller; 289 dev->dev.parent = bus->controller;
277 sprintf(&dev->dev.bus_id[0], "usb%d", bus->busnum); 290 sprintf(&dev->dev.bus_id[0], "usb%d", bus->busnum);
291 root_hub = 1;
278 } else { 292 } else {
279 /* match any labeling on the hubs; it's one-based */ 293 /* match any labeling on the hubs; it's one-based */
280 if (parent->devpath[0] == '0') 294 if (parent->devpath[0] == '0')
@@ -301,6 +315,12 @@ usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1)
301 INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work); 315 INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
302 dev->autosuspend_delay = usb_autosuspend_delay * HZ; 316 dev->autosuspend_delay = usb_autosuspend_delay * HZ;
303#endif 317#endif
318 if (root_hub) /* Root hub always ok [and always wired] */
319 dev->authorized = 1;
320 else {
321 dev->authorized = usb_hcd->authorized_default;
322 dev->wusb = usb_bus_is_wusb(bus)? 1 : 0;
323 }
304 return dev; 324 return dev;
305} 325}
306 326
@@ -748,7 +768,7 @@ void usb_buffer_unmap(struct urb *urb)
748/** 768/**
749 * usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint 769 * usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint
750 * @dev: device to which the scatterlist will be mapped 770 * @dev: device to which the scatterlist will be mapped
751 * @pipe: endpoint defining the mapping direction 771 * @is_in: mapping transfer direction
752 * @sg: the scatterlist to map 772 * @sg: the scatterlist to map
753 * @nents: the number of entries in the scatterlist 773 * @nents: the number of entries in the scatterlist
754 * 774 *
@@ -771,14 +791,13 @@ void usb_buffer_unmap(struct urb *urb)
771 * 791 *
772 * Reverse the effect of this call with usb_buffer_unmap_sg(). 792 * Reverse the effect of this call with usb_buffer_unmap_sg().
773 */ 793 */
774int usb_buffer_map_sg(const struct usb_device *dev, unsigned pipe, 794int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
775 struct scatterlist *sg, int nents) 795 struct scatterlist *sg, int nents)
776{ 796{
777 struct usb_bus *bus; 797 struct usb_bus *bus;
778 struct device *controller; 798 struct device *controller;
779 799
780 if (!dev 800 if (!dev
781 || usb_pipecontrol(pipe)
782 || !(bus = dev->bus) 801 || !(bus = dev->bus)
783 || !(controller = bus->controller) 802 || !(controller = bus->controller)
784 || !controller->dma_mask) 803 || !controller->dma_mask)
@@ -786,7 +805,7 @@ int usb_buffer_map_sg(const struct usb_device *dev, unsigned pipe,
786 805
787 // FIXME generic api broken like pci, can't report errors 806 // FIXME generic api broken like pci, can't report errors
788 return dma_map_sg(controller, sg, nents, 807 return dma_map_sg(controller, sg, nents,
789 usb_pipein(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 808 is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
790} 809}
791 810
792/* XXX DISABLED, no users currently. If you wish to re-enable this 811/* XXX DISABLED, no users currently. If you wish to re-enable this
@@ -799,14 +818,14 @@ int usb_buffer_map_sg(const struct usb_device *dev, unsigned pipe,
799/** 818/**
800 * usb_buffer_dmasync_sg - synchronize DMA and CPU view of scatterlist buffer(s) 819 * usb_buffer_dmasync_sg - synchronize DMA and CPU view of scatterlist buffer(s)
801 * @dev: device to which the scatterlist will be mapped 820 * @dev: device to which the scatterlist will be mapped
802 * @pipe: endpoint defining the mapping direction 821 * @is_in: mapping transfer direction
803 * @sg: the scatterlist to synchronize 822 * @sg: the scatterlist to synchronize
804 * @n_hw_ents: the positive return value from usb_buffer_map_sg 823 * @n_hw_ents: the positive return value from usb_buffer_map_sg
805 * 824 *
806 * Use this when you are re-using a scatterlist's data buffers for 825 * Use this when you are re-using a scatterlist's data buffers for
807 * another USB request. 826 * another USB request.
808 */ 827 */
809void usb_buffer_dmasync_sg(const struct usb_device *dev, unsigned pipe, 828void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
810 struct scatterlist *sg, int n_hw_ents) 829 struct scatterlist *sg, int n_hw_ents)
811{ 830{
812 struct usb_bus *bus; 831 struct usb_bus *bus;
@@ -819,20 +838,20 @@ void usb_buffer_dmasync_sg(const struct usb_device *dev, unsigned pipe,
819 return; 838 return;
820 839
821 dma_sync_sg(controller, sg, n_hw_ents, 840 dma_sync_sg(controller, sg, n_hw_ents,
822 usb_pipein(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 841 is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
823} 842}
824#endif 843#endif
825 844
826/** 845/**
827 * usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist 846 * usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist
828 * @dev: device to which the scatterlist will be mapped 847 * @dev: device to which the scatterlist will be mapped
829 * @pipe: endpoint defining the mapping direction 848 * @is_in: mapping transfer direction
830 * @sg: the scatterlist to unmap 849 * @sg: the scatterlist to unmap
831 * @n_hw_ents: the positive return value from usb_buffer_map_sg 850 * @n_hw_ents: the positive return value from usb_buffer_map_sg
832 * 851 *
833 * Reverses the effect of usb_buffer_map_sg(). 852 * Reverses the effect of usb_buffer_map_sg().
834 */ 853 */
835void usb_buffer_unmap_sg(const struct usb_device *dev, unsigned pipe, 854void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
836 struct scatterlist *sg, int n_hw_ents) 855 struct scatterlist *sg, int n_hw_ents)
837{ 856{
838 struct usb_bus *bus; 857 struct usb_bus *bus;
@@ -845,7 +864,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, unsigned pipe,
845 return; 864 return;
846 865
847 dma_unmap_sg(controller, sg, n_hw_ents, 866 dma_unmap_sg(controller, sg, n_hw_ents,
848 usb_pipein(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 867 is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
849} 868}
850 869
851/* format to disable USB on kernel command line is: nousb */ 870/* format to disable USB on kernel command line is: nousb */
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index ad5fa0338f..c52626c51f 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -8,17 +8,22 @@ extern int usb_create_ep_files(struct device *parent, struct usb_host_endpoint *
8 struct usb_device *udev); 8 struct usb_device *udev);
9extern void usb_remove_ep_files(struct usb_host_endpoint *endpoint); 9extern void usb_remove_ep_files(struct usb_host_endpoint *endpoint);
10 10
11extern void usb_enable_endpoint(struct usb_device *dev,
12 struct usb_host_endpoint *ep);
11extern void usb_disable_endpoint (struct usb_device *dev, unsigned int epaddr); 13extern void usb_disable_endpoint (struct usb_device *dev, unsigned int epaddr);
12extern void usb_disable_interface (struct usb_device *dev, 14extern void usb_disable_interface (struct usb_device *dev,
13 struct usb_interface *intf); 15 struct usb_interface *intf);
14extern void usb_release_interface_cache(struct kref *ref); 16extern void usb_release_interface_cache(struct kref *ref);
15extern void usb_disable_device (struct usb_device *dev, int skip_ep0); 17extern void usb_disable_device (struct usb_device *dev, int skip_ep0);
18extern int usb_deauthorize_device (struct usb_device *);
19extern int usb_authorize_device (struct usb_device *);
16extern void usb_detect_quirks(struct usb_device *udev); 20extern void usb_detect_quirks(struct usb_device *udev);
17 21
18extern int usb_get_device_descriptor(struct usb_device *dev, 22extern int usb_get_device_descriptor(struct usb_device *dev,
19 unsigned int size); 23 unsigned int size);
20extern char *usb_cache_string(struct usb_device *udev, int index); 24extern char *usb_cache_string(struct usb_device *udev, int index);
21extern int usb_set_configuration(struct usb_device *dev, int configuration); 25extern int usb_set_configuration(struct usb_device *dev, int configuration);
26extern int usb_choose_configuration(struct usb_device *udev);
22 27
23extern void usb_kick_khubd(struct usb_device *dev); 28extern void usb_kick_khubd(struct usb_device *dev);
24extern int usb_match_device(struct usb_device *dev, 29extern int usb_match_device(struct usb_device *dev,
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 767aed5b4b..f81d08d653 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -67,6 +67,17 @@ config USB_GADGET_DEBUG_FILES
67 driver on a new board. Enable these files by choosing "Y" 67 driver on a new board. Enable these files by choosing "Y"
68 here. If in doubt, or to conserve kernel memory, say "N". 68 here. If in doubt, or to conserve kernel memory, say "N".
69 69
70config USB_GADGET_DEBUG_FS
71 boolean "Debugging information files in debugfs"
72 depends on USB_GADGET && DEBUG_FS
73 help
74 Some of the drivers in the "gadget" framework can expose
75 debugging information in files under /sys/kernel/debug/.
76 The information in these files may help when you're
77 troubleshooting or bringing up a driver on a new board.
78 Enable these files by choosing "Y" here. If in doubt, or
79 to conserve kernel memory, say "N".
80
70config USB_GADGET_SELECTED 81config USB_GADGET_SELECTED
71 boolean 82 boolean
72 83
@@ -103,6 +114,20 @@ config USB_AMD5536UDC
103 default USB_GADGET 114 default USB_GADGET
104 select USB_GADGET_SELECTED 115 select USB_GADGET_SELECTED
105 116
117config USB_GADGET_ATMEL_USBA
118 boolean "Atmel USBA"
119 select USB_GADGET_DUALSPEED
120 depends on AVR32
121 help
122 USBA is the integrated high-speed USB Device controller on
123 the AT32AP700x processors from Atmel.
124
125config USB_ATMEL_USBA
126 tristate
127 depends on USB_GADGET_ATMEL_USBA
128 default USB_GADGET
129 select USB_GADGET_SELECTED
130
106config USB_GADGET_FSL_USB2 131config USB_GADGET_FSL_USB2
107 boolean "Freescale Highspeed USB DR Peripheral Controller" 132 boolean "Freescale Highspeed USB DR Peripheral Controller"
108 depends on MPC834x || PPC_MPC831x 133 depends on MPC834x || PPC_MPC831x
@@ -228,7 +253,6 @@ config USB_LH7A40X
228 default USB_GADGET 253 default USB_GADGET
229 select USB_GADGET_SELECTED 254 select USB_GADGET_SELECTED
230 255
231
232config USB_GADGET_OMAP 256config USB_GADGET_OMAP
233 boolean "OMAP USB Device Controller" 257 boolean "OMAP USB Device Controller"
234 depends on ARCH_OMAP 258 depends on ARCH_OMAP
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 1bc0f03550..904e57bf61 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_USB_OMAP) += omap_udc.o
14obj-$(CONFIG_USB_LH7A40X) += lh7a40x_udc.o 14obj-$(CONFIG_USB_LH7A40X) += lh7a40x_udc.o
15obj-$(CONFIG_USB_S3C2410) += s3c2410_udc.o 15obj-$(CONFIG_USB_S3C2410) += s3c2410_udc.o
16obj-$(CONFIG_USB_AT91) += at91_udc.o 16obj-$(CONFIG_USB_AT91) += at91_udc.o
17obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
17obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o 18obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
18obj-$(CONFIG_USB_M66592) += m66592-udc.o 19obj-$(CONFIG_USB_M66592) += m66592-udc.o
19 20
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 714156ca8f..1c80406025 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -69,7 +69,7 @@
69 69
70/* gadget stack */ 70/* gadget stack */
71#include <linux/usb/ch9.h> 71#include <linux/usb/ch9.h>
72#include <linux/usb_gadget.h> 72#include <linux/usb/gadget.h>
73 73
74/* udc specific */ 74/* udc specific */
75#include "amd5536udc.h" 75#include "amd5536udc.h"
@@ -3244,7 +3244,6 @@ static int udc_pci_probe(
3244 retval = -ENOMEM; 3244 retval = -ENOMEM;
3245 goto finished; 3245 goto finished;
3246 } 3246 }
3247 memset(dev, 0, sizeof(struct udc));
3248 3247
3249 /* pci setup */ 3248 /* pci setup */
3250 if (pci_enable_device(pdev) < 0) { 3249 if (pci_enable_device(pdev) < 0) {
@@ -3286,14 +3285,12 @@ static int udc_pci_probe(
3286 3285
3287 pci_set_drvdata(pdev, dev); 3286 pci_set_drvdata(pdev, dev);
3288 3287
3289 /* chip revision */ 3288 /* chip revision for Hs AMD5536 */
3290 dev->chiprev = 0; 3289 dev->chiprev = pdev->revision;
3291 3290
3292 pci_set_master(pdev); 3291 pci_set_master(pdev);
3293 pci_set_mwi(pdev); 3292 pci_set_mwi(pdev);
3294 3293
3295 /* chip rev for Hs AMD5536 */
3296 pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) &dev->chiprev);
3297 /* init dma pools */ 3294 /* init dma pools */
3298 if (use_dma) { 3295 if (use_dma) {
3299 retval = init_dma_pools(dev); 3296 retval = init_dma_pools(dev);
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 63d7d65686..a6adf7e0f6 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -38,7 +38,7 @@
38#include <linux/proc_fs.h> 38#include <linux/proc_fs.h>
39#include <linux/clk.h> 39#include <linux/clk.h>
40#include <linux/usb/ch9.h> 40#include <linux/usb/ch9.h>
41#include <linux/usb_gadget.h> 41#include <linux/usb/gadget.h>
42 42
43#include <asm/byteorder.h> 43#include <asm/byteorder.h>
44#include <asm/hardware.h> 44#include <asm/hardware.h>
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
new file mode 100644
index 0000000000..4fb5ff4695
--- /dev/null
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -0,0 +1,2077 @@
1/*
2 * Driver for the Atmel USBA high speed USB device controller
3 *
4 * Copyright (C) 2005-2007 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/clk.h>
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/io.h>
15#include <linux/device.h>
16#include <linux/dma-mapping.h>
17#include <linux/list.h>
18#include <linux/platform_device.h>
19#include <linux/usb/ch9.h>
20#include <linux/usb/gadget.h>
21#include <linux/delay.h>
22
23#include <asm/gpio.h>
24#include <asm/arch/board.h>
25
26#include "atmel_usba_udc.h"
27
28
29static struct usba_udc the_udc;
30
31#ifdef CONFIG_USB_GADGET_DEBUG_FS
32#include <linux/debugfs.h>
33#include <linux/uaccess.h>
34
35static int queue_dbg_open(struct inode *inode, struct file *file)
36{
37 struct usba_ep *ep = inode->i_private;
38 struct usba_request *req, *req_copy;
39 struct list_head *queue_data;
40
41 queue_data = kmalloc(sizeof(*queue_data), GFP_KERNEL);
42 if (!queue_data)
43 return -ENOMEM;
44 INIT_LIST_HEAD(queue_data);
45
46 spin_lock_irq(&ep->udc->lock);
47 list_for_each_entry(req, &ep->queue, queue) {
48 req_copy = kmalloc(sizeof(*req_copy), GFP_ATOMIC);
49 if (!req_copy)
50 goto fail;
51 memcpy(req_copy, req, sizeof(*req_copy));
52 list_add_tail(&req_copy->queue, queue_data);
53 }
54 spin_unlock_irq(&ep->udc->lock);
55
56 file->private_data = queue_data;
57 return 0;
58
59fail:
60 spin_unlock_irq(&ep->udc->lock);
61 list_for_each_entry_safe(req, req_copy, queue_data, queue) {
62 list_del(&req->queue);
63 kfree(req);
64 }
65 kfree(queue_data);
66 return -ENOMEM;
67}
68
69/*
70 * bbbbbbbb llllllll IZS sssss nnnn FDL\n\0
71 *
72 * b: buffer address
73 * l: buffer length
74 * I/i: interrupt/no interrupt
75 * Z/z: zero/no zero
76 * S/s: short ok/short not ok
77 * s: status
78 * n: nr_packets
79 * F/f: submitted/not submitted to FIFO
80 * D/d: using/not using DMA
81 * L/l: last transaction/not last transaction
82 */
83static ssize_t queue_dbg_read(struct file *file, char __user *buf,
84 size_t nbytes, loff_t *ppos)
85{
86 struct list_head *queue = file->private_data;
87 struct usba_request *req, *tmp_req;
88 size_t len, remaining, actual = 0;
89 char tmpbuf[38];
90
91 if (!access_ok(VERIFY_WRITE, buf, nbytes))
92 return -EFAULT;
93
94 mutex_lock(&file->f_dentry->d_inode->i_mutex);
95 list_for_each_entry_safe(req, tmp_req, queue, queue) {
96 len = snprintf(tmpbuf, sizeof(tmpbuf),
97 "%8p %08x %c%c%c %5d %c%c%c\n",
98 req->req.buf, req->req.length,
99 req->req.no_interrupt ? 'i' : 'I',
100 req->req.zero ? 'Z' : 'z',
101 req->req.short_not_ok ? 's' : 'S',
102 req->req.status,
103 req->submitted ? 'F' : 'f',
104 req->using_dma ? 'D' : 'd',
105 req->last_transaction ? 'L' : 'l');
106 len = min(len, sizeof(tmpbuf));
107 if (len > nbytes)
108 break;
109
110 list_del(&req->queue);
111 kfree(req);
112
113 remaining = __copy_to_user(buf, tmpbuf, len);
114 actual += len - remaining;
115 if (remaining)
116 break;
117
118 nbytes -= len;
119 buf += len;
120 }
121 mutex_unlock(&file->f_dentry->d_inode->i_mutex);
122
123 return actual;
124}
125
126static int queue_dbg_release(struct inode *inode, struct file *file)
127{
128 struct list_head *queue_data = file->private_data;
129 struct usba_request *req, *tmp_req;
130
131 list_for_each_entry_safe(req, tmp_req, queue_data, queue) {
132 list_del(&req->queue);
133 kfree(req);
134 }
135 kfree(queue_data);
136 return 0;
137}
138
139static int regs_dbg_open(struct inode *inode, struct file *file)
140{
141 struct usba_udc *udc;
142 unsigned int i;
143 u32 *data;
144 int ret = -ENOMEM;
145
146 mutex_lock(&inode->i_mutex);
147 udc = inode->i_private;
148 data = kmalloc(inode->i_size, GFP_KERNEL);
149 if (!data)
150 goto out;
151
152 spin_lock_irq(&udc->lock);
153 for (i = 0; i < inode->i_size / 4; i++)
154 data[i] = __raw_readl(udc->regs + i * 4);
155 spin_unlock_irq(&udc->lock);
156
157 file->private_data = data;
158 ret = 0;
159
160out:
161 mutex_unlock(&inode->i_mutex);
162
163 return ret;
164}
165
166static ssize_t regs_dbg_read(struct file *file, char __user *buf,
167 size_t nbytes, loff_t *ppos)
168{
169 struct inode *inode = file->f_dentry->d_inode;
170 int ret;
171
172 mutex_lock(&inode->i_mutex);
173 ret = simple_read_from_buffer(buf, nbytes, ppos,
174 file->private_data,
175 file->f_dentry->d_inode->i_size);
176 mutex_unlock(&inode->i_mutex);
177
178 return ret;
179}
180
181static int regs_dbg_release(struct inode *inode, struct file *file)
182{
183 kfree(file->private_data);
184 return 0;
185}
186
187const struct file_operations queue_dbg_fops = {
188 .owner = THIS_MODULE,
189 .open = queue_dbg_open,
190 .llseek = no_llseek,
191 .read = queue_dbg_read,
192 .release = queue_dbg_release,
193};
194
195const struct file_operations regs_dbg_fops = {
196 .owner = THIS_MODULE,
197 .open = regs_dbg_open,
198 .llseek = generic_file_llseek,
199 .read = regs_dbg_read,
200 .release = regs_dbg_release,
201};
202
203static void usba_ep_init_debugfs(struct usba_udc *udc,
204 struct usba_ep *ep)
205{
206 struct dentry *ep_root;
207
208 ep_root = debugfs_create_dir(ep->ep.name, udc->debugfs_root);
209 if (!ep_root)
210 goto err_root;
211 ep->debugfs_dir = ep_root;
212
213 ep->debugfs_queue = debugfs_create_file("queue", 0400, ep_root,
214 ep, &queue_dbg_fops);
215 if (!ep->debugfs_queue)
216 goto err_queue;
217
218 if (ep->can_dma) {
219 ep->debugfs_dma_status
220 = debugfs_create_u32("dma_status", 0400, ep_root,
221 &ep->last_dma_status);
222 if (!ep->debugfs_dma_status)
223 goto err_dma_status;
224 }
225 if (ep_is_control(ep)) {
226 ep->debugfs_state
227 = debugfs_create_u32("state", 0400, ep_root,
228 &ep->state);
229 if (!ep->debugfs_state)
230 goto err_state;
231 }
232
233 return;
234
235err_state:
236 if (ep->can_dma)
237 debugfs_remove(ep->debugfs_dma_status);
238err_dma_status:
239 debugfs_remove(ep->debugfs_queue);
240err_queue:
241 debugfs_remove(ep_root);
242err_root:
243 dev_err(&ep->udc->pdev->dev,
244 "failed to create debugfs directory for %s\n", ep->ep.name);
245}
246
247static void usba_ep_cleanup_debugfs(struct usba_ep *ep)
248{
249 debugfs_remove(ep->debugfs_queue);
250 debugfs_remove(ep->debugfs_dma_status);
251 debugfs_remove(ep->debugfs_state);
252 debugfs_remove(ep->debugfs_dir);
253 ep->debugfs_dma_status = NULL;
254 ep->debugfs_dir = NULL;
255}
256
257static void usba_init_debugfs(struct usba_udc *udc)
258{
259 struct dentry *root, *regs;
260 struct resource *regs_resource;
261
262 root = debugfs_create_dir(udc->gadget.name, NULL);
263 if (IS_ERR(root) || !root)
264 goto err_root;
265 udc->debugfs_root = root;
266
267 regs = debugfs_create_file("regs", 0400, root, udc, &regs_dbg_fops);
268 if (!regs)
269 goto err_regs;
270
271 regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
272 CTRL_IOMEM_ID);
273 regs->d_inode->i_size = regs_resource->end - regs_resource->start + 1;
274 udc->debugfs_regs = regs;
275
276 usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0));
277
278 return;
279
280err_regs:
281 debugfs_remove(root);
282err_root:
283 udc->debugfs_root = NULL;
284 dev_err(&udc->pdev->dev, "debugfs is not available\n");
285}
286
287static void usba_cleanup_debugfs(struct usba_udc *udc)
288{
289 usba_ep_cleanup_debugfs(to_usba_ep(udc->gadget.ep0));
290 debugfs_remove(udc->debugfs_regs);
291 debugfs_remove(udc->debugfs_root);
292 udc->debugfs_regs = NULL;
293 udc->debugfs_root = NULL;
294}
295#else
296static inline void usba_ep_init_debugfs(struct usba_udc *udc,
297 struct usba_ep *ep)
298{
299
300}
301
302static inline void usba_ep_cleanup_debugfs(struct usba_ep *ep)
303{
304
305}
306
307static inline void usba_init_debugfs(struct usba_udc *udc)
308{
309
310}
311
312static inline void usba_cleanup_debugfs(struct usba_udc *udc)
313{
314
315}
316#endif
317
318static int vbus_is_present(struct usba_udc *udc)
319{
320 if (udc->vbus_pin != -1)
321 return gpio_get_value(udc->vbus_pin);
322
323 /* No Vbus detection: Assume always present */
324 return 1;
325}
326
327static void copy_to_fifo(void __iomem *fifo, const void *buf, int len)
328{
329 unsigned long tmp;
330
331 DBG(DBG_FIFO, "copy to FIFO (len %d):\n", len);
332 for (; len > 0; len -= 4, buf += 4, fifo += 4) {
333 tmp = *(unsigned long *)buf;
334 if (len >= 4) {
335 DBG(DBG_FIFO, " -> %08lx\n", tmp);
336 __raw_writel(tmp, fifo);
337 } else {
338 do {
339 DBG(DBG_FIFO, " -> %02lx\n", tmp >> 24);
340 __raw_writeb(tmp >> 24, fifo);
341 fifo++;
342 tmp <<= 8;
343 } while (--len);
344 break;
345 }
346 }
347}
348
349static void copy_from_fifo(void *buf, void __iomem *fifo, int len)
350{
351 union {
352 unsigned long *w;
353 unsigned char *b;
354 } p;
355 unsigned long tmp;
356
357 DBG(DBG_FIFO, "copy from FIFO (len %d):\n", len);
358 for (p.w = buf; len > 0; len -= 4, p.w++, fifo += 4) {
359 if (len >= 4) {
360 tmp = __raw_readl(fifo);
361 *p.w = tmp;
362 DBG(DBG_FIFO, " -> %08lx\n", tmp);
363 } else {
364 do {
365 tmp = __raw_readb(fifo);
366 *p.b = tmp;
367 DBG(DBG_FIFO, " -> %02lx\n", tmp);
368 fifo++, p.b++;
369 } while (--len);
370 }
371 }
372}
373
374static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
375{
376 unsigned int transaction_len;
377
378 transaction_len = req->req.length - req->req.actual;
379 req->last_transaction = 1;
380 if (transaction_len > ep->ep.maxpacket) {
381 transaction_len = ep->ep.maxpacket;
382 req->last_transaction = 0;
383 } else if (transaction_len == ep->ep.maxpacket && req->req.zero)
384 req->last_transaction = 0;
385
386 DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n",
387 ep->ep.name, req, transaction_len,
388 req->last_transaction ? ", done" : "");
389
390 copy_to_fifo(ep->fifo, req->req.buf + req->req.actual, transaction_len);
391 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
392 req->req.actual += transaction_len;
393}
394
395static void submit_request(struct usba_ep *ep, struct usba_request *req)
396{
397 DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n",
398 ep->ep.name, req, req->req.length);
399
400 req->req.actual = 0;
401 req->submitted = 1;
402
403 if (req->using_dma) {
404 if (req->req.length == 0) {
405 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
406 return;
407 }
408
409 if (req->req.zero)
410 usba_ep_writel(ep, CTL_ENB, USBA_SHORT_PACKET);
411 else
412 usba_ep_writel(ep, CTL_DIS, USBA_SHORT_PACKET);
413
414 usba_dma_writel(ep, ADDRESS, req->req.dma);
415 usba_dma_writel(ep, CONTROL, req->ctrl);
416 } else {
417 next_fifo_transaction(ep, req);
418 if (req->last_transaction) {
419 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
420 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
421 } else {
422 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
423 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
424 }
425 }
426}
427
428static void submit_next_request(struct usba_ep *ep)
429{
430 struct usba_request *req;
431
432 if (list_empty(&ep->queue)) {
433 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY);
434 return;
435 }
436
437 req = list_entry(ep->queue.next, struct usba_request, queue);
438 if (!req->submitted)
439 submit_request(ep, req);
440}
441
442static void send_status(struct usba_udc *udc, struct usba_ep *ep)
443{
444 ep->state = STATUS_STAGE_IN;
445 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
446 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
447}
448
449static void receive_data(struct usba_ep *ep)
450{
451 struct usba_udc *udc = ep->udc;
452 struct usba_request *req;
453 unsigned long status;
454 unsigned int bytecount, nr_busy;
455 int is_complete = 0;
456
457 status = usba_ep_readl(ep, STA);
458 nr_busy = USBA_BFEXT(BUSY_BANKS, status);
459
460 DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy);
461
462 while (nr_busy > 0) {
463 if (list_empty(&ep->queue)) {
464 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
465 break;
466 }
467 req = list_entry(ep->queue.next,
468 struct usba_request, queue);
469
470 bytecount = USBA_BFEXT(BYTE_COUNT, status);
471
472 if (status & (1 << 31))
473 is_complete = 1;
474 if (req->req.actual + bytecount >= req->req.length) {
475 is_complete = 1;
476 bytecount = req->req.length - req->req.actual;
477 }
478
479 copy_from_fifo(req->req.buf + req->req.actual,
480 ep->fifo, bytecount);
481 req->req.actual += bytecount;
482
483 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
484
485 if (is_complete) {
486 DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name);
487 req->req.status = 0;
488 list_del_init(&req->queue);
489 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
490 spin_unlock(&udc->lock);
491 req->req.complete(&ep->ep, &req->req);
492 spin_lock(&udc->lock);
493 }
494
495 status = usba_ep_readl(ep, STA);
496 nr_busy = USBA_BFEXT(BUSY_BANKS, status);
497
498 if (is_complete && ep_is_control(ep)) {
499 send_status(udc, ep);
500 break;
501 }
502 }
503}
504
505static void
506request_complete(struct usba_ep *ep, struct usba_request *req, int status)
507{
508 struct usba_udc *udc = ep->udc;
509
510 WARN_ON(!list_empty(&req->queue));
511
512 if (req->req.status == -EINPROGRESS)
513 req->req.status = status;
514
515 if (req->mapped) {
516 dma_unmap_single(
517 &udc->pdev->dev, req->req.dma, req->req.length,
518 ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
519 req->req.dma = DMA_ADDR_INVALID;
520 req->mapped = 0;
521 }
522
523 DBG(DBG_GADGET | DBG_REQ,
524 "%s: req %p complete: status %d, actual %u\n",
525 ep->ep.name, req, req->req.status, req->req.actual);
526
527 spin_unlock(&udc->lock);
528 req->req.complete(&ep->ep, &req->req);
529 spin_lock(&udc->lock);
530}
531
532static void
533request_complete_list(struct usba_ep *ep, struct list_head *list, int status)
534{
535 struct usba_request *req, *tmp_req;
536
537 list_for_each_entry_safe(req, tmp_req, list, queue) {
538 list_del_init(&req->queue);
539 request_complete(ep, req, status);
540 }
541}
542
543static int
544usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
545{
546 struct usba_ep *ep = to_usba_ep(_ep);
547 struct usba_udc *udc = ep->udc;
548 unsigned long flags, ept_cfg, maxpacket;
549 unsigned int nr_trans;
550
551 DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);
552
553 maxpacket = le16_to_cpu(desc->wMaxPacketSize) & 0x7ff;
554
555 if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index)
556 || ep->index == 0
557 || desc->bDescriptorType != USB_DT_ENDPOINT
558 || maxpacket == 0
559 || maxpacket > ep->fifo_size) {
560 DBG(DBG_ERR, "ep_enable: Invalid argument");
561 return -EINVAL;
562 }
563
564 ep->is_isoc = 0;
565 ep->is_in = 0;
566
567 if (maxpacket <= 8)
568 ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
569 else
570 /* LSB is bit 1, not 0 */
571 ept_cfg = USBA_BF(EPT_SIZE, fls(maxpacket - 1) - 3);
572
573 DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
574 ep->ep.name, ept_cfg, maxpacket);
575
576 if ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) {
577 ep->is_in = 1;
578 ept_cfg |= USBA_EPT_DIR_IN;
579 }
580
581 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
582 case USB_ENDPOINT_XFER_CONTROL:
583 ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
584 ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
585 break;
586 case USB_ENDPOINT_XFER_ISOC:
587 if (!ep->can_isoc) {
588 DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n",
589 ep->ep.name);
590 return -EINVAL;
591 }
592
593 /*
594 * Bits 11:12 specify number of _additional_
595 * transactions per microframe.
596 */
597 nr_trans = ((le16_to_cpu(desc->wMaxPacketSize) >> 11) & 3) + 1;
598 if (nr_trans > 3)
599 return -EINVAL;
600
601 ep->is_isoc = 1;
602 ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO);
603
604 /*
605 * Do triple-buffering on high-bandwidth iso endpoints.
606 */
607 if (nr_trans > 1 && ep->nr_banks == 3)
608 ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_TRIPLE);
609 else
610 ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
611 ept_cfg |= USBA_BF(NB_TRANS, nr_trans);
612 break;
613 case USB_ENDPOINT_XFER_BULK:
614 ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK);
615 ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
616 break;
617 case USB_ENDPOINT_XFER_INT:
618 ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT);
619 ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
620 break;
621 }
622
623 spin_lock_irqsave(&ep->udc->lock, flags);
624
625 if (ep->desc) {
626 spin_unlock_irqrestore(&ep->udc->lock, flags);
627 DBG(DBG_ERR, "ep%d already enabled\n", ep->index);
628 return -EBUSY;
629 }
630
631 ep->desc = desc;
632 ep->ep.maxpacket = maxpacket;
633
634 usba_ep_writel(ep, CFG, ept_cfg);
635 usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
636
637 if (ep->can_dma) {
638 u32 ctrl;
639
640 usba_writel(udc, INT_ENB,
641 (usba_readl(udc, INT_ENB)
642 | USBA_BF(EPT_INT, 1 << ep->index)
643 | USBA_BF(DMA_INT, 1 << ep->index)));
644 ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA;
645 usba_ep_writel(ep, CTL_ENB, ctrl);
646 } else {
647 usba_writel(udc, INT_ENB,
648 (usba_readl(udc, INT_ENB)
649 | USBA_BF(EPT_INT, 1 << ep->index)));
650 }
651
652 spin_unlock_irqrestore(&udc->lock, flags);
653
654 DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
655 (unsigned long)usba_ep_readl(ep, CFG));
656 DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
657 (unsigned long)usba_readl(udc, INT_ENB));
658
659 return 0;
660}
661
662static int usba_ep_disable(struct usb_ep *_ep)
663{
664 struct usba_ep *ep = to_usba_ep(_ep);
665 struct usba_udc *udc = ep->udc;
666 LIST_HEAD(req_list);
667 unsigned long flags;
668
669 DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name);
670
671 spin_lock_irqsave(&udc->lock, flags);
672
673 if (!ep->desc) {
674 spin_unlock_irqrestore(&udc->lock, flags);
675 DBG(DBG_ERR, "ep_disable: %s not enabled\n", ep->ep.name);
676 return -EINVAL;
677 }
678 ep->desc = NULL;
679
680 list_splice_init(&ep->queue, &req_list);
681 if (ep->can_dma) {
682 usba_dma_writel(ep, CONTROL, 0);
683 usba_dma_writel(ep, ADDRESS, 0);
684 usba_dma_readl(ep, STATUS);
685 }
686 usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE);
687 usba_writel(udc, INT_ENB,
688 usba_readl(udc, INT_ENB)
689 & ~USBA_BF(EPT_INT, 1 << ep->index));
690
691 request_complete_list(ep, &req_list, -ESHUTDOWN);
692
693 spin_unlock_irqrestore(&udc->lock, flags);
694
695 return 0;
696}
697
698static struct usb_request *
699usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
700{
701 struct usba_request *req;
702
703 DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags);
704
705 req = kzalloc(sizeof(*req), gfp_flags);
706 if (!req)
707 return NULL;
708
709 INIT_LIST_HEAD(&req->queue);
710 req->req.dma = DMA_ADDR_INVALID;
711
712 return &req->req;
713}
714
715static void
716usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
717{
718 struct usba_request *req = to_usba_req(_req);
719
720 DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req);
721
722 kfree(req);
723}
724
725static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
726 struct usba_request *req, gfp_t gfp_flags)
727{
728 unsigned long flags;
729 int ret;
730
731 DBG(DBG_DMA, "%s: req l/%u d/%08x %c%c%c\n",
732 ep->ep.name, req->req.length, req->req.dma,
733 req->req.zero ? 'Z' : 'z',
734 req->req.short_not_ok ? 'S' : 's',
735 req->req.no_interrupt ? 'I' : 'i');
736
737 if (req->req.length > 0x10000) {
738 /* Lengths from 0 to 65536 (inclusive) are supported */
739 DBG(DBG_ERR, "invalid request length %u\n", req->req.length);
740 return -EINVAL;
741 }
742
743 req->using_dma = 1;
744
745 if (req->req.dma == DMA_ADDR_INVALID) {
746 req->req.dma = dma_map_single(
747 &udc->pdev->dev, req->req.buf, req->req.length,
748 ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
749 req->mapped = 1;
750 } else {
751 dma_sync_single_for_device(
752 &udc->pdev->dev, req->req.dma, req->req.length,
753 ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
754 req->mapped = 0;
755 }
756
757 req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
758 | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
759 | USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
760
761 if (ep->is_in)
762 req->ctrl |= USBA_DMA_END_BUF_EN;
763
764 /*
765 * Add this request to the queue and submit for DMA if
766 * possible. Check if we're still alive first -- we may have
767 * received a reset since last time we checked.
768 */
769 ret = -ESHUTDOWN;
770 spin_lock_irqsave(&udc->lock, flags);
771 if (ep->desc) {
772 if (list_empty(&ep->queue))
773 submit_request(ep, req);
774
775 list_add_tail(&req->queue, &ep->queue);
776 ret = 0;
777 }
778 spin_unlock_irqrestore(&udc->lock, flags);
779
780 return ret;
781}
782
783static int
784usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
785{
786 struct usba_request *req = to_usba_req(_req);
787 struct usba_ep *ep = to_usba_ep(_ep);
788 struct usba_udc *udc = ep->udc;
789 unsigned long flags;
790 int ret;
791
792 DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n",
793 ep->ep.name, req, _req->length);
794
795 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN || !ep->desc)
796 return -ESHUTDOWN;
797
798 req->submitted = 0;
799 req->using_dma = 0;
800 req->last_transaction = 0;
801
802 _req->status = -EINPROGRESS;
803 _req->actual = 0;
804
805 if (ep->can_dma)
806 return queue_dma(udc, ep, req, gfp_flags);
807
808 /* May have received a reset since last time we checked */
809 ret = -ESHUTDOWN;
810 spin_lock_irqsave(&udc->lock, flags);
811 if (ep->desc) {
812 list_add_tail(&req->queue, &ep->queue);
813
814 if (ep->is_in || (ep_is_control(ep)
815 && (ep->state == DATA_STAGE_IN
816 || ep->state == STATUS_STAGE_IN)))
817 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
818 else
819 usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
820 ret = 0;
821 }
822 spin_unlock_irqrestore(&udc->lock, flags);
823
824 return ret;
825}
826
827static void
828usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status)
829{
830 req->req.actual = req->req.length - USBA_BFEXT(DMA_BUF_LEN, status);
831}
832
833static int stop_dma(struct usba_ep *ep, u32 *pstatus)
834{
835 unsigned int timeout;
836 u32 status;
837
838 /*
839 * Stop the DMA controller. When writing both CH_EN
840 * and LINK to 0, the other bits are not affected.
841 */
842 usba_dma_writel(ep, CONTROL, 0);
843
844 /* Wait for the FIFO to empty */
845 for (timeout = 40; timeout; --timeout) {
846 status = usba_dma_readl(ep, STATUS);
847 if (!(status & USBA_DMA_CH_EN))
848 break;
849 udelay(1);
850 }
851
852 if (pstatus)
853 *pstatus = status;
854
855 if (timeout == 0) {
856 dev_err(&ep->udc->pdev->dev,
857 "%s: timed out waiting for DMA FIFO to empty\n",
858 ep->ep.name);
859 return -ETIMEDOUT;
860 }
861
862 return 0;
863}
864
865static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
866{
867 struct usba_ep *ep = to_usba_ep(_ep);
868 struct usba_udc *udc = ep->udc;
869 struct usba_request *req = to_usba_req(_req);
870 unsigned long flags;
871 u32 status;
872
873 DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n",
874 ep->ep.name, req);
875
876 spin_lock_irqsave(&udc->lock, flags);
877
878 if (req->using_dma) {
879 /*
880 * If this request is currently being transferred,
881 * stop the DMA controller and reset the FIFO.
882 */
883 if (ep->queue.next == &req->queue) {
884 status = usba_dma_readl(ep, STATUS);
885 if (status & USBA_DMA_CH_EN)
886 stop_dma(ep, &status);
887
888#ifdef CONFIG_USB_GADGET_DEBUG_FS
889 ep->last_dma_status = status;
890#endif
891
892 usba_writel(udc, EPT_RST, 1 << ep->index);
893
894 usba_update_req(ep, req, status);
895 }
896 }
897
898 /*
899 * Errors should stop the queue from advancing until the
900 * completion function returns.
901 */
902 list_del_init(&req->queue);
903
904 request_complete(ep, req, -ECONNRESET);
905
906 /* Process the next request if any */
907 submit_next_request(ep);
908 spin_unlock_irqrestore(&udc->lock, flags);
909
910 return 0;
911}
912
913static int usba_ep_set_halt(struct usb_ep *_ep, int value)
914{
915 struct usba_ep *ep = to_usba_ep(_ep);
916 struct usba_udc *udc = ep->udc;
917 unsigned long flags;
918 int ret = 0;
919
920 DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name,
921 value ? "set" : "clear");
922
923 if (!ep->desc) {
924 DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n",
925 ep->ep.name);
926 return -ENODEV;
927 }
928 if (ep->is_isoc) {
929 DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n",
930 ep->ep.name);
931 return -ENOTTY;
932 }
933
934 spin_lock_irqsave(&udc->lock, flags);
935
936 /*
937 * We can't halt IN endpoints while there are still data to be
938 * transferred
939 */
940 if (!list_empty(&ep->queue)
941 || ((value && ep->is_in && (usba_ep_readl(ep, STA)
942 & USBA_BF(BUSY_BANKS, -1L))))) {
943 ret = -EAGAIN;
944 } else {
945 if (value)
946 usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
947 else
948 usba_ep_writel(ep, CLR_STA,
949 USBA_FORCE_STALL | USBA_TOGGLE_CLR);
950 usba_ep_readl(ep, STA);
951 }
952
953 spin_unlock_irqrestore(&udc->lock, flags);
954
955 return ret;
956}
957
958static int usba_ep_fifo_status(struct usb_ep *_ep)
959{
960 struct usba_ep *ep = to_usba_ep(_ep);
961
962 return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
963}
964
965static void usba_ep_fifo_flush(struct usb_ep *_ep)
966{
967 struct usba_ep *ep = to_usba_ep(_ep);
968 struct usba_udc *udc = ep->udc;
969
970 usba_writel(udc, EPT_RST, 1 << ep->index);
971}
972
973static const struct usb_ep_ops usba_ep_ops = {
974 .enable = usba_ep_enable,
975 .disable = usba_ep_disable,
976 .alloc_request = usba_ep_alloc_request,
977 .free_request = usba_ep_free_request,
978 .queue = usba_ep_queue,
979 .dequeue = usba_ep_dequeue,
980 .set_halt = usba_ep_set_halt,
981 .fifo_status = usba_ep_fifo_status,
982 .fifo_flush = usba_ep_fifo_flush,
983};
984
985static int usba_udc_get_frame(struct usb_gadget *gadget)
986{
987 struct usba_udc *udc = to_usba_udc(gadget);
988
989 return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM));
990}
991
992static int usba_udc_wakeup(struct usb_gadget *gadget)
993{
994 struct usba_udc *udc = to_usba_udc(gadget);
995 unsigned long flags;
996 u32 ctrl;
997 int ret = -EINVAL;
998
999 spin_lock_irqsave(&udc->lock, flags);
1000 if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
1001 ctrl = usba_readl(udc, CTRL);
1002 usba_writel(udc, CTRL, ctrl | USBA_REMOTE_WAKE_UP);
1003 ret = 0;
1004 }
1005 spin_unlock_irqrestore(&udc->lock, flags);
1006
1007 return ret;
1008}
1009
1010static int
1011usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
1012{
1013 struct usba_udc *udc = to_usba_udc(gadget);
1014 unsigned long flags;
1015
1016 spin_lock_irqsave(&udc->lock, flags);
1017 if (is_selfpowered)
1018 udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
1019 else
1020 udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
1021 spin_unlock_irqrestore(&udc->lock, flags);
1022
1023 return 0;
1024}
1025
1026static const struct usb_gadget_ops usba_udc_ops = {
1027 .get_frame = usba_udc_get_frame,
1028 .wakeup = usba_udc_wakeup,
1029 .set_selfpowered = usba_udc_set_selfpowered,
1030};
1031
1032#define EP(nam, idx, maxpkt, maxbk, dma, isoc) \
1033{ \
1034 .ep = { \
1035 .ops = &usba_ep_ops, \
1036 .name = nam, \
1037 .maxpacket = maxpkt, \
1038 }, \
1039 .udc = &the_udc, \
1040 .queue = LIST_HEAD_INIT(usba_ep[idx].queue), \
1041 .fifo_size = maxpkt, \
1042 .nr_banks = maxbk, \
1043 .index = idx, \
1044 .can_dma = dma, \
1045 .can_isoc = isoc, \
1046}
1047
1048static struct usba_ep usba_ep[] = {
1049 EP("ep0", 0, 64, 1, 0, 0),
1050 EP("ep1in-bulk", 1, 512, 2, 1, 1),
1051 EP("ep2out-bulk", 2, 512, 2, 1, 1),
1052 EP("ep3in-int", 3, 64, 3, 1, 0),
1053 EP("ep4out-int", 4, 64, 3, 1, 0),
1054 EP("ep5in-iso", 5, 1024, 3, 1, 1),
1055 EP("ep6out-iso", 6, 1024, 3, 1, 1),
1056};
1057#undef EP
1058
1059static struct usb_endpoint_descriptor usba_ep0_desc = {
1060 .bLength = USB_DT_ENDPOINT_SIZE,
1061 .bDescriptorType = USB_DT_ENDPOINT,
1062 .bEndpointAddress = 0,
1063 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1064 .wMaxPacketSize = __constant_cpu_to_le16(64),
1065 /* FIXME: I have no idea what to put here */
1066 .bInterval = 1,
1067};
1068
1069static void nop_release(struct device *dev)
1070{
1071
1072}
1073
1074static struct usba_udc the_udc = {
1075 .gadget = {
1076 .ops = &usba_udc_ops,
1077 .ep0 = &usba_ep[0].ep,
1078 .ep_list = LIST_HEAD_INIT(the_udc.gadget.ep_list),
1079 .is_dualspeed = 1,
1080 .name = "atmel_usba_udc",
1081 .dev = {
1082 .bus_id = "gadget",
1083 .release = nop_release,
1084 },
1085 },
1086
1087 .lock = SPIN_LOCK_UNLOCKED,
1088};
1089
1090/*
1091 * Called with interrupts disabled and udc->lock held.
1092 */
1093static void reset_all_endpoints(struct usba_udc *udc)
1094{
1095 struct usba_ep *ep;
1096 struct usba_request *req, *tmp_req;
1097
1098 usba_writel(udc, EPT_RST, ~0UL);
1099
1100 ep = to_usba_ep(udc->gadget.ep0);
1101 list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
1102 list_del_init(&req->queue);
1103 request_complete(ep, req, -ECONNRESET);
1104 }
1105
1106 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1107 if (ep->desc) {
1108 spin_unlock(&udc->lock);
1109 usba_ep_disable(&ep->ep);
1110 spin_lock(&udc->lock);
1111 }
1112 }
1113}
1114
1115static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
1116{
1117 struct usba_ep *ep;
1118
1119 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1120 return to_usba_ep(udc->gadget.ep0);
1121
1122 list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
1123 u8 bEndpointAddress;
1124
1125 if (!ep->desc)
1126 continue;
1127 bEndpointAddress = ep->desc->bEndpointAddress;
1128 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1129 continue;
1130 if ((bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
1131 == (wIndex & USB_ENDPOINT_NUMBER_MASK))
1132 return ep;
1133 }
1134
1135 return NULL;
1136}
1137
/*
 * Force a protocol STALL on the given endpoint and rearm ep0's state
 * machine to wait for the next SETUP packet.
 *
 * Called with interrupts disabled and udc->lock held.
 */
static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep)
{
	usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
	ep->state = WAIT_FOR_SETUP;
}
1144
1145static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep)
1146{
1147 if (usba_ep_readl(ep, STA) & USBA_FORCE_STALL)
1148 return 1;
1149 return 0;
1150}
1151
1152static inline void set_address(struct usba_udc *udc, unsigned int addr)
1153{
1154 u32 regval;
1155
1156 DBG(DBG_BUS, "setting address %u...\n", addr);
1157 regval = usba_readl(udc, CTRL);
1158 regval = USBA_BFINS(DEV_ADDR, addr, regval);
1159 usba_writel(udc, CTRL, regval);
1160}
1161
/*
 * Enter the USB 2.0 high-speed electrical test mode previously latched
 * in udc->test_mode by a SET_FEATURE(TEST_MODE) request.  The test
 * selector arrives in the high byte of wIndex, hence the 0x0100..0x0400
 * case values (Test_J, Test_K, Test_SE0_NAK, Test_Packet).
 *
 * Returns 0 on success or -EINVAL for an unknown test selector.
 */
static int do_test_mode(struct usba_udc *udc)
{
	/* The standard test packet defined by the USB 2.0 spec (7.1.20) */
	static const char test_packet_buffer[] = {
		/* JKJKJKJK * 9 */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		/* JJKKJJKK * 8 */
		0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
		/* JJJJKKKK * 8 */
		0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
		/* JJJJJJJKKKKKKK * 8 */
		0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		/* JJJJJJJK * 8 */
		0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
		/* {JKKKKKKK * 10}, JK */
		0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E
	};
	struct usba_ep *ep;
	struct device *dev = &udc->pdev->dev;
	int test_mode;

	test_mode = udc->test_mode;

	/* Start from a clean slate */
	reset_all_endpoints(udc);

	switch (test_mode) {
	case 0x0100:
		/* Test_J */
		usba_writel(udc, TST, USBA_TST_J_MODE);
		dev_info(dev, "Entering Test_J mode...\n");
		break;
	case 0x0200:
		/* Test_K */
		usba_writel(udc, TST, USBA_TST_K_MODE);
		dev_info(dev, "Entering Test_K mode...\n");
		break;
	case 0x0300:
		/*
		 * Test_SE0_NAK: Force high-speed mode and set up ep0
		 * for Bulk IN transfers
		 */
		ep = &usba_ep[0];
		usba_writel(udc, TST,
				USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH));
		usba_ep_writel(ep, CFG,
				USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
				| USBA_EPT_DIR_IN
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
				| USBA_BF(BK_NUMBER, 1));
		/* EPT_MAPPED confirms the hardware accepted the config */
		if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
			set_protocol_stall(udc, ep);
			dev_err(dev, "Test_SE0_NAK: ep0 not mapped\n");
		} else {
			usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
			dev_info(dev, "Entering Test_SE0_NAK mode...\n");
		}
		break;
	case 0x0400:
		/* Test_Packet: repeatedly transmit the standard test packet */
		ep = &usba_ep[0];
		usba_ep_writel(ep, CFG,
				USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
				| USBA_EPT_DIR_IN
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
				| USBA_BF(BK_NUMBER, 1));
		if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
			set_protocol_stall(udc, ep);
			dev_err(dev, "Test_Packet: ep0 not mapped\n");
		} else {
			usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
			usba_writel(udc, TST, USBA_TST_PKT_MODE);
			/* Load the pattern into the FIFO and mark it ready */
			copy_to_fifo(ep->fifo, test_packet_buffer,
					sizeof(test_packet_buffer));
			usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
			dev_info(dev, "Entering Test_Packet mode...\n");
		}
		break;
	default:
		dev_err(dev, "Invalid test mode: 0x%04x\n", test_mode);
		return -EINVAL;
	}

	return 0;
}
1247
1248/* Avoid overly long expressions */
1249static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq)
1250{
1251 if (crq->wValue == __constant_cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP))
1252 return true;
1253 return false;
1254}
1255
1256static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq)
1257{
1258 if (crq->wValue == __constant_cpu_to_le16(USB_DEVICE_TEST_MODE))
1259 return true;
1260 return false;
1261}
1262
1263static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq)
1264{
1265 if (crq->wValue == __constant_cpu_to_le16(USB_ENDPOINT_HALT))
1266 return true;
1267 return false;
1268}
1269
1270static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
1271 struct usb_ctrlrequest *crq)
1272{
1273 int retval = 0;;
1274
1275 switch (crq->bRequest) {
1276 case USB_REQ_GET_STATUS: {
1277 u16 status;
1278
1279 if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) {
1280 status = cpu_to_le16(udc->devstatus);
1281 } else if (crq->bRequestType
1282 == (USB_DIR_IN | USB_RECIP_INTERFACE)) {
1283 status = __constant_cpu_to_le16(0);
1284 } else if (crq->bRequestType
1285 == (USB_DIR_IN | USB_RECIP_ENDPOINT)) {
1286 struct usba_ep *target;
1287
1288 target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
1289 if (!target)
1290 goto stall;
1291
1292 status = 0;
1293 if (is_stalled(udc, target))
1294 status |= __constant_cpu_to_le16(1);
1295 } else
1296 goto delegate;
1297
1298 /* Write directly to the FIFO. No queueing is done. */
1299 if (crq->wLength != __constant_cpu_to_le16(sizeof(status)))
1300 goto stall;
1301 ep->state = DATA_STAGE_IN;
1302 __raw_writew(status, ep->fifo);
1303 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
1304 break;
1305 }
1306
1307 case USB_REQ_CLEAR_FEATURE: {
1308 if (crq->bRequestType == USB_RECIP_DEVICE) {
1309 if (feature_is_dev_remote_wakeup(crq))
1310 udc->devstatus
1311 &= ~(1 << USB_DEVICE_REMOTE_WAKEUP);
1312 else
1313 /* Can't CLEAR_FEATURE TEST_MODE */
1314 goto stall;
1315 } else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
1316 struct usba_ep *target;
1317
1318 if (crq->wLength != __constant_cpu_to_le16(0)
1319 || !feature_is_ep_halt(crq))
1320 goto stall;
1321 target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
1322 if (!target)
1323 goto stall;
1324
1325 usba_ep_writel(target, CLR_STA, USBA_FORCE_STALL);
1326 if (target->index != 0)
1327 usba_ep_writel(target, CLR_STA,
1328 USBA_TOGGLE_CLR);
1329 } else {
1330 goto delegate;
1331 }
1332
1333 send_status(udc, ep);
1334 break;
1335 }
1336
1337 case USB_REQ_SET_FEATURE: {
1338 if (crq->bRequestType == USB_RECIP_DEVICE) {
1339 if (feature_is_dev_test_mode(crq)) {
1340 send_status(udc, ep);
1341 ep->state = STATUS_STAGE_TEST;
1342 udc->test_mode = le16_to_cpu(crq->wIndex);
1343 return 0;
1344 } else if (feature_is_dev_remote_wakeup(crq)) {
1345 udc->devstatus |= 1 << USB_DEVICE_REMOTE_WAKEUP;
1346 } else {
1347 goto stall;
1348 }
1349 } else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
1350 struct usba_ep *target;
1351
1352 if (crq->wLength != __constant_cpu_to_le16(0)
1353 || !feature_is_ep_halt(crq))
1354 goto stall;
1355
1356 target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
1357 if (!target)
1358 goto stall;
1359
1360 usba_ep_writel(target, SET_STA, USBA_FORCE_STALL);
1361 } else
1362 goto delegate;
1363
1364 send_status(udc, ep);
1365 break;
1366 }
1367
1368 case USB_REQ_SET_ADDRESS:
1369 if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
1370 goto delegate;
1371
1372 set_address(udc, le16_to_cpu(crq->wValue));
1373 send_status(udc, ep);
1374 ep->state = STATUS_STAGE_ADDR;
1375 break;
1376
1377 default:
1378delegate:
1379 spin_unlock(&udc->lock);
1380 retval = udc->driver->setup(&udc->gadget, crq);
1381 spin_lock(&udc->lock);
1382 }
1383
1384 return retval;
1385
1386stall:
1387 printk(KERN_ERR
1388 "udc: %s: Invalid setup request: %02x.%02x v%04x i%04x l%d, "
1389 "halting endpoint...\n",
1390 ep->ep.name, crq->bRequestType, crq->bRequest,
1391 le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex),
1392 le16_to_cpu(crq->wLength));
1393 set_protocol_stall(udc, ep);
1394 return -1;
1395}
1396
/*
 * Interrupt handler for the control endpoint (ep0).  Drives the ep0
 * state machine through the setup, data and status stages: it services
 * TX_PK_RDY (FIFO ready for IN data), TX_COMPLETE (IN packet acked),
 * RX_BK_RDY (OUT data received) and RX_SETUP (SETUP packet received),
 * looping via the "restart" label until no serviced condition remains.
 *
 * Called with udc->lock held.
 */
static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 epstatus;
	u32 epctrl;

restart:
	epstatus = usba_ep_readl(ep, STA);
	epctrl = usba_ep_readl(ep, CTL);

	DBG(DBG_INT, "%s [%d]: s/%08x c/%08x\n",
			ep->ep.name, ep->state, epstatus, epctrl);

	req = NULL;
	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
				struct usba_request, queue);

	/* IN direction: FIFO can take another packet */
	if ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
		/*
		 * NOTE(review): req is dereferenced here without a NULL
		 * check; this relies on TX_PK_RDY interrupts only being
		 * enabled while a request is queued -- confirm.
		 */
		if (req->submitted)
			next_fifo_transaction(ep, req);
		else
			submit_request(ep, req);

		if (req->last_transaction) {
			/* All data written; wait for the final ack instead */
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
		}
		goto restart;
	}
	/* IN packet was acknowledged by the host */
	if ((epstatus & epctrl) & USBA_TX_COMPLETE) {
		usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE);

		switch (ep->state) {
		case DATA_STAGE_IN:
			/* IN data done; expect a zero-length OUT status */
			usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = STATUS_STAGE_OUT;
			break;
		case STATUS_STAGE_ADDR:
			/* Activate our new address */
			usba_writel(udc, CTRL, (usba_readl(udc, CTRL)
						| USBA_FADDR_EN));
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			break;
		case STATUS_STAGE_IN:
			/* IN status stage sent: the control transfer is done */
			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, 0);
				submit_next_request(ep);
			}
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			break;
		case STATUS_STAGE_TEST:
			/* SET_FEATURE(TEST_MODE) status done: enter the mode */
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			if (do_test_mode(udc))
				set_protocol_stall(udc, ep);
			break;
		default:
			printk(KERN_ERR
				"udc: %s: TXCOMP: Invalid endpoint state %d, "
				"halting endpoint...\n",
				ep->ep.name, ep->state);
			set_protocol_stall(udc, ep);
			break;
		}

		goto restart;
	}
	/* OUT direction: a bank of received data is ready */
	if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
		switch (ep->state) {
		case STATUS_STAGE_OUT:
			/* Zero-length OUT status received: transfer complete */
			usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);

			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, 0);
			}
			ep->state = WAIT_FOR_SETUP;
			break;

		case DATA_STAGE_OUT:
			receive_data(ep);
			break;

		default:
			usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			printk(KERN_ERR
				"udc: %s: RXRDY: Invalid endpoint state %d, "
				"halting endpoint...\n",
				ep->ep.name, ep->state);
			set_protocol_stall(udc, ep);
			break;
		}

		goto restart;
	}
	/* A SETUP packet has arrived */
	if (epstatus & USBA_RX_SETUP) {
		union {
			struct usb_ctrlrequest crq;
			unsigned long data[2];
		} crq;
		unsigned int pkt_len;
		int ret;

		if (ep->state != WAIT_FOR_SETUP) {
			/*
			 * Didn't expect a SETUP packet at this
			 * point. Clean up any pending requests (which
			 * may be successful).
			 */
			int status = -EPROTO;

			/*
			 * RXRDY and TXCOMP are dropped when SETUP
			 * packets arrive. Just pretend we received
			 * the status packet.
			 */
			if (ep->state == STATUS_STAGE_OUT
					|| ep->state == STATUS_STAGE_IN) {
				usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
				status = 0;
			}

			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, status);
			}
		}

		pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
		DBG(DBG_HW, "Packet length: %u\n", pkt_len);
		/* A SETUP packet is always exactly 8 bytes */
		if (pkt_len != sizeof(crq)) {
			printk(KERN_WARNING "udc: Invalid packet length %u "
				"(expected %lu)\n", pkt_len, sizeof(crq));
			set_protocol_stall(udc, ep);
			return;
		}

		DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo);
		copy_from_fifo(crq.data, ep->fifo, sizeof(crq));

		/* Free up one bank in the FIFO so that we can
		 * generate or receive a reply right away. */
		usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP);

		/* printk(KERN_DEBUG "setup: %d: %02x.%02x\n",
			ep->state, crq.crq.bRequestType,
			crq.crq.bRequest); */

		if (crq.crq.bRequestType & USB_DIR_IN) {
			/*
			 * The USB 2.0 spec states that "if wLength is
			 * zero, there is no data transfer phase."
			 * However, testusb #14 seems to actually
			 * expect a data phase even if wLength = 0...
			 */
			ep->state = DATA_STAGE_IN;
		} else {
			if (crq.crq.wLength != __constant_cpu_to_le16(0))
				ep->state = DATA_STAGE_OUT;
			else
				ep->state = STATUS_STAGE_IN;
		}

		ret = -1;
		if (ep->index == 0)
			ret = handle_ep0_setup(udc, ep, &crq.crq);
		else {
			/* Delegate to the gadget driver, unlocked */
			spin_unlock(&udc->lock);
			ret = udc->driver->setup(&udc->gadget, &crq.crq);
			spin_lock(&udc->lock);
		}

		DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n",
			crq.crq.bRequestType, crq.crq.bRequest,
			le16_to_cpu(crq.crq.wLength), ep->state, ret);

		if (ret < 0) {
			/* Let the host know that we failed */
			set_protocol_stall(udc, ep);
		}
	}
}
1586
/*
 * Interrupt handler for non-control endpoints (bulk/interrupt/iso).
 * Services TX_PK_RDY by feeding the FIFO (or finishing a DMA transfer
 * with a zero-length packet) and RX_BK_RDY by draining received data.
 *
 * Called with udc->lock held.
 */
static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 epstatus;
	u32 epctrl;

	epstatus = usba_ep_readl(ep, STA);
	epctrl = usba_ep_readl(ep, CTL);

	DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus);

	/* IN direction: loop as long as the FIFO can take another packet */
	while ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
		DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name);

		if (list_empty(&ep->queue)) {
			/* Spurious interrupt: nothing queued, mask it */
			dev_warn(&udc->pdev->dev, "ep_irq: queue empty\n");
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			return;
		}

		req = list_entry(ep->queue.next, struct usba_request, queue);

		if (req->using_dma) {
			/* Send a zero-length packet */
			usba_ep_writel(ep, SET_STA,
					USBA_TX_PK_RDY);
			usba_ep_writel(ep, CTL_DIS,
					USBA_TX_PK_RDY);
			list_del_init(&req->queue);
			submit_next_request(ep);
			request_complete(ep, req, 0);
		} else {
			/* PIO: push the next chunk through the FIFO */
			if (req->submitted)
				next_fifo_transaction(ep, req);
			else
				submit_request(ep, req);

			if (req->last_transaction) {
				/* Entire request has been written out */
				list_del_init(&req->queue);
				submit_next_request(ep);
				request_complete(ep, req, 0);
			}
		}

		/* Re-sample; another FIFO bank may have freed up meanwhile */
		epstatus = usba_ep_readl(ep, STA);
		epctrl = usba_ep_readl(ep, CTL);
	}
	/* OUT direction: a bank of received data is ready */
	if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
		DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
		receive_data(ep);
		usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
	}
}
1640
/*
 * DMA completion interrupt handler for one endpoint.  Completes the
 * request at the head of the queue when the DMA controller reports
 * end-of-transfer or end-of-buffer.
 *
 * Called with udc->lock held.
 */
static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 status, control, pending;

	status = usba_dma_readl(ep, STATUS);
	control = usba_dma_readl(ep, CONTROL);
#ifdef CONFIG_USB_GADGET_DEBUG_FS
	/* Remember the raw status for the debugfs interface */
	ep->last_dma_status = status;
#endif
	/* Only conditions that are both raised and enabled matter */
	pending = status & control;
	DBG(DBG_INT | DBG_DMA, "dma irq, s/%#08x, c/%#08x\n", status, control);

	if (status & USBA_DMA_CH_EN) {
		/* The channel should be idle once the transfer is done */
		dev_err(&udc->pdev->dev,
			"DMA_CH_EN is set after transfer is finished!\n");
		dev_err(&udc->pdev->dev,
			"status=%#08x, pending=%#08x, control=%#08x\n",
			status, pending, control);

		/*
		 * try to pretend nothing happened. We might have to
		 * do something here...
		 */
	}

	if (list_empty(&ep->queue))
		/* Might happen if a reset comes along at the right moment */
		return;

	if (pending & (USBA_DMA_END_TR_ST | USBA_DMA_END_BUF_ST)) {
		req = list_entry(ep->queue.next, struct usba_request, queue);
		/* Update req->actual etc. from the DMA status */
		usba_update_req(ep, req, status);

		list_del_init(&req->queue);
		submit_next_request(ep);
		request_complete(ep, req, 0);
	}
}
1680
/*
 * Top-level controller interrupt handler.  Dispatches bus events
 * (suspend, wakeup, resume, reset) and fans out per-endpoint DMA and
 * endpoint interrupts to usba_dma_irq()/usba_ep_irq()/
 * usba_control_irq().  On END_OF_RESET it reconfigures ep0 and records
 * the negotiated bus speed.
 */
static irqreturn_t usba_udc_irq(int irq, void *devid)
{
	struct usba_udc *udc = devid;
	u32 status;
	u32 dma_status;
	u32 ep_status;

	spin_lock(&udc->lock);

	status = usba_readl(udc, INT_STA);
	DBG(DBG_INT, "irq, status=%#08x\n", status);

	if (status & USBA_DET_SUSPEND) {
		usba_writel(udc, INT_CLR, USBA_DET_SUSPEND);
		DBG(DBG_BUS, "Suspend detected\n");
		/* Notify the gadget driver with the lock dropped */
		if (udc->gadget.speed != USB_SPEED_UNKNOWN
				&& udc->driver && udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (status & USBA_WAKE_UP) {
		usba_writel(udc, INT_CLR, USBA_WAKE_UP);
		DBG(DBG_BUS, "Wake Up CPU detected\n");
	}

	if (status & USBA_END_OF_RESUME) {
		usba_writel(udc, INT_CLR, USBA_END_OF_RESUME);
		DBG(DBG_BUS, "Resume detected\n");
		if (udc->gadget.speed != USB_SPEED_UNKNOWN
				&& udc->driver && udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	/* Per-endpoint DMA interrupts (ep0 has no DMA channel) */
	dma_status = USBA_BFEXT(DMA_INT, status);
	if (dma_status) {
		int i;

		for (i = 1; i < USBA_NR_ENDPOINTS; i++)
			if (dma_status & (1 << i))
				usba_dma_irq(udc, &usba_ep[i]);
	}

	/* Per-endpoint FIFO interrupts */
	ep_status = USBA_BFEXT(EPT_INT, status);
	if (ep_status) {
		int i;

		for (i = 0; i < USBA_NR_ENDPOINTS; i++)
			if (ep_status & (1 << i)) {
				if (ep_is_control(&usba_ep[i]))
					usba_control_irq(udc, &usba_ep[i]);
				else
					usba_ep_irq(udc, &usba_ep[i]);
			}
	}

	if (status & USBA_END_OF_RESET) {
		struct usba_ep *ep0;

		usba_writel(udc, INT_CLR, USBA_END_OF_RESET);
		reset_all_endpoints(udc);

		/* HIGH_SPEED reflects the speed negotiated during reset */
		if (status & USBA_HIGH_SPEED) {
			DBG(DBG_BUS, "High-speed bus reset detected\n");
			udc->gadget.speed = USB_SPEED_HIGH;
		} else {
			DBG(DBG_BUS, "Full-speed bus reset detected\n");
			udc->gadget.speed = USB_SPEED_FULL;
		}

		/* Reconfigure and re-enable ep0 for SETUP reception */
		ep0 = &usba_ep[0];
		ep0->desc = &usba_ep0_desc;
		ep0->state = WAIT_FOR_SETUP;
		usba_ep_writel(ep0, CFG,
				(USBA_BF(EPT_SIZE, EP0_EPT_SIZE)
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL)
				| USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE)));
		usba_ep_writel(ep0, CTL_ENB,
				USBA_EPT_ENABLE | USBA_RX_SETUP);
		usba_writel(udc, INT_ENB,
				(usba_readl(udc, INT_ENB)
				| USBA_BF(EPT_INT, 1)
				| USBA_DET_SUSPEND
				| USBA_END_OF_RESUME));

		if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
			dev_warn(&udc->pdev->dev,
				"WARNING: EP0 configuration is invalid!\n");
	}

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
1780
/*
 * GPIO interrupt handler for Vbus sensing.  Enables the controller
 * when Vbus appears and disables it (notifying the gadget driver of
 * the disconnect) when Vbus goes away.
 */
static irqreturn_t usba_vbus_irq(int irq, void *devid)
{
	struct usba_udc *udc = devid;
	int vbus;

	/* debounce */
	udelay(10);

	spin_lock(&udc->lock);

	/* May happen if Vbus pin toggles during probe() */
	if (!udc->driver)
		goto out;

	vbus = gpio_get_value(udc->vbus_pin);
	/* Only react to actual level changes */
	if (vbus != udc->vbus_prev) {
		if (vbus) {
			/* Vbus present: enable the controller, await reset */
			usba_writel(udc, CTRL, USBA_EN_USBA);
			usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
		} else {
			/* Vbus gone: shut down and report the disconnect */
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			reset_all_endpoints(udc);
			usba_writel(udc, CTRL, 0);
			/* Callback runs with the lock dropped */
			spin_unlock(&udc->lock);
			udc->driver->disconnect(&udc->gadget);
			spin_lock(&udc->lock);
		}
		udc->vbus_prev = vbus;
	}

out:
	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
1816
/*
 * Bind a gadget driver to this controller.  Enables the clocks, calls
 * the driver's bind() callback, arms the Vbus interrupt (if a Vbus
 * GPIO is configured) and, when Vbus is already present, enables the
 * controller so the host can reset-enumerate us.
 *
 * Returns 0 on success, -ENODEV if the controller was never probed,
 * -EBUSY if a driver is already bound, or the bind() error.
 */
int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	struct usba_udc *udc = &the_udc;
	unsigned long flags;
	int ret;

	if (!udc->pdev)
		return -ENODEV;

	spin_lock_irqsave(&udc->lock, flags);
	if (udc->driver) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EBUSY;
	}

	udc->devstatus = 1 << USB_DEVICE_SELF_POWERED;
	udc->driver = driver;
	udc->gadget.dev.driver = &driver->driver;
	spin_unlock_irqrestore(&udc->lock, flags);

	clk_enable(udc->pclk);
	clk_enable(udc->hclk);

	ret = driver->bind(&udc->gadget);
	if (ret) {
		DBG(DBG_ERR, "Could not bind to driver %s: error %d\n",
			driver->driver.name, ret);
		goto err_driver_bind;
	}

	DBG(DBG_GADGET, "registered driver `%s'\n", driver->driver.name);

	udc->vbus_prev = 0;
	/* -1 means no Vbus sense GPIO; assume Vbus is always present */
	if (udc->vbus_pin != -1)
		enable_irq(gpio_to_irq(udc->vbus_pin));

	/* If Vbus is present, enable the controller and wait for reset */
	spin_lock_irqsave(&udc->lock, flags);
	if (vbus_is_present(udc) && udc->vbus_prev == 0) {
		usba_writel(udc, CTRL, USBA_EN_USBA);
		usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;

err_driver_bind:
	udc->driver = NULL;
	udc->gadget.dev.driver = NULL;
	return ret;
}
1868EXPORT_SYMBOL(usb_gadget_register_driver);
1869
/*
 * Unbind the currently registered gadget driver: mask the Vbus
 * interrupt, reset the endpoints, disable the controller (which also
 * drops the D+ pullup), call the driver's unbind() and gate the clocks.
 *
 * Returns 0 on success, -ENODEV if never probed, -EINVAL if the given
 * driver is not the one currently bound.
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct usba_udc *udc = &the_udc;
	unsigned long flags;

	if (!udc->pdev)
		return -ENODEV;
	if (driver != udc->driver)
		return -EINVAL;

	if (udc->vbus_pin != -1)
		disable_irq(gpio_to_irq(udc->vbus_pin));

	spin_lock_irqsave(&udc->lock, flags);
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	reset_all_endpoints(udc);
	spin_unlock_irqrestore(&udc->lock, flags);

	/* This will also disable the DP pullup */
	usba_writel(udc, CTRL, 0);

	driver->unbind(&udc->gadget);
	udc->gadget.dev.driver = NULL;
	udc->driver = NULL;

	clk_disable(udc->hclk);
	clk_disable(udc->pclk);

	DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name);

	return 0;
}
1902EXPORT_SYMBOL(usb_gadget_unregister_driver);
1903
/*
 * Platform device probe: map the control and FIFO memory regions,
 * acquire the peripheral and AHB clocks, initialize the endpoint
 * table, register the interrupt handler and (optionally) the Vbus
 * sense GPIO interrupt, and add the gadget device.
 *
 * Error paths unwind acquired resources in reverse order via gotos.
 */
static int __init usba_udc_probe(struct platform_device *pdev)
{
	struct usba_platform_data *pdata = pdev->dev.platform_data;
	struct resource *regs, *fifo;
	struct clk *pclk, *hclk;
	struct usba_udc *udc = &the_udc;
	int irq, ret, i;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID);
	fifo = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID);
	if (!regs || !fifo)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	pclk = clk_get(&pdev->dev, "pclk");
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);
	hclk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(hclk)) {
		ret = PTR_ERR(hclk);
		goto err_get_hclk;
	}

	udc->pdev = pdev;
	udc->pclk = pclk;
	udc->hclk = hclk;
	udc->vbus_pin = -1;	/* no Vbus sense GPIO until proven otherwise */

	ret = -ENOMEM;
	udc->regs = ioremap(regs->start, regs->end - regs->start + 1);
	if (!udc->regs) {
		dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n");
		goto err_map_regs;
	}
	dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n",
		 (unsigned long)regs->start, udc->regs);
	udc->fifo = ioremap(fifo->start, fifo->end - fifo->start + 1);
	if (!udc->fifo) {
		dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n");
		goto err_map_fifo;
	}
	dev_info(&pdev->dev, "FIFO at 0x%08lx mapped at %p\n",
		 (unsigned long)fifo->start, udc->fifo);

	device_initialize(&udc->gadget.dev);
	udc->gadget.dev.parent = &pdev->dev;
	udc->gadget.dev.dma_mask = pdev->dev.dma_mask;

	platform_set_drvdata(pdev, udc);

	/* Make sure we start from a clean slate */
	clk_enable(pclk);
	usba_writel(udc, CTRL, 0);
	clk_disable(pclk);

	/*
	 * Fill in per-endpoint register/FIFO pointers.  ep0 is handled
	 * separately because it is not linked into gadget.ep_list.
	 */
	INIT_LIST_HEAD(&usba_ep[0].ep.ep_list);
	usba_ep[0].ep_regs = udc->regs + USBA_EPT_BASE(0);
	usba_ep[0].dma_regs = udc->regs + USBA_DMA_BASE(0);
	usba_ep[0].fifo = udc->fifo + USBA_FIFO_BASE(0);
	for (i = 1; i < ARRAY_SIZE(usba_ep); i++) {
		struct usba_ep *ep = &usba_ep[i];

		ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
		ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
		ep->fifo = udc->fifo + USBA_FIFO_BASE(i);

		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
	}

	ret = request_irq(irq, usba_udc_irq, 0, "atmel_usba_udc", udc);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request irq %d (error %d)\n",
			irq, ret);
		goto err_request_irq;
	}
	udc->irq = irq;

	ret = device_add(&udc->gadget.dev);
	if (ret) {
		dev_dbg(&pdev->dev, "Could not add gadget: %d\n", ret);
		goto err_device_add;
	}

	/* Vbus sensing is optional; fall back to "always on" on failure */
	if (pdata && pdata->vbus_pin != GPIO_PIN_NONE) {
		if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) {
			udc->vbus_pin = pdata->vbus_pin;

			ret = request_irq(gpio_to_irq(udc->vbus_pin),
					usba_vbus_irq, 0,
					"atmel_usba_udc", udc);
			if (ret) {
				gpio_free(udc->vbus_pin);
				udc->vbus_pin = -1;
				dev_warn(&udc->pdev->dev,
					 "failed to request vbus irq; "
					 "assuming always on\n");
			} else {
				/* Enabled later, when a driver binds */
				disable_irq(gpio_to_irq(udc->vbus_pin));
			}
		}
	}

	usba_init_debugfs(udc);
	for (i = 1; i < ARRAY_SIZE(usba_ep); i++)
		usba_ep_init_debugfs(udc, &usba_ep[i]);

	return 0;

err_device_add:
	free_irq(irq, udc);
err_request_irq:
	iounmap(udc->fifo);
err_map_fifo:
	iounmap(udc->regs);
err_map_regs:
	clk_put(hclk);
err_get_hclk:
	clk_put(pclk);

	platform_set_drvdata(pdev, NULL);

	return ret;
}
2030
/*
 * Platform device removal: tear down debugfs entries, release the
 * Vbus GPIO and interrupts, unmap the register/FIFO regions, release
 * the clocks and unregister the gadget device.
 */
static int __exit usba_udc_remove(struct platform_device *pdev)
{
	struct usba_udc *udc;
	int i;

	udc = platform_get_drvdata(pdev);

	for (i = 1; i < ARRAY_SIZE(usba_ep); i++)
		usba_ep_cleanup_debugfs(&usba_ep[i]);
	usba_cleanup_debugfs(udc);

	if (udc->vbus_pin != -1)
		gpio_free(udc->vbus_pin);

	free_irq(udc->irq, udc);
	iounmap(udc->fifo);
	iounmap(udc->regs);
	clk_put(udc->hclk);
	clk_put(udc->pclk);

	device_unregister(&udc->gadget.dev);

	return 0;
}
2055
/*
 * Platform driver definition.  The probe routine is passed to
 * platform_driver_probe() in udc_init() instead of being listed here,
 * so it can live in __init memory.
 */
static struct platform_driver udc_driver = {
	.remove = __exit_p(usba_udc_remove),
	.driver = {
		.name = "atmel_usba_udc",
	},
};
2062
/* Module init: register the platform driver with a one-shot probe. */
static int __init udc_init(void)
{
	return platform_driver_probe(&udc_driver, usba_udc_probe);
}
2067module_init(udc_init);
2068
/* Module exit: unregister the platform driver. */
static void __exit udc_exit(void)
{
	platform_driver_unregister(&udc_driver);
}
2073module_exit(udc_exit);
2074
2075MODULE_DESCRIPTION("Atmel USBA UDC driver");
2076MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
2077MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/atmel_usba_udc.h b/drivers/usb/gadget/atmel_usba_udc.h
new file mode 100644
index 0000000000..a68304e31a
--- /dev/null
+++ b/drivers/usb/gadget/atmel_usba_udc.h
@@ -0,0 +1,352 @@
1/*
2 * Driver for the Atmel USBA high speed USB device controller
3 *
4 * Copyright (C) 2005-2007 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef __LINUX_USB_GADGET_USBA_UDC_H__
11#define __LINUX_USB_GADGET_USBA_UDC_H__
12
13/* USB register offsets */
14#define USBA_CTRL 0x0000
15#define USBA_FNUM 0x0004
16#define USBA_INT_ENB 0x0010
17#define USBA_INT_STA 0x0014
18#define USBA_INT_CLR 0x0018
19#define USBA_EPT_RST 0x001c
20#define USBA_TST 0x00e0
21
22/* USB endpoint register offsets */
23#define USBA_EPT_CFG 0x0000
24#define USBA_EPT_CTL_ENB 0x0004
25#define USBA_EPT_CTL_DIS 0x0008
26#define USBA_EPT_CTL 0x000c
27#define USBA_EPT_SET_STA 0x0014
28#define USBA_EPT_CLR_STA 0x0018
29#define USBA_EPT_STA 0x001c
30
31/* USB DMA register offsets */
32#define USBA_DMA_NXT_DSC 0x0000
33#define USBA_DMA_ADDRESS 0x0004
34#define USBA_DMA_CONTROL 0x0008
35#define USBA_DMA_STATUS 0x000c
36
37/* Bitfields in CTRL */
38#define USBA_DEV_ADDR_OFFSET 0
39#define USBA_DEV_ADDR_SIZE 7
40#define USBA_FADDR_EN (1 << 7)
41#define USBA_EN_USBA (1 << 8)
42#define USBA_DETACH (1 << 9)
43#define USBA_REMOTE_WAKE_UP (1 << 10)
44
45/* Bitfields in FNUM */
46#define USBA_MICRO_FRAME_NUM_OFFSET 0
47#define USBA_MICRO_FRAME_NUM_SIZE 3
48#define USBA_FRAME_NUMBER_OFFSET 3
49#define USBA_FRAME_NUMBER_SIZE 11
50#define USBA_FRAME_NUM_ERROR (1 << 31)
51
52/* Bitfields in INT_ENB/INT_STA/INT_CLR */
53#define USBA_HIGH_SPEED (1 << 0)
54#define USBA_DET_SUSPEND (1 << 1)
55#define USBA_MICRO_SOF (1 << 2)
56#define USBA_SOF (1 << 3)
57#define USBA_END_OF_RESET (1 << 4)
58#define USBA_WAKE_UP (1 << 5)
59#define USBA_END_OF_RESUME (1 << 6)
60#define USBA_UPSTREAM_RESUME (1 << 7)
61#define USBA_EPT_INT_OFFSET 8
62#define USBA_EPT_INT_SIZE 16
63#define USBA_DMA_INT_OFFSET 24
64#define USBA_DMA_INT_SIZE 8
65
66/* Bitfields in EPT_RST */
67#define USBA_RST_OFFSET 0
68#define USBA_RST_SIZE 16
69
70/* Bitfields in USBA_TST */
71#define USBA_SPEED_CFG_OFFSET 0
72#define USBA_SPEED_CFG_SIZE 2
73#define USBA_TST_J_MODE (1 << 2)
74#define USBA_TST_K_MODE (1 << 3)
75#define USBA_TST_PKT_MODE (1 << 4)
76#define USBA_OPMODE2 (1 << 5)
77
78/* Bitfields in EPT_CFG */
79#define USBA_EPT_SIZE_OFFSET 0
80#define USBA_EPT_SIZE_SIZE 3
81#define USBA_EPT_DIR_IN (1 << 3)
82#define USBA_EPT_TYPE_OFFSET 4
83#define USBA_EPT_TYPE_SIZE 2
84#define USBA_BK_NUMBER_OFFSET 6
85#define USBA_BK_NUMBER_SIZE 2
86#define USBA_NB_TRANS_OFFSET 8
87#define USBA_NB_TRANS_SIZE 2
88#define USBA_EPT_MAPPED (1 << 31)
89
90/* Bitfields in EPT_CTL/EPT_CTL_ENB/EPT_CTL_DIS */
91#define USBA_EPT_ENABLE (1 << 0)
92#define USBA_AUTO_VALID (1 << 1)
93#define USBA_INTDIS_DMA (1 << 3)
94#define USBA_NYET_DIS (1 << 4)
95#define USBA_DATAX_RX (1 << 6)
96#define USBA_MDATA_RX (1 << 7)
97/* Bits 8-15 and 31 enable interrupts for respective bits in EPT_STA */
98#define USBA_BUSY_BANK_IE (1 << 18)
99
100/* Bitfields in EPT_SET_STA/EPT_CLR_STA/EPT_STA */
101#define USBA_FORCE_STALL (1 << 5)
102#define USBA_TOGGLE_CLR (1 << 6)
103#define USBA_TOGGLE_SEQ_OFFSET 6
104#define USBA_TOGGLE_SEQ_SIZE 2
105#define USBA_ERR_OVFLW (1 << 8)
106#define USBA_RX_BK_RDY (1 << 9)
107#define USBA_KILL_BANK (1 << 9)
108#define USBA_TX_COMPLETE (1 << 10)
109#define USBA_TX_PK_RDY (1 << 11)
110#define USBA_ISO_ERR_TRANS (1 << 11)
111#define USBA_RX_SETUP (1 << 12)
112#define USBA_ISO_ERR_FLOW (1 << 12)
113#define USBA_STALL_SENT (1 << 13)
114#define USBA_ISO_ERR_CRC (1 << 13)
115#define USBA_ISO_ERR_NBTRANS (1 << 13)
116#define USBA_NAK_IN (1 << 14)
117#define USBA_ISO_ERR_FLUSH (1 << 14)
118#define USBA_NAK_OUT (1 << 15)
119#define USBA_CURRENT_BANK_OFFSET 16
120#define USBA_CURRENT_BANK_SIZE 2
121#define USBA_BUSY_BANKS_OFFSET 18
122#define USBA_BUSY_BANKS_SIZE 2
123#define USBA_BYTE_COUNT_OFFSET 20
124#define USBA_BYTE_COUNT_SIZE 11
125#define USBA_SHORT_PACKET (1 << 31)
126
127/* Bitfields in DMA_CONTROL */
128#define USBA_DMA_CH_EN (1 << 0)
129#define USBA_DMA_LINK (1 << 1)
130#define USBA_DMA_END_TR_EN (1 << 2)
131#define USBA_DMA_END_BUF_EN (1 << 3)
132#define USBA_DMA_END_TR_IE (1 << 4)
133#define USBA_DMA_END_BUF_IE (1 << 5)
134#define USBA_DMA_DESC_LOAD_IE (1 << 6)
135#define USBA_DMA_BURST_LOCK (1 << 7)
136#define USBA_DMA_BUF_LEN_OFFSET 16
137#define USBA_DMA_BUF_LEN_SIZE 16
138
139/* Bitfields in DMA_STATUS */
140#define USBA_DMA_CH_ACTIVE (1 << 1)
141#define USBA_DMA_END_TR_ST (1 << 4)
142#define USBA_DMA_END_BUF_ST (1 << 5)
143#define USBA_DMA_DESC_LOAD_ST (1 << 6)
144
145/* Constants for SPEED_CFG */
146#define USBA_SPEED_CFG_NORMAL 0
147#define USBA_SPEED_CFG_FORCE_HIGH 2
148#define USBA_SPEED_CFG_FORCE_FULL 3
149
150/* Constants for EPT_SIZE */
151#define USBA_EPT_SIZE_8 0
152#define USBA_EPT_SIZE_16 1
153#define USBA_EPT_SIZE_32 2
154#define USBA_EPT_SIZE_64 3
155#define USBA_EPT_SIZE_128 4
156#define USBA_EPT_SIZE_256 5
157#define USBA_EPT_SIZE_512 6
158#define USBA_EPT_SIZE_1024 7
159
160/* Constants for EPT_TYPE */
161#define USBA_EPT_TYPE_CONTROL 0
162#define USBA_EPT_TYPE_ISO 1
163#define USBA_EPT_TYPE_BULK 2
164#define USBA_EPT_TYPE_INT 3
165
166/* Constants for BK_NUMBER */
167#define USBA_BK_NUMBER_ZERO 0
168#define USBA_BK_NUMBER_ONE 1
169#define USBA_BK_NUMBER_DOUBLE 2
170#define USBA_BK_NUMBER_TRIPLE 3
171
172/* Bit manipulation macros */
173#define USBA_BF(name, value) \
174 (((value) & ((1 << USBA_##name##_SIZE) - 1)) \
175 << USBA_##name##_OFFSET)
176#define USBA_BFEXT(name, value) \
177 (((value) >> USBA_##name##_OFFSET) \
178 & ((1 << USBA_##name##_SIZE) - 1))
179#define USBA_BFINS(name, value, old) \
180 (((old) & ~(((1 << USBA_##name##_SIZE) - 1) \
181 << USBA_##name##_OFFSET)) \
182 | USBA_BF(name, value))
183
184/* Register access macros */
185#define usba_readl(udc, reg) \
186 __raw_readl((udc)->regs + USBA_##reg)
187#define usba_writel(udc, reg, value) \
188 __raw_writel((value), (udc)->regs + USBA_##reg)
189#define usba_ep_readl(ep, reg) \
190 __raw_readl((ep)->ep_regs + USBA_EPT_##reg)
191#define usba_ep_writel(ep, reg, value) \
192 __raw_writel((value), (ep)->ep_regs + USBA_EPT_##reg)
193#define usba_dma_readl(ep, reg) \
194 __raw_readl((ep)->dma_regs + USBA_DMA_##reg)
195#define usba_dma_writel(ep, reg, value) \
196 __raw_writel((value), (ep)->dma_regs + USBA_DMA_##reg)
197
198/* Calculate base address for a given endpoint or DMA controller */
199#define USBA_EPT_BASE(x) (0x100 + (x) * 0x20)
200#define USBA_DMA_BASE(x) (0x300 + (x) * 0x10)
201#define USBA_FIFO_BASE(x) ((x) << 16)
202
203/* Synth parameters */
204#define USBA_NR_ENDPOINTS 7
205
206#define EP0_FIFO_SIZE 64
207#define EP0_EPT_SIZE USBA_EPT_SIZE_64
208#define EP0_NR_BANKS 1
209
210/*
211 * REVISIT: Try to eliminate this value. Can we rely on req->mapped to
212 * provide this information?
213 */
214#define DMA_ADDR_INVALID (~(dma_addr_t)0)
215
216#define FIFO_IOMEM_ID 0
217#define CTRL_IOMEM_ID 1
218
219#ifdef DEBUG
220#define DBG_ERR 0x0001 /* report all error returns */
221#define DBG_HW 0x0002 /* debug hardware initialization */
222#define DBG_GADGET 0x0004 /* calls to/from gadget driver */
223#define DBG_INT 0x0008 /* interrupts */
224#define DBG_BUS 0x0010 /* report changes in bus state */
225#define DBG_QUEUE 0x0020 /* debug request queue processing */
226#define DBG_FIFO 0x0040 /* debug FIFO contents */
227#define DBG_DMA 0x0080 /* debug DMA handling */
228#define DBG_REQ 0x0100 /* print out queued request length */
229#define DBG_ALL 0xffff
230#define DBG_NONE 0x0000
231
232#define DEBUG_LEVEL (DBG_ERR)
233#define DBG(level, fmt, ...) \
234 do { \
235 if ((level) & DEBUG_LEVEL) \
236 printk(KERN_DEBUG "udc: " fmt, ## __VA_ARGS__); \
237 } while (0)
238#else
239#define DBG(level, fmt...)
240#endif
241
242enum usba_ctrl_state {
243 WAIT_FOR_SETUP,
244 DATA_STAGE_IN,
245 DATA_STAGE_OUT,
246 STATUS_STAGE_IN,
247 STATUS_STAGE_OUT,
248 STATUS_STAGE_ADDR,
249 STATUS_STAGE_TEST,
250};
251/*
252 EP_STATE_IDLE,
253 EP_STATE_SETUP,
254 EP_STATE_IN_DATA,
255 EP_STATE_OUT_DATA,
256 EP_STATE_SET_ADDR_STATUS,
257 EP_STATE_RX_STATUS,
258 EP_STATE_TX_STATUS,
259 EP_STATE_HALT,
260*/
261
262struct usba_dma_desc {
263 dma_addr_t next;
264 dma_addr_t addr;
265 u32 ctrl;
266};
267
268struct usba_ep {
269 int state;
270 void __iomem *ep_regs;
271 void __iomem *dma_regs;
272 void __iomem *fifo;
273 struct usb_ep ep;
274 struct usba_udc *udc;
275
276 struct list_head queue;
277 const struct usb_endpoint_descriptor *desc;
278
279 u16 fifo_size;
280 u8 nr_banks;
281 u8 index;
282 unsigned int can_dma:1;
283 unsigned int can_isoc:1;
284 unsigned int is_isoc:1;
285 unsigned int is_in:1;
286
287#ifdef CONFIG_USB_GADGET_DEBUG_FS
288 u32 last_dma_status;
289 struct dentry *debugfs_dir;
290 struct dentry *debugfs_queue;
291 struct dentry *debugfs_dma_status;
292 struct dentry *debugfs_state;
293#endif
294};
295
296struct usba_request {
297 struct usb_request req;
298 struct list_head queue;
299
300 u32 ctrl;
301
302 unsigned int submitted:1;
303 unsigned int last_transaction:1;
304 unsigned int using_dma:1;
305 unsigned int mapped:1;
306};
307
308struct usba_udc {
309 /* Protect hw registers from concurrent modifications */
310 spinlock_t lock;
311
312 void __iomem *regs;
313 void __iomem *fifo;
314
315 struct usb_gadget gadget;
316 struct usb_gadget_driver *driver;
317 struct platform_device *pdev;
318 int irq;
319 int vbus_pin;
320 struct clk *pclk;
321 struct clk *hclk;
322
323 u16 devstatus;
324
325 u16 test_mode;
326 int vbus_prev;
327
328#ifdef CONFIG_USB_GADGET_DEBUG_FS
329 struct dentry *debugfs_root;
330 struct dentry *debugfs_regs;
331#endif
332};
333
334static inline struct usba_ep *to_usba_ep(struct usb_ep *ep)
335{
336 return container_of(ep, struct usba_ep, ep);
337}
338
339static inline struct usba_request *to_usba_req(struct usb_request *req)
340{
341 return container_of(req, struct usba_request, req);
342}
343
344static inline struct usba_udc *to_usba_udc(struct usb_gadget *gadget)
345{
346 return container_of(gadget, struct usba_udc, gadget);
347}
348
349#define ep_is_control(ep) ((ep)->index == 0)
350#define ep_is_idle(ep) ((ep)->state == EP_STATE_IDLE)
351
352#endif /* __LINUX_USB_GADGET_USBA_UDC_H */
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index c6760aee1e..a4e54b2743 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -25,7 +25,7 @@
25#include <linux/device.h> 25#include <linux/device.h>
26 26
27#include <linux/usb/ch9.h> 27#include <linux/usb/ch9.h>
28#include <linux/usb_gadget.h> 28#include <linux/usb/gadget.h>
29 29
30 30
31/** 31/**
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index d008d1360a..9db2482bdf 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -46,7 +46,7 @@
46#include <linux/interrupt.h> 46#include <linux/interrupt.h>
47#include <linux/platform_device.h> 47#include <linux/platform_device.h>
48#include <linux/usb.h> 48#include <linux/usb.h>
49#include <linux/usb_gadget.h> 49#include <linux/usb/gadget.h>
50 50
51#include <asm/byteorder.h> 51#include <asm/byteorder.h>
52#include <asm/io.h> 52#include <asm/io.h>
@@ -962,13 +962,13 @@ static struct platform_driver dummy_udc_driver = {
962 962
963static int dummy_urb_enqueue ( 963static int dummy_urb_enqueue (
964 struct usb_hcd *hcd, 964 struct usb_hcd *hcd,
965 struct usb_host_endpoint *ep,
966 struct urb *urb, 965 struct urb *urb,
967 gfp_t mem_flags 966 gfp_t mem_flags
968) { 967) {
969 struct dummy *dum; 968 struct dummy *dum;
970 struct urbp *urbp; 969 struct urbp *urbp;
971 unsigned long flags; 970 unsigned long flags;
971 int rc;
972 972
973 if (!urb->transfer_buffer && urb->transfer_buffer_length) 973 if (!urb->transfer_buffer && urb->transfer_buffer_length)
974 return -EINVAL; 974 return -EINVAL;
@@ -980,6 +980,11 @@ static int dummy_urb_enqueue (
980 980
981 dum = hcd_to_dummy (hcd); 981 dum = hcd_to_dummy (hcd);
982 spin_lock_irqsave (&dum->lock, flags); 982 spin_lock_irqsave (&dum->lock, flags);
983 rc = usb_hcd_link_urb_to_ep(hcd, urb);
984 if (rc) {
985 kfree(urbp);
986 goto done;
987 }
983 988
984 if (!dum->udev) { 989 if (!dum->udev) {
985 dum->udev = urb->dev; 990 dum->udev = urb->dev;
@@ -996,36 +1001,35 @@ static int dummy_urb_enqueue (
996 if (!timer_pending (&dum->timer)) 1001 if (!timer_pending (&dum->timer))
997 mod_timer (&dum->timer, jiffies + 1); 1002 mod_timer (&dum->timer, jiffies + 1);
998 1003
999 spin_unlock_irqrestore (&dum->lock, flags); 1004 done:
1000 return 0; 1005 spin_unlock_irqrestore(&dum->lock, flags);
1006 return rc;
1001} 1007}
1002 1008
1003static int dummy_urb_dequeue (struct usb_hcd *hcd, struct urb *urb) 1009static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1004{ 1010{
1005 struct dummy *dum; 1011 struct dummy *dum;
1006 unsigned long flags; 1012 unsigned long flags;
1013 int rc;
1007 1014
1008 /* giveback happens automatically in timer callback, 1015 /* giveback happens automatically in timer callback,
1009 * so make sure the callback happens */ 1016 * so make sure the callback happens */
1010 dum = hcd_to_dummy (hcd); 1017 dum = hcd_to_dummy (hcd);
1011 spin_lock_irqsave (&dum->lock, flags); 1018 spin_lock_irqsave (&dum->lock, flags);
1012 if (dum->rh_state != DUMMY_RH_RUNNING && !list_empty(&dum->urbp_list)) 1019
1020 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
1021 if (!rc && dum->rh_state != DUMMY_RH_RUNNING &&
1022 !list_empty(&dum->urbp_list))
1013 mod_timer (&dum->timer, jiffies); 1023 mod_timer (&dum->timer, jiffies);
1014 spin_unlock_irqrestore (&dum->lock, flags);
1015 return 0;
1016}
1017 1024
1018static void maybe_set_status (struct urb *urb, int status) 1025 spin_unlock_irqrestore (&dum->lock, flags);
1019{ 1026 return rc;
1020 spin_lock (&urb->lock);
1021 if (urb->status == -EINPROGRESS)
1022 urb->status = status;
1023 spin_unlock (&urb->lock);
1024} 1027}
1025 1028
1026/* transfer up to a frame's worth; caller must own lock */ 1029/* transfer up to a frame's worth; caller must own lock */
1027static int 1030static int
1028transfer (struct dummy *dum, struct urb *urb, struct dummy_ep *ep, int limit) 1031transfer(struct dummy *dum, struct urb *urb, struct dummy_ep *ep, int limit,
1032 int *status)
1029{ 1033{
1030 struct dummy_request *req; 1034 struct dummy_request *req;
1031 1035
@@ -1088,24 +1092,20 @@ top:
1088 * 1092 *
1089 * partially filling a buffer optionally blocks queue advances 1093 * partially filling a buffer optionally blocks queue advances
1090 * (so completion handlers can clean up the queue) but we don't 1094 * (so completion handlers can clean up the queue) but we don't
1091 * need to emulate such data-in-flight. so we only show part 1095 * need to emulate such data-in-flight.
1092 * of the URB_SHORT_NOT_OK effect: completion status.
1093 */ 1096 */
1094 if (is_short) { 1097 if (is_short) {
1095 if (host_len == dev_len) { 1098 if (host_len == dev_len) {
1096 req->req.status = 0; 1099 req->req.status = 0;
1097 maybe_set_status (urb, 0); 1100 *status = 0;
1098 } else if (to_host) { 1101 } else if (to_host) {
1099 req->req.status = 0; 1102 req->req.status = 0;
1100 if (dev_len > host_len) 1103 if (dev_len > host_len)
1101 maybe_set_status (urb, -EOVERFLOW); 1104 *status = -EOVERFLOW;
1102 else 1105 else
1103 maybe_set_status (urb, 1106 *status = 0;
1104 (urb->transfer_flags
1105 & URB_SHORT_NOT_OK)
1106 ? -EREMOTEIO : 0);
1107 } else if (!to_host) { 1107 } else if (!to_host) {
1108 maybe_set_status (urb, 0); 1108 *status = 0;
1109 if (host_len > dev_len) 1109 if (host_len > dev_len)
1110 req->req.status = -EOVERFLOW; 1110 req->req.status = -EOVERFLOW;
1111 else 1111 else
@@ -1119,9 +1119,8 @@ top:
1119 req->req.status = 0; 1119 req->req.status = 0;
1120 if (urb->transfer_buffer_length == urb->actual_length 1120 if (urb->transfer_buffer_length == urb->actual_length
1121 && !(urb->transfer_flags 1121 && !(urb->transfer_flags
1122 & URB_ZERO_PACKET)) { 1122 & URB_ZERO_PACKET))
1123 maybe_set_status (urb, 0); 1123 *status = 0;
1124 }
1125 } 1124 }
1126 1125
1127 /* device side completion --> continuable */ 1126 /* device side completion --> continuable */
@@ -1137,7 +1136,7 @@ top:
1137 } 1136 }
1138 1137
1139 /* host side completion --> terminate */ 1138 /* host side completion --> terminate */
1140 if (urb->status != -EINPROGRESS) 1139 if (*status != -EINPROGRESS)
1141 break; 1140 break;
1142 1141
1143 /* rescan to continue with any other queued i/o */ 1142 /* rescan to continue with any other queued i/o */
@@ -1248,12 +1247,12 @@ restart:
1248 u8 address; 1247 u8 address;
1249 struct dummy_ep *ep = NULL; 1248 struct dummy_ep *ep = NULL;
1250 int type; 1249 int type;
1250 int status = -EINPROGRESS;
1251 1251
1252 urb = urbp->urb; 1252 urb = urbp->urb;
1253 if (urb->status != -EINPROGRESS) { 1253 if (urb->unlinked)
1254 /* likely it was just unlinked */
1255 goto return_urb; 1254 goto return_urb;
1256 } else if (dum->rh_state != DUMMY_RH_RUNNING) 1255 else if (dum->rh_state != DUMMY_RH_RUNNING)
1257 continue; 1256 continue;
1258 type = usb_pipetype (urb->pipe); 1257 type = usb_pipetype (urb->pipe);
1259 1258
@@ -1274,7 +1273,7 @@ restart:
1274 dev_dbg (dummy_dev(dum), 1273 dev_dbg (dummy_dev(dum),
1275 "no ep configured for urb %p\n", 1274 "no ep configured for urb %p\n",
1276 urb); 1275 urb);
1277 maybe_set_status (urb, -EPROTO); 1276 status = -EPROTO;
1278 goto return_urb; 1277 goto return_urb;
1279 } 1278 }
1280 1279
@@ -1289,7 +1288,7 @@ restart:
1289 /* NOTE: must not be iso! */ 1288 /* NOTE: must not be iso! */
1290 dev_dbg (dummy_dev(dum), "ep %s halted, urb %p\n", 1289 dev_dbg (dummy_dev(dum), "ep %s halted, urb %p\n",
1291 ep->ep.name, urb); 1290 ep->ep.name, urb);
1292 maybe_set_status (urb, -EPIPE); 1291 status = -EPIPE;
1293 goto return_urb; 1292 goto return_urb;
1294 } 1293 }
1295 /* FIXME make sure both ends agree on maxpacket */ 1294 /* FIXME make sure both ends agree on maxpacket */
@@ -1307,7 +1306,7 @@ restart:
1307 w_value = le16_to_cpu(setup.wValue); 1306 w_value = le16_to_cpu(setup.wValue);
1308 if (le16_to_cpu(setup.wLength) != 1307 if (le16_to_cpu(setup.wLength) !=
1309 urb->transfer_buffer_length) { 1308 urb->transfer_buffer_length) {
1310 maybe_set_status (urb, -EOVERFLOW); 1309 status = -EOVERFLOW;
1311 goto return_urb; 1310 goto return_urb;
1312 } 1311 }
1313 1312
@@ -1337,7 +1336,7 @@ restart:
1337 if (setup.bRequestType != Dev_Request) 1336 if (setup.bRequestType != Dev_Request)
1338 break; 1337 break;
1339 dum->address = w_value; 1338 dum->address = w_value;
1340 maybe_set_status (urb, 0); 1339 status = 0;
1341 dev_dbg (udc_dev(dum), "set_address = %d\n", 1340 dev_dbg (udc_dev(dum), "set_address = %d\n",
1342 w_value); 1341 w_value);
1343 value = 0; 1342 value = 0;
@@ -1364,7 +1363,7 @@ restart:
1364 if (value == 0) { 1363 if (value == 0) {
1365 dum->devstatus |= 1364 dum->devstatus |=
1366 (1 << w_value); 1365 (1 << w_value);
1367 maybe_set_status (urb, 0); 1366 status = 0;
1368 } 1367 }
1369 1368
1370 } else if (setup.bRequestType == Ep_Request) { 1369 } else if (setup.bRequestType == Ep_Request) {
@@ -1376,7 +1375,7 @@ restart:
1376 } 1375 }
1377 ep2->halted = 1; 1376 ep2->halted = 1;
1378 value = 0; 1377 value = 0;
1379 maybe_set_status (urb, 0); 1378 status = 0;
1380 } 1379 }
1381 break; 1380 break;
1382 case USB_REQ_CLEAR_FEATURE: 1381 case USB_REQ_CLEAR_FEATURE:
@@ -1386,7 +1385,7 @@ restart:
1386 dum->devstatus &= ~(1 << 1385 dum->devstatus &= ~(1 <<
1387 USB_DEVICE_REMOTE_WAKEUP); 1386 USB_DEVICE_REMOTE_WAKEUP);
1388 value = 0; 1387 value = 0;
1389 maybe_set_status (urb, 0); 1388 status = 0;
1390 break; 1389 break;
1391 default: 1390 default:
1392 value = -EOPNOTSUPP; 1391 value = -EOPNOTSUPP;
@@ -1401,7 +1400,7 @@ restart:
1401 } 1400 }
1402 ep2->halted = 0; 1401 ep2->halted = 0;
1403 value = 0; 1402 value = 0;
1404 maybe_set_status (urb, 0); 1403 status = 0;
1405 } 1404 }
1406 break; 1405 break;
1407 case USB_REQ_GET_STATUS: 1406 case USB_REQ_GET_STATUS:
@@ -1438,7 +1437,7 @@ restart:
1438 urb->actual_length = min (2, 1437 urb->actual_length = min (2,
1439 urb->transfer_buffer_length); 1438 urb->transfer_buffer_length);
1440 value = 0; 1439 value = 0;
1441 maybe_set_status (urb, 0); 1440 status = 0;
1442 } 1441 }
1443 break; 1442 break;
1444 } 1443 }
@@ -1465,7 +1464,7 @@ restart:
1465 dev_dbg (udc_dev(dum), 1464 dev_dbg (udc_dev(dum),
1466 "setup --> %d\n", 1465 "setup --> %d\n",
1467 value); 1466 value);
1468 maybe_set_status (urb, -EPIPE); 1467 status = -EPIPE;
1469 urb->actual_length = 0; 1468 urb->actual_length = 0;
1470 } 1469 }
1471 1470
@@ -1482,7 +1481,7 @@ restart:
1482 * report random errors, to debug drivers. 1481 * report random errors, to debug drivers.
1483 */ 1482 */
1484 limit = max (limit, periodic_bytes (dum, ep)); 1483 limit = max (limit, periodic_bytes (dum, ep));
1485 maybe_set_status (urb, -ENOSYS); 1484 status = -ENOSYS;
1486 break; 1485 break;
1487 1486
1488 case PIPE_INTERRUPT: 1487 case PIPE_INTERRUPT:
@@ -1496,23 +1495,23 @@ restart:
1496 default: 1495 default:
1497 treat_control_like_bulk: 1496 treat_control_like_bulk:
1498 ep->last_io = jiffies; 1497 ep->last_io = jiffies;
1499 total = transfer (dum, urb, ep, limit); 1498 total = transfer(dum, urb, ep, limit, &status);
1500 break; 1499 break;
1501 } 1500 }
1502 1501
1503 /* incomplete transfer? */ 1502 /* incomplete transfer? */
1504 if (urb->status == -EINPROGRESS) 1503 if (status == -EINPROGRESS)
1505 continue; 1504 continue;
1506 1505
1507return_urb: 1506return_urb:
1508 urb->hcpriv = NULL;
1509 list_del (&urbp->urbp_list); 1507 list_del (&urbp->urbp_list);
1510 kfree (urbp); 1508 kfree (urbp);
1511 if (ep) 1509 if (ep)
1512 ep->already_seen = ep->setup_stage = 0; 1510 ep->already_seen = ep->setup_stage = 0;
1513 1511
1512 usb_hcd_unlink_urb_from_ep(dummy_to_hcd(dum), urb);
1514 spin_unlock (&dum->lock); 1513 spin_unlock (&dum->lock);
1515 usb_hcd_giveback_urb (dummy_to_hcd(dum), urb); 1514 usb_hcd_giveback_urb(dummy_to_hcd(dum), urb, status);
1516 spin_lock (&dum->lock); 1515 spin_lock (&dum->lock);
1517 1516
1518 goto restart; 1517 goto restart;
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 3aa46cfa66..f9d07108bc 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -28,7 +28,7 @@
28#include <linux/string.h> 28#include <linux/string.h>
29 29
30#include <linux/usb/ch9.h> 30#include <linux/usb/ch9.h>
31#include <linux/usb_gadget.h> 31#include <linux/usb/gadget.h>
32 32
33#include "gadget_chips.h" 33#include "gadget_chips.h"
34 34
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index f70055473a..9e732bff9d 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -19,40 +19,18 @@
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21 21
22/* #define VERBOSE_DEBUG */
22 23
23// #define DEBUG 1
24// #define VERBOSE
25
26#include <linux/module.h>
27#include <linux/kernel.h> 24#include <linux/kernel.h>
28#include <linux/delay.h>
29#include <linux/ioport.h>
30#include <linux/slab.h>
31#include <linux/errno.h>
32#include <linux/init.h>
33#include <linux/timer.h>
34#include <linux/list.h>
35#include <linux/interrupt.h>
36#include <linux/utsname.h> 25#include <linux/utsname.h>
37#include <linux/device.h> 26#include <linux/device.h>
38#include <linux/moduleparam.h>
39#include <linux/ctype.h> 27#include <linux/ctype.h>
40 28#include <linux/etherdevice.h>
41#include <asm/byteorder.h> 29#include <linux/ethtool.h>
42#include <asm/io.h>
43#include <asm/irq.h>
44#include <asm/system.h>
45#include <asm/uaccess.h>
46#include <asm/unaligned.h>
47 30
48#include <linux/usb/ch9.h> 31#include <linux/usb/ch9.h>
49#include <linux/usb/cdc.h> 32#include <linux/usb/cdc.h>
50#include <linux/usb_gadget.h> 33#include <linux/usb/gadget.h>
51
52#include <linux/random.h>
53#include <linux/netdevice.h>
54#include <linux/etherdevice.h>
55#include <linux/ethtool.h>
56 34
57#include "gadget_chips.h" 35#include "gadget_chips.h"
58 36
@@ -356,15 +334,15 @@ module_param (qmult, uint, S_IRUGO|S_IWUSR);
356#define qlen(gadget) \ 334#define qlen(gadget) \
357 (DEFAULT_QLEN*((gadget->speed == USB_SPEED_HIGH) ? qmult : 1)) 335 (DEFAULT_QLEN*((gadget->speed == USB_SPEED_HIGH) ? qmult : 1))
358 336
359/* also defer IRQs on highspeed TX */
360#define TX_DELAY qmult
361
362static inline int BITRATE(struct usb_gadget *g) 337static inline int BITRATE(struct usb_gadget *g)
363{ 338{
364 return (g->speed == USB_SPEED_HIGH) ? HS_BPS : FS_BPS; 339 return (g->speed == USB_SPEED_HIGH) ? HS_BPS : FS_BPS;
365} 340}
366 341
367#else /* full speed (low speed doesn't do bulk) */ 342#else /* full speed (low speed doesn't do bulk) */
343
344#define qmult 1
345
368#define DEVSPEED USB_SPEED_FULL 346#define DEVSPEED USB_SPEED_FULL
369 347
370#define qlen(gadget) DEFAULT_QLEN 348#define qlen(gadget) DEFAULT_QLEN
@@ -390,7 +368,7 @@ static inline int BITRATE(struct usb_gadget *g)
390 do { } while (0) 368 do { } while (0)
391#endif /* DEBUG */ 369#endif /* DEBUG */
392 370
393#ifdef VERBOSE 371#ifdef VERBOSE_DEBUG
394#define VDEBUG DEBUG 372#define VDEBUG DEBUG
395#else 373#else
396#define VDEBUG(dev,fmt,args...) \ 374#define VDEBUG(dev,fmt,args...) \
@@ -830,8 +808,6 @@ static const struct usb_descriptor_header *fs_rndis_function [] = {
830}; 808};
831#endif 809#endif
832 810
833#ifdef CONFIG_USB_GADGET_DUALSPEED
834
835/* 811/*
836 * usb 2.0 devices need to expose both high speed and full speed 812 * usb 2.0 devices need to expose both high speed and full speed
837 * descriptors, unless they only run at full speed. 813 * descriptors, unless they only run at full speed.
@@ -934,18 +910,15 @@ static const struct usb_descriptor_header *hs_rndis_function [] = {
934 910
935 911
936/* maxpacket and other transfer characteristics vary by speed. */ 912/* maxpacket and other transfer characteristics vary by speed. */
937#define ep_desc(g,hs,fs) (((g)->speed==USB_SPEED_HIGH)?(hs):(fs)) 913static inline struct usb_endpoint_descriptor *
938 914ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *hs,
939#else 915 struct usb_endpoint_descriptor *fs)
940
941/* if there's no high speed support, maxpacket doesn't change. */
942#define ep_desc(g,hs,fs) (((void)(g)), (fs))
943
944static inline void __init hs_subset_descriptors(void)
945{ 916{
917 if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
918 return hs;
919 return fs;
946} 920}
947 921
948#endif /* !CONFIG_USB_GADGET_DUALSPEED */
949 922
950/*-------------------------------------------------------------------------*/ 923/*-------------------------------------------------------------------------*/
951 924
@@ -989,22 +962,19 @@ static struct usb_gadget_strings stringtab = {
989 * complications: class descriptors, and an altsetting. 962 * complications: class descriptors, and an altsetting.
990 */ 963 */
991static int 964static int
992config_buf (enum usb_device_speed speed, 965config_buf(struct usb_gadget *g, u8 *buf, u8 type, unsigned index, int is_otg)
993 u8 *buf, u8 type,
994 unsigned index, int is_otg)
995{ 966{
996 int len; 967 int len;
997 const struct usb_config_descriptor *config; 968 const struct usb_config_descriptor *config;
998 const struct usb_descriptor_header **function; 969 const struct usb_descriptor_header **function;
999#ifdef CONFIG_USB_GADGET_DUALSPEED 970 int hs = 0;
1000 int hs = (speed == USB_SPEED_HIGH);
1001 971
1002 if (type == USB_DT_OTHER_SPEED_CONFIG) 972 if (gadget_is_dualspeed(g)) {
1003 hs = !hs; 973 hs = (g->speed == USB_SPEED_HIGH);
974 if (type == USB_DT_OTHER_SPEED_CONFIG)
975 hs = !hs;
976 }
1004#define which_fn(t) (hs ? hs_ ## t ## _function : fs_ ## t ## _function) 977#define which_fn(t) (hs ? hs_ ## t ## _function : fs_ ## t ## _function)
1005#else
1006#define which_fn(t) (fs_ ## t ## _function)
1007#endif
1008 978
1009 if (index >= device_desc.bNumConfigurations) 979 if (index >= device_desc.bNumConfigurations)
1010 return -EINVAL; 980 return -EINVAL;
@@ -1217,7 +1187,7 @@ eth_set_config (struct eth_dev *dev, unsigned number, gfp_t gfp_flags)
1217 if (number) 1187 if (number)
1218 eth_reset_config (dev); 1188 eth_reset_config (dev);
1219 usb_gadget_vbus_draw(dev->gadget, 1189 usb_gadget_vbus_draw(dev->gadget,
1220 dev->gadget->is_otg ? 8 : 100); 1190 gadget_is_otg(dev->gadget) ? 8 : 100);
1221 } else { 1191 } else {
1222 char *speed; 1192 char *speed;
1223 unsigned power; 1193 unsigned power;
@@ -1399,24 +1369,22 @@ eth_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1399 value = min (wLength, (u16) sizeof device_desc); 1369 value = min (wLength, (u16) sizeof device_desc);
1400 memcpy (req->buf, &device_desc, value); 1370 memcpy (req->buf, &device_desc, value);
1401 break; 1371 break;
1402#ifdef CONFIG_USB_GADGET_DUALSPEED
1403 case USB_DT_DEVICE_QUALIFIER: 1372 case USB_DT_DEVICE_QUALIFIER:
1404 if (!gadget->is_dualspeed) 1373 if (!gadget_is_dualspeed(gadget))
1405 break; 1374 break;
1406 value = min (wLength, (u16) sizeof dev_qualifier); 1375 value = min (wLength, (u16) sizeof dev_qualifier);
1407 memcpy (req->buf, &dev_qualifier, value); 1376 memcpy (req->buf, &dev_qualifier, value);
1408 break; 1377 break;
1409 1378
1410 case USB_DT_OTHER_SPEED_CONFIG: 1379 case USB_DT_OTHER_SPEED_CONFIG:
1411 if (!gadget->is_dualspeed) 1380 if (!gadget_is_dualspeed(gadget))
1412 break; 1381 break;
1413 // FALLTHROUGH 1382 // FALLTHROUGH
1414#endif /* CONFIG_USB_GADGET_DUALSPEED */
1415 case USB_DT_CONFIG: 1383 case USB_DT_CONFIG:
1416 value = config_buf (gadget->speed, req->buf, 1384 value = config_buf(gadget, req->buf,
1417 wValue >> 8, 1385 wValue >> 8,
1418 wValue & 0xff, 1386 wValue & 0xff,
1419 gadget->is_otg); 1387 gadget_is_otg(gadget));
1420 if (value >= 0) 1388 if (value >= 0)
1421 value = min (wLength, (u16) value); 1389 value = min (wLength, (u16) value);
1422 break; 1390 break;
@@ -1585,12 +1553,12 @@ done_set_intf:
1585 && rndis_control_intf.bInterfaceNumber 1553 && rndis_control_intf.bInterfaceNumber
1586 == wIndex) { 1554 == wIndex) {
1587 u8 *buf; 1555 u8 *buf;
1556 u32 n;
1588 1557
1589 /* return the result */ 1558 /* return the result */
1590 buf = rndis_get_next_response (dev->rndis_config, 1559 buf = rndis_get_next_response(dev->rndis_config, &n);
1591 &value);
1592 if (buf) { 1560 if (buf) {
1593 memcpy (req->buf, buf, value); 1561 memcpy(req->buf, buf, n);
1594 req->complete = rndis_response_complete; 1562 req->complete = rndis_response_complete;
1595 rndis_free_response(dev->rndis_config, buf); 1563 rndis_free_response(dev->rndis_config, buf);
1596 } 1564 }
@@ -1989,8 +1957,20 @@ static int eth_start_xmit (struct sk_buff *skb, struct net_device *net)
1989 } 1957 }
1990 1958
1991 spin_lock_irqsave(&dev->req_lock, flags); 1959 spin_lock_irqsave(&dev->req_lock, flags);
1960 /*
1961 * this freelist can be empty if an interrupt triggered disconnect()
1962 * and reconfigured the gadget (shutting down this queue) after the
1963 * network stack decided to xmit but before we got the spinlock.
1964 */
1965 if (list_empty(&dev->tx_reqs)) {
1966 spin_unlock_irqrestore(&dev->req_lock, flags);
1967 return 1;
1968 }
1969
1992 req = container_of (dev->tx_reqs.next, struct usb_request, list); 1970 req = container_of (dev->tx_reqs.next, struct usb_request, list);
1993 list_del (&req->list); 1971 list_del (&req->list);
1972
1973 /* temporarily stop TX queue when the freelist empties */
1994 if (list_empty (&dev->tx_reqs)) 1974 if (list_empty (&dev->tx_reqs))
1995 netif_stop_queue (net); 1975 netif_stop_queue (net);
1996 spin_unlock_irqrestore(&dev->req_lock, flags); 1976 spin_unlock_irqrestore(&dev->req_lock, flags);
@@ -2026,12 +2006,11 @@ static int eth_start_xmit (struct sk_buff *skb, struct net_device *net)
2026 2006
2027 req->length = length; 2007 req->length = length;
2028 2008
2029#ifdef CONFIG_USB_GADGET_DUALSPEED
2030 /* throttle highspeed IRQ rate back slightly */ 2009 /* throttle highspeed IRQ rate back slightly */
2031 req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH) 2010 if (gadget_is_dualspeed(dev->gadget))
2032 ? ((atomic_read (&dev->tx_qlen) % TX_DELAY) != 0) 2011 req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
2033 : 0; 2012 ? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
2034#endif 2013 : 0;
2035 2014
2036 retval = usb_ep_queue (dev->in_ep, req, GFP_ATOMIC); 2015 retval = usb_ep_queue (dev->in_ep, req, GFP_ATOMIC);
2037 switch (retval) { 2016 switch (retval) {
@@ -2188,8 +2167,7 @@ static int eth_stop (struct net_device *net)
2188 } 2167 }
2189 2168
2190 if (rndis_active(dev)) { 2169 if (rndis_active(dev)) {
2191 rndis_set_param_medium (dev->rndis_config, 2170 rndis_set_param_medium(dev->rndis_config, NDIS_MEDIUM_802_3, 0);
2192 NDIS_MEDIUM_802_3, 0);
2193 (void) rndis_signal_disconnect (dev->rndis_config); 2171 (void) rndis_signal_disconnect (dev->rndis_config);
2194 } 2172 }
2195 2173
@@ -2443,26 +2421,28 @@ autoconf_fail:
2443 if (rndis) 2421 if (rndis)
2444 device_desc.bNumConfigurations = 2; 2422 device_desc.bNumConfigurations = 2;
2445 2423
2446#ifdef CONFIG_USB_GADGET_DUALSPEED 2424 if (gadget_is_dualspeed(gadget)) {
2447 if (rndis) 2425 if (rndis)
2448 dev_qualifier.bNumConfigurations = 2; 2426 dev_qualifier.bNumConfigurations = 2;
2449 else if (!cdc) 2427 else if (!cdc)
2450 dev_qualifier.bDeviceClass = USB_CLASS_VENDOR_SPEC; 2428 dev_qualifier.bDeviceClass = USB_CLASS_VENDOR_SPEC;
2451 2429
2452 /* assumes ep0 uses the same value for both speeds ... */ 2430 /* assumes ep0 uses the same value for both speeds ... */
2453 dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0; 2431 dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0;
2454 2432
2455 /* and that all endpoints are dual-speed */ 2433 /* and that all endpoints are dual-speed */
2456 hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; 2434 hs_source_desc.bEndpointAddress =
2457 hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; 2435 fs_source_desc.bEndpointAddress;
2436 hs_sink_desc.bEndpointAddress =
2437 fs_sink_desc.bEndpointAddress;
2458#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS) 2438#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
2459 if (status_ep) 2439 if (status_ep)
2460 hs_status_desc.bEndpointAddress = 2440 hs_status_desc.bEndpointAddress =
2461 fs_status_desc.bEndpointAddress; 2441 fs_status_desc.bEndpointAddress;
2462#endif 2442#endif
2463#endif /* DUALSPEED */ 2443 }
2464 2444
2465 if (gadget->is_otg) { 2445 if (gadget_is_otg(gadget)) {
2466 otg_descriptor.bmAttributes |= USB_OTG_HNP, 2446 otg_descriptor.bmAttributes |= USB_OTG_HNP,
2467 eth_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP; 2447 eth_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
2468 eth_config.bMaxPower = 4; 2448 eth_config.bMaxPower = 4;
@@ -2598,12 +2578,11 @@ fail0:
2598 if (rndis_set_param_dev (dev->rndis_config, dev->net, 2578 if (rndis_set_param_dev (dev->rndis_config, dev->net,
2599 &dev->stats, &dev->cdc_filter)) 2579 &dev->stats, &dev->cdc_filter))
2600 goto fail0; 2580 goto fail0;
2601 if (rndis_set_param_vendor (dev->rndis_config, vendorID, 2581 if (rndis_set_param_vendor(dev->rndis_config, vendorID,
2602 manufacturer)) 2582 manufacturer))
2603 goto fail0; 2583 goto fail0;
2604 if (rndis_set_param_medium (dev->rndis_config, 2584 if (rndis_set_param_medium(dev->rndis_config,
2605 NDIS_MEDIUM_802_3, 2585 NDIS_MEDIUM_802_3, 0))
2606 0))
2607 goto fail0; 2586 goto fail0;
2608 INFO (dev, "RNDIS ready\n"); 2587 INFO (dev, "RNDIS ready\n");
2609 } 2588 }
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 965ad7bec7..73726c570a 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * file_storage.c -- File-backed USB Storage Gadget, for USB development 2 * file_storage.c -- File-backed USB Storage Gadget, for USB development
3 * 3 *
4 * Copyright (C) 2003-2005 Alan Stern 4 * Copyright (C) 2003-2007 Alan Stern
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
@@ -217,17 +217,11 @@
217 */ 217 */
218 218
219 219
220#undef DEBUG 220/* #define VERBOSE_DEBUG */
221#undef VERBOSE 221/* #define DUMP_MSGS */
222#undef DUMP_MSGS
223
224 222
225#include <asm/system.h>
226#include <asm/uaccess.h>
227 223
228#include <linux/bitops.h>
229#include <linux/blkdev.h> 224#include <linux/blkdev.h>
230#include <linux/compiler.h>
231#include <linux/completion.h> 225#include <linux/completion.h>
232#include <linux/dcache.h> 226#include <linux/dcache.h>
233#include <linux/delay.h> 227#include <linux/delay.h>
@@ -235,18 +229,10 @@
235#include <linux/fcntl.h> 229#include <linux/fcntl.h>
236#include <linux/file.h> 230#include <linux/file.h>
237#include <linux/fs.h> 231#include <linux/fs.h>
238#include <linux/init.h>
239#include <linux/kernel.h>
240#include <linux/kref.h> 232#include <linux/kref.h>
241#include <linux/kthread.h> 233#include <linux/kthread.h>
242#include <linux/limits.h> 234#include <linux/limits.h>
243#include <linux/list.h>
244#include <linux/module.h>
245#include <linux/moduleparam.h>
246#include <linux/pagemap.h>
247#include <linux/rwsem.h> 235#include <linux/rwsem.h>
248#include <linux/sched.h>
249#include <linux/signal.h>
250#include <linux/slab.h> 236#include <linux/slab.h>
251#include <linux/spinlock.h> 237#include <linux/spinlock.h>
252#include <linux/string.h> 238#include <linux/string.h>
@@ -254,7 +240,7 @@
254#include <linux/utsname.h> 240#include <linux/utsname.h>
255 241
256#include <linux/usb/ch9.h> 242#include <linux/usb/ch9.h>
257#include <linux/usb_gadget.h> 243#include <linux/usb/gadget.h>
258 244
259#include "gadget_chips.h" 245#include "gadget_chips.h"
260 246
@@ -263,7 +249,7 @@
263 249
264#define DRIVER_DESC "File-backed Storage Gadget" 250#define DRIVER_DESC "File-backed Storage Gadget"
265#define DRIVER_NAME "g_file_storage" 251#define DRIVER_NAME "g_file_storage"
266#define DRIVER_VERSION "28 November 2005" 252#define DRIVER_VERSION "7 August 2007"
267 253
268static const char longname[] = DRIVER_DESC; 254static const char longname[] = DRIVER_DESC;
269static const char shortname[] = DRIVER_NAME; 255static const char shortname[] = DRIVER_NAME;
@@ -289,57 +275,48 @@ MODULE_LICENSE("Dual BSD/GPL");
289 275
290/*-------------------------------------------------------------------------*/ 276/*-------------------------------------------------------------------------*/
291 277
292#define xprintk(f,level,fmt,args...) \
293 dev_printk(level , &(f)->gadget->dev , fmt , ## args)
294#define yprintk(l,level,fmt,args...) \
295 dev_printk(level , &(l)->dev , fmt , ## args)
296
297#ifdef DEBUG 278#ifdef DEBUG
298#define DBG(fsg,fmt,args...) \
299 xprintk(fsg , KERN_DEBUG , fmt , ## args)
300#define LDBG(lun,fmt,args...) \ 279#define LDBG(lun,fmt,args...) \
301 yprintk(lun , KERN_DEBUG , fmt , ## args) 280 dev_dbg(&(lun)->dev , fmt , ## args)
302#define MDBG(fmt,args...) \ 281#define MDBG(fmt,args...) \
303 printk(KERN_DEBUG DRIVER_NAME ": " fmt , ## args) 282 printk(KERN_DEBUG DRIVER_NAME ": " fmt , ## args)
304#else 283#else
305#define DBG(fsg,fmt,args...) \
306 do { } while (0)
307#define LDBG(lun,fmt,args...) \ 284#define LDBG(lun,fmt,args...) \
308 do { } while (0) 285 do { } while (0)
309#define MDBG(fmt,args...) \ 286#define MDBG(fmt,args...) \
310 do { } while (0) 287 do { } while (0)
311#undef VERBOSE 288#undef VERBOSE_DEBUG
312#undef DUMP_MSGS 289#undef DUMP_MSGS
313#endif /* DEBUG */ 290#endif /* DEBUG */
314 291
315#ifdef VERBOSE 292#ifdef VERBOSE_DEBUG
316#define VDBG DBG
317#define VLDBG LDBG 293#define VLDBG LDBG
318#else 294#else
319#define VDBG(fsg,fmt,args...) \
320 do { } while (0)
321#define VLDBG(lun,fmt,args...) \ 295#define VLDBG(lun,fmt,args...) \
322 do { } while (0) 296 do { } while (0)
323#endif /* VERBOSE */ 297#endif /* VERBOSE_DEBUG */
324 298
325#define ERROR(fsg,fmt,args...) \
326 xprintk(fsg , KERN_ERR , fmt , ## args)
327#define LERROR(lun,fmt,args...) \ 299#define LERROR(lun,fmt,args...) \
328 yprintk(lun , KERN_ERR , fmt , ## args) 300 dev_err(&(lun)->dev , fmt , ## args)
329
330#define WARN(fsg,fmt,args...) \
331 xprintk(fsg , KERN_WARNING , fmt , ## args)
332#define LWARN(lun,fmt,args...) \ 301#define LWARN(lun,fmt,args...) \
333 yprintk(lun , KERN_WARNING , fmt , ## args) 302 dev_warn(&(lun)->dev , fmt , ## args)
334
335#define INFO(fsg,fmt,args...) \
336 xprintk(fsg , KERN_INFO , fmt , ## args)
337#define LINFO(lun,fmt,args...) \ 303#define LINFO(lun,fmt,args...) \
338 yprintk(lun , KERN_INFO , fmt , ## args) 304 dev_info(&(lun)->dev , fmt , ## args)
339 305
340#define MINFO(fmt,args...) \ 306#define MINFO(fmt,args...) \
341 printk(KERN_INFO DRIVER_NAME ": " fmt , ## args) 307 printk(KERN_INFO DRIVER_NAME ": " fmt , ## args)
342 308
309#define DBG(d, fmt, args...) \
310 dev_dbg(&(d)->gadget->dev , fmt , ## args)
311#define VDBG(d, fmt, args...) \
312 dev_vdbg(&(d)->gadget->dev , fmt , ## args)
313#define ERROR(d, fmt, args...) \
314 dev_err(&(d)->gadget->dev , fmt , ## args)
315#define WARN(d, fmt, args...) \
316 dev_warn(&(d)->gadget->dev , fmt , ## args)
317#define INFO(d, fmt, args...) \
318 dev_info(&(d)->gadget->dev , fmt , ## args)
319
343 320
344/*-------------------------------------------------------------------------*/ 321/*-------------------------------------------------------------------------*/
345 322
@@ -350,8 +327,8 @@ MODULE_LICENSE("Dual BSD/GPL");
350static struct { 327static struct {
351 char *file[MAX_LUNS]; 328 char *file[MAX_LUNS];
352 int ro[MAX_LUNS]; 329 int ro[MAX_LUNS];
353 int num_filenames; 330 unsigned int num_filenames;
354 int num_ros; 331 unsigned int num_ros;
355 unsigned int nluns; 332 unsigned int nluns;
356 333
357 int removable; 334 int removable;
@@ -578,7 +555,7 @@ struct lun {
578 555
579#define backing_file_is_open(curlun) ((curlun)->filp != NULL) 556#define backing_file_is_open(curlun) ((curlun)->filp != NULL)
580 557
581static inline struct lun *dev_to_lun(struct device *dev) 558static struct lun *dev_to_lun(struct device *dev)
582{ 559{
583 return container_of(dev, struct lun, dev); 560 return container_of(dev, struct lun, dev);
584} 561}
@@ -711,13 +688,13 @@ struct fsg_dev {
711 688
712typedef void (*fsg_routine_t)(struct fsg_dev *); 689typedef void (*fsg_routine_t)(struct fsg_dev *);
713 690
714static int inline exception_in_progress(struct fsg_dev *fsg) 691static int exception_in_progress(struct fsg_dev *fsg)
715{ 692{
716 return (fsg->state > FSG_STATE_IDLE); 693 return (fsg->state > FSG_STATE_IDLE);
717} 694}
718 695
719/* Make bulk-out requests be divisible by the maxpacket size */ 696/* Make bulk-out requests be divisible by the maxpacket size */
720static void inline set_bulk_out_req_length(struct fsg_dev *fsg, 697static void set_bulk_out_req_length(struct fsg_dev *fsg,
721 struct fsg_buffhd *bh, unsigned int length) 698 struct fsg_buffhd *bh, unsigned int length)
722{ 699{
723 unsigned int rem; 700 unsigned int rem;
@@ -743,50 +720,36 @@ static void close_all_backing_files(struct fsg_dev *fsg);
743static void dump_msg(struct fsg_dev *fsg, const char *label, 720static void dump_msg(struct fsg_dev *fsg, const char *label,
744 const u8 *buf, unsigned int length) 721 const u8 *buf, unsigned int length)
745{ 722{
746 unsigned int start, num, i; 723 if (length < 512) {
747 char line[52], *p; 724 DBG(fsg, "%s, length %u:\n", label, length);
748 725 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
749 if (length >= 512) 726 16, 1, buf, length, 0);
750 return;
751 DBG(fsg, "%s, length %u:\n", label, length);
752
753 start = 0;
754 while (length > 0) {
755 num = min(length, 16u);
756 p = line;
757 for (i = 0; i < num; ++i) {
758 if (i == 8)
759 *p++ = ' ';
760 sprintf(p, " %02x", buf[i]);
761 p += 3;
762 }
763 *p = 0;
764 printk(KERN_DEBUG "%6x: %s\n", start, line);
765 buf += num;
766 start += num;
767 length -= num;
768 } 727 }
769} 728}
770 729
771static void inline dump_cdb(struct fsg_dev *fsg) 730static void dump_cdb(struct fsg_dev *fsg)
772{} 731{}
773 732
774#else 733#else
775 734
776static void inline dump_msg(struct fsg_dev *fsg, const char *label, 735static void dump_msg(struct fsg_dev *fsg, const char *label,
777 const u8 *buf, unsigned int length) 736 const u8 *buf, unsigned int length)
778{} 737{}
779 738
780static void inline dump_cdb(struct fsg_dev *fsg) 739#ifdef VERBOSE_DEBUG
781{
782 int i;
783 char cmdbuf[3*MAX_COMMAND_SIZE + 1];
784 740
785 for (i = 0; i < fsg->cmnd_size; ++i) 741static void dump_cdb(struct fsg_dev *fsg)
786 sprintf(cmdbuf + i*3, " %02x", fsg->cmnd[i]); 742{
787 VDBG(fsg, "SCSI CDB: %s\n", cmdbuf); 743 print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE,
744 16, 1, fsg->cmnd, fsg->cmnd_size, 0);
788} 745}
789 746
747#else
748
749static void dump_cdb(struct fsg_dev *fsg)
750{}
751
752#endif /* VERBOSE_DEBUG */
790#endif /* DUMP_MSGS */ 753#endif /* DUMP_MSGS */
791 754
792 755
@@ -809,24 +772,24 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
809 772
810/* Routines for unaligned data access */ 773/* Routines for unaligned data access */
811 774
812static u16 inline get_be16(u8 *buf) 775static u16 get_be16(u8 *buf)
813{ 776{
814 return ((u16) buf[0] << 8) | ((u16) buf[1]); 777 return ((u16) buf[0] << 8) | ((u16) buf[1]);
815} 778}
816 779
817static u32 inline get_be32(u8 *buf) 780static u32 get_be32(u8 *buf)
818{ 781{
819 return ((u32) buf[0] << 24) | ((u32) buf[1] << 16) | 782 return ((u32) buf[0] << 24) | ((u32) buf[1] << 16) |
820 ((u32) buf[2] << 8) | ((u32) buf[3]); 783 ((u32) buf[2] << 8) | ((u32) buf[3]);
821} 784}
822 785
823static void inline put_be16(u8 *buf, u16 val) 786static void put_be16(u8 *buf, u16 val)
824{ 787{
825 buf[0] = val >> 8; 788 buf[0] = val >> 8;
826 buf[1] = val; 789 buf[1] = val;
827} 790}
828 791
829static void inline put_be32(u8 *buf, u32 val) 792static void put_be32(u8 *buf, u32 val)
830{ 793{
831 buf[0] = val >> 24; 794 buf[0] = val >> 24;
832 buf[1] = val >> 16; 795 buf[1] = val >> 16;
@@ -950,8 +913,6 @@ static const struct usb_descriptor_header *fs_function[] = {
950#define FS_FUNCTION_PRE_EP_ENTRIES 2 913#define FS_FUNCTION_PRE_EP_ENTRIES 2
951 914
952 915
953#ifdef CONFIG_USB_GADGET_DUALSPEED
954
955/* 916/*
956 * USB 2.0 devices need to expose both high speed and full speed 917 * USB 2.0 devices need to expose both high speed and full speed
957 * descriptors, unless they only run at full speed. 918 * descriptors, unless they only run at full speed.
@@ -1014,14 +975,14 @@ static const struct usb_descriptor_header *hs_function[] = {
1014#define HS_FUNCTION_PRE_EP_ENTRIES 2 975#define HS_FUNCTION_PRE_EP_ENTRIES 2
1015 976
1016/* Maxpacket and other transfer characteristics vary by speed. */ 977/* Maxpacket and other transfer characteristics vary by speed. */
1017#define ep_desc(g,fs,hs) (((g)->speed==USB_SPEED_HIGH) ? (hs) : (fs)) 978static struct usb_endpoint_descriptor *
1018 979ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs,
1019#else 980 struct usb_endpoint_descriptor *hs)
1020 981{
1021/* If there's no high speed support, always use the full-speed descriptor. */ 982 if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
1022#define ep_desc(g,fs,hs) fs 983 return hs;
1023 984 return fs;
1024#endif /* !CONFIG_USB_GADGET_DUALSPEED */ 985}
1025 986
1026 987
1027/* The CBI specification limits the serial string to 12 uppercase hexadecimal 988/* The CBI specification limits the serial string to 12 uppercase hexadecimal
@@ -1053,26 +1014,22 @@ static struct usb_gadget_strings stringtab = {
1053static int populate_config_buf(struct usb_gadget *gadget, 1014static int populate_config_buf(struct usb_gadget *gadget,
1054 u8 *buf, u8 type, unsigned index) 1015 u8 *buf, u8 type, unsigned index)
1055{ 1016{
1056#ifdef CONFIG_USB_GADGET_DUALSPEED
1057 enum usb_device_speed speed = gadget->speed; 1017 enum usb_device_speed speed = gadget->speed;
1058#endif
1059 int len; 1018 int len;
1060 const struct usb_descriptor_header **function; 1019 const struct usb_descriptor_header **function;
1061 1020
1062 if (index > 0) 1021 if (index > 0)
1063 return -EINVAL; 1022 return -EINVAL;
1064 1023
1065#ifdef CONFIG_USB_GADGET_DUALSPEED 1024 if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG)
1066 if (type == USB_DT_OTHER_SPEED_CONFIG)
1067 speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed; 1025 speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed;
1068 if (speed == USB_SPEED_HIGH) 1026 if (gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH)
1069 function = hs_function; 1027 function = hs_function;
1070 else 1028 else
1071#endif
1072 function = fs_function; 1029 function = fs_function;
1073 1030
1074 /* for now, don't advertise srp-only devices */ 1031 /* for now, don't advertise srp-only devices */
1075 if (!gadget->is_otg) 1032 if (!gadget_is_otg(gadget))
1076 function++; 1033 function++;
1077 1034
1078 len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function); 1035 len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function);
@@ -1394,10 +1351,9 @@ static int standard_setup_req(struct fsg_dev *fsg,
1394 value = sizeof device_desc; 1351 value = sizeof device_desc;
1395 memcpy(req->buf, &device_desc, value); 1352 memcpy(req->buf, &device_desc, value);
1396 break; 1353 break;
1397#ifdef CONFIG_USB_GADGET_DUALSPEED
1398 case USB_DT_DEVICE_QUALIFIER: 1354 case USB_DT_DEVICE_QUALIFIER:
1399 VDBG(fsg, "get device qualifier\n"); 1355 VDBG(fsg, "get device qualifier\n");
1400 if (!fsg->gadget->is_dualspeed) 1356 if (!gadget_is_dualspeed(fsg->gadget))
1401 break; 1357 break;
1402 value = sizeof dev_qualifier; 1358 value = sizeof dev_qualifier;
1403 memcpy(req->buf, &dev_qualifier, value); 1359 memcpy(req->buf, &dev_qualifier, value);
@@ -1405,15 +1361,12 @@ static int standard_setup_req(struct fsg_dev *fsg,
1405 1361
1406 case USB_DT_OTHER_SPEED_CONFIG: 1362 case USB_DT_OTHER_SPEED_CONFIG:
1407 VDBG(fsg, "get other-speed config descriptor\n"); 1363 VDBG(fsg, "get other-speed config descriptor\n");
1408 if (!fsg->gadget->is_dualspeed) 1364 if (!gadget_is_dualspeed(fsg->gadget))
1409 break; 1365 break;
1410 goto get_config; 1366 goto get_config;
1411#endif
1412 case USB_DT_CONFIG: 1367 case USB_DT_CONFIG:
1413 VDBG(fsg, "get configuration descriptor\n"); 1368 VDBG(fsg, "get configuration descriptor\n");
1414#ifdef CONFIG_USB_GADGET_DUALSPEED 1369get_config:
1415 get_config:
1416#endif
1417 value = populate_config_buf(fsg->gadget, 1370 value = populate_config_buf(fsg->gadget,
1418 req->buf, 1371 req->buf,
1419 w_value >> 8, 1372 w_value >> 8,
@@ -1646,7 +1599,8 @@ static int do_read(struct fsg_dev *fsg)
1646 /* Wait for the next buffer to become available */ 1599 /* Wait for the next buffer to become available */
1647 bh = fsg->next_buffhd_to_fill; 1600 bh = fsg->next_buffhd_to_fill;
1648 while (bh->state != BUF_STATE_EMPTY) { 1601 while (bh->state != BUF_STATE_EMPTY) {
1649 if ((rc = sleep_thread(fsg)) != 0) 1602 rc = sleep_thread(fsg);
1603 if (rc)
1650 return rc; 1604 return rc;
1651 } 1605 }
1652 1606
@@ -1885,7 +1839,8 @@ static int do_write(struct fsg_dev *fsg)
1885 } 1839 }
1886 1840
1887 /* Wait for something to happen */ 1841 /* Wait for something to happen */
1888 if ((rc = sleep_thread(fsg)) != 0) 1842 rc = sleep_thread(fsg);
1843 if (rc)
1889 return rc; 1844 return rc;
1890 } 1845 }
1891 1846
@@ -2369,7 +2324,8 @@ static int pad_with_zeros(struct fsg_dev *fsg)
2369 2324
2370 /* Wait for the next buffer to be free */ 2325 /* Wait for the next buffer to be free */
2371 while (bh->state != BUF_STATE_EMPTY) { 2326 while (bh->state != BUF_STATE_EMPTY) {
2372 if ((rc = sleep_thread(fsg)) != 0) 2327 rc = sleep_thread(fsg);
2328 if (rc)
2373 return rc; 2329 return rc;
2374 } 2330 }
2375 2331
@@ -2429,7 +2385,8 @@ static int throw_away_data(struct fsg_dev *fsg)
2429 } 2385 }
2430 2386
2431 /* Otherwise wait for something to happen */ 2387 /* Otherwise wait for something to happen */
2432 if ((rc = sleep_thread(fsg)) != 0) 2388 rc = sleep_thread(fsg);
2389 if (rc)
2433 return rc; 2390 return rc;
2434 } 2391 }
2435 return 0; 2392 return 0;
@@ -2551,7 +2508,8 @@ static int send_status(struct fsg_dev *fsg)
2551 /* Wait for the next buffer to become available */ 2508 /* Wait for the next buffer to become available */
2552 bh = fsg->next_buffhd_to_fill; 2509 bh = fsg->next_buffhd_to_fill;
2553 while (bh->state != BUF_STATE_EMPTY) { 2510 while (bh->state != BUF_STATE_EMPTY) {
2554 if ((rc = sleep_thread(fsg)) != 0) 2511 rc = sleep_thread(fsg);
2512 if (rc)
2555 return rc; 2513 return rc;
2556 } 2514 }
2557 2515
@@ -2771,9 +2729,10 @@ static int do_scsi_command(struct fsg_dev *fsg)
2771 /* Wait for the next buffer to become available for data or status */ 2729 /* Wait for the next buffer to become available for data or status */
2772 bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill; 2730 bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
2773 while (bh->state != BUF_STATE_EMPTY) { 2731 while (bh->state != BUF_STATE_EMPTY) {
2774 if ((rc = sleep_thread(fsg)) != 0) 2732 rc = sleep_thread(fsg);
2733 if (rc)
2775 return rc; 2734 return rc;
2776 } 2735 }
2777 fsg->phase_error = 0; 2736 fsg->phase_error = 0;
2778 fsg->short_packet_received = 0; 2737 fsg->short_packet_received = 0;
2779 2738
@@ -3005,7 +2964,7 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
3005 2964
3006 /* Is the CBW meaningful? */ 2965 /* Is the CBW meaningful? */
3007 if (cbw->Lun >= MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG || 2966 if (cbw->Lun >= MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
3008 cbw->Length < 6 || cbw->Length > MAX_COMMAND_SIZE) { 2967 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
3009 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, " 2968 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
3010 "cmdlen %u\n", 2969 "cmdlen %u\n",
3011 cbw->Lun, cbw->Flags, cbw->Length); 2970 cbw->Lun, cbw->Flags, cbw->Length);
@@ -3045,9 +3004,10 @@ static int get_next_command(struct fsg_dev *fsg)
3045 /* Wait for the next buffer to become available */ 3004 /* Wait for the next buffer to become available */
3046 bh = fsg->next_buffhd_to_fill; 3005 bh = fsg->next_buffhd_to_fill;
3047 while (bh->state != BUF_STATE_EMPTY) { 3006 while (bh->state != BUF_STATE_EMPTY) {
3048 if ((rc = sleep_thread(fsg)) != 0) 3007 rc = sleep_thread(fsg);
3008 if (rc)
3049 return rc; 3009 return rc;
3050 } 3010 }
3051 3011
3052 /* Queue a request to read a Bulk-only CBW */ 3012 /* Queue a request to read a Bulk-only CBW */
3053 set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN); 3013 set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
@@ -3061,9 +3021,10 @@ static int get_next_command(struct fsg_dev *fsg)
3061 3021
3062 /* Wait for the CBW to arrive */ 3022 /* Wait for the CBW to arrive */
3063 while (bh->state != BUF_STATE_FULL) { 3023 while (bh->state != BUF_STATE_FULL) {
3064 if ((rc = sleep_thread(fsg)) != 0) 3024 rc = sleep_thread(fsg);
3025 if (rc)
3065 return rc; 3026 return rc;
3066 } 3027 }
3067 smp_rmb(); 3028 smp_rmb();
3068 rc = received_cbw(fsg, bh); 3029 rc = received_cbw(fsg, bh);
3069 bh->state = BUF_STATE_EMPTY; 3030 bh->state = BUF_STATE_EMPTY;
@@ -3072,9 +3033,10 @@ static int get_next_command(struct fsg_dev *fsg)
3072 3033
3073 /* Wait for the next command to arrive */ 3034 /* Wait for the next command to arrive */
3074 while (fsg->cbbuf_cmnd_size == 0) { 3035 while (fsg->cbbuf_cmnd_size == 0) {
3075 if ((rc = sleep_thread(fsg)) != 0) 3036 rc = sleep_thread(fsg);
3037 if (rc)
3076 return rc; 3038 return rc;
3077 } 3039 }
3078 3040
3079 /* Is the previous status interrupt request still busy? 3041 /* Is the previous status interrupt request still busy?
3080 * The host is allowed to skip reading the status, 3042 * The host is allowed to skip reading the status,
@@ -3595,7 +3557,8 @@ static ssize_t show_ro(struct device *dev, struct device_attribute *attr, char *
3595 return sprintf(buf, "%d\n", curlun->ro); 3557 return sprintf(buf, "%d\n", curlun->ro);
3596} 3558}
3597 3559
3598static ssize_t show_file(struct device *dev, struct device_attribute *attr, char *buf) 3560static ssize_t show_file(struct device *dev, struct device_attribute *attr,
3561 char *buf)
3599{ 3562{
3600 struct lun *curlun = dev_to_lun(dev); 3563 struct lun *curlun = dev_to_lun(dev);
3601 struct fsg_dev *fsg = dev_get_drvdata(dev); 3564 struct fsg_dev *fsg = dev_get_drvdata(dev);
@@ -3604,8 +3567,8 @@ static ssize_t show_file(struct device *dev, struct device_attribute *attr, char
3604 3567
3605 down_read(&fsg->filesem); 3568 down_read(&fsg->filesem);
3606 if (backing_file_is_open(curlun)) { // Get the complete pathname 3569 if (backing_file_is_open(curlun)) { // Get the complete pathname
3607 p = d_path(curlun->filp->f_path.dentry, curlun->filp->f_path.mnt, 3570 p = d_path(curlun->filp->f_path.dentry,
3608 buf, PAGE_SIZE - 1); 3571 curlun->filp->f_path.mnt, buf, PAGE_SIZE - 1);
3609 if (IS_ERR(p)) 3572 if (IS_ERR(p))
3610 rc = PTR_ERR(p); 3573 rc = PTR_ERR(p);
3611 else { 3574 else {
@@ -3623,7 +3586,8 @@ static ssize_t show_file(struct device *dev, struct device_attribute *attr, char
3623} 3586}
3624 3587
3625 3588
3626static ssize_t store_ro(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 3589static ssize_t store_ro(struct device *dev, struct device_attribute *attr,
3590 const char *buf, size_t count)
3627{ 3591{
3628 ssize_t rc = count; 3592 ssize_t rc = count;
3629 struct lun *curlun = dev_to_lun(dev); 3593 struct lun *curlun = dev_to_lun(dev);
@@ -3647,7 +3611,8 @@ static ssize_t store_ro(struct device *dev, struct device_attribute *attr, const
3647 return rc; 3611 return rc;
3648} 3612}
3649 3613
3650static ssize_t store_file(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 3614static ssize_t store_file(struct device *dev, struct device_attribute *attr,
3615 const char *buf, size_t count)
3651{ 3616{
3652 struct lun *curlun = dev_to_lun(dev); 3617 struct lun *curlun = dev_to_lun(dev);
3653 struct fsg_dev *fsg = dev_get_drvdata(dev); 3618 struct fsg_dev *fsg = dev_get_drvdata(dev);
@@ -3859,7 +3824,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
3859 /* Find out how many LUNs there should be */ 3824 /* Find out how many LUNs there should be */
3860 i = mod_data.nluns; 3825 i = mod_data.nluns;
3861 if (i == 0) 3826 if (i == 0)
3862 i = max(mod_data.num_filenames, 1); 3827 i = max(mod_data.num_filenames, 1u);
3863 if (i > MAX_LUNS) { 3828 if (i > MAX_LUNS) {
3864 ERROR(fsg, "invalid number of LUNs: %d\n", i); 3829 ERROR(fsg, "invalid number of LUNs: %d\n", i);
3865 rc = -EINVAL; 3830 rc = -EINVAL;
@@ -3944,21 +3909,23 @@ static int __init fsg_bind(struct usb_gadget *gadget)
3944 intf_desc.bInterfaceProtocol = mod_data.transport_type; 3909 intf_desc.bInterfaceProtocol = mod_data.transport_type;
3945 fs_function[i + FS_FUNCTION_PRE_EP_ENTRIES] = NULL; 3910 fs_function[i + FS_FUNCTION_PRE_EP_ENTRIES] = NULL;
3946 3911
3947#ifdef CONFIG_USB_GADGET_DUALSPEED 3912 if (gadget_is_dualspeed(gadget)) {
3948 hs_function[i + HS_FUNCTION_PRE_EP_ENTRIES] = NULL; 3913 hs_function[i + HS_FUNCTION_PRE_EP_ENTRIES] = NULL;
3949 3914
3950 /* Assume ep0 uses the same maxpacket value for both speeds */ 3915 /* Assume ep0 uses the same maxpacket value for both speeds */
3951 dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket; 3916 dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
3952 3917
3953 /* Assume that all endpoint addresses are the same for both speeds */ 3918 /* Assume endpoint addresses are the same for both speeds */
3954 hs_bulk_in_desc.bEndpointAddress = fs_bulk_in_desc.bEndpointAddress; 3919 hs_bulk_in_desc.bEndpointAddress =
3955 hs_bulk_out_desc.bEndpointAddress = fs_bulk_out_desc.bEndpointAddress; 3920 fs_bulk_in_desc.bEndpointAddress;
3956 hs_intr_in_desc.bEndpointAddress = fs_intr_in_desc.bEndpointAddress; 3921 hs_bulk_out_desc.bEndpointAddress =
3957#endif 3922 fs_bulk_out_desc.bEndpointAddress;
3923 hs_intr_in_desc.bEndpointAddress =
3924 fs_intr_in_desc.bEndpointAddress;
3925 }
3958 3926
3959 if (gadget->is_otg) { 3927 if (gadget_is_otg(gadget))
3960 otg_desc.bmAttributes |= USB_OTG_HNP; 3928 otg_desc.bmAttributes |= USB_OTG_HNP;
3961 }
3962 3929
3963 rc = -ENOMEM; 3930 rc = -ENOMEM;
3964 3931
diff --git a/drivers/usb/gadget/fsl_usb2_udc.c b/drivers/usb/gadget/fsl_usb2_udc.c
index d57bcfbc08..9bb7f64a85 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.c
+++ b/drivers/usb/gadget/fsl_usb2_udc.c
@@ -35,7 +35,7 @@
35#include <linux/moduleparam.h> 35#include <linux/moduleparam.h>
36#include <linux/device.h> 36#include <linux/device.h>
37#include <linux/usb/ch9.h> 37#include <linux/usb/ch9.h>
38#include <linux/usb_gadget.h> 38#include <linux/usb/gadget.h>
39#include <linux/usb/otg.h> 39#include <linux/usb/otg.h>
40#include <linux/dma-mapping.h> 40#include <linux/dma-mapping.h>
41#include <linux/platform_device.h> 41#include <linux/platform_device.h>
@@ -1090,14 +1090,11 @@ static int fsl_vbus_session(struct usb_gadget *gadget, int is_active)
1090 */ 1090 */
1091static int fsl_vbus_draw(struct usb_gadget *gadget, unsigned mA) 1091static int fsl_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1092{ 1092{
1093#ifdef CONFIG_USB_OTG
1094 struct fsl_udc *udc; 1093 struct fsl_udc *udc;
1095 1094
1096 udc = container_of(gadget, struct fsl_udc, gadget); 1095 udc = container_of(gadget, struct fsl_udc, gadget);
1097
1098 if (udc->transceiver) 1096 if (udc->transceiver)
1099 return otg_set_power(udc->transceiver, mA); 1097 return otg_set_power(udc->transceiver, mA);
1100#endif
1101 return -ENOTSUPP; 1098 return -ENOTSUPP;
1102} 1099}
1103 1100
@@ -1120,7 +1117,7 @@ static int fsl_pullup(struct usb_gadget *gadget, int is_on)
1120 return 0; 1117 return 0;
1121} 1118}
1122 1119
1123/* defined in usb_gadget.h */ 1120/* defined in gadget.h */
1124static struct usb_gadget_ops fsl_gadget_ops = { 1121static struct usb_gadget_ops fsl_gadget_ops = {
1125 .get_frame = fsl_get_frame, 1122 .get_frame = fsl_get_frame,
1126 .wakeup = fsl_wakeup, 1123 .wakeup = fsl_wakeup,
@@ -1321,7 +1318,7 @@ static void setup_received_irq(struct fsl_udc *udc,
1321 | USB_TYPE_STANDARD)) { 1318 | USB_TYPE_STANDARD)) {
1322 /* Note: The driver has not include OTG support yet. 1319 /* Note: The driver has not include OTG support yet.
1323 * This will be set when OTG support is added */ 1320 * This will be set when OTG support is added */
1324 if (!udc->gadget.is_otg) 1321 if (!gadget_is_otg(udc->gadget))
1325 break; 1322 break;
1326 else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE) 1323 else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE)
1327 udc->gadget.b_hnp_enable = 1; 1324 udc->gadget.b_hnp_enable = 1;
@@ -1330,6 +1327,8 @@ static void setup_received_irq(struct fsl_udc *udc,
1330 else if (setup->bRequest == 1327 else if (setup->bRequest ==
1331 USB_DEVICE_A_ALT_HNP_SUPPORT) 1328 USB_DEVICE_A_ALT_HNP_SUPPORT)
1332 udc->gadget.a_alt_hnp_support = 1; 1329 udc->gadget.a_alt_hnp_support = 1;
1330 else
1331 break;
1333 rc = 0; 1332 rc = 0;
1334 } else 1333 } else
1335 break; 1334 break;
@@ -1840,10 +1839,8 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1840 if (!driver || driver != udc_controller->driver || !driver->unbind) 1839 if (!driver || driver != udc_controller->driver || !driver->unbind)
1841 return -EINVAL; 1840 return -EINVAL;
1842 1841
1843#ifdef CONFIG_USB_OTG
1844 if (udc_controller->transceiver) 1842 if (udc_controller->transceiver)
1845 (void)otg_set_peripheral(udc_controller->transceiver, 0); 1843 (void)otg_set_peripheral(udc_controller->transceiver, 0);
1846#endif
1847 1844
1848 /* stop DR, disable intr */ 1845 /* stop DR, disable intr */
1849 dr_controller_stop(udc_controller); 1846 dr_controller_stop(udc_controller);
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 1c5aa49d74..0689189550 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -18,17 +18,11 @@
18 * http://www.usb.org/developers/devclass_docs/midi10.pdf 18 * http://www.usb.org/developers/devclass_docs/midi10.pdf
19 */ 19 */
20 20
21#define DEBUG 1 21/* #define VERBOSE_DEBUG */
22// #define VERBOSE
23 22
24#include <linux/module.h>
25#include <linux/kernel.h> 23#include <linux/kernel.h>
26#include <linux/delay.h>
27#include <linux/errno.h>
28#include <linux/init.h>
29#include <linux/utsname.h> 24#include <linux/utsname.h>
30#include <linux/device.h> 25#include <linux/device.h>
31#include <linux/moduleparam.h>
32 26
33#include <sound/driver.h> 27#include <sound/driver.h>
34#include <sound/core.h> 28#include <sound/core.h>
@@ -36,7 +30,7 @@
36#include <sound/rawmidi.h> 30#include <sound/rawmidi.h>
37 31
38#include <linux/usb/ch9.h> 32#include <linux/usb/ch9.h>
39#include <linux/usb_gadget.h> 33#include <linux/usb/gadget.h>
40#include <linux/usb/audio.h> 34#include <linux/usb/audio.h>
41#include <linux/usb/midi.h> 35#include <linux/usb/midi.h>
42 36
@@ -139,30 +133,16 @@ struct gmidi_device {
139static void gmidi_transmit(struct gmidi_device* dev, struct usb_request* req); 133static void gmidi_transmit(struct gmidi_device* dev, struct usb_request* req);
140 134
141 135
142#define xprintk(d,level,fmt,args...) \ 136#define DBG(d, fmt, args...) \
143 dev_printk(level , &(d)->gadget->dev , fmt , ## args) 137 dev_dbg(&(d)->gadget->dev , fmt , ## args)
144 138#define VDBG(d, fmt, args...) \
145#ifdef DEBUG 139 dev_vdbg(&(d)->gadget->dev , fmt , ## args)
146#define DBG(dev,fmt,args...) \ 140#define ERROR(d, fmt, args...) \
147 xprintk(dev , KERN_DEBUG , fmt , ## args) 141 dev_err(&(d)->gadget->dev , fmt , ## args)
148#else 142#define WARN(d, fmt, args...) \
149#define DBG(dev,fmt,args...) \ 143 dev_warn(&(d)->gadget->dev , fmt , ## args)
150 do { } while (0) 144#define INFO(d, fmt, args...) \
151#endif /* DEBUG */ 145 dev_info(&(d)->gadget->dev , fmt , ## args)
152
153#ifdef VERBOSE
154#define VDBG DBG
155#else
156#define VDBG(dev,fmt,args...) \
157 do { } while (0)
158#endif /* VERBOSE */
159
160#define ERROR(dev,fmt,args...) \
161 xprintk(dev , KERN_ERR , fmt , ## args)
162#define WARN(dev,fmt,args...) \
163 xprintk(dev , KERN_WARNING , fmt , ## args)
164#define INFO(dev,fmt,args...) \
165 xprintk(dev , KERN_INFO , fmt , ## args)
166 146
167 147
168static unsigned buflen = 256; 148static unsigned buflen = 256;
@@ -425,7 +405,7 @@ static int config_buf(struct usb_gadget *gadget,
425 return len; 405 return len;
426} 406}
427 407
428static struct usb_request* alloc_ep_req(struct usb_ep *ep, unsigned length) 408static struct usb_request *alloc_ep_req(struct usb_ep *ep, unsigned length)
429{ 409{
430 struct usb_request *req; 410 struct usb_request *req;
431 411
@@ -455,7 +435,7 @@ static const uint8_t gmidi_cin_length[] = {
455 * Receives a chunk of MIDI data. 435 * Receives a chunk of MIDI data.
456 */ 436 */
457static void gmidi_read_data(struct usb_ep *ep, int cable, 437static void gmidi_read_data(struct usb_ep *ep, int cable,
458 uint8_t* data, int length) 438 uint8_t *data, int length)
459{ 439{
460 struct gmidi_device *dev = ep->driver_data; 440 struct gmidi_device *dev = ep->driver_data;
461 /* cable is ignored, because for now we only have one. */ 441 /* cable is ignored, because for now we only have one. */
@@ -541,7 +521,7 @@ static int set_gmidi_config(struct gmidi_device *dev, gfp_t gfp_flags)
541{ 521{
542 int err = 0; 522 int err = 0;
543 struct usb_request *req; 523 struct usb_request *req;
544 struct usb_ep* ep; 524 struct usb_ep *ep;
545 unsigned i; 525 unsigned i;
546 526
547 err = usb_ep_enable(dev->in_ep, &bulk_in_desc); 527 err = usb_ep_enable(dev->in_ep, &bulk_in_desc);
@@ -628,7 +608,7 @@ gmidi_set_config(struct gmidi_device *dev, unsigned number, gfp_t gfp_flags)
628 608
629 if (gadget_is_sa1100(gadget) && dev->config) { 609 if (gadget_is_sa1100(gadget) && dev->config) {
630 /* tx fifo is full, but we can't clear it...*/ 610 /* tx fifo is full, but we can't clear it...*/
631 INFO(dev, "can't change configurations\n"); 611 ERROR(dev, "can't change configurations\n");
632 return -ESPIPE; 612 return -ESPIPE;
633 } 613 }
634 gmidi_reset_config(dev); 614 gmidi_reset_config(dev);
@@ -843,7 +823,7 @@ static void gmidi_disconnect(struct usb_gadget *gadget)
843static void /* __init_or_exit */ gmidi_unbind(struct usb_gadget *gadget) 823static void /* __init_or_exit */ gmidi_unbind(struct usb_gadget *gadget)
844{ 824{
845 struct gmidi_device *dev = get_gadget_data(gadget); 825 struct gmidi_device *dev = get_gadget_data(gadget);
846 struct snd_card* card; 826 struct snd_card *card;
847 827
848 DBG(dev, "unbind\n"); 828 DBG(dev, "unbind\n");
849 829
@@ -867,12 +847,12 @@ static int gmidi_snd_free(struct snd_device *device)
867 return 0; 847 return 0;
868} 848}
869 849
870static void gmidi_transmit_packet(struct usb_request* req, uint8_t p0, 850static void gmidi_transmit_packet(struct usb_request *req, uint8_t p0,
871 uint8_t p1, uint8_t p2, uint8_t p3) 851 uint8_t p1, uint8_t p2, uint8_t p3)
872{ 852{
873 unsigned length = req->length; 853 unsigned length = req->length;
854 u8 *buf = (u8 *)req->buf + length;
874 855
875 uint8_t* buf = (uint8_t*)req->buf + length;
876 buf[0] = p0; 856 buf[0] = p0;
877 buf[1] = p1; 857 buf[1] = p1;
878 buf[2] = p2; 858 buf[2] = p2;
@@ -883,8 +863,8 @@ static void gmidi_transmit_packet(struct usb_request* req, uint8_t p0,
883/* 863/*
884 * Converts MIDI commands to USB MIDI packets. 864 * Converts MIDI commands to USB MIDI packets.
885 */ 865 */
886static void gmidi_transmit_byte(struct usb_request* req, 866static void gmidi_transmit_byte(struct usb_request *req,
887 struct gmidi_in_port* port, uint8_t b) 867 struct gmidi_in_port *port, uint8_t b)
888{ 868{
889 uint8_t p0 = port->cable; 869 uint8_t p0 = port->cable;
890 870
@@ -981,10 +961,10 @@ static void gmidi_transmit_byte(struct usb_request* req,
981 } 961 }
982} 962}
983 963
984static void gmidi_transmit(struct gmidi_device* dev, struct usb_request* req) 964static void gmidi_transmit(struct gmidi_device *dev, struct usb_request *req)
985{ 965{
986 struct usb_ep* ep = dev->in_ep; 966 struct usb_ep *ep = dev->in_ep;
987 struct gmidi_in_port* port = &dev->in_port; 967 struct gmidi_in_port *port = &dev->in_port;
988 968
989 if (!ep) { 969 if (!ep) {
990 return; 970 return;
@@ -1020,14 +1000,14 @@ static void gmidi_transmit(struct gmidi_device* dev, struct usb_request* req)
1020 1000
1021static void gmidi_in_tasklet(unsigned long data) 1001static void gmidi_in_tasklet(unsigned long data)
1022{ 1002{
1023 struct gmidi_device* dev = (struct gmidi_device*)data; 1003 struct gmidi_device *dev = (struct gmidi_device *)data;
1024 1004
1025 gmidi_transmit(dev, NULL); 1005 gmidi_transmit(dev, NULL);
1026} 1006}
1027 1007
1028static int gmidi_in_open(struct snd_rawmidi_substream *substream) 1008static int gmidi_in_open(struct snd_rawmidi_substream *substream)
1029{ 1009{
1030 struct gmidi_device* dev = substream->rmidi->private_data; 1010 struct gmidi_device *dev = substream->rmidi->private_data;
1031 1011
1032 VDBG(dev, "gmidi_in_open\n"); 1012 VDBG(dev, "gmidi_in_open\n");
1033 dev->in_substream = substream; 1013 dev->in_substream = substream;
@@ -1037,13 +1017,15 @@ static int gmidi_in_open(struct snd_rawmidi_substream *substream)
1037 1017
1038static int gmidi_in_close(struct snd_rawmidi_substream *substream) 1018static int gmidi_in_close(struct snd_rawmidi_substream *substream)
1039{ 1019{
1020 struct gmidi_device *dev = substream->rmidi->private_data;
1021
1040 VDBG(dev, "gmidi_in_close\n"); 1022 VDBG(dev, "gmidi_in_close\n");
1041 return 0; 1023 return 0;
1042} 1024}
1043 1025
1044static void gmidi_in_trigger(struct snd_rawmidi_substream *substream, int up) 1026static void gmidi_in_trigger(struct snd_rawmidi_substream *substream, int up)
1045{ 1027{
1046 struct gmidi_device* dev = substream->rmidi->private_data; 1028 struct gmidi_device *dev = substream->rmidi->private_data;
1047 1029
1048 VDBG(dev, "gmidi_in_trigger %d\n", up); 1030 VDBG(dev, "gmidi_in_trigger %d\n", up);
1049 dev->in_port.active = up; 1031 dev->in_port.active = up;
@@ -1054,7 +1036,7 @@ static void gmidi_in_trigger(struct snd_rawmidi_substream *substream, int up)
1054 1036
1055static int gmidi_out_open(struct snd_rawmidi_substream *substream) 1037static int gmidi_out_open(struct snd_rawmidi_substream *substream)
1056{ 1038{
1057 struct gmidi_device* dev = substream->rmidi->private_data; 1039 struct gmidi_device *dev = substream->rmidi->private_data;
1058 1040
1059 VDBG(dev, "gmidi_out_open\n"); 1041 VDBG(dev, "gmidi_out_open\n");
1060 dev->out_substream = substream; 1042 dev->out_substream = substream;
@@ -1063,13 +1045,15 @@ static int gmidi_out_open(struct snd_rawmidi_substream *substream)
1063 1045
1064static int gmidi_out_close(struct snd_rawmidi_substream *substream) 1046static int gmidi_out_close(struct snd_rawmidi_substream *substream)
1065{ 1047{
1048 struct gmidi_device *dev = substream->rmidi->private_data;
1049
1066 VDBG(dev, "gmidi_out_close\n"); 1050 VDBG(dev, "gmidi_out_close\n");
1067 return 0; 1051 return 0;
1068} 1052}
1069 1053
1070static void gmidi_out_trigger(struct snd_rawmidi_substream *substream, int up) 1054static void gmidi_out_trigger(struct snd_rawmidi_substream *substream, int up)
1071{ 1055{
1072 struct gmidi_device* dev = substream->rmidi->private_data; 1056 struct gmidi_device *dev = substream->rmidi->private_data;
1073 1057
1074 VDBG(dev, "gmidi_out_trigger %d\n", up); 1058 VDBG(dev, "gmidi_out_trigger %d\n", up);
1075 if (up) { 1059 if (up) {
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 349b8166f3..2ec9d196a8 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -37,7 +37,7 @@
37#include <linux/proc_fs.h> 37#include <linux/proc_fs.h>
38#include <linux/device.h> 38#include <linux/device.h>
39#include <linux/usb/ch9.h> 39#include <linux/usb/ch9.h>
40#include <linux/usb_gadget.h> 40#include <linux/usb/gadget.h>
41 41
42#include <asm/byteorder.h> 42#include <asm/byteorder.h>
43#include <asm/io.h> 43#include <asm/io.h>
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 173004f60f..47ef8bd58a 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -20,8 +20,7 @@
20 */ 20 */
21 21
22 22
23// #define DEBUG /* data to help fault diagnosis */ 23/* #define VERBOSE_DEBUG */
24// #define VERBOSE /* extra debug messages (success too) */
25 24
26#include <linux/init.h> 25#include <linux/init.h>
27#include <linux/module.h> 26#include <linux/module.h>
@@ -38,7 +37,7 @@
38#include <linux/moduleparam.h> 37#include <linux/moduleparam.h>
39 38
40#include <linux/usb/gadgetfs.h> 39#include <linux/usb/gadgetfs.h>
41#include <linux/usb_gadget.h> 40#include <linux/usb/gadget.h>
42 41
43 42
44/* 43/*
@@ -253,7 +252,7 @@ static const char *CHIP;
253 do { } while (0) 252 do { } while (0)
254#endif /* DEBUG */ 253#endif /* DEBUG */
255 254
256#ifdef VERBOSE 255#ifdef VERBOSE_DEBUG
257#define VDEBUG DBG 256#define VDEBUG DBG
258#else 257#else
259#define VDEBUG(dev,fmt,args...) \ 258#define VDEBUG(dev,fmt,args...) \
@@ -1010,11 +1009,12 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
1010 /* assume that was SET_CONFIGURATION */ 1009 /* assume that was SET_CONFIGURATION */
1011 if (dev->current_config) { 1010 if (dev->current_config) {
1012 unsigned power; 1011 unsigned power;
1013#ifdef CONFIG_USB_GADGET_DUALSPEED 1012
1014 if (dev->gadget->speed == USB_SPEED_HIGH) 1013 if (gadget_is_dualspeed(dev->gadget)
1014 && (dev->gadget->speed
1015 == USB_SPEED_HIGH))
1015 power = dev->hs_config->bMaxPower; 1016 power = dev->hs_config->bMaxPower;
1016 else 1017 else
1017#endif
1018 power = dev->config->bMaxPower; 1018 power = dev->config->bMaxPower;
1019 usb_gadget_vbus_draw(dev->gadget, 2 * power); 1019 usb_gadget_vbus_draw(dev->gadget, 2 * power);
1020 } 1020 }
@@ -1355,24 +1355,21 @@ static int
1355config_buf (struct dev_data *dev, u8 type, unsigned index) 1355config_buf (struct dev_data *dev, u8 type, unsigned index)
1356{ 1356{
1357 int len; 1357 int len;
1358#ifdef CONFIG_USB_GADGET_DUALSPEED 1358 int hs = 0;
1359 int hs;
1360#endif
1361 1359
1362 /* only one configuration */ 1360 /* only one configuration */
1363 if (index > 0) 1361 if (index > 0)
1364 return -EINVAL; 1362 return -EINVAL;
1365 1363
1366#ifdef CONFIG_USB_GADGET_DUALSPEED 1364 if (gadget_is_dualspeed(dev->gadget)) {
1367 hs = (dev->gadget->speed == USB_SPEED_HIGH); 1365 hs = (dev->gadget->speed == USB_SPEED_HIGH);
1368 if (type == USB_DT_OTHER_SPEED_CONFIG) 1366 if (type == USB_DT_OTHER_SPEED_CONFIG)
1369 hs = !hs; 1367 hs = !hs;
1368 }
1370 if (hs) { 1369 if (hs) {
1371 dev->req->buf = dev->hs_config; 1370 dev->req->buf = dev->hs_config;
1372 len = le16_to_cpu(dev->hs_config->wTotalLength); 1371 len = le16_to_cpu(dev->hs_config->wTotalLength);
1373 } else 1372 } else {
1374#endif
1375 {
1376 dev->req->buf = dev->config; 1373 dev->req->buf = dev->config;
1377 len = le16_to_cpu(dev->config->wTotalLength); 1374 len = le16_to_cpu(dev->config->wTotalLength);
1378 } 1375 }
@@ -1393,13 +1390,13 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1393 spin_lock (&dev->lock); 1390 spin_lock (&dev->lock);
1394 dev->setup_abort = 0; 1391 dev->setup_abort = 0;
1395 if (dev->state == STATE_DEV_UNCONNECTED) { 1392 if (dev->state == STATE_DEV_UNCONNECTED) {
1396#ifdef CONFIG_USB_GADGET_DUALSPEED 1393 if (gadget_is_dualspeed(gadget)
1397 if (gadget->speed == USB_SPEED_HIGH && dev->hs_config == NULL) { 1394 && gadget->speed == USB_SPEED_HIGH
1395 && dev->hs_config == NULL) {
1398 spin_unlock(&dev->lock); 1396 spin_unlock(&dev->lock);
1399 ERROR (dev, "no high speed config??\n"); 1397 ERROR (dev, "no high speed config??\n");
1400 return -EINVAL; 1398 return -EINVAL;
1401 } 1399 }
1402#endif /* CONFIG_USB_GADGET_DUALSPEED */
1403 1400
1404 dev->state = STATE_DEV_CONNECTED; 1401 dev->state = STATE_DEV_CONNECTED;
1405 dev->dev->bMaxPacketSize0 = gadget->ep0->maxpacket; 1402 dev->dev->bMaxPacketSize0 = gadget->ep0->maxpacket;
@@ -1469,13 +1466,12 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1469 // user mode expected to disable endpoints 1466 // user mode expected to disable endpoints
1470 } else { 1467 } else {
1471 u8 config, power; 1468 u8 config, power;
1472#ifdef CONFIG_USB_GADGET_DUALSPEED 1469
1473 if (gadget->speed == USB_SPEED_HIGH) { 1470 if (gadget_is_dualspeed(gadget)
1471 && gadget->speed == USB_SPEED_HIGH) {
1474 config = dev->hs_config->bConfigurationValue; 1472 config = dev->hs_config->bConfigurationValue;
1475 power = dev->hs_config->bMaxPower; 1473 power = dev->hs_config->bMaxPower;
1476 } else 1474 } else {
1477#endif
1478 {
1479 config = dev->config->bConfigurationValue; 1475 config = dev->config->bConfigurationValue;
1480 power = dev->config->bMaxPower; 1476 power = dev->config->bMaxPower;
1481 } 1477 }
diff --git a/drivers/usb/gadget/lh7a40x_udc.h b/drivers/usb/gadget/lh7a40x_udc.h
index b3fe197e1e..1ecfd6366b 100644
--- a/drivers/usb/gadget/lh7a40x_udc.h
+++ b/drivers/usb/gadget/lh7a40x_udc.h
@@ -50,7 +50,7 @@
50#include <asm/hardware.h> 50#include <asm/hardware.h>
51 51
52#include <linux/usb/ch9.h> 52#include <linux/usb/ch9.h>
53#include <linux/usb_gadget.h> 53#include <linux/usb/gadget.h>
54 54
55/* 55/*
56 * Memory map 56 * Memory map
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 4b27d12f04..ebc5536aa2 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -27,7 +27,7 @@
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28 28
29#include <linux/usb/ch9.h> 29#include <linux/usb/ch9.h>
30#include <linux/usb_gadget.h> 30#include <linux/usb/gadget.h>
31 31
32#include "m66592-udc.h" 32#include "m66592-udc.h"
33 33
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index c3d364ecd4..d5d473f814 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -62,7 +62,7 @@
62#include <linux/moduleparam.h> 62#include <linux/moduleparam.h>
63#include <linux/device.h> 63#include <linux/device.h>
64#include <linux/usb/ch9.h> 64#include <linux/usb/ch9.h>
65#include <linux/usb_gadget.h> 65#include <linux/usb/gadget.h>
66 66
67#include <asm/byteorder.h> 67#include <asm/byteorder.h>
68#include <asm/io.h> 68#include <asm/io.h>
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 9b0f0925dd..87c4f50dfb 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -38,7 +38,7 @@
38#include <linux/moduleparam.h> 38#include <linux/moduleparam.h>
39#include <linux/platform_device.h> 39#include <linux/platform_device.h>
40#include <linux/usb/ch9.h> 40#include <linux/usb/ch9.h>
41#include <linux/usb_gadget.h> 41#include <linux/usb/gadget.h>
42#include <linux/usb/otg.h> 42#include <linux/usb/otg.h>
43#include <linux/dma-mapping.h> 43#include <linux/dma-mapping.h>
44#include <linux/clk.h> 44#include <linux/clk.h>
@@ -1241,19 +1241,15 @@ static void pullup_enable(struct omap_udc *udc)
1241 udc->gadget.dev.parent->power.power_state = PMSG_ON; 1241 udc->gadget.dev.parent->power.power_state = PMSG_ON;
1242 udc->gadget.dev.power.power_state = PMSG_ON; 1242 udc->gadget.dev.power.power_state = PMSG_ON;
1243 UDC_SYSCON1_REG |= UDC_PULLUP_EN; 1243 UDC_SYSCON1_REG |= UDC_PULLUP_EN;
1244#ifndef CONFIG_USB_OTG 1244 if (!gadget_is_otg(udc->gadget) && !cpu_is_omap15xx())
1245 if (!cpu_is_omap15xx())
1246 OTG_CTRL_REG |= OTG_BSESSVLD; 1245 OTG_CTRL_REG |= OTG_BSESSVLD;
1247#endif
1248 UDC_IRQ_EN_REG = UDC_DS_CHG_IE; 1246 UDC_IRQ_EN_REG = UDC_DS_CHG_IE;
1249} 1247}
1250 1248
1251static void pullup_disable(struct omap_udc *udc) 1249static void pullup_disable(struct omap_udc *udc)
1252{ 1250{
1253#ifndef CONFIG_USB_OTG 1251 if (!gadget_is_otg(udc->gadget) && !cpu_is_omap15xx())
1254 if (!cpu_is_omap15xx())
1255 OTG_CTRL_REG &= ~OTG_BSESSVLD; 1252 OTG_CTRL_REG &= ~OTG_BSESSVLD;
1256#endif
1257 UDC_IRQ_EN_REG = UDC_DS_CHG_IE; 1253 UDC_IRQ_EN_REG = UDC_DS_CHG_IE;
1258 UDC_SYSCON1_REG &= ~UDC_PULLUP_EN; 1254 UDC_SYSCON1_REG &= ~UDC_PULLUP_EN;
1259} 1255}
@@ -1390,7 +1386,7 @@ static void update_otg(struct omap_udc *udc)
1390{ 1386{
1391 u16 devstat; 1387 u16 devstat;
1392 1388
1393 if (!udc->gadget.is_otg) 1389 if (!gadget_is_otg(udc->gadget))
1394 return; 1390 return;
1395 1391
1396 if (OTG_CTRL_REG & OTG_ID) 1392 if (OTG_CTRL_REG & OTG_ID)
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c
index 1407ad1c81..3e715082de 100644
--- a/drivers/usb/gadget/pxa2xx_udc.c
+++ b/drivers/usb/gadget/pxa2xx_udc.c
@@ -54,7 +54,7 @@
54#include <asm/hardware.h> 54#include <asm/hardware.h>
55 55
56#include <linux/usb/ch9.h> 56#include <linux/usb/ch9.h>
57#include <linux/usb_gadget.h> 57#include <linux/usb/gadget.h>
58 58
59#include <asm/mach/udc_pxa2xx.h> 59#include <asm/mach/udc_pxa2xx.h>
60 60
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 0be80c635c..e3e90f8a75 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -42,7 +42,7 @@
42#include <linux/seq_file.h> 42#include <linux/seq_file.h>
43 43
44#include <linux/usb.h> 44#include <linux/usb.h>
45#include <linux/usb_gadget.h> 45#include <linux/usb/gadget.h>
46 46
47#include <asm/byteorder.h> 47#include <asm/byteorder.h>
48#include <asm/io.h> 48#include <asm/io.h>
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index ce4d2e0963..f5738eb8e7 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -17,34 +17,15 @@
17 * 17 *
18 */ 18 */
19 19
20#include <linux/module.h>
21#include <linux/kernel.h> 20#include <linux/kernel.h>
22#include <linux/delay.h>
23#include <linux/ioport.h>
24#include <linux/slab.h>
25#include <linux/errno.h>
26#include <linux/init.h>
27#include <linux/timer.h>
28#include <linux/list.h>
29#include <linux/interrupt.h>
30#include <linux/utsname.h> 21#include <linux/utsname.h>
31#include <linux/wait.h>
32#include <linux/proc_fs.h>
33#include <linux/device.h> 22#include <linux/device.h>
34#include <linux/tty.h> 23#include <linux/tty.h>
35#include <linux/tty_flip.h> 24#include <linux/tty_flip.h>
36#include <linux/mutex.h>
37
38#include <asm/byteorder.h>
39#include <asm/io.h>
40#include <asm/irq.h>
41#include <asm/system.h>
42#include <asm/unaligned.h>
43#include <asm/uaccess.h>
44 25
45#include <linux/usb/ch9.h> 26#include <linux/usb/ch9.h>
46#include <linux/usb/cdc.h> 27#include <linux/usb/cdc.h>
47#include <linux/usb_gadget.h> 28#include <linux/usb/gadget.h>
48 29
49#include "gadget_chips.h" 30#include "gadget_chips.h"
50 31
@@ -89,30 +70,29 @@
89#define GS_DEFAULT_PARITY USB_CDC_NO_PARITY 70#define GS_DEFAULT_PARITY USB_CDC_NO_PARITY
90#define GS_DEFAULT_CHAR_FORMAT USB_CDC_1_STOP_BITS 71#define GS_DEFAULT_CHAR_FORMAT USB_CDC_1_STOP_BITS
91 72
92/* select highspeed/fullspeed, hiding highspeed if not configured */ 73/* maxpacket and other transfer characteristics vary by speed. */
93#ifdef CONFIG_USB_GADGET_DUALSPEED 74static inline struct usb_endpoint_descriptor *
94#define GS_SPEED_SELECT(is_hs,hs,fs) ((is_hs) ? (hs) : (fs)) 75choose_ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *hs,
95#else 76 struct usb_endpoint_descriptor *fs)
96#define GS_SPEED_SELECT(is_hs,hs,fs) (fs) 77{
97#endif /* CONFIG_USB_GADGET_DUALSPEED */ 78 if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
79 return hs;
80 return fs;
81}
82
98 83
99/* debug settings */ 84/* debug settings */
100#ifdef GS_DEBUG 85#ifdef DEBUG
101static int debug = 1; 86static int debug = 1;
87#else
88#define debug 0
89#endif
102 90
103#define gs_debug(format, arg...) \ 91#define gs_debug(format, arg...) \
104 do { if (debug) printk(KERN_DEBUG format, ## arg); } while(0) 92 do { if (debug) printk(KERN_DEBUG format, ## arg); } while(0)
105#define gs_debug_level(level, format, arg...) \ 93#define gs_debug_level(level, format, arg...) \
106 do { if (debug>=level) printk(KERN_DEBUG format, ## arg); } while(0) 94 do { if (debug>=level) printk(KERN_DEBUG format, ## arg); } while(0)
107 95
108#else
109
110#define gs_debug(format, arg...) \
111 do { } while(0)
112#define gs_debug_level(level, format, arg...) \
113 do { } while(0)
114
115#endif /* GS_DEBUG */
116 96
117/* Thanks to NetChip Technologies for donating this product ID. 97/* Thanks to NetChip Technologies for donating this product ID.
118 * 98 *
@@ -147,10 +127,10 @@ struct gs_req_entry {
147 127
148/* the port structure holds info for each port, one for each minor number */ 128/* the port structure holds info for each port, one for each minor number */
149struct gs_port { 129struct gs_port {
150 struct gs_dev *port_dev; /* pointer to device struct */ 130 struct gs_dev *port_dev; /* pointer to device struct */
151 struct tty_struct *port_tty; /* pointer to tty struct */ 131 struct tty_struct *port_tty; /* pointer to tty struct */
152 spinlock_t port_lock; 132 spinlock_t port_lock;
153 int port_num; 133 int port_num;
154 int port_open_count; 134 int port_open_count;
155 int port_in_use; /* open/close in progress */ 135 int port_in_use; /* open/close in progress */
156 wait_queue_head_t port_write_wait;/* waiting to write */ 136 wait_queue_head_t port_write_wait;/* waiting to write */
@@ -188,7 +168,7 @@ static void __exit gs_module_exit(void);
188/* tty driver */ 168/* tty driver */
189static int gs_open(struct tty_struct *tty, struct file *file); 169static int gs_open(struct tty_struct *tty, struct file *file);
190static void gs_close(struct tty_struct *tty, struct file *file); 170static void gs_close(struct tty_struct *tty, struct file *file);
191static int gs_write(struct tty_struct *tty, 171static int gs_write(struct tty_struct *tty,
192 const unsigned char *buf, int count); 172 const unsigned char *buf, int count);
193static void gs_put_char(struct tty_struct *tty, unsigned char ch); 173static void gs_put_char(struct tty_struct *tty, unsigned char ch);
194static void gs_flush_chars(struct tty_struct *tty); 174static void gs_flush_chars(struct tty_struct *tty);
@@ -222,7 +202,7 @@ static void gs_setup_complete(struct usb_ep *ep, struct usb_request *req);
222static void gs_disconnect(struct usb_gadget *gadget); 202static void gs_disconnect(struct usb_gadget *gadget);
223static int gs_set_config(struct gs_dev *dev, unsigned config); 203static int gs_set_config(struct gs_dev *dev, unsigned config);
224static void gs_reset_config(struct gs_dev *dev); 204static void gs_reset_config(struct gs_dev *dev);
225static int gs_build_config_buf(u8 *buf, enum usb_device_speed speed, 205static int gs_build_config_buf(u8 *buf, struct usb_gadget *g,
226 u8 type, unsigned int index, int is_otg); 206 u8 type, unsigned int index, int is_otg);
227 207
228static struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned int len, 208static struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned int len,
@@ -415,18 +395,18 @@ static const struct usb_cdc_header_desc gs_header_desc = {
415}; 395};
416 396
417static const struct usb_cdc_call_mgmt_descriptor gs_call_mgmt_descriptor = { 397static const struct usb_cdc_call_mgmt_descriptor gs_call_mgmt_descriptor = {
418 .bLength = sizeof(gs_call_mgmt_descriptor), 398 .bLength = sizeof(gs_call_mgmt_descriptor),
419 .bDescriptorType = USB_DT_CS_INTERFACE, 399 .bDescriptorType = USB_DT_CS_INTERFACE,
420 .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE, 400 .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
421 .bmCapabilities = 0, 401 .bmCapabilities = 0,
422 .bDataInterface = 1, /* index of data interface */ 402 .bDataInterface = 1, /* index of data interface */
423}; 403};
424 404
425static struct usb_cdc_acm_descriptor gs_acm_descriptor = { 405static struct usb_cdc_acm_descriptor gs_acm_descriptor = {
426 .bLength = sizeof(gs_acm_descriptor), 406 .bLength = sizeof(gs_acm_descriptor),
427 .bDescriptorType = USB_DT_CS_INTERFACE, 407 .bDescriptorType = USB_DT_CS_INTERFACE,
428 .bDescriptorSubType = USB_CDC_ACM_TYPE, 408 .bDescriptorSubType = USB_CDC_ACM_TYPE,
429 .bmCapabilities = 0, 409 .bmCapabilities = 0,
430}; 410};
431 411
432static const struct usb_cdc_union_desc gs_union_desc = { 412static const struct usb_cdc_union_desc gs_union_desc = {
@@ -436,7 +416,7 @@ static const struct usb_cdc_union_desc gs_union_desc = {
436 .bMasterInterface0 = 0, /* index of control interface */ 416 .bMasterInterface0 = 0, /* index of control interface */
437 .bSlaveInterface0 = 1, /* index of data interface */ 417 .bSlaveInterface0 = 1, /* index of data interface */
438}; 418};
439 419
440static struct usb_endpoint_descriptor gs_fullspeed_notify_desc = { 420static struct usb_endpoint_descriptor gs_fullspeed_notify_desc = {
441 .bLength = USB_DT_ENDPOINT_SIZE, 421 .bLength = USB_DT_ENDPOINT_SIZE,
442 .bDescriptorType = USB_DT_ENDPOINT, 422 .bDescriptorType = USB_DT_ENDPOINT,
@@ -482,7 +462,6 @@ static const struct usb_descriptor_header *gs_acm_fullspeed_function[] = {
482 NULL, 462 NULL,
483}; 463};
484 464
485#ifdef CONFIG_USB_GADGET_DUALSPEED
486static struct usb_endpoint_descriptor gs_highspeed_notify_desc = { 465static struct usb_endpoint_descriptor gs_highspeed_notify_desc = {
487 .bLength = USB_DT_ENDPOINT_SIZE, 466 .bLength = USB_DT_ENDPOINT_SIZE,
488 .bDescriptorType = USB_DT_ENDPOINT, 467 .bDescriptorType = USB_DT_ENDPOINT,
@@ -536,15 +515,13 @@ static const struct usb_descriptor_header *gs_acm_highspeed_function[] = {
536 NULL, 515 NULL,
537}; 516};
538 517
539#endif /* CONFIG_USB_GADGET_DUALSPEED */
540
541 518
542/* Module */ 519/* Module */
543MODULE_DESCRIPTION(GS_LONG_NAME); 520MODULE_DESCRIPTION(GS_LONG_NAME);
544MODULE_AUTHOR("Al Borchers"); 521MODULE_AUTHOR("Al Borchers");
545MODULE_LICENSE("GPL"); 522MODULE_LICENSE("GPL");
546 523
547#ifdef GS_DEBUG 524#ifdef DEBUG
548module_param(debug, int, S_IRUGO|S_IWUSR); 525module_param(debug, int, S_IRUGO|S_IWUSR);
549MODULE_PARM_DESC(debug, "Enable debugging, 0=off, 1=on"); 526MODULE_PARM_DESC(debug, "Enable debugging, 0=off, 1=on");
550#endif 527#endif
@@ -915,7 +892,8 @@ static void gs_put_char(struct tty_struct *tty, unsigned char ch)
915 return; 892 return;
916 } 893 }
917 894
918 gs_debug("gs_put_char: (%d,%p) char=0x%x, called from %p, %p, %p\n", port->port_num, tty, ch, __builtin_return_address(0), __builtin_return_address(1), __builtin_return_address(2)); 895 gs_debug("gs_put_char: (%d,%p) char=0x%x, called from %p\n",
896 port->port_num, tty, ch, __builtin_return_address(0));
919 897
920 spin_lock_irqsave(&port->port_lock, flags); 898 spin_lock_irqsave(&port->port_lock, flags);
921 899
@@ -1116,7 +1094,11 @@ static int gs_send(struct gs_dev *dev)
1116 len = gs_send_packet(dev, req->buf, ep->maxpacket); 1094 len = gs_send_packet(dev, req->buf, ep->maxpacket);
1117 1095
1118 if (len > 0) { 1096 if (len > 0) {
1119gs_debug_level(3, "gs_send: len=%d, 0x%2.2x 0x%2.2x 0x%2.2x ...\n", len, *((unsigned char *)req->buf), *((unsigned char *)req->buf+1), *((unsigned char *)req->buf+2)); 1097 gs_debug_level(3, "gs_send: len=%d, 0x%2.2x "
1098 "0x%2.2x 0x%2.2x ...\n", len,
1099 *((unsigned char *)req->buf),
1100 *((unsigned char *)req->buf+1),
1101 *((unsigned char *)req->buf+2));
1120 list_del(&req_entry->re_entry); 1102 list_del(&req_entry->re_entry);
1121 req->length = len; 1103 req->length = len;
1122 spin_unlock_irqrestore(&dev->dev_lock, flags); 1104 spin_unlock_irqrestore(&dev->dev_lock, flags);
@@ -1269,7 +1251,7 @@ static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
1269 1251
1270 switch(req->status) { 1252 switch(req->status) {
1271 case 0: 1253 case 0:
1272 /* normal completion */ 1254 /* normal completion */
1273 gs_recv_packet(dev, req->buf, req->actual); 1255 gs_recv_packet(dev, req->buf, req->actual);
1274requeue: 1256requeue:
1275 req->length = ep->maxpacket; 1257 req->length = ep->maxpacket;
@@ -1406,23 +1388,24 @@ static int __init gs_bind(struct usb_gadget *gadget)
1406 ? USB_CLASS_COMM : USB_CLASS_VENDOR_SPEC; 1388 ? USB_CLASS_COMM : USB_CLASS_VENDOR_SPEC;
1407 gs_device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket; 1389 gs_device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
1408 1390
1409#ifdef CONFIG_USB_GADGET_DUALSPEED 1391 if (gadget_is_dualspeed(gadget)) {
1410 gs_qualifier_desc.bDeviceClass = use_acm 1392 gs_qualifier_desc.bDeviceClass = use_acm
1411 ? USB_CLASS_COMM : USB_CLASS_VENDOR_SPEC; 1393 ? USB_CLASS_COMM : USB_CLASS_VENDOR_SPEC;
1412 /* assume ep0 uses the same packet size for both speeds */ 1394 /* assume ep0 uses the same packet size for both speeds */
1413 gs_qualifier_desc.bMaxPacketSize0 = gs_device_desc.bMaxPacketSize0; 1395 gs_qualifier_desc.bMaxPacketSize0 =
1414 /* assume endpoints are dual-speed */ 1396 gs_device_desc.bMaxPacketSize0;
1415 gs_highspeed_notify_desc.bEndpointAddress = 1397 /* assume endpoints are dual-speed */
1416 gs_fullspeed_notify_desc.bEndpointAddress; 1398 gs_highspeed_notify_desc.bEndpointAddress =
1417 gs_highspeed_in_desc.bEndpointAddress = 1399 gs_fullspeed_notify_desc.bEndpointAddress;
1418 gs_fullspeed_in_desc.bEndpointAddress; 1400 gs_highspeed_in_desc.bEndpointAddress =
1419 gs_highspeed_out_desc.bEndpointAddress = 1401 gs_fullspeed_in_desc.bEndpointAddress;
1420 gs_fullspeed_out_desc.bEndpointAddress; 1402 gs_highspeed_out_desc.bEndpointAddress =
1421#endif /* CONFIG_USB_GADGET_DUALSPEED */ 1403 gs_fullspeed_out_desc.bEndpointAddress;
1404 }
1422 1405
1423 usb_gadget_set_selfpowered(gadget); 1406 usb_gadget_set_selfpowered(gadget);
1424 1407
1425 if (gadget->is_otg) { 1408 if (gadget_is_otg(gadget)) {
1426 gs_otg_descriptor.bmAttributes |= USB_OTG_HNP, 1409 gs_otg_descriptor.bmAttributes |= USB_OTG_HNP,
1427 gs_bulk_config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP; 1410 gs_bulk_config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
1428 gs_acm_config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP; 1411 gs_acm_config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
@@ -1487,6 +1470,12 @@ static void /* __init_or_exit */ gs_unbind(struct usb_gadget *gadget)
1487 dev->dev_ctrl_req = NULL; 1470 dev->dev_ctrl_req = NULL;
1488 } 1471 }
1489 gs_free_ports(dev); 1472 gs_free_ports(dev);
1473 if (dev->dev_notify_ep)
1474 usb_ep_disable(dev->dev_notify_ep);
1475 if (dev->dev_in_ep)
1476 usb_ep_disable(dev->dev_in_ep);
1477 if (dev->dev_out_ep)
1478 usb_ep_disable(dev->dev_out_ep);
1490 kfree(dev); 1479 kfree(dev);
1491 set_gadget_data(gadget, NULL); 1480 set_gadget_data(gadget, NULL);
1492 } 1481 }
@@ -1570,9 +1559,8 @@ static int gs_setup_standard(struct usb_gadget *gadget,
1570 memcpy(req->buf, &gs_device_desc, ret); 1559 memcpy(req->buf, &gs_device_desc, ret);
1571 break; 1560 break;
1572 1561
1573#ifdef CONFIG_USB_GADGET_DUALSPEED
1574 case USB_DT_DEVICE_QUALIFIER: 1562 case USB_DT_DEVICE_QUALIFIER:
1575 if (!gadget->is_dualspeed) 1563 if (!gadget_is_dualspeed(gadget))
1576 break; 1564 break;
1577 ret = min(wLength, 1565 ret = min(wLength,
1578 (u16)sizeof(struct usb_qualifier_descriptor)); 1566 (u16)sizeof(struct usb_qualifier_descriptor));
@@ -1580,14 +1568,13 @@ static int gs_setup_standard(struct usb_gadget *gadget,
1580 break; 1568 break;
1581 1569
1582 case USB_DT_OTHER_SPEED_CONFIG: 1570 case USB_DT_OTHER_SPEED_CONFIG:
1583 if (!gadget->is_dualspeed) 1571 if (!gadget_is_dualspeed(gadget))
1584 break; 1572 break;
1585 /* fall through */ 1573 /* fall through */
1586#endif /* CONFIG_USB_GADGET_DUALSPEED */
1587 case USB_DT_CONFIG: 1574 case USB_DT_CONFIG:
1588 ret = gs_build_config_buf(req->buf, gadget->speed, 1575 ret = gs_build_config_buf(req->buf, gadget,
1589 wValue >> 8, wValue & 0xff, 1576 wValue >> 8, wValue & 0xff,
1590 gadget->is_otg); 1577 gadget_is_otg(gadget));
1591 if (ret >= 0) 1578 if (ret >= 0)
1592 ret = min(wLength, (u16)ret); 1579 ret = min(wLength, (u16)ret);
1593 break; 1580 break;
@@ -1827,8 +1814,7 @@ static int gs_set_config(struct gs_dev *dev, unsigned config)
1827 1814
1828 if (EP_NOTIFY_NAME 1815 if (EP_NOTIFY_NAME
1829 && strcmp(ep->name, EP_NOTIFY_NAME) == 0) { 1816 && strcmp(ep->name, EP_NOTIFY_NAME) == 0) {
1830 ep_desc = GS_SPEED_SELECT( 1817 ep_desc = choose_ep_desc(gadget,
1831 gadget->speed == USB_SPEED_HIGH,
1832 &gs_highspeed_notify_desc, 1818 &gs_highspeed_notify_desc,
1833 &gs_fullspeed_notify_desc); 1819 &gs_fullspeed_notify_desc);
1834 ret = usb_ep_enable(ep,ep_desc); 1820 ret = usb_ep_enable(ep,ep_desc);
@@ -1844,9 +1830,8 @@ static int gs_set_config(struct gs_dev *dev, unsigned config)
1844 } 1830 }
1845 1831
1846 else if (strcmp(ep->name, EP_IN_NAME) == 0) { 1832 else if (strcmp(ep->name, EP_IN_NAME) == 0) {
1847 ep_desc = GS_SPEED_SELECT( 1833 ep_desc = choose_ep_desc(gadget,
1848 gadget->speed == USB_SPEED_HIGH, 1834 &gs_highspeed_in_desc,
1849 &gs_highspeed_in_desc,
1850 &gs_fullspeed_in_desc); 1835 &gs_fullspeed_in_desc);
1851 ret = usb_ep_enable(ep,ep_desc); 1836 ret = usb_ep_enable(ep,ep_desc);
1852 if (ret == 0) { 1837 if (ret == 0) {
@@ -1861,8 +1846,7 @@ static int gs_set_config(struct gs_dev *dev, unsigned config)
1861 } 1846 }
1862 1847
1863 else if (strcmp(ep->name, EP_OUT_NAME) == 0) { 1848 else if (strcmp(ep->name, EP_OUT_NAME) == 0) {
1864 ep_desc = GS_SPEED_SELECT( 1849 ep_desc = choose_ep_desc(gadget,
1865 gadget->speed == USB_SPEED_HIGH,
1866 &gs_highspeed_out_desc, 1850 &gs_highspeed_out_desc,
1867 &gs_fullspeed_out_desc); 1851 &gs_fullspeed_out_desc);
1868 ret = usb_ep_enable(ep,ep_desc); 1852 ret = usb_ep_enable(ep,ep_desc);
@@ -1981,11 +1965,11 @@ static void gs_reset_config(struct gs_dev *dev)
1981 * Builds the config descriptors in the given buffer and returns the 1965 * Builds the config descriptors in the given buffer and returns the
1982 * length, or a negative error number. 1966 * length, or a negative error number.
1983 */ 1967 */
1984static int gs_build_config_buf(u8 *buf, enum usb_device_speed speed, 1968static int gs_build_config_buf(u8 *buf, struct usb_gadget *g,
1985 u8 type, unsigned int index, int is_otg) 1969 u8 type, unsigned int index, int is_otg)
1986{ 1970{
1987 int len; 1971 int len;
1988 int high_speed; 1972 int high_speed = 0;
1989 const struct usb_config_descriptor *config_desc; 1973 const struct usb_config_descriptor *config_desc;
1990 const struct usb_descriptor_header **function; 1974 const struct usb_descriptor_header **function;
1991 1975
@@ -1993,20 +1977,22 @@ static int gs_build_config_buf(u8 *buf, enum usb_device_speed speed,
1993 return -EINVAL; 1977 return -EINVAL;
1994 1978
1995 /* other speed switches high and full speed */ 1979 /* other speed switches high and full speed */
1996 high_speed = (speed == USB_SPEED_HIGH); 1980 if (gadget_is_dualspeed(g)) {
1997 if (type == USB_DT_OTHER_SPEED_CONFIG) 1981 high_speed = (g->speed == USB_SPEED_HIGH);
1998 high_speed = !high_speed; 1982 if (type == USB_DT_OTHER_SPEED_CONFIG)
1983 high_speed = !high_speed;
1984 }
1999 1985
2000 if (use_acm) { 1986 if (use_acm) {
2001 config_desc = &gs_acm_config_desc; 1987 config_desc = &gs_acm_config_desc;
2002 function = GS_SPEED_SELECT(high_speed, 1988 function = high_speed
2003 gs_acm_highspeed_function, 1989 ? gs_acm_highspeed_function
2004 gs_acm_fullspeed_function); 1990 : gs_acm_fullspeed_function;
2005 } else { 1991 } else {
2006 config_desc = &gs_bulk_config_desc; 1992 config_desc = &gs_bulk_config_desc;
2007 function = GS_SPEED_SELECT(high_speed, 1993 function = high_speed
2008 gs_bulk_highspeed_function, 1994 ? gs_bulk_highspeed_function
2009 gs_bulk_fullspeed_function); 1995 : gs_bulk_fullspeed_function;
2010 } 1996 }
2011 1997
2012 /* for now, don't advertise srp-only devices */ 1998 /* for now, don't advertise srp-only devices */
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
index 3459ea6c6c..878e428a0e 100644
--- a/drivers/usb/gadget/usbstring.c
+++ b/drivers/usb/gadget/usbstring.c
@@ -15,7 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16 16
17#include <linux/usb/ch9.h> 17#include <linux/usb/ch9.h>
18#include <linux/usb_gadget.h> 18#include <linux/usb/gadget.h>
19 19
20#include <asm/unaligned.h> 20#include <asm/unaligned.h>
21 21
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index fcfe869acb..fcde5d9c87 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -1,38 +1,22 @@
1/* 1/*
2 * zero.c -- Gadget Zero, for USB development 2 * zero.c -- Gadget Zero, for USB development
3 * 3 *
4 * Copyright (C) 2003-2004 David Brownell 4 * Copyright (C) 2003-2007 David Brownell
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * This program is free software; you can redistribute it and/or modify
8 * modification, are permitted provided that the following conditions 8 * it under the terms of the GNU General Public License as published by
9 * are met: 9 * the Free Software Foundation; either version 2 of the License, or
10 * 1. Redistributions of source code must retain the above copyright 10 * (at your option) any later version.
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The names of the above-listed copyright holders may not be used
17 * to endorse or promote products derived from this software without
18 * specific prior written permission.
19 * 11 *
20 * ALTERNATIVELY, this software may be distributed under the terms of the 12 * This program is distributed in the hope that it will be useful,
21 * GNU General Public License ("GPL") as published by the Free Software 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * Foundation, either version 2 of that License or (at your option) any 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * later version. 15 * GNU General Public License for more details.
24 * 16 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 17 * You should have received a copy of the GNU General Public License
26 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 18 * along with this program; if not, write to the Free Software
27 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
29 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */ 20 */
37 21
38 22
@@ -57,40 +41,28 @@
57 * Many drivers will only have one configuration, letting them be much 41 * Many drivers will only have one configuration, letting them be much
58 * simpler if they also don't support high speed operation (like this 42 * simpler if they also don't support high speed operation (like this
59 * driver does). 43 * driver does).
44 *
45 * Why is *this* driver using two configurations, rather than setting up
46 * two interfaces with different functions? To help verify that multiple
47 * configuration infrastucture is working correctly; also, so that it can
48 * work with low capability USB controllers without four bulk endpoints.
60 */ 49 */
61 50
62#define DEBUG 1 51/* #define VERBOSE_DEBUG */
63// #define VERBOSE
64 52
65#include <linux/module.h>
66#include <linux/kernel.h> 53#include <linux/kernel.h>
67#include <linux/delay.h>
68#include <linux/ioport.h>
69#include <linux/slab.h>
70#include <linux/errno.h>
71#include <linux/init.h>
72#include <linux/timer.h>
73#include <linux/list.h>
74#include <linux/interrupt.h>
75#include <linux/utsname.h> 54#include <linux/utsname.h>
76#include <linux/device.h> 55#include <linux/device.h>
77#include <linux/moduleparam.h>
78
79#include <asm/byteorder.h>
80#include <asm/io.h>
81#include <asm/irq.h>
82#include <asm/system.h>
83#include <asm/unaligned.h>
84 56
85#include <linux/usb/ch9.h> 57#include <linux/usb/ch9.h>
86#include <linux/usb_gadget.h> 58#include <linux/usb/gadget.h>
87 59
88#include "gadget_chips.h" 60#include "gadget_chips.h"
89 61
90 62
91/*-------------------------------------------------------------------------*/ 63/*-------------------------------------------------------------------------*/
92 64
93#define DRIVER_VERSION "St Patrick's Day 2004" 65#define DRIVER_VERSION "Lughnasadh, 2007"
94 66
95static const char shortname [] = "zero"; 67static const char shortname [] = "zero";
96static const char longname [] = "Gadget Zero"; 68static const char longname [] = "Gadget Zero";
@@ -131,30 +103,16 @@ struct zero_dev {
131 struct timer_list resume; 103 struct timer_list resume;
132}; 104};
133 105
134#define xprintk(d,level,fmt,args...) \ 106#define DBG(d, fmt, args...) \
135 dev_printk(level , &(d)->gadget->dev , fmt , ## args) 107 dev_dbg(&(d)->gadget->dev , fmt , ## args)
136 108#define VDBG(d, fmt, args...) \
137#ifdef DEBUG 109 dev_vdbg(&(d)->gadget->dev , fmt , ## args)
138#define DBG(dev,fmt,args...) \ 110#define ERROR(d, fmt, args...) \
139 xprintk(dev , KERN_DEBUG , fmt , ## args) 111 dev_err(&(d)->gadget->dev , fmt , ## args)
140#else 112#define WARN(d, fmt, args...) \
141#define DBG(dev,fmt,args...) \ 113 dev_warn(&(d)->gadget->dev , fmt , ## args)
142 do { } while (0) 114#define INFO(d, fmt, args...) \
143#endif /* DEBUG */ 115 dev_info(&(d)->gadget->dev , fmt , ## args)
144
145#ifdef VERBOSE
146#define VDBG DBG
147#else
148#define VDBG(dev,fmt,args...) \
149 do { } while (0)
150#endif /* VERBOSE */
151
152#define ERROR(dev,fmt,args...) \
153 xprintk(dev , KERN_ERR , fmt , ## args)
154#define WARN(dev,fmt,args...) \
155 xprintk(dev , KERN_WARNING , fmt , ## args)
156#define INFO(dev,fmt,args...) \
157 xprintk(dev , KERN_INFO , fmt , ## args)
158 116
159/*-------------------------------------------------------------------------*/ 117/*-------------------------------------------------------------------------*/
160 118
@@ -326,8 +284,6 @@ static const struct usb_descriptor_header *fs_loopback_function [] = {
326 NULL, 284 NULL,
327}; 285};
328 286
329#ifdef CONFIG_USB_GADGET_DUALSPEED
330
331/* 287/*
332 * usb 2.0 devices need to expose both high speed and full speed 288 * usb 2.0 devices need to expose both high speed and full speed
333 * descriptors, unless they only run at full speed. 289 * descriptors, unless they only run at full speed.
@@ -383,17 +339,20 @@ static const struct usb_descriptor_header *hs_loopback_function [] = {
383}; 339};
384 340
385/* maxpacket and other transfer characteristics vary by speed. */ 341/* maxpacket and other transfer characteristics vary by speed. */
386#define ep_desc(g,hs,fs) (((g)->speed==USB_SPEED_HIGH)?(hs):(fs)) 342static inline struct usb_endpoint_descriptor *
387 343ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *hs,
388#else 344 struct usb_endpoint_descriptor *fs)
345{
346 if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
347 return hs;
348 return fs;
349}
389 350
390/* if there's no high speed support, maxpacket doesn't change. */ 351static char manufacturer[50];
391#define ep_desc(g,hs,fs) fs
392 352
393#endif /* !CONFIG_USB_GADGET_DUALSPEED */ 353/* default serial number takes at least two packets */
354static char serial[] = "0123456789.0123456789.0123456789";
394 355
395static char manufacturer [50];
396static char serial [40];
397 356
398/* static strings, in UTF-8 */ 357/* static strings, in UTF-8 */
399static struct usb_string strings [] = { 358static struct usb_string strings [] = {
@@ -435,30 +394,29 @@ config_buf (struct usb_gadget *gadget,
435 int is_source_sink; 394 int is_source_sink;
436 int len; 395 int len;
437 const struct usb_descriptor_header **function; 396 const struct usb_descriptor_header **function;
438#ifdef CONFIG_USB_GADGET_DUALSPEED 397 int hs = 0;
439 int hs = (gadget->speed == USB_SPEED_HIGH);
440#endif
441 398
442 /* two configurations will always be index 0 and index 1 */ 399 /* two configurations will always be index 0 and index 1 */
443 if (index > 1) 400 if (index > 1)
444 return -EINVAL; 401 return -EINVAL;
445 is_source_sink = loopdefault ? (index == 1) : (index == 0); 402 is_source_sink = loopdefault ? (index == 1) : (index == 0);
446 403
447#ifdef CONFIG_USB_GADGET_DUALSPEED 404 if (gadget_is_dualspeed(gadget)) {
448 if (type == USB_DT_OTHER_SPEED_CONFIG) 405 hs = (gadget->speed == USB_SPEED_HIGH);
449 hs = !hs; 406 if (type == USB_DT_OTHER_SPEED_CONFIG)
407 hs = !hs;
408 }
450 if (hs) 409 if (hs)
451 function = is_source_sink 410 function = is_source_sink
452 ? hs_source_sink_function 411 ? hs_source_sink_function
453 : hs_loopback_function; 412 : hs_loopback_function;
454 else 413 else
455#endif
456 function = is_source_sink 414 function = is_source_sink
457 ? fs_source_sink_function 415 ? fs_source_sink_function
458 : fs_loopback_function; 416 : fs_loopback_function;
459 417
460 /* for now, don't advertise srp-only devices */ 418 /* for now, don't advertise srp-only devices */
461 if (!gadget->is_otg) 419 if (!gadget_is_otg(gadget))
462 function++; 420 function++;
463 421
464 len = usb_gadget_config_buf (is_source_sink 422 len = usb_gadget_config_buf (is_source_sink
@@ -498,6 +456,19 @@ static void free_ep_req (struct usb_ep *ep, struct usb_request *req)
498 456
499/*-------------------------------------------------------------------------*/ 457/*-------------------------------------------------------------------------*/
500 458
459/*
460 * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripherals,
461 * this just sinks bulk packets OUT to the peripheral and sources them IN
462 * to the host, optionally with specific data patterns.
463 *
464 * In terms of control messaging, this supports all the standard requests
465 * plus two that support control-OUT tests.
466 *
467 * Note that because this doesn't queue more than one request at a time,
468 * some other function must be used to test queueing logic. The network
469 * link (g_ether) is probably the best option for that.
470 */
471
501/* optionally require specific source/sink data patterns */ 472/* optionally require specific source/sink data patterns */
502 473
503static int 474static int
@@ -534,12 +505,7 @@ check_read_data (
534 return 0; 505 return 0;
535} 506}
536 507
537static void 508static void reinit_write_data(struct usb_ep *ep, struct usb_request *req)
538reinit_write_data (
539 struct zero_dev *dev,
540 struct usb_ep *ep,
541 struct usb_request *req
542)
543{ 509{
544 unsigned i; 510 unsigned i;
545 u8 *buf = req->buf; 511 u8 *buf = req->buf;
@@ -566,16 +532,16 @@ static void source_sink_complete (struct usb_ep *ep, struct usb_request *req)
566 532
567 switch (status) { 533 switch (status) {
568 534
569 case 0: /* normal completion? */ 535 case 0: /* normal completion? */
570 if (ep == dev->out_ep) { 536 if (ep == dev->out_ep) {
571 check_read_data (dev, ep, req); 537 check_read_data (dev, ep, req);
572 memset (req->buf, 0x55, req->length); 538 memset (req->buf, 0x55, req->length);
573 } else 539 } else
574 reinit_write_data (dev, ep, req); 540 reinit_write_data(ep, req);
575 break; 541 break;
576 542
577 /* this endpoint is normally active while we're configured */ 543 /* this endpoint is normally active while we're configured */
578 case -ECONNABORTED: /* hardware forced ep reset */ 544 case -ECONNABORTED: /* hardware forced ep reset */
579 case -ECONNRESET: /* request dequeued */ 545 case -ECONNRESET: /* request dequeued */
580 case -ESHUTDOWN: /* disconnect from host */ 546 case -ESHUTDOWN: /* disconnect from host */
581 VDBG (dev, "%s gone (%d), %d/%d\n", ep->name, status, 547 VDBG (dev, "%s gone (%d), %d/%d\n", ep->name, status,
@@ -607,8 +573,7 @@ static void source_sink_complete (struct usb_ep *ep, struct usb_request *req)
607 } 573 }
608} 574}
609 575
610static struct usb_request * 576static struct usb_request *source_sink_start_ep(struct usb_ep *ep)
611source_sink_start_ep (struct usb_ep *ep, gfp_t gfp_flags)
612{ 577{
613 struct usb_request *req; 578 struct usb_request *req;
614 int status; 579 int status;
@@ -621,11 +586,11 @@ source_sink_start_ep (struct usb_ep *ep, gfp_t gfp_flags)
621 req->complete = source_sink_complete; 586 req->complete = source_sink_complete;
622 587
623 if (strcmp (ep->name, EP_IN_NAME) == 0) 588 if (strcmp (ep->name, EP_IN_NAME) == 0)
624 reinit_write_data (ep->driver_data, ep, req); 589 reinit_write_data(ep, req);
625 else 590 else
626 memset (req->buf, 0x55, req->length); 591 memset (req->buf, 0x55, req->length);
627 592
628 status = usb_ep_queue (ep, req, gfp_flags); 593 status = usb_ep_queue(ep, req, GFP_ATOMIC);
629 if (status) { 594 if (status) {
630 struct zero_dev *dev = ep->driver_data; 595 struct zero_dev *dev = ep->driver_data;
631 596
@@ -637,8 +602,7 @@ source_sink_start_ep (struct usb_ep *ep, gfp_t gfp_flags)
637 return req; 602 return req;
638} 603}
639 604
640static int 605static int set_source_sink_config(struct zero_dev *dev)
641set_source_sink_config (struct zero_dev *dev, gfp_t gfp_flags)
642{ 606{
643 int result = 0; 607 int result = 0;
644 struct usb_ep *ep; 608 struct usb_ep *ep;
@@ -653,8 +617,7 @@ set_source_sink_config (struct zero_dev *dev, gfp_t gfp_flags)
653 result = usb_ep_enable (ep, d); 617 result = usb_ep_enable (ep, d);
654 if (result == 0) { 618 if (result == 0) {
655 ep->driver_data = dev; 619 ep->driver_data = dev;
656 if (source_sink_start_ep(ep, gfp_flags) 620 if (source_sink_start_ep(ep) != NULL) {
657 != NULL) {
658 dev->in_ep = ep; 621 dev->in_ep = ep;
659 continue; 622 continue;
660 } 623 }
@@ -668,8 +631,7 @@ set_source_sink_config (struct zero_dev *dev, gfp_t gfp_flags)
668 result = usb_ep_enable (ep, d); 631 result = usb_ep_enable (ep, d);
669 if (result == 0) { 632 if (result == 0) {
670 ep->driver_data = dev; 633 ep->driver_data = dev;
671 if (source_sink_start_ep(ep, gfp_flags) 634 if (source_sink_start_ep(ep) != NULL) {
672 != NULL) {
673 dev->out_ep = ep; 635 dev->out_ep = ep;
674 continue; 636 continue;
675 } 637 }
@@ -701,7 +663,7 @@ static void loopback_complete (struct usb_ep *ep, struct usb_request *req)
701 663
702 switch (status) { 664 switch (status) {
703 665
704 case 0: /* normal completion? */ 666 case 0: /* normal completion? */
705 if (ep == dev->out_ep) { 667 if (ep == dev->out_ep) {
706 /* loop this OUT packet back IN to the host */ 668 /* loop this OUT packet back IN to the host */
707 req->zero = (req->actual < req->length); 669 req->zero = (req->actual < req->length);
@@ -735,7 +697,7 @@ static void loopback_complete (struct usb_ep *ep, struct usb_request *req)
735 * rely on the hardware driver to clean up on disconnect or 697 * rely on the hardware driver to clean up on disconnect or
736 * endpoint disable. 698 * endpoint disable.
737 */ 699 */
738 case -ECONNABORTED: /* hardware forced ep reset */ 700 case -ECONNABORTED: /* hardware forced ep reset */
739 case -ECONNRESET: /* request dequeued */ 701 case -ECONNRESET: /* request dequeued */
740 case -ESHUTDOWN: /* disconnect from host */ 702 case -ESHUTDOWN: /* disconnect from host */
741 free_ep_req (ep, req); 703 free_ep_req (ep, req);
@@ -743,8 +705,7 @@ static void loopback_complete (struct usb_ep *ep, struct usb_request *req)
743 } 705 }
744} 706}
745 707
746static int 708static int set_loopback_config(struct zero_dev *dev)
747set_loopback_config (struct zero_dev *dev, gfp_t gfp_flags)
748{ 709{
749 int result = 0; 710 int result = 0;
750 struct usb_ep *ep; 711 struct usb_ep *ep;
@@ -844,8 +805,7 @@ static void zero_reset_config (struct zero_dev *dev)
844 * code can do, perhaps by disallowing more than one configuration or 805 * code can do, perhaps by disallowing more than one configuration or
845 * by limiting configuration choices (like the pxa2xx). 806 * by limiting configuration choices (like the pxa2xx).
846 */ 807 */
847static int 808static int zero_set_config(struct zero_dev *dev, unsigned number)
848zero_set_config (struct zero_dev *dev, unsigned number, gfp_t gfp_flags)
849{ 809{
850 int result = 0; 810 int result = 0;
851 struct usb_gadget *gadget = dev->gadget; 811 struct usb_gadget *gadget = dev->gadget;
@@ -855,17 +815,17 @@ zero_set_config (struct zero_dev *dev, unsigned number, gfp_t gfp_flags)
855 815
856 if (gadget_is_sa1100 (gadget) && dev->config) { 816 if (gadget_is_sa1100 (gadget) && dev->config) {
857 /* tx fifo is full, but we can't clear it...*/ 817 /* tx fifo is full, but we can't clear it...*/
858 INFO (dev, "can't change configurations\n"); 818 ERROR(dev, "can't change configurations\n");
859 return -ESPIPE; 819 return -ESPIPE;
860 } 820 }
861 zero_reset_config (dev); 821 zero_reset_config (dev);
862 822
863 switch (number) { 823 switch (number) {
864 case CONFIG_SOURCE_SINK: 824 case CONFIG_SOURCE_SINK:
865 result = set_source_sink_config (dev, gfp_flags); 825 result = set_source_sink_config(dev);
866 break; 826 break;
867 case CONFIG_LOOPBACK: 827 case CONFIG_LOOPBACK:
868 result = set_loopback_config (dev, gfp_flags); 828 result = set_loopback_config(dev);
869 break; 829 break;
870 default: 830 default:
871 result = -EINVAL; 831 result = -EINVAL;
@@ -885,7 +845,7 @@ zero_set_config (struct zero_dev *dev, unsigned number, gfp_t gfp_flags)
885 case USB_SPEED_LOW: speed = "low"; break; 845 case USB_SPEED_LOW: speed = "low"; break;
886 case USB_SPEED_FULL: speed = "full"; break; 846 case USB_SPEED_FULL: speed = "full"; break;
887 case USB_SPEED_HIGH: speed = "high"; break; 847 case USB_SPEED_HIGH: speed = "high"; break;
888 default: speed = "?"; break; 848 default: speed = "?"; break;
889 } 849 }
890 850
891 dev->config = number; 851 dev->config = number;
@@ -938,19 +898,17 @@ zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
938 value = min (w_length, (u16) sizeof device_desc); 898 value = min (w_length, (u16) sizeof device_desc);
939 memcpy (req->buf, &device_desc, value); 899 memcpy (req->buf, &device_desc, value);
940 break; 900 break;
941#ifdef CONFIG_USB_GADGET_DUALSPEED
942 case USB_DT_DEVICE_QUALIFIER: 901 case USB_DT_DEVICE_QUALIFIER:
943 if (!gadget->is_dualspeed) 902 if (!gadget_is_dualspeed(gadget))
944 break; 903 break;
945 value = min (w_length, (u16) sizeof dev_qualifier); 904 value = min (w_length, (u16) sizeof dev_qualifier);
946 memcpy (req->buf, &dev_qualifier, value); 905 memcpy (req->buf, &dev_qualifier, value);
947 break; 906 break;
948 907
949 case USB_DT_OTHER_SPEED_CONFIG: 908 case USB_DT_OTHER_SPEED_CONFIG:
950 if (!gadget->is_dualspeed) 909 if (!gadget_is_dualspeed(gadget))
951 break; 910 break;
952 // FALLTHROUGH 911 // FALLTHROUGH
953#endif /* CONFIG_USB_GADGET_DUALSPEED */
954 case USB_DT_CONFIG: 912 case USB_DT_CONFIG:
955 value = config_buf (gadget, req->buf, 913 value = config_buf (gadget, req->buf,
956 w_value >> 8, 914 w_value >> 8,
@@ -984,7 +942,7 @@ zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
984 else 942 else
985 VDBG (dev, "HNP inactive\n"); 943 VDBG (dev, "HNP inactive\n");
986 spin_lock (&dev->lock); 944 spin_lock (&dev->lock);
987 value = zero_set_config (dev, w_value, GFP_ATOMIC); 945 value = zero_set_config(dev, w_value);
988 spin_unlock (&dev->lock); 946 spin_unlock (&dev->lock);
989 break; 947 break;
990 case USB_REQ_GET_CONFIGURATION: 948 case USB_REQ_GET_CONFIGURATION:
@@ -1013,7 +971,7 @@ zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1013 * use this "reset the config" shortcut. 971 * use this "reset the config" shortcut.
1014 */ 972 */
1015 zero_reset_config (dev); 973 zero_reset_config (dev);
1016 zero_set_config (dev, config, GFP_ATOMIC); 974 zero_set_config(dev, config);
1017 value = 0; 975 value = 0;
1018 } 976 }
1019 spin_unlock (&dev->lock); 977 spin_unlock (&dev->lock);
@@ -1163,7 +1121,7 @@ autoconf_fail:
1163 } 1121 }
1164 EP_IN_NAME = ep->name; 1122 EP_IN_NAME = ep->name;
1165 ep->driver_data = ep; /* claim */ 1123 ep->driver_data = ep; /* claim */
1166 1124
1167 ep = usb_ep_autoconfig (gadget, &fs_sink_desc); 1125 ep = usb_ep_autoconfig (gadget, &fs_sink_desc);
1168 if (!ep) 1126 if (!ep)
1169 goto autoconf_fail; 1127 goto autoconf_fail;
@@ -1207,16 +1165,18 @@ autoconf_fail:
1207 1165
1208 device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket; 1166 device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
1209 1167
1210#ifdef CONFIG_USB_GADGET_DUALSPEED 1168 if (gadget_is_dualspeed(gadget)) {
1211 /* assume ep0 uses the same value for both speeds ... */ 1169 /* assume ep0 uses the same value for both speeds ... */
1212 dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0; 1170 dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0;
1213 1171
1214 /* and that all endpoints are dual-speed */ 1172 /* and that all endpoints are dual-speed */
1215 hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; 1173 hs_source_desc.bEndpointAddress =
1216 hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; 1174 fs_source_desc.bEndpointAddress;
1217#endif 1175 hs_sink_desc.bEndpointAddress =
1176 fs_sink_desc.bEndpointAddress;
1177 }
1218 1178
1219 if (gadget->is_otg) { 1179 if (gadget_is_otg(gadget)) {
1220 otg_descriptor.bmAttributes |= USB_OTG_HNP, 1180 otg_descriptor.bmAttributes |= USB_OTG_HNP,
1221 source_sink_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP; 1181 source_sink_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
1222 loopback_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP; 1182 loopback_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
@@ -1294,23 +1254,18 @@ static struct usb_gadget_driver zero_driver = {
1294 .suspend = zero_suspend, 1254 .suspend = zero_suspend,
1295 .resume = zero_resume, 1255 .resume = zero_resume,
1296 1256
1297 .driver = { 1257 .driver = {
1298 .name = (char *) shortname, 1258 .name = (char *) shortname,
1299 .owner = THIS_MODULE, 1259 .owner = THIS_MODULE,
1300 }, 1260 },
1301}; 1261};
1302 1262
1303MODULE_AUTHOR ("David Brownell"); 1263MODULE_AUTHOR("David Brownell");
1304MODULE_LICENSE ("Dual BSD/GPL"); 1264MODULE_LICENSE("GPL");
1305 1265
1306 1266
1307static int __init init (void) 1267static int __init init (void)
1308{ 1268{
1309 /* a real value would likely come through some id prom
1310 * or module option. this one takes at least two packets.
1311 */
1312 strlcpy (serial, "0123456789.0123456789.0123456789", sizeof serial);
1313
1314 return usb_gadget_register_driver (&zero_driver); 1269 return usb_gadget_register_driver (&zero_driver);
1315} 1270}
1316module_init (init); 1271module_init (init);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 565d6ef4c4..c978d622fa 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -154,6 +154,19 @@ config USB_OHCI_HCD_PCI
154 Enables support for PCI-bus plug-in USB controller cards. 154 Enables support for PCI-bus plug-in USB controller cards.
155 If unsure, say Y. 155 If unsure, say Y.
156 156
157config USB_OHCI_HCD_SSB
158 bool "OHCI support for Broadcom SSB OHCI core"
159 depends on USB_OHCI_HCD && SSB && EXPERIMENTAL
160 default n
161 ---help---
162 Support for the Sonics Silicon Backplane (SSB) attached
163 Broadcom USB OHCI core.
164
165 This device is present in some embedded devices with
166 Broadcom based SSB bus.
167
168 If unsure, say N.
169
157config USB_OHCI_BIG_ENDIAN_DESC 170config USB_OHCI_BIG_ENDIAN_DESC
158 bool 171 bool
159 depends on USB_OHCI_HCD 172 depends on USB_OHCI_HCD
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index b1d19268cb..766ef68a0b 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -220,10 +220,8 @@ static const struct hc_driver ehci_au1xxx_hc_driver = {
220 */ 220 */
221 .hub_status_data = ehci_hub_status_data, 221 .hub_status_data = ehci_hub_status_data,
222 .hub_control = ehci_hub_control, 222 .hub_control = ehci_hub_control,
223#ifdef CONFIG_PM 223 .bus_suspend = ehci_bus_suspend,
224 .hub_suspend = ehci_hub_suspend, 224 .bus_resume = ehci_bus_resume,
225 .hub_resume = ehci_hub_resume,
226#endif
227}; 225};
228 226
229/*-------------------------------------------------------------------------*/ 227/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 35cdba1041..c151444288 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -570,10 +570,18 @@ static int ehci_run (struct usb_hcd *hcd)
570 * are explicitly handed to companion controller(s), so no TT is 570 * are explicitly handed to companion controller(s), so no TT is
571 * involved with the root hub. (Except where one is integrated, 571 * involved with the root hub. (Except where one is integrated,
572 * and there's no companion controller unless maybe for USB OTG.) 572 * and there's no companion controller unless maybe for USB OTG.)
573 *
574 * Turning on the CF flag will transfer ownership of all ports
575 * from the companions to the EHCI controller. If any of the
576 * companions are in the middle of a port reset at the time, it
577 * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
578 * guarantees that no resets are in progress.
573 */ 579 */
580 down_write(&ehci_cf_port_reset_rwsem);
574 hcd->state = HC_STATE_RUNNING; 581 hcd->state = HC_STATE_RUNNING;
575 ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); 582 ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
576 ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ 583 ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
584 up_write(&ehci_cf_port_reset_rwsem);
577 585
578 temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase)); 586 temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
579 ehci_info (ehci, 587 ehci_info (ehci,
@@ -719,7 +727,6 @@ dead:
719 */ 727 */
720static int ehci_urb_enqueue ( 728static int ehci_urb_enqueue (
721 struct usb_hcd *hcd, 729 struct usb_hcd *hcd,
722 struct usb_host_endpoint *ep,
723 struct urb *urb, 730 struct urb *urb,
724 gfp_t mem_flags 731 gfp_t mem_flags
725) { 732) {
@@ -734,12 +741,12 @@ static int ehci_urb_enqueue (
734 default: 741 default:
735 if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags)) 742 if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
736 return -ENOMEM; 743 return -ENOMEM;
737 return submit_async (ehci, ep, urb, &qtd_list, mem_flags); 744 return submit_async(ehci, urb, &qtd_list, mem_flags);
738 745
739 case PIPE_INTERRUPT: 746 case PIPE_INTERRUPT:
740 if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags)) 747 if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
741 return -ENOMEM; 748 return -ENOMEM;
742 return intr_submit (ehci, ep, urb, &qtd_list, mem_flags); 749 return intr_submit(ehci, urb, &qtd_list, mem_flags);
743 750
744 case PIPE_ISOCHRONOUS: 751 case PIPE_ISOCHRONOUS:
745 if (urb->dev->speed == USB_SPEED_HIGH) 752 if (urb->dev->speed == USB_SPEED_HIGH)
@@ -777,13 +784,18 @@ static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
777 * completions normally happen asynchronously 784 * completions normally happen asynchronously
778 */ 785 */
779 786
780static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb) 787static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
781{ 788{
782 struct ehci_hcd *ehci = hcd_to_ehci (hcd); 789 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
783 struct ehci_qh *qh; 790 struct ehci_qh *qh;
784 unsigned long flags; 791 unsigned long flags;
792 int rc;
785 793
786 spin_lock_irqsave (&ehci->lock, flags); 794 spin_lock_irqsave (&ehci->lock, flags);
795 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
796 if (rc)
797 goto done;
798
787 switch (usb_pipetype (urb->pipe)) { 799 switch (usb_pipetype (urb->pipe)) {
788 // case PIPE_CONTROL: 800 // case PIPE_CONTROL:
789 // case PIPE_BULK: 801 // case PIPE_BULK:
@@ -838,7 +850,7 @@ static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
838 } 850 }
839done: 851done:
840 spin_unlock_irqrestore (&ehci->lock, flags); 852 spin_unlock_irqrestore (&ehci->lock, flags);
841 return 0; 853 return rc;
842} 854}
843 855
844/*-------------------------------------------------------------------------*/ 856/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index a7816e392a..ad0d4965f2 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -58,8 +58,6 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
58 if (!retval) 58 if (!retval)
59 ehci_dbg(ehci, "MWI active\n"); 59 ehci_dbg(ehci, "MWI active\n");
60 60
61 ehci_port_power(ehci, 0);
62
63 return 0; 61 return 0;
64} 62}
65 63
@@ -156,8 +154,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
156 break; 154 break;
157 } 155 }
158 156
159 if (ehci_is_TDI(ehci)) 157 ehci_reset(ehci);
160 ehci_reset(ehci);
161 158
162 /* at least the Genesys GL880S needs fixup here */ 159 /* at least the Genesys GL880S needs fixup here */
163 temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params); 160 temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
diff --git a/drivers/usb/host/ehci-ppc-soc.c b/drivers/usb/host/ehci-ppc-soc.c
index 4f99b0eb27..452d4b1bc8 100644
--- a/drivers/usb/host/ehci-ppc-soc.c
+++ b/drivers/usb/host/ehci-ppc-soc.c
@@ -160,10 +160,8 @@ static const struct hc_driver ehci_ppc_soc_hc_driver = {
160 */ 160 */
161 .hub_status_data = ehci_hub_status_data, 161 .hub_status_data = ehci_hub_status_data,
162 .hub_control = ehci_hub_control, 162 .hub_control = ehci_hub_control,
163#ifdef CONFIG_PM 163 .bus_suspend = ehci_bus_suspend,
164 .hub_suspend = ehci_hub_suspend, 164 .bus_resume = ehci_bus_resume,
165 .hub_resume = ehci_hub_resume,
166#endif
167}; 165};
168 166
169static int ehci_hcd_ppc_soc_drv_probe(struct platform_device *pdev) 167static int ehci_hcd_ppc_soc_drv_probe(struct platform_device *pdev)
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 829fe649a9..03a6b2f4e6 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -47,7 +47,7 @@ static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
47 if (result) 47 if (result)
48 return result; 48 return result;
49 49
50 ehci_port_power(ehci, 0); 50 ehci_reset(ehci);
51 51
52 return result; 52 return result;
53} 53}
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 140bfa423e..b10f39c047 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -139,63 +139,65 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
139 139
140/*-------------------------------------------------------------------------*/ 140/*-------------------------------------------------------------------------*/
141 141
142static void qtd_copy_status ( 142static int qtd_copy_status (
143 struct ehci_hcd *ehci, 143 struct ehci_hcd *ehci,
144 struct urb *urb, 144 struct urb *urb,
145 size_t length, 145 size_t length,
146 u32 token 146 u32 token
147) 147)
148{ 148{
149 int status = -EINPROGRESS;
150
149 /* count IN/OUT bytes, not SETUP (even short packets) */ 151 /* count IN/OUT bytes, not SETUP (even short packets) */
150 if (likely (QTD_PID (token) != 2)) 152 if (likely (QTD_PID (token) != 2))
151 urb->actual_length += length - QTD_LENGTH (token); 153 urb->actual_length += length - QTD_LENGTH (token);
152 154
153 /* don't modify error codes */ 155 /* don't modify error codes */
154 if (unlikely (urb->status != -EINPROGRESS)) 156 if (unlikely(urb->unlinked))
155 return; 157 return status;
156 158
157 /* force cleanup after short read; not always an error */ 159 /* force cleanup after short read; not always an error */
158 if (unlikely (IS_SHORT_READ (token))) 160 if (unlikely (IS_SHORT_READ (token)))
159 urb->status = -EREMOTEIO; 161 status = -EREMOTEIO;
160 162
161 /* serious "can't proceed" faults reported by the hardware */ 163 /* serious "can't proceed" faults reported by the hardware */
162 if (token & QTD_STS_HALT) { 164 if (token & QTD_STS_HALT) {
163 if (token & QTD_STS_BABBLE) { 165 if (token & QTD_STS_BABBLE) {
164 /* FIXME "must" disable babbling device's port too */ 166 /* FIXME "must" disable babbling device's port too */
165 urb->status = -EOVERFLOW; 167 status = -EOVERFLOW;
166 } else if (token & QTD_STS_MMF) { 168 } else if (token & QTD_STS_MMF) {
167 /* fs/ls interrupt xfer missed the complete-split */ 169 /* fs/ls interrupt xfer missed the complete-split */
168 urb->status = -EPROTO; 170 status = -EPROTO;
169 } else if (token & QTD_STS_DBE) { 171 } else if (token & QTD_STS_DBE) {
170 urb->status = (QTD_PID (token) == 1) /* IN ? */ 172 status = (QTD_PID (token) == 1) /* IN ? */
171 ? -ENOSR /* hc couldn't read data */ 173 ? -ENOSR /* hc couldn't read data */
172 : -ECOMM; /* hc couldn't write data */ 174 : -ECOMM; /* hc couldn't write data */
173 } else if (token & QTD_STS_XACT) { 175 } else if (token & QTD_STS_XACT) {
174 /* timeout, bad crc, wrong PID, etc; retried */ 176 /* timeout, bad crc, wrong PID, etc; retried */
175 if (QTD_CERR (token)) 177 if (QTD_CERR (token))
176 urb->status = -EPIPE; 178 status = -EPIPE;
177 else { 179 else {
178 ehci_dbg (ehci, "devpath %s ep%d%s 3strikes\n", 180 ehci_dbg (ehci, "devpath %s ep%d%s 3strikes\n",
179 urb->dev->devpath, 181 urb->dev->devpath,
180 usb_pipeendpoint (urb->pipe), 182 usb_pipeendpoint (urb->pipe),
181 usb_pipein (urb->pipe) ? "in" : "out"); 183 usb_pipein (urb->pipe) ? "in" : "out");
182 urb->status = -EPROTO; 184 status = -EPROTO;
183 } 185 }
184 /* CERR nonzero + no errors + halt --> stall */ 186 /* CERR nonzero + no errors + halt --> stall */
185 } else if (QTD_CERR (token)) 187 } else if (QTD_CERR (token))
186 urb->status = -EPIPE; 188 status = -EPIPE;
187 else /* unknown */ 189 else /* unknown */
188 urb->status = -EPROTO; 190 status = -EPROTO;
189 191
190 ehci_vdbg (ehci, 192 ehci_vdbg (ehci,
191 "dev%d ep%d%s qtd token %08x --> status %d\n", 193 "dev%d ep%d%s qtd token %08x --> status %d\n",
192 usb_pipedevice (urb->pipe), 194 usb_pipedevice (urb->pipe),
193 usb_pipeendpoint (urb->pipe), 195 usb_pipeendpoint (urb->pipe),
194 usb_pipein (urb->pipe) ? "in" : "out", 196 usb_pipein (urb->pipe) ? "in" : "out",
195 token, urb->status); 197 token, status);
196 198
197 /* if async CSPLIT failed, try cleaning out the TT buffer */ 199 /* if async CSPLIT failed, try cleaning out the TT buffer */
198 if (urb->status != -EPIPE 200 if (status != -EPIPE
199 && urb->dev->tt && !usb_pipeint (urb->pipe) 201 && urb->dev->tt && !usb_pipeint (urb->pipe)
200 && ((token & QTD_STS_MMF) != 0 202 && ((token & QTD_STS_MMF) != 0
201 || QTD_CERR(token) == 0) 203 || QTD_CERR(token) == 0)
@@ -212,10 +214,12 @@ static void qtd_copy_status (
212 usb_hub_tt_clear_buffer (urb->dev, urb->pipe); 214 usb_hub_tt_clear_buffer (urb->dev, urb->pipe);
213 } 215 }
214 } 216 }
217
218 return status;
215} 219}
216 220
217static void 221static void
218ehci_urb_done (struct ehci_hcd *ehci, struct urb *urb) 222ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
219__releases(ehci->lock) 223__releases(ehci->lock)
220__acquires(ehci->lock) 224__acquires(ehci->lock)
221{ 225{
@@ -231,25 +235,13 @@ __acquires(ehci->lock)
231 qh_put (qh); 235 qh_put (qh);
232 } 236 }
233 237
234 spin_lock (&urb->lock); 238 if (unlikely(urb->unlinked)) {
235 urb->hcpriv = NULL; 239 COUNT(ehci->stats.unlink);
236 switch (urb->status) { 240 } else {
237 case -EINPROGRESS: /* success */ 241 if (likely(status == -EINPROGRESS))
238 urb->status = 0; 242 status = 0;
239 default: /* fault */ 243 COUNT(ehci->stats.complete);
240 COUNT (ehci->stats.complete);
241 break;
242 case -EREMOTEIO: /* fault or normal */
243 if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
244 urb->status = 0;
245 COUNT (ehci->stats.complete);
246 break;
247 case -ECONNRESET: /* canceled */
248 case -ENOENT:
249 COUNT (ehci->stats.unlink);
250 break;
251 } 244 }
252 spin_unlock (&urb->lock);
253 245
254#ifdef EHCI_URB_TRACE 246#ifdef EHCI_URB_TRACE
255 ehci_dbg (ehci, 247 ehci_dbg (ehci,
@@ -257,13 +249,14 @@ __acquires(ehci->lock)
257 __FUNCTION__, urb->dev->devpath, urb, 249 __FUNCTION__, urb->dev->devpath, urb,
258 usb_pipeendpoint (urb->pipe), 250 usb_pipeendpoint (urb->pipe),
259 usb_pipein (urb->pipe) ? "in" : "out", 251 usb_pipein (urb->pipe) ? "in" : "out",
260 urb->status, 252 status,
261 urb->actual_length, urb->transfer_buffer_length); 253 urb->actual_length, urb->transfer_buffer_length);
262#endif 254#endif
263 255
264 /* complete() can reenter this HCD */ 256 /* complete() can reenter this HCD */
257 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
265 spin_unlock (&ehci->lock); 258 spin_unlock (&ehci->lock);
266 usb_hcd_giveback_urb (ehci_to_hcd(ehci), urb); 259 usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
267 spin_lock (&ehci->lock); 260 spin_lock (&ehci->lock);
268} 261}
269 262
@@ -283,6 +276,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
283{ 276{
284 struct ehci_qtd *last = NULL, *end = qh->dummy; 277 struct ehci_qtd *last = NULL, *end = qh->dummy;
285 struct list_head *entry, *tmp; 278 struct list_head *entry, *tmp;
279 int last_status = -EINPROGRESS;
286 int stopped; 280 int stopped;
287 unsigned count = 0; 281 unsigned count = 0;
288 int do_status = 0; 282 int do_status = 0;
@@ -311,6 +305,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
311 struct ehci_qtd *qtd; 305 struct ehci_qtd *qtd;
312 struct urb *urb; 306 struct urb *urb;
313 u32 token = 0; 307 u32 token = 0;
308 int qtd_status;
314 309
315 qtd = list_entry (entry, struct ehci_qtd, qtd_list); 310 qtd = list_entry (entry, struct ehci_qtd, qtd_list);
316 urb = qtd->urb; 311 urb = qtd->urb;
@@ -318,11 +313,12 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
318 /* clean up any state from previous QTD ...*/ 313 /* clean up any state from previous QTD ...*/
319 if (last) { 314 if (last) {
320 if (likely (last->urb != urb)) { 315 if (likely (last->urb != urb)) {
321 ehci_urb_done (ehci, last->urb); 316 ehci_urb_done(ehci, last->urb, last_status);
322 count++; 317 count++;
323 } 318 }
324 ehci_qtd_free (ehci, last); 319 ehci_qtd_free (ehci, last);
325 last = NULL; 320 last = NULL;
321 last_status = -EINPROGRESS;
326 } 322 }
327 323
328 /* ignore urbs submitted during completions we reported */ 324 /* ignore urbs submitted during completions we reported */
@@ -358,13 +354,14 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
358 stopped = 1; 354 stopped = 1;
359 355
360 if (unlikely (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) 356 if (unlikely (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state)))
361 urb->status = -ESHUTDOWN; 357 last_status = -ESHUTDOWN;
362 358
363 /* ignore active urbs unless some previous qtd 359 /* ignore active urbs unless some previous qtd
364 * for the urb faulted (including short read) or 360 * for the urb faulted (including short read) or
365 * its urb was canceled. we may patch qh or qtds. 361 * its urb was canceled. we may patch qh or qtds.
366 */ 362 */
367 if (likely (urb->status == -EINPROGRESS)) 363 if (likely(last_status == -EINPROGRESS &&
364 !urb->unlinked))
368 continue; 365 continue;
369 366
370 /* issue status after short control reads */ 367 /* issue status after short control reads */
@@ -392,11 +389,14 @@ halt:
392 } 389 }
393 390
394 /* remove it from the queue */ 391 /* remove it from the queue */
395 spin_lock (&urb->lock); 392 qtd_status = qtd_copy_status(ehci, urb, qtd->length, token);
396 qtd_copy_status (ehci, urb, qtd->length, token); 393 if (unlikely(qtd_status == -EREMOTEIO)) {
397 do_status = (urb->status == -EREMOTEIO) 394 do_status = (!urb->unlinked &&
398 && usb_pipecontrol (urb->pipe); 395 usb_pipecontrol(urb->pipe));
399 spin_unlock (&urb->lock); 396 qtd_status = 0;
397 }
398 if (likely(last_status == -EINPROGRESS))
399 last_status = qtd_status;
400 400
401 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) { 401 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
402 last = list_entry (qtd->qtd_list.prev, 402 last = list_entry (qtd->qtd_list.prev,
@@ -409,7 +409,7 @@ halt:
409 409
410 /* last urb's completion might still need calling */ 410 /* last urb's completion might still need calling */
411 if (likely (last != NULL)) { 411 if (likely (last != NULL)) {
412 ehci_urb_done (ehci, last->urb); 412 ehci_urb_done(ehci, last->urb, last_status);
413 count++; 413 count++;
414 ehci_qtd_free (ehci, last); 414 ehci_qtd_free (ehci, last);
415 } 415 }
@@ -913,7 +913,6 @@ static struct ehci_qh *qh_append_tds (
913static int 913static int
914submit_async ( 914submit_async (
915 struct ehci_hcd *ehci, 915 struct ehci_hcd *ehci,
916 struct usb_host_endpoint *ep,
917 struct urb *urb, 916 struct urb *urb,
918 struct list_head *qtd_list, 917 struct list_head *qtd_list,
919 gfp_t mem_flags 918 gfp_t mem_flags
@@ -922,10 +921,10 @@ submit_async (
922 int epnum; 921 int epnum;
923 unsigned long flags; 922 unsigned long flags;
924 struct ehci_qh *qh = NULL; 923 struct ehci_qh *qh = NULL;
925 int rc = 0; 924 int rc;
926 925
927 qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list); 926 qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
928 epnum = ep->desc.bEndpointAddress; 927 epnum = urb->ep->desc.bEndpointAddress;
929 928
930#ifdef EHCI_URB_TRACE 929#ifdef EHCI_URB_TRACE
931 ehci_dbg (ehci, 930 ehci_dbg (ehci,
@@ -933,7 +932,7 @@ submit_async (
933 __FUNCTION__, urb->dev->devpath, urb, 932 __FUNCTION__, urb->dev->devpath, urb,
934 epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out", 933 epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
935 urb->transfer_buffer_length, 934 urb->transfer_buffer_length,
936 qtd, ep->hcpriv); 935 qtd, urb->ep->hcpriv);
937#endif 936#endif
938 937
939 spin_lock_irqsave (&ehci->lock, flags); 938 spin_lock_irqsave (&ehci->lock, flags);
@@ -942,9 +941,13 @@ submit_async (
942 rc = -ESHUTDOWN; 941 rc = -ESHUTDOWN;
943 goto done; 942 goto done;
944 } 943 }
944 rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
945 if (unlikely(rc))
946 goto done;
945 947
946 qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv); 948 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
947 if (unlikely(qh == NULL)) { 949 if (unlikely(qh == NULL)) {
950 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
948 rc = -ENOMEM; 951 rc = -ENOMEM;
949 goto done; 952 goto done;
950 } 953 }
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index e682f2342e..80d99bce2b 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -797,7 +797,6 @@ done:
797 797
798static int intr_submit ( 798static int intr_submit (
799 struct ehci_hcd *ehci, 799 struct ehci_hcd *ehci,
800 struct usb_host_endpoint *ep,
801 struct urb *urb, 800 struct urb *urb,
802 struct list_head *qtd_list, 801 struct list_head *qtd_list,
803 gfp_t mem_flags 802 gfp_t mem_flags
@@ -805,23 +804,26 @@ static int intr_submit (
805 unsigned epnum; 804 unsigned epnum;
806 unsigned long flags; 805 unsigned long flags;
807 struct ehci_qh *qh; 806 struct ehci_qh *qh;
808 int status = 0; 807 int status;
809 struct list_head empty; 808 struct list_head empty;
810 809
811 /* get endpoint and transfer/schedule data */ 810 /* get endpoint and transfer/schedule data */
812 epnum = ep->desc.bEndpointAddress; 811 epnum = urb->ep->desc.bEndpointAddress;
813 812
814 spin_lock_irqsave (&ehci->lock, flags); 813 spin_lock_irqsave (&ehci->lock, flags);
815 814
816 if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, 815 if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
817 &ehci_to_hcd(ehci)->flags))) { 816 &ehci_to_hcd(ehci)->flags))) {
818 status = -ESHUTDOWN; 817 status = -ESHUTDOWN;
819 goto done; 818 goto done_not_linked;
820 } 819 }
820 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
821 if (unlikely(status))
822 goto done_not_linked;
821 823
822 /* get qh and force any scheduling errors */ 824 /* get qh and force any scheduling errors */
823 INIT_LIST_HEAD (&empty); 825 INIT_LIST_HEAD (&empty);
824 qh = qh_append_tds (ehci, urb, &empty, epnum, &ep->hcpriv); 826 qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
825 if (qh == NULL) { 827 if (qh == NULL) {
826 status = -ENOMEM; 828 status = -ENOMEM;
827 goto done; 829 goto done;
@@ -832,13 +834,16 @@ static int intr_submit (
832 } 834 }
833 835
834 /* then queue the urb's tds to the qh */ 836 /* then queue the urb's tds to the qh */
835 qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv); 837 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
836 BUG_ON (qh == NULL); 838 BUG_ON (qh == NULL);
837 839
838 /* ... update usbfs periodic stats */ 840 /* ... update usbfs periodic stats */
839 ehci_to_hcd(ehci)->self.bandwidth_int_reqs++; 841 ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;
840 842
841done: 843done:
844 if (unlikely(status))
845 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
846done_not_linked:
842 spin_unlock_irqrestore (&ehci->lock, flags); 847 spin_unlock_irqrestore (&ehci->lock, flags);
843 if (status) 848 if (status)
844 qtd_list_free (ehci, urb, qtd_list); 849 qtd_list_free (ehci, urb, qtd_list);
@@ -1622,7 +1627,7 @@ itd_complete (
1622 1627
1623 /* give urb back to the driver ... can be out-of-order */ 1628 /* give urb back to the driver ... can be out-of-order */
1624 dev = urb->dev; 1629 dev = urb->dev;
1625 ehci_urb_done (ehci, urb); 1630 ehci_urb_done(ehci, urb, 0);
1626 urb = NULL; 1631 urb = NULL;
1627 1632
1628 /* defer stopping schedule; completion can submit */ 1633 /* defer stopping schedule; completion can submit */
@@ -1686,12 +1691,19 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
1686 /* schedule ... need to lock */ 1691 /* schedule ... need to lock */
1687 spin_lock_irqsave (&ehci->lock, flags); 1692 spin_lock_irqsave (&ehci->lock, flags);
1688 if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, 1693 if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
1689 &ehci_to_hcd(ehci)->flags))) 1694 &ehci_to_hcd(ehci)->flags))) {
1690 status = -ESHUTDOWN; 1695 status = -ESHUTDOWN;
1691 else 1696 goto done_not_linked;
1692 status = iso_stream_schedule (ehci, urb, stream); 1697 }
1698 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
1699 if (unlikely(status))
1700 goto done_not_linked;
1701 status = iso_stream_schedule(ehci, urb, stream);
1693 if (likely (status == 0)) 1702 if (likely (status == 0))
1694 itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream); 1703 itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
1704 else
1705 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
1706done_not_linked:
1695 spin_unlock_irqrestore (&ehci->lock, flags); 1707 spin_unlock_irqrestore (&ehci->lock, flags);
1696 1708
1697done: 1709done:
@@ -1988,7 +2000,7 @@ sitd_complete (
1988 2000
1989 /* give urb back to the driver */ 2001 /* give urb back to the driver */
1990 dev = urb->dev; 2002 dev = urb->dev;
1991 ehci_urb_done (ehci, urb); 2003 ehci_urb_done(ehci, urb, 0);
1992 urb = NULL; 2004 urb = NULL;
1993 2005
1994 /* defer stopping schedule; completion can submit */ 2006 /* defer stopping schedule; completion can submit */
@@ -2049,12 +2061,19 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
2049 /* schedule ... need to lock */ 2061 /* schedule ... need to lock */
2050 spin_lock_irqsave (&ehci->lock, flags); 2062 spin_lock_irqsave (&ehci->lock, flags);
2051 if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, 2063 if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
2052 &ehci_to_hcd(ehci)->flags))) 2064 &ehci_to_hcd(ehci)->flags))) {
2053 status = -ESHUTDOWN; 2065 status = -ESHUTDOWN;
2054 else 2066 goto done_not_linked;
2055 status = iso_stream_schedule (ehci, urb, stream); 2067 }
2068 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
2069 if (unlikely(status))
2070 goto done_not_linked;
2071 status = iso_stream_schedule(ehci, urb, stream);
2056 if (status == 0) 2072 if (status == 0)
2057 sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream); 2073 sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
2074 else
2075 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
2076done_not_linked:
2058 spin_unlock_irqrestore (&ehci->lock, flags); 2077 spin_unlock_irqrestore (&ehci->lock, flags);
2059 2078
2060done: 2079done:
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 5c851a36de..c27417f5b9 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -277,12 +277,11 @@ static void preproc_atl_queue(struct isp116x *isp116x)
277 processed urbs. 277 processed urbs.
278*/ 278*/
279static void finish_request(struct isp116x *isp116x, struct isp116x_ep *ep, 279static void finish_request(struct isp116x *isp116x, struct isp116x_ep *ep,
280 struct urb *urb) 280 struct urb *urb, int status)
281__releases(isp116x->lock) __acquires(isp116x->lock) 281__releases(isp116x->lock) __acquires(isp116x->lock)
282{ 282{
283 unsigned i; 283 unsigned i;
284 284
285 urb->hcpriv = NULL;
286 ep->error_count = 0; 285 ep->error_count = 0;
287 286
288 if (usb_pipecontrol(urb->pipe)) 287 if (usb_pipecontrol(urb->pipe))
@@ -290,8 +289,9 @@ __releases(isp116x->lock) __acquires(isp116x->lock)
290 289
291 urb_dbg(urb, "Finish"); 290 urb_dbg(urb, "Finish");
292 291
292 usb_hcd_unlink_urb_from_ep(isp116x_to_hcd(isp116x), urb);
293 spin_unlock(&isp116x->lock); 293 spin_unlock(&isp116x->lock);
294 usb_hcd_giveback_urb(isp116x_to_hcd(isp116x), urb); 294 usb_hcd_giveback_urb(isp116x_to_hcd(isp116x), urb, status);
295 spin_lock(&isp116x->lock); 295 spin_lock(&isp116x->lock);
296 296
297 /* take idle endpoints out of the schedule */ 297 /* take idle endpoints out of the schedule */
@@ -445,12 +445,7 @@ static void postproc_atl_queue(struct isp116x *isp116x)
445 if (PTD_GET_ACTIVE(ptd) 445 if (PTD_GET_ACTIVE(ptd)
446 || (cc != TD_CC_NOERROR && cc < 0x0E)) 446 || (cc != TD_CC_NOERROR && cc < 0x0E))
447 break; 447 break;
448 if ((urb->transfer_flags & URB_SHORT_NOT_OK) && 448 status = 0;
449 urb->actual_length <
450 urb->transfer_buffer_length)
451 status = -EREMOTEIO;
452 else
453 status = 0;
454 ep->nextpid = 0; 449 ep->nextpid = 0;
455 break; 450 break;
456 default: 451 default:
@@ -458,14 +453,8 @@ static void postproc_atl_queue(struct isp116x *isp116x)
458 } 453 }
459 454
460 done: 455 done:
461 if (status != -EINPROGRESS) { 456 if (status != -EINPROGRESS || urb->unlinked)
462 spin_lock(&urb->lock); 457 finish_request(isp116x, ep, urb, status);
463 if (urb->status == -EINPROGRESS)
464 urb->status = status;
465 spin_unlock(&urb->lock);
466 }
467 if (urb->status != -EINPROGRESS)
468 finish_request(isp116x, ep, urb);
469 } 458 }
470} 459}
471 460
@@ -673,7 +662,7 @@ static int balance(struct isp116x *isp116x, u16 period, u16 load)
673/*-----------------------------------------------------------------*/ 662/*-----------------------------------------------------------------*/
674 663
675static int isp116x_urb_enqueue(struct usb_hcd *hcd, 664static int isp116x_urb_enqueue(struct usb_hcd *hcd,
676 struct usb_host_endpoint *hep, struct urb *urb, 665 struct urb *urb,
677 gfp_t mem_flags) 666 gfp_t mem_flags)
678{ 667{
679 struct isp116x *isp116x = hcd_to_isp116x(hcd); 668 struct isp116x *isp116x = hcd_to_isp116x(hcd);
@@ -682,6 +671,7 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
682 int is_out = !usb_pipein(pipe); 671 int is_out = !usb_pipein(pipe);
683 int type = usb_pipetype(pipe); 672 int type = usb_pipetype(pipe);
684 int epnum = usb_pipeendpoint(pipe); 673 int epnum = usb_pipeendpoint(pipe);
674 struct usb_host_endpoint *hep = urb->ep;
685 struct isp116x_ep *ep = NULL; 675 struct isp116x_ep *ep = NULL;
686 unsigned long flags; 676 unsigned long flags;
687 int i; 677 int i;
@@ -705,7 +695,12 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
705 if (!HC_IS_RUNNING(hcd->state)) { 695 if (!HC_IS_RUNNING(hcd->state)) {
706 kfree(ep); 696 kfree(ep);
707 ret = -ENODEV; 697 ret = -ENODEV;
708 goto fail; 698 goto fail_not_linked;
699 }
700 ret = usb_hcd_link_urb_to_ep(hcd, urb);
701 if (ret) {
702 kfree(ep);
703 goto fail_not_linked;
709 } 704 }
710 705
711 if (hep->hcpriv) 706 if (hep->hcpriv)
@@ -808,16 +803,13 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
808 } 803 }
809 } 804 }
810 805
811 /* in case of unlink-during-submit */
812 if (urb->status != -EINPROGRESS) {
813 finish_request(isp116x, ep, urb);
814 ret = 0;
815 goto fail;
816 }
817 urb->hcpriv = hep; 806 urb->hcpriv = hep;
818 start_atl_transfers(isp116x); 807 start_atl_transfers(isp116x);
819 808
820 fail: 809 fail:
810 if (ret)
811 usb_hcd_unlink_urb_from_ep(hcd, urb);
812 fail_not_linked:
821 spin_unlock_irqrestore(&isp116x->lock, flags); 813 spin_unlock_irqrestore(&isp116x->lock, flags);
822 return ret; 814 return ret;
823} 815}
@@ -825,20 +817,21 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
825/* 817/*
826 Dequeue URBs. 818 Dequeue URBs.
827*/ 819*/
828static int isp116x_urb_dequeue(struct usb_hcd *hcd, struct urb *urb) 820static int isp116x_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
821 int status)
829{ 822{
830 struct isp116x *isp116x = hcd_to_isp116x(hcd); 823 struct isp116x *isp116x = hcd_to_isp116x(hcd);
831 struct usb_host_endpoint *hep; 824 struct usb_host_endpoint *hep;
832 struct isp116x_ep *ep, *ep_act; 825 struct isp116x_ep *ep, *ep_act;
833 unsigned long flags; 826 unsigned long flags;
827 int rc;
834 828
835 spin_lock_irqsave(&isp116x->lock, flags); 829 spin_lock_irqsave(&isp116x->lock, flags);
830 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
831 if (rc)
832 goto done;
833
836 hep = urb->hcpriv; 834 hep = urb->hcpriv;
837 /* URB already unlinked (or never linked)? */
838 if (!hep) {
839 spin_unlock_irqrestore(&isp116x->lock, flags);
840 return 0;
841 }
842 ep = hep->hcpriv; 835 ep = hep->hcpriv;
843 WARN_ON(hep != ep->hep); 836 WARN_ON(hep != ep->hep);
844 837
@@ -855,10 +848,10 @@ static int isp116x_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
855 } 848 }
856 849
857 if (urb) 850 if (urb)
858 finish_request(isp116x, ep, urb); 851 finish_request(isp116x, ep, urb, status);
859 852 done:
860 spin_unlock_irqrestore(&isp116x->lock, flags); 853 spin_unlock_irqrestore(&isp116x->lock, flags);
861 return 0; 854 return rc;
862} 855}
863 856
864static void isp116x_endpoint_disable(struct usb_hcd *hcd, 857static void isp116x_endpoint_disable(struct usb_hcd *hcd,
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index f61c6cdd06..ebab5ce8f5 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -24,7 +24,7 @@
24 * small: 0) header + data packets 1) just header 24 * small: 0) header + data packets 1) just header
25 */ 25 */
26static void __maybe_unused 26static void __maybe_unused
27urb_print (struct urb * urb, char * str, int small) 27urb_print(struct urb * urb, char * str, int small, int status)
28{ 28{
29 unsigned int pipe= urb->pipe; 29 unsigned int pipe= urb->pipe;
30 30
@@ -34,7 +34,7 @@ urb_print (struct urb * urb, char * str, int small)
34 } 34 }
35 35
36#ifndef OHCI_VERBOSE_DEBUG 36#ifndef OHCI_VERBOSE_DEBUG
37 if (urb->status != 0) 37 if (status != 0)
38#endif 38#endif
39 dbg("%s %p dev=%d ep=%d%s-%s flags=%x len=%d/%d stat=%d", 39 dbg("%s %p dev=%d ep=%d%s-%s flags=%x len=%d/%d stat=%d",
40 str, 40 str,
@@ -46,7 +46,7 @@ urb_print (struct urb * urb, char * str, int small)
46 urb->transfer_flags, 46 urb->transfer_flags,
47 urb->actual_length, 47 urb->actual_length,
48 urb->transfer_buffer_length, 48 urb->transfer_buffer_length,
49 urb->status); 49 status);
50 50
51#ifdef OHCI_VERBOSE_DEBUG 51#ifdef OHCI_VERBOSE_DEBUG
52 if (!small) { 52 if (!small) {
@@ -66,7 +66,7 @@ urb_print (struct urb * urb, char * str, int small)
66 urb->transfer_buffer_length: urb->actual_length; 66 urb->transfer_buffer_length: urb->actual_length;
67 for (i = 0; i < 16 && i < len; i++) 67 for (i = 0; i < 16 && i < len; i++)
68 printk (" %02x", ((__u8 *) urb->transfer_buffer) [i]); 68 printk (" %02x", ((__u8 *) urb->transfer_buffer) [i]);
69 printk ("%s stat:%d\n", i < len? "...": "", urb->status); 69 printk ("%s stat:%d\n", i < len? "...": "", status);
70 } 70 }
71 } 71 }
72#endif 72#endif
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 6edf4097d2..240c7f5075 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -81,7 +81,6 @@ static void ohci_dump (struct ohci_hcd *ohci, int verbose);
81static int ohci_init (struct ohci_hcd *ohci); 81static int ohci_init (struct ohci_hcd *ohci);
82static void ohci_stop (struct usb_hcd *hcd); 82static void ohci_stop (struct usb_hcd *hcd);
83static int ohci_restart (struct ohci_hcd *ohci); 83static int ohci_restart (struct ohci_hcd *ohci);
84static void ohci_quirk_nec_worker (struct work_struct *work);
85 84
86#include "ohci-hub.c" 85#include "ohci-hub.c"
87#include "ohci-dbg.c" 86#include "ohci-dbg.c"
@@ -118,7 +117,6 @@ MODULE_PARM_DESC (no_handshake, "true (not default) disables BIOS handshake");
118 */ 117 */
119static int ohci_urb_enqueue ( 118static int ohci_urb_enqueue (
120 struct usb_hcd *hcd, 119 struct usb_hcd *hcd,
121 struct usb_host_endpoint *ep,
122 struct urb *urb, 120 struct urb *urb,
123 gfp_t mem_flags 121 gfp_t mem_flags
124) { 122) {
@@ -131,11 +129,11 @@ static int ohci_urb_enqueue (
131 int retval = 0; 129 int retval = 0;
132 130
133#ifdef OHCI_VERBOSE_DEBUG 131#ifdef OHCI_VERBOSE_DEBUG
134 urb_print (urb, "SUB", usb_pipein (pipe)); 132 urb_print(urb, "SUB", usb_pipein(pipe), -EINPROGRESS);
135#endif 133#endif
136 134
137 /* every endpoint has a ed, locate and maybe (re)initialize it */ 135 /* every endpoint has a ed, locate and maybe (re)initialize it */
138 if (! (ed = ed_get (ohci, ep, urb->dev, pipe, urb->interval))) 136 if (! (ed = ed_get (ohci, urb->ep, urb->dev, pipe, urb->interval)))
139 return -ENOMEM; 137 return -ENOMEM;
140 138
141 /* for the private part of the URB we need the number of TDs (size) */ 139 /* for the private part of the URB we need the number of TDs (size) */
@@ -200,22 +198,17 @@ static int ohci_urb_enqueue (
200 retval = -ENODEV; 198 retval = -ENODEV;
201 goto fail; 199 goto fail;
202 } 200 }
203 201 retval = usb_hcd_link_urb_to_ep(hcd, urb);
204 /* in case of unlink-during-submit */ 202 if (retval)
205 spin_lock (&urb->lock);
206 if (urb->status != -EINPROGRESS) {
207 spin_unlock (&urb->lock);
208 urb->hcpriv = urb_priv;
209 finish_urb (ohci, urb);
210 retval = 0;
211 goto fail; 203 goto fail;
212 }
213 204
214 /* schedule the ed if needed */ 205 /* schedule the ed if needed */
215 if (ed->state == ED_IDLE) { 206 if (ed->state == ED_IDLE) {
216 retval = ed_schedule (ohci, ed); 207 retval = ed_schedule (ohci, ed);
217 if (retval < 0) 208 if (retval < 0) {
218 goto fail0; 209 usb_hcd_unlink_urb_from_ep(hcd, urb);
210 goto fail;
211 }
219 if (ed->type == PIPE_ISOCHRONOUS) { 212 if (ed->type == PIPE_ISOCHRONOUS) {
220 u16 frame = ohci_frame_no(ohci); 213 u16 frame = ohci_frame_no(ohci);
221 214
@@ -239,8 +232,6 @@ static int ohci_urb_enqueue (
239 urb->hcpriv = urb_priv; 232 urb->hcpriv = urb_priv;
240 td_submit_urb (ohci, urb); 233 td_submit_urb (ohci, urb);
241 234
242fail0:
243 spin_unlock (&urb->lock);
244fail: 235fail:
245 if (retval) 236 if (retval)
246 urb_free_priv (ohci, urb_priv); 237 urb_free_priv (ohci, urb_priv);
@@ -249,22 +240,26 @@ fail:
249} 240}
250 241
251/* 242/*
252 * decouple the URB from the HC queues (TDs, urb_priv); it's 243 * decouple the URB from the HC queues (TDs, urb_priv).
253 * already marked using urb->status. reporting is always done 244 * reporting is always done
254 * asynchronously, and we might be dealing with an urb that's 245 * asynchronously, and we might be dealing with an urb that's
255 * partially transferred, or an ED with other urbs being unlinked. 246 * partially transferred, or an ED with other urbs being unlinked.
256 */ 247 */
257static int ohci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb) 248static int ohci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
258{ 249{
259 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 250 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
260 unsigned long flags; 251 unsigned long flags;
252 int rc;
261 253
262#ifdef OHCI_VERBOSE_DEBUG 254#ifdef OHCI_VERBOSE_DEBUG
263 urb_print (urb, "UNLINK", 1); 255 urb_print(urb, "UNLINK", 1, status);
264#endif 256#endif
265 257
266 spin_lock_irqsave (&ohci->lock, flags); 258 spin_lock_irqsave (&ohci->lock, flags);
267 if (HC_IS_RUNNING(hcd->state)) { 259 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
260 if (rc) {
261 ; /* Do nothing */
262 } else if (HC_IS_RUNNING(hcd->state)) {
268 urb_priv_t *urb_priv; 263 urb_priv_t *urb_priv;
269 264
270 /* Unless an IRQ completed the unlink while it was being 265 /* Unless an IRQ completed the unlink while it was being
@@ -282,10 +277,10 @@ static int ohci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
282 * any more ... just clean up every urb's memory. 277 * any more ... just clean up every urb's memory.
283 */ 278 */
284 if (urb->hcpriv) 279 if (urb->hcpriv)
285 finish_urb (ohci, urb); 280 finish_urb(ohci, urb, status);
286 } 281 }
287 spin_unlock_irqrestore (&ohci->lock, flags); 282 spin_unlock_irqrestore (&ohci->lock, flags);
288 return 0; 283 return rc;
289} 284}
290 285
291/*-------------------------------------------------------------------------*/ 286/*-------------------------------------------------------------------------*/
@@ -314,6 +309,8 @@ rescan:
314 if (!HC_IS_RUNNING (hcd->state)) { 309 if (!HC_IS_RUNNING (hcd->state)) {
315sanitize: 310sanitize:
316 ed->state = ED_IDLE; 311 ed->state = ED_IDLE;
312 if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
313 ohci->eds_scheduled--;
317 finish_unlinks (ohci, 0); 314 finish_unlinks (ohci, 0);
318 } 315 }
319 316
@@ -321,7 +318,12 @@ sanitize:
321 case ED_UNLINK: /* wait for hw to finish? */ 318 case ED_UNLINK: /* wait for hw to finish? */
322 /* major IRQ delivery trouble loses INTR_SF too... */ 319 /* major IRQ delivery trouble loses INTR_SF too... */
323 if (limit-- == 0) { 320 if (limit-- == 0) {
324 ohci_warn (ohci, "IRQ INTR_SF lossage\n"); 321 ohci_warn(ohci, "ED unlink timeout\n");
322 if (quirk_zfmicro(ohci)) {
323 ohci_warn(ohci, "Attempting ZF TD recovery\n");
324 ohci->ed_to_check = ed;
325 ohci->zf_delay = 2;
326 }
325 goto sanitize; 327 goto sanitize;
326 } 328 }
327 spin_unlock_irqrestore (&ohci->lock, flags); 329 spin_unlock_irqrestore (&ohci->lock, flags);
@@ -379,6 +381,93 @@ ohci_shutdown (struct usb_hcd *hcd)
379 (void) ohci_readl (ohci, &ohci->regs->control); 381 (void) ohci_readl (ohci, &ohci->regs->control);
380} 382}
381 383
384static int check_ed(struct ohci_hcd *ohci, struct ed *ed)
385{
386 return (hc32_to_cpu(ohci, ed->hwINFO) & ED_IN) != 0
387 && (hc32_to_cpu(ohci, ed->hwHeadP) & TD_MASK)
388 == (hc32_to_cpu(ohci, ed->hwTailP) & TD_MASK)
389 && !list_empty(&ed->td_list);
390}
391
392/* ZF Micro watchdog timer callback. The ZF Micro chipset sometimes completes
393 * an interrupt TD but neglects to add it to the donelist. On systems with
394 * this chipset, we need to periodically check the state of the queues to look
395 * for such "lost" TDs.
396 */
397static void unlink_watchdog_func(unsigned long _ohci)
398{
399 long flags;
400 unsigned max;
401 unsigned seen_count = 0;
402 unsigned i;
403 struct ed **seen = NULL;
404 struct ohci_hcd *ohci = (struct ohci_hcd *) _ohci;
405
406 spin_lock_irqsave(&ohci->lock, flags);
407 max = ohci->eds_scheduled;
408 if (!max)
409 goto done;
410
411 if (ohci->ed_to_check)
412 goto out;
413
414 seen = kcalloc(max, sizeof *seen, GFP_ATOMIC);
415 if (!seen)
416 goto out;
417
418 for (i = 0; i < NUM_INTS; i++) {
419 struct ed *ed = ohci->periodic[i];
420
421 while (ed) {
422 unsigned temp;
423
424 /* scan this branch of the periodic schedule tree */
425 for (temp = 0; temp < seen_count; temp++) {
426 if (seen[temp] == ed) {
427 /* we've checked it and what's after */
428 ed = NULL;
429 break;
430 }
431 }
432 if (!ed)
433 break;
434 seen[seen_count++] = ed;
435 if (!check_ed(ohci, ed)) {
436 ed = ed->ed_next;
437 continue;
438 }
439
440 /* HC's TD list is empty, but HCD sees at least one
441 * TD that's not been sent through the donelist.
442 */
443 ohci->ed_to_check = ed;
444 ohci->zf_delay = 2;
445
446 /* The HC may wait until the next frame to report the
447 * TD as done through the donelist and INTR_WDH. (We
448 * just *assume* it's not a multi-TD interrupt URB;
449 * those could defer the IRQ more than one frame, using
450 * DI...) Check again after the next INTR_SF.
451 */
452 ohci_writel(ohci, OHCI_INTR_SF,
453 &ohci->regs->intrstatus);
454 ohci_writel(ohci, OHCI_INTR_SF,
455 &ohci->regs->intrenable);
456
457 /* flush those writes */
458 (void) ohci_readl(ohci, &ohci->regs->control);
459
460 goto out;
461 }
462 }
463out:
464 kfree(seen);
465 if (ohci->eds_scheduled)
466 mod_timer(&ohci->unlink_watchdog, round_jiffies_relative(HZ));
467done:
468 spin_unlock_irqrestore(&ohci->lock, flags);
469}
470
382/*-------------------------------------------------------------------------* 471/*-------------------------------------------------------------------------*
383 * HC functions 472 * HC functions
384 *-------------------------------------------------------------------------*/ 473 *-------------------------------------------------------------------------*/
@@ -616,6 +705,15 @@ retry:
616 mdelay ((temp >> 23) & 0x1fe); 705 mdelay ((temp >> 23) & 0x1fe);
617 hcd->state = HC_STATE_RUNNING; 706 hcd->state = HC_STATE_RUNNING;
618 707
708 if (quirk_zfmicro(ohci)) {
709 /* Create timer to watch for bad queue state on ZF Micro */
710 setup_timer(&ohci->unlink_watchdog, unlink_watchdog_func,
711 (unsigned long) ohci);
712
713 ohci->eds_scheduled = 0;
714 ohci->ed_to_check = NULL;
715 }
716
619 ohci_dump (ohci, 1); 717 ohci_dump (ohci, 1);
620 718
621 return 0; 719 return 0;
@@ -629,10 +727,11 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
629{ 727{
630 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 728 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
631 struct ohci_regs __iomem *regs = ohci->regs; 729 struct ohci_regs __iomem *regs = ohci->regs;
632 int ints; 730 int ints;
633 731
634 /* we can eliminate a (slow) ohci_readl() 732 /* we can eliminate a (slow) ohci_readl()
635 if _only_ WDH caused this irq */ 733 * if _only_ WDH caused this irq
734 */
636 if ((ohci->hcca->done_head != 0) 735 if ((ohci->hcca->done_head != 0)
637 && ! (hc32_to_cpup (ohci, &ohci->hcca->done_head) 736 && ! (hc32_to_cpup (ohci, &ohci->hcca->done_head)
638 & 0x01)) { 737 & 0x01)) {
@@ -651,7 +750,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
651 750
652 if (ints & OHCI_INTR_UE) { 751 if (ints & OHCI_INTR_UE) {
653 // e.g. due to PCI Master/Target Abort 752 // e.g. due to PCI Master/Target Abort
654 if (ohci->flags & OHCI_QUIRK_NEC) { 753 if (quirk_nec(ohci)) {
655 /* Workaround for a silicon bug in some NEC chips used 754 /* Workaround for a silicon bug in some NEC chips used
656 * in Apple's PowerBooks. Adapted from Darwin code. 755 * in Apple's PowerBooks. Adapted from Darwin code.
657 */ 756 */
@@ -713,6 +812,31 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
713 ohci_writel (ohci, OHCI_INTR_WDH, &regs->intrenable); 812 ohci_writel (ohci, OHCI_INTR_WDH, &regs->intrenable);
714 } 813 }
715 814
815 if (quirk_zfmicro(ohci) && (ints & OHCI_INTR_SF)) {
816 spin_lock(&ohci->lock);
817 if (ohci->ed_to_check) {
818 struct ed *ed = ohci->ed_to_check;
819
820 if (check_ed(ohci, ed)) {
821 /* HC thinks the TD list is empty; HCD knows
822 * at least one TD is outstanding
823 */
824 if (--ohci->zf_delay == 0) {
825 struct td *td = list_entry(
826 ed->td_list.next,
827 struct td, td_list);
828 ohci_warn(ohci,
829 "Reclaiming orphan TD %p\n",
830 td);
831 takeback_td(ohci, td);
832 ohci->ed_to_check = NULL;
833 }
834 } else
835 ohci->ed_to_check = NULL;
836 }
837 spin_unlock(&ohci->lock);
838 }
839
716 /* could track INTR_SO to reduce available PCI/... bandwidth */ 840 /* could track INTR_SO to reduce available PCI/... bandwidth */
717 841
718 /* handle any pending URB/ED unlinks, leaving INTR_SF enabled 842 /* handle any pending URB/ED unlinks, leaving INTR_SF enabled
@@ -721,7 +845,9 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
721 spin_lock (&ohci->lock); 845 spin_lock (&ohci->lock);
722 if (ohci->ed_rm_list) 846 if (ohci->ed_rm_list)
723 finish_unlinks (ohci, ohci_frame_no(ohci)); 847 finish_unlinks (ohci, ohci_frame_no(ohci));
724 if ((ints & OHCI_INTR_SF) != 0 && !ohci->ed_rm_list 848 if ((ints & OHCI_INTR_SF) != 0
849 && !ohci->ed_rm_list
850 && !ohci->ed_to_check
725 && HC_IS_RUNNING(hcd->state)) 851 && HC_IS_RUNNING(hcd->state))
726 ohci_writel (ohci, OHCI_INTR_SF, &regs->intrdisable); 852 ohci_writel (ohci, OHCI_INTR_SF, &regs->intrdisable);
727 spin_unlock (&ohci->lock); 853 spin_unlock (&ohci->lock);
@@ -751,6 +877,9 @@ static void ohci_stop (struct usb_hcd *hcd)
751 free_irq(hcd->irq, hcd); 877 free_irq(hcd->irq, hcd);
752 hcd->irq = -1; 878 hcd->irq = -1;
753 879
880 if (quirk_zfmicro(ohci))
881 del_timer(&ohci->unlink_watchdog);
882
754 remove_debug_files (ohci); 883 remove_debug_files (ohci);
755 ohci_mem_cleanup (ohci); 884 ohci_mem_cleanup (ohci);
756 if (ohci->hcca) { 885 if (ohci->hcca) {
@@ -798,9 +927,8 @@ static int ohci_restart (struct ohci_hcd *ohci)
798 ed, ed->state); 927 ed, ed->state);
799 } 928 }
800 929
801 spin_lock (&urb->lock); 930 if (!urb->unlinked)
802 urb->status = -ESHUTDOWN; 931 urb->unlinked = -ESHUTDOWN;
803 spin_unlock (&urb->lock);
804 } 932 }
805 finish_unlinks (ohci, 0); 933 finish_unlinks (ohci, 0);
806 spin_unlock_irq(&ohci->lock); 934 spin_unlock_irq(&ohci->lock);
@@ -828,27 +956,6 @@ static int ohci_restart (struct ohci_hcd *ohci)
828 956
829/*-------------------------------------------------------------------------*/ 957/*-------------------------------------------------------------------------*/
830 958
831/* NEC workaround */
832static void ohci_quirk_nec_worker(struct work_struct *work)
833{
834 struct ohci_hcd *ohci = container_of(work, struct ohci_hcd, nec_work);
835 int status;
836
837 status = ohci_init(ohci);
838 if (status != 0) {
839 ohci_err(ohci, "Restarting NEC controller failed "
840 "in ohci_init, %d\n", status);
841 return;
842 }
843
844 status = ohci_restart(ohci);
845 if (status != 0)
846 ohci_err(ohci, "Restarting NEC controller failed "
847 "in ohci_restart, %d\n", status);
848}
849
850/*-------------------------------------------------------------------------*/
851
852#define DRIVER_INFO DRIVER_VERSION " " DRIVER_DESC 959#define DRIVER_INFO DRIVER_VERSION " " DRIVER_DESC
853 960
854MODULE_AUTHOR (DRIVER_AUTHOR); 961MODULE_AUTHOR (DRIVER_AUTHOR);
@@ -926,11 +1033,17 @@ MODULE_LICENSE ("GPL");
926#define PS3_SYSTEM_BUS_DRIVER ps3_ohci_driver 1033#define PS3_SYSTEM_BUS_DRIVER ps3_ohci_driver
927#endif 1034#endif
928 1035
1036#ifdef CONFIG_USB_OHCI_HCD_SSB
1037#include "ohci-ssb.c"
1038#define SSB_OHCI_DRIVER ssb_ohci_driver
1039#endif
1040
929#if !defined(PCI_DRIVER) && \ 1041#if !defined(PCI_DRIVER) && \
930 !defined(PLATFORM_DRIVER) && \ 1042 !defined(PLATFORM_DRIVER) && \
931 !defined(OF_PLATFORM_DRIVER) && \ 1043 !defined(OF_PLATFORM_DRIVER) && \
932 !defined(SA1111_DRIVER) && \ 1044 !defined(SA1111_DRIVER) && \
933 !defined(PS3_SYSTEM_BUS_DRIVER) 1045 !defined(PS3_SYSTEM_BUS_DRIVER) && \
1046 !defined(SSB_OHCI_DRIVER)
934#error "missing bus glue for ohci-hcd" 1047#error "missing bus glue for ohci-hcd"
935#endif 1048#endif
936 1049
@@ -975,10 +1088,20 @@ static int __init ohci_hcd_mod_init(void)
975 goto error_pci; 1088 goto error_pci;
976#endif 1089#endif
977 1090
1091#ifdef SSB_OHCI_DRIVER
1092 retval = ssb_driver_register(&SSB_OHCI_DRIVER);
1093 if (retval)
1094 goto error_ssb;
1095#endif
1096
978 return retval; 1097 return retval;
979 1098
980 /* Error path */ 1099 /* Error path */
1100#ifdef SSB_OHCI_DRIVER
1101 error_ssb:
1102#endif
981#ifdef PCI_DRIVER 1103#ifdef PCI_DRIVER
1104 pci_unregister_driver(&PCI_DRIVER);
982 error_pci: 1105 error_pci:
983#endif 1106#endif
984#ifdef SA1111_DRIVER 1107#ifdef SA1111_DRIVER
@@ -1003,6 +1126,9 @@ module_init(ohci_hcd_mod_init);
1003 1126
1004static void __exit ohci_hcd_mod_exit(void) 1127static void __exit ohci_hcd_mod_exit(void)
1005{ 1128{
1129#ifdef SSB_OHCI_DRIVER
1130 ssb_driver_unregister(&SSB_OHCI_DRIVER);
1131#endif
1006#ifdef PCI_DRIVER 1132#ifdef PCI_DRIVER
1007 pci_unregister_driver(&PCI_DRIVER); 1133 pci_unregister_driver(&PCI_DRIVER);
1008#endif 1134#endif
diff --git a/drivers/usb/host/ohci-mem.c b/drivers/usb/host/ohci-mem.c
index 450c7b460c..2f20d3dc89 100644
--- a/drivers/usb/host/ohci-mem.c
+++ b/drivers/usb/host/ohci-mem.c
@@ -28,7 +28,6 @@ static void ohci_hcd_init (struct ohci_hcd *ohci)
28 ohci->next_statechange = jiffies; 28 ohci->next_statechange = jiffies;
29 spin_lock_init (&ohci->lock); 29 spin_lock_init (&ohci->lock);
30 INIT_LIST_HEAD (&ohci->pending); 30 INIT_LIST_HEAD (&ohci->pending);
31 INIT_WORK (&ohci->nec_work, ohci_quirk_nec_worker);
32} 31}
33 32
34/*-------------------------------------------------------------------------*/ 33/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index a5e2eb85d0..d0360f65eb 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -84,7 +84,7 @@ static int ohci_quirk_zfmicro(struct usb_hcd *hcd)
84 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 84 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
85 85
86 ohci->flags |= OHCI_QUIRK_ZFMICRO; 86 ohci->flags |= OHCI_QUIRK_ZFMICRO;
87 ohci_dbg (ohci, "enabled Compaq ZFMicro chipset quirk\n"); 87 ohci_dbg(ohci, "enabled Compaq ZFMicro chipset quirks\n");
88 88
89 return 0; 89 return 0;
90} 90}
@@ -113,11 +113,31 @@ static int ohci_quirk_toshiba_scc(struct usb_hcd *hcd)
113 113
114/* Check for NEC chip and apply quirk for allegedly lost interrupts. 114/* Check for NEC chip and apply quirk for allegedly lost interrupts.
115 */ 115 */
116
117static void ohci_quirk_nec_worker(struct work_struct *work)
118{
119 struct ohci_hcd *ohci = container_of(work, struct ohci_hcd, nec_work);
120 int status;
121
122 status = ohci_init(ohci);
123 if (status != 0) {
124 ohci_err(ohci, "Restarting NEC controller failed in %s, %d\n",
125 "ohci_init", status);
126 return;
127 }
128
129 status = ohci_restart(ohci);
130 if (status != 0)
131 ohci_err(ohci, "Restarting NEC controller failed in %s, %d\n",
132 "ohci_restart", status);
133}
134
116static int ohci_quirk_nec(struct usb_hcd *hcd) 135static int ohci_quirk_nec(struct usb_hcd *hcd)
117{ 136{
118 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 137 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
119 138
120 ohci->flags |= OHCI_QUIRK_NEC; 139 ohci->flags |= OHCI_QUIRK_NEC;
140 INIT_WORK(&ohci->nec_work, ohci_quirk_nec_worker);
121 ohci_dbg (ohci, "enabled NEC chipset lost interrupt quirk\n"); 141 ohci_dbg (ohci, "enabled NEC chipset lost interrupt quirk\n");
122 142
123 return 0; 143 return 0;
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index c43b66acd4..0a74269201 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -134,8 +134,11 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
134 } 134 }
135 135
136 ohci = hcd_to_ohci(hcd); 136 ohci = hcd_to_ohci(hcd);
137 if (is_bigendian) 137 if (is_bigendian) {
138 ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC; 138 ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;
139 if (of_device_is_compatible(dn, "mpc5200-ohci"))
140 ohci->flags |= OHCI_QUIRK_FRAME_NO;
141 }
139 142
140 ohci_hcd_init(ohci); 143 ohci_hcd_init(ohci);
141 144
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c
index 1a2e1777ca..f95be1896b 100644
--- a/drivers/usb/host/ohci-ppc-soc.c
+++ b/drivers/usb/host/ohci-ppc-soc.c
@@ -73,6 +73,11 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
73 73
74 ohci = hcd_to_ohci(hcd); 74 ohci = hcd_to_ohci(hcd);
75 ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC; 75 ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;
76
77#ifdef CONFIG_PPC_MPC52xx
78 /* MPC52xx doesn't need frame_no shift */
79 ohci->flags |= OHCI_QUIRK_FRAME_NO;
80#endif
76 ohci_hcd_init(ohci); 81 ohci_hcd_init(ohci);
77 82
78 retval = usb_add_hcd(hcd, irq, IRQF_DISABLED); 83 retval = usb_add_hcd(hcd, irq, IRQF_DISABLED);
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 830a3fe861..5181732223 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -36,29 +36,15 @@ static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
36 * PRECONDITION: ohci lock held, irqs blocked. 36 * PRECONDITION: ohci lock held, irqs blocked.
37 */ 37 */
38static void 38static void
39finish_urb (struct ohci_hcd *ohci, struct urb *urb) 39finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
40__releases(ohci->lock) 40__releases(ohci->lock)
41__acquires(ohci->lock) 41__acquires(ohci->lock)
42{ 42{
43 // ASSERT (urb->hcpriv != 0); 43 // ASSERT (urb->hcpriv != 0);
44 44
45 urb_free_priv (ohci, urb->hcpriv); 45 urb_free_priv (ohci, urb->hcpriv);
46 urb->hcpriv = NULL; 46 if (likely(status == -EINPROGRESS))
47 47 status = 0;
48 spin_lock (&urb->lock);
49 if (likely (urb->status == -EINPROGRESS))
50 urb->status = 0;
51 /* report short control reads right even though the data TD always
52 * has TD_R set. (much simpler, but creates the 1-td limit.)
53 */
54 if (unlikely (urb->transfer_flags & URB_SHORT_NOT_OK)
55 && unlikely (usb_pipecontrol (urb->pipe))
56 && urb->actual_length < urb->transfer_buffer_length
57 && usb_pipein (urb->pipe)
58 && urb->status == 0) {
59 urb->status = -EREMOTEIO;
60 }
61 spin_unlock (&urb->lock);
62 48
63 switch (usb_pipetype (urb->pipe)) { 49 switch (usb_pipetype (urb->pipe)) {
64 case PIPE_ISOCHRONOUS: 50 case PIPE_ISOCHRONOUS:
@@ -70,12 +56,13 @@ __acquires(ohci->lock)
70 } 56 }
71 57
72#ifdef OHCI_VERBOSE_DEBUG 58#ifdef OHCI_VERBOSE_DEBUG
73 urb_print (urb, "RET", usb_pipeout (urb->pipe)); 59 urb_print(urb, "RET", usb_pipeout (urb->pipe), status);
74#endif 60#endif
75 61
76 /* urb->complete() can reenter this HCD */ 62 /* urb->complete() can reenter this HCD */
63 usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
77 spin_unlock (&ohci->lock); 64 spin_unlock (&ohci->lock);
78 usb_hcd_giveback_urb (ohci_to_hcd(ohci), urb); 65 usb_hcd_giveback_urb(ohci_to_hcd(ohci), urb, status);
79 spin_lock (&ohci->lock); 66 spin_lock (&ohci->lock);
80 67
81 /* stop periodic dma if it's not needed */ 68 /* stop periodic dma if it's not needed */
@@ -179,6 +166,10 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
179 ed->ed_prev = NULL; 166 ed->ed_prev = NULL;
180 ed->ed_next = NULL; 167 ed->ed_next = NULL;
181 ed->hwNextED = 0; 168 ed->hwNextED = 0;
169 if (quirk_zfmicro(ohci)
170 && (ed->type == PIPE_INTERRUPT)
171 && !(ohci->eds_scheduled++))
172 mod_timer(&ohci->unlink_watchdog, round_jiffies_relative(HZ));
182 wmb (); 173 wmb ();
183 174
184 /* we care about rm_list when setting CLE/BLE in case the HC was at 175 /* we care about rm_list when setting CLE/BLE in case the HC was at
@@ -708,19 +699,18 @@ static void td_submit_urb (
708 * Done List handling functions 699 * Done List handling functions
709 *-------------------------------------------------------------------------*/ 700 *-------------------------------------------------------------------------*/
710 701
711/* calculate transfer length/status and update the urb 702/* calculate transfer length/status and update the urb */
712 * PRECONDITION: irqsafe (only for urb->status locking) 703static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
713 */
714static void td_done (struct ohci_hcd *ohci, struct urb *urb, struct td *td)
715{ 704{
716 u32 tdINFO = hc32_to_cpup (ohci, &td->hwINFO); 705 u32 tdINFO = hc32_to_cpup (ohci, &td->hwINFO);
717 int cc = 0; 706 int cc = 0;
707 int status = -EINPROGRESS;
718 708
719 list_del (&td->td_list); 709 list_del (&td->td_list);
720 710
721 /* ISO ... drivers see per-TD length/status */ 711 /* ISO ... drivers see per-TD length/status */
722 if (tdINFO & TD_ISO) { 712 if (tdINFO & TD_ISO) {
723 u16 tdPSW = ohci_hwPSW (ohci, td, 0); 713 u16 tdPSW = ohci_hwPSW(ohci, td, 0);
724 int dlen = 0; 714 int dlen = 0;
725 715
726 /* NOTE: assumes FC in tdINFO == 0, and that 716 /* NOTE: assumes FC in tdINFO == 0, and that
@@ -729,7 +719,7 @@ static void td_done (struct ohci_hcd *ohci, struct urb *urb, struct td *td)
729 719
730 cc = (tdPSW >> 12) & 0xF; 720 cc = (tdPSW >> 12) & 0xF;
731 if (tdINFO & TD_CC) /* hc didn't touch? */ 721 if (tdINFO & TD_CC) /* hc didn't touch? */
732 return; 722 return status;
733 723
734 if (usb_pipeout (urb->pipe)) 724 if (usb_pipeout (urb->pipe))
735 dlen = urb->iso_frame_desc [td->index].length; 725 dlen = urb->iso_frame_desc [td->index].length;
@@ -762,12 +752,8 @@ static void td_done (struct ohci_hcd *ohci, struct urb *urb, struct td *td)
762 if (cc == TD_DATAUNDERRUN 752 if (cc == TD_DATAUNDERRUN
763 && !(urb->transfer_flags & URB_SHORT_NOT_OK)) 753 && !(urb->transfer_flags & URB_SHORT_NOT_OK))
764 cc = TD_CC_NOERROR; 754 cc = TD_CC_NOERROR;
765 if (cc != TD_CC_NOERROR && cc < 0x0E) { 755 if (cc != TD_CC_NOERROR && cc < 0x0E)
766 spin_lock (&urb->lock); 756 status = cc_to_error[cc];
767 if (urb->status == -EINPROGRESS)
768 urb->status = cc_to_error [cc];
769 spin_unlock (&urb->lock);
770 }
771 757
772 /* count all non-empty packets except control SETUP packet */ 758 /* count all non-empty packets except control SETUP packet */
773 if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) { 759 if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
@@ -786,14 +772,15 @@ static void td_done (struct ohci_hcd *ohci, struct urb *urb, struct td *td)
786 urb->actual_length, 772 urb->actual_length,
787 urb->transfer_buffer_length); 773 urb->transfer_buffer_length);
788 } 774 }
775 return status;
789} 776}
790 777
791/*-------------------------------------------------------------------------*/ 778/*-------------------------------------------------------------------------*/
792 779
793static inline struct td * 780static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
794ed_halted (struct ohci_hcd *ohci, struct td *td, int cc, struct td *rev)
795{ 781{
796 struct urb *urb = td->urb; 782 struct urb *urb = td->urb;
783 urb_priv_t *urb_priv = urb->hcpriv;
797 struct ed *ed = td->ed; 784 struct ed *ed = td->ed;
798 struct list_head *tmp = td->td_list.next; 785 struct list_head *tmp = td->td_list.next;
799 __hc32 toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C); 786 __hc32 toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C);
@@ -805,13 +792,12 @@ ed_halted (struct ohci_hcd *ohci, struct td *td, int cc, struct td *rev)
805 wmb (); 792 wmb ();
806 ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H); 793 ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H);
807 794
808 /* put any later tds from this urb onto the donelist, after 'td', 795 /* Get rid of all later tds from this urb. We don't have
809 * order won't matter here: no errors, and nothing was transferred. 796 * to be careful: no errors and nothing was transferred.
810 * also patch the ed so it looks as if those tds completed normally. 797 * Also patch the ed so it looks as if those tds completed normally.
811 */ 798 */
812 while (tmp != &ed->td_list) { 799 while (tmp != &ed->td_list) {
813 struct td *next; 800 struct td *next;
814 __hc32 info;
815 801
816 next = list_entry (tmp, struct td, td_list); 802 next = list_entry (tmp, struct td, td_list);
817 tmp = next->td_list.next; 803 tmp = next->td_list.next;
@@ -826,14 +812,9 @@ ed_halted (struct ohci_hcd *ohci, struct td *td, int cc, struct td *rev)
826 * then we need to leave the control STATUS packet queued 812 * then we need to leave the control STATUS packet queued
827 * and clear ED_SKIP. 813 * and clear ED_SKIP.
828 */ 814 */
829 info = next->hwINFO;
830 info |= cpu_to_hc32 (ohci, TD_DONE);
831 info &= ~cpu_to_hc32 (ohci, TD_CC);
832 next->hwINFO = info;
833
834 next->next_dl_td = rev;
835 rev = next;
836 815
816 list_del(&next->td_list);
817 urb_priv->td_cnt++;
837 ed->hwHeadP = next->hwNextTD | toggle; 818 ed->hwHeadP = next->hwNextTD | toggle;
838 } 819 }
839 820
@@ -859,8 +840,6 @@ ed_halted (struct ohci_hcd *ohci, struct td *td, int cc, struct td *rev)
859 hc32_to_cpu (ohci, td->hwINFO), 840 hc32_to_cpu (ohci, td->hwINFO),
860 cc, cc_to_error [cc]); 841 cc, cc_to_error [cc]);
861 } 842 }
862
863 return rev;
864} 843}
865 844
866/* replies to the request have to be on a FIFO basis so 845/* replies to the request have to be on a FIFO basis so
@@ -897,7 +876,7 @@ static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
897 */ 876 */
898 if (cc != TD_CC_NOERROR 877 if (cc != TD_CC_NOERROR
899 && (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H))) 878 && (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
900 td_rev = ed_halted (ohci, td, cc, td_rev); 879 ed_halted(ohci, td, cc);
901 880
902 td->next_dl_td = td_rev; 881 td->next_dl_td = td_rev;
903 td_rev = td; 882 td_rev = td;
@@ -940,8 +919,12 @@ skip_ed:
940 TD_MASK; 919 TD_MASK;
941 920
942 /* INTR_WDH may need to clean up first */ 921 /* INTR_WDH may need to clean up first */
943 if (td->td_dma != head) 922 if (td->td_dma != head) {
944 goto skip_ed; 923 if (ed == ohci->ed_to_check)
924 ohci->ed_to_check = NULL;
925 else
926 goto skip_ed;
927 }
945 } 928 }
946 } 929 }
947 930
@@ -974,7 +957,7 @@ rescan_this:
974 urb = td->urb; 957 urb = td->urb;
975 urb_priv = td->urb->hcpriv; 958 urb_priv = td->urb->hcpriv;
976 959
977 if (urb->status == -EINPROGRESS) { 960 if (!urb->unlinked) {
978 prev = &td->hwNextTD; 961 prev = &td->hwNextTD;
979 continue; 962 continue;
980 } 963 }
@@ -990,7 +973,7 @@ rescan_this:
990 /* if URB is done, clean up */ 973 /* if URB is done, clean up */
991 if (urb_priv->td_cnt == urb_priv->length) { 974 if (urb_priv->td_cnt == urb_priv->length) {
992 modified = completed = 1; 975 modified = completed = 1;
993 finish_urb (ohci, urb); 976 finish_urb(ohci, urb, 0);
994 } 977 }
995 } 978 }
996 if (completed && !list_empty (&ed->td_list)) 979 if (completed && !list_empty (&ed->td_list))
@@ -998,6 +981,8 @@ rescan_this:
998 981
999 /* ED's now officially unlinked, hc doesn't see */ 982 /* ED's now officially unlinked, hc doesn't see */
1000 ed->state = ED_IDLE; 983 ed->state = ED_IDLE;
984 if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
985 ohci->eds_scheduled--;
1001 ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); 986 ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
1002 ed->hwNextED = 0; 987 ed->hwNextED = 0;
1003 wmb (); 988 wmb ();
@@ -1021,7 +1006,7 @@ rescan_this:
1021 1006
1022 if (ohci->ed_controltail) { 1007 if (ohci->ed_controltail) {
1023 command |= OHCI_CLF; 1008 command |= OHCI_CLF;
1024 if (ohci->flags & OHCI_QUIRK_ZFMICRO) 1009 if (quirk_zfmicro(ohci))
1025 mdelay(1); 1010 mdelay(1);
1026 if (!(ohci->hc_control & OHCI_CTRL_CLE)) { 1011 if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
1027 control |= OHCI_CTRL_CLE; 1012 control |= OHCI_CTRL_CLE;
@@ -1031,7 +1016,7 @@ rescan_this:
1031 } 1016 }
1032 if (ohci->ed_bulktail) { 1017 if (ohci->ed_bulktail) {
1033 command |= OHCI_BLF; 1018 command |= OHCI_BLF;
1034 if (ohci->flags & OHCI_QUIRK_ZFMICRO) 1019 if (quirk_zfmicro(ohci))
1035 mdelay(1); 1020 mdelay(1);
1036 if (!(ohci->hc_control & OHCI_CTRL_BLE)) { 1021 if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
1037 control |= OHCI_CTRL_BLE; 1022 control |= OHCI_CTRL_BLE;
@@ -1043,13 +1028,13 @@ rescan_this:
1043 /* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */ 1028 /* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
1044 if (control) { 1029 if (control) {
1045 ohci->hc_control |= control; 1030 ohci->hc_control |= control;
1046 if (ohci->flags & OHCI_QUIRK_ZFMICRO) 1031 if (quirk_zfmicro(ohci))
1047 mdelay(1); 1032 mdelay(1);
1048 ohci_writel (ohci, ohci->hc_control, 1033 ohci_writel (ohci, ohci->hc_control,
1049 &ohci->regs->control); 1034 &ohci->regs->control);
1050 } 1035 }
1051 if (command) { 1036 if (command) {
1052 if (ohci->flags & OHCI_QUIRK_ZFMICRO) 1037 if (quirk_zfmicro(ohci))
1053 mdelay(1); 1038 mdelay(1);
1054 ohci_writel (ohci, command, &ohci->regs->cmdstatus); 1039 ohci_writel (ohci, command, &ohci->regs->cmdstatus);
1055 } 1040 }
@@ -1061,11 +1046,60 @@ rescan_this:
1061/*-------------------------------------------------------------------------*/ 1046/*-------------------------------------------------------------------------*/
1062 1047
1063/* 1048/*
1049 * Used to take back a TD from the host controller. This would normally be
1050 * called from within dl_done_list, however it may be called directly if the
1051 * HC no longer sees the TD and it has not appeared on the donelist (after
1052 * two frames). This bug has been observed on ZF Micro systems.
1053 */
1054static void takeback_td(struct ohci_hcd *ohci, struct td *td)
1055{
1056 struct urb *urb = td->urb;
1057 urb_priv_t *urb_priv = urb->hcpriv;
1058 struct ed *ed = td->ed;
1059 int status;
1060
1061 /* update URB's length and status from TD */
1062 status = td_done(ohci, urb, td);
1063 urb_priv->td_cnt++;
1064
1065 /* If all this urb's TDs are done, call complete() */
1066 if (urb_priv->td_cnt == urb_priv->length)
1067 finish_urb(ohci, urb, status);
1068
1069 /* clean schedule: unlink EDs that are no longer busy */
1070 if (list_empty(&ed->td_list)) {
1071 if (ed->state == ED_OPER)
1072 start_ed_unlink(ohci, ed);
1073
1074 /* ... reenabling halted EDs only after fault cleanup */
1075 } else if ((ed->hwINFO & cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE))
1076 == cpu_to_hc32(ohci, ED_SKIP)) {
1077 td = list_entry(ed->td_list.next, struct td, td_list);
1078 if (!(td->hwINFO & cpu_to_hc32(ohci, TD_DONE))) {
1079 ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP);
1080 /* ... hc may need waking-up */
1081 switch (ed->type) {
1082 case PIPE_CONTROL:
1083 ohci_writel(ohci, OHCI_CLF,
1084 &ohci->regs->cmdstatus);
1085 break;
1086 case PIPE_BULK:
1087 ohci_writel(ohci, OHCI_BLF,
1088 &ohci->regs->cmdstatus);
1089 break;
1090 }
1091 }
1092 }
1093}
1094
1095/*
1064 * Process normal completions (error or success) and clean the schedules. 1096 * Process normal completions (error or success) and clean the schedules.
1065 * 1097 *
1066 * This is the main path for handing urbs back to drivers. The only other 1098 * This is the main path for handing urbs back to drivers. The only other
1067 * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of 1099 * normal path is finish_unlinks(), which unlinks URBs using ed_rm_list,
1068 * scanning the (re-reversed) donelist as this does. 1100 * instead of scanning the (re-reversed) donelist as this does. There's
1101 * an abnormal path too, handling a quirk in some Compaq silicon: URBs
1102 * with TDs that appear to be orphaned are directly reclaimed.
1069 */ 1103 */
1070static void 1104static void
1071dl_done_list (struct ohci_hcd *ohci) 1105dl_done_list (struct ohci_hcd *ohci)
@@ -1074,44 +1108,7 @@ dl_done_list (struct ohci_hcd *ohci)
1074 1108
1075 while (td) { 1109 while (td) {
1076 struct td *td_next = td->next_dl_td; 1110 struct td *td_next = td->next_dl_td;
1077 struct urb *urb = td->urb; 1111 takeback_td(ohci, td);
1078 urb_priv_t *urb_priv = urb->hcpriv;
1079 struct ed *ed = td->ed;
1080
1081 /* update URB's length and status from TD */
1082 td_done (ohci, urb, td);
1083 urb_priv->td_cnt++;
1084
1085 /* If all this urb's TDs are done, call complete() */
1086 if (urb_priv->td_cnt == urb_priv->length)
1087 finish_urb (ohci, urb);
1088
1089 /* clean schedule: unlink EDs that are no longer busy */
1090 if (list_empty (&ed->td_list)) {
1091 if (ed->state == ED_OPER)
1092 start_ed_unlink (ohci, ed);
1093
1094 /* ... reenabling halted EDs only after fault cleanup */
1095 } else if ((ed->hwINFO & cpu_to_hc32 (ohci,
1096 ED_SKIP | ED_DEQUEUE))
1097 == cpu_to_hc32 (ohci, ED_SKIP)) {
1098 td = list_entry (ed->td_list.next, struct td, td_list);
1099 if (!(td->hwINFO & cpu_to_hc32 (ohci, TD_DONE))) {
1100 ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP);
1101 /* ... hc may need waking-up */
1102 switch (ed->type) {
1103 case PIPE_CONTROL:
1104 ohci_writel (ohci, OHCI_CLF,
1105 &ohci->regs->cmdstatus);
1106 break;
1107 case PIPE_BULK:
1108 ohci_writel (ohci, OHCI_BLF,
1109 &ohci->regs->cmdstatus);
1110 break;
1111 }
1112 }
1113 }
1114
1115 td = td_next; 1112 td = td_next;
1116 } 1113 }
1117} 1114}
diff --git a/drivers/usb/host/ohci-ssb.c b/drivers/usb/host/ohci-ssb.c
new file mode 100644
index 0000000000..bc3e785d8c
--- /dev/null
+++ b/drivers/usb/host/ohci-ssb.c
@@ -0,0 +1,247 @@
1/*
2 * Sonics Silicon Backplane
3 * Broadcom USB-core OHCI driver
4 *
5 * Copyright 2007 Michael Buesch <mb@bu3sch.de>
6 *
7 * Derived from the OHCI-PCI driver
8 * Copyright 1999 Roman Weissgaerber
9 * Copyright 2000-2002 David Brownell
10 * Copyright 1999 Linus Torvalds
11 * Copyright 1999 Gregory P. Smith
12 *
13 * Derived from the USBcore related parts of Broadcom-SB
14 * Copyright 2005 Broadcom Corporation
15 *
16 * Licensed under the GNU/GPL. See COPYING for details.
17 */
18#include <linux/ssb/ssb.h>
19
20
21#define SSB_OHCI_TMSLOW_HOSTMODE (1 << 29)
22
23struct ssb_ohci_device {
24 struct ohci_hcd ohci; /* _must_ be at the beginning. */
25
26 u32 enable_flags;
27};
28
29static inline
30struct ssb_ohci_device *hcd_to_ssb_ohci(struct usb_hcd *hcd)
31{
32 return (struct ssb_ohci_device *)(hcd->hcd_priv);
33}
34
35
36static int ssb_ohci_reset(struct usb_hcd *hcd)
37{
38 struct ssb_ohci_device *ohcidev = hcd_to_ssb_ohci(hcd);
39 struct ohci_hcd *ohci = &ohcidev->ohci;
40 int err;
41
42 ohci_hcd_init(ohci);
43 err = ohci_init(ohci);
44
45 return err;
46}
47
48static int ssb_ohci_start(struct usb_hcd *hcd)
49{
50 struct ssb_ohci_device *ohcidev = hcd_to_ssb_ohci(hcd);
51 struct ohci_hcd *ohci = &ohcidev->ohci;
52 int err;
53
54 err = ohci_run(ohci);
55 if (err < 0) {
56 ohci_err(ohci, "can't start\n");
57 ohci_stop(hcd);
58 }
59
60 return err;
61}
62
63#ifdef CONFIG_PM
64static int ssb_ohci_hcd_suspend(struct usb_hcd *hcd, pm_message_t message)
65{
66 struct ssb_ohci_device *ohcidev = hcd_to_ssb_ohci(hcd);
67 struct ohci_hcd *ohci = &ohcidev->ohci;
68 unsigned long flags;
69
70 spin_lock_irqsave(&ohci->lock, flags);
71
72 ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
73 ohci_readl(ohci, &ohci->regs->intrdisable); /* commit write */
74
75 /* make sure snapshot being resumed re-enumerates everything */
76 if (message.event == PM_EVENT_PRETHAW)
77 ohci_usb_reset(ohci);
78
79 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
80
81 spin_unlock_irqrestore(&ohci->lock, flags);
82 return 0;
83}
84
85static int ssb_ohci_hcd_resume(struct usb_hcd *hcd)
86{
87 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
88 usb_hcd_resume_root_hub(hcd);
89 return 0;
90}
91#endif /* CONFIG_PM */
92
93static const struct hc_driver ssb_ohci_hc_driver = {
94 .description = "ssb-usb-ohci",
95 .product_desc = "SSB OHCI Controller",
96 .hcd_priv_size = sizeof(struct ssb_ohci_device),
97
98 .irq = ohci_irq,
99 .flags = HCD_MEMORY | HCD_USB11,
100
101 .reset = ssb_ohci_reset,
102 .start = ssb_ohci_start,
103 .stop = ohci_stop,
104 .shutdown = ohci_shutdown,
105
106#ifdef CONFIG_PM
107 .suspend = ssb_ohci_hcd_suspend,
108 .resume = ssb_ohci_hcd_resume,
109#endif
110
111 .urb_enqueue = ohci_urb_enqueue,
112 .urb_dequeue = ohci_urb_dequeue,
113 .endpoint_disable = ohci_endpoint_disable,
114
115 .get_frame_number = ohci_get_frame,
116
117 .hub_status_data = ohci_hub_status_data,
118 .hub_control = ohci_hub_control,
119 .hub_irq_enable = ohci_rhsc_enable,
120 .bus_suspend = ohci_bus_suspend,
121 .bus_resume = ohci_bus_resume,
122
123 .start_port_reset = ohci_start_port_reset,
124};
125
126static void ssb_ohci_detach(struct ssb_device *dev)
127{
128 struct usb_hcd *hcd = ssb_get_drvdata(dev);
129
130 usb_remove_hcd(hcd);
131 iounmap(hcd->regs);
132 usb_put_hcd(hcd);
133 ssb_device_disable(dev, 0);
134}
135
136static int ssb_ohci_attach(struct ssb_device *dev)
137{
138 struct ssb_ohci_device *ohcidev;
139 struct usb_hcd *hcd;
140 int err = -ENOMEM;
141 u32 tmp, flags = 0;
142
143 if (dev->id.coreid == SSB_DEV_USB11_HOSTDEV)
144 flags |= SSB_OHCI_TMSLOW_HOSTMODE;
145
146 ssb_device_enable(dev, flags);
147
148 hcd = usb_create_hcd(&ssb_ohci_hc_driver, dev->dev,
149 dev->dev->bus_id);
150 if (!hcd)
151 goto err_dev_disable;
152 ohcidev = hcd_to_ssb_ohci(hcd);
153 ohcidev->enable_flags = flags;
154
155 tmp = ssb_read32(dev, SSB_ADMATCH0);
156 hcd->rsrc_start = ssb_admatch_base(tmp);
157 hcd->rsrc_len = ssb_admatch_size(tmp);
158 hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
159 if (!hcd->regs)
160 goto err_put_hcd;
161 err = usb_add_hcd(hcd, dev->irq, IRQF_SHARED);
162 if (err)
163 goto err_iounmap;
164
165 ssb_set_drvdata(dev, hcd);
166
167 return err;
168
169err_iounmap:
170 iounmap(hcd->regs);
171err_put_hcd:
172 usb_put_hcd(hcd);
173err_dev_disable:
174 ssb_device_disable(dev, flags);
175 return err;
176}
177
178static int ssb_ohci_probe(struct ssb_device *dev,
179 const struct ssb_device_id *id)
180{
181 int err;
182 u16 chipid_top;
183
184 /* USBcores are only connected on embedded devices. */
185 chipid_top = (dev->bus->chip_id & 0xFF00);
186 if (chipid_top != 0x4700 && chipid_top != 0x5300)
187 return -ENODEV;
188
189 /* TODO: Probably need checks here; is the core connected? */
190
191 if (usb_disabled())
192 return -ENODEV;
193
194 /* We currently always attach SSB_DEV_USB11_HOSTDEV
195 * as HOST OHCI. If we want to attach it as Client device,
196 * we must branch here and call into the (yet to
197 * be written) Client mode driver. Same for remove(). */
198
199 err = ssb_ohci_attach(dev);
200
201 return err;
202}
203
204static void ssb_ohci_remove(struct ssb_device *dev)
205{
206 ssb_ohci_detach(dev);
207}
208
209#ifdef CONFIG_PM
210
211static int ssb_ohci_suspend(struct ssb_device *dev, pm_message_t state)
212{
213 ssb_device_disable(dev, 0);
214
215 return 0;
216}
217
218static int ssb_ohci_resume(struct ssb_device *dev)
219{
220 struct usb_hcd *hcd = ssb_get_drvdata(dev);
221 struct ssb_ohci_device *ohcidev = hcd_to_ssb_ohci(hcd);
222
223 ssb_device_enable(dev, ohcidev->enable_flags);
224
225 return 0;
226}
227
228#else /* !CONFIG_PM */
229#define ssb_ohci_suspend NULL
230#define ssb_ohci_resume NULL
231#endif /* CONFIG_PM */
232
233static const struct ssb_device_id ssb_ohci_table[] = {
234 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOSTDEV, SSB_ANY_REV),
235 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOST, SSB_ANY_REV),
236 SSB_DEVTABLE_END
237};
238MODULE_DEVICE_TABLE(ssb, ssb_ohci_table);
239
240static struct ssb_driver ssb_ohci_driver = {
241 .name = KBUILD_MODNAME,
242 .id_table = ssb_ohci_table,
243 .probe = ssb_ohci_probe,
244 .remove = ssb_ohci_remove,
245 .suspend = ssb_ohci_suspend,
246 .resume = ssb_ohci_resume,
247};
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 4ada43cf13..47c5c66a28 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -398,11 +398,38 @@ struct ohci_hcd {
398#define OHCI_QUIRK_BE_MMIO 0x10 /* BE registers */ 398#define OHCI_QUIRK_BE_MMIO 0x10 /* BE registers */
399#define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/ 399#define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/
400#define OHCI_QUIRK_NEC 0x40 /* lost interrupts */ 400#define OHCI_QUIRK_NEC 0x40 /* lost interrupts */
401#define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */
401 // there are also chip quirks/bugs in init logic 402 // there are also chip quirks/bugs in init logic
402 403
403 struct work_struct nec_work; /* Worker for NEC quirk */ 404 struct work_struct nec_work; /* Worker for NEC quirk */
405
406 /* Needed for ZF Micro quirk */
407 struct timer_list unlink_watchdog;
408 unsigned eds_scheduled;
409 struct ed *ed_to_check;
410 unsigned zf_delay;
404}; 411};
405 412
413#ifdef CONFIG_PCI
414static inline int quirk_nec(struct ohci_hcd *ohci)
415{
416 return ohci->flags & OHCI_QUIRK_NEC;
417}
418static inline int quirk_zfmicro(struct ohci_hcd *ohci)
419{
420 return ohci->flags & OHCI_QUIRK_ZFMICRO;
421}
422#else
423static inline int quirk_nec(struct ohci_hcd *ohci)
424{
425 return 0;
426}
427static inline int quirk_zfmicro(struct ohci_hcd *ohci)
428{
429 return 0;
430}
431#endif
432
406/* convert between an hcd pointer and the corresponding ohci_hcd */ 433/* convert between an hcd pointer and the corresponding ohci_hcd */
407static inline struct ohci_hcd *hcd_to_ohci (struct usb_hcd *hcd) 434static inline struct ohci_hcd *hcd_to_ohci (struct usb_hcd *hcd)
408{ 435{
@@ -607,15 +634,12 @@ static inline u32 hc32_to_cpup (const struct ohci_hcd *ohci, const __hc32 *x)
607/* HCCA frame number is 16 bits, but is accessed as 32 bits since not all 634/* HCCA frame number is 16 bits, but is accessed as 32 bits since not all
608 * hardware handles 16 bit reads. That creates a different confusion on 635 * hardware handles 16 bit reads. That creates a different confusion on
609 * some big-endian SOC implementations. Same thing happens with PSW access. 636 * some big-endian SOC implementations. Same thing happens with PSW access.
610 *
611 * FIXME: Deal with that as a runtime quirk when STB03xxx is ported over
612 * to arch/powerpc
613 */ 637 */
614 638
615#ifdef CONFIG_STB03xxx 639#ifdef CONFIG_PPC_MPC52xx
616#define OHCI_BE_FRAME_NO_SHIFT 16 640#define big_endian_frame_no_quirk(ohci) (ohci->flags & OHCI_QUIRK_FRAME_NO)
617#else 641#else
618#define OHCI_BE_FRAME_NO_SHIFT 0 642#define big_endian_frame_no_quirk(ohci) 0
619#endif 643#endif
620 644
621static inline u16 ohci_frame_no(const struct ohci_hcd *ohci) 645static inline u16 ohci_frame_no(const struct ohci_hcd *ohci)
@@ -623,7 +647,8 @@ static inline u16 ohci_frame_no(const struct ohci_hcd *ohci)
623 u32 tmp; 647 u32 tmp;
624 if (big_endian_desc(ohci)) { 648 if (big_endian_desc(ohci)) {
625 tmp = be32_to_cpup((__force __be32 *)&ohci->hcca->frame_no); 649 tmp = be32_to_cpup((__force __be32 *)&ohci->hcca->frame_no);
626 tmp >>= OHCI_BE_FRAME_NO_SHIFT; 650 if (!big_endian_frame_no_quirk(ohci))
651 tmp >>= 16;
627 } else 652 } else
628 tmp = le32_to_cpup((__force __le32 *)&ohci->hcca->frame_no); 653 tmp = le32_to_cpup((__force __le32 *)&ohci->hcca->frame_no);
629 654
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 40a1de4c25..ae8ec4474e 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -782,10 +782,12 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
782 kfree(td); 782 kfree(td);
783 783
784 if (urb) { 784 if (urb) {
785 urb->status = -ENODEV; 785 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597),
786 urb->hcpriv = NULL; 786 urb);
787
787 spin_unlock(&r8a66597->lock); 788 spin_unlock(&r8a66597->lock);
788 usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb); 789 usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb,
790 -ENODEV);
789 spin_lock(&r8a66597->lock); 791 spin_lock(&r8a66597->lock);
790 } 792 }
791 break; 793 break;
@@ -832,7 +834,7 @@ static void init_pipe_info(struct r8a66597 *r8a66597, struct urb *urb,
832 info.pipenum = get_empty_pipenum(r8a66597, ep); 834 info.pipenum = get_empty_pipenum(r8a66597, ep);
833 info.address = get_urb_to_r8a66597_addr(r8a66597, urb); 835 info.address = get_urb_to_r8a66597_addr(r8a66597, urb);
834 info.epnum = ep->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 836 info.epnum = ep->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
835 info.maxpacket = ep->wMaxPacketSize; 837 info.maxpacket = le16_to_cpu(ep->wMaxPacketSize);
836 info.type = get_r8a66597_type(ep->bmAttributes 838 info.type = get_r8a66597_type(ep->bmAttributes
837 & USB_ENDPOINT_XFERTYPE_MASK); 839 & USB_ENDPOINT_XFERTYPE_MASK);
838 info.bufnum = get_bufnum(info.pipenum); 840 info.bufnum = get_bufnum(info.pipenum);
@@ -923,7 +925,7 @@ static void prepare_setup_packet(struct r8a66597 *r8a66597,
923 r8a66597_write(r8a66597, ~(SIGN | SACK), INTSTS1); 925 r8a66597_write(r8a66597, ~(SIGN | SACK), INTSTS1);
924 926
925 for (i = 0; i < 4; i++) { 927 for (i = 0; i < 4; i++) {
926 r8a66597_write(r8a66597, p[i], setup_addr); 928 r8a66597_write(r8a66597, cpu_to_le16(p[i]), setup_addr);
927 setup_addr += 2; 929 setup_addr += 2;
928 } 930 }
929 r8a66597_write(r8a66597, SUREQ, DCPCTR); 931 r8a66597_write(r8a66597, SUREQ, DCPCTR);
@@ -1032,6 +1034,15 @@ static void prepare_status_packet(struct r8a66597 *r8a66597,
1032 pipe_start(r8a66597, td->pipe); 1034 pipe_start(r8a66597, td->pipe);
1033} 1035}
1034 1036
1037static int is_set_address(unsigned char *setup_packet)
1038{
1039 if (((setup_packet[0] & USB_TYPE_MASK) == USB_TYPE_STANDARD) &&
1040 setup_packet[1] == USB_REQ_SET_ADDRESS)
1041 return 1;
1042 else
1043 return 0;
1044}
1045
1035/* this function must be called with interrupt disabled */ 1046/* this function must be called with interrupt disabled */
1036static int start_transfer(struct r8a66597 *r8a66597, struct r8a66597_td *td) 1047static int start_transfer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
1037{ 1048{
@@ -1039,7 +1050,7 @@ static int start_transfer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
1039 1050
1040 switch (td->type) { 1051 switch (td->type) {
1041 case USB_PID_SETUP: 1052 case USB_PID_SETUP:
1042 if (td->urb->setup_packet[1] == USB_REQ_SET_ADDRESS) { 1053 if (is_set_address(td->urb->setup_packet)) {
1043 td->set_address = 1; 1054 td->set_address = 1;
1044 td->urb->setup_packet[2] = alloc_usb_address(r8a66597, 1055 td->urb->setup_packet[2] = alloc_usb_address(r8a66597,
1045 td->urb); 1056 td->urb);
@@ -1106,8 +1117,9 @@ static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
1106} 1117}
1107 1118
1108/* this function must be called with interrupt disabled */ 1119/* this function must be called with interrupt disabled */
1109static void done(struct r8a66597 *r8a66597, struct r8a66597_td *td, 1120static void finish_request(struct r8a66597 *r8a66597, struct r8a66597_td *td,
1110 u16 pipenum, struct urb *urb) 1121 u16 pipenum, struct urb *urb, int status)
1122__releases(r8a66597->lock) __acquires(r8a66597->lock)
1111{ 1123{
1112 int restart = 0; 1124 int restart = 0;
1113 struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597); 1125 struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597);
@@ -1115,7 +1127,7 @@ static void done(struct r8a66597 *r8a66597, struct r8a66597_td *td,
1115 r8a66597->timeout_map &= ~(1 << pipenum); 1127 r8a66597->timeout_map &= ~(1 << pipenum);
1116 1128
1117 if (likely(td)) { 1129 if (likely(td)) {
1118 if (td->set_address && urb->status != 0) 1130 if (td->set_address && (status != 0 || urb->unlinked))
1119 r8a66597->address_map &= ~(1 << urb->setup_packet[2]); 1131 r8a66597->address_map &= ~(1 << urb->setup_packet[2]);
1120 1132
1121 pipe_toggle_save(r8a66597, td->pipe, urb); 1133 pipe_toggle_save(r8a66597, td->pipe, urb);
@@ -1130,9 +1142,9 @@ static void done(struct r8a66597 *r8a66597, struct r8a66597_td *td,
1130 if (usb_pipeisoc(urb->pipe)) 1142 if (usb_pipeisoc(urb->pipe))
1131 urb->start_frame = r8a66597_get_frame(hcd); 1143 urb->start_frame = r8a66597_get_frame(hcd);
1132 1144
1133 urb->hcpriv = NULL; 1145 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
1134 spin_unlock(&r8a66597->lock); 1146 spin_unlock(&r8a66597->lock);
1135 usb_hcd_giveback_urb(hcd, urb); 1147 usb_hcd_giveback_urb(hcd, urb, status);
1136 spin_lock(&r8a66597->lock); 1148 spin_lock(&r8a66597->lock);
1137 } 1149 }
1138 1150
@@ -1146,14 +1158,6 @@ static void done(struct r8a66597 *r8a66597, struct r8a66597_td *td,
1146 } 1158 }
1147} 1159}
1148 1160
1149/* this function must be called with interrupt disabled */
1150static void finish_request(struct r8a66597 *r8a66597, struct r8a66597_td *td,
1151 u16 pipenum, struct urb *urb)
1152__releases(r8a66597->lock) __acquires(r8a66597->lock)
1153{
1154 done(r8a66597, td, pipenum, urb);
1155}
1156
1157static void packet_read(struct r8a66597 *r8a66597, u16 pipenum) 1161static void packet_read(struct r8a66597 *r8a66597, u16 pipenum)
1158{ 1162{
1159 u16 tmp; 1163 u16 tmp;
@@ -1162,6 +1166,7 @@ static void packet_read(struct r8a66597 *r8a66597, u16 pipenum)
1162 struct r8a66597_td *td = r8a66597_get_td(r8a66597, pipenum); 1166 struct r8a66597_td *td = r8a66597_get_td(r8a66597, pipenum);
1163 struct urb *urb; 1167 struct urb *urb;
1164 int finish = 0; 1168 int finish = 0;
1169 int status = 0;
1165 1170
1166 if (unlikely(!td)) 1171 if (unlikely(!td))
1167 return; 1172 return;
@@ -1170,17 +1175,15 @@ static void packet_read(struct r8a66597 *r8a66597, u16 pipenum)
1170 fifo_change_from_pipe(r8a66597, td->pipe); 1175 fifo_change_from_pipe(r8a66597, td->pipe);
1171 tmp = r8a66597_read(r8a66597, td->pipe->fifoctr); 1176 tmp = r8a66597_read(r8a66597, td->pipe->fifoctr);
1172 if (unlikely((tmp & FRDY) == 0)) { 1177 if (unlikely((tmp & FRDY) == 0)) {
1173 urb->status = -EPIPE;
1174 pipe_stop(r8a66597, td->pipe); 1178 pipe_stop(r8a66597, td->pipe);
1175 pipe_irq_disable(r8a66597, pipenum); 1179 pipe_irq_disable(r8a66597, pipenum);
1176 err("in fifo not ready (%d)", pipenum); 1180 err("in fifo not ready (%d)", pipenum);
1177 finish_request(r8a66597, td, pipenum, td->urb); 1181 finish_request(r8a66597, td, pipenum, td->urb, -EPIPE);
1178 return; 1182 return;
1179 } 1183 }
1180 1184
1181 /* prepare parameters */ 1185 /* prepare parameters */
1182 rcv_len = tmp & DTLN; 1186 rcv_len = tmp & DTLN;
1183 bufsize = td->maxpacket;
1184 if (usb_pipeisoc(urb->pipe)) { 1187 if (usb_pipeisoc(urb->pipe)) {
1185 buf = (u16 *)(urb->transfer_buffer + 1188 buf = (u16 *)(urb->transfer_buffer +
1186 urb->iso_frame_desc[td->iso_cnt].offset); 1189 urb->iso_frame_desc[td->iso_cnt].offset);
@@ -1189,29 +1192,31 @@ static void packet_read(struct r8a66597 *r8a66597, u16 pipenum)
1189 buf = (void *)urb->transfer_buffer + urb->actual_length; 1192 buf = (void *)urb->transfer_buffer + urb->actual_length;
1190 urb_len = urb->transfer_buffer_length - urb->actual_length; 1193 urb_len = urb->transfer_buffer_length - urb->actual_length;
1191 } 1194 }
1192 if (rcv_len < bufsize) 1195 bufsize = min(urb_len, (int) td->maxpacket);
1193 size = min(rcv_len, urb_len); 1196 if (rcv_len <= bufsize) {
1194 else 1197 size = rcv_len;
1195 size = min(bufsize, urb_len); 1198 } else {
1199 size = bufsize;
1200 status = -EOVERFLOW;
1201 finish = 1;
1202 }
1196 1203
1197 /* update parameters */ 1204 /* update parameters */
1198 urb->actual_length += size; 1205 urb->actual_length += size;
1199 if (rcv_len == 0) 1206 if (rcv_len == 0)
1200 td->zero_packet = 1; 1207 td->zero_packet = 1;
1201 if ((size % td->maxpacket) > 0) { 1208 if (rcv_len < bufsize) {
1202 td->short_packet = 1; 1209 td->short_packet = 1;
1203 if (urb->transfer_buffer_length != urb->actual_length &&
1204 urb->transfer_flags & URB_SHORT_NOT_OK)
1205 td->urb->status = -EREMOTEIO;
1206 } 1210 }
1207 if (usb_pipeisoc(urb->pipe)) { 1211 if (usb_pipeisoc(urb->pipe)) {
1208 urb->iso_frame_desc[td->iso_cnt].actual_length = size; 1212 urb->iso_frame_desc[td->iso_cnt].actual_length = size;
1209 urb->iso_frame_desc[td->iso_cnt].status = 0; 1213 urb->iso_frame_desc[td->iso_cnt].status = status;
1210 td->iso_cnt++; 1214 td->iso_cnt++;
1215 finish = 0;
1211 } 1216 }
1212 1217
1213 /* check transfer finish */ 1218 /* check transfer finish */
1214 if (check_transfer_finish(td, urb)) { 1219 if (finish || check_transfer_finish(td, urb)) {
1215 pipe_stop(r8a66597, td->pipe); 1220 pipe_stop(r8a66597, td->pipe);
1216 pipe_irq_disable(r8a66597, pipenum); 1221 pipe_irq_disable(r8a66597, pipenum);
1217 finish = 1; 1222 finish = 1;
@@ -1226,11 +1231,8 @@ static void packet_read(struct r8a66597 *r8a66597, u16 pipenum)
1226 buf, size); 1231 buf, size);
1227 } 1232 }
1228 1233
1229 if (finish && pipenum != 0) { 1234 if (finish && pipenum != 0)
1230 if (td->urb->status == -EINPROGRESS) 1235 finish_request(r8a66597, td, pipenum, urb, status);
1231 td->urb->status = 0;
1232 finish_request(r8a66597, td, pipenum, urb);
1233 }
1234} 1236}
1235 1237
1236static void packet_write(struct r8a66597 *r8a66597, u16 pipenum) 1238static void packet_write(struct r8a66597 *r8a66597, u16 pipenum)
@@ -1248,11 +1250,10 @@ static void packet_write(struct r8a66597 *r8a66597, u16 pipenum)
1248 fifo_change_from_pipe(r8a66597, td->pipe); 1250 fifo_change_from_pipe(r8a66597, td->pipe);
1249 tmp = r8a66597_read(r8a66597, td->pipe->fifoctr); 1251 tmp = r8a66597_read(r8a66597, td->pipe->fifoctr);
1250 if (unlikely((tmp & FRDY) == 0)) { 1252 if (unlikely((tmp & FRDY) == 0)) {
1251 urb->status = -EPIPE;
1252 pipe_stop(r8a66597, td->pipe); 1253 pipe_stop(r8a66597, td->pipe);
1253 pipe_irq_disable(r8a66597, pipenum); 1254 pipe_irq_disable(r8a66597, pipenum);
1254 err("out write fifo not ready. (%d)", pipenum); 1255 err("out write fifo not ready. (%d)", pipenum);
1255 finish_request(r8a66597, td, pipenum, td->urb); 1256 finish_request(r8a66597, td, pipenum, urb, -EPIPE);
1256 return; 1257 return;
1257 } 1258 }
1258 1259
@@ -1297,7 +1298,7 @@ static void packet_write(struct r8a66597 *r8a66597, u16 pipenum)
1297} 1298}
1298 1299
1299 1300
1300static void check_next_phase(struct r8a66597 *r8a66597) 1301static void check_next_phase(struct r8a66597 *r8a66597, int status)
1301{ 1302{
1302 struct r8a66597_td *td = r8a66597_get_td(r8a66597, 0); 1303 struct r8a66597_td *td = r8a66597_get_td(r8a66597, 0);
1303 struct urb *urb; 1304 struct urb *urb;
@@ -1310,49 +1311,41 @@ static void check_next_phase(struct r8a66597 *r8a66597)
1310 switch (td->type) { 1311 switch (td->type) {
1311 case USB_PID_IN: 1312 case USB_PID_IN:
1312 case USB_PID_OUT: 1313 case USB_PID_OUT:
1313 if (urb->status != -EINPROGRESS) {
1314 finish = 1;
1315 break;
1316 }
1317 if (check_transfer_finish(td, urb)) 1314 if (check_transfer_finish(td, urb))
1318 td->type = USB_PID_ACK; 1315 td->type = USB_PID_ACK;
1319 break; 1316 break;
1320 case USB_PID_SETUP: 1317 case USB_PID_SETUP:
1321 if (urb->status != -EINPROGRESS) 1318 if (urb->transfer_buffer_length == urb->actual_length)
1322 finish = 1;
1323 else if (urb->transfer_buffer_length == urb->actual_length) {
1324 td->type = USB_PID_ACK; 1319 td->type = USB_PID_ACK;
1325 urb->status = 0; 1320 else if (usb_pipeout(urb->pipe))
1326 } else if (usb_pipeout(urb->pipe))
1327 td->type = USB_PID_OUT; 1321 td->type = USB_PID_OUT;
1328 else 1322 else
1329 td->type = USB_PID_IN; 1323 td->type = USB_PID_IN;
1330 break; 1324 break;
1331 case USB_PID_ACK: 1325 case USB_PID_ACK:
1332 finish = 1; 1326 finish = 1;
1333 if (urb->status == -EINPROGRESS)
1334 urb->status = 0;
1335 break; 1327 break;
1336 } 1328 }
1337 1329
1338 if (finish) 1330 if (finish || status != 0 || urb->unlinked)
1339 finish_request(r8a66597, td, 0, urb); 1331 finish_request(r8a66597, td, 0, urb, status);
1340 else 1332 else
1341 start_transfer(r8a66597, td); 1333 start_transfer(r8a66597, td);
1342} 1334}
1343 1335
1344static void set_urb_error(struct r8a66597 *r8a66597, u16 pipenum) 1336static int get_urb_error(struct r8a66597 *r8a66597, u16 pipenum)
1345{ 1337{
1346 struct r8a66597_td *td = r8a66597_get_td(r8a66597, pipenum); 1338 struct r8a66597_td *td = r8a66597_get_td(r8a66597, pipenum);
1347 1339
1348 if (td && td->urb) { 1340 if (td) {
1349 u16 pid = r8a66597_read(r8a66597, td->pipe->pipectr) & PID; 1341 u16 pid = r8a66597_read(r8a66597, td->pipe->pipectr) & PID;
1350 1342
1351 if (pid == PID_NAK) 1343 if (pid == PID_NAK)
1352 td->urb->status = -ECONNRESET; 1344 return -ECONNRESET;
1353 else 1345 else
1354 td->urb->status = -EPIPE; 1346 return -EPIPE;
1355 } 1347 }
1348 return 0;
1356} 1349}
1357 1350
1358static void irq_pipe_ready(struct r8a66597 *r8a66597) 1351static void irq_pipe_ready(struct r8a66597 *r8a66597)
@@ -1371,7 +1364,7 @@ static void irq_pipe_ready(struct r8a66597 *r8a66597)
1371 packet_read(r8a66597, 0); 1364 packet_read(r8a66597, 0);
1372 else 1365 else
1373 pipe_irq_disable(r8a66597, 0); 1366 pipe_irq_disable(r8a66597, 0);
1374 check_next_phase(r8a66597); 1367 check_next_phase(r8a66597, 0);
1375 } 1368 }
1376 1369
1377 for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) { 1370 for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
@@ -1405,7 +1398,7 @@ static void irq_pipe_empty(struct r8a66597 *r8a66597)
1405 td = r8a66597_get_td(r8a66597, 0); 1398 td = r8a66597_get_td(r8a66597, 0);
1406 if (td && td->type != USB_PID_OUT) 1399 if (td && td->type != USB_PID_OUT)
1407 disable_irq_empty(r8a66597, 0); 1400 disable_irq_empty(r8a66597, 0);
1408 check_next_phase(r8a66597); 1401 check_next_phase(r8a66597, 0);
1409 } 1402 }
1410 1403
1411 for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) { 1404 for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
@@ -1420,9 +1413,8 @@ static void irq_pipe_empty(struct r8a66597 *r8a66597)
1420 if ((tmp & INBUFM) == 0) { 1413 if ((tmp & INBUFM) == 0) {
1421 disable_irq_empty(r8a66597, pipenum); 1414 disable_irq_empty(r8a66597, pipenum);
1422 pipe_irq_disable(r8a66597, pipenum); 1415 pipe_irq_disable(r8a66597, pipenum);
1423 if (td->urb->status == -EINPROGRESS) 1416 finish_request(r8a66597, td, pipenum, td->urb,
1424 td->urb->status = 0; 1417 0);
1425 finish_request(r8a66597, td, pipenum, td->urb);
1426 } 1418 }
1427 } 1419 }
1428 } 1420 }
@@ -1433,15 +1425,16 @@ static void irq_pipe_nrdy(struct r8a66597 *r8a66597)
1433 u16 check; 1425 u16 check;
1434 u16 pipenum; 1426 u16 pipenum;
1435 u16 mask; 1427 u16 mask;
1428 int status;
1436 1429
1437 mask = r8a66597_read(r8a66597, NRDYSTS) 1430 mask = r8a66597_read(r8a66597, NRDYSTS)
1438 & r8a66597_read(r8a66597, NRDYENB); 1431 & r8a66597_read(r8a66597, NRDYENB);
1439 r8a66597_write(r8a66597, ~mask, NRDYSTS); 1432 r8a66597_write(r8a66597, ~mask, NRDYSTS);
1440 if (mask & NRDY0) { 1433 if (mask & NRDY0) {
1441 cfifo_change(r8a66597, 0); 1434 cfifo_change(r8a66597, 0);
1442 set_urb_error(r8a66597, 0); 1435 status = get_urb_error(r8a66597, 0);
1443 pipe_irq_disable(r8a66597, 0); 1436 pipe_irq_disable(r8a66597, 0);
1444 check_next_phase(r8a66597); 1437 check_next_phase(r8a66597, status);
1445 } 1438 }
1446 1439
1447 for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) { 1440 for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
@@ -1452,10 +1445,10 @@ static void irq_pipe_nrdy(struct r8a66597 *r8a66597)
1452 if (unlikely(!td)) 1445 if (unlikely(!td))
1453 continue; 1446 continue;
1454 1447
1455 set_urb_error(r8a66597, pipenum); 1448 status = get_urb_error(r8a66597, pipenum);
1456 pipe_irq_disable(r8a66597, pipenum); 1449 pipe_irq_disable(r8a66597, pipenum);
1457 pipe_stop(r8a66597, td->pipe); 1450 pipe_stop(r8a66597, td->pipe);
1458 finish_request(r8a66597, td, pipenum, td->urb); 1451 finish_request(r8a66597, td, pipenum, td->urb, status);
1459 } 1452 }
1460 } 1453 }
1461} 1454}
@@ -1475,6 +1468,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
1475 u16 intsts0, intsts1, intsts2; 1468 u16 intsts0, intsts1, intsts2;
1476 u16 intenb0, intenb1, intenb2; 1469 u16 intenb0, intenb1, intenb2;
1477 u16 mask0, mask1, mask2; 1470 u16 mask0, mask1, mask2;
1471 int status;
1478 1472
1479 spin_lock(&r8a66597->lock); 1473 spin_lock(&r8a66597->lock);
1480 1474
@@ -1518,12 +1512,12 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
1518 } 1512 }
1519 if (mask1 & SIGN) { 1513 if (mask1 & SIGN) {
1520 r8a66597_write(r8a66597, ~SIGN, INTSTS1); 1514 r8a66597_write(r8a66597, ~SIGN, INTSTS1);
1521 set_urb_error(r8a66597, 0); 1515 status = get_urb_error(r8a66597, 0);
1522 check_next_phase(r8a66597); 1516 check_next_phase(r8a66597, status);
1523 } 1517 }
1524 if (mask1 & SACK) { 1518 if (mask1 & SACK) {
1525 r8a66597_write(r8a66597, ~SACK, INTSTS1); 1519 r8a66597_write(r8a66597, ~SACK, INTSTS1);
1526 check_next_phase(r8a66597); 1520 check_next_phase(r8a66597, 0);
1527 } 1521 }
1528 } 1522 }
1529 if (mask0) { 1523 if (mask0) {
@@ -1722,21 +1716,25 @@ static struct r8a66597_td *r8a66597_make_td(struct r8a66597 *r8a66597,
1722} 1716}
1723 1717
1724static int r8a66597_urb_enqueue(struct usb_hcd *hcd, 1718static int r8a66597_urb_enqueue(struct usb_hcd *hcd,
1725 struct usb_host_endpoint *hep,
1726 struct urb *urb, 1719 struct urb *urb,
1727 gfp_t mem_flags) 1720 gfp_t mem_flags)
1728{ 1721{
1722 struct usb_host_endpoint *hep = urb->ep;
1729 struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd); 1723 struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
1730 struct r8a66597_td *td = NULL; 1724 struct r8a66597_td *td = NULL;
1731 int ret = 0, request = 0; 1725 int ret, request = 0;
1732 unsigned long flags; 1726 unsigned long flags;
1733 1727
1734 spin_lock_irqsave(&r8a66597->lock, flags); 1728 spin_lock_irqsave(&r8a66597->lock, flags);
1735 if (!get_urb_to_r8a66597_dev(r8a66597, urb)) { 1729 if (!get_urb_to_r8a66597_dev(r8a66597, urb)) {
1736 ret = -ENODEV; 1730 ret = -ENODEV;
1737 goto error; 1731 goto error_not_linked;
1738 } 1732 }
1739 1733
1734 ret = usb_hcd_link_urb_to_ep(hcd, urb);
1735 if (ret)
1736 goto error_not_linked;
1737
1740 if (!hep->hcpriv) { 1738 if (!hep->hcpriv) {
1741 hep->hcpriv = kzalloc(sizeof(struct r8a66597_pipe), 1739 hep->hcpriv = kzalloc(sizeof(struct r8a66597_pipe),
1742 GFP_ATOMIC); 1740 GFP_ATOMIC);
@@ -1761,15 +1759,7 @@ static int r8a66597_urb_enqueue(struct usb_hcd *hcd,
1761 if (list_empty(&r8a66597->pipe_queue[td->pipenum])) 1759 if (list_empty(&r8a66597->pipe_queue[td->pipenum]))
1762 request = 1; 1760 request = 1;
1763 list_add_tail(&td->queue, &r8a66597->pipe_queue[td->pipenum]); 1761 list_add_tail(&td->queue, &r8a66597->pipe_queue[td->pipenum]);
1764
1765 spin_lock(&urb->lock);
1766 if (urb->status != -EINPROGRESS) {
1767 spin_unlock(&urb->lock);
1768 ret = -EPIPE;
1769 goto error;
1770 }
1771 urb->hcpriv = td; 1762 urb->hcpriv = td;
1772 spin_unlock(&urb->lock);
1773 1763
1774 if (request) { 1764 if (request) {
1775 ret = start_transfer(r8a66597, td); 1765 ret = start_transfer(r8a66597, td);
@@ -1781,26 +1771,36 @@ static int r8a66597_urb_enqueue(struct usb_hcd *hcd,
1781 set_td_timer(r8a66597, td); 1771 set_td_timer(r8a66597, td);
1782 1772
1783error: 1773error:
1774 if (ret)
1775 usb_hcd_unlink_urb_from_ep(hcd, urb);
1776error_not_linked:
1784 spin_unlock_irqrestore(&r8a66597->lock, flags); 1777 spin_unlock_irqrestore(&r8a66597->lock, flags);
1785 return ret; 1778 return ret;
1786} 1779}
1787 1780
1788static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb) 1781static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1782 int status)
1789{ 1783{
1790 struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd); 1784 struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
1791 struct r8a66597_td *td; 1785 struct r8a66597_td *td;
1792 unsigned long flags; 1786 unsigned long flags;
1787 int rc;
1793 1788
1794 spin_lock_irqsave(&r8a66597->lock, flags); 1789 spin_lock_irqsave(&r8a66597->lock, flags);
1790 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
1791 if (rc)
1792 goto done;
1793
1795 if (urb->hcpriv) { 1794 if (urb->hcpriv) {
1796 td = urb->hcpriv; 1795 td = urb->hcpriv;
1797 pipe_stop(r8a66597, td->pipe); 1796 pipe_stop(r8a66597, td->pipe);
1798 pipe_irq_disable(r8a66597, td->pipenum); 1797 pipe_irq_disable(r8a66597, td->pipenum);
1799 disable_irq_empty(r8a66597, td->pipenum); 1798 disable_irq_empty(r8a66597, td->pipenum);
1800 done(r8a66597, td, td->pipenum, urb); 1799 finish_request(r8a66597, td, td->pipenum, urb, status);
1801 } 1800 }
1801 done:
1802 spin_unlock_irqrestore(&r8a66597->lock, flags); 1802 spin_unlock_irqrestore(&r8a66597->lock, flags);
1803 return 0; 1803 return rc;
1804} 1804}
1805 1805
1806static void r8a66597_endpoint_disable(struct usb_hcd *hcd, 1806static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
@@ -1830,7 +1830,7 @@ static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
1830 td = r8a66597_get_td(r8a66597, pipenum); 1830 td = r8a66597_get_td(r8a66597, pipenum);
1831 if (td) 1831 if (td)
1832 urb = td->urb; 1832 urb = td->urb;
1833 done(r8a66597, td, pipenum, urb); 1833 finish_request(r8a66597, td, pipenum, urb, -ESHUTDOWN);
1834 kfree(hep->hcpriv); 1834 kfree(hep->hcpriv);
1835 hep->hcpriv = NULL; 1835 hep->hcpriv = NULL;
1836 spin_unlock_irqrestore(&r8a66597->lock, flags); 1836 spin_unlock_irqrestore(&r8a66597->lock, flags);
@@ -2027,7 +2027,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2027 case GetPortStatus: 2027 case GetPortStatus:
2028 if (wIndex > R8A66597_MAX_ROOT_HUB) 2028 if (wIndex > R8A66597_MAX_ROOT_HUB)
2029 goto error; 2029 goto error;
2030 *(u32 *)buf = rh->port; 2030 *(u32 *)buf = cpu_to_le32(rh->port);
2031 break; 2031 break;
2032 case SetPortFeature: 2032 case SetPortFeature:
2033 if (wIndex > R8A66597_MAX_ROOT_HUB) 2033 if (wIndex > R8A66597_MAX_ROOT_HUB)
@@ -2126,8 +2126,8 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev)
2126 struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597); 2126 struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597);
2127 2127
2128 del_timer_sync(&r8a66597->rh_timer); 2128 del_timer_sync(&r8a66597->rh_timer);
2129 iounmap((void *)r8a66597->reg);
2130 usb_remove_hcd(hcd); 2129 usb_remove_hcd(hcd);
2130 iounmap((void *)r8a66597->reg);
2131 usb_put_hcd(hcd); 2131 usb_put_hcd(hcd);
2132 return 0; 2132 return 0;
2133} 2133}
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 4cfa3ff2c9..94d859aa73 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -435,14 +435,9 @@ static void finish_request(
435 if (usb_pipecontrol(urb->pipe)) 435 if (usb_pipecontrol(urb->pipe))
436 ep->nextpid = USB_PID_SETUP; 436 ep->nextpid = USB_PID_SETUP;
437 437
438 spin_lock(&urb->lock); 438 usb_hcd_unlink_urb_from_ep(sl811_to_hcd(sl811), urb);
439 if (urb->status == -EINPROGRESS)
440 urb->status = status;
441 urb->hcpriv = NULL;
442 spin_unlock(&urb->lock);
443
444 spin_unlock(&sl811->lock); 439 spin_unlock(&sl811->lock);
445 usb_hcd_giveback_urb(sl811_to_hcd(sl811), urb); 440 usb_hcd_giveback_urb(sl811_to_hcd(sl811), urb, status);
446 spin_lock(&sl811->lock); 441 spin_lock(&sl811->lock);
447 442
448 /* leave active endpoints in the schedule */ 443 /* leave active endpoints in the schedule */
@@ -538,35 +533,21 @@ done(struct sl811 *sl811, struct sl811h_ep *ep, u8 bank)
538 bank + SL11H_XFERCNTREG); 533 bank + SL11H_XFERCNTREG);
539 if (len > ep->length) { 534 if (len > ep->length) {
540 len = ep->length; 535 len = ep->length;
541 urb->status = -EOVERFLOW; 536 urbstat = -EOVERFLOW;
542 } 537 }
543 urb->actual_length += len; 538 urb->actual_length += len;
544 sl811_read_buf(sl811, SL811HS_PACKET_BUF(bank == 0), 539 sl811_read_buf(sl811, SL811HS_PACKET_BUF(bank == 0),
545 buf, len); 540 buf, len);
546 usb_dotoggle(udev, ep->epnum, 0); 541 usb_dotoggle(udev, ep->epnum, 0);
547 if (urb->actual_length == urb->transfer_buffer_length) 542 if (urbstat == -EINPROGRESS &&
548 urbstat = 0; 543 (len < ep->maxpacket ||
549 else if (len < ep->maxpacket) { 544 urb->actual_length ==
550 if (urb->transfer_flags & URB_SHORT_NOT_OK) 545 urb->transfer_buffer_length)) {
551 urbstat = -EREMOTEIO; 546 if (usb_pipecontrol(urb->pipe))
547 ep->nextpid = USB_PID_ACK;
552 else 548 else
553 urbstat = 0; 549 urbstat = 0;
554 } 550 }
555 if (usb_pipecontrol(urb->pipe)
556 && (urbstat == -EREMOTEIO
557 || urbstat == 0)) {
558
559 /* NOTE if the status stage STALLs (why?),
560 * this reports the wrong urb status.
561 */
562 spin_lock(&urb->lock);
563 if (urb->status == -EINPROGRESS)
564 urb->status = urbstat;
565 spin_unlock(&urb->lock);
566
567 urb = NULL;
568 ep->nextpid = USB_PID_ACK;
569 }
570 break; 551 break;
571 case USB_PID_SETUP: 552 case USB_PID_SETUP:
572 // PACKET("...ACK/setup_%02x qh%p\n", bank, ep); 553 // PACKET("...ACK/setup_%02x qh%p\n", bank, ep);
@@ -605,7 +586,7 @@ done(struct sl811 *sl811, struct sl811h_ep *ep, u8 bank)
605 bank, status, ep, urbstat); 586 bank, status, ep, urbstat);
606 } 587 }
607 588
608 if (urb && (urbstat != -EINPROGRESS || urb->status != -EINPROGRESS)) 589 if (urbstat != -EINPROGRESS || urb->unlinked)
609 finish_request(sl811, ep, urb, urbstat); 590 finish_request(sl811, ep, urb, urbstat);
610} 591}
611 592
@@ -807,7 +788,6 @@ static int balance(struct sl811 *sl811, u16 period, u16 load)
807 788
808static int sl811h_urb_enqueue( 789static int sl811h_urb_enqueue(
809 struct usb_hcd *hcd, 790 struct usb_hcd *hcd,
810 struct usb_host_endpoint *hep,
811 struct urb *urb, 791 struct urb *urb,
812 gfp_t mem_flags 792 gfp_t mem_flags
813) { 793) {
@@ -820,7 +800,8 @@ static int sl811h_urb_enqueue(
820 struct sl811h_ep *ep = NULL; 800 struct sl811h_ep *ep = NULL;
821 unsigned long flags; 801 unsigned long flags;
822 int i; 802 int i;
823 int retval = 0; 803 int retval;
804 struct usb_host_endpoint *hep = urb->ep;
824 805
825#ifdef DISABLE_ISO 806#ifdef DISABLE_ISO
826 if (type == PIPE_ISOCHRONOUS) 807 if (type == PIPE_ISOCHRONOUS)
@@ -838,7 +819,12 @@ static int sl811h_urb_enqueue(
838 || !HC_IS_RUNNING(hcd->state)) { 819 || !HC_IS_RUNNING(hcd->state)) {
839 retval = -ENODEV; 820 retval = -ENODEV;
840 kfree(ep); 821 kfree(ep);
841 goto fail; 822 goto fail_not_linked;
823 }
824 retval = usb_hcd_link_urb_to_ep(hcd, urb);
825 if (retval) {
826 kfree(ep);
827 goto fail_not_linked;
842 } 828 }
843 829
844 if (hep->hcpriv) { 830 if (hep->hcpriv) {
@@ -951,37 +937,31 @@ static int sl811h_urb_enqueue(
951 sofirq_on(sl811); 937 sofirq_on(sl811);
952 } 938 }
953 939
954 /* in case of unlink-during-submit */
955 spin_lock(&urb->lock);
956 if (urb->status != -EINPROGRESS) {
957 spin_unlock(&urb->lock);
958 finish_request(sl811, ep, urb, 0);
959 retval = 0;
960 goto fail;
961 }
962 urb->hcpriv = hep; 940 urb->hcpriv = hep;
963 spin_unlock(&urb->lock);
964
965 start_transfer(sl811); 941 start_transfer(sl811);
966 sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable); 942 sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);
967fail: 943fail:
944 if (retval)
945 usb_hcd_unlink_urb_from_ep(hcd, urb);
946fail_not_linked:
968 spin_unlock_irqrestore(&sl811->lock, flags); 947 spin_unlock_irqrestore(&sl811->lock, flags);
969 return retval; 948 return retval;
970} 949}
971 950
972static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb) 951static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
973{ 952{
974 struct sl811 *sl811 = hcd_to_sl811(hcd); 953 struct sl811 *sl811 = hcd_to_sl811(hcd);
975 struct usb_host_endpoint *hep; 954 struct usb_host_endpoint *hep;
976 unsigned long flags; 955 unsigned long flags;
977 struct sl811h_ep *ep; 956 struct sl811h_ep *ep;
978 int retval = 0; 957 int retval;
979 958
980 spin_lock_irqsave(&sl811->lock, flags); 959 spin_lock_irqsave(&sl811->lock, flags);
981 hep = urb->hcpriv; 960 retval = usb_hcd_check_unlink_urb(hcd, urb, status);
982 if (!hep) 961 if (retval)
983 goto fail; 962 goto fail;
984 963
964 hep = urb->hcpriv;
985 ep = hep->hcpriv; 965 ep = hep->hcpriv;
986 if (ep) { 966 if (ep) {
987 /* finish right away if this urb can't be active ... 967 /* finish right away if this urb can't be active ...
@@ -1029,8 +1009,8 @@ static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
1029 VDBG("dequeue, urb %p active %s; wait4irq\n", urb, 1009 VDBG("dequeue, urb %p active %s; wait4irq\n", urb,
1030 (sl811->active_a == ep) ? "A" : "B"); 1010 (sl811->active_a == ep) ? "A" : "B");
1031 } else 1011 } else
1032fail:
1033 retval = -EINVAL; 1012 retval = -EINVAL;
1013 fail:
1034 spin_unlock_irqrestore(&sl811->lock, flags); 1014 spin_unlock_irqrestore(&sl811->lock, flags);
1035 return retval; 1015 return retval;
1036} 1016}
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index b88eb3c62c..ac283b09a6 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -51,7 +51,6 @@
51#include <linux/usb.h> 51#include <linux/usb.h>
52#include <linux/workqueue.h> 52#include <linux/workqueue.h>
53#include <linux/platform_device.h> 53#include <linux/platform_device.h>
54#include <linux/pci_ids.h>
55#include <linux/mutex.h> 54#include <linux/mutex.h>
56#include <asm/io.h> 55#include <asm/io.h>
57#include <asm/irq.h> 56#include <asm/irq.h>
@@ -184,7 +183,7 @@ struct u132_ring {
184struct u132 { 183struct u132 {
185 struct kref kref; 184 struct kref kref;
186 struct list_head u132_list; 185 struct list_head u132_list;
187 struct semaphore sw_lock; 186 struct mutex sw_lock;
188 struct semaphore scheduler_lock; 187 struct semaphore scheduler_lock;
189 struct u132_platform_data *board; 188 struct u132_platform_data *board;
190 struct platform_device *platform_dev; 189 struct platform_device *platform_dev;
@@ -493,20 +492,20 @@ static void u132_hcd_monitor_work(struct work_struct *work)
493 return; 492 return;
494 } else { 493 } else {
495 int retval; 494 int retval;
496 down(&u132->sw_lock); 495 mutex_lock(&u132->sw_lock);
497 retval = read_roothub_info(u132); 496 retval = read_roothub_info(u132);
498 if (retval) { 497 if (retval) {
499 struct usb_hcd *hcd = u132_to_hcd(u132); 498 struct usb_hcd *hcd = u132_to_hcd(u132);
500 u132_disable(u132); 499 u132_disable(u132);
501 u132->going = 1; 500 u132->going = 1;
502 up(&u132->sw_lock); 501 mutex_unlock(&u132->sw_lock);
503 usb_hc_died(hcd); 502 usb_hc_died(hcd);
504 ftdi_elan_gone_away(u132->platform_dev); 503 ftdi_elan_gone_away(u132->platform_dev);
505 u132_monitor_put_kref(u132); 504 u132_monitor_put_kref(u132);
506 return; 505 return;
507 } else { 506 } else {
508 u132_monitor_requeue_work(u132, 500); 507 u132_monitor_requeue_work(u132, 500);
509 up(&u132->sw_lock); 508 mutex_unlock(&u132->sw_lock);
510 return; 509 return;
511 } 510 }
512 } 511 }
@@ -519,9 +518,8 @@ static void u132_hcd_giveback_urb(struct u132 *u132, struct u132_endp *endp,
519 unsigned long irqs; 518 unsigned long irqs;
520 struct usb_hcd *hcd = u132_to_hcd(u132); 519 struct usb_hcd *hcd = u132_to_hcd(u132);
521 urb->error_count = 0; 520 urb->error_count = 0;
522 urb->status = status;
523 urb->hcpriv = NULL;
524 spin_lock_irqsave(&endp->queue_lock.slock, irqs); 521 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
522 usb_hcd_unlink_urb_from_ep(hcd, urb);
525 endp->queue_next += 1; 523 endp->queue_next += 1;
526 if (ENDP_QUEUE_SIZE > --endp->queue_size) { 524 if (ENDP_QUEUE_SIZE > --endp->queue_size) {
527 endp->active = 0; 525 endp->active = 0;
@@ -543,7 +541,7 @@ static void u132_hcd_giveback_urb(struct u132 *u132, struct u132_endp *endp,
543 u132_ring_queue_work(u132, ring, 0); 541 u132_ring_queue_work(u132, ring, 0);
544 up(&u132->scheduler_lock); 542 up(&u132->scheduler_lock);
545 u132_endp_put_kref(u132, endp); 543 u132_endp_put_kref(u132, endp);
546 usb_hcd_giveback_urb(hcd, urb); 544 usb_hcd_giveback_urb(hcd, urb, status);
547 return; 545 return;
548} 546}
549 547
@@ -559,9 +557,8 @@ static void u132_hcd_abandon_urb(struct u132 *u132, struct u132_endp *endp,
559 unsigned long irqs; 557 unsigned long irqs;
560 struct usb_hcd *hcd = u132_to_hcd(u132); 558 struct usb_hcd *hcd = u132_to_hcd(u132);
561 urb->error_count = 0; 559 urb->error_count = 0;
562 urb->status = status;
563 urb->hcpriv = NULL;
564 spin_lock_irqsave(&endp->queue_lock.slock, irqs); 560 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
561 usb_hcd_unlink_urb_from_ep(hcd, urb);
565 endp->queue_next += 1; 562 endp->queue_next += 1;
566 if (ENDP_QUEUE_SIZE > --endp->queue_size) { 563 if (ENDP_QUEUE_SIZE > --endp->queue_size) {
567 endp->active = 0; 564 endp->active = 0;
@@ -576,7 +573,7 @@ static void u132_hcd_abandon_urb(struct u132 *u132, struct u132_endp *endp,
576 endp->active = 0; 573 endp->active = 0;
577 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); 574 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
578 kfree(urbq); 575 kfree(urbq);
579 } usb_hcd_giveback_urb(hcd, urb); 576 } usb_hcd_giveback_urb(hcd, urb, status);
580 return; 577 return;
581} 578}
582 579
@@ -646,12 +643,12 @@ static void u132_hcd_interrupt_recv(void *data, struct urb *urb, u8 *buf,
646 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); 643 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
647 return; 644 return;
648 } else if (u132->going > 0) { 645 } else if (u132->going > 0) {
649 dev_err(&u132->platform_dev->dev, "device is being removed urb=" 646 dev_err(&u132->platform_dev->dev, "device is being removed "
650 "%p status=%d\n", urb, urb->status); 647 "urb=%p\n", urb);
651 up(&u132->scheduler_lock); 648 up(&u132->scheduler_lock);
652 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); 649 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
653 return; 650 return;
654 } else if (urb->status == -EINPROGRESS) { 651 } else if (!urb->unlinked) {
655 struct u132_ring *ring = endp->ring; 652 struct u132_ring *ring = endp->ring;
656 u8 *u = urb->transfer_buffer + urb->actual_length; 653 u8 *u = urb->transfer_buffer + urb->actual_length;
657 u8 *b = buf; 654 u8 *b = buf;
@@ -717,10 +714,10 @@ static void u132_hcd_interrupt_recv(void *data, struct urb *urb, u8 *buf,
717 return; 714 return;
718 } 715 }
719 } else { 716 } else {
720 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p statu" 717 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
721 "s=%d\n", urb, urb->status); 718 "unlinked=%d\n", urb, urb->unlinked);
722 up(&u132->scheduler_lock); 719 up(&u132->scheduler_lock);
723 u132_hcd_giveback_urb(u132, endp, urb, urb->status); 720 u132_hcd_giveback_urb(u132, endp, urb, 0);
724 return; 721 return;
725 } 722 }
726} 723}
@@ -745,12 +742,12 @@ static void u132_hcd_bulk_output_sent(void *data, struct urb *urb, u8 *buf,
745 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); 742 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
746 return; 743 return;
747 } else if (u132->going > 0) { 744 } else if (u132->going > 0) {
748 dev_err(&u132->platform_dev->dev, "device is being removed urb=" 745 dev_err(&u132->platform_dev->dev, "device is being removed "
749 "%p status=%d\n", urb, urb->status); 746 "urb=%p\n", urb);
750 up(&u132->scheduler_lock); 747 up(&u132->scheduler_lock);
751 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); 748 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
752 return; 749 return;
753 } else if (urb->status == -EINPROGRESS) { 750 } else if (!urb->unlinked) {
754 struct u132_ring *ring = endp->ring; 751 struct u132_ring *ring = endp->ring;
755 urb->actual_length += len; 752 urb->actual_length += len;
756 endp->toggle_bits = toggle_bits; 753 endp->toggle_bits = toggle_bits;
@@ -769,10 +766,10 @@ static void u132_hcd_bulk_output_sent(void *data, struct urb *urb, u8 *buf,
769 return; 766 return;
770 } 767 }
771 } else { 768 } else {
772 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p statu" 769 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
773 "s=%d\n", urb, urb->status); 770 "unlinked=%d\n", urb, urb->unlinked);
774 up(&u132->scheduler_lock); 771 up(&u132->scheduler_lock);
775 u132_hcd_giveback_urb(u132, endp, urb, urb->status); 772 u132_hcd_giveback_urb(u132, endp, urb, 0);
776 return; 773 return;
777 } 774 }
778} 775}
@@ -798,12 +795,12 @@ static void u132_hcd_bulk_input_recv(void *data, struct urb *urb, u8 *buf,
798 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); 795 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
799 return; 796 return;
800 } else if (u132->going > 0) { 797 } else if (u132->going > 0) {
801 dev_err(&u132->platform_dev->dev, "device is being removed urb=" 798 dev_err(&u132->platform_dev->dev, "device is being removed "
802 "%p status=%d\n", urb, urb->status); 799 "urb=%p\n", urb);
803 up(&u132->scheduler_lock); 800 up(&u132->scheduler_lock);
804 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); 801 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
805 return; 802 return;
806 } else if (urb->status == -EINPROGRESS) { 803 } else if (!urb->unlinked) {
807 struct u132_ring *ring = endp->ring; 804 struct u132_ring *ring = endp->ring;
808 u8 *u = urb->transfer_buffer + urb->actual_length; 805 u8 *u = urb->transfer_buffer + urb->actual_length;
809 u8 *b = buf; 806 u8 *b = buf;
@@ -872,10 +869,10 @@ static void u132_hcd_bulk_input_recv(void *data, struct urb *urb, u8 *buf,
872 return; 869 return;
873 } 870 }
874 } else { 871 } else {
875 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p statu" 872 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
876 "s=%d\n", urb, urb->status); 873 "unlinked=%d\n", urb, urb->unlinked);
877 up(&u132->scheduler_lock); 874 up(&u132->scheduler_lock);
878 u132_hcd_giveback_urb(u132, endp, urb, urb->status); 875 u132_hcd_giveback_urb(u132, endp, urb, 0);
879 return; 876 return;
880 } 877 }
881} 878}
@@ -899,20 +896,20 @@ static void u132_hcd_configure_empty_sent(void *data, struct urb *urb, u8 *buf,
899 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); 896 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
900 return; 897 return;
901 } else if (u132->going > 0) { 898 } else if (u132->going > 0) {
902 dev_err(&u132->platform_dev->dev, "device is being removed urb=" 899 dev_err(&u132->platform_dev->dev, "device is being removed "
903 "%p status=%d\n", urb, urb->status); 900 "urb=%p\n", urb);
904 up(&u132->scheduler_lock); 901 up(&u132->scheduler_lock);
905 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); 902 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
906 return; 903 return;
907 } else if (urb->status == -EINPROGRESS) { 904 } else if (!urb->unlinked) {
908 up(&u132->scheduler_lock); 905 up(&u132->scheduler_lock);
909 u132_hcd_giveback_urb(u132, endp, urb, 0); 906 u132_hcd_giveback_urb(u132, endp, urb, 0);
910 return; 907 return;
911 } else { 908 } else {
912 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p statu" 909 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
913 "s=%d\n", urb, urb->status); 910 "unlinked=%d\n", urb, urb->unlinked);
914 up(&u132->scheduler_lock); 911 up(&u132->scheduler_lock);
915 u132_hcd_giveback_urb(u132, endp, urb, urb->status); 912 u132_hcd_giveback_urb(u132, endp, urb, 0);
916 return; 913 return;
917 } 914 }
918} 915}
@@ -937,12 +934,12 @@ static void u132_hcd_configure_input_recv(void *data, struct urb *urb, u8 *buf,
937 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); 934 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
938 return; 935 return;
939 } else if (u132->going > 0) { 936 } else if (u132->going > 0) {
940 dev_err(&u132->platform_dev->dev, "device is being removed urb=" 937 dev_err(&u132->platform_dev->dev, "device is being removed "
941 "%p status=%d\n", urb, urb->status); 938 "urb=%p\n", urb);
942 up(&u132->scheduler_lock); 939 up(&u132->scheduler_lock);
943 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); 940 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
944 return; 941 return;
945 } else if (urb->status == -EINPROGRESS) { 942 } else if (!urb->unlinked) {
946 struct u132_ring *ring = endp->ring; 943 struct u132_ring *ring = endp->ring;
947 u8 *u = urb->transfer_buffer; 944 u8 *u = urb->transfer_buffer;
948 u8 *b = buf; 945 u8 *b = buf;
@@ -981,10 +978,10 @@ static void u132_hcd_configure_input_recv(void *data, struct urb *urb, u8 *buf,
981 return; 978 return;
982 } 979 }
983 } else { 980 } else {
984 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p statu" 981 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
985 "s=%d\n", urb, urb->status); 982 "unlinked=%d\n", urb, urb->unlinked);
986 up(&u132->scheduler_lock); 983 up(&u132->scheduler_lock);
987 u132_hcd_giveback_urb(u132, endp, urb, urb->status); 984 u132_hcd_giveback_urb(u132, endp, urb, 0);
988 return; 985 return;
989 } 986 }
990} 987}
@@ -1008,20 +1005,20 @@ static void u132_hcd_configure_empty_recv(void *data, struct urb *urb, u8 *buf,
1008 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); 1005 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1009 return; 1006 return;
1010 } else if (u132->going > 0) { 1007 } else if (u132->going > 0) {
1011 dev_err(&u132->platform_dev->dev, "device is being removed urb=" 1008 dev_err(&u132->platform_dev->dev, "device is being removed "
1012 "%p status=%d\n", urb, urb->status); 1009 "urb=%p\n", urb);
1013 up(&u132->scheduler_lock); 1010 up(&u132->scheduler_lock);
1014 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); 1011 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1015 return; 1012 return;
1016 } else if (urb->status == -EINPROGRESS) { 1013 } else if (!urb->unlinked) {
1017 up(&u132->scheduler_lock); 1014 up(&u132->scheduler_lock);
1018 u132_hcd_giveback_urb(u132, endp, urb, 0); 1015 u132_hcd_giveback_urb(u132, endp, urb, 0);
1019 return; 1016 return;
1020 } else { 1017 } else {
1021 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p statu" 1018 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
1022 "s=%d\n", urb, urb->status); 1019 "unlinked=%d\n", urb, urb->unlinked);
1023 up(&u132->scheduler_lock); 1020 up(&u132->scheduler_lock);
1024 u132_hcd_giveback_urb(u132, endp, urb, urb->status); 1021 u132_hcd_giveback_urb(u132, endp, urb, 0);
1025 return; 1022 return;
1026 } 1023 }
1027} 1024}
@@ -1046,12 +1043,12 @@ static void u132_hcd_configure_setup_sent(void *data, struct urb *urb, u8 *buf,
1046 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); 1043 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1047 return; 1044 return;
1048 } else if (u132->going > 0) { 1045 } else if (u132->going > 0) {
1049 dev_err(&u132->platform_dev->dev, "device is being removed urb=" 1046 dev_err(&u132->platform_dev->dev, "device is being removed "
1050 "%p status=%d\n", urb, urb->status); 1047 "urb=%p\n", urb);
1051 up(&u132->scheduler_lock); 1048 up(&u132->scheduler_lock);
1052 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); 1049 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1053 return; 1050 return;
1054 } else if (urb->status == -EINPROGRESS) { 1051 } else if (!urb->unlinked) {
1055 if (usb_pipein(urb->pipe)) { 1052 if (usb_pipein(urb->pipe)) {
1056 int retval; 1053 int retval;
1057 struct u132_ring *ring = endp->ring; 1054 struct u132_ring *ring = endp->ring;
@@ -1078,10 +1075,10 @@ static void u132_hcd_configure_setup_sent(void *data, struct urb *urb, u8 *buf,
1078 return; 1075 return;
1079 } 1076 }
1080 } else { 1077 } else {
1081 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p statu" 1078 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
1082 "s=%d\n", urb, urb->status); 1079 "unlinked=%d\n", urb, urb->unlinked);
1083 up(&u132->scheduler_lock); 1080 up(&u132->scheduler_lock);
1084 u132_hcd_giveback_urb(u132, endp, urb, urb->status); 1081 u132_hcd_giveback_urb(u132, endp, urb, 0);
1085 return; 1082 return;
1086 } 1083 }
1087} 1084}
@@ -1107,22 +1104,22 @@ static void u132_hcd_enumeration_empty_recv(void *data, struct urb *urb,
1107 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); 1104 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1108 return; 1105 return;
1109 } else if (u132->going > 0) { 1106 } else if (u132->going > 0) {
1110 dev_err(&u132->platform_dev->dev, "device is being removed urb=" 1107 dev_err(&u132->platform_dev->dev, "device is being removed "
1111 "%p status=%d\n", urb, urb->status); 1108 "urb=%p\n", urb);
1112 up(&u132->scheduler_lock); 1109 up(&u132->scheduler_lock);
1113 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); 1110 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1114 return; 1111 return;
1115 } else if (urb->status == -EINPROGRESS) { 1112 } else if (!urb->unlinked) {
1116 u132->addr[0].address = 0; 1113 u132->addr[0].address = 0;
1117 endp->usb_addr = udev->usb_addr; 1114 endp->usb_addr = udev->usb_addr;
1118 up(&u132->scheduler_lock); 1115 up(&u132->scheduler_lock);
1119 u132_hcd_giveback_urb(u132, endp, urb, 0); 1116 u132_hcd_giveback_urb(u132, endp, urb, 0);
1120 return; 1117 return;
1121 } else { 1118 } else {
1122 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p statu" 1119 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
1123 "s=%d\n", urb, urb->status); 1120 "unlinked=%d\n", urb, urb->unlinked);
1124 up(&u132->scheduler_lock); 1121 up(&u132->scheduler_lock);
1125 u132_hcd_giveback_urb(u132, endp, urb, urb->status); 1122 u132_hcd_giveback_urb(u132, endp, urb, 0);
1126 return; 1123 return;
1127 } 1124 }
1128} 1125}
@@ -1146,12 +1143,12 @@ static void u132_hcd_enumeration_address_sent(void *data, struct urb *urb,
1146 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); 1143 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1147 return; 1144 return;
1148 } else if (u132->going > 0) { 1145 } else if (u132->going > 0) {
1149 dev_err(&u132->platform_dev->dev, "device is being removed urb=" 1146 dev_err(&u132->platform_dev->dev, "device is being removed "
1150 "%p status=%d\n", urb, urb->status); 1147 "urb=%p\n", urb);
1151 up(&u132->scheduler_lock); 1148 up(&u132->scheduler_lock);
1152 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); 1149 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1153 return; 1150 return;
1154 } else if (urb->status == -EINPROGRESS) { 1151 } else if (!urb->unlinked) {
1155 int retval; 1152 int retval;
1156 struct u132_ring *ring = endp->ring; 1153 struct u132_ring *ring = endp->ring;
1157 up(&u132->scheduler_lock); 1154 up(&u132->scheduler_lock);
@@ -1163,10 +1160,10 @@ static void u132_hcd_enumeration_address_sent(void *data, struct urb *urb,
1163 u132_hcd_giveback_urb(u132, endp, urb, retval); 1160 u132_hcd_giveback_urb(u132, endp, urb, retval);
1164 return; 1161 return;
1165 } else { 1162 } else {
1166 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p statu" 1163 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
1167 "s=%d\n", urb, urb->status); 1164 "unlinked=%d\n", urb, urb->unlinked);
1168 up(&u132->scheduler_lock); 1165 up(&u132->scheduler_lock);
1169 u132_hcd_giveback_urb(u132, endp, urb, urb->status); 1166 u132_hcd_giveback_urb(u132, endp, urb, 0);
1170 return; 1167 return;
1171 } 1168 }
1172} 1169}
@@ -1190,20 +1187,20 @@ static void u132_hcd_initial_empty_sent(void *data, struct urb *urb, u8 *buf,
1190 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); 1187 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1191 return; 1188 return;
1192 } else if (u132->going > 0) { 1189 } else if (u132->going > 0) {
1193 dev_err(&u132->platform_dev->dev, "device is being removed urb=" 1190 dev_err(&u132->platform_dev->dev, "device is being removed "
1194 "%p status=%d\n", urb, urb->status); 1191 "urb=%p\n", urb);
1195 up(&u132->scheduler_lock); 1192 up(&u132->scheduler_lock);
1196 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); 1193 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1197 return; 1194 return;
1198 } else if (urb->status == -EINPROGRESS) { 1195 } else if (!urb->unlinked) {
1199 up(&u132->scheduler_lock); 1196 up(&u132->scheduler_lock);
1200 u132_hcd_giveback_urb(u132, endp, urb, 0); 1197 u132_hcd_giveback_urb(u132, endp, urb, 0);
1201 return; 1198 return;
1202 } else { 1199 } else {
1203 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p statu" 1200 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
1204 "s=%d\n", urb, urb->status); 1201 "unlinked=%d\n", urb, urb->unlinked);
1205 up(&u132->scheduler_lock); 1202 up(&u132->scheduler_lock);
1206 u132_hcd_giveback_urb(u132, endp, urb, urb->status); 1203 u132_hcd_giveback_urb(u132, endp, urb, 0);
1207 return; 1204 return;
1208 } 1205 }
1209} 1206}
@@ -1228,12 +1225,12 @@ static void u132_hcd_initial_input_recv(void *data, struct urb *urb, u8 *buf,
1228 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); 1225 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1229 return; 1226 return;
1230 } else if (u132->going > 0) { 1227 } else if (u132->going > 0) {
1231 dev_err(&u132->platform_dev->dev, "device is being removed urb=" 1228 dev_err(&u132->platform_dev->dev, "device is being removed "
1232 "%p status=%d\n", urb, urb->status); 1229 "urb=%p\n", urb);
1233 up(&u132->scheduler_lock); 1230 up(&u132->scheduler_lock);
1234 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); 1231 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1235 return; 1232 return;
1236 } else if (urb->status == -EINPROGRESS) { 1233 } else if (!urb->unlinked) {
1237 int retval; 1234 int retval;
1238 struct u132_ring *ring = endp->ring; 1235 struct u132_ring *ring = endp->ring;
1239 u8 *u = urb->transfer_buffer; 1236 u8 *u = urb->transfer_buffer;
@@ -1252,10 +1249,10 @@ static void u132_hcd_initial_input_recv(void *data, struct urb *urb, u8 *buf,
1252 u132_hcd_giveback_urb(u132, endp, urb, retval); 1249 u132_hcd_giveback_urb(u132, endp, urb, retval);
1253 return; 1250 return;
1254 } else { 1251 } else {
1255 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p statu" 1252 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
1256 "s=%d\n", urb, urb->status); 1253 "unlinked=%d\n", urb, urb->unlinked);
1257 up(&u132->scheduler_lock); 1254 up(&u132->scheduler_lock);
1258 u132_hcd_giveback_urb(u132, endp, urb, urb->status); 1255 u132_hcd_giveback_urb(u132, endp, urb, 0);
1259 return; 1256 return;
1260 } 1257 }
1261} 1258}
@@ -1280,12 +1277,12 @@ static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf,
1280 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); 1277 u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
1281 return; 1278 return;
1282 } else if (u132->going > 0) { 1279 } else if (u132->going > 0) {
1283 dev_err(&u132->platform_dev->dev, "device is being removed urb=" 1280 dev_err(&u132->platform_dev->dev, "device is being removed "
1284 "%p status=%d\n", urb, urb->status); 1281 "urb=%p\n", urb);
1285 up(&u132->scheduler_lock); 1282 up(&u132->scheduler_lock);
1286 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); 1283 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
1287 return; 1284 return;
1288 } else if (urb->status == -EINPROGRESS) { 1285 } else if (!urb->unlinked) {
1289 int retval; 1286 int retval;
1290 struct u132_ring *ring = endp->ring; 1287 struct u132_ring *ring = endp->ring;
1291 up(&u132->scheduler_lock); 1288 up(&u132->scheduler_lock);
@@ -1297,10 +1294,10 @@ static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf,
1297 u132_hcd_giveback_urb(u132, endp, urb, retval); 1294 u132_hcd_giveback_urb(u132, endp, urb, retval);
1298 return; 1295 return;
1299 } else { 1296 } else {
1300 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p statu" 1297 dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
1301 "s=%d\n", urb, urb->status); 1298 "unlinked=%d\n", urb, urb->unlinked);
1302 up(&u132->scheduler_lock); 1299 up(&u132->scheduler_lock);
1303 u132_hcd_giveback_urb(u132, endp, urb, urb->status); 1300 u132_hcd_giveback_urb(u132, endp, urb, 0);
1304 return; 1301 return;
1305 } 1302 }
1306} 1303}
@@ -1805,10 +1802,10 @@ static void u132_hcd_stop(struct usb_hcd *hcd)
1805 dev_err(&u132->platform_dev->dev, "device hcd=%p is being remov" 1802 dev_err(&u132->platform_dev->dev, "device hcd=%p is being remov"
1806 "ed\n", hcd); 1803 "ed\n", hcd);
1807 } else { 1804 } else {
1808 down(&u132->sw_lock); 1805 mutex_lock(&u132->sw_lock);
1809 msleep(100); 1806 msleep(100);
1810 u132_power(u132, 0); 1807 u132_power(u132, 0);
1811 up(&u132->sw_lock); 1808 mutex_unlock(&u132->sw_lock);
1812 } 1809 }
1813} 1810}
1814 1811
@@ -1830,7 +1827,7 @@ static int u132_hcd_start(struct usb_hcd *hcd)
1830 (pdev->dev.platform_data))->vendor; 1827 (pdev->dev.platform_data))->vendor;
1831 u16 device = ((struct u132_platform_data *) 1828 u16 device = ((struct u132_platform_data *)
1832 (pdev->dev.platform_data))->device; 1829 (pdev->dev.platform_data))->device;
1833 down(&u132->sw_lock); 1830 mutex_lock(&u132->sw_lock);
1834 msleep(10); 1831 msleep(10);
1835 if (vendor == PCI_VENDOR_ID_AMD && device == 0x740c) { 1832 if (vendor == PCI_VENDOR_ID_AMD && device == 0x740c) {
1836 u132->flags = OHCI_QUIRK_AMD756; 1833 u132->flags = OHCI_QUIRK_AMD756;
@@ -1845,7 +1842,7 @@ static int u132_hcd_start(struct usb_hcd *hcd)
1845 u132->going = 1; 1842 u132->going = 1;
1846 } 1843 }
1847 msleep(100); 1844 msleep(100);
1848 up(&u132->sw_lock); 1845 mutex_unlock(&u132->sw_lock);
1849 return retval; 1846 return retval;
1850 } else { 1847 } else {
1851 dev_err(&u132->platform_dev->dev, "platform_device missing\n"); 1848 dev_err(&u132->platform_dev->dev, "platform_device missing\n");
@@ -1865,32 +1862,44 @@ static int u132_hcd_reset(struct usb_hcd *hcd)
1865 return -ESHUTDOWN; 1862 return -ESHUTDOWN;
1866 } else { 1863 } else {
1867 int retval; 1864 int retval;
1868 down(&u132->sw_lock); 1865 mutex_lock(&u132->sw_lock);
1869 retval = u132_init(u132); 1866 retval = u132_init(u132);
1870 if (retval) { 1867 if (retval) {
1871 u132_disable(u132); 1868 u132_disable(u132);
1872 u132->going = 1; 1869 u132->going = 1;
1873 } 1870 }
1874 up(&u132->sw_lock); 1871 mutex_unlock(&u132->sw_lock);
1875 return retval; 1872 return retval;
1876 } 1873 }
1877} 1874}
1878 1875
1879static int create_endpoint_and_queue_int(struct u132 *u132, 1876static int create_endpoint_and_queue_int(struct u132 *u132,
1880 struct u132_udev *udev, struct usb_host_endpoint *hep, struct urb *urb, 1877 struct u132_udev *udev, struct urb *urb,
1881 struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address, 1878 struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
1882 gfp_t mem_flags) 1879 gfp_t mem_flags)
1883{ 1880{
1884 struct u132_ring *ring; 1881 struct u132_ring *ring;
1885 unsigned long irqs; 1882 unsigned long irqs;
1886 u8 endp_number = ++u132->num_endpoints; 1883 int rc;
1887 struct u132_endp *endp = hep->hcpriv = u132->endp[endp_number - 1] = 1884 u8 endp_number;
1888 kmalloc(sizeof(struct u132_endp), mem_flags); 1885 struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
1886
1889 if (!endp) { 1887 if (!endp) {
1890 return -ENOMEM; 1888 return -ENOMEM;
1891 } 1889 }
1890
1891 spin_lock_init(&endp->queue_lock.slock);
1892 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
1893 rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
1894 if (rc) {
1895 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
1896 kfree(endp);
1897 return rc;
1898 }
1899
1900 endp_number = ++u132->num_endpoints;
1901 urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
1892 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); 1902 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
1893 spin_lock_init(&endp->queue_lock.slock);
1894 INIT_LIST_HEAD(&endp->urb_more); 1903 INIT_LIST_HEAD(&endp->urb_more);
1895 ring = endp->ring = &u132->ring[0]; 1904 ring = endp->ring = &u132->ring[0];
1896 if (ring->curr_endp) { 1905 if (ring->curr_endp) {
@@ -1906,7 +1915,7 @@ static int create_endpoint_and_queue_int(struct u132 *u132,
1906 endp->delayed = 0; 1915 endp->delayed = 0;
1907 endp->endp_number = endp_number; 1916 endp->endp_number = endp_number;
1908 endp->u132 = u132; 1917 endp->u132 = u132;
1909 endp->hep = hep; 1918 endp->hep = urb->ep;
1910 endp->pipetype = usb_pipetype(urb->pipe); 1919 endp->pipetype = usb_pipetype(urb->pipe);
1911 u132_endp_init_kref(u132, endp); 1920 u132_endp_init_kref(u132, endp);
1912 if (usb_pipein(urb->pipe)) { 1921 if (usb_pipein(urb->pipe)) {
@@ -1925,7 +1934,6 @@ static int create_endpoint_and_queue_int(struct u132 *u132,
1925 u132_udev_get_kref(u132, udev); 1934 u132_udev_get_kref(u132, udev);
1926 } 1935 }
1927 urb->hcpriv = u132; 1936 urb->hcpriv = u132;
1928 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
1929 endp->delayed = 1; 1937 endp->delayed = 1;
1930 endp->jiffies = jiffies + msecs_to_jiffies(urb->interval); 1938 endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
1931 endp->udev_number = address; 1939 endp->udev_number = address;
@@ -1940,8 +1948,8 @@ static int create_endpoint_and_queue_int(struct u132 *u132,
1940 return 0; 1948 return 0;
1941} 1949}
1942 1950
1943static int queue_int_on_old_endpoint(struct u132 *u132, struct u132_udev *udev, 1951static int queue_int_on_old_endpoint(struct u132 *u132,
1944 struct usb_host_endpoint *hep, struct urb *urb, 1952 struct u132_udev *udev, struct urb *urb,
1945 struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr, 1953 struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
1946 u8 usb_endp, u8 address) 1954 u8 usb_endp, u8 address)
1947{ 1955{
@@ -1965,21 +1973,33 @@ static int queue_int_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
1965} 1973}
1966 1974
1967static int create_endpoint_and_queue_bulk(struct u132 *u132, 1975static int create_endpoint_and_queue_bulk(struct u132 *u132,
1968 struct u132_udev *udev, struct usb_host_endpoint *hep, struct urb *urb, 1976 struct u132_udev *udev, struct urb *urb,
1969 struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address, 1977 struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
1970 gfp_t mem_flags) 1978 gfp_t mem_flags)
1971{ 1979{
1972 int ring_number; 1980 int ring_number;
1973 struct u132_ring *ring; 1981 struct u132_ring *ring;
1974 unsigned long irqs; 1982 unsigned long irqs;
1975 u8 endp_number = ++u132->num_endpoints; 1983 int rc;
1976 struct u132_endp *endp = hep->hcpriv = u132->endp[endp_number - 1] = 1984 u8 endp_number;
1977 kmalloc(sizeof(struct u132_endp), mem_flags); 1985 struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
1986
1978 if (!endp) { 1987 if (!endp) {
1979 return -ENOMEM; 1988 return -ENOMEM;
1980 } 1989 }
1990
1991 spin_lock_init(&endp->queue_lock.slock);
1992 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
1993 rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
1994 if (rc) {
1995 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
1996 kfree(endp);
1997 return rc;
1998 }
1999
2000 endp_number = ++u132->num_endpoints;
2001 urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
1981 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); 2002 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
1982 spin_lock_init(&endp->queue_lock.slock);
1983 INIT_LIST_HEAD(&endp->urb_more); 2003 INIT_LIST_HEAD(&endp->urb_more);
1984 endp->dequeueing = 0; 2004 endp->dequeueing = 0;
1985 endp->edset_flush = 0; 2005 endp->edset_flush = 0;
@@ -1987,7 +2007,7 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132,
1987 endp->delayed = 0; 2007 endp->delayed = 0;
1988 endp->endp_number = endp_number; 2008 endp->endp_number = endp_number;
1989 endp->u132 = u132; 2009 endp->u132 = u132;
1990 endp->hep = hep; 2010 endp->hep = urb->ep;
1991 endp->pipetype = usb_pipetype(urb->pipe); 2011 endp->pipetype = usb_pipetype(urb->pipe);
1992 u132_endp_init_kref(u132, endp); 2012 u132_endp_init_kref(u132, endp);
1993 if (usb_pipein(urb->pipe)) { 2013 if (usb_pipein(urb->pipe)) {
@@ -2016,7 +2036,6 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132,
2016 } 2036 }
2017 ring->length += 1; 2037 ring->length += 1;
2018 urb->hcpriv = u132; 2038 urb->hcpriv = u132;
2019 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
2020 endp->udev_number = address; 2039 endp->udev_number = address;
2021 endp->usb_addr = usb_addr; 2040 endp->usb_addr = usb_addr;
2022 endp->usb_endp = usb_endp; 2041 endp->usb_endp = usb_endp;
@@ -2030,7 +2049,7 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132,
2030} 2049}
2031 2050
2032static int queue_bulk_on_old_endpoint(struct u132 *u132, struct u132_udev *udev, 2051static int queue_bulk_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
2033 struct usb_host_endpoint *hep, struct urb *urb, 2052 struct urb *urb,
2034 struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr, 2053 struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
2035 u8 usb_endp, u8 address) 2054 u8 usb_endp, u8 address)
2036{ 2055{
@@ -2052,19 +2071,32 @@ static int queue_bulk_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
2052} 2071}
2053 2072
2054static int create_endpoint_and_queue_control(struct u132 *u132, 2073static int create_endpoint_and_queue_control(struct u132 *u132,
2055 struct usb_host_endpoint *hep, struct urb *urb, 2074 struct urb *urb,
2056 struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, 2075 struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp,
2057 gfp_t mem_flags) 2076 gfp_t mem_flags)
2058{ 2077{
2059 struct u132_ring *ring; 2078 struct u132_ring *ring;
2060 u8 endp_number = ++u132->num_endpoints; 2079 unsigned long irqs;
2061 struct u132_endp *endp = hep->hcpriv = u132->endp[endp_number - 1] = 2080 int rc;
2062 kmalloc(sizeof(struct u132_endp), mem_flags); 2081 u8 endp_number;
2082 struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
2083
2063 if (!endp) { 2084 if (!endp) {
2064 return -ENOMEM; 2085 return -ENOMEM;
2065 } 2086 }
2087
2088 spin_lock_init(&endp->queue_lock.slock);
2089 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
2090 rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
2091 if (rc) {
2092 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2093 kfree(endp);
2094 return rc;
2095 }
2096
2097 endp_number = ++u132->num_endpoints;
2098 urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
2066 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); 2099 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
2067 spin_lock_init(&endp->queue_lock.slock);
2068 INIT_LIST_HEAD(&endp->urb_more); 2100 INIT_LIST_HEAD(&endp->urb_more);
2069 ring = endp->ring = &u132->ring[0]; 2101 ring = endp->ring = &u132->ring[0];
2070 if (ring->curr_endp) { 2102 if (ring->curr_endp) {
@@ -2080,11 +2112,10 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
2080 endp->delayed = 0; 2112 endp->delayed = 0;
2081 endp->endp_number = endp_number; 2113 endp->endp_number = endp_number;
2082 endp->u132 = u132; 2114 endp->u132 = u132;
2083 endp->hep = hep; 2115 endp->hep = urb->ep;
2084 u132_endp_init_kref(u132, endp); 2116 u132_endp_init_kref(u132, endp);
2085 u132_endp_get_kref(u132, endp); 2117 u132_endp_get_kref(u132, endp);
2086 if (usb_addr == 0) { 2118 if (usb_addr == 0) {
2087 unsigned long irqs;
2088 u8 address = u132->addr[usb_addr].address; 2119 u8 address = u132->addr[usb_addr].address;
2089 struct u132_udev *udev = &u132->udev[address]; 2120 struct u132_udev *udev = &u132->udev[address];
2090 endp->udev_number = address; 2121 endp->udev_number = address;
@@ -2098,7 +2129,6 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
2098 udev->endp_number_in[usb_endp] = endp_number; 2129 udev->endp_number_in[usb_endp] = endp_number;
2099 udev->endp_number_out[usb_endp] = endp_number; 2130 udev->endp_number_out[usb_endp] = endp_number;
2100 urb->hcpriv = u132; 2131 urb->hcpriv = u132;
2101 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
2102 endp->queue_size = 1; 2132 endp->queue_size = 1;
2103 endp->queue_last = 0; 2133 endp->queue_last = 0;
2104 endp->queue_next = 0; 2134 endp->queue_next = 0;
@@ -2107,7 +2137,6 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
2107 u132_endp_queue_work(u132, endp, 0); 2137 u132_endp_queue_work(u132, endp, 0);
2108 return 0; 2138 return 0;
2109 } else { /*(usb_addr > 0) */ 2139 } else { /*(usb_addr > 0) */
2110 unsigned long irqs;
2111 u8 address = u132->addr[usb_addr].address; 2140 u8 address = u132->addr[usb_addr].address;
2112 struct u132_udev *udev = &u132->udev[address]; 2141 struct u132_udev *udev = &u132->udev[address];
2113 endp->udev_number = address; 2142 endp->udev_number = address;
@@ -2121,7 +2150,6 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
2121 udev->endp_number_in[usb_endp] = endp_number; 2150 udev->endp_number_in[usb_endp] = endp_number;
2122 udev->endp_number_out[usb_endp] = endp_number; 2151 udev->endp_number_out[usb_endp] = endp_number;
2123 urb->hcpriv = u132; 2152 urb->hcpriv = u132;
2124 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
2125 endp->queue_size = 1; 2153 endp->queue_size = 1;
2126 endp->queue_last = 0; 2154 endp->queue_last = 0;
2127 endp->queue_next = 0; 2155 endp->queue_next = 0;
@@ -2133,7 +2161,7 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
2133} 2161}
2134 2162
2135static int queue_control_on_old_endpoint(struct u132 *u132, 2163static int queue_control_on_old_endpoint(struct u132 *u132,
2136 struct usb_host_endpoint *hep, struct urb *urb, 2164 struct urb *urb,
2137 struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr, 2165 struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
2138 u8 usb_endp) 2166 u8 usb_endp)
2139{ 2167{
@@ -2233,8 +2261,8 @@ static int queue_control_on_old_endpoint(struct u132 *u132,
2233 } 2261 }
2234} 2262}
2235 2263
2236static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep, 2264static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
2237 struct urb *urb, gfp_t mem_flags) 2265 gfp_t mem_flags)
2238{ 2266{
2239 struct u132 *u132 = hcd_to_u132(hcd); 2267 struct u132 *u132 = hcd_to_u132(hcd);
2240 if (irqs_disabled()) { 2268 if (irqs_disabled()) {
@@ -2249,8 +2277,8 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
2249 , u132->going); 2277 , u132->going);
2250 return -ENODEV; 2278 return -ENODEV;
2251 } else if (u132->going > 0) { 2279 } else if (u132->going > 0) {
2252 dev_err(&u132->platform_dev->dev, "device is being removed urb=" 2280 dev_err(&u132->platform_dev->dev, "device is being removed "
2253 "%p status=%d\n", urb, urb->status); 2281 "urb=%p\n", urb);
2254 return -ESHUTDOWN; 2282 return -ESHUTDOWN;
2255 } else { 2283 } else {
2256 u8 usb_addr = usb_pipedevice(urb->pipe); 2284 u8 usb_addr = usb_pipedevice(urb->pipe);
@@ -2259,16 +2287,24 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
2259 if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { 2287 if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
2260 u8 address = u132->addr[usb_addr].address; 2288 u8 address = u132->addr[usb_addr].address;
2261 struct u132_udev *udev = &u132->udev[address]; 2289 struct u132_udev *udev = &u132->udev[address];
2262 struct u132_endp *endp = hep->hcpriv; 2290 struct u132_endp *endp = urb->ep->hcpriv;
2263 urb->actual_length = 0; 2291 urb->actual_length = 0;
2264 if (endp) { 2292 if (endp) {
2265 unsigned long irqs; 2293 unsigned long irqs;
2266 int retval; 2294 int retval;
2267 spin_lock_irqsave(&endp->queue_lock.slock, 2295 spin_lock_irqsave(&endp->queue_lock.slock,
2268 irqs); 2296 irqs);
2269 retval = queue_int_on_old_endpoint(u132, udev, 2297 retval = usb_hcd_link_urb_to_ep(hcd, urb);
2270 hep, urb, usb_dev, endp, usb_addr, 2298 if (retval == 0) {
2271 usb_endp, address); 2299 retval = queue_int_on_old_endpoint(
2300 u132, udev, urb,
2301 usb_dev, endp,
2302 usb_addr, usb_endp,
2303 address);
2304 if (retval)
2305 usb_hcd_unlink_urb_from_ep(
2306 hcd, urb);
2307 }
2272 spin_unlock_irqrestore(&endp->queue_lock.slock, 2308 spin_unlock_irqrestore(&endp->queue_lock.slock,
2273 irqs); 2309 irqs);
2274 if (retval) { 2310 if (retval) {
@@ -2283,8 +2319,8 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
2283 return -EINVAL; 2319 return -EINVAL;
2284 } else { /*(endp == NULL) */ 2320 } else { /*(endp == NULL) */
2285 return create_endpoint_and_queue_int(u132, udev, 2321 return create_endpoint_and_queue_int(u132, udev,
2286 hep, urb, usb_dev, usb_addr, usb_endp, 2322 urb, usb_dev, usb_addr,
2287 address, mem_flags); 2323 usb_endp, address, mem_flags);
2288 } 2324 }
2289 } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 2325 } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
2290 dev_err(&u132->platform_dev->dev, "the hardware does no" 2326 dev_err(&u132->platform_dev->dev, "the hardware does no"
@@ -2293,16 +2329,24 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
2293 } else if (usb_pipetype(urb->pipe) == PIPE_BULK) { 2329 } else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
2294 u8 address = u132->addr[usb_addr].address; 2330 u8 address = u132->addr[usb_addr].address;
2295 struct u132_udev *udev = &u132->udev[address]; 2331 struct u132_udev *udev = &u132->udev[address];
2296 struct u132_endp *endp = hep->hcpriv; 2332 struct u132_endp *endp = urb->ep->hcpriv;
2297 urb->actual_length = 0; 2333 urb->actual_length = 0;
2298 if (endp) { 2334 if (endp) {
2299 unsigned long irqs; 2335 unsigned long irqs;
2300 int retval; 2336 int retval;
2301 spin_lock_irqsave(&endp->queue_lock.slock, 2337 spin_lock_irqsave(&endp->queue_lock.slock,
2302 irqs); 2338 irqs);
2303 retval = queue_bulk_on_old_endpoint(u132, udev, 2339 retval = usb_hcd_link_urb_to_ep(hcd, urb);
2304 hep, urb, usb_dev, endp, usb_addr, 2340 if (retval == 0) {
2305 usb_endp, address); 2341 retval = queue_bulk_on_old_endpoint(
2342 u132, udev, urb,
2343 usb_dev, endp,
2344 usb_addr, usb_endp,
2345 address);
2346 if (retval)
2347 usb_hcd_unlink_urb_from_ep(
2348 hcd, urb);
2349 }
2306 spin_unlock_irqrestore(&endp->queue_lock.slock, 2350 spin_unlock_irqrestore(&endp->queue_lock.slock,
2307 irqs); 2351 irqs);
2308 if (retval) { 2352 if (retval) {
@@ -2315,10 +2359,10 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
2315 return -EINVAL; 2359 return -EINVAL;
2316 } else 2360 } else
2317 return create_endpoint_and_queue_bulk(u132, 2361 return create_endpoint_and_queue_bulk(u132,
2318 udev, hep, urb, usb_dev, usb_addr, 2362 udev, urb, usb_dev, usb_addr,
2319 usb_endp, address, mem_flags); 2363 usb_endp, address, mem_flags);
2320 } else { 2364 } else {
2321 struct u132_endp *endp = hep->hcpriv; 2365 struct u132_endp *endp = urb->ep->hcpriv;
2322 u16 urb_size = 8; 2366 u16 urb_size = 8;
2323 u8 *b = urb->setup_packet; 2367 u8 *b = urb->setup_packet;
2324 int i = 0; 2368 int i = 0;
@@ -2341,9 +2385,16 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
2341 int retval; 2385 int retval;
2342 spin_lock_irqsave(&endp->queue_lock.slock, 2386 spin_lock_irqsave(&endp->queue_lock.slock,
2343 irqs); 2387 irqs);
2344 retval = queue_control_on_old_endpoint(u132, 2388 retval = usb_hcd_link_urb_to_ep(hcd, urb);
2345 hep, urb, usb_dev, endp, usb_addr, 2389 if (retval == 0) {
2346 usb_endp); 2390 retval = queue_control_on_old_endpoint(
2391 u132, urb, usb_dev,
2392 endp, usb_addr,
2393 usb_endp);
2394 if (retval)
2395 usb_hcd_unlink_urb_from_ep(
2396 hcd, urb);
2397 }
2347 spin_unlock_irqrestore(&endp->queue_lock.slock, 2398 spin_unlock_irqrestore(&endp->queue_lock.slock,
2348 irqs); 2399 irqs);
2349 if (retval) { 2400 if (retval) {
@@ -2356,7 +2407,7 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
2356 return -EINVAL; 2407 return -EINVAL;
2357 } else 2408 } else
2358 return create_endpoint_and_queue_control(u132, 2409 return create_endpoint_and_queue_control(u132,
2359 hep, urb, usb_dev, usb_addr, usb_endp, 2410 urb, usb_dev, usb_addr, usb_endp,
2360 mem_flags); 2411 mem_flags);
2361 } 2412 }
2362 } 2413 }
@@ -2375,8 +2426,7 @@ static int dequeue_from_overflow_chain(struct u132 *u132,
2375 list_del(scan); 2426 list_del(scan);
2376 endp->queue_size -= 1; 2427 endp->queue_size -= 1;
2377 urb->error_count = 0; 2428 urb->error_count = 0;
2378 urb->hcpriv = NULL; 2429 usb_hcd_giveback_urb(hcd, urb, 0);
2379 usb_hcd_giveback_urb(hcd, urb);
2380 return 0; 2430 return 0;
2381 } else 2431 } else
2382 continue; 2432 continue;
@@ -2391,10 +2441,17 @@ static int dequeue_from_overflow_chain(struct u132 *u132,
2391} 2441}
2392 2442
2393static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp, 2443static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
2394 struct urb *urb) 2444 struct urb *urb, int status)
2395{ 2445{
2396 unsigned long irqs; 2446 unsigned long irqs;
2447 int rc;
2448
2397 spin_lock_irqsave(&endp->queue_lock.slock, irqs); 2449 spin_lock_irqsave(&endp->queue_lock.slock, irqs);
2450 rc = usb_hcd_check_unlink_urb(u132_to_hcd(u132), urb, status);
2451 if (rc) {
2452 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2453 return rc;
2454 }
2398 if (endp->queue_size == 0) { 2455 if (endp->queue_size == 0) {
2399 dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]" 2456 dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]"
2400 "=%p ring[%d] %c%c usb_endp=%d usb_addr=%d\n", urb, 2457 "=%p ring[%d] %c%c usb_endp=%d usb_addr=%d\n", urb,
@@ -2410,11 +2467,10 @@ static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
2410 endp->edset_flush = 1; 2467 endp->edset_flush = 1;
2411 u132_endp_queue_work(u132, endp, 0); 2468 u132_endp_queue_work(u132, endp, 0);
2412 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); 2469 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2413 urb->hcpriv = NULL;
2414 return 0; 2470 return 0;
2415 } else { 2471 } else {
2416 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); 2472 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2417 u132_hcd_abandon_urb(u132, endp, urb, urb->status); 2473 u132_hcd_abandon_urb(u132, endp, urb, status);
2418 return 0; 2474 return 0;
2419 } 2475 }
2420 } else { 2476 } else {
@@ -2439,6 +2495,8 @@ static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
2439 } 2495 }
2440 if (urb_slot) { 2496 if (urb_slot) {
2441 struct usb_hcd *hcd = u132_to_hcd(u132); 2497 struct usb_hcd *hcd = u132_to_hcd(u132);
2498
2499 usb_hcd_unlink_urb_from_ep(hcd, urb);
2442 endp->queue_size -= 1; 2500 endp->queue_size -= 1;
2443 if (list_empty(&endp->urb_more)) { 2501 if (list_empty(&endp->urb_more)) {
2444 spin_unlock_irqrestore(&endp->queue_lock.slock, 2502 spin_unlock_irqrestore(&endp->queue_lock.slock,
@@ -2453,8 +2511,7 @@ static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
2453 irqs); 2511 irqs);
2454 kfree(urbq); 2512 kfree(urbq);
2455 } urb->error_count = 0; 2513 } urb->error_count = 0;
2456 urb->hcpriv = NULL; 2514 usb_hcd_giveback_urb(hcd, urb, status);
2457 usb_hcd_giveback_urb(hcd, urb);
2458 return 0; 2515 return 0;
2459 } else if (list_empty(&endp->urb_more)) { 2516 } else if (list_empty(&endp->urb_more)) {
2460 dev_err(&u132->platform_dev->dev, "urb=%p not found in " 2517 dev_err(&u132->platform_dev->dev, "urb=%p not found in "
@@ -2468,7 +2525,10 @@ static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
2468 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); 2525 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2469 return -EINVAL; 2526 return -EINVAL;
2470 } else { 2527 } else {
2471 int retval = dequeue_from_overflow_chain(u132, endp, 2528 int retval;
2529
2530 usb_hcd_unlink_urb_from_ep(u132_to_hcd(u132), urb);
2531 retval = dequeue_from_overflow_chain(u132, endp,
2472 urb); 2532 urb);
2473 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); 2533 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
2474 return retval; 2534 return retval;
@@ -2476,7 +2536,7 @@ static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
2476 } 2536 }
2477} 2537}
2478 2538
2479static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb) 2539static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2480{ 2540{
2481 struct u132 *u132 = hcd_to_u132(hcd); 2541 struct u132 *u132 = hcd_to_u132(hcd);
2482 if (u132->going > 2) { 2542 if (u132->going > 2) {
@@ -2491,11 +2551,11 @@ static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
2491 if (usb_pipein(urb->pipe)) { 2551 if (usb_pipein(urb->pipe)) {
2492 u8 endp_number = udev->endp_number_in[usb_endp]; 2552 u8 endp_number = udev->endp_number_in[usb_endp];
2493 struct u132_endp *endp = u132->endp[endp_number - 1]; 2553 struct u132_endp *endp = u132->endp[endp_number - 1];
2494 return u132_endp_urb_dequeue(u132, endp, urb); 2554 return u132_endp_urb_dequeue(u132, endp, urb, status);
2495 } else { 2555 } else {
2496 u8 endp_number = udev->endp_number_out[usb_endp]; 2556 u8 endp_number = udev->endp_number_out[usb_endp];
2497 struct u132_endp *endp = u132->endp[endp_number - 1]; 2557 struct u132_endp *endp = u132->endp[endp_number - 1];
2498 return u132_endp_urb_dequeue(u132, endp, urb); 2558 return u132_endp_urb_dequeue(u132, endp, urb, status);
2499 } 2559 }
2500 } 2560 }
2501} 2561}
@@ -2805,7 +2865,7 @@ static int u132_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2805 return -ESHUTDOWN; 2865 return -ESHUTDOWN;
2806 } else { 2866 } else {
2807 int retval = 0; 2867 int retval = 0;
2808 down(&u132->sw_lock); 2868 mutex_lock(&u132->sw_lock);
2809 switch (typeReq) { 2869 switch (typeReq) {
2810 case ClearHubFeature: 2870 case ClearHubFeature:
2811 switch (wValue) { 2871 switch (wValue) {
@@ -2868,7 +2928,7 @@ static int u132_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2868 stall:retval = -EPIPE; 2928 stall:retval = -EPIPE;
2869 break; 2929 break;
2870 } 2930 }
2871 up(&u132->sw_lock); 2931 mutex_unlock(&u132->sw_lock);
2872 return retval; 2932 return retval;
2873 } 2933 }
2874} 2934}
@@ -3004,7 +3064,7 @@ static int __devexit u132_remove(struct platform_device *pdev)
3004 dev_err(&u132->platform_dev->dev, "removing device u132" 3064 dev_err(&u132->platform_dev->dev, "removing device u132"
3005 ".%d\n", u132->sequence_num); 3065 ".%d\n", u132->sequence_num);
3006 msleep(100); 3066 msleep(100);
3007 down(&u132->sw_lock); 3067 mutex_lock(&u132->sw_lock);
3008 u132_monitor_cancel_work(u132); 3068 u132_monitor_cancel_work(u132);
3009 while (rings-- > 0) { 3069 while (rings-- > 0) {
3010 struct u132_ring *ring = &u132->ring[rings]; 3070 struct u132_ring *ring = &u132->ring[rings];
@@ -3017,7 +3077,7 @@ static int __devexit u132_remove(struct platform_device *pdev)
3017 u132->going += 1; 3077 u132->going += 1;
3018 printk(KERN_INFO "removing device u132.%d\n", 3078 printk(KERN_INFO "removing device u132.%d\n",
3019 u132->sequence_num); 3079 u132->sequence_num);
3020 up(&u132->sw_lock); 3080 mutex_unlock(&u132->sw_lock);
3021 usb_remove_hcd(hcd); 3081 usb_remove_hcd(hcd);
3022 u132_u132_put_kref(u132); 3082 u132_u132_put_kref(u132);
3023 return 0; 3083 return 0;
@@ -3037,7 +3097,7 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
3037 u132->platform_dev = pdev; 3097 u132->platform_dev = pdev;
3038 u132->power = 0; 3098 u132->power = 0;
3039 u132->reset = 0; 3099 u132->reset = 0;
3040 init_MUTEX(&u132->sw_lock); 3100 mutex_init(&u132->sw_lock);
3041 init_MUTEX(&u132->scheduler_lock); 3101 init_MUTEX(&u132->scheduler_lock);
3042 while (rings-- > 0) { 3102 while (rings-- > 0) {
3043 struct u132_ring *ring = &u132->ring[rings]; 3103 struct u132_ring *ring = &u132->ring[rings];
@@ -3047,7 +3107,7 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
3047 ring->curr_endp = NULL; 3107 ring->curr_endp = NULL;
3048 INIT_DELAYED_WORK(&ring->scheduler, 3108 INIT_DELAYED_WORK(&ring->scheduler,
3049 u132_hcd_ring_work_scheduler); 3109 u132_hcd_ring_work_scheduler);
3050 } down(&u132->sw_lock); 3110 } mutex_lock(&u132->sw_lock);
3051 INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work); 3111 INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
3052 while (ports-- > 0) { 3112 while (ports-- > 0) {
3053 struct u132_port *port = &u132->port[ports]; 3113 struct u132_port *port = &u132->port[ports];
@@ -3077,7 +3137,7 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
3077 while (endps-- > 0) { 3137 while (endps-- > 0) {
3078 u132->endp[endps] = NULL; 3138 u132->endp[endps] = NULL;
3079 } 3139 }
3080 up(&u132->sw_lock); 3140 mutex_unlock(&u132->sw_lock);
3081 return; 3141 return;
3082} 3142}
3083 3143
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c
index 1497371583..20cc58b978 100644
--- a/drivers/usb/host/uhci-debug.c
+++ b/drivers/usb/host/uhci-debug.c
@@ -120,8 +120,8 @@ static int uhci_show_urbp(struct urb_priv *urbp, char *buf, int len, int space)
120 out += sprintf(out, "%s%s", ptype, (urbp->fsbr ? " FSBR" : "")); 120 out += sprintf(out, "%s%s", ptype, (urbp->fsbr ? " FSBR" : ""));
121 out += sprintf(out, " Actlen=%d", urbp->urb->actual_length); 121 out += sprintf(out, " Actlen=%d", urbp->urb->actual_length);
122 122
123 if (urbp->urb->status != -EINPROGRESS) 123 if (urbp->urb->unlinked)
124 out += sprintf(out, " Status=%d", urbp->urb->status); 124 out += sprintf(out, " Unlinked=%d", urbp->urb->unlinked);
125 out += sprintf(out, "\n"); 125 out += sprintf(out, "\n");
126 126
127 i = nactive = ninactive = 0; 127 i = nactive = ninactive = 0;
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index 1b3d23406a..340d6ed3e6 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -146,7 +146,6 @@ struct uhci_qh {
146 short phase; /* Between 0 and period-1 */ 146 short phase; /* Between 0 and period-1 */
147 short load; /* Periodic time requirement, in us */ 147 short load; /* Periodic time requirement, in us */
148 unsigned int iso_frame; /* Frame # for iso_packet_desc */ 148 unsigned int iso_frame; /* Frame # for iso_packet_desc */
149 int iso_status; /* Status for Isochronous URBs */
150 149
151 int state; /* QH_STATE_xxx; see above */ 150 int state; /* QH_STATE_xxx; see above */
152 int type; /* Queue type (control, bulk, etc) */ 151 int type; /* Queue type (control, bulk, etc) */
@@ -457,21 +456,6 @@ struct urb_priv {
457}; 456};
458 457
459 458
460/*
461 * Locking in uhci.c
462 *
463 * Almost everything relating to the hardware schedule and processing
464 * of URBs is protected by uhci->lock. urb->status is protected by
465 * urb->lock; that's the one exception.
466 *
467 * To prevent deadlocks, never lock uhci->lock while holding urb->lock.
468 * The safe order of locking is:
469 *
470 * #1 uhci->lock
471 * #2 urb->lock
472 */
473
474
475/* Some special IDs */ 459/* Some special IDs */
476 460
477#define PCI_VENDOR_ID_GENESYS 0x17a0 461#define PCI_VENDOR_ID_GENESYS 0x17a0
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 3bb908ca38..e5d60d5b10 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -757,7 +757,6 @@ static void uhci_free_urb_priv(struct uhci_hcd *uhci,
757 uhci_free_td(uhci, td); 757 uhci_free_td(uhci, td);
758 } 758 }
759 759
760 urbp->urb->hcpriv = NULL;
761 kmem_cache_free(uhci_up_cachep, urbp); 760 kmem_cache_free(uhci_up_cachep, urbp);
762} 761}
763 762
@@ -1324,7 +1323,6 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
1324 if (list_empty(&qh->queue)) { 1323 if (list_empty(&qh->queue)) {
1325 qh->iso_packet_desc = &urb->iso_frame_desc[0]; 1324 qh->iso_packet_desc = &urb->iso_frame_desc[0];
1326 qh->iso_frame = urb->start_frame; 1325 qh->iso_frame = urb->start_frame;
1327 qh->iso_status = 0;
1328 } 1326 }
1329 1327
1330 qh->skel = SKEL_ISO; 1328 qh->skel = SKEL_ISO;
@@ -1361,22 +1359,18 @@ static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1361 qh->iso_packet_desc->actual_length = actlength; 1359 qh->iso_packet_desc->actual_length = actlength;
1362 qh->iso_packet_desc->status = status; 1360 qh->iso_packet_desc->status = status;
1363 } 1361 }
1364 1362 if (status)
1365 if (status) {
1366 urb->error_count++; 1363 urb->error_count++;
1367 qh->iso_status = status;
1368 }
1369 1364
1370 uhci_remove_td_from_urbp(td); 1365 uhci_remove_td_from_urbp(td);
1371 uhci_free_td(uhci, td); 1366 uhci_free_td(uhci, td);
1372 qh->iso_frame += qh->period; 1367 qh->iso_frame += qh->period;
1373 ++qh->iso_packet_desc; 1368 ++qh->iso_packet_desc;
1374 } 1369 }
1375 return qh->iso_status; 1370 return 0;
1376} 1371}
1377 1372
1378static int uhci_urb_enqueue(struct usb_hcd *hcd, 1373static int uhci_urb_enqueue(struct usb_hcd *hcd,
1379 struct usb_host_endpoint *hep,
1380 struct urb *urb, gfp_t mem_flags) 1374 struct urb *urb, gfp_t mem_flags)
1381{ 1375{
1382 int ret; 1376 int ret;
@@ -1387,19 +1381,19 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
1387 1381
1388 spin_lock_irqsave(&uhci->lock, flags); 1382 spin_lock_irqsave(&uhci->lock, flags);
1389 1383
1390 ret = urb->status; 1384 ret = usb_hcd_link_urb_to_ep(hcd, urb);
1391 if (ret != -EINPROGRESS) /* URB already unlinked! */ 1385 if (ret)
1392 goto done; 1386 goto done_not_linked;
1393 1387
1394 ret = -ENOMEM; 1388 ret = -ENOMEM;
1395 urbp = uhci_alloc_urb_priv(uhci, urb); 1389 urbp = uhci_alloc_urb_priv(uhci, urb);
1396 if (!urbp) 1390 if (!urbp)
1397 goto done; 1391 goto done;
1398 1392
1399 if (hep->hcpriv) 1393 if (urb->ep->hcpriv)
1400 qh = (struct uhci_qh *) hep->hcpriv; 1394 qh = urb->ep->hcpriv;
1401 else { 1395 else {
1402 qh = uhci_alloc_qh(uhci, urb->dev, hep); 1396 qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
1403 if (!qh) 1397 if (!qh)
1404 goto err_no_qh; 1398 goto err_no_qh;
1405 } 1399 }
@@ -1440,27 +1434,29 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
1440err_submit_failed: 1434err_submit_failed:
1441 if (qh->state == QH_STATE_IDLE) 1435 if (qh->state == QH_STATE_IDLE)
1442 uhci_make_qh_idle(uhci, qh); /* Reclaim unused QH */ 1436 uhci_make_qh_idle(uhci, qh); /* Reclaim unused QH */
1443
1444err_no_qh: 1437err_no_qh:
1445 uhci_free_urb_priv(uhci, urbp); 1438 uhci_free_urb_priv(uhci, urbp);
1446
1447done: 1439done:
1440 if (ret)
1441 usb_hcd_unlink_urb_from_ep(hcd, urb);
1442done_not_linked:
1448 spin_unlock_irqrestore(&uhci->lock, flags); 1443 spin_unlock_irqrestore(&uhci->lock, flags);
1449 return ret; 1444 return ret;
1450} 1445}
1451 1446
1452static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb) 1447static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1453{ 1448{
1454 struct uhci_hcd *uhci = hcd_to_uhci(hcd); 1449 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1455 unsigned long flags; 1450 unsigned long flags;
1456 struct urb_priv *urbp;
1457 struct uhci_qh *qh; 1451 struct uhci_qh *qh;
1452 int rc;
1458 1453
1459 spin_lock_irqsave(&uhci->lock, flags); 1454 spin_lock_irqsave(&uhci->lock, flags);
1460 urbp = urb->hcpriv; 1455 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
1461 if (!urbp) /* URB was never linked! */ 1456 if (rc)
1462 goto done; 1457 goto done;
1463 qh = urbp->qh; 1458
1459 qh = ((struct urb_priv *) urb->hcpriv)->qh;
1464 1460
1465 /* Remove Isochronous TDs from the frame list ASAP */ 1461 /* Remove Isochronous TDs from the frame list ASAP */
1466 if (qh->type == USB_ENDPOINT_XFER_ISOC) { 1462 if (qh->type == USB_ENDPOINT_XFER_ISOC) {
@@ -1477,14 +1473,14 @@ static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
1477 1473
1478done: 1474done:
1479 spin_unlock_irqrestore(&uhci->lock, flags); 1475 spin_unlock_irqrestore(&uhci->lock, flags);
1480 return 0; 1476 return rc;
1481} 1477}
1482 1478
1483/* 1479/*
1484 * Finish unlinking an URB and give it back 1480 * Finish unlinking an URB and give it back
1485 */ 1481 */
1486static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh, 1482static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
1487 struct urb *urb) 1483 struct urb *urb, int status)
1488__releases(uhci->lock) 1484__releases(uhci->lock)
1489__acquires(uhci->lock) 1485__acquires(uhci->lock)
1490{ 1486{
@@ -1497,13 +1493,6 @@ __acquires(uhci->lock)
1497 * unlinked first. Regardless, don't confuse people with a 1493 * unlinked first. Regardless, don't confuse people with a
1498 * negative length. */ 1494 * negative length. */
1499 urb->actual_length = max(urb->actual_length, 0); 1495 urb->actual_length = max(urb->actual_length, 0);
1500
1501 /* Report erroneous short transfers */
1502 if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
1503 urb->actual_length <
1504 urb->transfer_buffer_length &&
1505 urb->status == 0))
1506 urb->status = -EREMOTEIO;
1507 } 1496 }
1508 1497
1509 /* When giving back the first URB in an Isochronous queue, 1498 /* When giving back the first URB in an Isochronous queue,
@@ -1516,7 +1505,6 @@ __acquires(uhci->lock)
1516 1505
1517 qh->iso_packet_desc = &nurb->iso_frame_desc[0]; 1506 qh->iso_packet_desc = &nurb->iso_frame_desc[0];
1518 qh->iso_frame = nurb->start_frame; 1507 qh->iso_frame = nurb->start_frame;
1519 qh->iso_status = 0;
1520 } 1508 }
1521 1509
1522 /* Take the URB off the QH's queue. If the queue is now empty, 1510 /* Take the URB off the QH's queue. If the queue is now empty,
@@ -1529,9 +1517,10 @@ __acquires(uhci->lock)
1529 } 1517 }
1530 1518
1531 uhci_free_urb_priv(uhci, urbp); 1519 uhci_free_urb_priv(uhci, urbp);
1520 usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);
1532 1521
1533 spin_unlock(&uhci->lock); 1522 spin_unlock(&uhci->lock);
1534 usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb); 1523 usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
1535 spin_lock(&uhci->lock); 1524 spin_lock(&uhci->lock);
1536 1525
1537 /* If the queue is now empty, we can unlink the QH and give up its 1526 /* If the queue is now empty, we can unlink the QH and give up its
@@ -1567,24 +1556,17 @@ static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
1567 if (status == -EINPROGRESS) 1556 if (status == -EINPROGRESS)
1568 break; 1557 break;
1569 1558
1570 spin_lock(&urb->lock);
1571 if (urb->status == -EINPROGRESS) /* Not dequeued */
1572 urb->status = status;
1573 else
1574 status = ECONNRESET; /* Not -ECONNRESET */
1575 spin_unlock(&urb->lock);
1576
1577 /* Dequeued but completed URBs can't be given back unless 1559 /* Dequeued but completed URBs can't be given back unless
1578 * the QH is stopped or has finished unlinking. */ 1560 * the QH is stopped or has finished unlinking. */
1579 if (status == ECONNRESET) { 1561 if (urb->unlinked) {
1580 if (QH_FINISHED_UNLINKING(qh)) 1562 if (QH_FINISHED_UNLINKING(qh))
1581 qh->is_stopped = 1; 1563 qh->is_stopped = 1;
1582 else if (!qh->is_stopped) 1564 else if (!qh->is_stopped)
1583 return; 1565 return;
1584 } 1566 }
1585 1567
1586 uhci_giveback_urb(uhci, qh, urb); 1568 uhci_giveback_urb(uhci, qh, urb, status);
1587 if (status < 0 && qh->type != USB_ENDPOINT_XFER_ISOC) 1569 if (status < 0)
1588 break; 1570 break;
1589 } 1571 }
1590 1572
@@ -1599,7 +1581,7 @@ static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
1599restart: 1581restart:
1600 list_for_each_entry(urbp, &qh->queue, node) { 1582 list_for_each_entry(urbp, &qh->queue, node) {
1601 urb = urbp->urb; 1583 urb = urbp->urb;
1602 if (urb->status != -EINPROGRESS) { 1584 if (urb->unlinked) {
1603 1585
1604 /* Fix up the TD links and save the toggles for 1586 /* Fix up the TD links and save the toggles for
1605 * non-Isochronous queues. For Isochronous queues, 1587 * non-Isochronous queues. For Isochronous queues,
@@ -1608,7 +1590,7 @@ restart:
1608 qh->is_stopped = 0; 1590 qh->is_stopped = 0;
1609 return; 1591 return;
1610 } 1592 }
1611 uhci_giveback_urb(uhci, qh, urb); 1593 uhci_giveback_urb(uhci, qh, urb, 0);
1612 goto restart; 1594 goto restart;
1613 } 1595 }
1614 } 1596 }
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index e9fdbc8997..5131cbfb2f 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -188,7 +188,8 @@ static void adu_interrupt_in_callback(struct urb *urb)
188 spin_lock(&dev->buflock); 188 spin_lock(&dev->buflock);
189 189
190 if (status != 0) { 190 if (status != 0) {
191 if ((status != -ENOENT) && (status != -ECONNRESET)) { 191 if ((status != -ENOENT) && (status != -ECONNRESET) &&
192 (status != -ESHUTDOWN)) {
192 dbg(1," %s : nonzero status received: %d", 193 dbg(1," %s : nonzero status received: %d",
193 __FUNCTION__, status); 194 __FUNCTION__, status);
194 } 195 }
diff --git a/drivers/usb/misc/berry_charge.c b/drivers/usb/misc/berry_charge.c
index 92c1d2768d..24e2dc3148 100644
--- a/drivers/usb/misc/berry_charge.c
+++ b/drivers/usb/misc/berry_charge.c
@@ -71,7 +71,7 @@ static int magic_charge(struct usb_device *udev)
71 if (retval != 2) { 71 if (retval != 2) {
72 dev_err(&udev->dev, "First magic command failed: %d.\n", 72 dev_err(&udev->dev, "First magic command failed: %d.\n",
73 retval); 73 retval);
74 return retval; 74 goto exit;
75 } 75 }
76 76
77 dbg(&udev->dev, "Sending second magic command\n"); 77 dbg(&udev->dev, "Sending second magic command\n");
@@ -80,7 +80,7 @@ static int magic_charge(struct usb_device *udev)
80 if (retval != 0) { 80 if (retval != 0) {
81 dev_err(&udev->dev, "Second magic command failed: %d.\n", 81 dev_err(&udev->dev, "Second magic command failed: %d.\n",
82 retval); 82 retval);
83 return retval; 83 goto exit;
84 } 84 }
85 85
86 dbg(&udev->dev, "Calling set_configuration\n"); 86 dbg(&udev->dev, "Calling set_configuration\n");
@@ -88,6 +88,8 @@ static int magic_charge(struct usb_device *udev)
88 if (retval) 88 if (retval)
89 dev_err(&udev->dev, "Set Configuration failed :%d.\n", retval); 89 dev_err(&udev->dev, "Set Configuration failed :%d.\n", retval);
90 90
91exit:
92 kfree(dummy_buffer);
91 return retval; 93 return retval;
92} 94}
93 95
@@ -112,6 +114,7 @@ static int magic_dual_mode(struct usb_device *udev)
112 if (retval) 114 if (retval)
113 dev_err(&udev->dev, "Set Configuration failed :%d.\n", retval); 115 dev_err(&udev->dev, "Set Configuration failed :%d.\n", retval);
114 116
117 kfree(dummy_buffer);
115 return retval; 118 return retval;
116} 119}
117 120
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 538b535e95..d3d8cd6ff1 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -2777,12 +2777,14 @@ static int ftdi_elan_probe(struct usb_interface *interface,
2777 size_t buffer_size; 2777 size_t buffer_size;
2778 int i; 2778 int i;
2779 int retval = -ENOMEM; 2779 int retval = -ENOMEM;
2780 struct usb_ftdi *ftdi = kmalloc(sizeof(struct usb_ftdi), GFP_KERNEL); 2780 struct usb_ftdi *ftdi;
2781 if (ftdi == NULL) { 2781
2782 ftdi = kzalloc(sizeof(struct usb_ftdi), GFP_KERNEL);
2783 if (!ftdi) {
2782 printk(KERN_ERR "Out of memory\n"); 2784 printk(KERN_ERR "Out of memory\n");
2783 return -ENOMEM; 2785 return -ENOMEM;
2784 } 2786 }
2785 memset(ftdi, 0x00, sizeof(struct usb_ftdi)); 2787
2786 mutex_lock(&ftdi_module_lock); 2788 mutex_lock(&ftdi_module_lock);
2787 list_add_tail(&ftdi->ftdi_list, &ftdi_static_list); 2789 list_add_tail(&ftdi->ftdi_list, &ftdi_static_list);
2788 ftdi->sequence_num = ++ftdi_instances; 2790 ftdi->sequence_num = ++ftdi_instances;
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index b64ca91d9b..9244d067ce 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -32,7 +32,7 @@
32 * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
33 * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 * 34 *
35 * Author: Thomas Winischhofer <thomas@winischhofer.net> 35 * Author: Thomas Winischhofer <thomas@winischhofer.net>
36 * 36 *
37 */ 37 */
38 38
@@ -962,12 +962,12 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
962 packet.address = 0x00000194; 962 packet.address = 0x00000194;
963 packet.data = addr; 963 packet.data = addr;
964 ret = sisusb_send_bridge_packet(sisusb, 10, 964 ret = sisusb_send_bridge_packet(sisusb, 10,
965 &packet, 0); 965 &packet, 0);
966 packet.header = 0x001f; 966 packet.header = 0x001f;
967 packet.address = 0x00000190; 967 packet.address = 0x00000190;
968 packet.data = (length & ~3); 968 packet.data = (length & ~3);
969 ret |= sisusb_send_bridge_packet(sisusb, 10, 969 ret |= sisusb_send_bridge_packet(sisusb, 10,
970 &packet, 0); 970 &packet, 0);
971 if (sisusb->flagb0 != 0x16) { 971 if (sisusb->flagb0 != 0x16) {
972 packet.header = 0x001f; 972 packet.header = 0x001f;
973 packet.address = 0x00000180; 973 packet.address = 0x00000180;
@@ -1003,23 +1003,17 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
1003 if (ret) { 1003 if (ret) {
1004 msgcount++; 1004 msgcount++;
1005 if (msgcount < 500) 1005 if (msgcount < 500)
1006 printk(KERN_ERR 1006 dev_err(&sisusb->sisusb_dev->dev, "Wrote %zd of %d bytes, error %d\n",
1007 "sisusbvga[%d]: Wrote %zd of " 1007 *bytes_written, length, ret);
1008 "%d bytes, error %d\n",
1009 sisusb->minor, *bytes_written,
1010 length, ret);
1011 else if (msgcount == 500) 1008 else if (msgcount == 500)
1012 printk(KERN_ERR 1009 dev_err(&sisusb->sisusb_dev->dev, "Too many errors, logging stopped\n");
1013 "sisusbvga[%d]: Too many errors"
1014 ", logging stopped\n",
1015 sisusb->minor);
1016 } 1010 }
1017 addr += (*bytes_written); 1011 addr += (*bytes_written);
1018 length -= (*bytes_written); 1012 length -= (*bytes_written);
1019 } 1013 }
1020 1014
1021 if (ret) 1015 if (ret)
1022 break; 1016 break;
1023 1017
1024 } 1018 }
1025 1019
@@ -1261,51 +1255,10 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
1261 addr += 4; 1255 addr += 4;
1262 length -= 4; 1256 length -= 4;
1263 } 1257 }
1264#if 0 /* That does not work, as EP 2 is an OUT EP! */
1265 default:
1266 CLEARPACKET(&packet);
1267 packet.header = 0x001f;
1268 packet.address = 0x000001a0;
1269 packet.data = 0x00000006;
1270 ret |= sisusb_send_bridge_packet(sisusb, 10,
1271 &packet, 0);
1272 packet.header = 0x001f;
1273 packet.address = 0x000001b0;
1274 packet.data = (length & ~3) | 0x40000000;
1275 ret |= sisusb_send_bridge_packet(sisusb, 10,
1276 &packet, 0);
1277 packet.header = 0x001f;
1278 packet.address = 0x000001b4;
1279 packet.data = addr;
1280 ret |= sisusb_send_bridge_packet(sisusb, 10,
1281 &packet, 0);
1282 packet.header = 0x001f;
1283 packet.address = 0x000001a4;
1284 packet.data = 0x00000001;
1285 ret |= sisusb_send_bridge_packet(sisusb, 10,
1286 &packet, 0);
1287 if (userbuffer) {
1288 ret |= sisusb_recv_bulk_msg(sisusb,
1289 SISUSB_EP_GFX_BULK_IN,
1290 (length & ~3),
1291 NULL, userbuffer,
1292 bytes_read, 0);
1293 if (!ret) userbuffer += (*bytes_read);
1294 } else {
1295 ret |= sisusb_recv_bulk_msg(sisusb,
1296 SISUSB_EP_GFX_BULK_IN,
1297 (length & ~3),
1298 kernbuffer, NULL,
1299 bytes_read, 0);
1300 if (!ret) kernbuffer += (*bytes_read);
1301 }
1302 addr += (*bytes_read);
1303 length -= (*bytes_read);
1304#endif
1305 } 1258 }
1306 1259
1307 if (ret) 1260 if (ret)
1308 break; 1261 break;
1309 } 1262 }
1310 1263
1311 return ret; 1264 return ret;
@@ -1401,22 +1354,6 @@ sisusb_readb(struct sisusb_usb_data *sisusb, u32 adr, u8 *data)
1401 return(sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM, adr, data)); 1354 return(sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM, adr, data));
1402} 1355}
1403 1356
1404#if 0
1405
1406int
1407sisusb_writew(struct sisusb_usb_data *sisusb, u32 adr, u16 data)
1408{
1409 return(sisusb_write_memio_word(sisusb, SISUSB_TYPE_MEM, adr, data));
1410}
1411
1412int
1413sisusb_readw(struct sisusb_usb_data *sisusb, u32 adr, u16 *data)
1414{
1415 return(sisusb_read_memio_word(sisusb, SISUSB_TYPE_MEM, adr, data));
1416}
1417
1418#endif /* 0 */
1419
1420int 1357int
1421sisusb_copy_memory(struct sisusb_usb_data *sisusb, char *src, 1358sisusb_copy_memory(struct sisusb_usb_data *sisusb, char *src,
1422 u32 dest, int length, size_t *bytes_written) 1359 u32 dest, int length, size_t *bytes_written)
@@ -1446,10 +1383,10 @@ sisusb_testreadwrite(struct sisusb_usb_data *sisusb)
1446 sisusb_copy_memory(sisusb, srcbuffer, sisusb->vrambase, 7, &dummy); 1383 sisusb_copy_memory(sisusb, srcbuffer, sisusb->vrambase, 7, &dummy);
1447 1384
1448 for(i = 1; i <= 7; i++) { 1385 for(i = 1; i <= 7; i++) {
1449 printk(KERN_DEBUG "sisusb: rwtest %d bytes\n", i); 1386 dev_dbg(&sisusb->sisusb_dev->dev, "sisusb: rwtest %d bytes\n", i);
1450 sisusb_read_memory(sisusb, destbuffer, sisusb->vrambase, i, &dummy); 1387 sisusb_read_memory(sisusb, destbuffer, sisusb->vrambase, i, &dummy);
1451 for(j = 0; j < i; j++) { 1388 for(j = 0; j < i; j++) {
1452 printk(KERN_DEBUG "sisusb: rwtest read[%d] = %x\n", j, destbuffer[j]); 1389 dev_dbg(&sisusb->sisusb_dev->dev, "rwtest read[%d] = %x\n", j, destbuffer[j]);
1453 } 1390 }
1454 } 1391 }
1455} 1392}
@@ -1533,9 +1470,9 @@ sisusb_clear_vram(struct sisusb_usb_data *sisusb, u32 address, int length)
1533#define SETIREGAND(r,i,a) sisusb_setidxregand(sisusb, r, i, a) 1470#define SETIREGAND(r,i,a) sisusb_setidxregand(sisusb, r, i, a)
1534#define SETIREGANDOR(r,i,a,o) sisusb_setidxregandor(sisusb, r, i, a, o) 1471#define SETIREGANDOR(r,i,a,o) sisusb_setidxregandor(sisusb, r, i, a, o)
1535#define READL(a,d) sisusb_read_memio_long(sisusb, SISUSB_TYPE_MEM, a, d) 1472#define READL(a,d) sisusb_read_memio_long(sisusb, SISUSB_TYPE_MEM, a, d)
1536#define WRITEL(a,d) sisusb_write_memio_long(sisusb, SISUSB_TYPE_MEM, a, d) 1473#define WRITEL(a,d) sisusb_write_memio_long(sisusb, SISUSB_TYPE_MEM, a, d)
1537#define READB(a,d) sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM, a, d) 1474#define READB(a,d) sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM, a, d)
1538#define WRITEB(a,d) sisusb_write_memio_byte(sisusb, SISUSB_TYPE_MEM, a, d) 1475#define WRITEB(a,d) sisusb_write_memio_byte(sisusb, SISUSB_TYPE_MEM, a, d)
1539 1476
1540static int 1477static int
1541sisusb_triggersr16(struct sisusb_usb_data *sisusb, u8 ramtype) 1478sisusb_triggersr16(struct sisusb_usb_data *sisusb, u8 ramtype)
@@ -2008,7 +1945,7 @@ sisusb_set_default_mode(struct sisusb_usb_data *sisusb, int touchengines)
2008 SETIREG(SISSR, 0x26, 0x00); 1945 SETIREG(SISSR, 0x26, 0x00);
2009 } 1946 }
2010 1947
2011 SETIREG(SISCR, 0x34, 0x44); /* we just set std mode #44 */ 1948 SETIREG(SISCR, 0x34, 0x44); /* we just set std mode #44 */
2012 1949
2013 return ret; 1950 return ret;
2014} 1951}
@@ -2168,17 +2105,12 @@ sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
2168 if (ramtype <= 1) { 2105 if (ramtype <= 1) {
2169 ret |= sisusb_get_sdram_size(sisusb, &iret, bw, chab); 2106 ret |= sisusb_get_sdram_size(sisusb, &iret, bw, chab);
2170 if (iret) { 2107 if (iret) {
2171 printk(KERN_ERR "sisusbvga[%d]: RAM size " 2108 dev_err(&sisusb->sisusb_dev->dev,"RAM size detection failed, assuming 8MB video RAM\n");
2172 "detection failed, "
2173 "assuming 8MB video RAM\n",
2174 sisusb->minor);
2175 ret |= SETIREG(SISSR,0x14,0x31); 2109 ret |= SETIREG(SISSR,0x14,0x31);
2176 /* TODO */ 2110 /* TODO */
2177 } 2111 }
2178 } else { 2112 } else {
2179 printk(KERN_ERR "sisusbvga[%d]: DDR RAM device found, " 2113 dev_err(&sisusb->sisusb_dev->dev, "DDR RAM device found, assuming 8MB video RAM\n");
2180 "assuming 8MB video RAM\n",
2181 sisusb->minor);
2182 ret |= SETIREG(SISSR,0x14,0x31); 2114 ret |= SETIREG(SISSR,0x14,0x31);
2183 /* *** TODO *** */ 2115 /* *** TODO *** */
2184 } 2116 }
@@ -2249,8 +2181,7 @@ sisusb_get_ramconfig(struct sisusb_usb_data *sisusb)
2249 break; 2181 break;
2250 } 2182 }
2251 2183
2252 printk(KERN_INFO "sisusbvga[%d]: %dMB %s %s, bus width %d\n", 2184 dev_info(&sisusb->sisusb_dev->dev, "%dMB %s %s, bus width %d\n", (sisusb->vramsize >> 20), ramtypetext1,
2253 sisusb->minor, (sisusb->vramsize >> 20), ramtypetext1,
2254 ramtypetext2[ramtype], bw); 2185 ramtypetext2[ramtype], bw);
2255} 2186}
2256 2187
@@ -2509,11 +2440,8 @@ sisusb_open(struct inode *inode, struct file *file)
2509 struct usb_interface *interface; 2440 struct usb_interface *interface;
2510 int subminor = iminor(inode); 2441 int subminor = iminor(inode);
2511 2442
2512 if (!(interface = usb_find_interface(&sisusb_driver, subminor))) { 2443 if (!(interface = usb_find_interface(&sisusb_driver, subminor)))
2513 printk(KERN_ERR "sisusb[%d]: Failed to find interface\n",
2514 subminor);
2515 return -ENODEV; 2444 return -ENODEV;
2516 }
2517 2445
2518 if (!(sisusb = usb_get_intfdata(interface))) 2446 if (!(sisusb = usb_get_intfdata(interface)))
2519 return -ENODEV; 2447 return -ENODEV;
@@ -2534,18 +2462,12 @@ sisusb_open(struct inode *inode, struct file *file)
2534 if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH) { 2462 if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH) {
2535 if (sisusb_init_gfxdevice(sisusb, 0)) { 2463 if (sisusb_init_gfxdevice(sisusb, 0)) {
2536 mutex_unlock(&sisusb->lock); 2464 mutex_unlock(&sisusb->lock);
2537 printk(KERN_ERR 2465 dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n");
2538 "sisusbvga[%d]: Failed to initialize "
2539 "device\n",
2540 sisusb->minor);
2541 return -EIO; 2466 return -EIO;
2542 } 2467 }
2543 } else { 2468 } else {
2544 mutex_unlock(&sisusb->lock); 2469 mutex_unlock(&sisusb->lock);
2545 printk(KERN_ERR 2470 dev_err(&sisusb->sisusb_dev->dev, "Device not attached to USB 2.0 hub\n");
2546 "sisusbvga[%d]: Device not attached to "
2547 "USB 2.0 hub\n",
2548 sisusb->minor);
2549 return -EIO; 2471 return -EIO;
2550 } 2472 }
2551 } 2473 }
@@ -2586,7 +2508,6 @@ static int
2586sisusb_release(struct inode *inode, struct file *file) 2508sisusb_release(struct inode *inode, struct file *file)
2587{ 2509{
2588 struct sisusb_usb_data *sisusb; 2510 struct sisusb_usb_data *sisusb;
2589 int myminor;
2590 2511
2591 if (!(sisusb = (struct sisusb_usb_data *)file->private_data)) 2512 if (!(sisusb = (struct sisusb_usb_data *)file->private_data))
2592 return -ENODEV; 2513 return -ENODEV;
@@ -2599,8 +2520,6 @@ sisusb_release(struct inode *inode, struct file *file)
2599 sisusb_kill_all_busy(sisusb); 2520 sisusb_kill_all_busy(sisusb);
2600 } 2521 }
2601 2522
2602 myminor = sisusb->minor;
2603
2604 sisusb->isopen = 0; 2523 sisusb->isopen = 0;
2605 file->private_data = NULL; 2524 file->private_data = NULL;
2606 2525
@@ -2942,7 +2861,7 @@ static int
2942sisusb_handle_command(struct sisusb_usb_data *sisusb, struct sisusb_command *y, 2861sisusb_handle_command(struct sisusb_usb_data *sisusb, struct sisusb_command *y,
2943 unsigned long arg) 2862 unsigned long arg)
2944{ 2863{
2945 int retval, port, length; 2864 int retval, port, length;
2946 u32 address; 2865 u32 address;
2947 2866
2948 /* All our commands require the device 2867 /* All our commands require the device
@@ -3065,12 +2984,12 @@ sisusb_handle_command(struct sisusb_usb_data *sisusb, struct sisusb_command *y,
3065 2984
3066static int 2985static int
3067sisusb_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 2986sisusb_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
3068 unsigned long arg) 2987 unsigned long arg)
3069{ 2988{
3070 struct sisusb_usb_data *sisusb; 2989 struct sisusb_usb_data *sisusb;
3071 struct sisusb_info x; 2990 struct sisusb_info x;
3072 struct sisusb_command y; 2991 struct sisusb_command y;
3073 int retval = 0; 2992 int retval = 0;
3074 u32 __user *argp = (u32 __user *)arg; 2993 u32 __user *argp = (u32 __user *)arg;
3075 2994
3076 if (!(sisusb = (struct sisusb_usb_data *)file->private_data)) 2995 if (!(sisusb = (struct sisusb_usb_data *)file->private_data))
@@ -3095,7 +3014,7 @@ sisusb_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
3095 3014
3096 case SISUSB_GET_CONFIG: 3015 case SISUSB_GET_CONFIG:
3097 3016
3098 x.sisusb_id = SISUSB_ID; 3017 x.sisusb_id = SISUSB_ID;
3099 x.sisusb_version = SISUSB_VERSION; 3018 x.sisusb_version = SISUSB_VERSION;
3100 x.sisusb_revision = SISUSB_REVISION; 3019 x.sisusb_revision = SISUSB_REVISION;
3101 x.sisusb_patchlevel = SISUSB_PATCHLEVEL; 3020 x.sisusb_patchlevel = SISUSB_PATCHLEVEL;
@@ -3164,7 +3083,7 @@ static const struct file_operations usb_sisusb_fops = {
3164 .release = sisusb_release, 3083 .release = sisusb_release,
3165 .read = sisusb_read, 3084 .read = sisusb_read,
3166 .write = sisusb_write, 3085 .write = sisusb_write,
3167 .llseek = sisusb_lseek, 3086 .llseek = sisusb_lseek,
3168#ifdef SISUSB_NEW_CONFIG_COMPAT 3087#ifdef SISUSB_NEW_CONFIG_COMPAT
3169 .compat_ioctl = sisusb_compat_ioctl, 3088 .compat_ioctl = sisusb_compat_ioctl,
3170#endif 3089#endif
@@ -3183,17 +3102,13 @@ static int sisusb_probe(struct usb_interface *intf,
3183 struct usb_device *dev = interface_to_usbdev(intf); 3102 struct usb_device *dev = interface_to_usbdev(intf);
3184 struct sisusb_usb_data *sisusb; 3103 struct sisusb_usb_data *sisusb;
3185 int retval = 0, i; 3104 int retval = 0, i;
3186 const char *memfail =
3187 KERN_ERR
3188 "sisusbvga[%d]: Failed to allocate memory for %s buffer\n";
3189 3105
3190 printk(KERN_INFO "sisusb: USB2VGA dongle found at address %d\n", 3106 dev_info(&dev->dev, "USB2VGA dongle found at address %d\n",
3191 dev->devnum); 3107 dev->devnum);
3192 3108
3193 /* Allocate memory for our private */ 3109 /* Allocate memory for our private */
3194 if (!(sisusb = kzalloc(sizeof(*sisusb), GFP_KERNEL))) { 3110 if (!(sisusb = kzalloc(sizeof(*sisusb), GFP_KERNEL))) {
3195 printk(KERN_ERR 3111 dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate memory for private data\n");
3196 "sisusb: Failed to allocate memory for private data\n");
3197 return -ENOMEM; 3112 return -ENOMEM;
3198 } 3113 }
3199 kref_init(&sisusb->kref); 3114 kref_init(&sisusb->kref);
@@ -3202,8 +3117,7 @@ static int sisusb_probe(struct usb_interface *intf,
3202 3117
3203 /* Register device */ 3118 /* Register device */
3204 if ((retval = usb_register_dev(intf, &usb_sisusb_class))) { 3119 if ((retval = usb_register_dev(intf, &usb_sisusb_class))) {
3205 printk(KERN_ERR 3120 dev_err(&sisusb->sisusb_dev->dev, "Failed to get a minor for device %d\n",
3206 "sisusb: Failed to get a minor for device %d\n",
3207 dev->devnum); 3121 dev->devnum);
3208 retval = -ENODEV; 3122 retval = -ENODEV;
3209 goto error_1; 3123 goto error_1;
@@ -3221,7 +3135,7 @@ static int sisusb_probe(struct usb_interface *intf,
3221 sisusb->ibufsize = SISUSB_IBUF_SIZE; 3135 sisusb->ibufsize = SISUSB_IBUF_SIZE;
3222 if (!(sisusb->ibuf = usb_buffer_alloc(dev, SISUSB_IBUF_SIZE, 3136 if (!(sisusb->ibuf = usb_buffer_alloc(dev, SISUSB_IBUF_SIZE,
3223 GFP_KERNEL, &sisusb->transfer_dma_in))) { 3137 GFP_KERNEL, &sisusb->transfer_dma_in))) {
3224 printk(memfail, "input", sisusb->minor); 3138 dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate memory for input buffer");
3225 retval = -ENOMEM; 3139 retval = -ENOMEM;
3226 goto error_2; 3140 goto error_2;
3227 } 3141 }
@@ -3233,7 +3147,7 @@ static int sisusb_probe(struct usb_interface *intf,
3233 GFP_KERNEL, 3147 GFP_KERNEL,
3234 &sisusb->transfer_dma_out[i]))) { 3148 &sisusb->transfer_dma_out[i]))) {
3235 if (i == 0) { 3149 if (i == 0) {
3236 printk(memfail, "output", sisusb->minor); 3150 dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate memory for output buffer\n");
3237 retval = -ENOMEM; 3151 retval = -ENOMEM;
3238 goto error_3; 3152 goto error_3;
3239 } 3153 }
@@ -3245,9 +3159,7 @@ static int sisusb_probe(struct usb_interface *intf,
3245 3159
3246 /* Allocate URBs */ 3160 /* Allocate URBs */
3247 if (!(sisusb->sisurbin = usb_alloc_urb(0, GFP_KERNEL))) { 3161 if (!(sisusb->sisurbin = usb_alloc_urb(0, GFP_KERNEL))) {
3248 printk(KERN_ERR 3162 dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate URBs\n");
3249 "sisusbvga[%d]: Failed to allocate URBs\n",
3250 sisusb->minor);
3251 retval = -ENOMEM; 3163 retval = -ENOMEM;
3252 goto error_3; 3164 goto error_3;
3253 } 3165 }
@@ -3255,9 +3167,7 @@ static int sisusb_probe(struct usb_interface *intf,
3255 3167
3256 for (i = 0; i < sisusb->numobufs; i++) { 3168 for (i = 0; i < sisusb->numobufs; i++) {
3257 if (!(sisusb->sisurbout[i] = usb_alloc_urb(0, GFP_KERNEL))) { 3169 if (!(sisusb->sisurbout[i] = usb_alloc_urb(0, GFP_KERNEL))) {
3258 printk(KERN_ERR 3170 dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate URBs\n");
3259 "sisusbvga[%d]: Failed to allocate URBs\n",
3260 sisusb->minor);
3261 retval = -ENOMEM; 3171 retval = -ENOMEM;
3262 goto error_4; 3172 goto error_4;
3263 } 3173 }
@@ -3266,15 +3176,12 @@ static int sisusb_probe(struct usb_interface *intf,
3266 sisusb->urbstatus[i] = 0; 3176 sisusb->urbstatus[i] = 0;
3267 } 3177 }
3268 3178
3269 printk(KERN_INFO "sisusbvga[%d]: Allocated %d output buffers\n", 3179 dev_info(&sisusb->sisusb_dev->dev, "Allocated %d output buffers\n", sisusb->numobufs);
3270 sisusb->minor, sisusb->numobufs);
3271 3180
3272#ifdef INCL_SISUSB_CON 3181#ifdef INCL_SISUSB_CON
3273 /* Allocate our SiS_Pr */ 3182 /* Allocate our SiS_Pr */
3274 if (!(sisusb->SiS_Pr = kmalloc(sizeof(struct SiS_Private), GFP_KERNEL))) { 3183 if (!(sisusb->SiS_Pr = kmalloc(sizeof(struct SiS_Private), GFP_KERNEL))) {
3275 printk(KERN_ERR 3184 dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate SiS_Pr\n");
3276 "sisusbvga[%d]: Failed to allocate SiS_Pr\n",
3277 sisusb->minor);
3278 } 3185 }
3279#endif 3186#endif
3280 3187
@@ -3296,10 +3203,7 @@ static int sisusb_probe(struct usb_interface *intf,
3296 ret |= register_ioctl32_conversion(SISUSB_GET_CONFIG, NULL); 3203 ret |= register_ioctl32_conversion(SISUSB_GET_CONFIG, NULL);
3297 ret |= register_ioctl32_conversion(SISUSB_COMMAND, NULL); 3204 ret |= register_ioctl32_conversion(SISUSB_COMMAND, NULL);
3298 if (ret) 3205 if (ret)
3299 printk(KERN_ERR 3206 dev_err(&sisusb->sisusb_dev->dev, "Error registering ioctl32 translations\n");
3300 "sisusbvga[%d]: Error registering ioctl32 "
3301 "translations\n",
3302 sisusb->minor);
3303 else 3207 else
3304 sisusb->ioctl32registered = 1; 3208 sisusb->ioctl32registered = 1;
3305 } 3209 }
@@ -3315,23 +3219,17 @@ static int sisusb_probe(struct usb_interface *intf,
3315 initscreen = 0; 3219 initscreen = 0;
3316#endif 3220#endif
3317 if (sisusb_init_gfxdevice(sisusb, initscreen)) 3221 if (sisusb_init_gfxdevice(sisusb, initscreen))
3318 printk(KERN_ERR 3222 dev_err(&sisusb->sisusb_dev->dev, "Failed to early initialize device\n");
3319 "sisusbvga[%d]: Failed to early "
3320 "initialize device\n",
3321 sisusb->minor);
3322 3223
3323 } else 3224 } else
3324 printk(KERN_INFO 3225 dev_info(&sisusb->sisusb_dev->dev, "Not attached to USB 2.0 hub, deferring init\n");
3325 "sisusbvga[%d]: Not attached to USB 2.0 hub, "
3326 "deferring init\n",
3327 sisusb->minor);
3328 3226
3329 sisusb->ready = 1; 3227 sisusb->ready = 1;
3330 3228
3331#ifdef SISUSBENDIANTEST 3229#ifdef SISUSBENDIANTEST
3332 printk(KERN_DEBUG "sisusb: *** RWTEST ***\n"); 3230 dev_dbg(&sisusb->sisusb_dev->dev, "*** RWTEST ***\n");
3333 sisusb_testreadwrite(sisusb); 3231 sisusb_testreadwrite(sisusb);
3334 printk(KERN_DEBUG "sisusb: *** RWTEST END ***\n"); 3232 dev_dbg(&sisusb->sisusb_dev->dev, "*** RWTEST END ***\n");
3335#endif 3233#endif
3336 3234
3337#ifdef INCL_SISUSB_CON 3235#ifdef INCL_SISUSB_CON
@@ -3354,7 +3252,6 @@ error_1:
3354static void sisusb_disconnect(struct usb_interface *intf) 3252static void sisusb_disconnect(struct usb_interface *intf)
3355{ 3253{
3356 struct sisusb_usb_data *sisusb; 3254 struct sisusb_usb_data *sisusb;
3357 int minor;
3358 3255
3359 /* This should *not* happen */ 3256 /* This should *not* happen */
3360 if (!(sisusb = usb_get_intfdata(intf))) 3257 if (!(sisusb = usb_get_intfdata(intf)))
@@ -3364,8 +3261,6 @@ static void sisusb_disconnect(struct usb_interface *intf)
3364 sisusb_console_exit(sisusb); 3261 sisusb_console_exit(sisusb);
3365#endif 3262#endif
3366 3263
3367 minor = sisusb->minor;
3368
3369 usb_deregister_dev(intf, &usb_sisusb_class); 3264 usb_deregister_dev(intf, &usb_sisusb_class);
3370 3265
3371 mutex_lock(&sisusb->lock); 3266 mutex_lock(&sisusb->lock);
@@ -3384,10 +3279,7 @@ static void sisusb_disconnect(struct usb_interface *intf)
3384 ret |= unregister_ioctl32_conversion(SISUSB_GET_CONFIG); 3279 ret |= unregister_ioctl32_conversion(SISUSB_GET_CONFIG);
3385 ret |= unregister_ioctl32_conversion(SISUSB_COMMAND); 3280 ret |= unregister_ioctl32_conversion(SISUSB_COMMAND);
3386 if (ret) { 3281 if (ret) {
3387 printk(KERN_ERR 3282 dev_err(&sisusb->sisusb_dev->dev, "Error unregistering ioctl32 translations\n");
3388 "sisusbvga[%d]: Error unregistering "
3389 "ioctl32 translations\n",
3390 minor);
3391 } 3283 }
3392 } 3284 }
3393#endif 3285#endif
@@ -3400,7 +3292,7 @@ static void sisusb_disconnect(struct usb_interface *intf)
3400 /* decrement our usage count */ 3292 /* decrement our usage count */
3401 kref_put(&sisusb->kref, sisusb_delete); 3293 kref_put(&sisusb->kref, sisusb_delete);
3402 3294
3403 printk(KERN_INFO "sisusbvga[%d]: Disconnected\n", minor); 3295 dev_info(&sisusb->sisusb_dev->dev, "Disconnected\n");
3404} 3296}
3405 3297
3406static struct usb_device_id sisusb_table [] = { 3298static struct usb_device_id sisusb_table [] = {
@@ -3424,22 +3316,12 @@ static struct usb_driver sisusb_driver = {
3424 3316
3425static int __init usb_sisusb_init(void) 3317static int __init usb_sisusb_init(void)
3426{ 3318{
3427 int retval;
3428 3319
3429#ifdef INCL_SISUSB_CON 3320#ifdef INCL_SISUSB_CON
3430 sisusb_init_concode(); 3321 sisusb_init_concode();
3431#endif 3322#endif
3432 3323
3433 if (!(retval = usb_register(&sisusb_driver))) { 3324 return usb_register(&sisusb_driver);
3434
3435 printk(KERN_INFO "sisusb: Driver version %d.%d.%d\n",
3436 SISUSB_VERSION, SISUSB_REVISION, SISUSB_PATCHLEVEL);
3437 printk(KERN_INFO
3438 "sisusb: Copyright (C) 2005 Thomas Winischhofer\n");
3439
3440 }
3441
3442 return retval;
3443} 3325}
3444 3326
3445static void __exit usb_sisusb_exit(void) 3327static void __exit usb_sisusb_exit(void)
diff --git a/drivers/usb/misc/sisusbvga/sisusb.h b/drivers/usb/misc/sisusbvga/sisusb.h
index 8e1120a648..d2d7872cd0 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.h
+++ b/drivers/usb/misc/sisusbvga/sisusb.h
@@ -8,29 +8,29 @@
8 * 8 *
9 * Otherwise, the following license terms apply: 9 * Otherwise, the following license terms apply:
10 * 10 *
11 * * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * * are met: 13 * are met:
14 * * 1) Redistributions of source code must retain the above copyright 14 * 1) Redistributions of source code must retain the above copyright
15 * * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * * 2) Redistributions in binary form must reproduce the above copyright 16 * 2) Redistributions in binary form must reproduce the above copyright
17 * * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * * 3) The name of the author may not be used to endorse or promote products 19 * 3) The name of the author may not be used to endorse or promote products
20 * * derived from this software without specific prior written permission. 20 * derived from this software without specific prior written permission.
21 * *
22 * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESSED OR
23 * * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * 21 *
33 * Author: Thomas Winischhofer <thomas@winischhofer.net> 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESSED OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Author: Thomas Winischhofer <thomas@winischhofer.net>
34 * 34 *
35 */ 35 */
36 36
@@ -44,16 +44,14 @@
44#include <linux/mutex.h> 44#include <linux/mutex.h>
45 45
46/* For older kernels, support for text consoles is by default 46/* For older kernels, support for text consoles is by default
47 * off. To ensable text console support, change the following: 47 * off. To enable text console support, change the following:
48 */ 48 */
49#if 0 49/* #define CONFIG_USB_SISUSBVGA_CON */
50#define CONFIG_USB_SISUSBVGA_CON
51#endif
52 50
53/* Version Information */ 51/* Version Information */
54 52
55#define SISUSB_VERSION 0 53#define SISUSB_VERSION 0
56#define SISUSB_REVISION 0 54#define SISUSB_REVISION 0
57#define SISUSB_PATCHLEVEL 8 55#define SISUSB_PATCHLEVEL 8
58 56
59/* Include console and mode switching code? */ 57/* Include console and mode switching code? */
@@ -74,7 +72,7 @@
74#define SISUSB_IBUF_SIZE 0x01000 72#define SISUSB_IBUF_SIZE 0x01000
75#define SISUSB_OBUF_SIZE 0x10000 /* fixed */ 73#define SISUSB_OBUF_SIZE 0x10000 /* fixed */
76 74
77#define NUMOBUFS 8 /* max number of output buffers/output URBs */ 75#define NUMOBUFS 8 /* max number of output buffers/output URBs */
78 76
79/* About endianness: 77/* About endianness:
80 * 78 *
@@ -93,7 +91,7 @@
93 */ 91 */
94 92
95#ifdef __BIG_ENDIAN 93#ifdef __BIG_ENDIAN
96#define SISUSB_CORRECT_ENDIANNESS_PACKET(p) \ 94#define SISUSB_CORRECT_ENDIANNESS_PACKET(p) \
97 do { \ 95 do { \
98 p->header = cpu_to_le16(p->header); \ 96 p->header = cpu_to_le16(p->header); \
99 p->address = cpu_to_le32(p->address); \ 97 p->address = cpu_to_le32(p->address); \
@@ -105,7 +103,7 @@
105 103
106struct sisusb_usb_data; 104struct sisusb_usb_data;
107 105
108struct sisusb_urb_context { /* urb->context for outbound bulk URBs */ 106struct sisusb_urb_context { /* urb->context for outbound bulk URBs */
109 struct sisusb_usb_data *sisusb; 107 struct sisusb_usb_data *sisusb;
110 int urbindex; 108 int urbindex;
111 int *actual_length; 109 int *actual_length;
@@ -116,16 +114,16 @@ struct sisusb_usb_data {
116 struct usb_interface *interface; 114 struct usb_interface *interface;
117 struct kref kref; 115 struct kref kref;
118 wait_queue_head_t wait_q; /* for syncind and timeouts */ 116 wait_queue_head_t wait_q; /* for syncind and timeouts */
119 struct mutex lock; /* general race avoidance */ 117 struct mutex lock; /* general race avoidance */
120 unsigned int ifnum; /* interface number of the USB device */ 118 unsigned int ifnum; /* interface number of the USB device */
121 int minor; /* minor (for logging clarity) */ 119 int minor; /* minor (for logging clarity) */
122 int isopen; /* !=0 if open */ 120 int isopen; /* !=0 if open */
123 int present; /* !=0 if device is present on the bus */ 121 int present; /* !=0 if device is present on the bus */
124 int ready; /* !=0 if device is ready for userland */ 122 int ready; /* !=0 if device is ready for userland */
125#ifdef SISUSB_OLD_CONFIG_COMPAT 123#ifdef SISUSB_OLD_CONFIG_COMPAT
126 int ioctl32registered; 124 int ioctl32registered;
127#endif 125#endif
128 int numobufs; /* number of obufs = number of out urbs */ 126 int numobufs; /* number of obufs = number of out urbs */
129 char *obuf[NUMOBUFS], *ibuf; /* transfer buffers */ 127 char *obuf[NUMOBUFS], *ibuf; /* transfer buffers */
130 int obufsize, ibufsize; 128 int obufsize, ibufsize;
131 dma_addr_t transfer_dma_out[NUMOBUFS]; 129 dma_addr_t transfer_dma_out[NUMOBUFS];
@@ -136,13 +134,13 @@ struct sisusb_usb_data {
136 unsigned char completein; 134 unsigned char completein;
137 struct sisusb_urb_context urbout_context[NUMOBUFS]; 135 struct sisusb_urb_context urbout_context[NUMOBUFS];
138 unsigned long flagb0; 136 unsigned long flagb0;
139 unsigned long vrambase; /* framebuffer base */ 137 unsigned long vrambase; /* framebuffer base */
140 unsigned int vramsize; /* framebuffer size (bytes) */ 138 unsigned int vramsize; /* framebuffer size (bytes) */
141 unsigned long mmiobase; 139 unsigned long mmiobase;
142 unsigned int mmiosize; 140 unsigned int mmiosize;
143 unsigned long ioportbase; 141 unsigned long ioportbase;
144 unsigned char devinit; /* device initialized? */ 142 unsigned char devinit; /* device initialized? */
145 unsigned char gfxinit; /* graphics core initialized? */ 143 unsigned char gfxinit; /* graphics core initialized? */
146 unsigned short chipid, chipvendor; 144 unsigned short chipid, chipvendor;
147 unsigned short chiprevision; 145 unsigned short chiprevision;
148#ifdef INCL_SISUSB_CON 146#ifdef INCL_SISUSB_CON
@@ -152,7 +150,7 @@ struct sisusb_usb_data {
152 int haveconsole, con_first, con_last; 150 int haveconsole, con_first, con_last;
153 int havethisconsole[MAX_NR_CONSOLES]; 151 int havethisconsole[MAX_NR_CONSOLES];
154 int textmodedestroyed; 152 int textmodedestroyed;
155 unsigned int sisusb_num_columns; /* real number, not vt's idea */ 153 unsigned int sisusb_num_columns; /* real number, not vt's idea */
156 int cur_start_addr, con_rolled_over; 154 int cur_start_addr, con_rolled_over;
157 int sisusb_cursor_loc, bad_cursor_pos; 155 int sisusb_cursor_loc, bad_cursor_pos;
158 int sisusb_cursor_size_from; 156 int sisusb_cursor_size_from;
@@ -197,7 +195,7 @@ struct sisusb_packet {
197 unsigned short header; 195 unsigned short header;
198 u32 address; 196 u32 address;
199 u32 data; 197 u32 data;
200} __attribute__((__packed__)); 198} __attribute__ ((__packed__));
201 199
202#define CLEARPACKET(packet) memset(packet, 0, 10) 200#define CLEARPACKET(packet) memset(packet, 0, 10)
203 201
@@ -265,36 +263,36 @@ struct sisusb_packet {
265 263
266/* Structure argument for SISUSB_GET_INFO ioctl */ 264/* Structure argument for SISUSB_GET_INFO ioctl */
267struct sisusb_info { 265struct sisusb_info {
268 __u32 sisusb_id; /* for identifying sisusb */ 266 __u32 sisusb_id; /* for identifying sisusb */
269#define SISUSB_ID 0x53495355 /* Identify myself with 'SISU' */ 267#define SISUSB_ID 0x53495355 /* Identify myself with 'SISU' */
270 __u8 sisusb_version; 268 __u8 sisusb_version;
271 __u8 sisusb_revision; 269 __u8 sisusb_revision;
272 __u8 sisusb_patchlevel; 270 __u8 sisusb_patchlevel;
273 __u8 sisusb_gfxinit; /* graphics core initialized? */ 271 __u8 sisusb_gfxinit; /* graphics core initialized? */
274 272
275 __u32 sisusb_vrambase; 273 __u32 sisusb_vrambase;
276 __u32 sisusb_mmiobase; 274 __u32 sisusb_mmiobase;
277 __u32 sisusb_iobase; 275 __u32 sisusb_iobase;
278 __u32 sisusb_pcibase; 276 __u32 sisusb_pcibase;
279 277
280 __u32 sisusb_vramsize; /* framebuffer size in bytes */ 278 __u32 sisusb_vramsize; /* framebuffer size in bytes */
281 279
282 __u32 sisusb_minor; 280 __u32 sisusb_minor;
283 281
284 __u32 sisusb_fbdevactive; /* != 0 if framebuffer device active */ 282 __u32 sisusb_fbdevactive; /* != 0 if framebuffer device active */
285 283
286 __u32 sisusb_conactive; /* != 0 if console driver active */ 284 __u32 sisusb_conactive; /* != 0 if console driver active */
287 285
288 __u8 sisusb_reserved[28]; /* for future use */ 286 __u8 sisusb_reserved[28]; /* for future use */
289}; 287};
290 288
291struct sisusb_command { 289struct sisusb_command {
292 __u8 operation; /* see below */ 290 __u8 operation; /* see below */
293 __u8 data0; /* operation dependent */ 291 __u8 data0; /* operation dependent */
294 __u8 data1; /* operation dependent */ 292 __u8 data1; /* operation dependent */
295 __u8 data2; /* operation dependent */ 293 __u8 data2; /* operation dependent */
296 __u32 data3; /* operation dependent */ 294 __u32 data3; /* operation dependent */
297 __u32 data4; /* for future use */ 295 __u32 data4; /* for future use */
298}; 296};
299 297
300#define SUCMD_GET 0x01 /* for all: data0 = index, data3 = port */ 298#define SUCMD_GET 0x01 /* for all: data0 = index, data3 = port */
@@ -306,7 +304,7 @@ struct sisusb_command {
306 304
307#define SUCMD_CLRSCR 0x07 /* data0:1:2 = length, data3 = address */ 305#define SUCMD_CLRSCR 0x07 /* data0:1:2 = length, data3 = address */
308 306
309#define SUCMD_HANDLETEXTMODE 0x08 /* Reset/destroy text mode */ 307#define SUCMD_HANDLETEXTMODE 0x08 /* Reset/destroy text mode */
310 308
311#define SUCMD_SETMODE 0x09 /* Set a display mode (data3 = SiS mode) */ 309#define SUCMD_SETMODE 0x09 /* Set a display mode (data3 = SiS mode) */
312#define SUCMD_SETVESAMODE 0x0a /* Set a display mode (data3 = VESA mode) */ 310#define SUCMD_SETVESAMODE 0x0a /* Set a display mode (data3 = VESA mode) */
@@ -315,6 +313,4 @@ struct sisusb_command {
315#define SISUSB_GET_CONFIG_SIZE _IOR(0xF3,0x3E,__u32) 313#define SISUSB_GET_CONFIG_SIZE _IOR(0xF3,0x3E,__u32)
316#define SISUSB_GET_CONFIG _IOR(0xF3,0x3F,struct sisusb_info) 314#define SISUSB_GET_CONFIG _IOR(0xF3,0x3F,struct sisusb_info)
317 315
318
319#endif /* SISUSB_H */ 316#endif /* SISUSB_H */
320
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index 8d0edc867f..43722e5a49 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -52,6 +52,7 @@
52#include <linux/kernel.h> 52#include <linux/kernel.h>
53#include <linux/signal.h> 53#include <linux/signal.h>
54#include <linux/fs.h> 54#include <linux/fs.h>
55#include <linux/usb.h>
55#include <linux/tty.h> 56#include <linux/tty.h>
56#include <linux/console.h> 57#include <linux/console.h>
57#include <linux/string.h> 58#include <linux/string.h>
@@ -373,14 +374,6 @@ sisusbcon_putc(struct vc_data *c, int ch, int y, int x)
373 return; 374 return;
374 375
375 /* sisusb->lock is down */ 376 /* sisusb->lock is down */
376
377 /* Don't need to put the character into buffer ourselves,
378 * because the vt does this BEFORE calling us.
379 */
380#if 0
381 sisusbcon_writew(ch, SISUSB_VADDR(x, y));
382#endif
383
384 if (sisusb_is_inactive(c, sisusb)) { 377 if (sisusb_is_inactive(c, sisusb)) {
385 mutex_unlock(&sisusb->lock); 378 mutex_unlock(&sisusb->lock);
386 return; 379 return;
@@ -490,10 +483,6 @@ sisusbcon_bmove(struct vc_data *c, int sy, int sx,
490 struct sisusb_usb_data *sisusb; 483 struct sisusb_usb_data *sisusb;
491 ssize_t written; 484 ssize_t written;
492 int cols, length; 485 int cols, length;
493#if 0
494 u16 *src, *dest;
495 int i;
496#endif
497 486
498 if (width <= 0 || height <= 0) 487 if (width <= 0 || height <= 0)
499 return; 488 return;
@@ -505,41 +494,6 @@ sisusbcon_bmove(struct vc_data *c, int sy, int sx,
505 494
506 cols = sisusb->sisusb_num_columns; 495 cols = sisusb->sisusb_num_columns;
507 496
508 /* Don't need to move data outselves, because
509 * vt does this BEFORE calling us.
510 * This is only used by vt's insert/deletechar.
511 */
512#if 0
513 if (sx == 0 && dx == 0 && width >= c->vc_cols && width <= cols) {
514
515 sisusbcon_memmovew(SISUSB_VADDR(0, dy), SISUSB_VADDR(0, sy),
516 height * width * 2);
517
518 } else if (dy < sy || (dy == sy && dx < sx)) {
519
520 src = SISUSB_VADDR(sx, sy);
521 dest = SISUSB_VADDR(dx, dy);
522
523 for (i = height; i > 0; i--) {
524 sisusbcon_memmovew(dest, src, width * 2);
525 src += cols;
526 dest += cols;
527 }
528
529 } else {
530
531 src = SISUSB_VADDR(sx, sy + height - 1);
532 dest = SISUSB_VADDR(dx, dy + height - 1);
533
534 for (i = height; i > 0; i--) {
535 sisusbcon_memmovew(dest, src, width * 2);
536 src -= cols;
537 dest -= cols;
538 }
539
540 }
541#endif
542
543 if (sisusb_is_inactive(c, sisusb)) { 497 if (sisusb_is_inactive(c, sisusb)) {
544 mutex_unlock(&sisusb->lock); 498 mutex_unlock(&sisusb->lock);
545 return; 499 return;
@@ -584,7 +538,7 @@ sisusbcon_switch(struct vc_data *c)
584 */ 538 */
585 if (c->vc_origin == (unsigned long)c->vc_screenbuf) { 539 if (c->vc_origin == (unsigned long)c->vc_screenbuf) {
586 mutex_unlock(&sisusb->lock); 540 mutex_unlock(&sisusb->lock);
587 printk(KERN_DEBUG "sisusb: ASSERT ORIGIN != SCREENBUF!\n"); 541 dev_dbg(&sisusb->sisusb_dev->dev, "ASSERT ORIGIN != SCREENBUF!\n");
588 return 0; 542 return 0;
589 } 543 }
590 544
@@ -1475,7 +1429,7 @@ static const struct consw sisusb_dummy_con = {
1475int 1429int
1476sisusb_console_init(struct sisusb_usb_data *sisusb, int first, int last) 1430sisusb_console_init(struct sisusb_usb_data *sisusb, int first, int last)
1477{ 1431{
1478 int i, ret, minor = sisusb->minor; 1432 int i, ret;
1479 1433
1480 mutex_lock(&sisusb->lock); 1434 mutex_lock(&sisusb->lock);
1481 1435
@@ -1508,9 +1462,7 @@ sisusb_console_init(struct sisusb_usb_data *sisusb, int first, int last)
1508 /* Set up text mode (and upload default font) */ 1462 /* Set up text mode (and upload default font) */
1509 if (sisusb_reset_text_mode(sisusb, 1)) { 1463 if (sisusb_reset_text_mode(sisusb, 1)) {
1510 mutex_unlock(&sisusb->lock); 1464 mutex_unlock(&sisusb->lock);
1511 printk(KERN_ERR 1465 dev_err(&sisusb->sisusb_dev->dev, "Failed to set up text mode\n");
1512 "sisusbvga[%d]: Failed to set up text mode\n",
1513 minor);
1514 return 1; 1466 return 1;
1515 } 1467 }
1516 1468
@@ -1531,9 +1483,7 @@ sisusb_console_init(struct sisusb_usb_data *sisusb, int first, int last)
1531 /* Allocate screen buffer */ 1483 /* Allocate screen buffer */
1532 if (!(sisusb->scrbuf = (unsigned long)vmalloc(sisusb->scrbuf_size))) { 1484 if (!(sisusb->scrbuf = (unsigned long)vmalloc(sisusb->scrbuf_size))) {
1533 mutex_unlock(&sisusb->lock); 1485 mutex_unlock(&sisusb->lock);
1534 printk(KERN_ERR 1486 dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate screen buffer\n");
1535 "sisusbvga[%d]: Failed to allocate screen buffer\n",
1536 minor);
1537 return 1; 1487 return 1;
1538 } 1488 }
1539 1489
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.c b/drivers/usb/misc/sisusbvga/sisusb_init.c
index 9b30f89628..273de5d093 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.c
@@ -32,7 +32,7 @@
32 * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
33 * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 * 34 *
35 * Author: Thomas Winischhofer <thomas@winischhofer.net> 35 * Author: Thomas Winischhofer <thomas@winischhofer.net>
36 * 36 *
37 */ 37 */
38 38
@@ -55,109 +55,18 @@
55/* POINTER INITIALIZATION */ 55/* POINTER INITIALIZATION */
56/*********************************************/ 56/*********************************************/
57 57
58static void 58static void SiSUSB_InitPtr(struct SiS_Private *SiS_Pr)
59SiSUSB_InitPtr(struct SiS_Private *SiS_Pr)
60{ 59{
61 SiS_Pr->SiS_ModeResInfo = SiSUSB_ModeResInfo; 60 SiS_Pr->SiS_ModeResInfo = SiSUSB_ModeResInfo;
62 SiS_Pr->SiS_StandTable = SiSUSB_StandTable; 61 SiS_Pr->SiS_StandTable = SiSUSB_StandTable;
63
64 SiS_Pr->SiS_SModeIDTable = SiSUSB_SModeIDTable;
65 SiS_Pr->SiS_EModeIDTable = SiSUSB_EModeIDTable;
66 SiS_Pr->SiS_RefIndex = SiSUSB_RefIndex;
67 SiS_Pr->SiS_CRT1Table = SiSUSB_CRT1Table;
68
69 SiS_Pr->SiS_VCLKData = SiSUSB_VCLKData;
70}
71
72/*********************************************/
73/* HELPER: Get ModeID */
74/*********************************************/
75 62
76#if 0 63 SiS_Pr->SiS_SModeIDTable = SiSUSB_SModeIDTable;
77unsigned short 64 SiS_Pr->SiS_EModeIDTable = SiSUSB_EModeIDTable;
78SiSUSB_GetModeID(int HDisplay, int VDisplay, int Depth) 65 SiS_Pr->SiS_RefIndex = SiSUSB_RefIndex;
79{ 66 SiS_Pr->SiS_CRT1Table = SiSUSB_CRT1Table;
80 unsigned short ModeIndex = 0;
81
82 switch (HDisplay)
83 {
84 case 320:
85 if (VDisplay == 200)
86 ModeIndex = ModeIndex_320x200[Depth];
87 else if (VDisplay == 240)
88 ModeIndex = ModeIndex_320x240[Depth];
89 break;
90 case 400:
91 if (VDisplay == 300)
92 ModeIndex = ModeIndex_400x300[Depth];
93 break;
94 case 512:
95 if (VDisplay == 384)
96 ModeIndex = ModeIndex_512x384[Depth];
97 break;
98 case 640:
99 if (VDisplay == 480)
100 ModeIndex = ModeIndex_640x480[Depth];
101 else if (VDisplay == 400)
102 ModeIndex = ModeIndex_640x400[Depth];
103 break;
104 case 720:
105 if (VDisplay == 480)
106 ModeIndex = ModeIndex_720x480[Depth];
107 else if (VDisplay == 576)
108 ModeIndex = ModeIndex_720x576[Depth];
109 break;
110 case 768:
111 if (VDisplay == 576)
112 ModeIndex = ModeIndex_768x576[Depth];
113 break;
114 case 800:
115 if (VDisplay == 600)
116 ModeIndex = ModeIndex_800x600[Depth];
117 else if (VDisplay == 480)
118 ModeIndex = ModeIndex_800x480[Depth];
119 break;
120 case 848:
121 if (VDisplay == 480)
122 ModeIndex = ModeIndex_848x480[Depth];
123 break;
124 case 856:
125 if (VDisplay == 480)
126 ModeIndex = ModeIndex_856x480[Depth];
127 break;
128 case 960:
129 if (VDisplay == 540)
130 ModeIndex = ModeIndex_960x540[Depth];
131 else if (VDisplay == 600)
132 ModeIndex = ModeIndex_960x600[Depth];
133 break;
134 case 1024:
135 if (VDisplay == 576)
136 ModeIndex = ModeIndex_1024x576[Depth];
137 else if (VDisplay == 768)
138 ModeIndex = ModeIndex_1024x768[Depth];
139 break;
140 case 1152:
141 if (VDisplay == 864)
142 ModeIndex = ModeIndex_1152x864[Depth];
143 break;
144 case 1280:
145 switch (VDisplay) {
146 case 720:
147 ModeIndex = ModeIndex_1280x720[Depth];
148 break;
149 case 768:
150 ModeIndex = ModeIndex_1280x768[Depth];
151 break;
152 case 1024:
153 ModeIndex = ModeIndex_1280x1024[Depth];
154 break;
155 }
156 }
157 67
158 return ModeIndex; 68 SiS_Pr->SiS_VCLKData = SiSUSB_VCLKData;
159} 69}
160#endif /* 0 */
161 70
162/*********************************************/ 71/*********************************************/
163/* HELPER: SetReg, GetReg */ 72/* HELPER: SetReg, GetReg */
@@ -165,21 +74,20 @@ SiSUSB_GetModeID(int HDisplay, int VDisplay, int Depth)
165 74
166static void 75static void
167SiS_SetReg(struct SiS_Private *SiS_Pr, unsigned long port, 76SiS_SetReg(struct SiS_Private *SiS_Pr, unsigned long port,
168 unsigned short index, unsigned short data) 77 unsigned short index, unsigned short data)
169{ 78{
170 sisusb_setidxreg(SiS_Pr->sisusb, port, index, data); 79 sisusb_setidxreg(SiS_Pr->sisusb, port, index, data);
171} 80}
172 81
173static void 82static void
174SiS_SetRegByte(struct SiS_Private *SiS_Pr, unsigned long port, 83SiS_SetRegByte(struct SiS_Private *SiS_Pr, unsigned long port,
175 unsigned short data) 84 unsigned short data)
176{ 85{
177 sisusb_setreg(SiS_Pr->sisusb, port, data); 86 sisusb_setreg(SiS_Pr->sisusb, port, data);
178} 87}
179 88
180static unsigned char 89static unsigned char
181SiS_GetReg(struct SiS_Private *SiS_Pr, unsigned long port, 90SiS_GetReg(struct SiS_Private *SiS_Pr, unsigned long port, unsigned short index)
182 unsigned short index)
183{ 91{
184 u8 data; 92 u8 data;
185 93
@@ -200,22 +108,22 @@ SiS_GetRegByte(struct SiS_Private *SiS_Pr, unsigned long port)
200 108
201static void 109static void
202SiS_SetRegANDOR(struct SiS_Private *SiS_Pr, unsigned long port, 110SiS_SetRegANDOR(struct SiS_Private *SiS_Pr, unsigned long port,
203 unsigned short index, unsigned short DataAND, 111 unsigned short index, unsigned short DataAND,
204 unsigned short DataOR) 112 unsigned short DataOR)
205{ 113{
206 sisusb_setidxregandor(SiS_Pr->sisusb, port, index, DataAND, DataOR); 114 sisusb_setidxregandor(SiS_Pr->sisusb, port, index, DataAND, DataOR);
207} 115}
208 116
209static void 117static void
210SiS_SetRegAND(struct SiS_Private *SiS_Pr, unsigned long port, 118SiS_SetRegAND(struct SiS_Private *SiS_Pr, unsigned long port,
211 unsigned short index, unsigned short DataAND) 119 unsigned short index, unsigned short DataAND)
212{ 120{
213 sisusb_setidxregand(SiS_Pr->sisusb, port, index, DataAND); 121 sisusb_setidxregand(SiS_Pr->sisusb, port, index, DataAND);
214} 122}
215 123
216static void 124static void
217SiS_SetRegOR(struct SiS_Private *SiS_Pr,unsigned long port, 125SiS_SetRegOR(struct SiS_Private *SiS_Pr, unsigned long port,
218 unsigned short index, unsigned short DataOR) 126 unsigned short index, unsigned short DataOR)
219{ 127{
220 sisusb_setidxregor(SiS_Pr->sisusb, port, index, DataOR); 128 sisusb_setidxregor(SiS_Pr->sisusb, port, index, DataOR);
221} 129}
@@ -224,8 +132,7 @@ SiS_SetRegOR(struct SiS_Private *SiS_Pr,unsigned long port,
224/* HELPER: DisplayOn, DisplayOff */ 132/* HELPER: DisplayOn, DisplayOff */
225/*********************************************/ 133/*********************************************/
226 134
227static void 135static void SiS_DisplayOn(struct SiS_Private *SiS_Pr)
228SiS_DisplayOn(struct SiS_Private *SiS_Pr)
229{ 136{
230 SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, 0xDF); 137 SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, 0xDF);
231} 138}
@@ -234,8 +141,7 @@ SiS_DisplayOn(struct SiS_Private *SiS_Pr)
234/* HELPER: Init Port Addresses */ 141/* HELPER: Init Port Addresses */
235/*********************************************/ 142/*********************************************/
236 143
237static void 144static void SiSUSBRegInit(struct SiS_Private *SiS_Pr, unsigned long BaseAddr)
238SiSUSBRegInit(struct SiS_Private *SiS_Pr, unsigned long BaseAddr)
239{ 145{
240 SiS_Pr->SiS_P3c4 = BaseAddr + 0x14; 146 SiS_Pr->SiS_P3c4 = BaseAddr + 0x14;
241 SiS_Pr->SiS_P3d4 = BaseAddr + 0x24; 147 SiS_Pr->SiS_P3d4 = BaseAddr + 0x24;
@@ -258,8 +164,7 @@ SiSUSBRegInit(struct SiS_Private *SiS_Pr, unsigned long BaseAddr)
258/* HELPER: GetSysFlags */ 164/* HELPER: GetSysFlags */
259/*********************************************/ 165/*********************************************/
260 166
261static void 167static void SiS_GetSysFlags(struct SiS_Private *SiS_Pr)
262SiS_GetSysFlags(struct SiS_Private *SiS_Pr)
263{ 168{
264 SiS_Pr->SiS_MyCR63 = 0x63; 169 SiS_Pr->SiS_MyCR63 = 0x63;
265} 170}
@@ -268,8 +173,7 @@ SiS_GetSysFlags(struct SiS_Private *SiS_Pr)
268/* HELPER: Init PCI & Engines */ 173/* HELPER: Init PCI & Engines */
269/*********************************************/ 174/*********************************************/
270 175
271static void 176static void SiSInitPCIetc(struct SiS_Private *SiS_Pr)
272SiSInitPCIetc(struct SiS_Private *SiS_Pr)
273{ 177{
274 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x20, 0xa1); 178 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x20, 0xa1);
275 /* - Enable 2D (0x40) 179 /* - Enable 2D (0x40)
@@ -285,8 +189,7 @@ SiSInitPCIetc(struct SiS_Private *SiS_Pr)
285/* HELPER: SET SEGMENT REGISTERS */ 189/* HELPER: SET SEGMENT REGISTERS */
286/*********************************************/ 190/*********************************************/
287 191
288static void 192static void SiS_SetSegRegLower(struct SiS_Private *SiS_Pr, unsigned short value)
289SiS_SetSegRegLower(struct SiS_Private *SiS_Pr, unsigned short value)
290{ 193{
291 unsigned short temp; 194 unsigned short temp;
292 195
@@ -299,8 +202,7 @@ SiS_SetSegRegLower(struct SiS_Private *SiS_Pr, unsigned short value)
299 SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd, temp); 202 SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd, temp);
300} 203}
301 204
302static void 205static void SiS_SetSegRegUpper(struct SiS_Private *SiS_Pr, unsigned short value)
303SiS_SetSegRegUpper(struct SiS_Private *SiS_Pr, unsigned short value)
304{ 206{
305 unsigned short temp; 207 unsigned short temp;
306 208
@@ -313,15 +215,13 @@ SiS_SetSegRegUpper(struct SiS_Private *SiS_Pr, unsigned short value)
313 SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd, temp); 215 SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd, temp);
314} 216}
315 217
316static void 218static void SiS_SetSegmentReg(struct SiS_Private *SiS_Pr, unsigned short value)
317SiS_SetSegmentReg(struct SiS_Private *SiS_Pr, unsigned short value)
318{ 219{
319 SiS_SetSegRegLower(SiS_Pr, value); 220 SiS_SetSegRegLower(SiS_Pr, value);
320 SiS_SetSegRegUpper(SiS_Pr, value); 221 SiS_SetSegRegUpper(SiS_Pr, value);
321} 222}
322 223
323static void 224static void SiS_ResetSegmentReg(struct SiS_Private *SiS_Pr)
324SiS_ResetSegmentReg(struct SiS_Private *SiS_Pr)
325{ 225{
326 SiS_SetSegmentReg(SiS_Pr, 0); 226 SiS_SetSegmentReg(SiS_Pr, 0);
327} 227}
@@ -337,14 +237,12 @@ SiS_SetSegmentRegOver(struct SiS_Private *SiS_Pr, unsigned short value)
337 SiS_SetSegmentReg(SiS_Pr, value); 237 SiS_SetSegmentReg(SiS_Pr, value);
338} 238}
339 239
340static void 240static void SiS_ResetSegmentRegOver(struct SiS_Private *SiS_Pr)
341SiS_ResetSegmentRegOver(struct SiS_Private *SiS_Pr)
342{ 241{
343 SiS_SetSegmentRegOver(SiS_Pr, 0); 242 SiS_SetSegmentRegOver(SiS_Pr, 0);
344} 243}
345 244
346static void 245static void SiS_ResetSegmentRegisters(struct SiS_Private *SiS_Pr)
347SiS_ResetSegmentRegisters(struct SiS_Private *SiS_Pr)
348{ 246{
349 SiS_ResetSegmentReg(SiS_Pr); 247 SiS_ResetSegmentReg(SiS_Pr);
350 SiS_ResetSegmentRegOver(SiS_Pr); 248 SiS_ResetSegmentRegOver(SiS_Pr);
@@ -356,7 +254,7 @@ SiS_ResetSegmentRegisters(struct SiS_Private *SiS_Pr)
356 254
357static int 255static int
358SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo, 256SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo,
359 unsigned short *ModeIdIndex) 257 unsigned short *ModeIdIndex)
360{ 258{
361 if ((*ModeNo) <= 0x13) { 259 if ((*ModeNo) <= 0x13) {
362 260
@@ -367,12 +265,14 @@ SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo,
367 265
368 } else { 266 } else {
369 267
370 for(*ModeIdIndex = 0; ;(*ModeIdIndex)++) { 268 for (*ModeIdIndex = 0;; (*ModeIdIndex)++) {
371 269
372 if (SiS_Pr->SiS_EModeIDTable[*ModeIdIndex].Ext_ModeID == (*ModeNo)) 270 if (SiS_Pr->SiS_EModeIDTable[*ModeIdIndex].Ext_ModeID ==
271 (*ModeNo))
373 break; 272 break;
374 273
375 if (SiS_Pr->SiS_EModeIDTable[*ModeIdIndex].Ext_ModeID == 0xFF) 274 if (SiS_Pr->SiS_EModeIDTable[*ModeIdIndex].Ext_ModeID ==
275 0xFF)
376 return 0; 276 return 0;
377 } 277 }
378 278
@@ -385,8 +285,7 @@ SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo,
385/* HELPER: ENABLE CRT1 */ 285/* HELPER: ENABLE CRT1 */
386/*********************************************/ 286/*********************************************/
387 287
388static void 288static void SiS_HandleCRT1(struct SiS_Private *SiS_Pr)
389SiS_HandleCRT1(struct SiS_Private *SiS_Pr)
390{ 289{
391 /* Enable CRT1 gating */ 290 /* Enable CRT1 gating */
392 SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, SiS_Pr->SiS_MyCR63, 0xbf); 291 SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, SiS_Pr->SiS_MyCR63, 0xbf);
@@ -398,9 +297,9 @@ SiS_HandleCRT1(struct SiS_Private *SiS_Pr)
398 297
399static unsigned short 298static unsigned short
400SiS_GetColorDepth(struct SiS_Private *SiS_Pr, unsigned short ModeNo, 299SiS_GetColorDepth(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
401 unsigned short ModeIdIndex) 300 unsigned short ModeIdIndex)
402{ 301{
403 static const unsigned short ColorDepth[6] = { 1, 2, 4, 4, 6, 8}; 302 static const unsigned short ColorDepth[6] = { 1, 2, 4, 4, 6, 8 };
404 unsigned short modeflag; 303 unsigned short modeflag;
405 short index; 304 short index;
406 305
@@ -411,7 +310,8 @@ SiS_GetColorDepth(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
411 } 310 }
412 311
413 index = (modeflag & ModeTypeMask) - ModeEGA; 312 index = (modeflag & ModeTypeMask) - ModeEGA;
414 if (index < 0) index = 0; 313 if (index < 0)
314 index = 0;
415 return ColorDepth[index]; 315 return ColorDepth[index];
416} 316}
417 317
@@ -421,7 +321,7 @@ SiS_GetColorDepth(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
421 321
422static unsigned short 322static unsigned short
423SiS_GetOffset(struct SiS_Private *SiS_Pr, unsigned short ModeNo, 323SiS_GetOffset(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
424 unsigned short ModeIdIndex, unsigned short rrti) 324 unsigned short ModeIdIndex, unsigned short rrti)
425{ 325{
426 unsigned short xres, temp, colordepth, infoflag; 326 unsigned short xres, temp, colordepth, infoflag;
427 327
@@ -458,8 +358,8 @@ SiS_SetSeqRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
458 SRdata = SiS_Pr->SiS_StandTable[StandTableIndex].SR[0] | 0x20; 358 SRdata = SiS_Pr->SiS_StandTable[StandTableIndex].SR[0] | 0x20;
459 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, SRdata); 359 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, SRdata);
460 360
461 for(i = 2; i <= 4; i++) { 361 for (i = 2; i <= 4; i++) {
462 SRdata = SiS_Pr->SiS_StandTable[StandTableIndex].SR[i-1]; 362 SRdata = SiS_Pr->SiS_StandTable[StandTableIndex].SR[i - 1];
463 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, i, SRdata); 363 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, i, SRdata);
464 } 364 }
465} 365}
@@ -488,7 +388,7 @@ SiS_SetCRTCRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
488 388
489 SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, 0x11, 0x7f); 389 SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, 0x11, 0x7f);
490 390
491 for(i = 0; i <= 0x18; i++) { 391 for (i = 0; i <= 0x18; i++) {
492 CRTCdata = SiS_Pr->SiS_StandTable[StandTableIndex].CRTC[i]; 392 CRTCdata = SiS_Pr->SiS_StandTable[StandTableIndex].CRTC[i];
493 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, i, CRTCdata); 393 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, i, CRTCdata);
494 } 394 }
@@ -504,7 +404,7 @@ SiS_SetATTRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
504 unsigned char ARdata; 404 unsigned char ARdata;
505 unsigned short i; 405 unsigned short i;
506 406
507 for(i = 0; i <= 0x13; i++) { 407 for (i = 0; i <= 0x13; i++) {
508 ARdata = SiS_Pr->SiS_StandTable[StandTableIndex].ATTR[i]; 408 ARdata = SiS_Pr->SiS_StandTable[StandTableIndex].ATTR[i];
509 SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da); 409 SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);
510 SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, i); 410 SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, i);
@@ -529,7 +429,7 @@ SiS_SetGRCRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
529 unsigned char GRdata; 429 unsigned char GRdata;
530 unsigned short i; 430 unsigned short i;
531 431
532 for(i = 0; i <= 0x08; i++) { 432 for (i = 0; i <= 0x08; i++) {
533 GRdata = SiS_Pr->SiS_StandTable[StandTableIndex].GRC[i]; 433 GRdata = SiS_Pr->SiS_StandTable[StandTableIndex].GRC[i];
534 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3ce, i, GRdata); 434 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3ce, i, GRdata);
535 } 435 }
@@ -544,12 +444,11 @@ SiS_SetGRCRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
544/* CLEAR EXTENDED REGISTERS */ 444/* CLEAR EXTENDED REGISTERS */
545/*********************************************/ 445/*********************************************/
546 446
547static void 447static void SiS_ClearExt1Regs(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
548SiS_ClearExt1Regs(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
549{ 448{
550 int i; 449 int i;
551 450
552 for(i = 0x0A; i <= 0x0E; i++) { 451 for (i = 0x0A; i <= 0x0E; i++) {
553 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, i, 0x00); 452 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, i, 0x00);
554 } 453 }
555 454
@@ -562,15 +461,16 @@ SiS_ClearExt1Regs(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
562 461
563static unsigned short 462static unsigned short
564SiS_GetRatePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, 463SiS_GetRatePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
565 unsigned short ModeIdIndex) 464 unsigned short ModeIdIndex)
566{ 465{
567 unsigned short rrti, i, index, temp; 466 unsigned short rrti, i, index, temp;
568 467
569 if (ModeNo <= 0x13) 468 if (ModeNo <= 0x13)
570 return 0xFFFF; 469 return 0xFFFF;
571 470
572 index = SiS_GetReg(SiS_Pr,SiS_Pr->SiS_P3d4, 0x33) & 0x0F; 471 index = SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x33) & 0x0F;
573 if (index > 0) index--; 472 if (index > 0)
473 index--;
574 474
575 rrti = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].REFindex; 475 rrti = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].REFindex;
576 ModeNo = SiS_Pr->SiS_RefIndex[rrti].ModeID; 476 ModeNo = SiS_Pr->SiS_RefIndex[rrti].ModeID;
@@ -580,13 +480,14 @@ SiS_GetRatePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
580 if (SiS_Pr->SiS_RefIndex[rrti + i].ModeID != ModeNo) 480 if (SiS_Pr->SiS_RefIndex[rrti + i].ModeID != ModeNo)
581 break; 481 break;
582 482
583 temp = SiS_Pr->SiS_RefIndex[rrti + i].Ext_InfoFlag & ModeTypeMask; 483 temp =
484 SiS_Pr->SiS_RefIndex[rrti + i].Ext_InfoFlag & ModeTypeMask;
584 if (temp < SiS_Pr->SiS_ModeType) 485 if (temp < SiS_Pr->SiS_ModeType)
585 break; 486 break;
586 487
587 i++; 488 i++;
588 index--; 489 index--;
589 } while(index != 0xFFFF); 490 } while (index != 0xFFFF);
590 491
591 i--; 492 i--;
592 493
@@ -597,8 +498,7 @@ SiS_GetRatePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
597/* SYNC */ 498/* SYNC */
598/*********************************************/ 499/*********************************************/
599 500
600static void 501static void SiS_SetCRT1Sync(struct SiS_Private *SiS_Pr, unsigned short rrti)
601SiS_SetCRT1Sync(struct SiS_Private *SiS_Pr, unsigned short rrti)
602{ 502{
603 unsigned short sync = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag >> 8; 503 unsigned short sync = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag >> 8;
604 sync &= 0xC0; 504 sync &= 0xC0;
@@ -612,39 +512,40 @@ SiS_SetCRT1Sync(struct SiS_Private *SiS_Pr, unsigned short rrti)
612 512
613static void 513static void
614SiS_SetCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, 514SiS_SetCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
615 unsigned short ModeIdIndex, unsigned short rrti) 515 unsigned short ModeIdIndex, unsigned short rrti)
616{ 516{
617 unsigned char index; 517 unsigned char index;
618 unsigned short temp, i, j, modeflag; 518 unsigned short temp, i, j, modeflag;
619 519
620 SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4,0x11,0x7f); 520 SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, 0x11, 0x7f);
621 521
622 modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; 522 modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
623 523
624 index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRT1CRTC; 524 index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRT1CRTC;
625 525
626 for(i = 0,j = 0; i <= 7; i++, j++) { 526 for (i = 0, j = 0; i <= 7; i++, j++) {
627 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j, 527 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j,
628 SiS_Pr->SiS_CRT1Table[index].CR[i]); 528 SiS_Pr->SiS_CRT1Table[index].CR[i]);
629 } 529 }
630 for(j = 0x10; i <= 10; i++, j++) { 530 for (j = 0x10; i <= 10; i++, j++) {
631 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j, 531 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j,
632 SiS_Pr->SiS_CRT1Table[index].CR[i]); 532 SiS_Pr->SiS_CRT1Table[index].CR[i]);
633 } 533 }
634 for(j = 0x15; i <= 12; i++, j++) { 534 for (j = 0x15; i <= 12; i++, j++) {
635 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j, 535 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j,
636 SiS_Pr->SiS_CRT1Table[index].CR[i]); 536 SiS_Pr->SiS_CRT1Table[index].CR[i]);
637 } 537 }
638 for(j = 0x0A; i <= 15; i++, j++) { 538 for (j = 0x0A; i <= 15; i++, j++) {
639 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, j, 539 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, j,
640 SiS_Pr->SiS_CRT1Table[index].CR[i]); 540 SiS_Pr->SiS_CRT1Table[index].CR[i]);
641 } 541 }
642 542
643 temp = SiS_Pr->SiS_CRT1Table[index].CR[16] & 0xE0; 543 temp = SiS_Pr->SiS_CRT1Table[index].CR[16] & 0xE0;
644 SiS_SetReg(SiS_Pr,SiS_Pr->SiS_P3c4, 0x0E, temp); 544 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0E, temp);
645 545
646 temp = ((SiS_Pr->SiS_CRT1Table[index].CR[16]) & 0x01) << 5; 546 temp = ((SiS_Pr->SiS_CRT1Table[index].CR[16]) & 0x01) << 5;
647 if (modeflag & DoubleScanMode) temp |= 0x80; 547 if (modeflag & DoubleScanMode)
548 temp |= 0x80;
648 SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3d4, 0x09, 0x5F, temp); 549 SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3d4, 0x09, 0x5F, temp);
649 550
650 if (SiS_Pr->SiS_ModeType > ModeVGA) 551 if (SiS_Pr->SiS_ModeType > ModeVGA)
@@ -659,10 +560,10 @@ SiS_SetCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
659 560
660static void 561static void
661SiS_SetCRT1Offset(struct SiS_Private *SiS_Pr, unsigned short ModeNo, 562SiS_SetCRT1Offset(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
662 unsigned short ModeIdIndex, unsigned short rrti) 563 unsigned short ModeIdIndex, unsigned short rrti)
663{ 564{
664 unsigned short du = SiS_GetOffset(SiS_Pr, ModeNo, ModeIdIndex, rrti); 565 unsigned short du = SiS_GetOffset(SiS_Pr, ModeNo, ModeIdIndex, rrti);
665 unsigned short infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag; 566 unsigned short infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag;
666 unsigned short temp; 567 unsigned short temp;
667 568
668 temp = (du >> 8) & 0x0f; 569 temp = (du >> 8) & 0x0f;
@@ -670,11 +571,13 @@ SiS_SetCRT1Offset(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
670 571
671 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x13, (du & 0xFF)); 572 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x13, (du & 0xFF));
672 573
673 if (infoflag & InterlaceMode) du >>= 1; 574 if (infoflag & InterlaceMode)
575 du >>= 1;
674 576
675 du <<= 5; 577 du <<= 5;
676 temp = (du >> 8) & 0xff; 578 temp = (du >> 8) & 0xff;
677 if (du & 0xff) temp++; 579 if (du & 0xff)
580 temp++;
678 temp++; 581 temp++;
679 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x10, temp); 582 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x10, temp);
680} 583}
@@ -685,17 +588,17 @@ SiS_SetCRT1Offset(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
685 588
686static void 589static void
687SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo, 590SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
688 unsigned short rrti) 591 unsigned short rrti)
689{ 592{
690 unsigned short index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRTVCLK; 593 unsigned short index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRTVCLK;
691 unsigned short clka = SiS_Pr->SiS_VCLKData[index].SR2B; 594 unsigned short clka = SiS_Pr->SiS_VCLKData[index].SR2B;
692 unsigned short clkb = SiS_Pr->SiS_VCLKData[index].SR2C; 595 unsigned short clkb = SiS_Pr->SiS_VCLKData[index].SR2C;
693 596
694 SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4,0x31,0xCF); 597 SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x31, 0xCF);
695 598
696 SiS_SetReg(SiS_Pr,SiS_Pr->SiS_P3c4,0x2B,clka); 599 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2B, clka);
697 SiS_SetReg(SiS_Pr,SiS_Pr->SiS_P3c4,0x2C,clkb); 600 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2C, clkb);
698 SiS_SetReg(SiS_Pr,SiS_Pr->SiS_P3c4,0x2D,0x01); 601 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2D, 0x01);
699} 602}
700 603
701/*********************************************/ 604/*********************************************/
@@ -704,7 +607,7 @@ SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
704 607
705static void 608static void
706SiS_SetCRT1FIFO_310(struct SiS_Private *SiS_Pr, unsigned short ModeNo, 609SiS_SetCRT1FIFO_310(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
707 unsigned short mi) 610 unsigned short mi)
708{ 611{
709 unsigned short modeflag = SiS_Pr->SiS_EModeIDTable[mi].Ext_ModeFlag; 612 unsigned short modeflag = SiS_Pr->SiS_EModeIDTable[mi].Ext_ModeFlag;
710 613
@@ -729,7 +632,7 @@ SiS_SetCRT1FIFO_310(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
729 632
730static void 633static void
731SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo, 634SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
732 unsigned short rrti) 635 unsigned short rrti)
733{ 636{
734 unsigned short data = 0, VCLK = 0, index = 0; 637 unsigned short data = 0, VCLK = 0, index = 0;
735 638
@@ -738,7 +641,8 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
738 VCLK = SiS_Pr->SiS_VCLKData[index].CLOCK; 641 VCLK = SiS_Pr->SiS_VCLKData[index].CLOCK;
739 } 642 }
740 643
741 if (VCLK >= 166) data |= 0x0c; 644 if (VCLK >= 166)
645 data |= 0x0c;
742 SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x32, 0xf3, data); 646 SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x32, 0xf3, data);
743 647
744 if (VCLK >= 166) 648 if (VCLK >= 166)
@@ -758,7 +662,7 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
758 662
759static void 663static void
760SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, 664SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
761 unsigned short ModeIdIndex, unsigned short rrti) 665 unsigned short ModeIdIndex, unsigned short rrti)
762{ 666{
763 unsigned short data, infoflag = 0, modeflag; 667 unsigned short data, infoflag = 0, modeflag;
764 668
@@ -778,17 +682,22 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
778 data |= 0x02; 682 data |= 0x02;
779 data |= ((SiS_Pr->SiS_ModeType - ModeVGA) << 2); 683 data |= ((SiS_Pr->SiS_ModeType - ModeVGA) << 2);
780 } 684 }
781 if (infoflag & InterlaceMode) data |= 0x20; 685 if (infoflag & InterlaceMode)
686 data |= 0x20;
782 } 687 }
783 SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x06, 0xC0, data); 688 SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x06, 0xC0, data);
784 689
785 data = 0; 690 data = 0;
786 if (infoflag & InterlaceMode) { 691 if (infoflag & InterlaceMode) {
787 /* data = (Hsync / 8) - ((Htotal / 8) / 2) + 3 */ 692 /* data = (Hsync / 8) - ((Htotal / 8) / 2) + 3 */
788 unsigned short hrs = (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x04) | 693 unsigned short hrs =
789 ((SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0b) & 0xc0) << 2)) - 3; 694 (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x04) |
790 unsigned short hto = (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x00) | 695 ((SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0b) & 0xc0) << 2))
791 ((SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0b) & 0x03) << 8)) + 5; 696 - 3;
697 unsigned short hto =
698 (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x00) |
699 ((SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0b) & 0x03) << 8))
700 + 5;
792 data = hrs - (hto >> 1) + 3; 701 data = hrs - (hto >> 1) + 3;
793 } 702 }
794 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x19, (data & 0xFF)); 703 SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x19, (data & 0xFF));
@@ -829,20 +738,26 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
829 738
830static void 739static void
831SiS_WriteDAC(struct SiS_Private *SiS_Pr, unsigned long DACData, 740SiS_WriteDAC(struct SiS_Private *SiS_Pr, unsigned long DACData,
832 unsigned short shiftflag, unsigned short dl, unsigned short ah, 741 unsigned short shiftflag, unsigned short dl, unsigned short ah,
833 unsigned short al, unsigned short dh) 742 unsigned short al, unsigned short dh)
834{ 743{
835 unsigned short d1, d2, d3; 744 unsigned short d1, d2, d3;
836 745
837 switch (dl) { 746 switch (dl) {
838 case 0: 747 case 0:
839 d1 = dh; d2 = ah; d3 = al; 748 d1 = dh;
840 break; 749 d2 = ah;
841 case 1: 750 d3 = al;
842 d1 = ah; d2 = al; d3 = dh; 751 break;
843 break; 752 case 1:
844 default: 753 d1 = ah;
845 d1 = al; d2 = dh; d3 = ah; 754 d2 = al;
755 d3 = dh;
756 break;
757 default:
758 d1 = al;
759 d2 = dh;
760 d3 = ah;
846 } 761 }
847 SiS_SetRegByte(SiS_Pr, DACData, (d1 << shiftflag)); 762 SiS_SetRegByte(SiS_Pr, DACData, (d1 << shiftflag));
848 SiS_SetRegByte(SiS_Pr, DACData, (d2 << shiftflag)); 763 SiS_SetRegByte(SiS_Pr, DACData, (d2 << shiftflag));
@@ -850,7 +765,8 @@ SiS_WriteDAC(struct SiS_Private *SiS_Pr, unsigned long DACData,
850} 765}
851 766
852static void 767static void
853SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short mi) 768SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
769 unsigned short mi)
854{ 770{
855 unsigned short data, data2, time, i, j, k, m, n, o; 771 unsigned short data, data2, time, i, j, k, m, n, o;
856 unsigned short si, di, bx, sf; 772 unsigned short si, di, bx, sf;
@@ -884,41 +800,45 @@ SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short mi
884 800
885 SiS_SetRegByte(SiS_Pr, DACAddr, 0x00); 801 SiS_SetRegByte(SiS_Pr, DACAddr, 0x00);
886 802
887 for(i = 0; i < j; i++) { 803 for (i = 0; i < j; i++) {
888 data = table[i]; 804 data = table[i];
889 for(k = 0; k < 3; k++) { 805 for (k = 0; k < 3; k++) {
890 data2 = 0; 806 data2 = 0;
891 if (data & 0x01) data2 += 0x2A; 807 if (data & 0x01)
892 if (data & 0x02) data2 += 0x15; 808 data2 += 0x2A;
809 if (data & 0x02)
810 data2 += 0x15;
893 SiS_SetRegByte(SiS_Pr, DACData, (data2 << sf)); 811 SiS_SetRegByte(SiS_Pr, DACData, (data2 << sf));
894 data >>= 2; 812 data >>= 2;
895 } 813 }
896 } 814 }
897 815
898 if (time == 256) { 816 if (time == 256) {
899 for(i = 16; i < 32; i++) { 817 for (i = 16; i < 32; i++) {
900 data = table[i] << sf; 818 data = table[i] << sf;
901 for(k = 0; k < 3; k++) 819 for (k = 0; k < 3; k++)
902 SiS_SetRegByte(SiS_Pr, DACData, data); 820 SiS_SetRegByte(SiS_Pr, DACData, data);
903 } 821 }
904 si = 32; 822 si = 32;
905 for(m = 0; m < 9; m++) { 823 for (m = 0; m < 9; m++) {
906 di = si; 824 di = si;
907 bx = si + 4; 825 bx = si + 4;
908 for(n = 0; n < 3; n++) { 826 for (n = 0; n < 3; n++) {
909 for(o = 0; o < 5; o++) { 827 for (o = 0; o < 5; o++) {
910 SiS_WriteDAC(SiS_Pr, DACData, sf, n, 828 SiS_WriteDAC(SiS_Pr, DACData, sf, n,
911 table[di], table[bx], table[si]); 829 table[di], table[bx],
830 table[si]);
912 si++; 831 si++;
913 } 832 }
914 si -= 2; 833 si -= 2;
915 for(o = 0; o < 3; o++) { 834 for (o = 0; o < 3; o++) {
916 SiS_WriteDAC(SiS_Pr, DACData, sf, n, 835 SiS_WriteDAC(SiS_Pr, DACData, sf, n,
917 table[di], table[si], table[bx]); 836 table[di], table[si],
837 table[bx]);
918 si--; 838 si--;
919 } 839 }
920 } 840 }
921 si += 5; 841 si += 5;
922 } 842 }
923 } 843 }
924} 844}
@@ -929,7 +849,7 @@ SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short mi
929 849
930static void 850static void
931SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, 851SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
932 unsigned short ModeIdIndex) 852 unsigned short ModeIdIndex)
933{ 853{
934 unsigned short StandTableIndex, rrti; 854 unsigned short StandTableIndex, rrti;
935 855
@@ -970,11 +890,10 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
970/* SiSSetMode() */ 890/* SiSSetMode() */
971/*********************************************/ 891/*********************************************/
972 892
973int 893int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
974SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
975{ 894{
976 unsigned short ModeIdIndex; 895 unsigned short ModeIdIndex;
977 unsigned long BaseAddr = SiS_Pr->IOAddress; 896 unsigned long BaseAddr = SiS_Pr->IOAddress;
978 897
979 SiSUSB_InitPtr(SiS_Pr); 898 SiSUSB_InitPtr(SiS_Pr);
980 SiSUSBRegInit(SiS_Pr, BaseAddr); 899 SiSUSBRegInit(SiS_Pr, BaseAddr);
@@ -990,7 +909,7 @@ SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
990 ModeNo &= 0x7f; 909 ModeNo &= 0x7f;
991 910
992 SiS_Pr->SiS_ModeType = 911 SiS_Pr->SiS_ModeType =
993 SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag & ModeTypeMask; 912 SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag & ModeTypeMask;
994 913
995 SiS_Pr->SiS_SetFlag = LowModeTests; 914 SiS_Pr->SiS_SetFlag = LowModeTests;
996 915
@@ -1008,8 +927,7 @@ SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
1008 return 1; 927 return 1;
1009} 928}
1010 929
1011int 930int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo)
1012SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo)
1013{ 931{
1014 unsigned short ModeNo = 0; 932 unsigned short ModeNo = 0;
1015 int i; 933 int i;
@@ -1041,7 +959,3 @@ SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo)
1041} 959}
1042 960
1043#endif /* INCL_SISUSB_CON */ 961#endif /* INCL_SISUSB_CON */
1044
1045
1046
1047
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h
index 864bc0e965..c46ce42d44 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.h
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.h
@@ -46,7 +46,7 @@
46 * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 46 * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
47 * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 47 * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 * 48 *
49 * Author: Thomas Winischhofer <thomas@winischhofer.net> 49 * Author: Thomas Winischhofer <thomas@winischhofer.net>
50 * 50 *
51 */ 51 */
52 52
@@ -76,21 +76,21 @@
76#define CRT2Mode 0x0800 76#define CRT2Mode 0x0800
77#define HalfDCLK 0x1000 77#define HalfDCLK 0x1000
78#define NoSupportSimuTV 0x2000 78#define NoSupportSimuTV 0x2000
79#define NoSupportLCDScale 0x4000 /* SiS bridge: No scaling possible (no matter what panel) */ 79#define NoSupportLCDScale 0x4000 /* SiS bridge: No scaling possible (no matter what panel) */
80#define DoubleScanMode 0x8000 80#define DoubleScanMode 0x8000
81 81
82/* Infoflag */ 82/* Infoflag */
83#define SupportTV 0x0008 83#define SupportTV 0x0008
84#define SupportTV1024 0x0800 84#define SupportTV1024 0x0800
85#define SupportCHTV 0x0800 85#define SupportCHTV 0x0800
86#define Support64048060Hz 0x0800 /* Special for 640x480 LCD */ 86#define Support64048060Hz 0x0800 /* Special for 640x480 LCD */
87#define SupportHiVision 0x0010 87#define SupportHiVision 0x0010
88#define SupportYPbPr750p 0x1000 88#define SupportYPbPr750p 0x1000
89#define SupportLCD 0x0020 89#define SupportLCD 0x0020
90#define SupportRAMDAC2 0x0040 /* All (<= 100Mhz) */ 90#define SupportRAMDAC2 0x0040 /* All (<= 100Mhz) */
91#define SupportRAMDAC2_135 0x0100 /* All except DH (<= 135Mhz) */ 91#define SupportRAMDAC2_135 0x0100 /* All except DH (<= 135Mhz) */
92#define SupportRAMDAC2_162 0x0200 /* B, C (<= 162Mhz) */ 92#define SupportRAMDAC2_162 0x0200 /* B, C (<= 162Mhz) */
93#define SupportRAMDAC2_202 0x0400 /* C (<= 202Mhz) */ 93#define SupportRAMDAC2_202 0x0400 /* C (<= 202Mhz) */
94#define InterlaceMode 0x0080 94#define InterlaceMode 0x0080
95#define SyncPP 0x0000 95#define SyncPP 0x0000
96#define SyncPN 0x4000 96#define SyncPN 0x4000
@@ -129,7 +129,7 @@
129#define SIS_RI_856x480 19 129#define SIS_RI_856x480 19
130#define SIS_RI_1280x768 20 130#define SIS_RI_1280x768 20
131#define SIS_RI_1400x1050 21 131#define SIS_RI_1400x1050 21
132#define SIS_RI_1152x864 22 /* Up to here SiS conforming */ 132#define SIS_RI_1152x864 22 /* Up to here SiS conforming */
133#define SIS_RI_848x480 23 133#define SIS_RI_848x480 23
134#define SIS_RI_1360x768 24 134#define SIS_RI_1360x768 24
135#define SIS_RI_1024x600 25 135#define SIS_RI_1024x600 25
@@ -147,691 +147,691 @@
147#define SIS_CRT2_PORT_04 0x04 - 0x30 147#define SIS_CRT2_PORT_04 0x04 - 0x30
148 148
149/* Mode numbers */ 149/* Mode numbers */
150static const unsigned short ModeIndex_320x200[] = {0x59, 0x41, 0x00, 0x4f}; 150static const unsigned short ModeIndex_320x200[] = { 0x59, 0x41, 0x00, 0x4f };
151static const unsigned short ModeIndex_320x240[] = {0x50, 0x56, 0x00, 0x53}; 151static const unsigned short ModeIndex_320x240[] = { 0x50, 0x56, 0x00, 0x53 };
152static const unsigned short ModeIndex_400x300[] = {0x51, 0x57, 0x00, 0x54}; 152static const unsigned short ModeIndex_400x300[] = { 0x51, 0x57, 0x00, 0x54 };
153static const unsigned short ModeIndex_512x384[] = {0x52, 0x58, 0x00, 0x5c}; 153static const unsigned short ModeIndex_512x384[] = { 0x52, 0x58, 0x00, 0x5c };
154static const unsigned short ModeIndex_640x400[] = {0x2f, 0x5d, 0x00, 0x5e}; 154static const unsigned short ModeIndex_640x400[] = { 0x2f, 0x5d, 0x00, 0x5e };
155static const unsigned short ModeIndex_640x480[] = {0x2e, 0x44, 0x00, 0x62}; 155static const unsigned short ModeIndex_640x480[] = { 0x2e, 0x44, 0x00, 0x62 };
156static const unsigned short ModeIndex_720x480[] = {0x31, 0x33, 0x00, 0x35}; 156static const unsigned short ModeIndex_720x480[] = { 0x31, 0x33, 0x00, 0x35 };
157static const unsigned short ModeIndex_720x576[] = {0x32, 0x34, 0x00, 0x36}; 157static const unsigned short ModeIndex_720x576[] = { 0x32, 0x34, 0x00, 0x36 };
158static const unsigned short ModeIndex_768x576[] = {0x5f, 0x60, 0x00, 0x61}; 158static const unsigned short ModeIndex_768x576[] = { 0x5f, 0x60, 0x00, 0x61 };
159static const unsigned short ModeIndex_800x480[] = {0x70, 0x7a, 0x00, 0x76}; 159static const unsigned short ModeIndex_800x480[] = { 0x70, 0x7a, 0x00, 0x76 };
160static const unsigned short ModeIndex_800x600[] = {0x30, 0x47, 0x00, 0x63}; 160static const unsigned short ModeIndex_800x600[] = { 0x30, 0x47, 0x00, 0x63 };
161static const unsigned short ModeIndex_848x480[] = {0x39, 0x3b, 0x00, 0x3e}; 161static const unsigned short ModeIndex_848x480[] = { 0x39, 0x3b, 0x00, 0x3e };
162static const unsigned short ModeIndex_856x480[] = {0x3f, 0x42, 0x00, 0x45}; 162static const unsigned short ModeIndex_856x480[] = { 0x3f, 0x42, 0x00, 0x45 };
163static const unsigned short ModeIndex_960x540[] = {0x1d, 0x1e, 0x00, 0x1f}; 163static const unsigned short ModeIndex_960x540[] = { 0x1d, 0x1e, 0x00, 0x1f };
164static const unsigned short ModeIndex_960x600[] = {0x20, 0x21, 0x00, 0x22}; 164static const unsigned short ModeIndex_960x600[] = { 0x20, 0x21, 0x00, 0x22 };
165static const unsigned short ModeIndex_1024x768[] = {0x38, 0x4a, 0x00, 0x64}; 165static const unsigned short ModeIndex_1024x768[] = { 0x38, 0x4a, 0x00, 0x64 };
166static const unsigned short ModeIndex_1024x576[] = {0x71, 0x74, 0x00, 0x77}; 166static const unsigned short ModeIndex_1024x576[] = { 0x71, 0x74, 0x00, 0x77 };
167static const unsigned short ModeIndex_1152x864[] = {0x29, 0x2a, 0x00, 0x2b}; 167static const unsigned short ModeIndex_1152x864[] = { 0x29, 0x2a, 0x00, 0x2b };
168static const unsigned short ModeIndex_1280x720[] = {0x79, 0x75, 0x00, 0x78}; 168static const unsigned short ModeIndex_1280x720[] = { 0x79, 0x75, 0x00, 0x78 };
169static const unsigned short ModeIndex_1280x768[] = {0x23, 0x24, 0x00, 0x25}; 169static const unsigned short ModeIndex_1280x768[] = { 0x23, 0x24, 0x00, 0x25 };
170static const unsigned short ModeIndex_1280x1024[] = {0x3a, 0x4d, 0x00, 0x65}; 170static const unsigned short ModeIndex_1280x1024[] = { 0x3a, 0x4d, 0x00, 0x65 };
171 171
172static const unsigned char SiS_MDA_DAC[] = 172static const unsigned char SiS_MDA_DAC[] = {
173{ 173 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
174 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 174 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
175 0x15,0x15,0x15,0x15,0x15,0x15,0x15,0x15, 175 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
176 0x15,0x15,0x15,0x15,0x15,0x15,0x15,0x15, 176 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
177 0x3F,0x3F,0x3F,0x3F,0x3F,0x3F,0x3F,0x3F, 177 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
178 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 178 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
179 0x15,0x15,0x15,0x15,0x15,0x15,0x15,0x15, 179 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
180 0x15,0x15,0x15,0x15,0x15,0x15,0x15,0x15, 180 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F
181 0x3F,0x3F,0x3F,0x3F,0x3F,0x3F,0x3F,0x3F
182}; 181};
183 182
184static const unsigned char SiS_CGA_DAC[] = 183static const unsigned char SiS_CGA_DAC[] = {
185{ 184 0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
186 0x00,0x10,0x04,0x14,0x01,0x11,0x09,0x15, 185 0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
187 0x00,0x10,0x04,0x14,0x01,0x11,0x09,0x15, 186 0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F,
188 0x2A,0x3A,0x2E,0x3E,0x2B,0x3B,0x2F,0x3F, 187 0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F,
189 0x2A,0x3A,0x2E,0x3E,0x2B,0x3B,0x2F,0x3F, 188 0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
190 0x00,0x10,0x04,0x14,0x01,0x11,0x09,0x15, 189 0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
191 0x00,0x10,0x04,0x14,0x01,0x11,0x09,0x15, 190 0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F,
192 0x2A,0x3A,0x2E,0x3E,0x2B,0x3B,0x2F,0x3F, 191 0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F
193 0x2A,0x3A,0x2E,0x3E,0x2B,0x3B,0x2F,0x3F
194}; 192};
195 193
196static const unsigned char SiS_EGA_DAC[] = 194static const unsigned char SiS_EGA_DAC[] = {
197{ 195 0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x05, 0x15,
198 0x00,0x10,0x04,0x14,0x01,0x11,0x05,0x15, 196 0x20, 0x30, 0x24, 0x34, 0x21, 0x31, 0x25, 0x35,
199 0x20,0x30,0x24,0x34,0x21,0x31,0x25,0x35, 197 0x08, 0x18, 0x0C, 0x1C, 0x09, 0x19, 0x0D, 0x1D,
200 0x08,0x18,0x0C,0x1C,0x09,0x19,0x0D,0x1D, 198 0x28, 0x38, 0x2C, 0x3C, 0x29, 0x39, 0x2D, 0x3D,
201 0x28,0x38,0x2C,0x3C,0x29,0x39,0x2D,0x3D, 199 0x02, 0x12, 0x06, 0x16, 0x03, 0x13, 0x07, 0x17,
202 0x02,0x12,0x06,0x16,0x03,0x13,0x07,0x17, 200 0x22, 0x32, 0x26, 0x36, 0x23, 0x33, 0x27, 0x37,
203 0x22,0x32,0x26,0x36,0x23,0x33,0x27,0x37, 201 0x0A, 0x1A, 0x0E, 0x1E, 0x0B, 0x1B, 0x0F, 0x1F,
204 0x0A,0x1A,0x0E,0x1E,0x0B,0x1B,0x0F,0x1F, 202 0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F
205 0x2A,0x3A,0x2E,0x3E,0x2B,0x3B,0x2F,0x3F
206}; 203};
207 204
208static const unsigned char SiS_VGA_DAC[] = 205static const unsigned char SiS_VGA_DAC[] = {
209{ 206 0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
210 0x00,0x10,0x04,0x14,0x01,0x11,0x09,0x15, 207 0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F,
211 0x2A,0x3A,0x2E,0x3E,0x2B,0x3B,0x2F,0x3F, 208 0x00, 0x05, 0x08, 0x0B, 0x0E, 0x11, 0x14, 0x18,
212 0x00,0x05,0x08,0x0B,0x0E,0x11,0x14,0x18, 209 0x1C, 0x20, 0x24, 0x28, 0x2D, 0x32, 0x38, 0x3F,
213 0x1C,0x20,0x24,0x28,0x2D,0x32,0x38,0x3F, 210 0x00, 0x10, 0x1F, 0x2F, 0x3F, 0x1F, 0x27, 0x2F,
214 0x00,0x10,0x1F,0x2F,0x3F,0x1F,0x27,0x2F, 211 0x37, 0x3F, 0x2D, 0x31, 0x36, 0x3A, 0x3F, 0x00,
215 0x37,0x3F,0x2D,0x31,0x36,0x3A,0x3F,0x00, 212 0x07, 0x0E, 0x15, 0x1C, 0x0E, 0x11, 0x15, 0x18,
216 0x07,0x0E,0x15,0x1C,0x0E,0x11,0x15,0x18, 213 0x1C, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x00, 0x04,
217 0x1C,0x14,0x16,0x18,0x1A,0x1C,0x00,0x04, 214 0x08, 0x0C, 0x10, 0x08, 0x0A, 0x0C, 0x0E, 0x10,
218 0x08,0x0C,0x10,0x08,0x0A,0x0C,0x0E,0x10, 215 0x0B, 0x0C, 0x0D, 0x0F, 0x10
219 0x0B,0x0C,0x0D,0x0F,0x10
220}; 216};
221 217
222static const struct SiS_St SiSUSB_SModeIDTable[] = 218static const struct SiS_St SiSUSB_SModeIDTable[] = {
223{ 219 {0x03, 0x0010, 0x18, 0x02, 0x02, 0x00, 0x01, 0x03, 0x40},
224 {0x03,0x0010,0x18,0x02,0x02,0x00,0x01,0x03,0x40}, 220 {0xff, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
225 {0xff,0x0000,0x00,0x00,0x00,0x00,0x00,0x00,0x00}
226}; 221};
227 222
228static const struct SiS_StResInfo_S SiSUSB_StResInfo[] = 223static const struct SiS_StResInfo_S SiSUSB_StResInfo[] = {
229{ 224 {640, 400},
230 { 640,400}, 225 {640, 350},
231 { 640,350}, 226 {720, 400},
232 { 720,400}, 227 {720, 350},
233 { 720,350}, 228 {640, 480}
234 { 640,480}
235}; 229};
236 230
237static const struct SiS_ModeResInfo SiSUSB_ModeResInfo[] = 231static const struct SiS_ModeResInfo SiSUSB_ModeResInfo[] = {
238{ 232 {320, 200, 8, 8}, /* 0x00 */
239 { 320, 200, 8, 8}, /* 0x00 */ 233 {320, 240, 8, 8}, /* 0x01 */
240 { 320, 240, 8, 8}, /* 0x01 */ 234 {320, 400, 8, 8}, /* 0x02 */
241 { 320, 400, 8, 8}, /* 0x02 */ 235 {400, 300, 8, 8}, /* 0x03 */
242 { 400, 300, 8, 8}, /* 0x03 */ 236 {512, 384, 8, 8}, /* 0x04 */
243 { 512, 384, 8, 8}, /* 0x04 */ 237 {640, 400, 8, 16}, /* 0x05 */
244 { 640, 400, 8,16}, /* 0x05 */ 238 {640, 480, 8, 16}, /* 0x06 */
245 { 640, 480, 8,16}, /* 0x06 */ 239 {800, 600, 8, 16}, /* 0x07 */
246 { 800, 600, 8,16}, /* 0x07 */ 240 {1024, 768, 8, 16}, /* 0x08 */
247 { 1024, 768, 8,16}, /* 0x08 */ 241 {1280, 1024, 8, 16}, /* 0x09 */
248 { 1280,1024, 8,16}, /* 0x09 */ 242 {1600, 1200, 8, 16}, /* 0x0a */
249 { 1600,1200, 8,16}, /* 0x0a */ 243 {1920, 1440, 8, 16}, /* 0x0b */
250 { 1920,1440, 8,16}, /* 0x0b */ 244 {2048, 1536, 8, 16}, /* 0x0c */
251 { 2048,1536, 8,16}, /* 0x0c */ 245 {720, 480, 8, 16}, /* 0x0d */
252 { 720, 480, 8,16}, /* 0x0d */ 246 {720, 576, 8, 16}, /* 0x0e */
253 { 720, 576, 8,16}, /* 0x0e */ 247 {1280, 960, 8, 16}, /* 0x0f */
254 { 1280, 960, 8,16}, /* 0x0f */ 248 {800, 480, 8, 16}, /* 0x10 */
255 { 800, 480, 8,16}, /* 0x10 */ 249 {1024, 576, 8, 16}, /* 0x11 */
256 { 1024, 576, 8,16}, /* 0x11 */ 250 {1280, 720, 8, 16}, /* 0x12 */
257 { 1280, 720, 8,16}, /* 0x12 */ 251 {856, 480, 8, 16}, /* 0x13 */
258 { 856, 480, 8,16}, /* 0x13 */ 252 {1280, 768, 8, 16}, /* 0x14 */
259 { 1280, 768, 8,16}, /* 0x14 */ 253 {1400, 1050, 8, 16}, /* 0x15 */
260 { 1400,1050, 8,16}, /* 0x15 */ 254 {1152, 864, 8, 16}, /* 0x16 */
261 { 1152, 864, 8,16}, /* 0x16 */ 255 {848, 480, 8, 16}, /* 0x17 */
262 { 848, 480, 8,16}, /* 0x17 */ 256 {1360, 768, 8, 16}, /* 0x18 */
263 { 1360, 768, 8,16}, /* 0x18 */ 257 {1024, 600, 8, 16}, /* 0x19 */
264 { 1024, 600, 8,16}, /* 0x19 */ 258 {1152, 768, 8, 16}, /* 0x1a */
265 { 1152, 768, 8,16}, /* 0x1a */ 259 {768, 576, 8, 16}, /* 0x1b */
266 { 768, 576, 8,16}, /* 0x1b */ 260 {1360, 1024, 8, 16}, /* 0x1c */
267 { 1360,1024, 8,16}, /* 0x1c */ 261 {1680, 1050, 8, 16}, /* 0x1d */
268 { 1680,1050, 8,16}, /* 0x1d */ 262 {1280, 800, 8, 16}, /* 0x1e */
269 { 1280, 800, 8,16}, /* 0x1e */ 263 {1920, 1080, 8, 16}, /* 0x1f */
270 { 1920,1080, 8,16}, /* 0x1f */ 264 {960, 540, 8, 16}, /* 0x20 */
271 { 960, 540, 8,16}, /* 0x20 */ 265 {960, 600, 8, 16} /* 0x21 */
272 { 960, 600, 8,16} /* 0x21 */
273}; 266};
274 267
275static const struct SiS_StandTable SiSUSB_StandTable[] = 268static const struct SiS_StandTable SiSUSB_StandTable[] = {
276{
277 /* MD_3_400 - mode 0x03 - 400 */ 269 /* MD_3_400 - mode 0x03 - 400 */
278 { 270 {
279 0x50,0x18,0x10,0x1000, 271 0x50, 0x18, 0x10, 0x1000,
280 { 0x00,0x03,0x00,0x02 }, 272 {0x00, 0x03, 0x00, 0x02},
281 0x67, 273 0x67,
282 { 0x5f,0x4f,0x50,0x82,0x55,0x81,0xbf,0x1f, 274 {0x5f, 0x4f, 0x50, 0x82, 0x55, 0x81, 0xbf, 0x1f,
283 0x00,0x4f,0x0d,0x0e,0x00,0x00,0x00,0x00, 275 0x00, 0x4f, 0x0d, 0x0e, 0x00, 0x00, 0x00, 0x00,
284 0x9c,0x8e,0x8f,0x28,0x1f,0x96,0xb9,0xa3, 276 0x9c, 0x8e, 0x8f, 0x28, 0x1f, 0x96, 0xb9, 0xa3,
285 0xff }, 277 0xff},
286 { 0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07, 278 {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x14, 0x07,
287 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f, 279 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
288 0x0c,0x00,0x0f,0x08 }, 280 0x0c, 0x00, 0x0f, 0x08},
289 { 0x00,0x00,0x00,0x00,0x00,0x10,0x0e,0x00, 0xff } 281 {0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x0e, 0x00, 0xff}
290 }, 282 },
291 /* Generic for VGA and higher */ 283 /* Generic for VGA and higher */
292 { 284 {
293 0x00,0x00,0x00,0x0000, 285 0x00, 0x00, 0x00, 0x0000,
294 { 0x01,0x0f,0x00,0x0e }, 286 {0x01, 0x0f, 0x00, 0x0e},
295 0x23, 287 0x23,
296 { 0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e, 288 {0x5f, 0x4f, 0x50, 0x82, 0x54, 0x80, 0x0b, 0x3e,
297 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00, 289 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
298 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3, 290 0xea, 0x8c, 0xdf, 0x28, 0x40, 0xe7, 0x04, 0xa3,
299 0xff }, 291 0xff},
300 { 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07, 292 {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
301 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f, 293 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
302 0x01,0x00,0x00,0x00 }, 294 0x01, 0x00, 0x00, 0x00},
303 { 0x00,0x00,0x00,0x00,0x00,0x40,0x05,0x0f, 0xff } 295 {0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0f, 0xff}
304 } 296 }
305}; 297};
306 298
307static const struct SiS_Ext SiSUSB_EModeIDTable[] = 299static const struct SiS_Ext SiSUSB_EModeIDTable[] = {
308{ 300 {0x2e, 0x0a1b, 0x0101, SIS_RI_640x480, 0x00, 0x00, 0x05, 0x05, 0x08, 2}, /* 640x480x8 */
309 {0x2e,0x0a1b,0x0101,SIS_RI_640x480, 0x00,0x00,0x05,0x05,0x08, 2}, /* 640x480x8 */ 301 {0x2f, 0x0a1b, 0x0100, SIS_RI_640x400, 0x00, 0x00, 0x05, 0x05, 0x10, 0}, /* 640x400x8 */
310 {0x2f,0x0a1b,0x0100,SIS_RI_640x400, 0x00,0x00,0x05,0x05,0x10, 0}, /* 640x400x8 */ 302 {0x30, 0x2a1b, 0x0103, SIS_RI_800x600, 0x00, 0x00, 0x07, 0x06, 0x00, 3}, /* 800x600x8 */
311 {0x30,0x2a1b,0x0103,SIS_RI_800x600, 0x00,0x00,0x07,0x06,0x00, 3}, /* 800x600x8 */ 303 {0x31, 0x4a1b, 0x0000, SIS_RI_720x480, 0x00, 0x00, 0x06, 0x06, 0x11, -1}, /* 720x480x8 */
312 {0x31,0x4a1b,0x0000,SIS_RI_720x480, 0x00,0x00,0x06,0x06,0x11,-1}, /* 720x480x8 */ 304 {0x32, 0x4a1b, 0x0000, SIS_RI_720x576, 0x00, 0x00, 0x06, 0x06, 0x12, -1}, /* 720x576x8 */
313 {0x32,0x4a1b,0x0000,SIS_RI_720x576, 0x00,0x00,0x06,0x06,0x12,-1}, /* 720x576x8 */ 305 {0x33, 0x4a1d, 0x0000, SIS_RI_720x480, 0x00, 0x00, 0x06, 0x06, 0x11, -1}, /* 720x480x16 */
314 {0x33,0x4a1d,0x0000,SIS_RI_720x480, 0x00,0x00,0x06,0x06,0x11,-1}, /* 720x480x16 */ 306 {0x34, 0x6a1d, 0x0000, SIS_RI_720x576, 0x00, 0x00, 0x06, 0x06, 0x12, -1}, /* 720x576x16 */
315 {0x34,0x6a1d,0x0000,SIS_RI_720x576, 0x00,0x00,0x06,0x06,0x12,-1}, /* 720x576x16 */ 307 {0x35, 0x4a1f, 0x0000, SIS_RI_720x480, 0x00, 0x00, 0x06, 0x06, 0x11, -1}, /* 720x480x32 */
316 {0x35,0x4a1f,0x0000,SIS_RI_720x480, 0x00,0x00,0x06,0x06,0x11,-1}, /* 720x480x32 */ 308 {0x36, 0x6a1f, 0x0000, SIS_RI_720x576, 0x00, 0x00, 0x06, 0x06, 0x12, -1}, /* 720x576x32 */
317 {0x36,0x6a1f,0x0000,SIS_RI_720x576, 0x00,0x00,0x06,0x06,0x12,-1}, /* 720x576x32 */ 309 {0x38, 0x0a1b, 0x0105, SIS_RI_1024x768, 0x00, 0x00, 0x08, 0x07, 0x13, 4}, /* 1024x768x8 */
318 {0x38,0x0a1b,0x0105,SIS_RI_1024x768, 0x00,0x00,0x08,0x07,0x13, 4}, /* 1024x768x8 */ 310 {0x3a, 0x0e3b, 0x0107, SIS_RI_1280x1024, 0x00, 0x00, 0x00, 0x00, 0x2f, 8}, /* 1280x1024x8 */
319 {0x3a,0x0e3b,0x0107,SIS_RI_1280x1024,0x00,0x00,0x00,0x00,0x2f, 8}, /* 1280x1024x8 */ 311 {0x41, 0x9a1d, 0x010e, SIS_RI_320x200, 0x00, 0x00, 0x04, 0x04, 0x1a, 0}, /* 320x200x16 */
320 {0x41,0x9a1d,0x010e,SIS_RI_320x200, 0x00,0x00,0x04,0x04,0x1a, 0}, /* 320x200x16 */ 312 {0x44, 0x0a1d, 0x0111, SIS_RI_640x480, 0x00, 0x00, 0x05, 0x05, 0x08, 2}, /* 640x480x16 */
321 {0x44,0x0a1d,0x0111,SIS_RI_640x480, 0x00,0x00,0x05,0x05,0x08, 2}, /* 640x480x16 */ 313 {0x47, 0x2a1d, 0x0114, SIS_RI_800x600, 0x00, 0x00, 0x07, 0x06, 0x00, 3}, /* 800x600x16 */
322 {0x47,0x2a1d,0x0114,SIS_RI_800x600, 0x00,0x00,0x07,0x06,0x00, 3}, /* 800x600x16 */ 314 {0x4a, 0x0a3d, 0x0117, SIS_RI_1024x768, 0x00, 0x00, 0x08, 0x07, 0x13, 4}, /* 1024x768x16 */
323 {0x4a,0x0a3d,0x0117,SIS_RI_1024x768, 0x00,0x00,0x08,0x07,0x13, 4}, /* 1024x768x16 */ 315 {0x4d, 0x0e7d, 0x011a, SIS_RI_1280x1024, 0x00, 0x00, 0x00, 0x00, 0x2f, 8}, /* 1280x1024x16 */
324 {0x4d,0x0e7d,0x011a,SIS_RI_1280x1024,0x00,0x00,0x00,0x00,0x2f, 8}, /* 1280x1024x16 */ 316 {0x50, 0x9a1b, 0x0132, SIS_RI_320x240, 0x00, 0x00, 0x04, 0x04, 0x1b, 2}, /* 320x240x8 */
325 {0x50,0x9a1b,0x0132,SIS_RI_320x240, 0x00,0x00,0x04,0x04,0x1b, 2}, /* 320x240x8 */ 317 {0x51, 0xba1b, 0x0133, SIS_RI_400x300, 0x00, 0x00, 0x07, 0x07, 0x1c, 3}, /* 400x300x8 */
326 {0x51,0xba1b,0x0133,SIS_RI_400x300, 0x00,0x00,0x07,0x07,0x1c, 3}, /* 400x300x8 */ 318 {0x52, 0xba1b, 0x0134, SIS_RI_512x384, 0x00, 0x00, 0x00, 0x00, 0x1d, 4}, /* 512x384x8 */
327 {0x52,0xba1b,0x0134,SIS_RI_512x384, 0x00,0x00,0x00,0x00,0x1d, 4}, /* 512x384x8 */ 319 {0x56, 0x9a1d, 0x0135, SIS_RI_320x240, 0x00, 0x00, 0x04, 0x04, 0x1b, 2}, /* 320x240x16 */
328 {0x56,0x9a1d,0x0135,SIS_RI_320x240, 0x00,0x00,0x04,0x04,0x1b, 2}, /* 320x240x16 */ 320 {0x57, 0xba1d, 0x0136, SIS_RI_400x300, 0x00, 0x00, 0x07, 0x07, 0x1c, 3}, /* 400x300x16 */
329 {0x57,0xba1d,0x0136,SIS_RI_400x300, 0x00,0x00,0x07,0x07,0x1c, 3}, /* 400x300x16 */ 321 {0x58, 0xba1d, 0x0137, SIS_RI_512x384, 0x00, 0x00, 0x00, 0x00, 0x1d, 4}, /* 512x384x16 */
330 {0x58,0xba1d,0x0137,SIS_RI_512x384, 0x00,0x00,0x00,0x00,0x1d, 4}, /* 512x384x16 */ 322 {0x59, 0x9a1b, 0x0138, SIS_RI_320x200, 0x00, 0x00, 0x04, 0x04, 0x1a, 0}, /* 320x200x8 */
331 {0x59,0x9a1b,0x0138,SIS_RI_320x200, 0x00,0x00,0x04,0x04,0x1a, 0}, /* 320x200x8 */ 323 {0x5c, 0xba1f, 0x0000, SIS_RI_512x384, 0x00, 0x00, 0x00, 0x00, 0x1d, 4}, /* 512x384x32 */
332 {0x5c,0xba1f,0x0000,SIS_RI_512x384, 0x00,0x00,0x00,0x00,0x1d, 4}, /* 512x384x32 */ 324 {0x5d, 0x0a1d, 0x0139, SIS_RI_640x400, 0x00, 0x00, 0x05, 0x07, 0x10, 0}, /* 640x400x16 */
333 {0x5d,0x0a1d,0x0139,SIS_RI_640x400, 0x00,0x00,0x05,0x07,0x10, 0}, /* 640x400x16 */ 325 {0x5e, 0x0a1f, 0x0000, SIS_RI_640x400, 0x00, 0x00, 0x05, 0x07, 0x10, 0}, /* 640x400x32 */
334 {0x5e,0x0a1f,0x0000,SIS_RI_640x400, 0x00,0x00,0x05,0x07,0x10, 0}, /* 640x400x32 */ 326 {0x62, 0x0a3f, 0x013a, SIS_RI_640x480, 0x00, 0x00, 0x05, 0x05, 0x08, 2}, /* 640x480x32 */
335 {0x62,0x0a3f,0x013a,SIS_RI_640x480, 0x00,0x00,0x05,0x05,0x08, 2}, /* 640x480x32 */ 327 {0x63, 0x2a3f, 0x013b, SIS_RI_800x600, 0x00, 0x00, 0x07, 0x06, 0x00, 3}, /* 800x600x32 */
336 {0x63,0x2a3f,0x013b,SIS_RI_800x600, 0x00,0x00,0x07,0x06,0x00, 3}, /* 800x600x32 */ 328 {0x64, 0x0a7f, 0x013c, SIS_RI_1024x768, 0x00, 0x00, 0x08, 0x07, 0x13, 4}, /* 1024x768x32 */
337 {0x64,0x0a7f,0x013c,SIS_RI_1024x768, 0x00,0x00,0x08,0x07,0x13, 4}, /* 1024x768x32 */ 329 {0x65, 0x0eff, 0x013d, SIS_RI_1280x1024, 0x00, 0x00, 0x00, 0x00, 0x2f, 8}, /* 1280x1024x32 */
338 {0x65,0x0eff,0x013d,SIS_RI_1280x1024,0x00,0x00,0x00,0x00,0x2f, 8}, /* 1280x1024x32 */ 330 {0x70, 0x6a1b, 0x0000, SIS_RI_800x480, 0x00, 0x00, 0x07, 0x07, 0x1e, -1}, /* 800x480x8 */
339 {0x70,0x6a1b,0x0000,SIS_RI_800x480, 0x00,0x00,0x07,0x07,0x1e,-1}, /* 800x480x8 */ 331 {0x71, 0x4a1b, 0x0000, SIS_RI_1024x576, 0x00, 0x00, 0x00, 0x00, 0x21, -1}, /* 1024x576x8 */
340 {0x71,0x4a1b,0x0000,SIS_RI_1024x576, 0x00,0x00,0x00,0x00,0x21,-1}, /* 1024x576x8 */ 332 {0x74, 0x4a1d, 0x0000, SIS_RI_1024x576, 0x00, 0x00, 0x00, 0x00, 0x21, -1}, /* 1024x576x16 */
341 {0x74,0x4a1d,0x0000,SIS_RI_1024x576, 0x00,0x00,0x00,0x00,0x21,-1}, /* 1024x576x16 */ 333 {0x75, 0x0a3d, 0x0000, SIS_RI_1280x720, 0x00, 0x00, 0x00, 0x00, 0x24, 5}, /* 1280x720x16 */
342 {0x75,0x0a3d,0x0000,SIS_RI_1280x720, 0x00,0x00,0x00,0x00,0x24, 5}, /* 1280x720x16 */ 334 {0x76, 0x6a1f, 0x0000, SIS_RI_800x480, 0x00, 0x00, 0x07, 0x07, 0x1e, -1}, /* 800x480x32 */
343 {0x76,0x6a1f,0x0000,SIS_RI_800x480, 0x00,0x00,0x07,0x07,0x1e,-1}, /* 800x480x32 */ 335 {0x77, 0x4a1f, 0x0000, SIS_RI_1024x576, 0x00, 0x00, 0x00, 0x00, 0x21, -1}, /* 1024x576x32 */
344 {0x77,0x4a1f,0x0000,SIS_RI_1024x576, 0x00,0x00,0x00,0x00,0x21,-1}, /* 1024x576x32 */ 336 {0x78, 0x0a3f, 0x0000, SIS_RI_1280x720, 0x00, 0x00, 0x00, 0x00, 0x24, 5}, /* 1280x720x32 */
345 {0x78,0x0a3f,0x0000,SIS_RI_1280x720, 0x00,0x00,0x00,0x00,0x24, 5}, /* 1280x720x32 */ 337 {0x79, 0x0a3b, 0x0000, SIS_RI_1280x720, 0x00, 0x00, 0x00, 0x00, 0x24, 5}, /* 1280x720x8 */
346 {0x79,0x0a3b,0x0000,SIS_RI_1280x720, 0x00,0x00,0x00,0x00,0x24, 5}, /* 1280x720x8 */ 338 {0x7a, 0x6a1d, 0x0000, SIS_RI_800x480, 0x00, 0x00, 0x07, 0x07, 0x1e, -1}, /* 800x480x16 */
347 {0x7a,0x6a1d,0x0000,SIS_RI_800x480, 0x00,0x00,0x07,0x07,0x1e,-1}, /* 800x480x16 */ 339 {0x23, 0x0e3b, 0x0000, SIS_RI_1280x768, 0x00, 0x00, 0x00, 0x00, 0x27, 6}, /* 1280x768x8 */
348 {0x23,0x0e3b,0x0000,SIS_RI_1280x768, 0x00,0x00,0x00,0x00,0x27, 6}, /* 1280x768x8 */ 340 {0x24, 0x0e7d, 0x0000, SIS_RI_1280x768, 0x00, 0x00, 0x00, 0x00, 0x27, 6}, /* 1280x768x16 */
349 {0x24,0x0e7d,0x0000,SIS_RI_1280x768, 0x00,0x00,0x00,0x00,0x27, 6}, /* 1280x768x16 */ 341 {0x25, 0x0eff, 0x0000, SIS_RI_1280x768, 0x00, 0x00, 0x00, 0x00, 0x27, 6}, /* 1280x768x32 */
350 {0x25,0x0eff,0x0000,SIS_RI_1280x768, 0x00,0x00,0x00,0x00,0x27, 6}, /* 1280x768x32 */ 342 {0x39, 0x6a1b, 0x0000, SIS_RI_848x480, 0x00, 0x00, 0x00, 0x00, 0x28, -1}, /* 848x480 */
351 {0x39,0x6a1b,0x0000,SIS_RI_848x480, 0x00,0x00,0x00,0x00,0x28,-1}, /* 848x480 */ 343 {0x3b, 0x6a3d, 0x0000, SIS_RI_848x480, 0x00, 0x00, 0x00, 0x00, 0x28,
352 {0x3b,0x6a3d,0x0000,SIS_RI_848x480, 0x00,0x00,0x00,0x00,0x28,-1}, 344 -1},
353 {0x3e,0x6a7f,0x0000,SIS_RI_848x480, 0x00,0x00,0x00,0x00,0x28,-1}, 345 {0x3e, 0x6a7f, 0x0000, SIS_RI_848x480, 0x00, 0x00, 0x00, 0x00, 0x28,
354 {0x3f,0x6a1b,0x0000,SIS_RI_856x480, 0x00,0x00,0x00,0x00,0x2a,-1}, /* 856x480 */ 346 -1},
355 {0x42,0x6a3d,0x0000,SIS_RI_856x480, 0x00,0x00,0x00,0x00,0x2a,-1}, 347 {0x3f, 0x6a1b, 0x0000, SIS_RI_856x480, 0x00, 0x00, 0x00, 0x00, 0x2a, -1}, /* 856x480 */
356 {0x45,0x6a7f,0x0000,SIS_RI_856x480, 0x00,0x00,0x00,0x00,0x2a,-1}, 348 {0x42, 0x6a3d, 0x0000, SIS_RI_856x480, 0x00, 0x00, 0x00, 0x00, 0x2a,
357 {0x4f,0x9a1f,0x0000,SIS_RI_320x200, 0x00,0x00,0x04,0x04,0x1a, 0}, /* 320x200x32 */ 349 -1},
358 {0x53,0x9a1f,0x0000,SIS_RI_320x240, 0x00,0x00,0x04,0x04,0x1b, 2}, /* 320x240x32 */ 350 {0x45, 0x6a7f, 0x0000, SIS_RI_856x480, 0x00, 0x00, 0x00, 0x00, 0x2a,
359 {0x54,0xba1f,0x0000,SIS_RI_400x300, 0x00,0x00,0x07,0x07,0x1c, 3}, /* 400x300x32 */ 351 -1},
360 {0x5f,0x6a1b,0x0000,SIS_RI_768x576, 0x00,0x00,0x06,0x06,0x2c,-1}, /* 768x576 */ 352 {0x4f, 0x9a1f, 0x0000, SIS_RI_320x200, 0x00, 0x00, 0x04, 0x04, 0x1a, 0}, /* 320x200x32 */
361 {0x60,0x6a1d,0x0000,SIS_RI_768x576, 0x00,0x00,0x06,0x06,0x2c,-1}, 353 {0x53, 0x9a1f, 0x0000, SIS_RI_320x240, 0x00, 0x00, 0x04, 0x04, 0x1b, 2}, /* 320x240x32 */
362 {0x61,0x6a3f,0x0000,SIS_RI_768x576, 0x00,0x00,0x06,0x06,0x2c,-1}, 354 {0x54, 0xba1f, 0x0000, SIS_RI_400x300, 0x00, 0x00, 0x07, 0x07, 0x1c, 3}, /* 400x300x32 */
363 {0x1d,0x6a1b,0x0000,SIS_RI_960x540, 0x00,0x00,0x00,0x00,0x2d,-1}, /* 960x540 */ 355 {0x5f, 0x6a1b, 0x0000, SIS_RI_768x576, 0x00, 0x00, 0x06, 0x06, 0x2c, -1}, /* 768x576 */
364 {0x1e,0x6a3d,0x0000,SIS_RI_960x540, 0x00,0x00,0x00,0x00,0x2d,-1}, 356 {0x60, 0x6a1d, 0x0000, SIS_RI_768x576, 0x00, 0x00, 0x06, 0x06, 0x2c,
365 {0x1f,0x6a7f,0x0000,SIS_RI_960x540, 0x00,0x00,0x00,0x00,0x2d,-1}, 357 -1},
366 {0x20,0x6a1b,0x0000,SIS_RI_960x600, 0x00,0x00,0x00,0x00,0x2e,-1}, /* 960x600 */ 358 {0x61, 0x6a3f, 0x0000, SIS_RI_768x576, 0x00, 0x00, 0x06, 0x06, 0x2c,
367 {0x21,0x6a3d,0x0000,SIS_RI_960x600, 0x00,0x00,0x00,0x00,0x2e,-1}, 359 -1},
368 {0x22,0x6a7f,0x0000,SIS_RI_960x600, 0x00,0x00,0x00,0x00,0x2e,-1}, 360 {0x1d, 0x6a1b, 0x0000, SIS_RI_960x540, 0x00, 0x00, 0x00, 0x00, 0x2d, -1}, /* 960x540 */
369 {0x29,0x4e1b,0x0000,SIS_RI_1152x864, 0x00,0x00,0x00,0x00,0x33,-1}, /* 1152x864 */ 361 {0x1e, 0x6a3d, 0x0000, SIS_RI_960x540, 0x00, 0x00, 0x00, 0x00, 0x2d,
370 {0x2a,0x4e3d,0x0000,SIS_RI_1152x864, 0x00,0x00,0x00,0x00,0x33,-1}, 362 -1},
371 {0x2b,0x4e7f,0x0000,SIS_RI_1152x864, 0x00,0x00,0x00,0x00,0x33,-1}, 363 {0x1f, 0x6a7f, 0x0000, SIS_RI_960x540, 0x00, 0x00, 0x00, 0x00, 0x2d,
372 {0xff,0x0000,0x0000,0, 0x00,0x00,0x00,0x00,0x00,-1} 364 -1},
365 {0x20, 0x6a1b, 0x0000, SIS_RI_960x600, 0x00, 0x00, 0x00, 0x00, 0x2e, -1}, /* 960x600 */
366 {0x21, 0x6a3d, 0x0000, SIS_RI_960x600, 0x00, 0x00, 0x00, 0x00, 0x2e,
367 -1},
368 {0x22, 0x6a7f, 0x0000, SIS_RI_960x600, 0x00, 0x00, 0x00, 0x00, 0x2e,
369 -1},
370 {0x29, 0x4e1b, 0x0000, SIS_RI_1152x864, 0x00, 0x00, 0x00, 0x00, 0x33, -1}, /* 1152x864 */
371 {0x2a, 0x4e3d, 0x0000, SIS_RI_1152x864, 0x00, 0x00, 0x00, 0x00, 0x33,
372 -1},
373 {0x2b, 0x4e7f, 0x0000, SIS_RI_1152x864, 0x00, 0x00, 0x00, 0x00, 0x33,
374 -1},
375 {0xff, 0x0000, 0x0000, 0, 0x00, 0x00, 0x00, 0x00, 0x00, -1}
373}; 376};
374 377
375static const struct SiS_Ext2 SiSUSB_RefIndex[] = 378static const struct SiS_Ext2 SiSUSB_RefIndex[] = {
376{ 379 {0x085f, 0x0d, 0x03, 0x05, 0x05, 0x30, 800, 600, 0x40, 0x00, 0x00}, /* 0x0 */
377 {0x085f,0x0d,0x03,0x05,0x05,0x30, 800, 600, 0x40, 0x00, 0x00}, /* 0x0 */ 380 {0x0067, 0x0e, 0x04, 0x05, 0x05, 0x30, 800, 600, 0x40, 0x00, 0x00}, /* 0x1 */
378 {0x0067,0x0e,0x04,0x05,0x05,0x30, 800, 600, 0x40, 0x00, 0x00}, /* 0x1 */ 381 {0x0067, 0x0f, 0x08, 0x48, 0x05, 0x30, 800, 600, 0x40, 0x00, 0x00}, /* 0x2 */
379 {0x0067,0x0f,0x08,0x48,0x05,0x30, 800, 600, 0x40, 0x00, 0x00}, /* 0x2 */ 382 {0x0067, 0x10, 0x07, 0x8b, 0x05, 0x30, 800, 600, 0x40, 0x00, 0x00}, /* 0x3 */
380 {0x0067,0x10,0x07,0x8b,0x05,0x30, 800, 600, 0x40, 0x00, 0x00}, /* 0x3 */ 383 {0x0047, 0x11, 0x0a, 0x00, 0x05, 0x30, 800, 600, 0x40, 0x00, 0x00}, /* 0x4 */
381 {0x0047,0x11,0x0a,0x00,0x05,0x30, 800, 600, 0x40, 0x00, 0x00}, /* 0x4 */ 384 {0x0047, 0x12, 0x0d, 0x00, 0x05, 0x30, 800, 600, 0x40, 0x00, 0x00}, /* 0x5 */
382 {0x0047,0x12,0x0d,0x00,0x05,0x30, 800, 600, 0x40, 0x00, 0x00}, /* 0x5 */ 385 {0x0047, 0x13, 0x13, 0x00, 0x05, 0x30, 800, 600, 0x20, 0x00, 0x00}, /* 0x6 */
383 {0x0047,0x13,0x13,0x00,0x05,0x30, 800, 600, 0x20, 0x00, 0x00}, /* 0x6 */ 386 {0x0107, 0x14, 0x1c, 0x00, 0x05, 0x30, 800, 600, 0x20, 0x00, 0x00}, /* 0x7 */
384 {0x0107,0x14,0x1c,0x00,0x05,0x30, 800, 600, 0x20, 0x00, 0x00}, /* 0x7 */ 387 {0xc85f, 0x05, 0x00, 0x04, 0x04, 0x2e, 640, 480, 0x40, 0x00, 0x00}, /* 0x8 */
385 {0xc85f,0x05,0x00,0x04,0x04,0x2e, 640, 480, 0x40, 0x00, 0x00}, /* 0x8 */ 388 {0xc067, 0x06, 0x02, 0x04, 0x04, 0x2e, 640, 480, 0x40, 0x00, 0x00}, /* 0x9 */
386 {0xc067,0x06,0x02,0x04,0x04,0x2e, 640, 480, 0x40, 0x00, 0x00}, /* 0x9 */ 389 {0xc067, 0x07, 0x02, 0x47, 0x04, 0x2e, 640, 480, 0x40, 0x00, 0x00}, /* 0xa */
387 {0xc067,0x07,0x02,0x47,0x04,0x2e, 640, 480, 0x40, 0x00, 0x00}, /* 0xa */ 390 {0xc067, 0x08, 0x03, 0x8a, 0x04, 0x2e, 640, 480, 0x40, 0x00, 0x00}, /* 0xb */
388 {0xc067,0x08,0x03,0x8a,0x04,0x2e, 640, 480, 0x40, 0x00, 0x00}, /* 0xb */ 391 {0xc047, 0x09, 0x05, 0x00, 0x04, 0x2e, 640, 480, 0x40, 0x00, 0x00}, /* 0xc */
389 {0xc047,0x09,0x05,0x00,0x04,0x2e, 640, 480, 0x40, 0x00, 0x00}, /* 0xc */ 392 {0xc047, 0x0a, 0x09, 0x00, 0x04, 0x2e, 640, 480, 0x40, 0x00, 0x00}, /* 0xd */
390 {0xc047,0x0a,0x09,0x00,0x04,0x2e, 640, 480, 0x40, 0x00, 0x00}, /* 0xd */ 393 {0xc047, 0x0b, 0x0e, 0x00, 0x04, 0x2e, 640, 480, 0x40, 0x00, 0x00}, /* 0xe */
391 {0xc047,0x0b,0x0e,0x00,0x04,0x2e, 640, 480, 0x40, 0x00, 0x00}, /* 0xe */ 394 {0xc047, 0x0c, 0x15, 0x00, 0x04, 0x2e, 640, 480, 0x40, 0x00, 0x00}, /* 0xf */
392 {0xc047,0x0c,0x15,0x00,0x04,0x2e, 640, 480, 0x40, 0x00, 0x00}, /* 0xf */ 395 {0x487f, 0x04, 0x00, 0x00, 0x00, 0x2f, 640, 400, 0x30, 0x55, 0x6e}, /* 0x10 */
393 {0x487f,0x04,0x00,0x00,0x00,0x2f, 640, 400, 0x30, 0x55, 0x6e}, /* 0x10 */ 396 {0xc06f, 0x3c, 0x01, 0x06, 0x13, 0x31, 720, 480, 0x30, 0x00, 0x00}, /* 0x11 */
394 {0xc06f,0x3c,0x01,0x06,0x13,0x31, 720, 480, 0x30, 0x00, 0x00}, /* 0x11 */ 397 {0x006f, 0x3d, 0x6f, 0x06, 0x14, 0x32, 720, 576, 0x30, 0x00, 0x00}, /* 0x12 (6f was 03) */
395 {0x006f,0x3d,0x6f,0x06,0x14,0x32, 720, 576, 0x30, 0x00, 0x00}, /* 0x12 (6f was 03) */ 398 {0x0087, 0x15, 0x06, 0x00, 0x06, 0x38, 1024, 768, 0x30, 0x00, 0x00}, /* 0x13 */
396 {0x0087,0x15,0x06,0x00,0x06,0x38,1024, 768, 0x30, 0x00, 0x00}, /* 0x13 */ 399 {0xc877, 0x16, 0x0b, 0x06, 0x06, 0x38, 1024, 768, 0x20, 0x00, 0x00}, /* 0x14 */
397 {0xc877,0x16,0x0b,0x06,0x06,0x38,1024, 768, 0x20, 0x00, 0x00}, /* 0x14 */ 400 {0xc067, 0x17, 0x0f, 0x49, 0x06, 0x38, 1024, 768, 0x20, 0x00, 0x00}, /* 0x15 */
398 {0xc067,0x17,0x0f,0x49,0x06,0x38,1024, 768, 0x20, 0x00, 0x00}, /* 0x15 */ 401 {0x0067, 0x18, 0x11, 0x00, 0x06, 0x38, 1024, 768, 0x20, 0x00, 0x00}, /* 0x16 */
399 {0x0067,0x18,0x11,0x00,0x06,0x38,1024, 768, 0x20, 0x00, 0x00}, /* 0x16 */ 402 {0x0047, 0x19, 0x16, 0x8c, 0x06, 0x38, 1024, 768, 0x20, 0x00, 0x00}, /* 0x17 */
400 {0x0047,0x19,0x16,0x8c,0x06,0x38,1024, 768, 0x20, 0x00, 0x00}, /* 0x17 */ 403 {0x0107, 0x1a, 0x1b, 0x00, 0x06, 0x38, 1024, 768, 0x10, 0x00, 0x00}, /* 0x18 */
401 {0x0107,0x1a,0x1b,0x00,0x06,0x38,1024, 768, 0x10, 0x00, 0x00}, /* 0x18 */ 404 {0x0107, 0x1b, 0x1f, 0x00, 0x06, 0x38, 1024, 768, 0x10, 0x00, 0x00}, /* 0x19 */
402 {0x0107,0x1b,0x1f,0x00,0x06,0x38,1024, 768, 0x10, 0x00, 0x00}, /* 0x19 */ 405 {0x407f, 0x00, 0x00, 0x00, 0x00, 0x41, 320, 200, 0x30, 0x56, 0x4e}, /* 0x1a */
403 {0x407f,0x00,0x00,0x00,0x00,0x41, 320, 200, 0x30, 0x56, 0x4e}, /* 0x1a */ 406 {0xc07f, 0x01, 0x00, 0x04, 0x04, 0x50, 320, 240, 0x30, 0x00, 0x00}, /* 0x1b */
404 {0xc07f,0x01,0x00,0x04,0x04,0x50, 320, 240, 0x30, 0x00, 0x00}, /* 0x1b */ 407 {0x007f, 0x02, 0x04, 0x05, 0x05, 0x51, 400, 300, 0x30, 0x00, 0x00}, /* 0x1c */
405 {0x007f,0x02,0x04,0x05,0x05,0x51, 400, 300, 0x30, 0x00, 0x00}, /* 0x1c */ 408 {0xc077, 0x03, 0x0b, 0x06, 0x06, 0x52, 512, 384, 0x30, 0x00, 0x00}, /* 0x1d */
406 {0xc077,0x03,0x0b,0x06,0x06,0x52, 512, 384, 0x30, 0x00, 0x00}, /* 0x1d */ 409 {0x0077, 0x32, 0x40, 0x08, 0x18, 0x70, 800, 480, 0x30, 0x00, 0x00}, /* 0x1e */
407 {0x0077,0x32,0x40,0x08,0x18,0x70, 800, 480, 0x30, 0x00, 0x00}, /* 0x1e */ 410 {0x0047, 0x33, 0x07, 0x08, 0x18, 0x70, 800, 480, 0x30, 0x00, 0x00}, /* 0x1f */
408 {0x0047,0x33,0x07,0x08,0x18,0x70, 800, 480, 0x30, 0x00, 0x00}, /* 0x1f */ 411 {0x0047, 0x34, 0x0a, 0x08, 0x18, 0x70, 800, 480, 0x30, 0x00, 0x00}, /* 0x20 */
409 {0x0047,0x34,0x0a,0x08,0x18,0x70, 800, 480, 0x30, 0x00, 0x00}, /* 0x20 */ 412 {0x0077, 0x35, 0x0b, 0x09, 0x19, 0x71, 1024, 576, 0x30, 0x00, 0x00}, /* 0x21 */
410 {0x0077,0x35,0x0b,0x09,0x19,0x71,1024, 576, 0x30, 0x00, 0x00}, /* 0x21 */ 413 {0x0047, 0x36, 0x11, 0x09, 0x19, 0x71, 1024, 576, 0x30, 0x00, 0x00}, /* 0x22 */
411 {0x0047,0x36,0x11,0x09,0x19,0x71,1024, 576, 0x30, 0x00, 0x00}, /* 0x22 */ 414 {0x0047, 0x37, 0x16, 0x09, 0x19, 0x71, 1024, 576, 0x30, 0x00, 0x00}, /* 0x23 */
412 {0x0047,0x37,0x16,0x09,0x19,0x71,1024, 576, 0x30, 0x00, 0x00}, /* 0x23 */ 415 {0x1137, 0x38, 0x19, 0x0a, 0x0c, 0x75, 1280, 720, 0x30, 0x00, 0x00}, /* 0x24 */
413 {0x1137,0x38,0x19,0x0a,0x0c,0x75,1280, 720, 0x30, 0x00, 0x00}, /* 0x24 */ 416 {0x1107, 0x39, 0x1e, 0x0a, 0x0c, 0x75, 1280, 720, 0x30, 0x00, 0x00}, /* 0x25 */
414 {0x1107,0x39,0x1e,0x0a,0x0c,0x75,1280, 720, 0x30, 0x00, 0x00}, /* 0x25 */ 417 {0x1307, 0x3a, 0x20, 0x0a, 0x0c, 0x75, 1280, 720, 0x30, 0x00, 0x00}, /* 0x26 */
415 {0x1307,0x3a,0x20,0x0a,0x0c,0x75,1280, 720, 0x30, 0x00, 0x00}, /* 0x26 */ 418 {0x0077, 0x42, 0x5b, 0x08, 0x11, 0x23, 1280, 768, 0x30, 0x00, 0x00}, /* 0x27 */
416 {0x0077,0x42,0x5b,0x08,0x11,0x23,1280, 768, 0x30, 0x00, 0x00}, /* 0x27 */ 419 {0x0087, 0x45, 0x57, 0x00, 0x16, 0x39, 848, 480, 0x30, 0x00, 0x00}, /* 0x28 38Hzi */
417 {0x0087,0x45,0x57,0x00,0x16,0x39, 848, 480, 0x30, 0x00, 0x00}, /* 0x28 38Hzi */ 420 {0xc067, 0x46, 0x55, 0x0b, 0x16, 0x39, 848, 480, 0x30, 0x00, 0x00}, /* 0x29 848x480-60Hz */
418 {0xc067,0x46,0x55,0x0b,0x16,0x39, 848, 480, 0x30, 0x00, 0x00}, /* 0x29 848x480-60Hz */ 421 {0x0087, 0x47, 0x57, 0x00, 0x17, 0x3f, 856, 480, 0x30, 0x00, 0x00}, /* 0x2a 856x480-38Hzi */
419 {0x0087,0x47,0x57,0x00,0x17,0x3f, 856, 480, 0x30, 0x00, 0x00}, /* 0x2a 856x480-38Hzi */ 422 {0xc067, 0x48, 0x57, 0x00, 0x17, 0x3f, 856, 480, 0x30, 0x00, 0x00}, /* 0x2b 856x480-60Hz */
420 {0xc067,0x48,0x57,0x00,0x17,0x3f, 856, 480, 0x30, 0x00, 0x00}, /* 0x2b 856x480-60Hz */ 423 {0x006f, 0x4d, 0x71, 0x06, 0x15, 0x5f, 768, 576, 0x30, 0x00, 0x00}, /* 0x2c 768x576-56Hz */
421 {0x006f,0x4d,0x71,0x06,0x15,0x5f, 768, 576, 0x30, 0x00, 0x00}, /* 0x2c 768x576-56Hz */ 424 {0x0067, 0x52, 0x6a, 0x00, 0x1c, 0x1d, 960, 540, 0x30, 0x00, 0x00}, /* 0x2d 960x540 60Hz */
422 {0x0067,0x52,0x6a,0x00,0x1c,0x1d, 960, 540, 0x30, 0x00, 0x00}, /* 0x2d 960x540 60Hz */ 425 {0x0077, 0x53, 0x6b, 0x0b, 0x1d, 0x20, 960, 600, 0x30, 0x00, 0x00}, /* 0x2e 960x600 60Hz */
423 {0x0077,0x53,0x6b,0x0b,0x1d,0x20, 960, 600, 0x30, 0x00, 0x00}, /* 0x2e 960x600 60Hz */ 426 {0x0087, 0x1c, 0x11, 0x00, 0x07, 0x3a, 1280, 1024, 0x30, 0x00, 0x00}, /* 0x2f */
424 {0x0087,0x1c,0x11,0x00,0x07,0x3a,1280,1024, 0x30, 0x00, 0x00}, /* 0x2f */ 427 {0x0137, 0x1d, 0x19, 0x07, 0x07, 0x3a, 1280, 1024, 0x00, 0x00, 0x00}, /* 0x30 */
425 {0x0137,0x1d,0x19,0x07,0x07,0x3a,1280,1024, 0x00, 0x00, 0x00}, /* 0x30 */ 428 {0x0107, 0x1e, 0x1e, 0x00, 0x07, 0x3a, 1280, 1024, 0x00, 0x00, 0x00}, /* 0x31 */
426 {0x0107,0x1e,0x1e,0x00,0x07,0x3a,1280,1024, 0x00, 0x00, 0x00}, /* 0x31 */ 429 {0x0207, 0x1f, 0x20, 0x00, 0x07, 0x3a, 1280, 1024, 0x00, 0x00, 0x00}, /* 0x32 */
427 {0x0207,0x1f,0x20,0x00,0x07,0x3a,1280,1024, 0x00, 0x00, 0x00}, /* 0x32 */ 430 {0x0127, 0x54, 0x6d, 0x00, 0x1a, 0x29, 1152, 864, 0x30, 0x00, 0x00}, /* 0x33 1152x864-60Hz */
428 {0x0127,0x54,0x6d,0x00,0x1a,0x29,1152, 864, 0x30, 0x00, 0x00}, /* 0x33 1152x864-60Hz */ 431 {0x0127, 0x44, 0x19, 0x00, 0x1a, 0x29, 1152, 864, 0x30, 0x00, 0x00}, /* 0x34 1152x864-75Hz */
429 {0x0127,0x44,0x19,0x00,0x1a,0x29,1152, 864, 0x30, 0x00, 0x00}, /* 0x34 1152x864-75Hz */ 432 {0x0127, 0x4a, 0x1e, 0x00, 0x1a, 0x29, 1152, 864, 0x30, 0x00, 0x00}, /* 0x35 1152x864-85Hz */
430 {0x0127,0x4a,0x1e,0x00,0x1a,0x29,1152, 864, 0x30, 0x00, 0x00}, /* 0x35 1152x864-85Hz */ 433 {0xffff, 0x00, 0x00, 0x00, 0x00, 0x00, 0, 0, 0, 0x00, 0x00}
431 {0xffff,0x00,0x00,0x00,0x00,0x00, 0, 0, 0, 0x00, 0x00}
432}; 434};
433 435
434static const struct SiS_CRT1Table SiSUSB_CRT1Table[] = 436static const struct SiS_CRT1Table SiSUSB_CRT1Table[] = {
435{ 437 {{0x2d, 0x27, 0x28, 0x90, 0x2c, 0x80, 0xbf, 0x1f,
436 {{0x2d,0x27,0x28,0x90,0x2c,0x80,0xbf,0x1f, 438 0x9c, 0x8e, 0x8f, 0x96, 0xb9, 0x30, 0x00, 0x00,
437 0x9c,0x8e,0x8f,0x96,0xb9,0x30,0x00,0x00, 439 0x00}}, /* 0x0 */
438 0x00}}, /* 0x0 */ 440 {{0x2d, 0x27, 0x28, 0x90, 0x2c, 0x80, 0x0b, 0x3e,
439 {{0x2d,0x27,0x28,0x90,0x2c,0x80,0x0b,0x3e, 441 0xe9, 0x8b, 0xdf, 0xe7, 0x04, 0x00, 0x00, 0x00,
440 0xe9,0x8b,0xdf,0xe7,0x04,0x00,0x00,0x00, 442 0x00}}, /* 0x1 */
441 0x00}}, /* 0x1 */ 443 {{0x3d, 0x31, 0x31, 0x81, 0x37, 0x1f, 0x72, 0xf0,
442 {{0x3d,0x31,0x31,0x81,0x37,0x1f,0x72,0xf0, 444 0x58, 0x8c, 0x57, 0x57, 0x73, 0x20, 0x00, 0x05,
443 0x58,0x8c,0x57,0x57,0x73,0x20,0x00,0x05, 445 0x01}}, /* 0x2 */
444 0x01}}, /* 0x2 */ 446 {{0x4f, 0x3f, 0x3f, 0x93, 0x45, 0x0d, 0x24, 0xf5,
445 {{0x4f,0x3f,0x3f,0x93,0x45,0x0d,0x24,0xf5, 447 0x02, 0x88, 0xff, 0xff, 0x25, 0x10, 0x00, 0x01,
446 0x02,0x88,0xff,0xff,0x25,0x10,0x00,0x01, 448 0x01}}, /* 0x3 */
447 0x01}}, /* 0x3 */ 449 {{0x5f, 0x4f, 0x50, 0x82, 0x55, 0x81, 0xbf, 0x1f,
448 {{0x5f,0x4f,0x50,0x82,0x55,0x81,0xbf,0x1f, 450 0x9c, 0x8e, 0x8f, 0x96, 0xb9, 0x30, 0x00, 0x05,
449 0x9c,0x8e,0x8f,0x96,0xb9,0x30,0x00,0x05, 451 0x00}}, /* 0x4 */
450 0x00}}, /* 0x4 */ 452 {{0x5f, 0x4f, 0x4f, 0x83, 0x55, 0x81, 0x0b, 0x3e,
451 {{0x5f,0x4f,0x4f,0x83,0x55,0x81,0x0b,0x3e, 453 0xe9, 0x8b, 0xdf, 0xe8, 0x0c, 0x00, 0x00, 0x05,
452 0xe9,0x8b,0xdf,0xe8,0x0c,0x00,0x00,0x05, 454 0x00}}, /* 0x5 */
453 0x00}}, /* 0x5 */ 455 {{0x63, 0x4f, 0x4f, 0x87, 0x56, 0x9b, 0x06, 0x3e,
454 {{0x63,0x4f,0x4f,0x87,0x56,0x9b,0x06,0x3e, 456 0xe8, 0x8a, 0xdf, 0xe7, 0x07, 0x00, 0x00, 0x01,
455 0xe8,0x8a,0xdf,0xe7,0x07,0x00,0x00,0x01, 457 0x00}}, /* 0x6 */
456 0x00}}, /* 0x6 */ 458 {{0x64, 0x4f, 0x4f, 0x88, 0x55, 0x9d, 0xf2, 0x1f,
457 {{0x64,0x4f,0x4f,0x88,0x55,0x9d,0xf2,0x1f, 459 0xe0, 0x83, 0xdf, 0xdf, 0xf3, 0x10, 0x00, 0x01,
458 0xe0,0x83,0xdf,0xdf,0xf3,0x10,0x00,0x01, 460 0x00}}, /* 0x7 */
459 0x00}}, /* 0x7 */ 461 {{0x63, 0x4f, 0x4f, 0x87, 0x5a, 0x81, 0xfb, 0x1f,
460 {{0x63,0x4f,0x4f,0x87,0x5a,0x81,0xfb,0x1f, 462 0xe0, 0x83, 0xdf, 0xdf, 0xfc, 0x10, 0x00, 0x05,
461 0xe0,0x83,0xdf,0xdf,0xfc,0x10,0x00,0x05, 463 0x00}}, /* 0x8 */
462 0x00}}, /* 0x8 */ 464 {{0x65, 0x4f, 0x4f, 0x89, 0x58, 0x80, 0xfb, 0x1f,
463 {{0x65,0x4f,0x4f,0x89,0x58,0x80,0xfb,0x1f, 465 0xe0, 0x83, 0xdf, 0xdf, 0xfc, 0x10, 0x00, 0x05,
464 0xe0,0x83,0xdf,0xdf,0xfc,0x10,0x00,0x05, 466 0x61}}, /* 0x9 */
465 0x61}}, /* 0x9 */ 467 {{0x65, 0x4f, 0x4f, 0x89, 0x58, 0x80, 0x01, 0x3e,
466 {{0x65,0x4f,0x4f,0x89,0x58,0x80,0x01,0x3e, 468 0xe0, 0x83, 0xdf, 0xdf, 0x02, 0x00, 0x00, 0x05,
467 0xe0,0x83,0xdf,0xdf,0x02,0x00,0x00,0x05, 469 0x61}}, /* 0xa */
468 0x61}}, /* 0xa */ 470 {{0x67, 0x4f, 0x4f, 0x8b, 0x58, 0x81, 0x0d, 0x3e,
469 {{0x67,0x4f,0x4f,0x8b,0x58,0x81,0x0d,0x3e, 471 0xe0, 0x83, 0xdf, 0xdf, 0x0e, 0x00, 0x00, 0x05,
470 0xe0,0x83,0xdf,0xdf,0x0e,0x00,0x00,0x05, 472 0x61}}, /* 0xb */
471 0x61}}, /* 0xb */ 473 {{0x65, 0x4f, 0x4f, 0x89, 0x57, 0x9f, 0xfb, 0x1f,
472 {{0x65,0x4f,0x4f,0x89,0x57,0x9f,0xfb,0x1f, 474 0xe6, 0x8a, 0xdf, 0xdf, 0xfc, 0x10, 0x00, 0x01,
473 0xe6,0x8a,0xdf,0xdf,0xfc,0x10,0x00,0x01, 475 0x00}}, /* 0xc */
474 0x00}}, /* 0xc */ 476 {{0x7b, 0x63, 0x63, 0x9f, 0x6a, 0x93, 0x6f, 0xf0,
475 {{0x7b,0x63,0x63,0x9f,0x6a,0x93,0x6f,0xf0, 477 0x58, 0x8a, 0x57, 0x57, 0x70, 0x20, 0x00, 0x05,
476 0x58,0x8a,0x57,0x57,0x70,0x20,0x00,0x05, 478 0x01}}, /* 0xd */
477 0x01}}, /* 0xd */ 479 {{0x7f, 0x63, 0x63, 0x83, 0x6c, 0x1c, 0x72, 0xf0,
478 {{0x7f,0x63,0x63,0x83,0x6c,0x1c,0x72,0xf0, 480 0x58, 0x8c, 0x57, 0x57, 0x73, 0x20, 0x00, 0x06,
479 0x58,0x8c,0x57,0x57,0x73,0x20,0x00,0x06, 481 0x01}}, /* 0xe */
480 0x01}}, /* 0xe */ 482 {{0x7d, 0x63, 0x63, 0x81, 0x6e, 0x1d, 0x98, 0xf0,
481 {{0x7d,0x63,0x63,0x81,0x6e,0x1d,0x98,0xf0, 483 0x7c, 0x82, 0x57, 0x57, 0x99, 0x00, 0x00, 0x06,
482 0x7c,0x82,0x57,0x57,0x99,0x00,0x00,0x06, 484 0x01}}, /* 0xf */
483 0x01}}, /* 0xf */ 485 {{0x7f, 0x63, 0x63, 0x83, 0x69, 0x13, 0x6f, 0xf0,
484 {{0x7f,0x63,0x63,0x83,0x69,0x13,0x6f,0xf0, 486 0x58, 0x8b, 0x57, 0x57, 0x70, 0x20, 0x00, 0x06,
485 0x58,0x8b,0x57,0x57,0x70,0x20,0x00,0x06, 487 0x01}}, /* 0x10 */
486 0x01}}, /* 0x10 */ 488 {{0x7e, 0x63, 0x63, 0x82, 0x6b, 0x13, 0x75, 0xf0,
487 {{0x7e,0x63,0x63,0x82,0x6b,0x13,0x75,0xf0, 489 0x58, 0x8b, 0x57, 0x57, 0x76, 0x20, 0x00, 0x06,
488 0x58,0x8b,0x57,0x57,0x76,0x20,0x00,0x06, 490 0x01}}, /* 0x11 */
489 0x01}}, /* 0x11 */ 491 {{0x81, 0x63, 0x63, 0x85, 0x6d, 0x18, 0x7a, 0xf0,
490 {{0x81,0x63,0x63,0x85,0x6d,0x18,0x7a,0xf0, 492 0x58, 0x8b, 0x57, 0x57, 0x7b, 0x20, 0x00, 0x06,
491 0x58,0x8b,0x57,0x57,0x7b,0x20,0x00,0x06, 493 0x61}}, /* 0x12 */
492 0x61}}, /* 0x12 */ 494 {{0x83, 0x63, 0x63, 0x87, 0x6e, 0x19, 0x81, 0xf0,
493 {{0x83,0x63,0x63,0x87,0x6e,0x19,0x81,0xf0, 495 0x58, 0x8b, 0x57, 0x57, 0x82, 0x20, 0x00, 0x06,
494 0x58,0x8b,0x57,0x57,0x82,0x20,0x00,0x06, 496 0x61}}, /* 0x13 */
495 0x61}}, /* 0x13 */ 497 {{0x85, 0x63, 0x63, 0x89, 0x6f, 0x1a, 0x91, 0xf0,
496 {{0x85,0x63,0x63,0x89,0x6f,0x1a,0x91,0xf0, 498 0x58, 0x8b, 0x57, 0x57, 0x92, 0x20, 0x00, 0x06,
497 0x58,0x8b,0x57,0x57,0x92,0x20,0x00,0x06, 499 0x61}}, /* 0x14 */
498 0x61}}, /* 0x14 */ 500 {{0x99, 0x7f, 0x7f, 0x9d, 0x84, 0x1a, 0x96, 0x1f,
499 {{0x99,0x7f,0x7f,0x9d,0x84,0x1a,0x96,0x1f, 501 0x7f, 0x83, 0x7f, 0x7f, 0x97, 0x10, 0x00, 0x02,
500 0x7f,0x83,0x7f,0x7f,0x97,0x10,0x00,0x02, 502 0x00}}, /* 0x15 */
501 0x00}}, /* 0x15 */ 503 {{0xa3, 0x7f, 0x7f, 0x87, 0x86, 0x97, 0x24, 0xf5,
502 {{0xa3,0x7f,0x7f,0x87,0x86,0x97,0x24,0xf5, 504 0x02, 0x88, 0xff, 0xff, 0x25, 0x10, 0x00, 0x02,
503 0x02,0x88,0xff,0xff,0x25,0x10,0x00,0x02, 505 0x01}}, /* 0x16 */
504 0x01}}, /* 0x16 */ 506 {{0xa1, 0x7f, 0x7f, 0x85, 0x86, 0x97, 0x24, 0xf5,
505 {{0xa1,0x7f,0x7f,0x85,0x86,0x97,0x24,0xf5, 507 0x02, 0x88, 0xff, 0xff, 0x25, 0x10, 0x00, 0x02,
506 0x02,0x88,0xff,0xff,0x25,0x10,0x00,0x02, 508 0x01}}, /* 0x17 */
507 0x01}}, /* 0x17 */ 509 {{0x9f, 0x7f, 0x7f, 0x83, 0x85, 0x91, 0x1e, 0xf5,
508 {{0x9f,0x7f,0x7f,0x83,0x85,0x91,0x1e,0xf5, 510 0x00, 0x83, 0xff, 0xff, 0x1f, 0x10, 0x00, 0x02,
509 0x00,0x83,0xff,0xff,0x1f,0x10,0x00,0x02, 511 0x01}}, /* 0x18 */
510 0x01}}, /* 0x18 */ 512 {{0xa7, 0x7f, 0x7f, 0x8b, 0x89, 0x95, 0x26, 0xf5,
511 {{0xa7,0x7f,0x7f,0x8b,0x89,0x95,0x26,0xf5, 513 0x00, 0x83, 0xff, 0xff, 0x27, 0x10, 0x00, 0x02,
512 0x00,0x83,0xff,0xff,0x27,0x10,0x00,0x02, 514 0x01}}, /* 0x19 */
513 0x01}}, /* 0x19 */ 515 {{0xa9, 0x7f, 0x7f, 0x8d, 0x8c, 0x9a, 0x2c, 0xf5,
514 {{0xa9,0x7f,0x7f,0x8d,0x8c,0x9a,0x2c,0xf5, 516 0x00, 0x83, 0xff, 0xff, 0x2d, 0x14, 0x00, 0x02,
515 0x00,0x83,0xff,0xff,0x2d,0x14,0x00,0x02, 517 0x62}}, /* 0x1a */
516 0x62}}, /* 0x1a */ 518 {{0xab, 0x7f, 0x7f, 0x8f, 0x8d, 0x9b, 0x35, 0xf5,
517 {{0xab,0x7f,0x7f,0x8f,0x8d,0x9b,0x35,0xf5, 519 0x00, 0x83, 0xff, 0xff, 0x36, 0x14, 0x00, 0x02,
518 0x00,0x83,0xff,0xff,0x36,0x14,0x00,0x02, 520 0x62}}, /* 0x1b */
519 0x62}}, /* 0x1b */ 521 {{0xcf, 0x9f, 0x9f, 0x93, 0xb2, 0x01, 0x14, 0xba,
520 {{0xcf,0x9f,0x9f,0x93,0xb2,0x01,0x14,0xba, 522 0x00, 0x83, 0xff, 0xff, 0x15, 0x00, 0x00, 0x03,
521 0x00,0x83,0xff,0xff,0x15,0x00,0x00,0x03, 523 0x00}}, /* 0x1c */
522 0x00}}, /* 0x1c */ 524 {{0xce, 0x9f, 0x9f, 0x92, 0xa9, 0x17, 0x28, 0x5a,
523 {{0xce,0x9f,0x9f,0x92,0xa9,0x17,0x28,0x5a, 525 0x00, 0x83, 0xff, 0xff, 0x29, 0x09, 0x00, 0x07,
524 0x00,0x83,0xff,0xff,0x29,0x09,0x00,0x07, 526 0x01}}, /* 0x1d */
525 0x01}}, /* 0x1d */ 527 {{0xce, 0x9f, 0x9f, 0x92, 0xa5, 0x17, 0x28, 0x5a,
526 {{0xce,0x9f,0x9f,0x92,0xa5,0x17,0x28,0x5a, 528 0x00, 0x83, 0xff, 0xff, 0x29, 0x09, 0x00, 0x07,
527 0x00,0x83,0xff,0xff,0x29,0x09,0x00,0x07, 529 0x01}}, /* 0x1e */
528 0x01}}, /* 0x1e */ 530 {{0xd3, 0x9f, 0x9f, 0x97, 0xab, 0x1f, 0x2e, 0x5a,
529 {{0xd3,0x9f,0x9f,0x97,0xab,0x1f,0x2e,0x5a, 531 0x00, 0x83, 0xff, 0xff, 0x2f, 0x09, 0x00, 0x07,
530 0x00,0x83,0xff,0xff,0x2f,0x09,0x00,0x07, 532 0x01}}, /* 0x1f */
531 0x01}}, /* 0x1f */ 533 {{0x09, 0xc7, 0xc7, 0x8d, 0xd3, 0x0b, 0xe0, 0x10,
532 {{0x09,0xc7,0xc7,0x8d,0xd3,0x0b,0xe0,0x10, 534 0xb0, 0x83, 0xaf, 0xaf, 0xe1, 0x2f, 0x01, 0x04,
533 0xb0,0x83,0xaf,0xaf,0xe1,0x2f,0x01,0x04, 535 0x00}}, /* 0x20 */
534 0x00}}, /* 0x20 */ 536 {{0x09, 0xc7, 0xc7, 0x8d, 0xd3, 0x0b, 0xe0, 0x10,
535 {{0x09,0xc7,0xc7,0x8d,0xd3,0x0b,0xe0,0x10, 537 0xb0, 0x83, 0xaf, 0xaf, 0xe1, 0x2f, 0x01, 0x04,
536 0xb0,0x83,0xaf,0xaf,0xe1,0x2f,0x01,0x04, 538 0x00}}, /* 0x21 */
537 0x00}}, /* 0x21 */ 539 {{0x09, 0xc7, 0xc7, 0x8d, 0xd3, 0x0b, 0xe0, 0x10,
538 {{0x09,0xc7,0xc7,0x8d,0xd3,0x0b,0xe0,0x10, 540 0xb0, 0x83, 0xaf, 0xaf, 0xe1, 0x2f, 0x01, 0x04,
539 0xb0,0x83,0xaf,0xaf,0xe1,0x2f,0x01,0x04, 541 0x00}}, /* 0x22 */
540 0x00}}, /* 0x22 */ 542 {{0x09, 0xc7, 0xc7, 0x8d, 0xd3, 0x0b, 0xe0, 0x10,
541 {{0x09,0xc7,0xc7,0x8d,0xd3,0x0b,0xe0,0x10, 543 0xb0, 0x83, 0xaf, 0xaf, 0xe1, 0x2f, 0x01, 0x04,
542 0xb0,0x83,0xaf,0xaf,0xe1,0x2f,0x01,0x04, 544 0x00}}, /* 0x23 */
543 0x00}}, /* 0x23 */ 545 {{0x09, 0xc7, 0xc7, 0x8d, 0xd3, 0x0b, 0xe0, 0x10,
544 {{0x09,0xc7,0xc7,0x8d,0xd3,0x0b,0xe0,0x10, 546 0xb0, 0x83, 0xaf, 0xaf, 0xe1, 0x2f, 0x01, 0x04,
545 0xb0,0x83,0xaf,0xaf,0xe1,0x2f,0x01,0x04, 547 0x00}}, /* 0x24 */
546 0x00}}, /* 0x24 */ 548 {{0x09, 0xc7, 0xc7, 0x8d, 0xd3, 0x0b, 0xe0, 0x10,
547 {{0x09,0xc7,0xc7,0x8d,0xd3,0x0b,0xe0,0x10, 549 0xb0, 0x83, 0xaf, 0xaf, 0xe1, 0x2f, 0x01, 0x04,
548 0xb0,0x83,0xaf,0xaf,0xe1,0x2f,0x01,0x04, 550 0x00}}, /* 0x25 */
549 0x00}}, /* 0x25 */ 551 {{0x09, 0xc7, 0xc7, 0x8d, 0xd3, 0x0b, 0xe0, 0x10,
550 {{0x09,0xc7,0xc7,0x8d,0xd3,0x0b,0xe0,0x10, 552 0xb0, 0x83, 0xaf, 0xaf, 0xe1, 0x2f, 0x01, 0x04,
551 0xb0,0x83,0xaf,0xaf,0xe1,0x2f,0x01,0x04, 553 0x00}}, /* 0x26 */
552 0x00}}, /* 0x26 */ 554 {{0x40, 0xef, 0xef, 0x84, 0x03, 0x1d, 0xda, 0x1f,
553 {{0x40,0xef,0xef,0x84,0x03,0x1d,0xda,0x1f, 555 0xa0, 0x83, 0x9f, 0x9f, 0xdb, 0x1f, 0x41, 0x01,
554 0xa0,0x83,0x9f,0x9f,0xdb,0x1f,0x41,0x01, 556 0x00}}, /* 0x27 */
555 0x00}}, /* 0x27 */ 557 {{0x43, 0xef, 0xef, 0x87, 0x06, 0x00, 0xd4, 0x1f,
556 {{0x43,0xef,0xef,0x87,0x06,0x00,0xd4,0x1f, 558 0xa0, 0x83, 0x9f, 0x9f, 0xd5, 0x1f, 0x41, 0x05,
557 0xa0,0x83,0x9f,0x9f,0xd5,0x1f,0x41,0x05, 559 0x63}}, /* 0x28 */
558 0x63}}, /* 0x28 */ 560 {{0x45, 0xef, 0xef, 0x89, 0x07, 0x01, 0xd9, 0x1f,
559 {{0x45,0xef,0xef,0x89,0x07,0x01,0xd9,0x1f, 561 0xa0, 0x83, 0x9f, 0x9f, 0xda, 0x1f, 0x41, 0x05,
560 0xa0,0x83,0x9f,0x9f,0xda,0x1f,0x41,0x05, 562 0x63}}, /* 0x29 */
561 0x63}}, /* 0x29 */ 563 {{0x40, 0xef, 0xef, 0x84, 0x03, 0x1d, 0xda, 0x1f,
562 {{0x40,0xef,0xef,0x84,0x03,0x1d,0xda,0x1f, 564 0xa0, 0x83, 0x9f, 0x9f, 0xdb, 0x1f, 0x41, 0x01,
563 0xa0,0x83,0x9f,0x9f,0xdb,0x1f,0x41,0x01, 565 0x00}}, /* 0x2a */
564 0x00}}, /* 0x2a */ 566 {{0x40, 0xef, 0xef, 0x84, 0x03, 0x1d, 0xda, 0x1f,
565 {{0x40,0xef,0xef,0x84,0x03,0x1d,0xda,0x1f, 567 0xa0, 0x83, 0x9f, 0x9f, 0xdb, 0x1f, 0x41, 0x01,
566 0xa0,0x83,0x9f,0x9f,0xdb,0x1f,0x41,0x01, 568 0x00}}, /* 0x2b */
567 0x00}}, /* 0x2b */ 569 {{0x40, 0xef, 0xef, 0x84, 0x03, 0x1d, 0xda, 0x1f,
568 {{0x40,0xef,0xef,0x84,0x03,0x1d,0xda,0x1f, 570 0xa0, 0x83, 0x9f, 0x9f, 0xdb, 0x1f, 0x41, 0x01,
569 0xa0,0x83,0x9f,0x9f,0xdb,0x1f,0x41,0x01, 571 0x00}}, /* 0x2c */
570 0x00}}, /* 0x2c */ 572 {{0x59, 0xff, 0xff, 0x9d, 0x17, 0x13, 0x33, 0xba,
571 {{0x59,0xff,0xff,0x9d,0x17,0x13,0x33,0xba, 573 0x00, 0x83, 0xff, 0xff, 0x34, 0x0f, 0x41, 0x05,
572 0x00,0x83,0xff,0xff,0x34,0x0f,0x41,0x05, 574 0x44}}, /* 0x2d */
573 0x44}}, /* 0x2d */ 575 {{0x5b, 0xff, 0xff, 0x9f, 0x18, 0x14, 0x38, 0xba,
574 {{0x5b,0xff,0xff,0x9f,0x18,0x14,0x38,0xba, 576 0x00, 0x83, 0xff, 0xff, 0x39, 0x0f, 0x41, 0x05,
575 0x00,0x83,0xff,0xff,0x39,0x0f,0x41,0x05, 577 0x44}}, /* 0x2e */
576 0x44}}, /* 0x2e */ 578 {{0x5b, 0xff, 0xff, 0x9f, 0x18, 0x14, 0x3d, 0xba,
577 {{0x5b,0xff,0xff,0x9f,0x18,0x14,0x3d,0xba, 579 0x00, 0x83, 0xff, 0xff, 0x3e, 0x0f, 0x41, 0x05,
578 0x00,0x83,0xff,0xff,0x3e,0x0f,0x41,0x05, 580 0x44}}, /* 0x2f */
579 0x44}}, /* 0x2f */ 581 {{0x5d, 0xff, 0xff, 0x81, 0x19, 0x95, 0x41, 0xba,
580 {{0x5d,0xff,0xff,0x81,0x19,0x95,0x41,0xba, 582 0x00, 0x84, 0xff, 0xff, 0x42, 0x0f, 0x41, 0x05,
581 0x00,0x84,0xff,0xff,0x42,0x0f,0x41,0x05, 583 0x44}}, /* 0x30 */
582 0x44}}, /* 0x30 */ 584 {{0x55, 0xff, 0xff, 0x99, 0x0d, 0x0c, 0x3e, 0xba,
583 {{0x55,0xff,0xff,0x99,0x0d,0x0c,0x3e,0xba, 585 0x00, 0x84, 0xff, 0xff, 0x3f, 0x0f, 0x41, 0x05,
584 0x00,0x84,0xff,0xff,0x3f,0x0f,0x41,0x05, 586 0x00}}, /* 0x31 */
585 0x00}}, /* 0x31 */ 587 {{0x7f, 0x63, 0x63, 0x83, 0x6c, 0x1c, 0x72, 0xba,
586 {{0x7f,0x63,0x63,0x83,0x6c,0x1c,0x72,0xba, 588 0x27, 0x8b, 0xdf, 0xdf, 0x73, 0x00, 0x00, 0x06,
587 0x27,0x8b,0xdf,0xdf,0x73,0x00,0x00,0x06, 589 0x01}}, /* 0x32 */
588 0x01}}, /* 0x32 */ 590 {{0x7f, 0x63, 0x63, 0x83, 0x69, 0x13, 0x6f, 0xba,
589 {{0x7f,0x63,0x63,0x83,0x69,0x13,0x6f,0xba, 591 0x26, 0x89, 0xdf, 0xdf, 0x6f, 0x00, 0x00, 0x06,
590 0x26,0x89,0xdf,0xdf,0x6f,0x00,0x00,0x06, 592 0x01}}, /* 0x33 */
591 0x01}}, /* 0x33 */ 593 {{0x7f, 0x63, 0x63, 0x82, 0x6b, 0x13, 0x75, 0xba,
592 {{0x7f,0x63,0x63,0x82,0x6b,0x13,0x75,0xba, 594 0x29, 0x8c, 0xdf, 0xdf, 0x75, 0x00, 0x00, 0x06,
593 0x29,0x8c,0xdf,0xdf,0x75,0x00,0x00,0x06, 595 0x01}}, /* 0x34 */
594 0x01}}, /* 0x34 */ 596 {{0xa3, 0x7f, 0x7f, 0x87, 0x86, 0x97, 0x24, 0xf1,
595 {{0xa3,0x7f,0x7f,0x87,0x86,0x97,0x24,0xf1, 597 0xaf, 0x85, 0x3f, 0x3f, 0x25, 0x30, 0x00, 0x02,
596 0xaf,0x85,0x3f,0x3f,0x25,0x30,0x00,0x02, 598 0x01}}, /* 0x35 */
597 0x01}}, /* 0x35 */ 599 {{0x9f, 0x7f, 0x7f, 0x83, 0x85, 0x91, 0x1e, 0xf1,
598 {{0x9f,0x7f,0x7f,0x83,0x85,0x91,0x1e,0xf1, 600 0xad, 0x81, 0x3f, 0x3f, 0x1f, 0x30, 0x00, 0x02,
599 0xad,0x81,0x3f,0x3f,0x1f,0x30,0x00,0x02, 601 0x01}}, /* 0x36 */
600 0x01}}, /* 0x36 */ 602 {{0xa7, 0x7f, 0x7f, 0x88, 0x89, 0x95, 0x26, 0xf1,
601 {{0xa7,0x7f,0x7f,0x88,0x89,0x95,0x26,0xf1, 603 0xb1, 0x85, 0x3f, 0x3f, 0x27, 0x30, 0x00, 0x02,
602 0xb1,0x85,0x3f,0x3f,0x27,0x30,0x00,0x02, 604 0x01}}, /* 0x37 */
603 0x01}}, /* 0x37 */ 605 {{0xce, 0x9f, 0x9f, 0x92, 0xa9, 0x17, 0x28, 0xc4,
604 {{0xce,0x9f,0x9f,0x92,0xa9,0x17,0x28,0xc4, 606 0x7a, 0x8e, 0xcf, 0xcf, 0x29, 0x21, 0x00, 0x07,
605 0x7a,0x8e,0xcf,0xcf,0x29,0x21,0x00,0x07, 607 0x01}}, /* 0x38 */
606 0x01}}, /* 0x38 */ 608 {{0xce, 0x9f, 0x9f, 0x92, 0xa5, 0x17, 0x28, 0xd4,
607 {{0xce,0x9f,0x9f,0x92,0xa5,0x17,0x28,0xd4, 609 0x7a, 0x8e, 0xcf, 0xcf, 0x29, 0x21, 0x00, 0x07,
608 0x7a,0x8e,0xcf,0xcf,0x29,0x21,0x00,0x07, 610 0x01}}, /* 0x39 */
609 0x01}}, /* 0x39 */ 611 {{0xd3, 0x9f, 0x9f, 0x97, 0xab, 0x1f, 0x2e, 0xd4,
610 {{0xd3,0x9f,0x9f,0x97,0xab,0x1f,0x2e,0xd4, 612 0x7d, 0x81, 0xcf, 0xcf, 0x2f, 0x21, 0x00, 0x07,
611 0x7d,0x81,0xcf,0xcf,0x2f,0x21,0x00,0x07, 613 0x01}}, /* 0x3a */
612 0x01}}, /* 0x3a */ 614 {{0xdc, 0x9f, 0x9f, 0x80, 0xaf, 0x9d, 0xe6, 0xff,
613 {{0xdc,0x9f,0x9f,0x80,0xaf,0x9d,0xe6,0xff, 615 0xc0, 0x83, 0xbf, 0xbf, 0xe7, 0x10, 0x00, 0x07,
614 0xc0,0x83,0xbf,0xbf,0xe7,0x10,0x00,0x07, 616 0x01}}, /* 0x3b */
615 0x01}}, /* 0x3b */ 617 {{0x6b, 0x59, 0x59, 0x8f, 0x5e, 0x8c, 0x0b, 0x3e,
616 {{0x6b,0x59,0x59,0x8f,0x5e,0x8c,0x0b,0x3e, 618 0xe9, 0x8b, 0xdf, 0xe7, 0x04, 0x00, 0x00, 0x05,
617 0xe9,0x8b,0xdf,0xe7,0x04,0x00,0x00,0x05, 619 0x00}}, /* 0x3c */
618 0x00}}, /* 0x3c */ 620 {{0x6d, 0x59, 0x59, 0x91, 0x60, 0x89, 0x53, 0xf0,
619 {{0x6d,0x59,0x59,0x91,0x60,0x89,0x53,0xf0, 621 0x41, 0x84, 0x3f, 0x3f, 0x54, 0x00, 0x00, 0x05,
620 0x41,0x84,0x3f,0x3f,0x54,0x00,0x00,0x05, 622 0x41}}, /* 0x3d */
621 0x41}}, /* 0x3d */ 623 {{0x86, 0x6a, 0x6a, 0x8a, 0x74, 0x06, 0x8c, 0x15,
622 {{0x86,0x6a,0x6a,0x8a,0x74,0x06,0x8c,0x15, 624 0x4f, 0x83, 0xef, 0xef, 0x8d, 0x30, 0x00, 0x02,
623 0x4f,0x83,0xef,0xef,0x8d,0x30,0x00,0x02, 625 0x00}}, /* 0x3e */
624 0x00}}, /* 0x3e */ 626 {{0x81, 0x6a, 0x6a, 0x85, 0x70, 0x00, 0x0f, 0x3e,
625 {{0x81,0x6a,0x6a,0x85,0x70,0x00,0x0f,0x3e, 627 0xeb, 0x8e, 0xdf, 0xdf, 0x10, 0x00, 0x00, 0x02,
626 0xeb,0x8e,0xdf,0xdf,0x10,0x00,0x00,0x02, 628 0x00}}, /* 0x3f */
627 0x00}}, /* 0x3f */ 629 {{0xa3, 0x7f, 0x7f, 0x87, 0x86, 0x97, 0x1e, 0xf1,
628 {{0xa3,0x7f,0x7f,0x87,0x86,0x97,0x1e,0xf1, 630 0xae, 0x85, 0x57, 0x57, 0x1f, 0x30, 0x00, 0x02,
629 0xae,0x85,0x57,0x57,0x1f,0x30,0x00,0x02, 631 0x01}}, /* 0x40 */
630 0x01}}, /* 0x40 */ 632 {{0xa3, 0x7f, 0x7f, 0x87, 0x86, 0x97, 0x24, 0xf5,
631 {{0xa3,0x7f,0x7f,0x87,0x86,0x97,0x24,0xf5, 633 0x02, 0x88, 0xff, 0xff, 0x25, 0x10, 0x00, 0x02,
632 0x02,0x88,0xff,0xff,0x25,0x10,0x00,0x02, 634 0x01}}, /* 0x41 */
633 0x01}}, /* 0x41 */ 635 {{0xce, 0x9f, 0x9f, 0x92, 0xa9, 0x17, 0x20, 0xf5,
634 {{0xce,0x9f,0x9f,0x92,0xa9,0x17,0x20,0xf5, 636 0x03, 0x88, 0xff, 0xff, 0x21, 0x10, 0x00, 0x07,
635 0x03,0x88,0xff,0xff,0x21,0x10,0x00,0x07, 637 0x01}}, /* 0x42 */
636 0x01}}, /* 0x42 */ 638 {{0xe6, 0xae, 0xae, 0x8a, 0xbd, 0x90, 0x3d, 0x10,
637 {{0xe6,0xae,0xae,0x8a,0xbd,0x90,0x3d,0x10, 639 0x1a, 0x8d, 0x19, 0x19, 0x3e, 0x2f, 0x00, 0x03,
638 0x1a,0x8d,0x19,0x19,0x3e,0x2f,0x00,0x03, 640 0x00}}, /* 0x43 */
639 0x00}}, /* 0x43 */ 641 {{0xc3, 0x8f, 0x8f, 0x87, 0x9b, 0x0b, 0x82, 0xef,
640 {{0xc3,0x8f,0x8f,0x87,0x9b,0x0b,0x82,0xef, 642 0x60, 0x83, 0x5f, 0x5f, 0x83, 0x10, 0x00, 0x07,
641 0x60,0x83,0x5f,0x5f,0x83,0x10,0x00,0x07, 643 0x01}}, /* 0x44 */
642 0x01}}, /* 0x44 */ 644 {{0x86, 0x69, 0x69, 0x8A, 0x74, 0x06, 0x8C, 0x15,
643 {{0x86,0x69,0x69,0x8A,0x74,0x06,0x8C,0x15, 645 0x4F, 0x83, 0xEF, 0xEF, 0x8D, 0x30, 0x00, 0x02,
644 0x4F,0x83,0xEF,0xEF,0x8D,0x30,0x00,0x02, 646 0x00}}, /* 0x45 */
645 0x00}}, /* 0x45 */ 647 {{0x83, 0x69, 0x69, 0x87, 0x6f, 0x1d, 0x03, 0x3E,
646 {{0x83,0x69,0x69,0x87,0x6f,0x1d,0x03,0x3E, 648 0xE5, 0x8d, 0xDF, 0xe4, 0x04, 0x00, 0x00, 0x06,
647 0xE5,0x8d,0xDF,0xe4,0x04,0x00,0x00,0x06, 649 0x00}}, /* 0x46 */
648 0x00}}, /* 0x46 */ 650 {{0x86, 0x6A, 0x6A, 0x8A, 0x74, 0x06, 0x8C, 0x15,
649 {{0x86,0x6A,0x6A,0x8A,0x74,0x06,0x8C,0x15, 651 0x4F, 0x83, 0xEF, 0xEF, 0x8D, 0x30, 0x00, 0x02,
650 0x4F,0x83,0xEF,0xEF,0x8D,0x30,0x00,0x02, 652 0x00}}, /* 0x47 */
651 0x00}}, /* 0x47 */ 653 {{0x81, 0x6A, 0x6A, 0x85, 0x70, 0x00, 0x0F, 0x3E,
652 {{0x81,0x6A,0x6A,0x85,0x70,0x00,0x0F,0x3E, 654 0xEB, 0x8E, 0xDF, 0xDF, 0x10, 0x00, 0x00, 0x02,
653 0xEB,0x8E,0xDF,0xDF,0x10,0x00,0x00,0x02, 655 0x00}}, /* 0x48 */
654 0x00}}, /* 0x48 */ 656 {{0xdd, 0xa9, 0xa9, 0x81, 0xb4, 0x97, 0x26, 0xfd,
655 {{0xdd,0xa9,0xa9,0x81,0xb4,0x97,0x26,0xfd, 657 0x01, 0x8d, 0xff, 0x00, 0x27, 0x10, 0x00, 0x03,
656 0x01,0x8d,0xff,0x00,0x27,0x10,0x00,0x03, 658 0x01}}, /* 0x49 */
657 0x01}}, /* 0x49 */ 659 {{0xd9, 0x8f, 0x8f, 0x9d, 0xba, 0x0a, 0x8a, 0xff,
658 {{0xd9,0x8f,0x8f,0x9d,0xba,0x0a,0x8a,0xff, 660 0x60, 0x8b, 0x5f, 0x5f, 0x8b, 0x10, 0x00, 0x03,
659 0x60,0x8b,0x5f,0x5f,0x8b,0x10,0x00,0x03, 661 0x01}}, /* 0x4a */
660 0x01}}, /* 0x4a */ 662 {{0xea, 0xae, 0xae, 0x8e, 0xba, 0x82, 0x40, 0x10,
661 {{0xea,0xae,0xae,0x8e,0xba,0x82,0x40,0x10, 663 0x1b, 0x87, 0x19, 0x1a, 0x41, 0x0f, 0x00, 0x03,
662 0x1b,0x87,0x19,0x1a,0x41,0x0f,0x00,0x03, 664 0x00}}, /* 0x4b */
663 0x00}}, /* 0x4b */ 665 {{0xd3, 0x9f, 0x9f, 0x97, 0xab, 0x1f, 0xf1, 0xff,
664 {{0xd3,0x9f,0x9f,0x97,0xab,0x1f,0xf1,0xff, 666 0xc0, 0x83, 0xbf, 0xbf, 0xf2, 0x10, 0x00, 0x07,
665 0xc0,0x83,0xbf,0xbf,0xf2,0x10,0x00,0x07, 667 0x01}}, /* 0x4c */
666 0x01}}, /* 0x4c */ 668 {{0x75, 0x5f, 0x5f, 0x99, 0x66, 0x90, 0x53, 0xf0,
667 {{0x75,0x5f,0x5f,0x99,0x66,0x90,0x53,0xf0, 669 0x41, 0x84, 0x3f, 0x3f, 0x54, 0x00, 0x00, 0x05,
668 0x41,0x84,0x3f,0x3f,0x54,0x00,0x00,0x05, 670 0x41}},
669 0x41}}, 671 {{0x2d, 0x27, 0x28, 0x90, 0x2c, 0x80, 0x0b, 0x3e,
670 {{0x2d,0x27,0x28,0x90,0x2c,0x80,0x0b,0x3e, 672 0xe9, 0x8b, 0xdf, 0xe7, 0x04, 0x00, 0x00, 0x00,
671 0xe9,0x8b,0xdf,0xe7,0x04,0x00,0x00,0x00, 673 0x00}}, /* 0x4e */
672 0x00}}, /* 0x4e */ 674 {{0xcd, 0x9f, 0x9f, 0x91, 0xab, 0x1c, 0x3a, 0xff,
673 {{0xcd,0x9f,0x9f,0x91,0xab,0x1c,0x3a,0xff, 675 0x20, 0x83, 0x1f, 0x1f, 0x3b, 0x10, 0x00, 0x07,
674 0x20,0x83,0x1f,0x1f,0x3b,0x10,0x00,0x07, 676 0x21}}, /* 0x4f */
675 0x21}}, /* 0x4f */ 677 {{0x15, 0xd1, 0xd1, 0x99, 0xe2, 0x19, 0x3d, 0x10,
676 {{0x15,0xd1,0xd1,0x99,0xe2,0x19,0x3d,0x10, 678 0x1a, 0x8d, 0x19, 0x19, 0x3e, 0x2f, 0x01, 0x0c,
677 0x1a,0x8d,0x19,0x19,0x3e,0x2f,0x01,0x0c, 679 0x20}}, /* 0x50 */
678 0x20}}, /* 0x50 */ 680 {{0x0e, 0xef, 0xef, 0x92, 0xfe, 0x03, 0x30, 0xf0,
679 {{0x0e,0xef,0xef,0x92,0xfe,0x03,0x30,0xf0, 681 0x1e, 0x83, 0x1b, 0x1c, 0x31, 0x00, 0x01, 0x00,
680 0x1e,0x83,0x1b,0x1c,0x31,0x00,0x01,0x00, 682 0x61}}, /* 0x51 */
681 0x61}}, /* 0x51 */ 683 {{0x85, 0x77, 0x77, 0x89, 0x7d, 0x01, 0x31, 0xf0,
682 {{0x85,0x77,0x77,0x89,0x7d,0x01,0x31,0xf0, 684 0x1e, 0x84, 0x1b, 0x1c, 0x32, 0x00, 0x00, 0x02,
683 0x1e,0x84,0x1b,0x1c,0x32,0x00,0x00,0x02, 685 0x41}}, /* 0x52 */
684 0x41}}, /* 0x52 */ 686 {{0x87, 0x77, 0x77, 0x8b, 0x81, 0x0b, 0x68, 0xf0,
685 {{0x87,0x77,0x77,0x8b,0x81,0x0b,0x68,0xf0, 687 0x5a, 0x80, 0x57, 0x57, 0x69, 0x00, 0x00, 0x02,
686 0x5a,0x80,0x57,0x57,0x69,0x00,0x00,0x02, 688 0x01}}, /* 0x53 */
687 0x01}}, /* 0x53 */ 689 {{0xcd, 0x8f, 0x8f, 0x91, 0x9b, 0x1b, 0x7a, 0xff,
688 {{0xcd,0x8f,0x8f,0x91,0x9b,0x1b,0x7a,0xff, 690 0x64, 0x8c, 0x5f, 0x62, 0x7b, 0x10, 0x00, 0x07,
689 0x64,0x8c,0x5f,0x62,0x7b,0x10,0x00,0x07, 691 0x41}} /* 0x54 */
690 0x41}} /* 0x54 */
691}; 692};
692 693
693static const struct SiS_VCLKData SiSUSB_VCLKData[] = 694static const struct SiS_VCLKData SiSUSB_VCLKData[] = {
694{ 695 {0x1b, 0xe1, 25}, /* 0x00 */
695 { 0x1b,0xe1, 25}, /* 0x00 */ 696 {0x4e, 0xe4, 28}, /* 0x01 */
696 { 0x4e,0xe4, 28}, /* 0x01 */ 697 {0x57, 0xe4, 31}, /* 0x02 */
697 { 0x57,0xe4, 31}, /* 0x02 */ 698 {0xc3, 0xc8, 36}, /* 0x03 */
698 { 0xc3,0xc8, 36}, /* 0x03 */ 699 {0x42, 0xe2, 40}, /* 0x04 */
699 { 0x42,0xe2, 40}, /* 0x04 */ 700 {0xfe, 0xcd, 43}, /* 0x05 */
700 { 0xfe,0xcd, 43}, /* 0x05 */ 701 {0x5d, 0xc4, 44}, /* 0x06 */
701 { 0x5d,0xc4, 44}, /* 0x06 */ 702 {0x52, 0xe2, 49}, /* 0x07 */
702 { 0x52,0xe2, 49}, /* 0x07 */ 703 {0x53, 0xe2, 50}, /* 0x08 */
703 { 0x53,0xe2, 50}, /* 0x08 */ 704 {0x74, 0x67, 52}, /* 0x09 */
704 { 0x74,0x67, 52}, /* 0x09 */ 705 {0x6d, 0x66, 56}, /* 0x0a */
705 { 0x6d,0x66, 56}, /* 0x0a */ 706 {0x5a, 0x64, 65}, /* 0x0b */
706 { 0x5a,0x64, 65}, /* 0x0b */ 707 {0x46, 0x44, 67}, /* 0x0c */
707 { 0x46,0x44, 67}, /* 0x0c */ 708 {0xb1, 0x46, 68}, /* 0x0d */
708 { 0xb1,0x46, 68}, /* 0x0d */ 709 {0xd3, 0x4a, 72}, /* 0x0e */
709 { 0xd3,0x4a, 72}, /* 0x0e */ 710 {0x29, 0x61, 75}, /* 0x0f */
710 { 0x29,0x61, 75}, /* 0x0f */ 711 {0x6e, 0x46, 76}, /* 0x10 */
711 { 0x6e,0x46, 76}, /* 0x10 */ 712 {0x2b, 0x61, 78}, /* 0x11 */
712 { 0x2b,0x61, 78}, /* 0x11 */ 713 {0x31, 0x42, 79}, /* 0x12 */
713 { 0x31,0x42, 79}, /* 0x12 */ 714 {0xab, 0x44, 83}, /* 0x13 */
714 { 0xab,0x44, 83}, /* 0x13 */ 715 {0x46, 0x25, 84}, /* 0x14 */
715 { 0x46,0x25, 84}, /* 0x14 */ 716 {0x78, 0x29, 86}, /* 0x15 */
716 { 0x78,0x29, 86}, /* 0x15 */ 717 {0x62, 0x44, 94}, /* 0x16 */
717 { 0x62,0x44, 94}, /* 0x16 */ 718 {0x2b, 0x41, 104}, /* 0x17 */
718 { 0x2b,0x41,104}, /* 0x17 */ 719 {0x3a, 0x23, 105}, /* 0x18 */
719 { 0x3a,0x23,105}, /* 0x18 */ 720 {0x70, 0x44, 108}, /* 0x19 */
720 { 0x70,0x44,108}, /* 0x19 */ 721 {0x3c, 0x23, 109}, /* 0x1a */
721 { 0x3c,0x23,109}, /* 0x1a */ 722 {0x5e, 0x43, 113}, /* 0x1b */
722 { 0x5e,0x43,113}, /* 0x1b */ 723 {0xbc, 0x44, 116}, /* 0x1c */
723 { 0xbc,0x44,116}, /* 0x1c */ 724 {0xe0, 0x46, 132}, /* 0x1d */
724 { 0xe0,0x46,132}, /* 0x1d */ 725 {0x54, 0x42, 135}, /* 0x1e */
725 { 0x54,0x42,135}, /* 0x1e */ 726 {0xea, 0x2a, 139}, /* 0x1f */
726 { 0xea,0x2a,139}, /* 0x1f */ 727 {0x41, 0x22, 157}, /* 0x20 */
727 { 0x41,0x22,157}, /* 0x20 */ 728 {0x70, 0x24, 162}, /* 0x21 */
728 { 0x70,0x24,162}, /* 0x21 */ 729 {0x30, 0x21, 175}, /* 0x22 */
729 { 0x30,0x21,175}, /* 0x22 */ 730 {0x4e, 0x22, 189}, /* 0x23 */
730 { 0x4e,0x22,189}, /* 0x23 */ 731 {0xde, 0x26, 194}, /* 0x24 */
731 { 0xde,0x26,194}, /* 0x24 */ 732 {0x62, 0x06, 202}, /* 0x25 */
732 { 0x62,0x06,202}, /* 0x25 */ 733 {0x3f, 0x03, 229}, /* 0x26 */
733 { 0x3f,0x03,229}, /* 0x26 */ 734 {0xb8, 0x06, 234}, /* 0x27 */
734 { 0xb8,0x06,234}, /* 0x27 */ 735 {0x34, 0x02, 253}, /* 0x28 */
735 { 0x34,0x02,253}, /* 0x28 */ 736 {0x58, 0x04, 255}, /* 0x29 */
736 { 0x58,0x04,255}, /* 0x29 */ 737 {0x24, 0x01, 265}, /* 0x2a */
737 { 0x24,0x01,265}, /* 0x2a */ 738 {0x9b, 0x02, 267}, /* 0x2b */
738 { 0x9b,0x02,267}, /* 0x2b */ 739 {0x70, 0x05, 270}, /* 0x2c */
739 { 0x70,0x05,270}, /* 0x2c */ 740 {0x25, 0x01, 272}, /* 0x2d */
740 { 0x25,0x01,272}, /* 0x2d */ 741 {0x9c, 0x02, 277}, /* 0x2e */
741 { 0x9c,0x02,277}, /* 0x2e */ 742 {0x27, 0x01, 286}, /* 0x2f */
742 { 0x27,0x01,286}, /* 0x2f */ 743 {0x3c, 0x02, 291}, /* 0x30 */
743 { 0x3c,0x02,291}, /* 0x30 */ 744 {0xef, 0x0a, 292}, /* 0x31 */
744 { 0xef,0x0a,292}, /* 0x31 */ 745 {0xf6, 0x0a, 310}, /* 0x32 */
745 { 0xf6,0x0a,310}, /* 0x32 */ 746 {0x95, 0x01, 315}, /* 0x33 */
746 { 0x95,0x01,315}, /* 0x33 */ 747 {0xf0, 0x09, 324}, /* 0x34 */
747 { 0xf0,0x09,324}, /* 0x34 */ 748 {0xfe, 0x0a, 331}, /* 0x35 */
748 { 0xfe,0x0a,331}, /* 0x35 */ 749 {0xf3, 0x09, 332}, /* 0x36 */
749 { 0xf3,0x09,332}, /* 0x36 */ 750 {0xea, 0x08, 340}, /* 0x37 */
750 { 0xea,0x08,340}, /* 0x37 */ 751 {0xe8, 0x07, 376}, /* 0x38 */
751 { 0xe8,0x07,376}, /* 0x38 */ 752 {0xde, 0x06, 389}, /* 0x39 */
752 { 0xde,0x06,389}, /* 0x39 */ 753 {0x52, 0x2a, 54}, /* 0x3a 301 TV */
753 { 0x52,0x2a, 54}, /* 0x3a 301 TV */ 754 {0x52, 0x6a, 27}, /* 0x3b 301 TV */
754 { 0x52,0x6a, 27}, /* 0x3b 301 TV */ 755 {0x62, 0x24, 70}, /* 0x3c 301 TV */
755 { 0x62,0x24, 70}, /* 0x3c 301 TV */ 756 {0x62, 0x64, 70}, /* 0x3d 301 TV */
756 { 0x62,0x64, 70}, /* 0x3d 301 TV */ 757 {0xa8, 0x4c, 30}, /* 0x3e 301 TV */
757 { 0xa8,0x4c, 30}, /* 0x3e 301 TV */ 758 {0x20, 0x26, 33}, /* 0x3f 301 TV */
758 { 0x20,0x26, 33}, /* 0x3f 301 TV */ 759 {0x31, 0xc2, 39}, /* 0x40 */
759 { 0x31,0xc2, 39}, /* 0x40 */ 760 {0x60, 0x36, 30}, /* 0x41 Chrontel */
760 { 0x60,0x36, 30}, /* 0x41 Chrontel */ 761 {0x40, 0x4a, 28}, /* 0x42 Chrontel */
761 { 0x40,0x4a, 28}, /* 0x42 Chrontel */ 762 {0x9f, 0x46, 44}, /* 0x43 Chrontel */
762 { 0x9f,0x46, 44}, /* 0x43 Chrontel */ 763 {0x97, 0x2c, 26}, /* 0x44 */
763 { 0x97,0x2c, 26}, /* 0x44 */ 764 {0x44, 0xe4, 25}, /* 0x45 Chrontel */
764 { 0x44,0xe4, 25}, /* 0x45 Chrontel */ 765 {0x7e, 0x32, 47}, /* 0x46 Chrontel */
765 { 0x7e,0x32, 47}, /* 0x46 Chrontel */ 766 {0x8a, 0x24, 31}, /* 0x47 Chrontel */
766 { 0x8a,0x24, 31}, /* 0x47 Chrontel */ 767 {0x97, 0x2c, 26}, /* 0x48 Chrontel */
767 { 0x97,0x2c, 26}, /* 0x48 Chrontel */ 768 {0xce, 0x3c, 39}, /* 0x49 */
768 { 0xce,0x3c, 39}, /* 0x49 */ 769 {0x52, 0x4a, 36}, /* 0x4a Chrontel */
769 { 0x52,0x4a, 36}, /* 0x4a Chrontel */ 770 {0x34, 0x61, 95}, /* 0x4b */
770 { 0x34,0x61, 95}, /* 0x4b */ 771 {0x78, 0x27, 108}, /* 0x4c - was 102 */
771 { 0x78,0x27,108}, /* 0x4c - was 102 */ 772 {0x66, 0x43, 123}, /* 0x4d Modes 0x26-0x28 (1400x1050) */
772 { 0x66,0x43,123}, /* 0x4d Modes 0x26-0x28 (1400x1050) */ 773 {0x41, 0x4e, 21}, /* 0x4e */
773 { 0x41,0x4e, 21}, /* 0x4e */ 774 {0xa1, 0x4a, 29}, /* 0x4f Chrontel */
774 { 0xa1,0x4a, 29}, /* 0x4f Chrontel */ 775 {0x19, 0x42, 42}, /* 0x50 */
775 { 0x19,0x42, 42}, /* 0x50 */ 776 {0x54, 0x46, 58}, /* 0x51 Chrontel */
776 { 0x54,0x46, 58}, /* 0x51 Chrontel */ 777 {0x25, 0x42, 61}, /* 0x52 */
777 { 0x25,0x42, 61}, /* 0x52 */ 778 {0x44, 0x44, 66}, /* 0x53 Chrontel */
778 { 0x44,0x44, 66}, /* 0x53 Chrontel */ 779 {0x3a, 0x62, 70}, /* 0x54 Chrontel */
779 { 0x3a,0x62, 70}, /* 0x54 Chrontel */ 780 {0x62, 0xc6, 34}, /* 0x55 848x480-60 */
780 { 0x62,0xc6, 34}, /* 0x55 848x480-60 */ 781 {0x6a, 0xc6, 37}, /* 0x56 848x480-75 - TEMP */
781 { 0x6a,0xc6, 37}, /* 0x56 848x480-75 - TEMP */ 782 {0xbf, 0xc8, 35}, /* 0x57 856x480-38i,60 */
782 { 0xbf,0xc8, 35}, /* 0x57 856x480-38i,60 */ 783 {0x30, 0x23, 88}, /* 0x58 1360x768-62 (is 60Hz!) */
783 { 0x30,0x23, 88}, /* 0x58 1360x768-62 (is 60Hz!) */ 784 {0x52, 0x07, 149}, /* 0x59 1280x960-85 */
784 { 0x52,0x07,149}, /* 0x59 1280x960-85 */ 785 {0x56, 0x07, 156}, /* 0x5a 1400x1050-75 */
785 { 0x56,0x07,156}, /* 0x5a 1400x1050-75 */ 786 {0x70, 0x29, 81}, /* 0x5b 1280x768 LCD */
786 { 0x70,0x29, 81}, /* 0x5b 1280x768 LCD */ 787 {0x45, 0x25, 83}, /* 0x5c 1280x800 */
787 { 0x45,0x25, 83}, /* 0x5c 1280x800 */ 788 {0x70, 0x0a, 147}, /* 0x5d 1680x1050 */
788 { 0x70,0x0a,147}, /* 0x5d 1680x1050 */ 789 {0x70, 0x24, 162}, /* 0x5e 1600x1200 */
789 { 0x70,0x24,162}, /* 0x5e 1600x1200 */ 790 {0x5a, 0x64, 65}, /* 0x5f 1280x720 - temp */
790 { 0x5a,0x64, 65}, /* 0x5f 1280x720 - temp */ 791 {0x63, 0x46, 68}, /* 0x60 1280x768_2 */
791 { 0x63,0x46, 68}, /* 0x60 1280x768_2 */ 792 {0x31, 0x42, 79}, /* 0x61 1280x768_3 - temp */
792 { 0x31,0x42, 79}, /* 0x61 1280x768_3 - temp */ 793 {0, 0, 0}, /* 0x62 - custom (will be filled out at run-time) */
793 { 0, 0, 0}, /* 0x62 - custom (will be filled out at run-time) */ 794 {0x5a, 0x64, 65}, /* 0x63 1280x720 (LCD LVDS) */
794 { 0x5a,0x64, 65}, /* 0x63 1280x720 (LCD LVDS) */ 795 {0x70, 0x28, 90}, /* 0x64 1152x864@60 */
795 { 0x70,0x28, 90}, /* 0x64 1152x864@60 */ 796 {0x41, 0xc4, 32}, /* 0x65 848x480@60 */
796 { 0x41,0xc4, 32}, /* 0x65 848x480@60 */ 797 {0x5c, 0xc6, 32}, /* 0x66 856x480@60 */
797 { 0x5c,0xc6, 32}, /* 0x66 856x480@60 */ 798 {0x76, 0xe7, 27}, /* 0x67 720x480@60 */
798 { 0x76,0xe7, 27}, /* 0x67 720x480@60 */ 799 {0x5f, 0xc6, 33}, /* 0x68 720/768x576@60 */
799 { 0x5f,0xc6, 33}, /* 0x68 720/768x576@60 */ 800 {0x52, 0x27, 75}, /* 0x69 1920x1080i 60Hz interlaced */
800 { 0x52,0x27, 75}, /* 0x69 1920x1080i 60Hz interlaced */ 801 {0x7c, 0x6b, 38}, /* 0x6a 960x540@60 */
801 { 0x7c,0x6b, 38}, /* 0x6a 960x540@60 */ 802 {0xe3, 0x56, 41}, /* 0x6b 960x600@60 */
802 { 0xe3,0x56, 41}, /* 0x6b 960x600@60 */ 803 {0x45, 0x25, 83}, /* 0x6c 1280x800 */
803 { 0x45,0x25, 83}, /* 0x6c 1280x800 */ 804 {0x70, 0x28, 90}, /* 0x6d 1152x864@60 */
804 { 0x70,0x28, 90}, /* 0x6d 1152x864@60 */ 805 {0x15, 0xe1, 20}, /* 0x6e 640x400@60 (fake, not actually used) */
805 { 0x15,0xe1, 20}, /* 0x6e 640x400@60 (fake, not actually used) */ 806 {0x5f, 0xc6, 33}, /* 0x6f 720x576@60 */
806 { 0x5f,0xc6, 33}, /* 0x6f 720x576@60 */ 807 {0x37, 0x5a, 10}, /* 0x70 320x200@60 (fake, not actually used) */
807 { 0x37,0x5a, 10}, /* 0x70 320x200@60 (fake, not actually used) */ 808 {0x2b, 0xc2, 35} /* 0x71 768@576@60 */
808 { 0x2b,0xc2, 35} /* 0x71 768@576@60 */
809}; 809};
810 810
811int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo); 811int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo);
812int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo); 812int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo);
813 813
814extern int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data); 814extern int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data);
815extern int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 *data); 815extern int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 * data);
816extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, 816extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
817 u8 index, u8 data); 817 u8 index, u8 data);
818extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, 818extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
819 u8 index, u8 *data); 819 u8 index, u8 * data);
820extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, 820extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port,
821 u8 idx, u8 myand, u8 myor); 821 u8 idx, u8 myand, u8 myor);
822extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port, 822extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port,
823 u8 index, u8 myor); 823 u8 index, u8 myor);
824extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port, 824extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port,
825 u8 idx, u8 myand); 825 u8 idx, u8 myand);
826 826
827void sisusb_delete(struct kref *kref); 827void sisusb_delete(struct kref *kref);
828int sisusb_writeb(struct sisusb_usb_data *sisusb, u32 adr, u8 data); 828int sisusb_writeb(struct sisusb_usb_data *sisusb, u32 adr, u8 data);
829int sisusb_readb(struct sisusb_usb_data *sisusb, u32 adr, u8 *data); 829int sisusb_readb(struct sisusb_usb_data *sisusb, u32 adr, u8 * data);
830int sisusb_copy_memory(struct sisusb_usb_data *sisusb, char *src, 830int sisusb_copy_memory(struct sisusb_usb_data *sisusb, char *src,
831 u32 dest, int length, size_t *bytes_written); 831 u32 dest, int length, size_t * bytes_written);
832int sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init); 832int sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init);
833int sisusbcon_do_font_op(struct sisusb_usb_data *sisusb, int set, int slot, 833int sisusbcon_do_font_op(struct sisusb_usb_data *sisusb, int set, int slot,
834 u8 *arg, int cmapsz, int ch512, int dorecalc, 834 u8 * arg, int cmapsz, int ch512, int dorecalc,
835 struct vc_data *c, int fh, int uplock); 835 struct vc_data *c, int fh, int uplock);
836void sisusb_set_cursor(struct sisusb_usb_data *sisusb, unsigned int location); 836void sisusb_set_cursor(struct sisusb_usb_data *sisusb, unsigned int location);
837int sisusb_console_init(struct sisusb_usb_data *sisusb, int first, int last); 837int sisusb_console_init(struct sisusb_usb_data *sisusb, int first, int last);
@@ -839,4 +839,3 @@ void sisusb_console_exit(struct sisusb_usb_data *sisusb);
839void sisusb_init_concode(void); 839void sisusb_init_concode(void);
840 840
841#endif 841#endif
842
diff --git a/drivers/usb/misc/sisusbvga/sisusb_struct.h b/drivers/usb/misc/sisusbvga/sisusb_struct.h
index f325ecb29a..1c4240e802 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_struct.h
+++ b/drivers/usb/misc/sisusbvga/sisusb_struct.h
@@ -44,7 +44,7 @@
44 * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 44 * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
45 * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 45 * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 * 46 *
47 * Author: Thomas Winischhofer <thomas@winischhofer.net> 47 * Author: Thomas Winischhofer <thomas@winischhofer.net>
48 * 48 *
49 */ 49 */
50 50
@@ -52,85 +52,78 @@
52#define _SISUSB_STRUCT_H_ 52#define _SISUSB_STRUCT_H_
53 53
54struct SiS_St { 54struct SiS_St {
55 unsigned char St_ModeID; 55 unsigned char St_ModeID;
56 unsigned short St_ModeFlag; 56 unsigned short St_ModeFlag;
57 unsigned char St_StTableIndex; 57 unsigned char St_StTableIndex;
58 unsigned char St_CRT2CRTC; 58 unsigned char St_CRT2CRTC;
59 unsigned char St_ResInfo; 59 unsigned char St_ResInfo;
60 unsigned char VB_StTVFlickerIndex; 60 unsigned char VB_StTVFlickerIndex;
61 unsigned char VB_StTVEdgeIndex; 61 unsigned char VB_StTVEdgeIndex;
62 unsigned char VB_StTVYFilterIndex; 62 unsigned char VB_StTVYFilterIndex;
63 unsigned char St_PDC; 63 unsigned char St_PDC;
64}; 64};
65 65
66struct SiS_StandTable 66struct SiS_StandTable {
67{ 67 unsigned char CRT_COLS;
68 unsigned char CRT_COLS; 68 unsigned char ROWS;
69 unsigned char ROWS; 69 unsigned char CHAR_HEIGHT;
70 unsigned char CHAR_HEIGHT; 70 unsigned short CRT_LEN;
71 unsigned short CRT_LEN; 71 unsigned char SR[4];
72 unsigned char SR[4]; 72 unsigned char MISC;
73 unsigned char MISC; 73 unsigned char CRTC[0x19];
74 unsigned char CRTC[0x19]; 74 unsigned char ATTR[0x14];
75 unsigned char ATTR[0x14]; 75 unsigned char GRC[9];
76 unsigned char GRC[9];
77}; 76};
78 77
79struct SiS_StResInfo_S { 78struct SiS_StResInfo_S {
80 unsigned short HTotal; 79 unsigned short HTotal;
81 unsigned short VTotal; 80 unsigned short VTotal;
82}; 81};
83 82
84struct SiS_Ext 83struct SiS_Ext {
85{ 84 unsigned char Ext_ModeID;
86 unsigned char Ext_ModeID; 85 unsigned short Ext_ModeFlag;
87 unsigned short Ext_ModeFlag; 86 unsigned short Ext_VESAID;
88 unsigned short Ext_VESAID; 87 unsigned char Ext_RESINFO;
89 unsigned char Ext_RESINFO; 88 unsigned char VB_ExtTVFlickerIndex;
90 unsigned char VB_ExtTVFlickerIndex; 89 unsigned char VB_ExtTVEdgeIndex;
91 unsigned char VB_ExtTVEdgeIndex; 90 unsigned char VB_ExtTVYFilterIndex;
92 unsigned char VB_ExtTVYFilterIndex; 91 unsigned char VB_ExtTVYFilterIndexROM661;
93 unsigned char VB_ExtTVYFilterIndexROM661; 92 unsigned char REFindex;
94 unsigned char REFindex; 93 char ROMMODEIDX661;
95 char ROMMODEIDX661;
96}; 94};
97 95
98struct SiS_Ext2 96struct SiS_Ext2 {
99{ 97 unsigned short Ext_InfoFlag;
100 unsigned short Ext_InfoFlag; 98 unsigned char Ext_CRT1CRTC;
101 unsigned char Ext_CRT1CRTC; 99 unsigned char Ext_CRTVCLK;
102 unsigned char Ext_CRTVCLK; 100 unsigned char Ext_CRT2CRTC;
103 unsigned char Ext_CRT2CRTC; 101 unsigned char Ext_CRT2CRTC_NS;
104 unsigned char Ext_CRT2CRTC_NS; 102 unsigned char ModeID;
105 unsigned char ModeID; 103 unsigned short XRes;
106 unsigned short XRes; 104 unsigned short YRes;
107 unsigned short YRes; 105 unsigned char Ext_PDC;
108 unsigned char Ext_PDC; 106 unsigned char Ext_FakeCRT2CRTC;
109 unsigned char Ext_FakeCRT2CRTC; 107 unsigned char Ext_FakeCRT2Clk;
110 unsigned char Ext_FakeCRT2Clk;
111}; 108};
112 109
113struct SiS_CRT1Table 110struct SiS_CRT1Table {
114{ 111 unsigned char CR[17];
115 unsigned char CR[17];
116}; 112};
117 113
118struct SiS_VCLKData 114struct SiS_VCLKData {
119{ 115 unsigned char SR2B, SR2C;
120 unsigned char SR2B,SR2C; 116 unsigned short CLOCK;
121 unsigned short CLOCK;
122}; 117};
123 118
124struct SiS_ModeResInfo 119struct SiS_ModeResInfo {
125{ 120 unsigned short HTotal;
126 unsigned short HTotal; 121 unsigned short VTotal;
127 unsigned short VTotal; 122 unsigned char XChar;
128 unsigned char XChar; 123 unsigned char YChar;
129 unsigned char YChar;
130}; 124};
131 125
132struct SiS_Private 126struct SiS_Private {
133{
134 void *sisusb; 127 void *sisusb;
135 128
136 unsigned long IOAddress; 129 unsigned long IOAddress;
@@ -151,19 +144,18 @@ struct SiS_Private
151 unsigned long SiS_P3da; 144 unsigned long SiS_P3da;
152 unsigned long SiS_Part1Port; 145 unsigned long SiS_Part1Port;
153 146
154 unsigned char SiS_MyCR63; 147 unsigned char SiS_MyCR63;
155 unsigned short SiS_CRT1Mode; 148 unsigned short SiS_CRT1Mode;
156 unsigned short SiS_ModeType; 149 unsigned short SiS_ModeType;
157 unsigned short SiS_SetFlag; 150 unsigned short SiS_SetFlag;
158 151
159 const struct SiS_StandTable *SiS_StandTable; 152 const struct SiS_StandTable *SiS_StandTable;
160 const struct SiS_St *SiS_SModeIDTable; 153 const struct SiS_St *SiS_SModeIDTable;
161 const struct SiS_Ext *SiS_EModeIDTable; 154 const struct SiS_Ext *SiS_EModeIDTable;
162 const struct SiS_Ext2 *SiS_RefIndex; 155 const struct SiS_Ext2 *SiS_RefIndex;
163 const struct SiS_CRT1Table *SiS_CRT1Table; 156 const struct SiS_CRT1Table *SiS_CRT1Table;
164 const struct SiS_VCLKData *SiS_VCLKData; 157 const struct SiS_VCLKData *SiS_VCLKData;
165 const struct SiS_ModeResInfo *SiS_ModeResInfo; 158 const struct SiS_ModeResInfo *SiS_ModeResInfo;
166}; 159};
167 160
168#endif 161#endif
169
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index c03dfd7a9d..f06e4e2b49 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -172,6 +172,10 @@ static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
172 172
173#define MON_RING_EMPTY(rp) ((rp)->b_cnt == 0) 173#define MON_RING_EMPTY(rp) ((rp)->b_cnt == 0)
174 174
175static unsigned char xfer_to_pipe[4] = {
176 PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
177};
178
175static struct class *mon_bin_class; 179static struct class *mon_bin_class;
176static dev_t mon_bin_dev0; 180static dev_t mon_bin_dev0;
177static struct cdev mon_bin_cdev; 181static struct cdev mon_bin_cdev;
@@ -354,13 +358,9 @@ static inline char mon_bin_get_setup(unsigned char *setupb,
354 const struct urb *urb, char ev_type) 358 const struct urb *urb, char ev_type)
355{ 359{
356 360
357 if (!usb_pipecontrol(urb->pipe) || ev_type != 'S') 361 if (!usb_endpoint_xfer_control(&urb->ep->desc) || ev_type != 'S')
358 return '-'; 362 return '-';
359 363
360 if (urb->dev->bus->uses_dma &&
361 (urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
362 return mon_dmapeek(setupb, urb->setup_dma, SETUP_LEN);
363 }
364 if (urb->setup_packet == NULL) 364 if (urb->setup_packet == NULL)
365 return 'Z'; 365 return 'Z';
366 366
@@ -386,13 +386,15 @@ static char mon_bin_get_data(const struct mon_reader_bin *rp,
386} 386}
387 387
388static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb, 388static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
389 char ev_type) 389 char ev_type, int status)
390{ 390{
391 const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
391 unsigned long flags; 392 unsigned long flags;
392 struct timeval ts; 393 struct timeval ts;
393 unsigned int urb_length; 394 unsigned int urb_length;
394 unsigned int offset; 395 unsigned int offset;
395 unsigned int length; 396 unsigned int length;
397 unsigned char dir;
396 struct mon_bin_hdr *ep; 398 struct mon_bin_hdr *ep;
397 char data_tag = 0; 399 char data_tag = 0;
398 400
@@ -410,16 +412,19 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
410 if (length >= rp->b_size/5) 412 if (length >= rp->b_size/5)
411 length = rp->b_size/5; 413 length = rp->b_size/5;
412 414
413 if (usb_pipein(urb->pipe)) { 415 if (usb_urb_dir_in(urb)) {
414 if (ev_type == 'S') { 416 if (ev_type == 'S') {
415 length = 0; 417 length = 0;
416 data_tag = '<'; 418 data_tag = '<';
417 } 419 }
420 /* Cannot rely on endpoint number in case of control ep.0 */
421 dir = USB_DIR_IN;
418 } else { 422 } else {
419 if (ev_type == 'C') { 423 if (ev_type == 'C') {
420 length = 0; 424 length = 0;
421 data_tag = '>'; 425 data_tag = '>';
422 } 426 }
427 dir = 0;
423 } 428 }
424 429
425 if (rp->mmap_active) 430 if (rp->mmap_active)
@@ -440,15 +445,14 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
440 */ 445 */
441 memset(ep, 0, PKT_SIZE); 446 memset(ep, 0, PKT_SIZE);
442 ep->type = ev_type; 447 ep->type = ev_type;
443 ep->xfer_type = usb_pipetype(urb->pipe); 448 ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)];
444 /* We use the fact that usb_pipein() returns 0x80 */ 449 ep->epnum = dir | usb_endpoint_num(epd);
445 ep->epnum = usb_pipeendpoint(urb->pipe) | usb_pipein(urb->pipe); 450 ep->devnum = urb->dev->devnum;
446 ep->devnum = usb_pipedevice(urb->pipe);
447 ep->busnum = urb->dev->bus->busnum; 451 ep->busnum = urb->dev->bus->busnum;
448 ep->id = (unsigned long) urb; 452 ep->id = (unsigned long) urb;
449 ep->ts_sec = ts.tv_sec; 453 ep->ts_sec = ts.tv_sec;
450 ep->ts_usec = ts.tv_usec; 454 ep->ts_usec = ts.tv_usec;
451 ep->status = urb->status; 455 ep->status = status;
452 ep->len_urb = urb_length; 456 ep->len_urb = urb_length;
453 ep->len_cap = length; 457 ep->len_cap = length;
454 458
@@ -471,13 +475,13 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
471static void mon_bin_submit(void *data, struct urb *urb) 475static void mon_bin_submit(void *data, struct urb *urb)
472{ 476{
473 struct mon_reader_bin *rp = data; 477 struct mon_reader_bin *rp = data;
474 mon_bin_event(rp, urb, 'S'); 478 mon_bin_event(rp, urb, 'S', -EINPROGRESS);
475} 479}
476 480
477static void mon_bin_complete(void *data, struct urb *urb) 481static void mon_bin_complete(void *data, struct urb *urb, int status)
478{ 482{
479 struct mon_reader_bin *rp = data; 483 struct mon_reader_bin *rp = data;
480 mon_bin_event(rp, urb, 'C'); 484 mon_bin_event(rp, urb, 'C', status);
481} 485}
482 486
483static void mon_bin_error(void *data, struct urb *urb, int error) 487static void mon_bin_error(void *data, struct urb *urb, int error)
@@ -500,10 +504,10 @@ static void mon_bin_error(void *data, struct urb *urb, int error)
500 504
501 memset(ep, 0, PKT_SIZE); 505 memset(ep, 0, PKT_SIZE);
502 ep->type = 'E'; 506 ep->type = 'E';
503 ep->xfer_type = usb_pipetype(urb->pipe); 507 ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)];
504 /* We use the fact that usb_pipein() returns 0x80 */ 508 ep->epnum = usb_urb_dir_in(urb) ? USB_DIR_IN : 0;
505 ep->epnum = usb_pipeendpoint(urb->pipe) | usb_pipein(urb->pipe); 509 ep->epnum |= usb_endpoint_num(&urb->ep->desc);
506 ep->devnum = usb_pipedevice(urb->pipe); 510 ep->devnum = urb->dev->devnum;
507 ep->busnum = urb->dev->bus->busnum; 511 ep->busnum = urb->dev->bus->busnum;
508 ep->id = (unsigned long) urb; 512 ep->id = (unsigned long) urb;
509 ep->status = error; 513 ep->status = error;
diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
index ce61d8b0fd..b371ffd39d 100644
--- a/drivers/usb/mon/mon_main.c
+++ b/drivers/usb/mon/mon_main.c
@@ -129,7 +129,8 @@ static void mon_submit_error(struct usb_bus *ubus, struct urb *urb, int error)
129 129
130/* 130/*
131 */ 131 */
132static void mon_bus_complete(struct mon_bus *mbus, struct urb *urb) 132static void mon_bus_complete(struct mon_bus *mbus, struct urb *urb,
133 int status)
133{ 134{
134 unsigned long flags; 135 unsigned long flags;
135 struct list_head *pos; 136 struct list_head *pos;
@@ -139,28 +140,18 @@ static void mon_bus_complete(struct mon_bus *mbus, struct urb *urb)
139 mbus->cnt_events++; 140 mbus->cnt_events++;
140 list_for_each (pos, &mbus->r_list) { 141 list_for_each (pos, &mbus->r_list) {
141 r = list_entry(pos, struct mon_reader, r_link); 142 r = list_entry(pos, struct mon_reader, r_link);
142 r->rnf_complete(r->r_data, urb); 143 r->rnf_complete(r->r_data, urb, status);
143 } 144 }
144 spin_unlock_irqrestore(&mbus->lock, flags); 145 spin_unlock_irqrestore(&mbus->lock, flags);
145} 146}
146 147
147static void mon_complete(struct usb_bus *ubus, struct urb *urb) 148static void mon_complete(struct usb_bus *ubus, struct urb *urb, int status)
148{ 149{
149 struct mon_bus *mbus; 150 struct mon_bus *mbus;
150 151
151 mbus = ubus->mon_bus; 152 if ((mbus = ubus->mon_bus) != NULL)
152 if (mbus == NULL) { 153 mon_bus_complete(mbus, urb, status);
153 /* 154 mon_bus_complete(&mon_bus0, urb, status);
154 * This should not happen.
155 * At this point we do not even know the bus number...
156 */
157 printk(KERN_ERR TAG ": Null mon bus in URB, pipe 0x%x\n",
158 urb->pipe);
159 return;
160 }
161
162 mon_bus_complete(mbus, urb);
163 mon_bus_complete(&mon_bus0, urb);
164} 155}
165 156
166/* int (*unlink_urb) (struct urb *urb, int status); */ 157/* int (*unlink_urb) (struct urb *urb, int status); */
@@ -170,7 +161,7 @@ static void mon_complete(struct usb_bus *ubus, struct urb *urb)
170 */ 161 */
171static void mon_stop(struct mon_bus *mbus) 162static void mon_stop(struct mon_bus *mbus)
172{ 163{
173 struct usb_bus *ubus = mbus->u_bus; 164 struct usb_bus *ubus;
174 struct list_head *p; 165 struct list_head *p;
175 166
176 if (mbus == &mon_bus0) { 167 if (mbus == &mon_bus0) {
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 8f27a9e1c3..ebb04ac485 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -50,10 +50,13 @@ struct mon_iso_desc {
50struct mon_event_text { 50struct mon_event_text {
51 struct list_head e_link; 51 struct list_head e_link;
52 int type; /* submit, complete, etc. */ 52 int type; /* submit, complete, etc. */
53 unsigned int pipe; /* Pipe */
54 unsigned long id; /* From pointer, most of the time */ 53 unsigned long id; /* From pointer, most of the time */
55 unsigned int tstamp; 54 unsigned int tstamp;
56 int busnum; 55 int busnum;
56 char devnum;
57 char epnum;
58 char is_in;
59 char xfertype;
57 int length; /* Depends on type: xfer length or act length */ 60 int length; /* Depends on type: xfer length or act length */
58 int status; 61 int status;
59 int interval; 62 int interval;
@@ -121,13 +124,9 @@ static inline char mon_text_get_setup(struct mon_event_text *ep,
121 struct urb *urb, char ev_type, struct mon_bus *mbus) 124 struct urb *urb, char ev_type, struct mon_bus *mbus)
122{ 125{
123 126
124 if (!usb_pipecontrol(urb->pipe) || ev_type != 'S') 127 if (ep->xfertype != USB_ENDPOINT_XFER_CONTROL || ev_type != 'S')
125 return '-'; 128 return '-';
126 129
127 if (urb->dev->bus->uses_dma &&
128 (urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
129 return mon_dmapeek(ep->setup, urb->setup_dma, SETUP_MAX);
130 }
131 if (urb->setup_packet == NULL) 130 if (urb->setup_packet == NULL)
132 return 'Z'; /* '0' would be not as pretty. */ 131 return 'Z'; /* '0' would be not as pretty. */
133 132
@@ -138,14 +137,12 @@ static inline char mon_text_get_setup(struct mon_event_text *ep,
138static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb, 137static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
139 int len, char ev_type, struct mon_bus *mbus) 138 int len, char ev_type, struct mon_bus *mbus)
140{ 139{
141 int pipe = urb->pipe;
142
143 if (len <= 0) 140 if (len <= 0)
144 return 'L'; 141 return 'L';
145 if (len >= DATA_MAX) 142 if (len >= DATA_MAX)
146 len = DATA_MAX; 143 len = DATA_MAX;
147 144
148 if (usb_pipein(pipe)) { 145 if (ep->is_in) {
149 if (ev_type != 'C') 146 if (ev_type != 'C')
150 return '<'; 147 return '<';
151 } else { 148 } else {
@@ -186,7 +183,7 @@ static inline unsigned int mon_get_timestamp(void)
186} 183}
187 184
188static void mon_text_event(struct mon_reader_text *rp, struct urb *urb, 185static void mon_text_event(struct mon_reader_text *rp, struct urb *urb,
189 char ev_type) 186 char ev_type, int status)
190{ 187{
191 struct mon_event_text *ep; 188 struct mon_event_text *ep;
192 unsigned int stamp; 189 unsigned int stamp;
@@ -203,24 +200,28 @@ static void mon_text_event(struct mon_reader_text *rp, struct urb *urb,
203 } 200 }
204 201
205 ep->type = ev_type; 202 ep->type = ev_type;
206 ep->pipe = urb->pipe;
207 ep->id = (unsigned long) urb; 203 ep->id = (unsigned long) urb;
208 ep->busnum = urb->dev->bus->busnum; 204 ep->busnum = urb->dev->bus->busnum;
205 ep->devnum = urb->dev->devnum;
206 ep->epnum = usb_endpoint_num(&urb->ep->desc);
207 ep->xfertype = usb_endpoint_type(&urb->ep->desc);
208 ep->is_in = usb_urb_dir_in(urb);
209 ep->tstamp = stamp; 209 ep->tstamp = stamp;
210 ep->length = (ev_type == 'S') ? 210 ep->length = (ev_type == 'S') ?
211 urb->transfer_buffer_length : urb->actual_length; 211 urb->transfer_buffer_length : urb->actual_length;
212 /* Collecting status makes debugging sense for submits, too */ 212 /* Collecting status makes debugging sense for submits, too */
213 ep->status = urb->status; 213 ep->status = status;
214 214
215 if (usb_pipeint(urb->pipe)) { 215 if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
216 ep->interval = urb->interval; 216 ep->interval = urb->interval;
217 } else if (usb_pipeisoc(urb->pipe)) { 217 } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
218 ep->interval = urb->interval; 218 ep->interval = urb->interval;
219 ep->start_frame = urb->start_frame; 219 ep->start_frame = urb->start_frame;
220 ep->error_count = urb->error_count; 220 ep->error_count = urb->error_count;
221 } 221 }
222 ep->numdesc = urb->number_of_packets; 222 ep->numdesc = urb->number_of_packets;
223 if (usb_pipeisoc(urb->pipe) && urb->number_of_packets > 0) { 223 if (ep->xfertype == USB_ENDPOINT_XFER_ISOC &&
224 urb->number_of_packets > 0) {
224 if ((ndesc = urb->number_of_packets) > ISODESC_MAX) 225 if ((ndesc = urb->number_of_packets) > ISODESC_MAX)
225 ndesc = ISODESC_MAX; 226 ndesc = ISODESC_MAX;
226 fp = urb->iso_frame_desc; 227 fp = urb->iso_frame_desc;
@@ -247,13 +248,13 @@ static void mon_text_event(struct mon_reader_text *rp, struct urb *urb,
247static void mon_text_submit(void *data, struct urb *urb) 248static void mon_text_submit(void *data, struct urb *urb)
248{ 249{
249 struct mon_reader_text *rp = data; 250 struct mon_reader_text *rp = data;
250 mon_text_event(rp, urb, 'S'); 251 mon_text_event(rp, urb, 'S', -EINPROGRESS);
251} 252}
252 253
253static void mon_text_complete(void *data, struct urb *urb) 254static void mon_text_complete(void *data, struct urb *urb, int status)
254{ 255{
255 struct mon_reader_text *rp = data; 256 struct mon_reader_text *rp = data;
256 mon_text_event(rp, urb, 'C'); 257 mon_text_event(rp, urb, 'C', status);
257} 258}
258 259
259static void mon_text_error(void *data, struct urb *urb, int error) 260static void mon_text_error(void *data, struct urb *urb, int error)
@@ -268,9 +269,12 @@ static void mon_text_error(void *data, struct urb *urb, int error)
268 } 269 }
269 270
270 ep->type = 'E'; 271 ep->type = 'E';
271 ep->pipe = urb->pipe;
272 ep->id = (unsigned long) urb; 272 ep->id = (unsigned long) urb;
273 ep->busnum = 0; 273 ep->busnum = 0;
274 ep->devnum = urb->dev->devnum;
275 ep->epnum = usb_endpoint_num(&urb->ep->desc);
276 ep->xfertype = usb_endpoint_type(&urb->ep->desc);
277 ep->is_in = usb_urb_dir_in(urb);
274 ep->tstamp = 0; 278 ep->tstamp = 0;
275 ep->length = 0; 279 ep->length = 0;
276 ep->status = error; 280 ep->status = error;
@@ -413,10 +417,10 @@ static ssize_t mon_text_read_u(struct file *file, char __user *buf,
413 mon_text_read_head_u(rp, &ptr, ep); 417 mon_text_read_head_u(rp, &ptr, ep);
414 if (ep->type == 'E') { 418 if (ep->type == 'E') {
415 mon_text_read_statset(rp, &ptr, ep); 419 mon_text_read_statset(rp, &ptr, ep);
416 } else if (usb_pipeisoc(ep->pipe)) { 420 } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
417 mon_text_read_isostat(rp, &ptr, ep); 421 mon_text_read_isostat(rp, &ptr, ep);
418 mon_text_read_isodesc(rp, &ptr, ep); 422 mon_text_read_isodesc(rp, &ptr, ep);
419 } else if (usb_pipeint(ep->pipe)) { 423 } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
420 mon_text_read_intstat(rp, &ptr, ep); 424 mon_text_read_intstat(rp, &ptr, ep);
421 } else { 425 } else {
422 mon_text_read_statset(rp, &ptr, ep); 426 mon_text_read_statset(rp, &ptr, ep);
@@ -468,18 +472,17 @@ static void mon_text_read_head_t(struct mon_reader_text *rp,
468{ 472{
469 char udir, utype; 473 char udir, utype;
470 474
471 udir = usb_pipein(ep->pipe) ? 'i' : 'o'; 475 udir = (ep->is_in ? 'i' : 'o');
472 switch (usb_pipetype(ep->pipe)) { 476 switch (ep->xfertype) {
473 case PIPE_ISOCHRONOUS: utype = 'Z'; break; 477 case USB_ENDPOINT_XFER_ISOC: utype = 'Z'; break;
474 case PIPE_INTERRUPT: utype = 'I'; break; 478 case USB_ENDPOINT_XFER_INT: utype = 'I'; break;
475 case PIPE_CONTROL: utype = 'C'; break; 479 case USB_ENDPOINT_XFER_CONTROL: utype = 'C'; break;
476 default: /* PIPE_BULK */ utype = 'B'; 480 default: /* PIPE_BULK */ utype = 'B';
477 } 481 }
478 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, 482 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
479 "%lx %u %c %c%c:%03u:%02u", 483 "%lx %u %c %c%c:%03u:%02u",
480 ep->id, ep->tstamp, ep->type, 484 ep->id, ep->tstamp, ep->type,
481 utype, udir, 485 utype, udir, ep->devnum, ep->epnum);
482 usb_pipedevice(ep->pipe), usb_pipeendpoint(ep->pipe));
483} 486}
484 487
485static void mon_text_read_head_u(struct mon_reader_text *rp, 488static void mon_text_read_head_u(struct mon_reader_text *rp,
@@ -487,18 +490,17 @@ static void mon_text_read_head_u(struct mon_reader_text *rp,
487{ 490{
488 char udir, utype; 491 char udir, utype;
489 492
490 udir = usb_pipein(ep->pipe) ? 'i' : 'o'; 493 udir = (ep->is_in ? 'i' : 'o');
491 switch (usb_pipetype(ep->pipe)) { 494 switch (ep->xfertype) {
492 case PIPE_ISOCHRONOUS: utype = 'Z'; break; 495 case USB_ENDPOINT_XFER_ISOC: utype = 'Z'; break;
493 case PIPE_INTERRUPT: utype = 'I'; break; 496 case USB_ENDPOINT_XFER_INT: utype = 'I'; break;
494 case PIPE_CONTROL: utype = 'C'; break; 497 case USB_ENDPOINT_XFER_CONTROL: utype = 'C'; break;
495 default: /* PIPE_BULK */ utype = 'B'; 498 default: /* PIPE_BULK */ utype = 'B';
496 } 499 }
497 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, 500 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
498 "%lx %u %c %c%c:%d:%03u:%u", 501 "%lx %u %c %c%c:%d:%03u:%u",
499 ep->id, ep->tstamp, ep->type, 502 ep->id, ep->tstamp, ep->type,
500 utype, udir, 503 utype, udir, ep->busnum, ep->devnum, ep->epnum);
501 ep->busnum, usb_pipedevice(ep->pipe), usb_pipeendpoint(ep->pipe));
502} 504}
503 505
504static void mon_text_read_statset(struct mon_reader_text *rp, 506static void mon_text_read_statset(struct mon_reader_text *rp,
diff --git a/drivers/usb/mon/usb_mon.h b/drivers/usb/mon/usb_mon.h
index f68ad6d99a..f5d84ff8c1 100644
--- a/drivers/usb/mon/usb_mon.h
+++ b/drivers/usb/mon/usb_mon.h
@@ -46,7 +46,7 @@ struct mon_reader {
46 46
47 void (*rnf_submit)(void *data, struct urb *urb); 47 void (*rnf_submit)(void *data, struct urb *urb);
48 void (*rnf_error)(void *data, struct urb *urb, int error); 48 void (*rnf_error)(void *data, struct urb *urb, int error);
49 void (*rnf_complete)(void *data, struct urb *urb); 49 void (*rnf_complete)(void *data, struct urb *urb, int status);
50}; 50};
51 51
52void mon_reader_add(struct mon_bus *mbus, struct mon_reader *r); 52void mon_reader_add(struct mon_bus *mbus, struct mon_reader *r);
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 43d6db696f..99fefed779 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -92,6 +92,16 @@ config USB_SERIAL_BELKIN
92 To compile this driver as a module, choose M here: the 92 To compile this driver as a module, choose M here: the
93 module will be called belkin_sa. 93 module will be called belkin_sa.
94 94
95config USB_SERIAL_CH341
96 tristate "USB Winchiphead CH341 Single Port Serial Driver"
97 depends on USB_SERIAL
98 help
99 Say Y here if you want to use a Winchiphead CH341 single port
100 USB to serial adapter.
101
102 To compile this driver as a module, choose M here: the
103 module will be called ch341.
104
95config USB_SERIAL_WHITEHEAT 105config USB_SERIAL_WHITEHEAT
96 tristate "USB ConnectTech WhiteHEAT Serial Driver" 106 tristate "USB ConnectTech WhiteHEAT Serial Driver"
97 depends on USB_SERIAL 107 depends on USB_SERIAL
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 07a976eca6..d6fb384e52 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_USB_SERIAL_AIRCABLE) += aircable.o
15obj-$(CONFIG_USB_SERIAL_AIRPRIME) += airprime.o 15obj-$(CONFIG_USB_SERIAL_AIRPRIME) += airprime.o
16obj-$(CONFIG_USB_SERIAL_ARK3116) += ark3116.o 16obj-$(CONFIG_USB_SERIAL_ARK3116) += ark3116.o
17obj-$(CONFIG_USB_SERIAL_BELKIN) += belkin_sa.o 17obj-$(CONFIG_USB_SERIAL_BELKIN) += belkin_sa.o
18obj-$(CONFIG_USB_SERIAL_CH341) += ch341.o
18obj-$(CONFIG_USB_SERIAL_CP2101) += cp2101.o 19obj-$(CONFIG_USB_SERIAL_CP2101) += cp2101.o
19obj-$(CONFIG_USB_SERIAL_CYBERJACK) += cyberjack.o 20obj-$(CONFIG_USB_SERIAL_CYBERJACK) += cyberjack.o
20obj-$(CONFIG_USB_SERIAL_CYPRESS_M8) += cypress_m8.o 21obj-$(CONFIG_USB_SERIAL_CYPRESS_M8) += cypress_m8.o
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index c9fd486c1c..2a8e537cb0 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -172,11 +172,6 @@ static void ark3116_set_termios(struct usb_serial_port *port,
172 172
173 dbg("%s - port %d", __FUNCTION__, port->number); 173 dbg("%s - port %d", __FUNCTION__, port->number);
174 174
175 if (!port->tty || !port->tty->termios) {
176 dbg("%s - no tty structures", __FUNCTION__);
177 return;
178 }
179
180 spin_lock_irqsave(&priv->lock, flags); 175 spin_lock_irqsave(&priv->lock, flags);
181 if (!priv->termios_initialized) { 176 if (!priv->termios_initialized) {
182 *(port->tty->termios) = tty_std_termios; 177 *(port->tty->termios) = tty_std_termios;
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index a47a24f882..0b14aea8eb 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -36,6 +36,16 @@ static int usb_serial_device_match (struct device *dev, struct device_driver *dr
36 return 0; 36 return 0;
37} 37}
38 38
39static ssize_t show_port_number(struct device *dev,
40 struct device_attribute *attr, char *buf)
41{
42 struct usb_serial_port *port = to_usb_serial_port(dev);
43
44 return sprintf(buf, "%d\n", port->number - port->serial->minor);
45}
46
47static DEVICE_ATTR(port_number, S_IRUGO, show_port_number, NULL);
48
39static int usb_serial_device_probe (struct device *dev) 49static int usb_serial_device_probe (struct device *dev)
40{ 50{
41 struct usb_serial_driver *driver; 51 struct usb_serial_driver *driver;
@@ -62,6 +72,10 @@ static int usb_serial_device_probe (struct device *dev)
62 goto exit; 72 goto exit;
63 } 73 }
64 74
75 retval = device_create_file(dev, &dev_attr_port_number);
76 if (retval)
77 goto exit;
78
65 minor = port->number; 79 minor = port->number;
66 tty_register_device (usb_serial_tty_driver, minor, dev); 80 tty_register_device (usb_serial_tty_driver, minor, dev);
67 dev_info(&port->serial->dev->dev, 81 dev_info(&port->serial->dev->dev,
@@ -84,6 +98,8 @@ static int usb_serial_device_remove (struct device *dev)
84 return -ENODEV; 98 return -ENODEV;
85 } 99 }
86 100
101 device_remove_file(&port->dev, &dev_attr_port_number);
102
87 driver = port->serial->type; 103 driver = port->serial->type;
88 if (driver->port_remove) { 104 if (driver->port_remove) {
89 if (!try_module_get(driver->driver.owner)) { 105 if (!try_module_get(driver->driver.owner)) {
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
new file mode 100644
index 0000000000..6b252ceb39
--- /dev/null
+++ b/drivers/usb/serial/ch341.c
@@ -0,0 +1,354 @@
1/*
2 * Copyright 2007, Frank A Kingswood <frank@kingswood-consulting.co.uk>
3 *
4 * ch341.c implements a serial port driver for the Winchiphead CH341.
5 *
6 * The CH341 device can be used to implement an RS232 asynchronous
7 * serial port, an IEEE-1284 parallel printer port or a memory-like
8 * interface. In all cases the CH341 supports an I2C interface as well.
9 * This driver only supports the asynchronous serial interface.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License version
13 * 2 as published by the Free Software Foundation.
14 */
15
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/tty.h>
19#include <linux/module.h>
20#include <linux/usb.h>
21#include <linux/usb/serial.h>
22#include <linux/serial.h>
23
24#define DEFAULT_BAUD_RATE 2400
25#define DEFAULT_TIMEOUT 1000
26
27static int debug;
28
29static struct usb_device_id id_table [] = {
30 { USB_DEVICE(0x4348, 0x5523) },
31 { },
32};
33MODULE_DEVICE_TABLE(usb, id_table);
34
35struct ch341_private {
36 unsigned baud_rate;
37 u8 dtr;
38 u8 rts;
39};
40
41static int ch341_control_out(struct usb_device *dev, u8 request,
42 u16 value, u16 index)
43{
44 int r;
45 dbg("ch341_control_out(%02x,%02x,%04x,%04x)", USB_DIR_OUT|0x40,
46 (int)request, (int)value, (int)index);
47
48 r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
49 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
50 value, index, NULL, 0, DEFAULT_TIMEOUT);
51
52 return r;
53}
54
55static int ch341_control_in(struct usb_device *dev,
56 u8 request, u16 value, u16 index,
57 char *buf, unsigned bufsize)
58{
59 int r;
60 dbg("ch341_control_in(%02x,%02x,%04x,%04x,%p,%u)", USB_DIR_IN|0x40,
61 (int)request, (int)value, (int)index, buf, (int)bufsize);
62
63 r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
64 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
65 value, index, buf, bufsize, DEFAULT_TIMEOUT);
66 return r;
67}
68
69static int ch341_set_baudrate(struct usb_device *dev,
70 struct ch341_private *priv)
71{
72 short a, b;
73 int r;
74
75 dbg("ch341_set_baudrate(%d)", priv->baud_rate);
76 switch (priv->baud_rate) {
77 case 2400:
78 a = 0xd901;
79 b = 0x0038;
80 break;
81 case 4800:
82 a = 0x6402;
83 b = 0x001f;
84 break;
85 case 9600:
86 a = 0xb202;
87 b = 0x0013;
88 break;
89 case 19200:
90 a = 0xd902;
91 b = 0x000d;
92 break;
93 case 38400:
94 a = 0x6403;
95 b = 0x000a;
96 break;
97 case 115200:
98 a = 0xcc03;
99 b = 0x0008;
100 break;
101 default:
102 return -EINVAL;
103 }
104
105 r = ch341_control_out(dev, 0x9a, 0x1312, a);
106 if (!r)
107 r = ch341_control_out(dev, 0x9a, 0x0f2c, b);
108
109 return r;
110}
111
112static int ch341_set_handshake(struct usb_device *dev,
113 struct ch341_private *priv)
114{
115 dbg("ch341_set_handshake(%d,%d)", priv->dtr, priv->rts);
116 return ch341_control_out(dev, 0xa4,
117 ~((priv->dtr?1<<5:0)|(priv->rts?1<<6:0)), 0);
118}
119
120static int ch341_get_status(struct usb_device *dev)
121{
122 char *buffer;
123 int r;
124 const unsigned size = 8;
125
126 dbg("ch341_get_status()");
127
128 buffer = kmalloc(size, GFP_KERNEL);
129 if (!buffer)
130 return -ENOMEM;
131
132 r = ch341_control_in(dev, 0x95, 0x0706, 0, buffer, size);
133 if ( r < 0)
134 goto out;
135
136 /* Not having the datasheet for the CH341, we ignore the bytes returned
137 * from the device. Return error if the device did not respond in time.
138 */
139 r = 0;
140
141out: kfree(buffer);
142 return r;
143}
144
145/* -------------------------------------------------------------------------- */
146
147static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
148{
149 char *buffer;
150 int r;
151 const unsigned size = 8;
152
153 dbg("ch341_configure()");
154
155 buffer = kmalloc(size, GFP_KERNEL);
156 if (!buffer)
157 return -ENOMEM;
158
159 /* expect two bytes 0x27 0x00 */
160 r = ch341_control_in(dev, 0x5f, 0, 0, buffer, size);
161 if (r < 0)
162 goto out;
163
164 r = ch341_control_out(dev, 0xa1, 0, 0);
165 if (r < 0)
166 goto out;
167
168 r = ch341_set_baudrate(dev, priv);
169 if (r < 0)
170 goto out;
171
172 /* expect two bytes 0x56 0x00 */
173 r = ch341_control_in(dev, 0x95, 0x2518, 0, buffer, size);
174 if (r < 0)
175 goto out;
176
177 r = ch341_control_out(dev, 0x9a, 0x2518, 0x0050);
178 if (r < 0)
179 goto out;
180
181 /* expect 0xff 0xee */
182 r = ch341_get_status(dev);
183 if (r < 0)
184 goto out;
185
186 r = ch341_control_out(dev, 0xa1, 0x501f, 0xd90a);
187 if (r < 0)
188 goto out;
189
190 r = ch341_set_baudrate(dev, priv);
191 if (r < 0)
192 goto out;
193
194 r = ch341_set_handshake(dev, priv);
195 if (r < 0)
196 goto out;
197
198 /* expect 0x9f 0xee */
199 r = ch341_get_status(dev);
200
201out: kfree(buffer);
202 return r;
203}
204
205/* allocate private data */
206static int ch341_attach(struct usb_serial *serial)
207{
208 struct ch341_private *priv;
209 int r;
210
211 dbg("ch341_attach()");
212
213 /* private data */
214 priv = kzalloc(sizeof(struct ch341_private), GFP_KERNEL);
215 if (!priv)
216 return -ENOMEM;
217
218 priv->baud_rate = DEFAULT_BAUD_RATE;
219 priv->dtr = 1;
220 priv->rts = 1;
221
222 r = ch341_configure(serial->dev, priv);
223 if (r < 0)
224 goto error;
225
226 usb_set_serial_port_data(serial->port[0], priv);
227 return 0;
228
229error: kfree(priv);
230 return r;
231}
232
233/* open this device, set default parameters */
234static int ch341_open(struct usb_serial_port *port, struct file *filp)
235{
236 struct usb_serial *serial = port->serial;
237 struct ch341_private *priv = usb_get_serial_port_data(serial->port[0]);
238 int r;
239
240 dbg("ch341_open()");
241
242 priv->baud_rate = DEFAULT_BAUD_RATE;
243 priv->dtr = 1;
244 priv->rts = 1;
245
246 r = ch341_configure(serial->dev, priv);
247 if (r)
248 goto out;
249
250 r = ch341_set_handshake(serial->dev, priv);
251 if (r)
252 goto out;
253
254 r = ch341_set_baudrate(serial->dev, priv);
255 if (r)
256 goto out;
257
258 r = usb_serial_generic_open(port, filp);
259
260out: return r;
261}
262
263/* Old_termios contains the original termios settings and
264 * tty->termios contains the new setting to be used.
265 */
266static void ch341_set_termios(struct usb_serial_port *port,
267 struct ktermios *old_termios)
268{
269 struct ch341_private *priv = usb_get_serial_port_data(port);
270 struct tty_struct *tty = port->tty;
271 unsigned baud_rate;
272
273 dbg("ch341_set_termios()");
274
275 if (!tty || !tty->termios)
276 return;
277
278 baud_rate = tty_get_baud_rate(tty);
279
280 switch (baud_rate) {
281 case 2400:
282 case 4800:
283 case 9600:
284 case 19200:
285 case 38400:
286 case 115200:
287 priv->baud_rate = baud_rate;
288 break;
289 default:
290 dbg("Rate %d not supported, using %d",
291 baud_rate, DEFAULT_BAUD_RATE);
292 priv->baud_rate = DEFAULT_BAUD_RATE;
293 }
294
295 ch341_set_baudrate(port->serial->dev, priv);
296
297 /* Unimplemented:
298 * (cflag & CSIZE) : data bits [5, 8]
299 * (cflag & PARENB) : parity {NONE, EVEN, ODD}
300 * (cflag & CSTOPB) : stop bits [1, 2]
301 */
302}
303
304static struct usb_driver ch341_driver = {
305 .name = "ch341",
306 .probe = usb_serial_probe,
307 .disconnect = usb_serial_disconnect,
308 .id_table = id_table,
309 .no_dynamic_id = 1,
310};
311
312static struct usb_serial_driver ch341_device = {
313 .driver = {
314 .owner = THIS_MODULE,
315 .name = "ch341-uart",
316 },
317 .id_table = id_table,
318 .usb_driver = &ch341_driver,
319 .num_interrupt_in = NUM_DONT_CARE,
320 .num_bulk_in = 1,
321 .num_bulk_out = 1,
322 .num_ports = 1,
323 .open = ch341_open,
324 .set_termios = ch341_set_termios,
325 .attach = ch341_attach,
326};
327
328static int __init ch341_init(void)
329{
330 int retval;
331
332 retval = usb_serial_register(&ch341_device);
333 if (retval)
334 return retval;
335 retval = usb_register(&ch341_driver);
336 if (retval)
337 usb_serial_deregister(&ch341_device);
338 return retval;
339}
340
341static void __exit ch341_exit(void)
342{
343 usb_deregister(&ch341_driver);
344 usb_serial_deregister(&ch341_device);
345}
346
347module_init(ch341_init);
348module_exit(ch341_exit);
349MODULE_LICENSE("GPL");
350
351module_param(debug, bool, S_IRUGO | S_IWUSR);
352MODULE_PARM_DESC(debug, "Debug enabled or not");
353
354/* EOF ch341.c */
diff --git a/drivers/usb/serial/cp2101.c b/drivers/usb/serial/cp2101.c
index 33f6ee50b8..eb7df1835c 100644
--- a/drivers/usb/serial/cp2101.c
+++ b/drivers/usb/serial/cp2101.c
@@ -53,6 +53,7 @@ static void cp2101_shutdown(struct usb_serial*);
53static int debug; 53static int debug;
54 54
55static struct usb_device_id id_table [] = { 55static struct usb_device_id id_table [] = {
56 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
56 { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ 57 { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
57 { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ 58 { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */
58 { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ 59 { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */
@@ -521,7 +522,7 @@ static void cp2101_set_termios (struct usb_serial_port *port,
521 522
522 dbg("%s - port %d", __FUNCTION__, port->number); 523 dbg("%s - port %d", __FUNCTION__, port->number);
523 524
524 if ((!port->tty) || (!port->tty->termios)) { 525 if (!port->tty || !port->tty->termios) {
525 dbg("%s - no tty structures", __FUNCTION__); 526 dbg("%s - no tty structures", __FUNCTION__);
526 return; 527 return;
527 } 528 }
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 2d045857b1..e4c248c98e 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1169,7 +1169,9 @@ static void remove_sysfs_attrs(struct usb_serial_port *port)
1169 /* XXX see create_sysfs_attrs */ 1169 /* XXX see create_sysfs_attrs */
1170 if (priv->chip_type != SIO) { 1170 if (priv->chip_type != SIO) {
1171 device_remove_file(&port->dev, &dev_attr_event_char); 1171 device_remove_file(&port->dev, &dev_attr_event_char);
1172 if (priv->chip_type == FT232BM || priv->chip_type == FT2232C) { 1172 if (priv->chip_type == FT232BM ||
1173 priv->chip_type == FT2232C ||
1174 priv->chip_type == FT232RL) {
1173 device_remove_file(&port->dev, &dev_attr_latency_timer); 1175 device_remove_file(&port->dev, &dev_attr_latency_timer);
1174 } 1176 }
1175 } 1177 }
@@ -2102,6 +2104,7 @@ static int ftdi_tiocmget (struct usb_serial_port *port, struct file *file)
2102 case FT8U232AM: 2104 case FT8U232AM:
2103 case FT232BM: 2105 case FT232BM:
2104 case FT2232C: 2106 case FT2232C:
2107 case FT232RL:
2105 /* the 8U232AM returns a two byte value (the sio is a 1 byte value) - in the same 2108 /* the 8U232AM returns a two byte value (the sio is a 1 byte value) - in the same
2106 format as the data returned from the in point */ 2109 format as the data returned from the in point */
2107 if ((ret = usb_control_msg(port->serial->dev, 2110 if ((ret = usb_control_msg(port->serial->dev,
diff --git a/drivers/usb/serial/funsoft.c b/drivers/usb/serial/funsoft.c
index 4092f6dc9e..b5194dc7d3 100644
--- a/drivers/usb/serial/funsoft.c
+++ b/drivers/usb/serial/funsoft.c
@@ -24,26 +24,6 @@ static struct usb_device_id id_table [] = {
24}; 24};
25MODULE_DEVICE_TABLE(usb, id_table); 25MODULE_DEVICE_TABLE(usb, id_table);
26 26
27static int funsoft_ioctl(struct usb_serial_port *port, struct file *file,
28 unsigned int cmd, unsigned long arg)
29{
30 struct ktermios t;
31
32 dbg("%s - port %d, cmd 0x%04x", __FUNCTION__, port->number, cmd);
33
34 if (cmd == TCSETSF) {
35 if (user_termios_to_kernel_termios(&t, (struct termios __user *)arg))
36 return -EFAULT;
37
38 dbg("%s - iflag:%x oflag:%x cflag:%x lflag:%x", __FUNCTION__,
39 t.c_iflag, t.c_oflag, t.c_cflag, t.c_lflag);
40
41 if (!(t.c_lflag & ICANON))
42 return -EINVAL;
43 }
44 return -ENOIOCTLCMD;
45}
46
47static struct usb_driver funsoft_driver = { 27static struct usb_driver funsoft_driver = {
48 .name = "funsoft", 28 .name = "funsoft",
49 .probe = usb_serial_probe, 29 .probe = usb_serial_probe,
@@ -63,7 +43,6 @@ static struct usb_serial_driver funsoft_device = {
63 .num_bulk_in = NUM_DONT_CARE, 43 .num_bulk_in = NUM_DONT_CARE,
64 .num_bulk_out = NUM_DONT_CARE, 44 .num_bulk_out = NUM_DONT_CARE,
65 .num_ports = 1, 45 .num_ports = 1,
66 .ioctl = funsoft_ioctl,
67}; 46};
68 47
69static int __init funsoft_init(void) 48static int __init funsoft_init(void)
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 6a3a704b58..e836ad07fd 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -256,6 +256,7 @@ static struct usb_device_id ipaq_id_table [] = {
256 { USB_DEVICE(0x04DD, 0x9121) }, /* SHARP WS004SH USB Modem */ 256 { USB_DEVICE(0x04DD, 0x9121) }, /* SHARP WS004SH USB Modem */
257 { USB_DEVICE(0x04DD, 0x9123) }, /* SHARP WS007SH USB Modem */ 257 { USB_DEVICE(0x04DD, 0x9123) }, /* SHARP WS007SH USB Modem */
258 { USB_DEVICE(0x04DD, 0x9151) }, /* SHARP S01SH USB Modem */ 258 { USB_DEVICE(0x04DD, 0x9151) }, /* SHARP S01SH USB Modem */
259 { USB_DEVICE(0x04DD, 0x91AC) }, /* SHARP WS011SH USB Modem */
259 { USB_DEVICE(0x04E8, 0x5F00) }, /* Samsung NEXiO USB Sync */ 260 { USB_DEVICE(0x04E8, 0x5F00) }, /* Samsung NEXiO USB Sync */
260 { USB_DEVICE(0x04E8, 0x5F01) }, /* Samsung NEXiO USB Sync */ 261 { USB_DEVICE(0x04E8, 0x5F01) }, /* Samsung NEXiO USB Sync */
261 { USB_DEVICE(0x04E8, 0x5F02) }, /* Samsung NEXiO USB Sync */ 262 { USB_DEVICE(0x04E8, 0x5F02) }, /* Samsung NEXiO USB Sync */
@@ -646,11 +647,13 @@ static int ipaq_open(struct usb_serial_port *port, struct file *filp)
646 kfree(port->bulk_out_buffer); 647 kfree(port->bulk_out_buffer);
647 port->bulk_in_buffer = kmalloc(URBDATA_SIZE, GFP_KERNEL); 648 port->bulk_in_buffer = kmalloc(URBDATA_SIZE, GFP_KERNEL);
648 if (port->bulk_in_buffer == NULL) { 649 if (port->bulk_in_buffer == NULL) {
650 port->bulk_out_buffer = NULL; /* prevent double free */
649 goto enomem; 651 goto enomem;
650 } 652 }
651 port->bulk_out_buffer = kmalloc(URBDATA_SIZE, GFP_KERNEL); 653 port->bulk_out_buffer = kmalloc(URBDATA_SIZE, GFP_KERNEL);
652 if (port->bulk_out_buffer == NULL) { 654 if (port->bulk_out_buffer == NULL) {
653 kfree(port->bulk_in_buffer); 655 kfree(port->bulk_in_buffer);
656 port->bulk_in_buffer = NULL;
654 goto enomem; 657 goto enomem;
655 } 658 }
656 port->read_urb->transfer_buffer = port->bulk_in_buffer; 659 port->read_urb->transfer_buffer = port->bulk_in_buffer;
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 5a4127e62c..90e3216abd 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -728,24 +728,32 @@ static void klsi_105_set_termios (struct usb_serial_port *port,
728#endif 728#endif
729 } 729 }
730 730
731 switch(cflag & CBAUD) { 731 switch(tty_get_baud_rate(port->tty)) {
732 case B0: /* handled below */ 732 case 0: /* handled below */
733 break; 733 break;
734 case B1200: priv->cfg.baudrate = kl5kusb105a_sio_b1200; 734 case 1200:
735 priv->cfg.baudrate = kl5kusb105a_sio_b1200;
735 break; 736 break;
736 case B2400: priv->cfg.baudrate = kl5kusb105a_sio_b2400; 737 case 2400:
738 priv->cfg.baudrate = kl5kusb105a_sio_b2400;
737 break; 739 break;
738 case B4800: priv->cfg.baudrate = kl5kusb105a_sio_b4800; 740 case 4800:
741 priv->cfg.baudrate = kl5kusb105a_sio_b4800;
739 break; 742 break;
740 case B9600: priv->cfg.baudrate = kl5kusb105a_sio_b9600; 743 case 9600:
744 priv->cfg.baudrate = kl5kusb105a_sio_b9600;
741 break; 745 break;
742 case B19200: priv->cfg.baudrate = kl5kusb105a_sio_b19200; 746 case 19200:
747 priv->cfg.baudrate = kl5kusb105a_sio_b19200;
743 break; 748 break;
744 case B38400: priv->cfg.baudrate = kl5kusb105a_sio_b38400; 749 case 38400:
750 priv->cfg.baudrate = kl5kusb105a_sio_b38400;
745 break; 751 break;
746 case B57600: priv->cfg.baudrate = kl5kusb105a_sio_b57600; 752 case 57600:
753 priv->cfg.baudrate = kl5kusb105a_sio_b57600;
747 break; 754 break;
748 case B115200: priv->cfg.baudrate = kl5kusb105a_sio_b115200; 755 case 115200:
756 priv->cfg.baudrate = kl5kusb105a_sio_b115200;
749 break; 757 break;
750 default: 758 default:
751 err("KLSI USB->Serial converter:" 759 err("KLSI USB->Serial converter:"
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 02a86dbc0e..6f224195bd 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -82,6 +82,7 @@ static int kobil_tiocmset(struct usb_serial_port *port, struct file *file,
82 unsigned int set, unsigned int clear); 82 unsigned int set, unsigned int clear);
83static void kobil_read_int_callback( struct urb *urb ); 83static void kobil_read_int_callback( struct urb *urb );
84static void kobil_write_callback( struct urb *purb ); 84static void kobil_write_callback( struct urb *purb );
85static void kobil_set_termios(struct usb_serial_port *port, struct ktermios *old);
85 86
86 87
87static struct usb_device_id id_table [] = { 88static struct usb_device_id id_table [] = {
@@ -119,6 +120,7 @@ static struct usb_serial_driver kobil_device = {
119 .attach = kobil_startup, 120 .attach = kobil_startup,
120 .shutdown = kobil_shutdown, 121 .shutdown = kobil_shutdown,
121 .ioctl = kobil_ioctl, 122 .ioctl = kobil_ioctl,
123 .set_termios = kobil_set_termios,
122 .tiocmget = kobil_tiocmget, 124 .tiocmget = kobil_tiocmget,
123 .tiocmset = kobil_tiocmset, 125 .tiocmset = kobil_tiocmset,
124 .open = kobil_open, 126 .open = kobil_open,
@@ -137,7 +139,6 @@ struct kobil_private {
137 int cur_pos; // index of the next char to send in buf 139 int cur_pos; // index of the next char to send in buf
138 __u16 device_type; 140 __u16 device_type;
139 int line_state; 141 int line_state;
140 struct ktermios internal_termios;
141}; 142};
142 143
143 144
@@ -216,7 +217,7 @@ static void kobil_shutdown (struct usb_serial *serial)
216 217
217static int kobil_open (struct usb_serial_port *port, struct file *filp) 218static int kobil_open (struct usb_serial_port *port, struct file *filp)
218{ 219{
219 int i, result = 0; 220 int result = 0;
220 struct kobil_private *priv; 221 struct kobil_private *priv;
221 unsigned char *transfer_buffer; 222 unsigned char *transfer_buffer;
222 int transfer_buffer_length = 8; 223 int transfer_buffer_length = 8;
@@ -242,16 +243,6 @@ static int kobil_open (struct usb_serial_port *port, struct file *filp)
242 port->tty->termios->c_iflag = IGNBRK | IGNPAR | IXOFF; 243 port->tty->termios->c_iflag = IGNBRK | IGNPAR | IXOFF;
243 port->tty->termios->c_oflag &= ~ONLCR; // do NOT translate CR to CR-NL (0x0A -> 0x0A 0x0D) 244 port->tty->termios->c_oflag &= ~ONLCR; // do NOT translate CR to CR-NL (0x0A -> 0x0A 0x0D)
244 245
245 // set up internal termios structure
246 priv->internal_termios.c_iflag = port->tty->termios->c_iflag;
247 priv->internal_termios.c_oflag = port->tty->termios->c_oflag;
248 priv->internal_termios.c_cflag = port->tty->termios->c_cflag;
249 priv->internal_termios.c_lflag = port->tty->termios->c_lflag;
250
251 for (i=0; i<NCCS; i++) {
252 priv->internal_termios.c_cc[i] = port->tty->termios->c_cc[i];
253 }
254
255 // allocate memory for transfer buffer 246 // allocate memory for transfer buffer
256 transfer_buffer = kzalloc(transfer_buffer_length, GFP_KERNEL); 247 transfer_buffer = kzalloc(transfer_buffer_length, GFP_KERNEL);
257 if (! transfer_buffer) { 248 if (! transfer_buffer) {
@@ -607,102 +598,79 @@ static int kobil_tiocmset(struct usb_serial_port *port, struct file *file,
607 return (result < 0) ? result : 0; 598 return (result < 0) ? result : 0;
608} 599}
609 600
610 601static void kobil_set_termios(struct usb_serial_port *port, struct ktermios *old)
611static int kobil_ioctl(struct usb_serial_port *port, struct file *file,
612 unsigned int cmd, unsigned long arg)
613{ 602{
614 struct kobil_private * priv; 603 struct kobil_private * priv;
615 int result; 604 int result;
616 unsigned short urb_val = 0; 605 unsigned short urb_val = 0;
617 unsigned char *transfer_buffer; 606 int c_cflag = port->tty->termios->c_cflag;
618 int transfer_buffer_length = 8; 607 speed_t speed;
619 char *settings; 608 void * settings;
620 void __user *user_arg = (void __user *)arg;
621 609
622 priv = usb_get_serial_port_data(port); 610 priv = usb_get_serial_port_data(port);
623 if ((priv->device_type == KOBIL_USBTWIN_PRODUCT_ID) || (priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID)) { 611 if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID || priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID)
624 // This device doesn't support ioctl calls 612 // This device doesn't support ioctl calls
625 return 0; 613 return;
626 }
627
628 switch (cmd) {
629 case TCGETS: // 0x5401
630 if (!access_ok(VERIFY_WRITE, user_arg, sizeof(struct ktermios))) {
631 dbg("%s - port %d Error in access_ok", __FUNCTION__, port->number);
632 return -EFAULT;
633 }
634 if (kernel_termios_to_user_termios((struct ktermios __user *)arg,
635 &priv->internal_termios))
636 return -EFAULT;
637 return 0;
638
639 case TCSETS: // 0x5402
640 if (!(port->tty->termios)) {
641 dbg("%s - port %d Error: port->tty->termios is NULL", __FUNCTION__, port->number);
642 return -ENOTTY;
643 }
644 if (!access_ok(VERIFY_READ, user_arg, sizeof(struct ktermios))) {
645 dbg("%s - port %d Error in access_ok", __FUNCTION__, port->number);
646 return -EFAULT;
647 }
648 if (user_termios_to_kernel_termios(&priv->internal_termios,
649 (struct ktermios __user *)arg))
650 return -EFAULT;
651
652 settings = kzalloc(50, GFP_KERNEL);
653 if (! settings) {
654 return -ENOBUFS;
655 }
656 614
657 switch (priv->internal_termios.c_cflag & CBAUD) { 615 switch (speed = tty_get_baud_rate(port->tty)) {
658 case B1200: 616 case 1200:
659 urb_val = SUSBCR_SBR_1200; 617 urb_val = SUSBCR_SBR_1200;
660 strcat(settings, "1200 ");
661 break; 618 break;
662 case B9600: 619 case 9600:
663 default: 620 default:
664 urb_val = SUSBCR_SBR_9600; 621 urb_val = SUSBCR_SBR_9600;
665 strcat(settings, "9600 ");
666 break; 622 break;
667 } 623 }
624 urb_val |= (c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits : SUSBCR_SPASB_1StopBit;
625
626 settings = kzalloc(50, GFP_KERNEL);
627 if (! settings)
628 return;
668 629
669 urb_val |= (priv->internal_termios.c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits : SUSBCR_SPASB_1StopBit; 630 sprintf(settings, "%d ", speed);
670 strcat(settings, (priv->internal_termios.c_cflag & CSTOPB) ? "2 StopBits " : "1 StopBit ");
671 631
672 if (priv->internal_termios.c_cflag & PARENB) { 632 if (c_cflag & PARENB) {
673 if (priv->internal_termios.c_cflag & PARODD) { 633 if (c_cflag & PARODD) {
674 urb_val |= SUSBCR_SPASB_OddParity; 634 urb_val |= SUSBCR_SPASB_OddParity;
675 strcat(settings, "Odd Parity"); 635 strcat(settings, "Odd Parity");
676 } else {
677 urb_val |= SUSBCR_SPASB_EvenParity;
678 strcat(settings, "Even Parity");
679 }
680 } else { 636 } else {
681 urb_val |= SUSBCR_SPASB_NoParity; 637 urb_val |= SUSBCR_SPASB_EvenParity;
682 strcat(settings, "No Parity"); 638 strcat(settings, "Even Parity");
683 } 639 }
684 dbg("%s - port %d setting port to: %s", __FUNCTION__, port->number, settings ); 640 } else {
641 urb_val |= SUSBCR_SPASB_NoParity;
642 strcat(settings, "No Parity");
643 }
685 644
686 result = usb_control_msg( port->serial->dev, 645 result = usb_control_msg( port->serial->dev,
687 usb_rcvctrlpipe(port->serial->dev, 0 ), 646 usb_rcvctrlpipe(port->serial->dev, 0 ),
688 SUSBCRequest_SetBaudRateParityAndStopBits, 647 SUSBCRequest_SetBaudRateParityAndStopBits,
689 USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT, 648 USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
690 urb_val, 649 urb_val,
691 0, 650 0,
692 settings, 651 settings,
693 0, 652 0,
694 KOBIL_TIMEOUT 653 KOBIL_TIMEOUT
695 ); 654 );
655 kfree(settings);
656}
696 657
697 dbg("%s - port %d Send set_baudrate URB returns: %i", __FUNCTION__, port->number, result); 658static int kobil_ioctl(struct usb_serial_port *port, struct file * file, unsigned int cmd, unsigned long arg)
698 kfree(settings); 659{
660 struct kobil_private * priv = usb_get_serial_port_data(port);
661 unsigned char *transfer_buffer;
662 int transfer_buffer_length = 8;
663 int result;
664
665 if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID || priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID)
666 // This device doesn't support ioctl calls
699 return 0; 667 return 0;
700 668
669 switch (cmd) {
701 case TCFLSH: // 0x540B 670 case TCFLSH: // 0x540B
702 transfer_buffer = kmalloc(transfer_buffer_length, GFP_KERNEL); 671 transfer_buffer = kmalloc(transfer_buffer_length, GFP_KERNEL);
703 if (! transfer_buffer) { 672 if (! transfer_buffer)
704 return -ENOBUFS; 673 return -ENOBUFS;
705 }
706 674
707 result = usb_control_msg( port->serial->dev, 675 result = usb_control_msg( port->serial->dev,
708 usb_rcvctrlpipe(port->serial->dev, 0 ), 676 usb_rcvctrlpipe(port->serial->dev, 0 ),
@@ -716,15 +684,13 @@ static int kobil_ioctl(struct usb_serial_port *port, struct file *file,
716 ); 684 );
717 685
718 dbg("%s - port %d Send reset_all_queues (FLUSH) URB returns: %i", __FUNCTION__, port->number, result); 686 dbg("%s - port %d Send reset_all_queues (FLUSH) URB returns: %i", __FUNCTION__, port->number, result);
719
720 kfree(transfer_buffer); 687 kfree(transfer_buffer);
721 return ((result < 0) ? -EFAULT : 0); 688 return (result < 0) ? -EFAULT : 0;
722 689 default:
690 return -ENOIOCTLCMD;
723 } 691 }
724 return -ENOIOCTLCMD;
725} 692}
726 693
727
728static int __init kobil_init (void) 694static int __init kobil_init (void)
729{ 695{
730 int retval; 696 int retval;
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index e08c9bb403..0dc99f75bb 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -206,20 +206,20 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
206 } 206 }
207 } else { 207 } else {
208 switch (value) { 208 switch (value) {
209 case 300: break; 209 case 300: break;
210 case 600: break; 210 case 600: break;
211 case 1200: break; 211 case 1200: break;
212 case 2400: break; 212 case 2400: break;
213 case 4800: break; 213 case 4800: break;
214 case 9600: break; 214 case 9600: break;
215 case 19200: break; 215 case 19200: break;
216 case 38400: break; 216 case 38400: break;
217 case 57600: break; 217 case 57600: break;
218 case 115200: break; 218 case 115200: break;
219 default: 219 default:
220 err("MCT USB-RS232: unsupported baudrate request 0x%x," 220 err("MCT USB-RS232: unsupported baudrate request 0x%x,"
221 " using default of B9600", value); 221 " using default of B9600", value);
222 value = 9600; 222 value = 9600;
223 } 223 }
224 return 115200/value; 224 return 115200/value;
225 } 225 }
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 64f3f66a7a..d19861166b 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -1144,7 +1144,7 @@ static struct pl2303_buf *pl2303_buf_alloc(unsigned int size)
1144 if (size == 0) 1144 if (size == 0)
1145 return NULL; 1145 return NULL;
1146 1146
1147 pb = (struct pl2303_buf *)kmalloc(sizeof(struct pl2303_buf), GFP_KERNEL); 1147 pb = kmalloc(sizeof(struct pl2303_buf), GFP_KERNEL);
1148 if (pb == NULL) 1148 if (pb == NULL)
1149 return NULL; 1149 return NULL;
1150 1150
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index f9f85f56f0..1da57fd9ea 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -73,6 +73,7 @@ static struct usb_device_id id_table [] = {
73 { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1) }, 73 { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1) },
74 { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65) }, 74 { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65) },
75 { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X75) }, 75 { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X75) },
76 { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_EF81) },
76 { USB_DEVICE(SYNTECH_VENDOR_ID, SYNTECH_PRODUCT_ID) }, 77 { USB_DEVICE(SYNTECH_VENDOR_ID, SYNTECH_PRODUCT_ID) },
77 { USB_DEVICE(NOKIA_CA42_VENDOR_ID, NOKIA_CA42_PRODUCT_ID) }, 78 { USB_DEVICE(NOKIA_CA42_VENDOR_ID, NOKIA_CA42_PRODUCT_ID) },
78 { USB_DEVICE(CA_42_CA42_VENDOR_ID, CA_42_CA42_PRODUCT_ID) }, 79 { USB_DEVICE(CA_42_CA42_VENDOR_ID, CA_42_CA42_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index f9a71d0c10..c39bace5cb 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -59,6 +59,7 @@
59#define SIEMENS_PRODUCT_ID_SX1 0x0001 59#define SIEMENS_PRODUCT_ID_SX1 0x0001
60#define SIEMENS_PRODUCT_ID_X65 0x0003 60#define SIEMENS_PRODUCT_ID_X65 0x0003
61#define SIEMENS_PRODUCT_ID_X75 0x0004 61#define SIEMENS_PRODUCT_ID_X75 0x0004
62#define SIEMENS_PRODUCT_ID_EF81 0x0005
62 63
63#define SYNTECH_VENDOR_ID 0x0745 64#define SYNTECH_VENDOR_ID 0x0745
64#define SYNTECH_PRODUCT_ID 0x0001 65#define SYNTECH_PRODUCT_ID 0x0001
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 51669b7622..4e6dcc199b 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -90,18 +90,12 @@ MODULE_AUTHOR (DRIVER_AUTHOR);
90MODULE_DESCRIPTION (DRIVER_DESC); 90MODULE_DESCRIPTION (DRIVER_DESC);
91MODULE_LICENSE("GPL"); 91MODULE_LICENSE("GPL");
92 92
93#if defined(CONFIG_USBD_SAFE_SERIAL_VENDOR) && !defined(CONFIG_USBD_SAFE_SERIAL_PRODUCT)
94#error "SAFE_SERIAL_VENDOR defined without SAFE_SERIAL_PRODUCT"
95#endif
96
97#if ! defined(CONFIG_USBD_SAFE_SERIAL_VENDOR)
98static __u16 vendor; // no default 93static __u16 vendor; // no default
99static __u16 product; // no default 94static __u16 product; // no default
100module_param(vendor, ushort, 0); 95module_param(vendor, ushort, 0);
101MODULE_PARM_DESC(vendor, "User specified USB idVendor (required)"); 96MODULE_PARM_DESC(vendor, "User specified USB idVendor (required)");
102module_param(product, ushort, 0); 97module_param(product, ushort, 0);
103MODULE_PARM_DESC(product, "User specified USB idProduct (required)"); 98MODULE_PARM_DESC(product, "User specified USB idProduct (required)");
104#endif
105 99
106module_param(debug, bool, S_IRUGO | S_IWUSR); 100module_param(debug, bool, S_IRUGO | S_IWUSR);
107MODULE_PARM_DESC(debug, "Debug enabled or not"); 101MODULE_PARM_DESC(debug, "Debug enabled or not");
@@ -145,11 +139,6 @@ static struct usb_device_id id_table[] = {
145 {MY_USB_DEVICE (0x4dd, 0x8003, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, // Collie 139 {MY_USB_DEVICE (0x4dd, 0x8003, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, // Collie
146 {MY_USB_DEVICE (0x4dd, 0x8004, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, // Collie 140 {MY_USB_DEVICE (0x4dd, 0x8004, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, // Collie
147 {MY_USB_DEVICE (0x5f9, 0xffff, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, // Sharp tmp 141 {MY_USB_DEVICE (0x5f9, 0xffff, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, // Sharp tmp
148#if defined(CONFIG_USB_SAFE_SERIAL_VENDOR)
149 {MY_USB_DEVICE
150 (CONFIG_USB_SAFE_SERIAL_VENDOR, CONFIG_USB_SAFE_SERIAL_PRODUCT, CDC_DEVICE_CLASS,
151 LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)},
152#endif
153 // extra null entry for module 142 // extra null entry for module
154 // vendor/produc parameters 143 // vendor/produc parameters
155 {MY_USB_DEVICE (0, 0, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, 144 {MY_USB_DEVICE (0, 0, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)},
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 9bf01a5efc..4b1bd7def4 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -578,6 +578,17 @@ static void kill_traffic(struct usb_serial_port *port)
578{ 578{
579 usb_kill_urb(port->read_urb); 579 usb_kill_urb(port->read_urb);
580 usb_kill_urb(port->write_urb); 580 usb_kill_urb(port->write_urb);
581 /*
582 * This is tricky.
583 * Some drivers submit the read_urb in the
584 * handler for the write_urb or vice versa
585 * this order determines the order in which
586 * usb_kill_urb() must be used to reliably
587 * kill the URBs. As it is unknown here,
588 * both orders must be used in turn.
589 * The call below is not redundant.
590 */
591 usb_kill_urb(port->read_urb);
581 usb_kill_urb(port->interrupt_in_urb); 592 usb_kill_urb(port->interrupt_in_urb);
582 usb_kill_urb(port->interrupt_out_urb); 593 usb_kill_urb(port->interrupt_out_urb);
583} 594}
@@ -651,16 +662,14 @@ exit:
651 662
652static struct usb_serial_driver *search_serial_device(struct usb_interface *iface) 663static struct usb_serial_driver *search_serial_device(struct usb_interface *iface)
653{ 664{
654 struct list_head *p;
655 const struct usb_device_id *id; 665 const struct usb_device_id *id;
656 struct usb_serial_driver *t; 666 struct usb_serial_driver *drv;
657 667
658 /* Check if the usb id matches a known device */ 668 /* Check if the usb id matches a known device */
659 list_for_each(p, &usb_serial_driver_list) { 669 list_for_each_entry(drv, &usb_serial_driver_list, driver_list) {
660 t = list_entry(p, struct usb_serial_driver, driver_list); 670 id = get_iface_id(drv, iface);
661 id = get_iface_id(t, iface);
662 if (id) 671 if (id)
663 return t; 672 return drv;
664 } 673 }
665 674
666 return NULL; 675 return NULL;
@@ -800,9 +809,6 @@ int usb_serial_probe(struct usb_interface *interface,
800 /* END HORRIBLE HACK FOR PL2303 */ 809 /* END HORRIBLE HACK FOR PL2303 */
801#endif 810#endif
802 811
803 /* found all that we need */
804 dev_info(&interface->dev, "%s converter detected\n", type->description);
805
806#ifdef CONFIG_USB_SERIAL_GENERIC 812#ifdef CONFIG_USB_SERIAL_GENERIC
807 if (type == &usb_serial_generic_device) { 813 if (type == &usb_serial_generic_device) {
808 num_ports = num_bulk_out; 814 num_ports = num_bulk_out;
@@ -836,6 +842,24 @@ int usb_serial_probe(struct usb_interface *interface,
836 serial->num_interrupt_in = num_interrupt_in; 842 serial->num_interrupt_in = num_interrupt_in;
837 serial->num_interrupt_out = num_interrupt_out; 843 serial->num_interrupt_out = num_interrupt_out;
838 844
845 /* check that the device meets the driver's requirements */
846 if ((type->num_interrupt_in != NUM_DONT_CARE &&
847 type->num_interrupt_in != num_interrupt_in)
848 || (type->num_interrupt_out != NUM_DONT_CARE &&
849 type->num_interrupt_out != num_interrupt_out)
850 || (type->num_bulk_in != NUM_DONT_CARE &&
851 type->num_bulk_in != num_bulk_in)
852 || (type->num_bulk_out != NUM_DONT_CARE &&
853 type->num_bulk_out != num_bulk_out)) {
854 dbg("wrong number of endpoints");
855 kfree(serial);
856 return -EIO;
857 }
858
859 /* found all that we need */
860 dev_info(&interface->dev, "%s converter detected\n",
861 type->description);
862
839 /* create our ports, we need as many as the max endpoints */ 863 /* create our ports, we need as many as the max endpoints */
840 /* we don't use num_ports here cauz some devices have more endpoint pairs than ports */ 864 /* we don't use num_ports here cauz some devices have more endpoint pairs than ports */
841 max_endpoints = max(num_bulk_in, num_bulk_out); 865 max_endpoints = max(num_bulk_in, num_bulk_out);
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 30e08c0bcd..7ee087fed9 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -46,7 +46,6 @@ static int visor_probe (struct usb_serial *serial, const struct usb_device_id
46static int visor_calc_num_ports(struct usb_serial *serial); 46static int visor_calc_num_ports(struct usb_serial *serial);
47static void visor_shutdown (struct usb_serial *serial); 47static void visor_shutdown (struct usb_serial *serial);
48static int visor_ioctl (struct usb_serial_port *port, struct file * file, unsigned int cmd, unsigned long arg); 48static int visor_ioctl (struct usb_serial_port *port, struct file * file, unsigned int cmd, unsigned long arg);
49static void visor_set_termios (struct usb_serial_port *port, struct ktermios *old_termios);
50static void visor_write_bulk_callback (struct urb *urb); 49static void visor_write_bulk_callback (struct urb *urb);
51static void visor_read_bulk_callback (struct urb *urb); 50static void visor_read_bulk_callback (struct urb *urb);
52static void visor_read_int_callback (struct urb *urb); 51static void visor_read_int_callback (struct urb *urb);
@@ -203,7 +202,6 @@ static struct usb_serial_driver handspring_device = {
203 .calc_num_ports = visor_calc_num_ports, 202 .calc_num_ports = visor_calc_num_ports,
204 .shutdown = visor_shutdown, 203 .shutdown = visor_shutdown,
205 .ioctl = visor_ioctl, 204 .ioctl = visor_ioctl,
206 .set_termios = visor_set_termios,
207 .write = visor_write, 205 .write = visor_write,
208 .write_room = visor_write_room, 206 .write_room = visor_write_room,
209 .chars_in_buffer = visor_chars_in_buffer, 207 .chars_in_buffer = visor_chars_in_buffer,
@@ -234,7 +232,6 @@ static struct usb_serial_driver clie_5_device = {
234 .calc_num_ports = visor_calc_num_ports, 232 .calc_num_ports = visor_calc_num_ports,
235 .shutdown = visor_shutdown, 233 .shutdown = visor_shutdown,
236 .ioctl = visor_ioctl, 234 .ioctl = visor_ioctl,
237 .set_termios = visor_set_termios,
238 .write = visor_write, 235 .write = visor_write,
239 .write_room = visor_write_room, 236 .write_room = visor_write_room,
240 .chars_in_buffer = visor_chars_in_buffer, 237 .chars_in_buffer = visor_chars_in_buffer,
@@ -262,7 +259,6 @@ static struct usb_serial_driver clie_3_5_device = {
262 .unthrottle = visor_unthrottle, 259 .unthrottle = visor_unthrottle,
263 .attach = clie_3_5_startup, 260 .attach = clie_3_5_startup,
264 .ioctl = visor_ioctl, 261 .ioctl = visor_ioctl,
265 .set_termios = visor_set_termios,
266 .write = visor_write, 262 .write = visor_write,
267 .write_room = visor_write_room, 263 .write_room = visor_write_room,
268 .chars_in_buffer = visor_chars_in_buffer, 264 .chars_in_buffer = visor_chars_in_buffer,
@@ -936,66 +932,6 @@ static int visor_ioctl (struct usb_serial_port *port, struct file * file, unsign
936 return -ENOIOCTLCMD; 932 return -ENOIOCTLCMD;
937} 933}
938 934
939
940/* This function is all nice and good, but we don't change anything based on it :) */
941static void visor_set_termios (struct usb_serial_port *port, struct ktermios *old_termios)
942{
943 unsigned int cflag;
944
945 dbg("%s - port %d", __FUNCTION__, port->number);
946
947 if ((!port->tty) || (!port->tty->termios)) {
948 dbg("%s - no tty structures", __FUNCTION__);
949 return;
950 }
951
952 cflag = port->tty->termios->c_cflag;
953
954 /* get the byte size */
955 switch (cflag & CSIZE) {
956 case CS5: dbg("%s - data bits = 5", __FUNCTION__); break;
957 case CS6: dbg("%s - data bits = 6", __FUNCTION__); break;
958 case CS7: dbg("%s - data bits = 7", __FUNCTION__); break;
959 default:
960 case CS8: dbg("%s - data bits = 8", __FUNCTION__); break;
961 }
962
963 /* determine the parity */
964 if (cflag & PARENB)
965 if (cflag & PARODD)
966 dbg("%s - parity = odd", __FUNCTION__);
967 else
968 dbg("%s - parity = even", __FUNCTION__);
969 else
970 dbg("%s - parity = none", __FUNCTION__);
971
972 /* figure out the stop bits requested */
973 if (cflag & CSTOPB)
974 dbg("%s - stop bits = 2", __FUNCTION__);
975 else
976 dbg("%s - stop bits = 1", __FUNCTION__);
977
978
979 /* figure out the flow control settings */
980 if (cflag & CRTSCTS)
981 dbg("%s - RTS/CTS is enabled", __FUNCTION__);
982 else
983 dbg("%s - RTS/CTS is disabled", __FUNCTION__);
984
985 /* determine software flow control */
986 if (I_IXOFF(port->tty))
987 dbg("%s - XON/XOFF is enabled, XON = %2x, XOFF = %2x",
988 __FUNCTION__, START_CHAR(port->tty), STOP_CHAR(port->tty));
989 else
990 dbg("%s - XON/XOFF is disabled", __FUNCTION__);
991
992 /* get the baud rate wanted */
993 dbg("%s - baud rate = %d", __FUNCTION__, tty_get_baud_rate(port->tty));
994
995 return;
996}
997
998
999static int __init visor_init (void) 935static int __init visor_init (void)
1000{ 936{
1001 int i, retval; 937 int i, retval;
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index 3a41740cad..ee5b42aa53 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -90,3 +90,17 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
90 90
91 return (res ? -1 : 0); 91 return (res ? -1 : 0);
92} 92}
93
94/* This places the HUAWEI E220 devices in multi-port mode */
95int usb_stor_huawei_e220_init(struct us_data *us)
96{
97 int result;
98
99 us->iobuf[0] = 0x1;
100 result = usb_stor_control_msg(us, us->send_ctrl_pipe,
101 USB_REQ_SET_FEATURE,
102 USB_TYPE_STANDARD | USB_RECIP_DEVICE,
103 0x01, 0x0, us->iobuf, 0x1, 1000);
104 US_DEBUGP("usb_control_msg performing result is %d\n", result);
105 return (result ? 0 : -1);
106}
diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
index e2967a4d48..ad3ffd4236 100644
--- a/drivers/usb/storage/initializers.h
+++ b/drivers/usb/storage/initializers.h
@@ -47,3 +47,6 @@ int usb_stor_euscsi_init(struct us_data *us);
47/* This function is required to activate all four slots on the UCR-61S2B 47/* This function is required to activate all four slots on the UCR-61S2B
48 * flash reader */ 48 * flash reader */
49int usb_stor_ucr61s2b_init(struct us_data *us); 49int usb_stor_ucr61s2b_init(struct us_data *us);
50
51/* This places the HUAWEI E220 devices in multi-port mode */
52int usb_stor_huawei_e220_init(struct us_data *us);
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index 5e27297c01..17ca4d7357 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -190,9 +190,6 @@ static int usbat_check_status(struct us_data *us)
190 unsigned char *reply = us->iobuf; 190 unsigned char *reply = us->iobuf;
191 int rc; 191 int rc;
192 192
193 if (!us)
194 return USB_STOR_TRANSPORT_ERROR;
195
196 rc = usbat_get_status(us, reply); 193 rc = usbat_get_status(us, reply);
197 if (rc != USB_STOR_XFER_GOOD) 194 if (rc != USB_STOR_XFER_GOOD)
198 return USB_STOR_TRANSPORT_FAILED; 195 return USB_STOR_TRANSPORT_FAILED;
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index c6b78ba815..9b656ec427 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -198,7 +198,7 @@ UNUSUAL_DEV( 0x0421, 0x044e, 0x0100, 0x0100,
198 US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ), 198 US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ),
199 199
200/* Reported by Bardur Arantsson <bardur@scientician.net> */ 200/* Reported by Bardur Arantsson <bardur@scientician.net> */
201UNUSUAL_DEV( 0x0421, 0x047c, 0x0370, 0x0370, 201UNUSUAL_DEV( 0x0421, 0x047c, 0x0370, 0x0610,
202 "Nokia", 202 "Nokia",
203 "6131", 203 "6131",
204 US_SC_DEVICE, US_PR_DEVICE, NULL, 204 US_SC_DEVICE, US_PR_DEVICE, NULL,
@@ -341,6 +341,13 @@ UNUSUAL_DEV( 0x04b0, 0x040d, 0x0100, 0x0100,
341 US_SC_DEVICE, US_PR_DEVICE, NULL, 341 US_SC_DEVICE, US_PR_DEVICE, NULL,
342 US_FL_FIX_CAPACITY), 342 US_FL_FIX_CAPACITY),
343 343
344/* Reported by Graber and Mike Pagano <mpagano-kernel@mpagano.com> */
345UNUSUAL_DEV( 0x04b0, 0x040f, 0x0200, 0x0200,
346 "NIKON",
347 "NIKON DSC D200",
348 US_SC_DEVICE, US_PR_DEVICE, NULL,
349 US_FL_FIX_CAPACITY),
350
344/* Reported by Emil Larsson <emil@swip.net> */ 351/* Reported by Emil Larsson <emil@swip.net> */
345UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0101, 352UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0101,
346 "NIKON", 353 "NIKON",
@@ -355,6 +362,20 @@ UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0110,
355 US_SC_DEVICE, US_PR_DEVICE, NULL, 362 US_SC_DEVICE, US_PR_DEVICE, NULL,
356 US_FL_FIX_CAPACITY), 363 US_FL_FIX_CAPACITY),
357 364
365/* Reported by Paul Check <paul@openstreet.com> */
366UNUSUAL_DEV( 0x04b0, 0x0415, 0x0100, 0x0100,
367 "NIKON",
368 "NIKON DSC D2Xs",
369 US_SC_DEVICE, US_PR_DEVICE, NULL,
370 US_FL_FIX_CAPACITY),
371
372/* Reported by Shan Destromp (shansan@gmail.com) */
373UNUSUAL_DEV( 0x04b0, 0x0417, 0x0100, 0x0100,
374 "NIKON",
375 "NIKON DSC D40X",
376 US_SC_DEVICE, US_PR_DEVICE, NULL,
377 US_FL_FIX_CAPACITY),
378
358/* BENQ DC5330 379/* BENQ DC5330
359 * Reported by Manuel Fombuena <mfombuena@ya.com> and 380 * Reported by Manuel Fombuena <mfombuena@ya.com> and
360 * Frank Copeland <fjc@thingy.apana.org.au> */ 381 * Frank Copeland <fjc@thingy.apana.org.au> */
@@ -1463,6 +1484,17 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
1463 US_SC_DEVICE, US_PR_DEVICE, NULL, 1484 US_SC_DEVICE, US_PR_DEVICE, NULL,
1464 US_FL_IGNORE_RESIDUE ), 1485 US_FL_IGNORE_RESIDUE ),
1465 1486
1487/* Reported by fangxiaozhi <fangxiaozhi60675@huawei.com>
1488 * and by linlei <linlei83@huawei.com>
1489 * Patch reworked by Johann Wilhelm <johann.wilhelm@student.tugraz.at>
1490 * This brings the HUAWEI E220 devices into multi-port mode
1491 */
1492UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000,
1493 "HUAWEI MOBILE",
1494 "Mass Storage",
1495 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1496 0),
1497
1466/* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */ 1498/* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
1467UNUSUAL_DEV( 0x132b, 0x000b, 0x0001, 0x0001, 1499UNUSUAL_DEV( 0x132b, 0x000b, 0x0001, 0x0001,
1468 "Minolta", 1500 "Minolta",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 5918166706..3451e8d03a 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -960,6 +960,10 @@ static int storage_probe(struct usb_interface *intf,
960 return -ENOMEM; 960 return -ENOMEM;
961 } 961 }
962 962
963 /*
964 * Allow 16-byte CDBs and thus > 2TB
965 */
966 host->max_cmd_len = 16;
963 us = host_to_us(host); 967 us = host_to_us(host);
964 memset(us, 0, sizeof(struct us_data)); 968 memset(us, 0, sizeof(struct us_data));
965 mutex_init(&(us->dev_mutex)); 969 mutex_init(&(us->dev_mutex));
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 8de11deb5d..c815a40e16 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -125,6 +125,7 @@ static int skel_open(struct inode *inode, struct file *file)
125 125
126 /* save our object in the file's private structure */ 126 /* save our object in the file's private structure */
127 file->private_data = dev; 127 file->private_data = dev;
128 mutex_unlock(&dev->io_mutex);
128 129
129exit: 130exit:
130 return retval; 131 return retval;
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 0899fccbd5..fbea2bd129 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -125,8 +125,8 @@ static int hp680bl_remove(struct platform_device *pdev)
125{ 125{
126 struct backlight_device *bd = platform_get_drvdata(pdev); 126 struct backlight_device *bd = platform_get_drvdata(pdev);
127 127
128 hp680bl_data.brightness = 0; 128 bd->props.brightness = 0;
129 hp680bl_data.power = 0; 129 bd->props.power = 0;
130 hp680bl_send_intensity(bd); 130 hp680bl_send_intensity(bd);
131 131
132 backlight_device_unregister(bd); 132 backlight_device_unregister(bd);
diff --git a/drivers/video/output.c b/drivers/video/output.c
index 1473f2c892..f2df5519c9 100644
--- a/drivers/video/output.c
+++ b/drivers/video/output.c
@@ -31,7 +31,8 @@ MODULE_DESCRIPTION("Display Output Switcher Lowlevel Control Abstraction");
31MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
32MODULE_AUTHOR("Luming Yu <luming.yu@intel.com>"); 32MODULE_AUTHOR("Luming Yu <luming.yu@intel.com>");
33 33
34static ssize_t video_output_show_state(struct class_device *dev,char *buf) 34static ssize_t video_output_show_state(struct device *dev,
35 struct device_attribute *attr, char *buf)
35{ 36{
36 ssize_t ret_size = 0; 37 ssize_t ret_size = 0;
37 struct output_device *od = to_output_device(dev); 38 struct output_device *od = to_output_device(dev);
@@ -40,8 +41,9 @@ static ssize_t video_output_show_state(struct class_device *dev,char *buf)
40 return ret_size; 41 return ret_size;
41} 42}
42 43
43static ssize_t video_output_store_state(struct class_device *dev, 44static ssize_t video_output_store_state(struct device *dev,
44 const char *buf,size_t count) 45 struct device_attribute *attr,
46 const char *buf,size_t count)
45{ 47{
46 char *endp; 48 char *endp;
47 struct output_device *od = to_output_device(dev); 49 struct output_device *od = to_output_device(dev);
@@ -60,21 +62,22 @@ static ssize_t video_output_store_state(struct class_device *dev,
60 return count; 62 return count;
61} 63}
62 64
63static void video_output_class_release(struct class_device *dev) 65static void video_output_release(struct device *dev)
64{ 66{
65 struct output_device *od = to_output_device(dev); 67 struct output_device *od = to_output_device(dev);
66 kfree(od); 68 kfree(od);
67} 69}
68 70
69static struct class_device_attribute video_output_attributes[] = { 71static struct device_attribute video_output_attributes[] = {
70 __ATTR(state, 0644, video_output_show_state, video_output_store_state), 72 __ATTR(state, 0644, video_output_show_state, video_output_store_state),
71 __ATTR_NULL, 73 __ATTR_NULL,
72}; 74};
73 75
76
74static struct class video_output_class = { 77static struct class video_output_class = {
75 .name = "video_output", 78 .name = "video_output",
76 .release = video_output_class_release, 79 .dev_release = video_output_release,
77 .class_dev_attrs = video_output_attributes, 80 .dev_attrs = video_output_attributes,
78}; 81};
79 82
80struct output_device *video_output_register(const char *name, 83struct output_device *video_output_register(const char *name,
@@ -91,11 +94,11 @@ struct output_device *video_output_register(const char *name,
91 goto error_return; 94 goto error_return;
92 } 95 }
93 new_dev->props = op; 96 new_dev->props = op;
94 new_dev->class_dev.class = &video_output_class; 97 new_dev->dev.class = &video_output_class;
95 new_dev->class_dev.dev = dev; 98 new_dev->dev.parent = dev;
96 strlcpy(new_dev->class_dev.class_id,name,KOBJ_NAME_LEN); 99 strlcpy(new_dev->dev.bus_id,name, BUS_ID_SIZE);
97 class_set_devdata(&new_dev->class_dev,devdata); 100 dev_set_drvdata(&new_dev->dev, devdata);
98 ret_code = class_device_register(&new_dev->class_dev); 101 ret_code = device_register(&new_dev->dev);
99 if (ret_code) { 102 if (ret_code) {
100 kfree(new_dev); 103 kfree(new_dev);
101 goto error_return; 104 goto error_return;
@@ -111,7 +114,7 @@ void video_output_unregister(struct output_device *dev)
111{ 114{
112 if (!dev) 115 if (!dev)
113 return; 116 return;
114 class_device_unregister(&dev->class_dev); 117 device_unregister(&dev->dev);
115} 118}
116EXPORT_SYMBOL(video_output_unregister); 119EXPORT_SYMBOL(video_output_unregister);
117 120
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index 7d6c29800d..06805c9b23 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -667,6 +667,8 @@ static int pvr2_init_cable(void)
667 related */ 667 related */
668 if (cable_type == CT_COMPOSITE) 668 if (cable_type == CT_COMPOSITE)
669 fb_writel(3 << 8, VOUTC); 669 fb_writel(3 << 8, VOUTC);
670 else if (cable_type == CT_RGB)
671 fb_writel(1 << 9, VOUTC);
670 else 672 else
671 fb_writel(0, VOUTC); 673 fb_writel(0, VOUTC);
672 674
@@ -890,7 +892,7 @@ static int __init pvr2fb_dc_init(void)
890 pvr2_fix.mmio_start = 0xa05f8000; /* registers start here */ 892 pvr2_fix.mmio_start = 0xa05f8000; /* registers start here */
891 pvr2_fix.mmio_len = 0x2000; 893 pvr2_fix.mmio_len = 0x2000;
892 894
893 if (request_irq(HW_EVENT_VSYNC, pvr2fb_interrupt, 0, 895 if (request_irq(HW_EVENT_VSYNC, pvr2fb_interrupt, IRQF_SHARED,
894 "pvr2 VBL handler", fb_info)) { 896 "pvr2 VBL handler", fb_info)) {
895 return -EBUSY; 897 return -EBUSY;
896 } 898 }
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index a593f900ef..070217322c 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -197,7 +197,7 @@ static struct w1_family w1_default_family = {
197 .fops = &w1_default_fops, 197 .fops = &w1_default_fops,
198}; 198};
199 199
200static int w1_uevent(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size); 200static int w1_uevent(struct device *dev, struct kobj_uevent_env *env);
201 201
202static struct bus_type w1_bus_type = { 202static struct bus_type w1_bus_type = {
203 .name = "w1", 203 .name = "w1",
@@ -396,13 +396,12 @@ static void w1_destroy_master_attributes(struct w1_master *master)
396} 396}
397 397
398#ifdef CONFIG_HOTPLUG 398#ifdef CONFIG_HOTPLUG
399static int w1_uevent(struct device *dev, char **envp, int num_envp, 399static int w1_uevent(struct device *dev, struct kobj_uevent_env *env)
400 char *buffer, int buffer_size)
401{ 400{
402 struct w1_master *md = NULL; 401 struct w1_master *md = NULL;
403 struct w1_slave *sl = NULL; 402 struct w1_slave *sl = NULL;
404 char *event_owner, *name; 403 char *event_owner, *name;
405 int err, cur_index=0, cur_len=0; 404 int err;
406 405
407 if (dev->driver == &w1_master_driver) { 406 if (dev->driver == &w1_master_driver) {
408 md = container_of(dev, struct w1_master, dev); 407 md = container_of(dev, struct w1_master, dev);
@@ -423,23 +422,19 @@ static int w1_uevent(struct device *dev, char **envp, int num_envp,
423 if (dev->driver != &w1_slave_driver || !sl) 422 if (dev->driver != &w1_slave_driver || !sl)
424 return 0; 423 return 0;
425 424
426 err = add_uevent_var(envp, num_envp, &cur_index, buffer, buffer_size, 425 err = add_uevent_var(env, "W1_FID=%02X", sl->reg_num.family);
427 &cur_len, "W1_FID=%02X", sl->reg_num.family);
428 if (err) 426 if (err)
429 return err; 427 return err;
430 428
431 err = add_uevent_var(envp, num_envp, &cur_index, buffer, buffer_size, 429 err = add_uevent_var(env, "W1_SLAVE_ID=%024LX",
432 &cur_len, "W1_SLAVE_ID=%024LX", 430 (unsigned long long)sl->reg_num.id);
433 (unsigned long long)sl->reg_num.id);
434 envp[cur_index] = NULL;
435 if (err) 431 if (err)
436 return err; 432 return err;
437 433
438 return 0; 434 return 0;
439}; 435};
440#else 436#else
441static int w1_uevent(struct device *dev, char **envp, int num_envp, 437static int w1_uevent(struct device *dev, struct kobj_uevent_env *env)
442 char *buffer, int buffer_size)
443{ 438{
444 return 0; 439 return 0;
445} 440}